changeset:   5511:5394ec69f112
parent:      5495:39127bb12d32
parent:      5510:ce8969c36762
child:       5512:11237ee74aae
author:      rbackman
date:        Fri, 09 Aug 2013 18:05:00 +0200
summary:     Merge

src/os_cpu/bsd_x86/vm/bsd_x86_32.ad
src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
src/os_cpu/linux_x86/vm/linux_x86_32.ad
src/os_cpu/linux_x86/vm/linux_x86_64.ad
src/os_cpu/solaris_sparc/vm/solaris_sparc.ad
src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
src/os_cpu/windows_x86/vm/windows_x86_32.ad
src/os_cpu/windows_x86/vm/windows_x86_64.ad
     1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Fri Aug 09 01:39:11 2013 -0700
     1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Fri Aug 09 18:05:00 2013 +0200
     1.3 @@ -44,7 +44,7 @@
     1.4      Type type      = db.lookupType("PhaseCFG");
     1.5      numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     1.6      blocksField = type.getAddressField("_blocks");
     1.7 -    bbsField = type.getAddressField("_bbs");
     1.8 +    bbsField = type.getAddressField("_node_to_block_mapping");
     1.9      brootField = type.getAddressField("_broot");
    1.10    }
    1.11  
     2.1 --- a/make/bsd/makefiles/adlc.make	Fri Aug 09 01:39:11 2013 -0700
     2.2 +++ b/make/bsd/makefiles/adlc.make	Fri Aug 09 18:05:00 2013 +0200
     2.3 @@ -41,13 +41,11 @@
     2.4  
     2.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     2.6    SOURCES.AD = \
     2.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     2.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     2.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    2.10  else
    2.11    SOURCES.AD = \
    2.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    2.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    2.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    2.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    2.16  endif
    2.17  
    2.18  EXEC	= $(OUTDIR)/adlc
     3.1 --- a/make/linux/makefiles/adlc.make	Fri Aug 09 01:39:11 2013 -0700
     3.2 +++ b/make/linux/makefiles/adlc.make	Fri Aug 09 18:05:00 2013 +0200
     3.3 @@ -41,13 +41,11 @@
     3.4  
     3.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     3.6    SOURCES.AD = \
     3.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     3.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     3.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    3.10  else
    3.11    SOURCES.AD = \
    3.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    3.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    3.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    3.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    3.16  endif
    3.17  
    3.18  EXEC	= $(OUTDIR)/adlc
     4.1 --- a/make/solaris/makefiles/adlc.make	Fri Aug 09 01:39:11 2013 -0700
     4.2 +++ b/make/solaris/makefiles/adlc.make	Fri Aug 09 18:05:00 2013 +0200
     4.3 @@ -42,13 +42,11 @@
     4.4  
     4.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     4.6    SOURCES.AD = \
     4.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     4.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     4.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    4.10  else
    4.11    SOURCES.AD = \
    4.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    4.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    4.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    4.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    4.16  endif
    4.17  
    4.18  EXEC	= $(OUTDIR)/adlc
     5.1 --- a/make/windows/makefiles/adlc.make	Fri Aug 09 01:39:11 2013 -0700
     5.2 +++ b/make/windows/makefiles/adlc.make	Fri Aug 09 18:05:00 2013 +0200
     5.3 @@ -55,13 +55,11 @@
     5.4  
     5.5  !if "$(Platform_arch_model)" == "$(Platform_arch)"
     5.6  SOURCES_AD=\
     5.7 -  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
     5.8 -  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
     5.9 +  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad 
    5.10  !else
    5.11  SOURCES_AD=\
    5.12    $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
    5.13 -  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
    5.14 -  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
    5.15 +  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad 
    5.16  !endif
    5.17  
    5.18  # NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
     6.1 --- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Fri Aug 09 01:39:11 2013 -0700
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,26 +0,0 @@
     6.4 -//
     6.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     6.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.7 -//
     6.8 -// This code is free software; you can redistribute it and/or modify it
     6.9 -// under the terms of the GNU General Public License version 2 only, as
    6.10 -// published by the Free Software Foundation.
    6.11 -//
    6.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
    6.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    6.15 -// version 2 for more details (a copy is included in the LICENSE file that
    6.16 -// accompanied this code).
    6.17 -//
    6.18 -// You should have received a copy of the GNU General Public License version
    6.19 -// 2 along with this work; if not, write to the Free Software Foundation,
    6.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 -//
    6.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.23 -// or visit www.oracle.com if you need additional information or have any
    6.24 -// questions.
    6.25 -//
    6.26 -//
    6.27 -
    6.28 -// X86 Bsd Architecture Description File
    6.29 -
     7.1 --- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Fri Aug 09 01:39:11 2013 -0700
     7.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     7.3 @@ -1,65 +0,0 @@
     7.4 -//
     7.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
     7.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.7 -//
     7.8 -// This code is free software; you can redistribute it and/or modify it
     7.9 -// under the terms of the GNU General Public License version 2 only, as
    7.10 -// published by the Free Software Foundation.
    7.11 -//
    7.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
    7.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    7.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    7.15 -// version 2 for more details (a copy is included in the LICENSE file that
    7.16 -// accompanied this code).
    7.17 -//
    7.18 -// You should have received a copy of the GNU General Public License version
    7.19 -// 2 along with this work; if not, write to the Free Software Foundation,
    7.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    7.21 -//
    7.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    7.23 -// or visit www.oracle.com if you need additional information or have any
    7.24 -// questions.
    7.25 -//
    7.26 -//
    7.27 -
    7.28 -// AMD64 Bsd Architecture Description File
    7.29 -
    7.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
    7.31 -// This block specifies the encoding classes used by the compiler to
    7.32 -// output byte streams.  Encoding classes generate functions which are
    7.33 -// called by Machine Instruction Nodes in order to generate the bit
    7.34 -// encoding of the instruction.  Operands specify their base encoding
    7.35 -// interface with the interface keyword.  There are currently
    7.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
    7.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
    7.38 -// which returns its register number when queried.  CONST_INTER causes
    7.39 -// an operand to generate a function which returns the value of the
    7.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
    7.41 -// four functions which return the Base Register, the Index Register,
    7.42 -// the Scale Value, and the Offset Value of the operand when queried.
    7.43 -// COND_INTER causes an operand to generate six functions which return
    7.44 -// the encoding code (ie - encoding bits for the instruction)
    7.45 -// associated with each basic boolean condition for a conditional
    7.46 -// instruction.  Instructions specify two basic values for encoding.
    7.47 -// They use the ins_encode keyword to specify their encoding class
    7.48 -// (which must be one of the class names specified in the encoding
    7.49 -// block), and they use the opcode keyword to specify, in order, their
    7.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
    7.51 -// which a particular instruction needs for encoding need to be
    7.52 -// specified.
    7.53 -encode %{
    7.54 -  // Build emit functions for each basic byte or larger field in the intel
    7.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
    7.56 -  // code in the enc_class source block.  Emit functions will live in the
    7.57 -  // main source block for now.  In future, we can generalize this by
    7.58 -  // adding a syntax that specifies the sizes of fields in an order,
    7.59 -  // so that the adlc can build the emit functions automagically
    7.60 -
    7.61 -%}
    7.62 -
    7.63 -
    7.64 -// Platform dependent source
    7.65 -
    7.66 -source %{
    7.67 -
    7.68 -%}
     8.1 --- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Fri Aug 09 01:39:11 2013 -0700
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,26 +0,0 @@
     8.4 -//
     8.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     8.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.7 -//
     8.8 -// This code is free software; you can redistribute it and/or modify it
     8.9 -// under the terms of the GNU General Public License version 2 only, as
    8.10 -// published by the Free Software Foundation.
    8.11 -//
    8.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
    8.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.15 -// version 2 for more details (a copy is included in the LICENSE file that
    8.16 -// accompanied this code).
    8.17 -//
    8.18 -// You should have received a copy of the GNU General Public License version
    8.19 -// 2 along with this work; if not, write to the Free Software Foundation,
    8.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 -//
    8.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.23 -// or visit www.oracle.com if you need additional information or have any
    8.24 -// questions.
    8.25 -//
    8.26 -//
    8.27 -
    8.28 -// X86 Linux Architecture Description File
    8.29 -
     9.1 --- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Fri Aug 09 01:39:11 2013 -0700
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,65 +0,0 @@
     9.4 -//
     9.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
     9.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 -//
     9.8 -// This code is free software; you can redistribute it and/or modify it
     9.9 -// under the terms of the GNU General Public License version 2 only, as
    9.10 -// published by the Free Software Foundation.
    9.11 -//
    9.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 -// version 2 for more details (a copy is included in the LICENSE file that
    9.16 -// accompanied this code).
    9.17 -//
    9.18 -// You should have received a copy of the GNU General Public License version
    9.19 -// 2 along with this work; if not, write to the Free Software Foundation,
    9.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 -//
    9.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    9.23 -// or visit www.oracle.com if you need additional information or have any
    9.24 -// questions.
    9.25 -//
    9.26 -//
    9.27 -
    9.28 -// AMD64 Linux Architecture Description File
    9.29 -
    9.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
    9.31 -// This block specifies the encoding classes used by the compiler to
    9.32 -// output byte streams.  Encoding classes generate functions which are
    9.33 -// called by Machine Instruction Nodes in order to generate the bit
    9.34 -// encoding of the instruction.  Operands specify their base encoding
    9.35 -// interface with the interface keyword.  There are currently
    9.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
    9.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
    9.38 -// which returns its register number when queried.  CONST_INTER causes
    9.39 -// an operand to generate a function which returns the value of the
    9.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
    9.41 -// four functions which return the Base Register, the Index Register,
    9.42 -// the Scale Value, and the Offset Value of the operand when queried.
    9.43 -// COND_INTER causes an operand to generate six functions which return
    9.44 -// the encoding code (ie - encoding bits for the instruction)
    9.45 -// associated with each basic boolean condition for a conditional
    9.46 -// instruction.  Instructions specify two basic values for encoding.
    9.47 -// They use the ins_encode keyword to specify their encoding class
    9.48 -// (which must be one of the class names specified in the encoding
    9.49 -// block), and they use the opcode keyword to specify, in order, their
    9.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
    9.51 -// which a particular instruction needs for encoding need to be
    9.52 -// specified.
    9.53 -encode %{
    9.54 -  // Build emit functions for each basic byte or larger field in the intel
    9.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
    9.56 -  // code in the enc_class source block.  Emit functions will live in the
    9.57 -  // main source block for now.  In future, we can generalize this by
    9.58 -  // adding a syntax that specifies the sizes of fields in an order,
    9.59 -  // so that the adlc can build the emit functions automagically
    9.60 -
    9.61 -%}
    9.62 -
    9.63 -
    9.64 -// Platform dependent source
    9.65 -
    9.66 -source %{
    9.67 -
    9.68 -%}
    10.1 --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.ad	Fri Aug 09 01:39:11 2013 -0700
    10.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    10.3 @@ -1,27 +0,0 @@
    10.4 -//
    10.5 -// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
    10.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.7 -//
    10.8 -// This code is free software; you can redistribute it and/or modify it
    10.9 -// under the terms of the GNU General Public License version 2 only, as
   10.10 -// published by the Free Software Foundation.
   10.11 -//
   10.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   10.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   10.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   10.15 -// version 2 for more details (a copy is included in the LICENSE file that
   10.16 -// accompanied this code).
   10.17 -//
   10.18 -// You should have received a copy of the GNU General Public License version
   10.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   10.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   10.21 -//
   10.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   10.23 -// or visit www.oracle.com if you need additional information or have any
   10.24 -// questions.
   10.25 -//
   10.26 -
   10.27 -//
   10.28 -//
   10.29 -
   10.30 -// SPARC Solaris Architecture Description File
    11.1 --- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Fri Aug 09 01:39:11 2013 -0700
    11.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    11.3 @@ -1,26 +0,0 @@
    11.4 -//
    11.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    11.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.7 -//
    11.8 -// This code is free software; you can redistribute it and/or modify it
    11.9 -// under the terms of the GNU General Public License version 2 only, as
   11.10 -// published by the Free Software Foundation.
   11.11 -//
   11.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   11.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   11.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   11.15 -// version 2 for more details (a copy is included in the LICENSE file that
   11.16 -// accompanied this code).
   11.17 -//
   11.18 -// You should have received a copy of the GNU General Public License version
   11.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   11.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   11.21 -//
   11.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   11.23 -// or visit www.oracle.com if you need additional information or have any
   11.24 -// questions.
   11.25 -//
   11.26 -//
   11.27 -
   11.28 -// X86 Solaris Architecture Description File
   11.29 -
    12.1 --- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Fri Aug 09 01:39:11 2013 -0700
    12.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.3 @@ -1,63 +0,0 @@
    12.4 -//
    12.5 -// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
    12.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.7 -//
    12.8 -// This code is free software; you can redistribute it and/or modify it
    12.9 -// under the terms of the GNU General Public License version 2 only, as
   12.10 -// published by the Free Software Foundation.
   12.11 -//
   12.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   12.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12.15 -// version 2 for more details (a copy is included in the LICENSE file that
   12.16 -// accompanied this code).
   12.17 -//
   12.18 -// You should have received a copy of the GNU General Public License version
   12.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   12.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   12.21 -//
   12.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   12.23 -// or visit www.oracle.com if you need additional information or have any
   12.24 -// questions.
   12.25 -//
   12.26 -//
   12.27 -
   12.28 -// AMD64 Solaris Architecture Description File
   12.29 -
   12.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
   12.31 -// This block specifies the encoding classes used by the compiler to
   12.32 -// output byte streams.  Encoding classes generate functions which are
   12.33 -// called by Machine Instruction Nodes in order to generate the bit
   12.34 -// encoding of the instruction.  Operands specify their base encoding
   12.35 -// interface with the interface keyword.  There are currently
   12.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
   12.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
   12.38 -// which returns its register number when queried.  CONST_INTER causes
   12.39 -// an operand to generate a function which returns the value of the
   12.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
   12.41 -// four functions which return the Base Register, the Index Register,
   12.42 -// the Scale Value, and the Offset Value of the operand when queried.
   12.43 -// COND_INTER causes an operand to generate six functions which return
   12.44 -// the encoding code (ie - encoding bits for the instruction)
   12.45 -// associated with each basic boolean condition for a conditional
   12.46 -// instruction.  Instructions specify two basic values for encoding.
   12.47 -// They use the ins_encode keyword to specify their encoding class
   12.48 -// (which must be one of the class names specified in the encoding
   12.49 -// block), and they use the opcode keyword to specify, in order, their
   12.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
   12.51 -// which a particular instruction needs for encoding need to be
   12.52 -// specified.
   12.53 -encode %{
   12.54 -  // Build emit functions for each basic byte or larger field in the intel
   12.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   12.56 -  // code in the enc_class source block.  Emit functions will live in the
   12.57 -  // main source block for now.  In future, we can generalize this by
   12.58 -  // adding a syntax that specifies the sizes of fields in an order,
   12.59 -  // so that the adlc can build the emit functions automagically
   12.60 -%}
   12.61 -
   12.62 -
   12.63 -// Platform dependent source
   12.64 -
   12.65 -source %{
   12.66 -%}
    13.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Fri Aug 09 01:39:11 2013 -0700
    13.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    13.3 @@ -1,26 +0,0 @@
    13.4 -//
    13.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.7 -//
    13.8 -// This code is free software; you can redistribute it and/or modify it
    13.9 -// under the terms of the GNU General Public License version 2 only, as
   13.10 -// published by the Free Software Foundation.
   13.11 -//
   13.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   13.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   13.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   13.15 -// version 2 for more details (a copy is included in the LICENSE file that
   13.16 -// accompanied this code).
   13.17 -//
   13.18 -// You should have received a copy of the GNU General Public License version
   13.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   13.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   13.21 -//
   13.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   13.23 -// or visit www.oracle.com if you need additional information or have any
   13.24 -// questions.
   13.25 -//
   13.26 -//
   13.27 -
   13.28 -// X86 Win32 Architecture Description File
   13.29 -
    14.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Fri Aug 09 01:39:11 2013 -0700
    14.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    14.3 @@ -1,63 +0,0 @@
    14.4 -//
    14.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    14.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.7 -//
    14.8 -// This code is free software; you can redistribute it and/or modify it
    14.9 -// under the terms of the GNU General Public License version 2 only, as
   14.10 -// published by the Free Software Foundation.
   14.11 -//
   14.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   14.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   14.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   14.15 -// version 2 for more details (a copy is included in the LICENSE file that
   14.16 -// accompanied this code).
   14.17 -//
   14.18 -// You should have received a copy of the GNU General Public License version
   14.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   14.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   14.21 -//
   14.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   14.23 -// or visit www.oracle.com if you need additional information or have any
   14.24 -// questions.
   14.25 -//
   14.26 -//
   14.27 -
   14.28 -// AMD64 Win32 Architecture Description File
   14.29 -
   14.30 -//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
   14.31 -// This block specifies the encoding classes used by the compiler to output
   14.32 -// byte streams.  Encoding classes generate functions which are called by
   14.33 -// Machine Instruction Nodes in order to generate the bit encoding of the
   14.34 -// instruction.  Operands specify their base encoding interface with the
   14.35 -// interface keyword.  There are currently supported four interfaces,
   14.36 -// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
   14.37 -// operand to generate a function which returns its register number when
   14.38 -// queried.   CONST_INTER causes an operand to generate a function which
   14.39 -// returns the value of the constant when queried.  MEMORY_INTER causes an
   14.40 -// operand to generate four functions which return the Base Register, the
   14.41 -// Index Register, the Scale Value, and the Offset Value of the operand when
   14.42 -// queried.  COND_INTER causes an operand to generate six functions which
   14.43 -// return the encoding code (ie - encoding bits for the instruction)
   14.44 -// associated with each basic boolean condition for a conditional instruction.
   14.45 -// Instructions specify two basic values for encoding.  They use the
   14.46 -// ins_encode keyword to specify their encoding class (which must be one of
   14.47 -// the class names specified in the encoding block), and they use the
   14.48 -// opcode keyword to specify, in order, their primary, secondary, and
   14.49 -// tertiary opcode.  Only the opcode sections which a particular instruction
   14.50 -// needs for encoding need to be specified.
   14.51 -encode %{
   14.52 -  // Build emit functions for each basic byte or larger field in the intel
   14.53 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   14.54 -  // code in the enc_class source block.  Emit functions will live in the
   14.55 -  // main source block for now.  In future, we can generalize this by
   14.56 -  // adding a syntax that specifies the sizes of fields in an order,
   14.57 -  // so that the adlc can build the emit functions automagically
   14.58 -
   14.59 -%}
   14.60 -
   14.61 -
   14.62 -// Platform dependent source
   14.63 -
   14.64 -source %{
   14.65 -
   14.66 -%}
    15.1 --- a/src/share/vm/opto/block.cpp	Fri Aug 09 01:39:11 2013 -0700
    15.2 +++ b/src/share/vm/opto/block.cpp	Fri Aug 09 18:05:00 2013 +0200
    15.3 @@ -221,7 +221,7 @@
    15.4  //------------------------------is_uncommon------------------------------------
    15.5  // True if block is low enough frequency or guarded by a test which
    15.6  // mostly does not go here.
    15.7 -bool Block::is_uncommon( Block_Array &bbs ) const {
    15.8 +bool Block::is_uncommon(PhaseCFG* cfg) const {
    15.9    // Initial blocks must never be moved, so are never uncommon.
   15.10    if (head()->is_Root() || head()->is_Start())  return false;
   15.11  
   15.12 @@ -238,7 +238,7 @@
   15.13    uint uncommon_for_freq_preds = 0;
   15.14  
   15.15    for( uint i=1; i<num_preds(); i++ ) {
   15.16 -    Block* guard = bbs[pred(i)->_idx];
   15.17 +    Block* guard = cfg->get_block_for_node(pred(i));
   15.18      // Check to see if this block follows its guard 1 time out of 10000
   15.19      // or less.
   15.20      //
   15.21 @@ -285,11 +285,11 @@
   15.22    }
   15.23  }
   15.24  
   15.25 -void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
   15.26 +void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
   15.27    if (is_connector()) {
   15.28      for (uint i=1; i<num_preds(); i++) {
   15.29 -      Block *p = ((*bbs)[pred(i)->_idx]);
   15.30 -      p->dump_pred(bbs, orig, st);
   15.31 +      Block *p = cfg->get_block_for_node(pred(i));
   15.32 +      p->dump_pred(cfg, orig, st);
   15.33      }
   15.34    } else {
   15.35      dump_bidx(orig, st);
   15.36 @@ -297,7 +297,7 @@
   15.37    }
   15.38  }
   15.39  
   15.40 -void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
   15.41 +void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
   15.42    // Print the basic block
   15.43    dump_bidx(this, st);
   15.44    st->print(": #\t");
   15.45 @@ -311,26 +311,28 @@
   15.46    if( head()->is_block_start() ) {
   15.47      for (uint i=1; i<num_preds(); i++) {
   15.48        Node *s = pred(i);
   15.49 -      if (bbs) {
   15.50 -        Block *p = (*bbs)[s->_idx];
   15.51 -        p->dump_pred(bbs, p, st);
   15.52 +      if (cfg != NULL) {
   15.53 +        Block *p = cfg->get_block_for_node(s);
   15.54 +        p->dump_pred(cfg, p, st);
   15.55        } else {
   15.56          while (!s->is_block_start())
   15.57            s = s->in(0);
   15.58          st->print("N%d ", s->_idx );
   15.59        }
   15.60      }
   15.61 -  } else
   15.62 +  } else {
   15.63      st->print("BLOCK HEAD IS JUNK  ");
   15.64 +  }
   15.65  
   15.66    // Print loop, if any
   15.67    const Block *bhead = this;    // Head of self-loop
   15.68    Node *bh = bhead->head();
   15.69 -  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
   15.70 +
   15.71 +  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
   15.72      LoopNode *loop = bh->as_Loop();
   15.73 -    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
   15.74 +    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
   15.75      while (bx->is_connector()) {
   15.76 -      bx = (*bbs)[bx->pred(1)->_idx];
   15.77 +      bx = cfg->get_block_for_node(bx->pred(1));
   15.78      }
   15.79      st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
   15.80      // Dump any loop-specific bits, especially for CountedLoops.
   15.81 @@ -349,29 +351,32 @@
   15.82    st->print_cr("");
   15.83  }
   15.84  
   15.85 -void Block::dump() const { dump(NULL); }
   15.86 +void Block::dump() const {
   15.87 +  dump(NULL);
   15.88 +}
   15.89  
   15.90 -void Block::dump( const Block_Array *bbs ) const {
   15.91 -  dump_head(bbs);
   15.92 -  uint cnt = _nodes.size();
   15.93 -  for( uint i=0; i<cnt; i++ )
   15.94 +void Block::dump(const PhaseCFG* cfg) const {
   15.95 +  dump_head(cfg);
   15.96 +  for (uint i=0; i< _nodes.size(); i++) {
   15.97      _nodes[i]->dump();
   15.98 +  }
   15.99    tty->print("\n");
  15.100  }
  15.101  #endif
  15.102  
  15.103  //=============================================================================
  15.104  //------------------------------PhaseCFG---------------------------------------
  15.105 -PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
  15.106 -  Phase(CFG),
  15.107 -  _bbs(a),
  15.108 -  _root(r),
  15.109 -  _node_latency(NULL)
  15.110 +PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
  15.111 +: Phase(CFG)
  15.112 +, _block_arena(arena)
  15.113 +, _node_to_block_mapping(arena)
  15.114 +, _root(root)
  15.115 +, _node_latency(NULL)
  15.116  #ifndef PRODUCT
  15.117 -  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
  15.118 +, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
  15.119  #endif
  15.120  #ifdef ASSERT
  15.121 -  , _raw_oops(a)
  15.122 +, _raw_oops(arena)
  15.123  #endif
  15.124  {
  15.125    ResourceMark rm;
  15.126 @@ -380,13 +385,13 @@
  15.127    // Node on demand.
  15.128    Node *x = new (C) GotoNode(NULL);
  15.129    x->init_req(0, x);
  15.130 -  _goto = m.match_tree(x);
  15.131 +  _goto = matcher.match_tree(x);
  15.132    assert(_goto != NULL, "");
  15.133    _goto->set_req(0,_goto);
  15.134  
  15.135    // Build the CFG in Reverse Post Order
  15.136    _num_blocks = build_cfg();
  15.137 -  _broot = _bbs[_root->_idx];
  15.138 +  _broot = get_block_for_node(_root);
  15.139  }
  15.140  
  15.141  //------------------------------build_cfg--------------------------------------
  15.142 @@ -440,9 +445,9 @@
  15.143        // 'p' now points to the start of this basic block
  15.144  
  15.145        // Put self in array of basic blocks
  15.146 -      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
  15.147 -      _bbs.map(p->_idx,bb);
  15.148 -      _bbs.map(x->_idx,bb);
  15.149 +      Block *bb = new (_block_arena) Block(_block_arena, p);
  15.150 +      map_node_to_block(p, bb);
  15.151 +      map_node_to_block(x, bb);
  15.152        if( x != p ) {                // Only for root is x == p
  15.153          bb->_nodes.push((Node*)x);
  15.154        }
  15.155 @@ -473,16 +478,16 @@
   15.156        // Check if it is the first node pushed on stack at the beginning.
  15.157        if (idx == 0) break;          // end of the build
  15.158        // Find predecessor basic block
  15.159 -      Block *pb = _bbs[x->_idx];
  15.160 +      Block *pb = get_block_for_node(x);
  15.161        // Insert into nodes array, if not already there
  15.162 -      if( !_bbs.lookup(proj->_idx) ) {
  15.163 +      if (!has_block(proj)) {
  15.164          assert( x != proj, "" );
  15.165          // Map basic block of projection
  15.166 -        _bbs.map(proj->_idx,pb);
  15.167 +        map_node_to_block(proj, pb);
  15.168          pb->_nodes.push(proj);
  15.169        }
  15.170        // Insert self as a child of my predecessor block
  15.171 -      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
  15.172 +      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
  15.173        assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
  15.174                "too many control users, not a CFG?" );
  15.175      }
  15.176 @@ -511,15 +516,15 @@
  15.177    RegionNode* region = new (C) RegionNode(2);
  15.178    region->init_req(1, proj);
  15.179    // setup corresponding basic block
  15.180 -  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  15.181 -  _bbs.map(region->_idx, block);
  15.182 +  Block* block = new (_block_arena) Block(_block_arena, region);
  15.183 +  map_node_to_block(region, block);
  15.184    C->regalloc()->set_bad(region->_idx);
  15.185    // add a goto node
  15.186    Node* gto = _goto->clone(); // get a new goto node
  15.187    gto->set_req(0, region);
  15.188    // add it to the basic block
  15.189    block->_nodes.push(gto);
  15.190 -  _bbs.map(gto->_idx, block);
  15.191 +  map_node_to_block(gto, block);
  15.192    C->regalloc()->set_bad(gto->_idx);
  15.193    // hook up successor block
  15.194    block->_succs.map(block->_num_succs++, out);
  15.195 @@ -570,7 +575,7 @@
  15.196    gto->set_req(0, b->head());
  15.197    Node *bp = b->_nodes[end_idx];
  15.198    b->_nodes.map(end_idx,gto); // Slam over NeverBranch
  15.199 -  _bbs.map(gto->_idx, b);
  15.200 +  map_node_to_block(gto, b);
  15.201    C->regalloc()->set_bad(gto->_idx);
  15.202    b->_nodes.pop();              // Yank projections
  15.203    b->_nodes.pop();              // Yank projections
  15.204 @@ -613,7 +618,7 @@
  15.205    // If the previous block conditionally falls into bx, return false,
  15.206    // because moving bx will create an extra jump.
  15.207    for(uint k = 1; k < bx->num_preds(); k++ ) {
  15.208 -    Block* pred = _bbs[bx->pred(k)->_idx];
  15.209 +    Block* pred = get_block_for_node(bx->pred(k));
  15.210      if (pred == _blocks[bx_index-1]) {
  15.211        if (pred->_num_succs != 1) {
  15.212          return false;
  15.213 @@ -682,7 +687,7 @@
  15.214  
  15.215      // Look for uncommon blocks and move to end.
  15.216      if (!C->do_freq_based_layout()) {
  15.217 -      if( b->is_uncommon(_bbs) ) {
  15.218 +      if (b->is_uncommon(this)) {
  15.219          move_to_end(b, i);
  15.220          last--;                   // No longer check for being uncommon!
  15.221          if( no_flip_branch(b) ) { // Fall-thru case must follow?
  15.222 @@ -870,28 +875,31 @@
  15.223    } while( !p->is_block_start() );
  15.224  
  15.225    // Recursively visit
  15.226 -  for( uint i=1; i<p->req(); i++ )
  15.227 -    _dump_cfg(p->in(i),visited);
  15.228 +  for (uint i = 1; i < p->req(); i++) {
  15.229 +    _dump_cfg(p->in(i), visited);
  15.230 +  }
  15.231  
  15.232    // Dump the block
  15.233 -  _bbs[p->_idx]->dump(&_bbs);
  15.234 +  get_block_for_node(p)->dump(this);
  15.235  }
  15.236  
  15.237  void PhaseCFG::dump( ) const {
  15.238    tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
  15.239 -  if( _blocks.size() ) {        // Did we do basic-block layout?
  15.240 -    for( uint i=0; i<_num_blocks; i++ )
  15.241 -      _blocks[i]->dump(&_bbs);
  15.242 +  if (_blocks.size()) {        // Did we do basic-block layout?
  15.243 +    for (uint i = 0; i < _num_blocks; i++) {
  15.244 +      _blocks[i]->dump(this);
  15.245 +    }
  15.246    } else {                      // Else do it with a DFS
  15.247 -    VectorSet visited(_bbs._arena);
  15.248 +    VectorSet visited(_block_arena);
  15.249      _dump_cfg(_root,visited);
  15.250    }
  15.251  }
  15.252  
  15.253  void PhaseCFG::dump_headers() {
  15.254    for( uint i = 0; i < _num_blocks; i++ ) {
  15.255 -    if( _blocks[i] == NULL ) continue;
  15.256 -    _blocks[i]->dump_head(&_bbs);
  15.257 +    if (_blocks[i]) {
  15.258 +      _blocks[i]->dump_head(this);
  15.259 +    }
  15.260    }
  15.261  }
  15.262  
  15.263 @@ -904,7 +912,7 @@
  15.264      uint j;
  15.265      for (j = 0; j < cnt; j++)  {
  15.266        Node *n = b->_nodes[j];
  15.267 -      assert( _bbs[n->_idx] == b, "" );
  15.268 +      assert(get_block_for_node(n) == b, "");
  15.269        if (j >= 1 && n->is_Mach() &&
  15.270            n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  15.271          assert(j == 1 || b->_nodes[j-1]->is_Phi(),
  15.272 @@ -913,13 +921,12 @@
  15.273        for (uint k = 0; k < n->req(); k++) {
  15.274          Node *def = n->in(k);
  15.275          if (def && def != n) {
  15.276 -          assert(_bbs[def->_idx] || def->is_Con(),
  15.277 -                 "must have block; constants for debug info ok");
  15.278 +          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
  15.279            // Verify that instructions in the block is in correct order.
  15.280            // Uses must follow their definition if they are at the same block.
  15.281            // Mostly done to check that MachSpillCopy nodes are placed correctly
  15.282            // when CreateEx node is moved in build_ifg_physical().
  15.283 -          if (_bbs[def->_idx] == b &&
  15.284 +          if (get_block_for_node(def) == b &&
  15.285                !(b->head()->is_Loop() && n->is_Phi()) &&
  15.286                // See (+++) comment in reg_split.cpp
  15.287                !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
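
The block.cpp hunks above all apply one mechanical rewrite: call sites stop indexing the public _bbs Block_Array by node->_idx and instead go through the new PhaseCFG accessors. Below is a minimal C++ sketch of the before/after idiom; Cfg, Node, Block, and lookup_guard are illustrative stand-ins, not the real HotSpot classes (a fuller sketch of the accessors themselves follows the block.hpp diff below).

    #include <cstddef>

    struct Node  { unsigned _idx; };     // real nodes carry a unique index
    struct Block { };                    // stand-in for a basic block

    struct Cfg {                         // stand-in exposing the new accessors
      Block* _map[16] = {};
      Block* get_block_for_node(const Node* n) const { return _map[n->_idx]; }
      bool   has_block(const Node* n) const { return _map[n->_idx] != NULL; }
    };

    // Old idiom (removed above):  Block* guard = cfg->_bbs[n->_idx];
    // New idiom (added above): ask the CFG, never touch the array directly.
    Block* lookup_guard(const Cfg* cfg, const Node* n) {
      return cfg->has_block(n) ? cfg->get_block_for_node(n) : NULL;
    }
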
    16.1 --- a/src/share/vm/opto/block.hpp	Fri Aug 09 01:39:11 2013 -0700
    16.2 +++ b/src/share/vm/opto/block.hpp	Fri Aug 09 18:05:00 2013 +0200
    16.3 @@ -48,13 +48,12 @@
    16.4    friend class VMStructs;
    16.5    uint _size;                   // allocated size, as opposed to formal limit
    16.6    debug_only(uint _limit;)      // limit to formal domain
    16.7 +  Arena *_arena;                // Arena to allocate in
    16.8  protected:
    16.9    Block **_blocks;
   16.10    void grow( uint i );          // Grow array node to fit
   16.11  
   16.12  public:
   16.13 -  Arena *_arena;                // Arena to allocate in
   16.14 -
   16.15    Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
   16.16      debug_only(_limit=0);
   16.17      _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
   16.18 @@ -77,7 +76,7 @@
   16.19  public:
   16.20    uint _cnt;
   16.21    Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
   16.22 -  void push( Block *b ) { map(_cnt++,b); }
   16.23 +  void push( Block *b ) {  map(_cnt++,b); }
   16.24    Block *pop() { return _blocks[--_cnt]; }
   16.25    Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
   16.26    void remove( uint i );
   16.27 @@ -284,15 +283,15 @@
   16.28    // helper function that adds caller save registers to MachProjNode
   16.29    void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   16.30    // Schedule a call next in the block
   16.31 -  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
   16.32 +  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
   16.33  
   16.34    // Perform basic-block local scheduling
   16.35    Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
   16.36 -  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
   16.37 -  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
   16.38 +  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
   16.39 +  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   16.40    bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   16.41    // Cleanup if any code lands between a Call and his Catch
   16.42 -  void call_catch_cleanup(Block_Array &bbs, Compile *C);
   16.43 +  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   16.44    // Detect implicit-null-check opportunities.  Basically, find NULL checks
   16.45    // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   16.46    // I can generate a memory op if there is not one nearby.
   16.47 @@ -331,15 +330,15 @@
   16.48  
   16.49    // Use frequency calculations and code shape to predict if the block
   16.50    // is uncommon.
   16.51 -  bool is_uncommon( Block_Array &bbs ) const;
   16.52 +  bool is_uncommon(PhaseCFG* cfg) const;
   16.53  
   16.54  #ifndef PRODUCT
   16.55    // Debugging print of basic block
   16.56    void dump_bidx(const Block* orig, outputStream* st = tty) const;
   16.57 -  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
   16.58 -  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
   16.59 +  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
   16.60 +  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
   16.61    void dump() const;
   16.62 -  void dump( const Block_Array *bbs ) const;
   16.63 +  void dump(const PhaseCFG* cfg) const;
   16.64  #endif
   16.65  };
   16.66  
   16.67 @@ -349,6 +348,12 @@
   16.68  class PhaseCFG : public Phase {
   16.69    friend class VMStructs;
   16.70   private:
   16.71 +  // Arena for the blocks to be stored in
   16.72 +  Arena* _block_arena;
   16.73 +
   16.74 +  // Map nodes to owning basic block
   16.75 +  Block_Array _node_to_block_mapping;
   16.76 +
   16.77    // Build a proper looking cfg.  Return count of basic blocks
   16.78    uint build_cfg();
   16.79  
   16.80 @@ -371,22 +376,42 @@
   16.81  
   16.82    Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   16.83    void verify_anti_dependences(Block* LCA, Node* load) {
   16.84 -    assert(LCA == _bbs[load->_idx], "should already be scheduled");
   16.85 +    assert(LCA == get_block_for_node(load), "should already be scheduled");
   16.86      insert_anti_dependences(LCA, load, true);
   16.87    }
   16.88  
   16.89   public:
   16.90 -  PhaseCFG( Arena *a, RootNode *r, Matcher &m );
   16.91 +  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
   16.92  
   16.93    uint _num_blocks;             // Count of basic blocks
   16.94    Block_List _blocks;           // List of basic blocks
   16.95    RootNode *_root;              // Root of whole program
   16.96 -  Block_Array _bbs;             // Map Nodes to owning Basic Block
   16.97    Block *_broot;                // Basic block of root
   16.98    uint _rpo_ctr;
   16.99    CFGLoop* _root_loop;
  16.100    float _outer_loop_freq;       // Outmost loop frequency
  16.101  
  16.102 +
  16.103 +  // set which block this node should reside in
  16.104 +  void map_node_to_block(const Node* node, Block* block) {
  16.105 +    _node_to_block_mapping.map(node->_idx, block);
  16.106 +  }
  16.107 +
  16.108 +  // removes the mapping from a node to a block
  16.109 +  void unmap_node_from_block(const Node* node) {
  16.110 +    _node_to_block_mapping.map(node->_idx, NULL);
  16.111 +  }
  16.112 +
  16.113 +  // get the block in which this node resides
  16.114 +  Block* get_block_for_node(const Node* node) const {
  16.115 +    return _node_to_block_mapping[node->_idx];
  16.116 +  }
  16.117 +
   16.118 +  // does this node reside in a block; return true if it does
  16.119 +  bool has_block(const Node* node) const {
  16.120 +    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  16.121 +  }
  16.122 +
  16.123    // Per node latency estimation, valid only during GCM
  16.124    GrowableArray<uint> *_node_latency;
  16.125  
  16.126 @@ -405,7 +430,7 @@
  16.127    void Estimate_Block_Frequency();
  16.128  
  16.129    // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  16.130 -  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  16.131 +  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  16.132    void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
  16.133  
  16.134    // Compute the (backwards) latency of a node from the uses
  16.135 @@ -454,7 +479,7 @@
   16.136    // Insert a node into a block, and update the node-to-block mapping
  16.137    void insert( Block *b, uint idx, Node *n ) {
  16.138      b->_nodes.insert( idx, n );
  16.139 -    _bbs.map( n->_idx, b );
  16.140 +    map_node_to_block(n, b);
  16.141    }
  16.142  
  16.143  #ifndef PRODUCT
  16.144 @@ -543,7 +568,7 @@
  16.145      _child(NULL),
  16.146      _exit_prob(1.0f) {}
  16.147    CFGLoop* parent() { return _parent; }
  16.148 -  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  16.149 +  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  16.150    void add_member(CFGElement *s) { _members.push(s); }
  16.151    void add_nested_loop(CFGLoop* cl);
  16.152    Block* head() {
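
The block.hpp hunk above makes the mapping private and reachable only through four named accessors. Here is a condensed, self-contained sketch of that encapsulation, assuming simplified stand-in types: std::vector replaces the arena-allocated Block_Array purely for brevity, while the real change keeps the map in a Block_Array on _block_arena and only changes the access path.

    #include <cstddef>
    #include <vector>

    struct Node  { unsigned _idx; };                // stand-in node with its index
    struct Block { };                               // stand-in basic block

    class PhaseCFGSketch {
     public:
      // set which block this node should reside in
      void map_node_to_block(const Node* node, Block* block) {
        grow(node->_idx);
        _node_to_block_mapping[node->_idx] = block;
      }
      // remove the mapping from a node to a block
      void unmap_node_from_block(const Node* node) {
        if (node->_idx < _node_to_block_mapping.size()) {
          _node_to_block_mapping[node->_idx] = NULL;
        }
      }
      // get the block in which this node resides (NULL if unmapped)
      Block* get_block_for_node(const Node* node) const {
        return node->_idx < _node_to_block_mapping.size()
                   ? _node_to_block_mapping[node->_idx]
                   : NULL;
      }
      // does this node reside in a block; return true if it does
      bool has_block(const Node* node) const {
        return get_block_for_node(node) != NULL;
      }
     private:
      void grow(unsigned idx) {                     // grow map on demand
        if (idx >= _node_to_block_mapping.size()) {
          _node_to_block_mapping.resize(idx + 1, (Block*)NULL);
        }
      }
      std::vector<Block*> _node_to_block_mapping;   // was the public Block_Array _bbs
    };
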
    17.1 --- a/src/share/vm/opto/buildOopMap.cpp	Fri Aug 09 01:39:11 2013 -0700
    17.2 +++ b/src/share/vm/opto/buildOopMap.cpp	Fri Aug 09 18:05:00 2013 +0200
    17.3 @@ -426,14 +426,16 @@
    17.4    }
    17.5    memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
    17.6    // Push preds onto worklist
    17.7 -  for( uint i=1; i<root->req(); i++ )
    17.8 -    worklist->push(cfg->_bbs[root->in(i)->_idx]);
    17.9 +  for (uint i = 1; i < root->req(); i++) {
   17.10 +    Block* block = cfg->get_block_for_node(root->in(i));
   17.11 +    worklist->push(block);
   17.12 +  }
   17.13  
   17.14    // ZKM.jar includes tiny infinite loops which are unreached from below.
   17.15    // If we missed any blocks, we'll retry here after pushing all missed
   17.16    // blocks on the worklist.  Normally this outer loop never trips more
   17.17    // than once.
   17.18 -  while( 1 ) {
   17.19 +  while (1) {
   17.20  
   17.21      while( worklist->size() ) { // Standard worklist algorithm
   17.22        Block *b = worklist->rpop();
   17.23 @@ -537,8 +539,10 @@
   17.24          for( l=0; l<max_reg_ints; l++ )
   17.25            old_live[l] = tmp_live[l];
   17.26          // Push preds onto worklist
   17.27 -        for( l=1; l<(int)b->num_preds(); l++ )
   17.28 -          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
   17.29 +        for (l = 1; l < (int)b->num_preds(); l++) {
   17.30 +          Block* block = cfg->get_block_for_node(b->pred(l));
   17.31 +          worklist->push(block);
   17.32 +        }
   17.33        }
   17.34      }
   17.35  
   17.36 @@ -629,10 +633,9 @@
   17.37      // pred to this block.  Otherwise we have to grab a new OopFlow.
   17.38      OopFlow *flow = NULL;       // Flag for finding optimized flow
   17.39      Block *pred = (Block*)0xdeadbeef;
   17.40 -    uint j;
   17.41      // Scan this block's preds to find a done predecessor
   17.42 -    for( j=1; j<b->num_preds(); j++ ) {
   17.43 -      Block *p = _cfg->_bbs[b->pred(j)->_idx];
   17.44 +    for (uint j = 1; j < b->num_preds(); j++) {
   17.45 +      Block* p = _cfg->get_block_for_node(b->pred(j));
   17.46        OopFlow *p_flow = flows[p->_pre_order];
   17.47        if( p_flow ) {            // Predecessor is done
   17.48          assert( p_flow->_b == p, "cross check" );
    18.1 --- a/src/share/vm/opto/chaitin.cpp	Fri Aug 09 01:39:11 2013 -0700
    18.2 +++ b/src/share/vm/opto/chaitin.cpp	Fri Aug 09 18:05:00 2013 +0200
    18.3 @@ -295,7 +295,7 @@
    18.4  
    18.5  
    18.6  bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
    18.7 -  Block *bcon = _cfg._bbs[con->_idx];
    18.8 +  Block* bcon = _cfg.get_block_for_node(con);
    18.9    uint cindex = bcon->find_node(con);
   18.10    Node *con_next = bcon->_nodes[cindex+1];
   18.11    if (con_next->in(0) != con || !con_next->is_MachProj()) {
   18.12 @@ -306,7 +306,7 @@
   18.13    Node *kills = con_next->clone();
   18.14    kills->set_req(0, copy);
   18.15    b->_nodes.insert(idx, kills);
   18.16 -  _cfg._bbs.map(kills->_idx, b);
   18.17 +  _cfg.map_node_to_block(kills, b);
   18.18    new_lrg(kills, max_lrg_id);
   18.19    return true;
   18.20  }
   18.21 @@ -962,8 +962,7 @@
   18.22          // AggressiveCoalesce.  This effectively pre-virtual-splits
   18.23          // around uncommon uses of common defs.
   18.24          const RegMask &rm = n->in_RegMask(k);
   18.25 -        if( !after_aggressive &&
   18.26 -          _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
   18.27 +        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
   18.28            // Since we are BEFORE aggressive coalesce, leave the register
   18.29            // mask untrimmed by the call.  This encourages more coalescing.
   18.30            // Later, AFTER aggressive, this live range will have to spill
   18.31 @@ -1709,16 +1708,15 @@
   18.32        // set control to _root and place it into Start block
   18.33        // (where top() node is placed).
   18.34        base->init_req(0, _cfg._root);
   18.35 -      Block *startb = _cfg._bbs[C->top()->_idx];
   18.36 +      Block *startb = _cfg.get_block_for_node(C->top());
   18.37        startb->_nodes.insert(startb->find_node(C->top()), base );
   18.38 -      _cfg._bbs.map( base->_idx, startb );
   18.39 +      _cfg.map_node_to_block(base, startb);
   18.40        assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
   18.41      }
   18.42      if (_lrg_map.live_range_id(base) == 0) {
   18.43        new_lrg(base, maxlrg++);
   18.44      }
   18.45 -    assert(base->in(0) == _cfg._root &&
   18.46 -           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
   18.47 +    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
   18.48      derived_base_map[derived->_idx] = base;
   18.49      return base;
   18.50    }
   18.51 @@ -1754,12 +1752,12 @@
   18.52    base->as_Phi()->set_type(t);
   18.53  
   18.54    // Search the current block for an existing base-Phi
   18.55 -  Block *b = _cfg._bbs[derived->_idx];
   18.56 +  Block *b = _cfg.get_block_for_node(derived);
   18.57    for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
   18.58      Node *phi = b->_nodes[i];
   18.59      if( !phi->is_Phi() ) {      // Found end of Phis with no match?
   18.60        b->_nodes.insert( i, base ); // Must insert created Phi here as base
   18.61 -      _cfg._bbs.map( base->_idx, b );
   18.62 +      _cfg.map_node_to_block(base, b);
   18.63        new_lrg(base,maxlrg++);
   18.64        break;
   18.65      }
   18.66 @@ -1815,8 +1813,8 @@
   18.67        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
   18.68          Node *phi = n->in(1);
   18.69          if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
   18.70 -          Block *phi_block = _cfg._bbs[phi->_idx];
   18.71 -          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
   18.72 +          Block *phi_block = _cfg.get_block_for_node(phi);
   18.73 +          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
   18.74              const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
   18.75              Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
   18.76              insert_proj( phi_block, 1, spill, maxlrg++ );
   18.77 @@ -1870,7 +1868,7 @@
   18.78              if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
   18.79                   !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
   18.80                   (_lrg_map.live_range_id(base) > 0) && // not a constant
   18.81 -                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
   18.82 +                 _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
   18.83                // Base pointer is not currently live.  Since I stretched
   18.84                // the base pointer to here and it crosses basic-block
   18.85                // boundaries, the global live info is now incorrect.
   18.86 @@ -1993,8 +1991,8 @@
   18.87    tty->print("\n");
   18.88  }
   18.89  
   18.90 -void PhaseChaitin::dump( const Block * b ) const {
   18.91 -  b->dump_head( &_cfg._bbs );
   18.92 +void PhaseChaitin::dump(const Block *b) const {
   18.93 +  b->dump_head(&_cfg);
   18.94  
   18.95    // For all instructions
   18.96    for( uint j = 0; j < b->_nodes.size(); j++ )
   18.97 @@ -2299,7 +2297,7 @@
   18.98        if (_lrg_map.find_const(n) == lidx) {
   18.99          if (!dump_once++) {
  18.100            tty->cr();
  18.101 -          b->dump_head( &_cfg._bbs );
  18.102 +          b->dump_head(&_cfg);
  18.103          }
  18.104          dump(n);
  18.105          continue;
  18.106 @@ -2314,7 +2312,7 @@
  18.107            if (_lrg_map.find_const(m) == lidx) {
  18.108              if (!dump_once++) {
  18.109                tty->cr();
  18.110 -              b->dump_head(&_cfg._bbs);
  18.111 +              b->dump_head(&_cfg);
  18.112              }
  18.113              dump(n);
  18.114            }
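
dump_head now takes the whole PhaseCFG rather than a bare Block_Array. A hedged sketch of the receiving side, inferred only from the call sites in this file and the predecessor-printing loop in coalesce.cpp below; the stand-in types and printf output are assumptions, not the real Block::dump_head.

    #include <cstdio>
    #include <vector>

    struct Block;
    struct Node { unsigned _idx; };

    struct PhaseCFG {
      std::vector<Block*> node_to_block;            // assumed backing store
      Block* get_block_for_node(const Node* n) const {
        return node_to_block[n->_idx];
      }
    };

    struct Block {
      int _pre_order;
      std::vector<Node*> _preds;     // pred(1)..pred(n-1) in HotSpot terms
      // Prints a "B7: B3 B5 -> " style header; the predecessor *nodes* are
      // mapped back to their blocks through the CFG, not through a raw array.
      void dump_head(const PhaseCFG* cfg) const {
        std::printf("B%d: ", _pre_order);
        for (const Node* p : _preds) {
          std::printf("B%d ", cfg->get_block_for_node(p)->_pre_order);
        }
        std::printf("-> \n");
      }
    };
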
    19.1 --- a/src/share/vm/opto/coalesce.cpp	Fri Aug 09 01:39:11 2013 -0700
    19.2 +++ b/src/share/vm/opto/coalesce.cpp	Fri Aug 09 18:05:00 2013 +0200
    19.3 @@ -52,7 +52,7 @@
    19.4      // Print a nice block header
    19.5      tty->print("B%d: ",b->_pre_order);
    19.6      for( j=1; j<b->num_preds(); j++ )
    19.7 -      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    19.8 +      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
    19.9      tty->print("-> ");
   19.10      for( j=0; j<b->_num_succs; j++ )
   19.11        tty->print("B%d ",b->_succs[j]->_pre_order);
   19.12 @@ -208,7 +208,7 @@
   19.13      copy->set_req(idx,tmp);
   19.14      // Save source in temp early, before source is killed
   19.15      b->_nodes.insert(kill_src_idx,tmp);
   19.16 -    _phc._cfg._bbs.map( tmp->_idx, b );
   19.17 +    _phc._cfg.map_node_to_block(tmp, b);
   19.18      last_use_idx++;
   19.19    }
   19.20  
   19.21 @@ -286,7 +286,7 @@
   19.22            Node *m = n->in(j);
   19.23            uint src_name = _phc._lrg_map.find(m);
   19.24            if (src_name != phi_name) {
   19.25 -            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
   19.26 +            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
   19.27              Node *copy;
   19.28              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
   19.29              // Rematerialize constants instead of copying them
   19.30 @@ -305,7 +305,7 @@
   19.31              }
   19.32              // Insert the copy in the use-def chain
   19.33              n->set_req(j, copy);
   19.34 -            _phc._cfg._bbs.map( copy->_idx, pred );
   19.35 +            _phc._cfg.map_node_to_block(copy, pred);
   19.36              // Extend ("register allocate") the names array for the copy.
   19.37              _phc._lrg_map.extend(copy->_idx, phi_name);
   19.38            } // End of if Phi names do not match
   19.39 @@ -343,13 +343,13 @@
   19.40              n->set_req(idx, copy);
   19.41              // Extend ("register allocate") the names array for the copy.
   19.42              _phc._lrg_map.extend(copy->_idx, name);
   19.43 -            _phc._cfg._bbs.map( copy->_idx, b );
   19.44 +            _phc._cfg.map_node_to_block(copy, b);
   19.45            }
   19.46  
   19.47          } // End of is two-adr
   19.48  
   19.49          // Insert a copy at a debug use for a lrg which has high frequency
   19.50 -        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
   19.51 +        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
   19.52            // Walk the debug inputs to the node and check for lrg freq
   19.53            JVMState* jvms = n->jvms();
   19.54            uint debug_start = jvms ? jvms->debug_start() : 999999;
   19.55 @@ -391,7 +391,7 @@
   19.56                uint max_lrg_id = _phc._lrg_map.max_lrg_id();
   19.57                _phc.new_lrg(copy, max_lrg_id);
   19.58                _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
   19.59 -              _phc._cfg._bbs.map(copy->_idx, b);
   19.60 +              _phc._cfg.map_node_to_block(copy, b);
   19.61                //tty->print_cr("Split a debug use in Aggressive Coalesce");
   19.62              }  // End of if high frequency use/def
   19.63            }  // End of for all debug inputs
   19.64 @@ -437,7 +437,10 @@
   19.65      Block *bs = b->_succs[i];
   19.66      // Find index of 'b' in 'bs' predecessors
   19.67      uint j=1;
   19.68 -    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
   19.69 +    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
   19.70 +      j++;
   19.71 +    }
   19.72 +
   19.73      // Visit all the Phis in successor block
   19.74      for( uint k = 1; k<bs->_nodes.size(); k++ ) {
   19.75        Node *n = bs->_nodes[k];
   19.76 @@ -510,9 +513,9 @@
   19.77    if( bindex < b->_fhrp_index ) b->_fhrp_index--;
   19.78  
   19.79    // Stretched lr1; add it to liveness of intermediate blocks
   19.80 -  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
   19.81 +  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
   19.82    while( b != b2 ) {
   19.83 -    b = _phc._cfg._bbs[b->pred(1)->_idx];
   19.84 +    b = _phc._cfg.get_block_for_node(b->pred(1));
   19.85      _phc._live->live(b)->insert(lr1);
   19.86    }
   19.87  }
   19.88 @@ -532,7 +535,7 @@
   19.89      bindex2--;                  // Chain backwards 1 instruction
   19.90      while( bindex2 == 0 ) {     // At block start, find prior block
   19.91        assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
   19.92 -      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
   19.93 +      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
   19.94        bindex2 = b2->end_idx()-1;
   19.95      }
   19.96      // Get prior instruction
   19.97 @@ -676,8 +679,8 @@
   19.98  
   19.99    if (UseFPUForSpilling && rm.is_AllStack() ) {
  19.100      // Don't coalesce when frequency difference is large
  19.101 -    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
  19.102 -    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
  19.103 +    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
  19.104 +    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
  19.105      if (src_def_b->_freq > 10*dst_b->_freq )
  19.106        return false;
  19.107    }
  19.108 @@ -690,7 +693,7 @@
  19.109    // Another early bail-out test is when we are double-coalescing and the
  19.110    // 2 copies are separated by some control flow.
  19.111    if( dst_copy != src_copy ) {
  19.112 -    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
  19.113 +    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
  19.114      Block *b2 = b;
  19.115      while( b2 != src_b ) {
  19.116        if( b2->num_preds() > 2 ){// Found merge-point
  19.117 @@ -701,7 +704,7 @@
  19.118          //record_bias( _phc._lrgs, lr1, lr2 );
   19.119          return false;           // Too hard to find all interferences
  19.120        }
  19.121 -      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
  19.122 +      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
  19.123      }
  19.124    }
  19.125  
  19.126 @@ -786,8 +789,9 @@
  19.127  // Conservative (but pessimistic) copy coalescing of a single block
  19.128  void PhaseConservativeCoalesce::coalesce( Block *b ) {
  19.129    // Bail out on infrequent blocks
  19.130 -  if( b->is_uncommon(_phc._cfg._bbs) )
  19.131 +  if (b->is_uncommon(&_phc._cfg)) {
  19.132      return;
  19.133 +  }
  19.134    // Check this block for copies.
  19.135    for( uint i = 1; i<b->end_idx(); i++ ) {
  19.136      // Check for actual copies on inputs.  Coalesce a copy into its
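
Both bail-outs in this file are frequency heuristics: conservative coalescing is skipped entirely in uncommon blocks, and (under UseFPUForSpilling) a copy is not coalesced when the source definition's block runs far more often than the destination's. A toy rendering of the second guard; the 10x threshold is taken from the hunk above, while the function wrapper and field layout are assumed.

    struct Block { double _freq; };   // stand-in: block execution frequency

    // Refuse to coalesce when the def block is much hotter than the copy's
    // block: merging the live ranges would pessimize the hot path.
    bool freq_allows_coalesce(const Block* src_def_b, const Block* dst_b) {
      return src_def_b->_freq <= 10 * dst_b->_freq;
    }
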
    20.1 --- a/src/share/vm/opto/compile.cpp	Fri Aug 09 01:39:11 2013 -0700
    20.2 +++ b/src/share/vm/opto/compile.cpp	Fri Aug 09 18:05:00 2013 +0200
    20.3 @@ -2262,7 +2262,7 @@
    20.4        tty->print("%3.3x   ", pcs[n->_idx]);
    20.5      else
    20.6        tty->print("      ");
    20.7 -    b->dump_head( &_cfg->_bbs );
    20.8 +    b->dump_head(_cfg);
    20.9      if (b->is_connector()) {
   20.10        tty->print_cr("        # Empty connector block");
   20.11      } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
   20.12 @@ -3525,7 +3525,7 @@
   20.13  }
   20.14  
   20.15  Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
   20.16 -  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
   20.17 +  Block* b = Compile::current()->cfg()->get_block_for_node(n);
   20.18    Constant con(type, value, b->_freq);
   20.19    add(con);
   20.20    return con;
    21.1 --- a/src/share/vm/opto/domgraph.cpp	Fri Aug 09 01:39:11 2013 -0700
    21.2 +++ b/src/share/vm/opto/domgraph.cpp	Fri Aug 09 18:05:00 2013 +0200
    21.3 @@ -105,8 +105,8 @@
    21.4  
    21.5      // Step 2:
    21.6      Node *whead = w->_block->head();
    21.7 -    for( uint j=1; j < whead->req(); j++ ) {
    21.8 -      Block *b = _bbs[whead->in(j)->_idx];
    21.9 +    for (uint j = 1; j < whead->req(); j++) {
   21.10 +      Block* b = get_block_for_node(whead->in(j));
   21.11        Tarjan *vx = &tarjan[b->_pre_order];
   21.12        Tarjan *u = vx->EVAL();
   21.13        if( u->_semi < w->_semi )
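
Step 2 here is the semidominator computation of the Lengauer-Tarjan dominator algorithm: for each node w taken in reverse DFS order, semi(w) is minimized over EVAL of every CFG predecessor. A compact sketch of just that step; the Tarjan struct mirrors the fields used above, and the trivial EVAL stands in for the real path-compressing one.

    #include <vector>

    struct Tarjan {
      unsigned _semi;               // candidate semidominator (a DFS number)
      std::vector<Tarjan*> preds;   // CFG predecessors, as Tarjan entries
      // Stand-in: the real EVAL returns the minimum-semi vertex on the DFS
      // path processed so far, with path compression.
      Tarjan* EVAL() { return this; }
    };

    // Shrink w->_semi toward the smallest semi reachable through any
    // predecessor's EVAL, exactly as in the loop above.
    void compute_semi(Tarjan* w) {
      for (Tarjan* v : w->preds) {
        Tarjan* u = v->EVAL();
        if (u->_semi < w->_semi) {
          w->_semi = u->_semi;
        }
      }
    }
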
    22.1 --- a/src/share/vm/opto/gcm.cpp	Fri Aug 09 01:39:11 2013 -0700
    22.2 +++ b/src/share/vm/opto/gcm.cpp	Fri Aug 09 18:05:00 2013 +0200
    22.3 @@ -66,7 +66,7 @@
    22.4  // are in b also.
    22.5  void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
    22.6    // Set basic block of n, Add n to b,
    22.7 -  _bbs.map(n->_idx, b);
    22.8 +  map_node_to_block(n, b);
    22.9    b->add_inst(n);
   22.10  
   22.11    // After Matching, nearly any old Node may have projections trailing it.
   22.12 @@ -75,11 +75,12 @@
   22.13    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   22.14      Node*  use  = n->fast_out(i);
   22.15      if (use->is_Proj()) {
   22.16 -      Block* buse = _bbs[use->_idx];
   22.17 +      Block* buse = get_block_for_node(use);
   22.18        if (buse != b) {              // In wrong block?
   22.19 -        if (buse != NULL)
   22.20 +        if (buse != NULL) {
   22.21            buse->find_remove(use);   // Remove from wrong block
   22.22 -        _bbs.map(use->_idx, b);     // Re-insert in this block
   22.23 +        }
   22.24 +        map_node_to_block(use, b);
   22.25          b->add_inst(use);
   22.26        }
   22.27      }
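
schedule_node_into_block drags a node's projections along with it, because a Proj is pinned to the node it projects from and must land in the same block. A self-contained sketch of that invariant; the containers and the map are toy stand-ins (the real code iterates fast_outs with DUIterator_Fast).

    #include <algorithm>
    #include <unordered_map>
    #include <vector>

    struct Node {
      bool is_proj = false;
      std::vector<Node*> outs;                 // users of this node
    };

    struct Block {
      std::vector<Node*> insts;
      void add_inst(Node* n)    { insts.push_back(n); }
      void find_remove(Node* n) {
        insts.erase(std::find(insts.begin(), insts.end(), n));
      }
    };

    std::unordered_map<Node*, Block*> node_block;   // toy node->block map

    // Put n into b, then pull any trailing projections of n into b as well.
    void schedule_node_into_block(Node* n, Block* b) {
      node_block[n] = b;
      b->add_inst(n);
      for (Node* use : n->outs) {
        if (!use->is_proj) continue;
        Block*& buse = node_block[use];
        if (buse != b) {                       // projection in wrong block?
          if (buse != nullptr) buse->find_remove(use);
          buse = b;                            // re-map and re-insert here
          b->add_inst(use);
        }
      }
    }
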
   22.28 @@ -97,7 +98,7 @@
   22.29    if (p != NULL && p != n) {    // Control from a block projection?
   22.30      assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
   22.31      // Find trailing Region
   22.32 -    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
   22.33 +    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
   22.34      uint j = 0;
    22.35      if (pb->_num_succs != 1) {  // More than 1 successor?

   22.36        // Search for successor
   22.37 @@ -127,14 +128,15 @@
   22.38    while ( spstack.is_nonempty() ) {
   22.39      Node *n = spstack.pop();
   22.40      if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
   22.41 -      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
   22.42 +      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
   22.43          assert( n->in(0), "pinned Node must have Control" );
   22.44          // Before setting block replace block_proj control edge
   22.45          replace_block_proj_ctrl(n);
   22.46          Node *input = n->in(0);
   22.47 -        while( !input->is_block_start() )
   22.48 +        while (!input->is_block_start()) {
   22.49            input = input->in(0);
   22.50 -        Block *b = _bbs[input->_idx];  // Basic block of controlling input
   22.51 +        }
   22.52 +        Block *b = get_block_for_node(input); // Basic block of controlling input
   22.53          schedule_node_into_block(n, b);
   22.54        }
   22.55        for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
   22.56 @@ -149,7 +151,7 @@
   22.57  // Assert that new input b2 is dominated by all previous inputs.
    22.58  // Check this by seeing that it is dominated by b1, the deepest
   22.59  // input observed until b2.
   22.60 -static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
   22.61 +static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
   22.62    if (b1 == NULL)  return;
   22.63    assert(b1->_dom_depth < b2->_dom_depth, "sanity");
   22.64    Block* tmp = b2;
   22.65 @@ -162,7 +164,7 @@
   22.66      for (uint j=0; j<n->len(); j++) { // For all inputs
   22.67        Node* inn = n->in(j); // Get input
   22.68        if (inn == NULL)  continue;  // Ignore NULL, missing inputs
   22.69 -      Block* inb = bbs[inn->_idx];
   22.70 +      Block* inb = cfg->get_block_for_node(inn);
   22.71        tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
   22.72                   inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
   22.73        inn->dump();
   22.74 @@ -174,20 +176,20 @@
   22.75  }
   22.76  #endif
   22.77  
   22.78 -static Block* find_deepest_input(Node* n, Block_Array &bbs) {
   22.79 +static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
   22.80    // Find the last input dominated by all other inputs.
   22.81    Block* deepb           = NULL;        // Deepest block so far
   22.82    int    deepb_dom_depth = 0;
   22.83    for (uint k = 0; k < n->len(); k++) { // For all inputs
   22.84      Node* inn = n->in(k);               // Get input
   22.85      if (inn == NULL)  continue;         // Ignore NULL, missing inputs
   22.86 -    Block* inb = bbs[inn->_idx];
   22.87 +    Block* inb = cfg->get_block_for_node(inn);
   22.88      assert(inb != NULL, "must already have scheduled this input");
   22.89      if (deepb_dom_depth < (int) inb->_dom_depth) {
   22.90        // The new inb must be dominated by the previous deepb.
   22.91        // The various inputs must be linearly ordered in the dom
   22.92        // tree, or else there will not be a unique deepest block.
   22.93 -      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
   22.94 +      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
   22.95        deepb = inb;                      // Save deepest block
   22.96        deepb_dom_depth = deepb->_dom_depth;
   22.97      }
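
find_deepest_input picks, among all input blocks, the one deepest in the dominator tree; assert_dom above checks that the input blocks are linearly ordered by dominance, so a unique deepest block exists and is the node's earliest legal placement. A minimal version over stand-in blocks, with the dominance precondition reduced to a comment.

    #include <vector>

    struct Block { int _dom_depth; };   // depth in the dominator tree

    // Returns the input block with maximal dominator-tree depth. Assumes
    // the input blocks dominate one another linearly (assert_dom in the
    // real code), so the result is dominated by all the other inputs.
    Block* find_deepest_input(const std::vector<Block*>& input_blocks) {
      Block* deepb = nullptr;
      int deepb_dom_depth = 0;
      for (Block* inb : input_blocks) {
        if (inb == nullptr) continue;          // ignore missing inputs
        if (deepb_dom_depth < inb->_dom_depth) {
          deepb = inb;                         // new deepest block so far
          deepb_dom_depth = inb->_dom_depth;
        }
      }
      return deepb;
    }
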
   22.98 @@ -243,7 +245,7 @@
   22.99          ++i;
  22.100          if (in == NULL) continue;    // Ignore NULL, missing inputs
  22.101          int is_visited = visited.test_set(in->_idx);
  22.102 -        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
  22.103 +        if (!has_block(in)) { // Missing block selection?
  22.104            if (is_visited) {
  22.105              // assert( !visited.test(in->_idx), "did not schedule early" );
  22.106              return false;
  22.107 @@ -265,9 +267,9 @@
  22.108          // any projections which depend on them.
  22.109          if (!n->pinned()) {
  22.110            // Set earliest legal block.
  22.111 -          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
  22.112 +          map_node_to_block(n, find_deepest_input(n, this));
  22.113          } else {
  22.114 -          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
  22.115 +          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
  22.116          }
  22.117  
  22.118          if (nstack.is_empty()) {
  22.119 @@ -313,8 +315,8 @@
  22.120  // The definition must dominate the use, so move the LCA upward in the
  22.121  // dominator tree to dominate the use.  If the use is a phi, adjust
  22.122  // the LCA only with the phi input paths which actually use this def.
  22.123 -static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  22.124 -  Block* buse = bbs[use->_idx];
  22.125 +static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  22.126 +  Block* buse = cfg->get_block_for_node(use);
  22.127    if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
  22.128    if (!use->is_Phi())  return buse->dom_lca(LCA);
  22.129    uint pmax = use->req();       // Number of Phi inputs
  22.130 @@ -329,7 +331,7 @@
  22.131    // more than once.
  22.132    for (uint j=1; j<pmax; j++) { // For all inputs
  22.133      if (use->in(j) == def) {    // Found matching input?
  22.134 -      Block* pred = bbs[buse->pred(j)->_idx];
  22.135 +      Block* pred = cfg->get_block_for_node(buse->pred(j));
  22.136        LCA = pred->dom_lca(LCA);
  22.137      }
  22.138    }
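
raise_LCA_above_use folds each use block into a running dominator-tree LCA through Block::dom_lca. The standard depth-based LCA walk looks like the sketch below; this is the textbook algorithm, not a copy of HotSpot's dom_lca, though _idom and _dom_depth are the fields the surrounding code already relies on.

    struct Block {
      Block*   _idom;        // immediate dominator; null only at the root
      unsigned _dom_depth;   // depth in the dominator tree
    };

    // Lift the deeper block until both are at the same depth, then lift
    // both in lockstep until they meet at the least common ancestor.
    Block* dom_lca(Block* a, Block* b) {
      while (a->_dom_depth > b->_dom_depth) a = a->_idom;
      while (b->_dom_depth > a->_dom_depth) b = b->_idom;
      while (a != b) { a = a->_idom; b = b->_idom; }
      return a;
    }
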
  22.139 @@ -342,8 +344,7 @@
  22.140  // which are marked with the given index.  Return the LCA (in the dom tree)
  22.141  // of all marked blocks.  If there are none marked, return the original
  22.142  // LCA.
  22.143 -static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
  22.144 -                                    Block* early, Block_Array &bbs) {
  22.145 +static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  22.146    Block_List worklist;
  22.147    worklist.push(LCA);
  22.148    while (worklist.size() > 0) {
  22.149 @@ -366,7 +367,7 @@
  22.150      } else {
  22.151        // Keep searching through this block's predecessors.
  22.152        for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
  22.153 -        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
  22.154 +        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
  22.155          worklist.push(mid_parent);
  22.156        }
  22.157      }
  22.158 @@ -384,7 +385,7 @@
  22.159  // be earlier (at a shallower dom_depth) than the true schedule_early
  22.160  // point of the node. We compute this earlier block as a more permissive
  22.161  // site for anti-dependency insertion, but only if subsume_loads is enabled.
  22.162 -static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  22.163 +static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  22.164    Node* base;
  22.165    Node* index;
  22.166    Node* store = load->in(MemNode::Memory);
  22.167 @@ -412,12 +413,12 @@
  22.168      Block* deepb           = NULL;        // Deepest block so far
  22.169      int    deepb_dom_depth = 0;
  22.170      for (int i = 0; i < mem_inputs_length; i++) {
  22.171 -      Block* inb = bbs[mem_inputs[i]->_idx];
  22.172 +      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
  22.173        if (deepb_dom_depth < (int) inb->_dom_depth) {
  22.174          // The new inb must be dominated by the previous deepb.
  22.175          // The various inputs must be linearly ordered in the dom
  22.176          // tree, or else there will not be a unique deepest block.
  22.177 -        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
  22.178 +        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
  22.179          deepb = inb;                      // Save deepest block
  22.180          deepb_dom_depth = deepb->_dom_depth;
  22.181        }
  22.182 @@ -488,14 +489,14 @@
  22.183    // and other inputs are first available.  (Computed by schedule_early.)
  22.184    // For normal loads, 'early' is the shallowest place (dom graph wise)
  22.185    // to look for anti-deps between this load and any store.
  22.186 -  Block* early = _bbs[load_index];
  22.187 +  Block* early = get_block_for_node(load);
  22.188  
  22.189    // If we are subsuming loads, compute an "early" block that only considers
  22.190    // memory or address inputs. This block may be different than the
  22.191    // schedule_early block in that it could be at an even shallower depth in the
  22.192    // dominator tree, and allow for a broader discovery of anti-dependences.
  22.193    if (C->subsume_loads()) {
  22.194 -    early = memory_early_block(load, early, _bbs);
  22.195 +    early = memory_early_block(load, early, this);
  22.196    }
  22.197  
  22.198    ResourceArea *area = Thread::current()->resource_area();
  22.199 @@ -619,7 +620,7 @@
  22.200      // or else observe that 'store' is all the way up in the
  22.201      // earliest legal block for 'load'.  In the latter case,
  22.202      // immediately insert an anti-dependence edge.
  22.203 -    Block* store_block = _bbs[store->_idx];
  22.204 +    Block* store_block = get_block_for_node(store);
  22.205      assert(store_block != NULL, "unused killing projections skipped above");
  22.206  
  22.207      if (store->is_Phi()) {
  22.208 @@ -637,7 +638,7 @@
  22.209        for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
  22.210          if (store->in(j) == mem) {   // Found matching input?
  22.211            DEBUG_ONLY(found_match = true);
  22.212 -          Block* pred_block = _bbs[store_block->pred(j)->_idx];
  22.213 +          Block* pred_block = get_block_for_node(store_block->pred(j));
  22.214            if (pred_block != early) {
  22.215              // If any predecessor of the Phi matches the load's "early block",
  22.216              // we do not need a precedence edge between the Phi and 'load'
  22.217 @@ -711,7 +712,7 @@
  22.218    // preventing the load from sinking past any block containing
  22.219    // a store that may invalidate the memory state required by 'load'.
  22.220    if (must_raise_LCA)
  22.221 -    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  22.222 +    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  22.223    if (LCA == early)  return LCA;
  22.224  
  22.225    // Insert anti-dependence edges from 'load' to each store
  22.226 @@ -720,7 +721,7 @@
  22.227    if (LCA->raise_LCA_mark() == load_index) {
  22.228      while (non_early_stores.size() > 0) {
  22.229        Node* store = non_early_stores.pop();
  22.230 -      Block* store_block = _bbs[store->_idx];
  22.231 +      Block* store_block = get_block_for_node(store);
  22.232        if (store_block == LCA) {
  22.233          // add anti_dependence from store to load in its own block
  22.234          assert(store != load->in(0), "dependence cycle found");
  22.235 @@ -754,7 +755,7 @@
  22.236  
  22.237  public:
  22.238    // Constructor for the iterator
  22.239 -  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
  22.240 +  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
  22.241  
  22.242    // Postincrement operator to iterate over the nodes
  22.243    Node *next();
  22.244 @@ -762,12 +763,12 @@
  22.245  private:
  22.246    VectorSet   &_visited;
  22.247    Node_List   &_stack;
  22.248 -  Block_Array &_bbs;
  22.249 +  PhaseCFG &_cfg;
  22.250  };
  22.251  
  22.252  // Constructor for the Node_Backward_Iterator
  22.253 -Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  22.254 -  : _visited(visited), _stack(stack), _bbs(bbs) {
  22.255 +Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  22.256 +  : _visited(visited), _stack(stack), _cfg(cfg) {
  22.257    // The stack should contain exactly the root
  22.258    stack.clear();
  22.259    stack.push(root);
  22.260 @@ -797,8 +798,8 @@
  22.261      _visited.set(self->_idx);
  22.262  
  22.263      // Now schedule all uses as late as possible.
  22.264 -    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
  22.265 -    uint src_rpo = _bbs[src]->_rpo;
  22.266 +    const Node* src = self->is_Proj() ? self->in(0) : self;
  22.267 +    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
  22.268  
  22.269      // Schedule all nodes in a post-order visit
  22.270      Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
  22.271 @@ -814,7 +815,7 @@
  22.272  
  22.273        // do not traverse backward control edges
  22.274        Node *use = n->is_Proj() ? n->in(0) : n;
  22.275 -      uint use_rpo = _bbs[use->_idx]->_rpo;
  22.276 +      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
  22.277  
  22.278        if ( use_rpo < src_rpo )
  22.279          continue;
  22.280 @@ -852,7 +853,7 @@
  22.281      tty->print("\n#---- ComputeLatenciesBackwards ----\n");
  22.282  #endif
  22.283  
  22.284 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  22.285 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  22.286    Node *n;
  22.287  
  22.288    // Walk over all the nodes from last to first
  22.289 @@ -883,7 +884,7 @@
  22.290  
  22.291    uint nlen = n->len();
  22.292    uint use_latency = _node_latency->at_grow(n->_idx);
  22.293 -  uint use_pre_order = _bbs[n->_idx]->_pre_order;
  22.294 +  uint use_pre_order = get_block_for_node(n)->_pre_order;
  22.295  
  22.296    for ( uint j=0; j<nlen; j++ ) {
  22.297      Node *def = n->in(j);
  22.298 @@ -903,7 +904,7 @@
  22.299  #endif
  22.300  
  22.301      // If the defining block is not known, assume it is ok
  22.302 -    Block *def_block = _bbs[def->_idx];
  22.303 +    Block *def_block = get_block_for_node(def);
  22.304      uint def_pre_order = def_block ? def_block->_pre_order : 0;
  22.305  
  22.306      if ( (use_pre_order <  def_pre_order) ||
  22.307 @@ -931,10 +932,11 @@
  22.308  // Compute the latency of a specific use
  22.309  int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  22.310    // If self-reference, return no latency
  22.311 -  if (use == n || use->is_Root())
  22.312 +  if (use == n || use->is_Root()) {
  22.313      return 0;
  22.314 +  }
  22.315  
  22.316 -  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  22.317 +  uint def_pre_order = get_block_for_node(def)->_pre_order;
  22.318    uint latency = 0;
  22.319  
  22.320    // If the use is not a projection, then it is simple...
  22.321 @@ -946,7 +948,7 @@
  22.322      }
  22.323  #endif
  22.324  
  22.325 -    uint use_pre_order = _bbs[use->_idx]->_pre_order;
  22.326 +    uint use_pre_order = get_block_for_node(use)->_pre_order;
  22.327  
  22.328      if (use_pre_order < def_pre_order)
  22.329        return 0;
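
latency_from_use answers how many cycles before a use a def must be ready, and ComputeLatenciesBackwards visits nodes from last to first so every use's latency is already known when its def is processed. A simplified backward pass over a post-order node list; the per-node op_latency and flat uses list are modeling assumptions, not the real pipeline tables, and projection/block filtering is elided.

    #include <algorithm>
    #include <vector>

    struct Node {
      int op_latency = 1;    // cycles this node occupies (assumed uniform)
      int latency    = 0;    // computed: slack demanded by all uses
      std::vector<Node*> uses;
    };

    // A def must be ready op_latency cycles before its most demanding use;
    // walking the schedule backwards makes each use's latency final first.
    void compute_latencies_backwards(std::vector<Node*>& post_order) {
      for (auto it = post_order.rbegin(); it != post_order.rend(); ++it) {
        Node* def = *it;
        int l = 0;
        for (Node* use : def->uses) {
          l = std::max(l, use->latency + use->op_latency);
        }
        def->latency = l;
      }
    }
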
  22.330 @@ -1018,7 +1020,7 @@
  22.331    uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  22.332    uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  22.333    bool in_latency    = (target <= start_latency);
  22.334 -  const Block* root_block = _bbs[_root->_idx];
  22.335 +  const Block* root_block = get_block_for_node(_root);
  22.336  
  22.337    // Turn off latency scheduling if scheduling is just plain off
  22.338    if (!C->do_scheduling())
  22.339 @@ -1126,12 +1128,12 @@
  22.340      tty->print("\n#---- schedule_late ----\n");
  22.341  #endif
  22.342  
  22.343 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  22.344 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  22.345    Node *self;
  22.346  
  22.347    // Walk over all the nodes from last to first
  22.348    while (self = iter.next()) {
  22.349 -    Block* early = _bbs[self->_idx];   // Earliest legal placement
  22.350 +    Block* early = get_block_for_node(self); // Earliest legal placement
  22.351  
  22.352      if (self->is_top()) {
  22.353        // Top node goes in bb #2 with other constants.
  22.354 @@ -1179,7 +1181,7 @@
  22.355        for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
  22.356          // For all uses, find LCA
  22.357          Node* use = self->fast_out(i);
  22.358 -        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
  22.359 +        LCA = raise_LCA_above_use(LCA, use, self, this);
  22.360        }
  22.361      }  // (Hide defs of imax, i from rest of block.)
  22.362  
  22.363 @@ -1187,7 +1189,7 @@
  22.364      // requirement for correctness but it reduces useless
  22.365      // interference between temps and other nodes.
  22.366      if (mach != NULL && mach->is_MachTemp()) {
  22.367 -      _bbs.map(self->_idx, LCA);
  22.368 +      map_node_to_block(self, LCA);
  22.369        LCA->add_inst(self);
  22.370        continue;
  22.371      }
  22.372 @@ -1262,10 +1264,10 @@
  22.373    }
  22.374  #endif
  22.375  
  22.376 -  // Initialize the bbs.map for things on the proj_list
  22.377 -  uint i;
  22.378 -  for( i=0; i < proj_list.size(); i++ )
  22.379 -    _bbs.map(proj_list[i]->_idx, NULL);
  22.380 +  // Initialize the node to block mapping for things on the proj_list
  22.381 +  for (uint i = 0; i < proj_list.size(); i++) {
  22.382 +    unmap_node_from_block(proj_list[i]);
  22.383 +  }
  22.384  
  22.385    // Set the basic block for Nodes pinned into blocks
  22.386    Arena *a = Thread::current()->resource_area();
  22.387 @@ -1333,7 +1335,7 @@
  22.388      for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
  22.389        Node *proj = matcher._null_check_tests[i  ];
  22.390        Node *val  = matcher._null_check_tests[i+1];
  22.391 -      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
  22.392 +      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
  22.393        // The implicit_null_check will only perform the transformation
  22.394        // if the null branch is truly uncommon, *and* it leads to an
  22.395        // uncommon trap.  Combined with the too_many_traps guards
  22.396 @@ -1353,7 +1355,7 @@
  22.397    uint max_idx = C->unique();
  22.398    GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
  22.399    visited.Clear();
  22.400 -  for (i = 0; i < _num_blocks; i++) {
  22.401 +  for (uint i = 0; i < _num_blocks; i++) {
  22.402      if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
  22.403        if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
  22.404          C->record_method_not_compilable("local schedule failed");
  22.405 @@ -1364,8 +1366,9 @@
  22.406  
   22.407    // If we inserted any instructions between a Call and its CatchNode,
  22.408    // clone the instructions on all paths below the Catch.
  22.409 -  for( i=0; i < _num_blocks; i++ )
  22.410 -    _blocks[i]->call_catch_cleanup(_bbs, C);
  22.411 +  for (uint i = 0; i < _num_blocks; i++) {
  22.412 +    _blocks[i]->call_catch_cleanup(this, C);
  22.413 +  }
  22.414  
  22.415  #ifndef PRODUCT
  22.416    if (trace_opto_pipelining()) {
  22.417 @@ -1392,7 +1395,7 @@
  22.418      Block_List worklist;
  22.419      Block* root_blk = _blocks[0];
  22.420      for (uint i = 1; i < root_blk->num_preds(); i++) {
  22.421 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
  22.422 +      Block *pb = get_block_for_node(root_blk->pred(i));
  22.423        if (pb->has_uncommon_code()) {
  22.424          worklist.push(pb);
  22.425        }
  22.426 @@ -1401,7 +1404,7 @@
  22.427        Block* uct = worklist.pop();
  22.428        if (uct == _broot) continue;
  22.429        for (uint i = 1; i < uct->num_preds(); i++) {
  22.430 -        Block *pb = _bbs[uct->pred(i)->_idx];
  22.431 +        Block *pb = get_block_for_node(uct->pred(i));
  22.432          if (pb->_num_succs == 1) {
  22.433            worklist.push(pb);
  22.434          } else if (pb->num_fall_throughs() == 2) {
  22.435 @@ -1430,7 +1433,7 @@
  22.436      Block_List worklist;
  22.437      Block* root_blk = _blocks[0];
  22.438      for (uint i = 1; i < root_blk->num_preds(); i++) {
  22.439 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
  22.440 +      Block *pb = get_block_for_node(root_blk->pred(i));
  22.441        if (pb->has_uncommon_code()) {
  22.442          worklist.push(pb);
  22.443        }
  22.444 @@ -1439,7 +1442,7 @@
  22.445        Block* uct = worklist.pop();
  22.446        uct->_freq = PROB_MIN;
  22.447        for (uint i = 1; i < uct->num_preds(); i++) {
  22.448 -        Block *pb = _bbs[uct->pred(i)->_idx];
  22.449 +        Block *pb = get_block_for_node(uct->pred(i));
  22.450          if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
  22.451            worklist.push(pb);
  22.452          }
  22.453 @@ -1499,7 +1502,7 @@
  22.454        Block* loop_head = b;
  22.455        assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  22.456        Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
  22.457 -      Block* tail = _bbs[tail_n->_idx];
  22.458 +      Block* tail = get_block_for_node(tail_n);
  22.459  
  22.460        // Defensively filter out Loop nodes for non-single-entry loops.
  22.461        // For all reasonable loops, the head occurs before the tail in RPO.
  22.462 @@ -1514,13 +1517,13 @@
  22.463          loop_head->_loop = nloop;
  22.464          // Add to nloop so push_pred() will skip over inner loops
  22.465          nloop->add_member(loop_head);
  22.466 -        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
  22.467 +        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
  22.468  
  22.469          while (worklist.size() > 0) {
  22.470            Block* member = worklist.pop();
  22.471            if (member != loop_head) {
  22.472              for (uint j = 1; j < member->num_preds(); j++) {
  22.473 -              nloop->push_pred(member, j, worklist, _bbs);
  22.474 +              nloop->push_pred(member, j, worklist, this);
  22.475              }
  22.476            }
  22.477          }
  22.478 @@ -1557,9 +1560,9 @@
  22.479  }
  22.480  
  22.481  //------------------------------push_pred--------------------------------------
  22.482 -void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
  22.483 +void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  22.484    Node* pred_n = blk->pred(i);
  22.485 -  Block* pred = node_to_blk[pred_n->_idx];
  22.486 +  Block* pred = cfg->get_block_for_node(pred_n);
  22.487    CFGLoop *pred_loop = pred->_loop;
  22.488    if (pred_loop == NULL) {
  22.489      // Filter out blocks for non-single-entry loops.
  22.490 @@ -1580,7 +1583,7 @@
  22.491        Block* pred_head = pred_loop->head();
  22.492        assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  22.493        assert(pred_head != head(), "loop head in only one loop");
  22.494 -      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
  22.495 +      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
  22.496      } else {
  22.497        assert(pred_loop->_parent == this && _parent == NULL, "just checking");
  22.498      }
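
push_pred grows a natural loop backward from its back edge: starting from the head's LoopBackControl predecessor, predecessor blocks are pulled in until the walk closes at the head, and already-built inner loops are skipped by jumping through their entry control. A compact worklist rendering of the core idea, with the inner-loop skip reduced to a comment.

    #include <unordered_set>
    #include <vector>

    struct Block { std::vector<Block*> preds; };

    // Members of the natural loop with header head and back-edge source
    // tail: everything reaching the back edge without passing the header.
    std::unordered_set<Block*> loop_members(Block* head, Block* tail) {
      std::unordered_set<Block*> members{head};
      std::vector<Block*> worklist;
      if (members.insert(tail).second) worklist.push_back(tail);
      while (!worklist.empty()) {
        Block* b = worklist.back();
        worklist.pop_back();
        for (Block* p : b->preds) {          // push_pred analogue
          // (The real code hops to an inner loop's EntryControl here
          //  instead of walking the inner loop's body block by block.)
          if (members.insert(p).second) worklist.push_back(p);
        }
      }
      return members;
    }
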
    23.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 09 01:39:11 2013 -0700
    23.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 09 18:05:00 2013 +0200
    23.3 @@ -413,9 +413,9 @@
    23.4      print_prop("debug_idx", node->_debug_idx);
    23.5  #endif
    23.6  
    23.7 -    if(C->cfg() != NULL) {
    23.8 -      Block *block = C->cfg()->_bbs[node->_idx];
    23.9 -      if(block == NULL) {
   23.10 +    if (C->cfg() != NULL) {
   23.11 +      Block* block = C->cfg()->get_block_for_node(node);
   23.12 +      if (block == NULL) {
   23.13          print_prop("block", C->cfg()->_blocks[0]->_pre_order);
   23.14        } else {
   23.15          print_prop("block", block->_pre_order);
    24.1 --- a/src/share/vm/opto/ifg.cpp	Fri Aug 09 01:39:11 2013 -0700
    24.2 +++ b/src/share/vm/opto/ifg.cpp	Fri Aug 09 18:05:00 2013 +0200
    24.3 @@ -565,7 +565,7 @@
    24.4                lrgs(r)._def = 0;
    24.5              }
    24.6              n->disconnect_inputs(NULL, C);
    24.7 -            _cfg._bbs.map(n->_idx,NULL);
    24.8 +            _cfg.unmap_node_from_block(n);
    24.9              n->replace_by(C->top());
   24.10              // Since yanking a Node from block, high pressure moves up one
   24.11              hrp_index[0]--;
   24.12 @@ -607,7 +607,7 @@
   24.13            if( n->is_SpillCopy()
   24.14                && lrgs(r).is_singledef()        // MultiDef live range can still split
   24.15                && n->outcnt() == 1              // and use must be in this block
   24.16 -              && _cfg._bbs[n->unique_out()->_idx] == b ) {
   24.17 +              && _cfg.get_block_for_node(n->unique_out()) == b ) {
   24.18              // All single-use MachSpillCopy(s) that immediately precede their
   24.19              // use must color early.  If a longer live range steals their
   24.20              // color, the spill copy will split and may push another spill copy
    25.1 --- a/src/share/vm/opto/lcm.cpp	Fri Aug 09 01:39:11 2013 -0700
    25.2 +++ b/src/share/vm/opto/lcm.cpp	Fri Aug 09 18:05:00 2013 +0200
    25.3 @@ -237,7 +237,7 @@
    25.4      }
    25.5  
    25.6      // Check ctrl input to see if the null-check dominates the memory op
    25.7 -    Block *cb = cfg->_bbs[mach->_idx];
    25.8 +    Block *cb = cfg->get_block_for_node(mach);
    25.9      cb = cb->_idom;             // Always hoist at least 1 block
   25.10      if( !was_store ) {          // Stores can be hoisted only one block
   25.11        while( cb->_dom_depth > (_dom_depth + 1))
   25.12 @@ -262,7 +262,7 @@
   25.13          if( is_decoden ) continue;
   25.14        }
   25.15        // Block of memory-op input
   25.16 -      Block *inb = cfg->_bbs[mach->in(j)->_idx];
   25.17 +      Block *inb = cfg->get_block_for_node(mach->in(j));
   25.18        Block *b = this;          // Start from nul check
   25.19        while( b != inb && b->_dom_depth > inb->_dom_depth )
   25.20          b = b->_idom;           // search upwards for input
   25.21 @@ -272,7 +272,7 @@
   25.22      }
   25.23      if( j > 0 )
   25.24        continue;
   25.25 -    Block *mb = cfg->_bbs[mach->_idx];
   25.26 +    Block *mb = cfg->get_block_for_node(mach);
   25.27      // Hoisting stores requires more checks for the anti-dependence case.
   25.28      // Give up hoisting if we have to move the store past any load.
   25.29      if( was_store ) {
   25.30 @@ -291,7 +291,7 @@
   25.31            break;                // Found anti-dependent load
    25.32          // Make sure control does not do a merge (would have to check all paths)
   25.33          if( b->num_preds() != 2 ) break;
   25.34 -        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
   25.35 +        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
   25.36        }
   25.37        if( b != this ) continue;
   25.38      }
   25.39 @@ -303,15 +303,15 @@
   25.40  
   25.41      // Found a candidate!  Pick one with least dom depth - the highest
   25.42      // in the dom tree should be closest to the null check.
   25.43 -    if( !best ||
   25.44 -        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
   25.45 +    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
   25.46        best = mach;
   25.47        bidx = vidx;
   25.48 -
   25.49      }
   25.50    }
   25.51    // No candidate!
   25.52 -  if( !best ) return;
   25.53 +  if (best == NULL) {
   25.54 +    return;
   25.55 +  }
   25.56  
   25.57    // ---- Found an implicit null check
   25.58    extern int implicit_null_checks;
   25.59 @@ -319,29 +319,29 @@
   25.60  
   25.61    if( is_decoden ) {
   25.62      // Check if we need to hoist decodeHeapOop_not_null first.
   25.63 -    Block *valb = cfg->_bbs[val->_idx];
   25.64 +    Block *valb = cfg->get_block_for_node(val);
   25.65      if( this != valb && this->_dom_depth < valb->_dom_depth ) {
   25.66        // Hoist it up to the end of the test block.
   25.67        valb->find_remove(val);
   25.68        this->add_inst(val);
   25.69 -      cfg->_bbs.map(val->_idx,this);
   25.70 +      cfg->map_node_to_block(val, this);
   25.71        // DecodeN on x86 may kill flags. Check for flag-killing projections
   25.72        // that also need to be hoisted.
   25.73        for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
   25.74          Node* n = val->fast_out(j);
   25.75          if( n->is_MachProj() ) {
   25.76 -          cfg->_bbs[n->_idx]->find_remove(n);
   25.77 +          cfg->get_block_for_node(n)->find_remove(n);
   25.78            this->add_inst(n);
   25.79 -          cfg->_bbs.map(n->_idx,this);
   25.80 +          cfg->map_node_to_block(n, this);
   25.81          }
   25.82        }
   25.83      }
   25.84    }
   25.85    // Hoist the memory candidate up to the end of the test block.
   25.86 -  Block *old_block = cfg->_bbs[best->_idx];
   25.87 +  Block *old_block = cfg->get_block_for_node(best);
   25.88    old_block->find_remove(best);
   25.89    add_inst(best);
   25.90 -  cfg->_bbs.map(best->_idx,this);
   25.91 +  cfg->map_node_to_block(best, this);
   25.92  
   25.93    // Move the control dependence
   25.94    if (best->in(0) && best->in(0) == old_block->_nodes[0])
   25.95 @@ -352,9 +352,9 @@
   25.96    for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
   25.97      Node* n = best->fast_out(j);
   25.98      if( n->is_MachProj() ) {
   25.99 -      cfg->_bbs[n->_idx]->find_remove(n);
  25.100 +      cfg->get_block_for_node(n)->find_remove(n);
  25.101        add_inst(n);
  25.102 -      cfg->_bbs.map(n->_idx,this);
  25.103 +      cfg->map_node_to_block(n, this);
  25.104      }
  25.105    }
  25.106  
  25.107 @@ -385,7 +385,7 @@
  25.108    Node *old_tst = proj->in(0);
  25.109    MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  25.110    _nodes.map(end_idx(),nul_chk);
  25.111 -  cfg->_bbs.map(nul_chk->_idx,this);
  25.112 +  cfg->map_node_to_block(nul_chk, this);
  25.113    // Redirect users of old_test to nul_chk
  25.114    for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
  25.115      old_tst->last_out(i2)->set_req(0, nul_chk);
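
This is the implicit null check transformation: the explicit compare-and-branch is replaced by hoisting a memory op that dereferences the tested value into the branch slot, so a null value traps in hardware and the signal handler routes control to the uncommon path. The final loop rewires every user of the old test to the new MachNullCheckNode; a generic sketch of that rewiring step follows (stand-in types; HotSpot's set_req also maintains the reverse def-use lists, which this toy omits).

    #include <vector>

    struct Node {
      std::vector<Node*> in;     // in[0] is the control input
      std::vector<Node*> outs;   // users of this node
      void set_req(int i, Node* n) { in[i] = n; }
    };

    // Walk users from the back, mirroring DUIterator_Last, and point each
    // one's control input at the new null-check node.
    void replace_ctrl_users(Node* old_tst, Node* nul_chk) {
      for (auto i = old_tst->outs.rbegin(); i != old_tst->outs.rend(); ++i) {
        (*i)->set_req(0, nul_chk);
      }
    }
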
  25.116 @@ -468,7 +468,7 @@
  25.117          Node* use = n->fast_out(j);
  25.118  
  25.119          // The use is a conditional branch, make them adjacent
  25.120 -        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
  25.121 +        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
  25.122            found_machif = true;
  25.123            break;
  25.124          }
  25.125 @@ -529,13 +529,14 @@
  25.126  
  25.127  
  25.128  //------------------------------set_next_call----------------------------------
  25.129 -void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  25.130 +void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
  25.131    if( next_call.test_set(n->_idx) ) return;
  25.132    for( uint i=0; i<n->len(); i++ ) {
  25.133      Node *m = n->in(i);
  25.134      if( !m ) continue;  // must see all nodes in block that precede call
  25.135 -    if( bbs[m->_idx] == this )
  25.136 -      set_next_call( m, next_call, bbs );
  25.137 +    if (cfg->get_block_for_node(m) == this) {
  25.138 +      set_next_call(m, next_call, cfg);
  25.139 +    }
  25.140    }
  25.141  }
  25.142  
  25.143 @@ -545,12 +546,12 @@
  25.144  // next subroutine call get priority - basically it moves things NOT needed
  25.145  // for the next call till after the call.  This prevents me from trying to
  25.146  // carry lots of stuff live across a call.
  25.147 -void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  25.148 +void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
  25.149    // Find the next control-defining Node in this block
  25.150    Node* call = NULL;
  25.151    for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
  25.152      Node* m = this_call->fast_out(i);
  25.153 -    if( bbs[m->_idx] == this && // Local-block user
   25.154 +    if (cfg->get_block_for_node(m) == this && // Local-block user
  25.155          m != this_call &&       // Not self-start node
  25.156          m->is_MachCall() )
  25.157        call = m;
  25.158 @@ -558,7 +559,7 @@
  25.159    }
  25.160    if (call == NULL)  return;    // No next call (e.g., block end is near)
  25.161    // Set next-call for all inputs to this call
  25.162 -  set_next_call(call, next_call, bbs);
  25.163 +  set_next_call(call, next_call, cfg);
  25.164  }
  25.165  
  25.166  //------------------------------add_call_kills-------------------------------------
  25.167 @@ -578,7 +579,7 @@
  25.168  
  25.169  
  25.170  //------------------------------sched_call-------------------------------------
  25.171 -uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  25.172 +uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  25.173    RegMask regs;
  25.174  
  25.175    // Schedule all the users of the call right now.  All the users are
  25.176 @@ -597,12 +598,14 @@
  25.177      // Check for scheduling the next control-definer
  25.178      if( n->bottom_type() == Type::CONTROL )
  25.179        // Warm up next pile of heuristic bits
  25.180 -      needed_for_next_call(n, next_call, bbs);
  25.181 +      needed_for_next_call(n, next_call, cfg);
  25.182  
  25.183      // Children of projections are now all ready
  25.184      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  25.185        Node* m = n->fast_out(j); // Get user
  25.186 -      if( bbs[m->_idx] != this ) continue;
   25.187 +      if (cfg->get_block_for_node(m) != this) {
  25.188 +        continue;
  25.189 +      }
  25.190        if( m->is_Phi() ) continue;
  25.191        int m_cnt = ready_cnt.at(m->_idx)-1;
  25.192        ready_cnt.at_put(m->_idx, m_cnt);
  25.193 @@ -620,7 +623,7 @@
  25.194    uint r_cnt = mcall->tf()->range()->cnt();
  25.195    int op = mcall->ideal_Opcode();
  25.196    MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  25.197 -  bbs.map(proj->_idx,this);
  25.198 +  cfg->map_node_to_block(proj, this);
  25.199    _nodes.insert(node_cnt++, proj);
  25.200  
  25.201    // Select the right register save policy.
  25.202 @@ -708,7 +711,7 @@
  25.203        uint local = 0;
  25.204        for( uint j=0; j<cnt; j++ ) {
  25.205          Node *m = n->in(j);
  25.206 -        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
  25.207 +        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
  25.208            local++;              // One more block-local input
  25.209        }
  25.210        ready_cnt.at_put(n->_idx, local); // Count em up
  25.211 @@ -720,7 +723,7 @@
  25.212            for (uint prec = n->req(); prec < n->len(); prec++) {
  25.213              Node* oop_store = n->in(prec);
  25.214              if (oop_store != NULL) {
  25.215 -              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
  25.216 +              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
  25.217              }
  25.218            }
  25.219          }
  25.220 @@ -753,7 +756,7 @@
  25.221      Node *n = _nodes[i3];       // Get pre-scheduled
  25.222      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  25.223        Node* m = n->fast_out(j);
  25.224 -      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
  25.225 +      if (cfg->get_block_for_node(m) == this) { // Local-block user
  25.226          int m_cnt = ready_cnt.at(m->_idx)-1;
  25.227          ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
  25.228        }
  25.229 @@ -786,7 +789,7 @@
  25.230    }
  25.231  
  25.232    // Warm up the 'next_call' heuristic bits
  25.233 -  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
  25.234 +  needed_for_next_call(_nodes[0], next_call, cfg);
  25.235  
  25.236  #ifndef PRODUCT
  25.237      if (cfg->trace_opto_pipelining()) {
  25.238 @@ -837,7 +840,7 @@
  25.239  #endif
  25.240      if( n->is_MachCall() ) {
  25.241        MachCallNode *mcall = n->as_MachCall();
  25.242 -      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
  25.243 +      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
  25.244        continue;
  25.245      }
  25.246  
  25.247 @@ -847,7 +850,7 @@
  25.248        regs.OR(n->out_RegMask());
  25.249  
  25.250        MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
  25.251 -      cfg->_bbs.map(proj->_idx,this);
  25.252 +      cfg->map_node_to_block(proj, this);
  25.253        _nodes.insert(phi_cnt++, proj);
  25.254  
  25.255        add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
  25.256 @@ -856,7 +859,9 @@
  25.257      // Children are now all ready
  25.258      for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
  25.259        Node* m = n->fast_out(i5); // Get user
  25.260 -      if( cfg->_bbs[m->_idx] != this ) continue;
  25.261 +      if (cfg->get_block_for_node(m) != this) {
  25.262 +        continue;
  25.263 +      }
  25.264        if( m->is_Phi() ) continue;
  25.265        if (m->_idx >= max_idx) { // new node, skip it
  25.266          assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
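
Local scheduling in this file is classic list scheduling: each node starts with a ready count equal to its number of block-local inputs, and emitting a node decrements the count of every local user; a user whose count reaches zero becomes ready. A self-contained sketch of that bookkeeping (toy maps instead of the idx-indexed GrowableArray; the Phi and new-projection special cases are elided).

    #include <unordered_map>
    #include <vector>

    struct Node  { std::vector<Node*> outs; };
    struct Block { };

    // After emitting n into b, release its same-block users: each has one
    // fewer unscheduled input, and joins the worklist when all are placed.
    void release_users(Node* n, Block* b,
                       const std::unordered_map<Node*, Block*>& node_block,
                       std::unordered_map<Node*, int>& ready_cnt,
                       std::vector<Node*>& worklist) {
      for (Node* m : n->outs) {
        auto it = node_block.find(m);
        if (it == node_block.end() || it->second != b) continue; // not local
        if (--ready_cnt[m] == 0) {
          worklist.push_back(m);
        }
      }
    }
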
  25.267 @@ -914,7 +919,7 @@
  25.268  }
  25.269  
  25.270  //------------------------------catch_cleanup_find_cloned_def------------------
  25.271 -static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  25.272 +static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  25.273    assert( use_blk != def_blk, "Inter-block cleanup only");
  25.274  
  25.275    // The use is some block below the Catch.  Find and return the clone of the def
  25.276 @@ -940,7 +945,8 @@
  25.277      // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
  25.278      Node_Array inputs = new Node_List(Thread::current()->resource_area());
  25.279      for(uint k = 1; k < use_blk->num_preds(); k++) {
  25.280 -      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
  25.281 +      Block* block = cfg->get_block_for_node(use_blk->pred(k));
  25.282 +      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
  25.283      }
  25.284  
  25.285      // Check to see if the use_blk already has an identical phi inserted.
  25.286 @@ -962,7 +968,7 @@
  25.287      if (fixup == NULL) {
  25.288        Node *new_phi = PhiNode::make(use_blk->head(), def);
  25.289        use_blk->_nodes.insert(1, new_phi);
  25.290 -      bbs.map(new_phi->_idx, use_blk);
  25.291 +      cfg->map_node_to_block(new_phi, use_blk);
  25.292        for (uint k = 1; k < use_blk->num_preds(); k++) {
  25.293          new_phi->set_req(k, inputs[k]);
  25.294        }
  25.295 @@ -1002,17 +1008,17 @@
  25.296  //------------------------------catch_cleanup_inter_block---------------------
  25.297  // Fix all input edges in use that reference "def".  The use is in a different
  25.298  // block than the def.
  25.299 -static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  25.300 +static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  25.301    if( !use_blk ) return;        // Can happen if the use is a precedence edge
  25.302  
  25.303 -  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  25.304 +  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
  25.305    catch_cleanup_fix_all_inputs(use, def, new_def);
  25.306  }
  25.307  
  25.308  //------------------------------call_catch_cleanup-----------------------------
   25.309  // If we inserted any instructions between a Call and its CatchNode,
  25.310  // clone the instructions on all paths below the Catch.
  25.311 -void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
  25.312 +void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
  25.313  
  25.314    // End of region to clone
  25.315    uint end = end_idx();
  25.316 @@ -1037,7 +1043,7 @@
  25.317        // since clones dominate on each path.
  25.318        Node *clone = _nodes[j-1]->clone();
  25.319        sb->_nodes.insert( 1, clone );
  25.320 -      bbs.map(clone->_idx,sb);
  25.321 +      cfg->map_node_to_block(clone, sb);
  25.322      }
  25.323    }
  25.324  
  25.325 @@ -1054,18 +1060,19 @@
  25.326      uint max = out->size();
  25.327      for (uint j = 0; j < max; j++) {// For all users
  25.328        Node *use = out->pop();
  25.329 -      Block *buse = bbs[use->_idx];
  25.330 +      Block *buse = cfg->get_block_for_node(use);
  25.331        if( use->is_Phi() ) {
  25.332          for( uint k = 1; k < use->req(); k++ )
  25.333            if( use->in(k) == n ) {
  25.334 -            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
  25.335 +            Block* block = cfg->get_block_for_node(buse->pred(k));
  25.336 +            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
  25.337              use->set_req(k, fixup);
  25.338            }
  25.339        } else {
  25.340          if (this == buse) {
  25.341            catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
  25.342          } else {
  25.343 -          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
  25.344 +          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
  25.345          }
  25.346        }
  25.347      } // End for all users
    26.1 --- a/src/share/vm/opto/live.cpp	Fri Aug 09 01:39:11 2013 -0700
    26.2 +++ b/src/share/vm/opto/live.cpp	Fri Aug 09 18:05:00 2013 +0200
    26.3 @@ -101,7 +101,7 @@
    26.4        for( uint k=1; k<cnt; k++ ) {
    26.5          Node *nk = n->in(k);
    26.6          uint nkidx = nk->_idx;
    26.7 -        if( _cfg._bbs[nkidx] != b ) {
    26.8 +        if (_cfg.get_block_for_node(nk) != b) {
    26.9            uint u = _names[nkidx];
   26.10            use->insert( u );
   26.11            DEBUG_ONLY(def_outside->insert( u );)
   26.12 @@ -121,7 +121,7 @@
   26.13  
   26.14      // Push these live-in things to predecessors
   26.15      for( uint l=1; l<b->num_preds(); l++ ) {
   26.16 -      Block *p = _cfg._bbs[b->pred(l)->_idx];
   26.17 +      Block *p = _cfg.get_block_for_node(b->pred(l));
   26.18        add_liveout( p, use, first_pass );
   26.19  
   26.20        // PhiNode uses go in the live-out set of prior blocks.
   26.21 @@ -142,8 +142,10 @@
   26.22        assert( delta->count(), "missing delta set" );
   26.23  
   26.24        // Add new-live-in to predecessors live-out sets
   26.25 -      for( uint l=1; l<b->num_preds(); l++ )
   26.26 -        add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
   26.27 +      for (uint l = 1; l < b->num_preds(); l++) {
   26.28 +        Block* block = _cfg.get_block_for_node(b->pred(l));
   26.29 +        add_liveout(block, delta, first_pass);
   26.30 +      }
   26.31  
   26.32        freeset(b);
   26.33      } // End of while-worklist-not-empty
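
live.cpp computes textbook backward liveness: live-in(b) = use(b) + (live-out(b) - def(b)), and each block's live-in is pushed into every predecessor's live-out until nothing changes; the hunks above are exactly that predecessor push, now routed through the CFG accessor. A small fixed-point sketch; plain unordered_sets stand in for the IndexSet deltas, and the Phi special case noted in the comment above (Phi uses belong only to the matching predecessor's live-out) is omitted.

    #include <unordered_set>
    #include <vector>

    struct Block {
      std::vector<Block*> preds;
      std::unordered_set<int> use, def;   // upward-exposed uses / local defs
      std::unordered_set<int> live_out;
    };

    // Round-robin to a fixed point, mirroring the add_liveout worklist.
    void compute_liveness(std::vector<Block*>& blocks) {
      bool changed = true;
      while (changed) {
        changed = false;
        for (Block* b : blocks) {
          std::unordered_set<int> live_in = b->use;       // use(b)
          for (int e : b->live_out) {
            if (!b->def.count(e)) live_in.insert(e);      // live-out - def
          }
          for (Block* p : b->preds) {
            for (int e : live_in) {
              changed |= p->live_out.insert(e).second;    // push to preds
            }
          }
        }
      }
    }
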
    27.1 --- a/src/share/vm/opto/node.hpp	Fri Aug 09 01:39:11 2013 -0700
    27.2 +++ b/src/share/vm/opto/node.hpp	Fri Aug 09 18:05:00 2013 +0200
    27.3 @@ -42,7 +42,6 @@
    27.4  class AllocateArrayNode;
    27.5  class AllocateNode;
    27.6  class Block;
    27.7 -class Block_Array;
    27.8  class BoolNode;
    27.9  class BoxLockNode;
   27.10  class CMoveNode;
    28.1 --- a/src/share/vm/opto/output.cpp	Fri Aug 09 01:39:11 2013 -0700
    28.2 +++ b/src/share/vm/opto/output.cpp	Fri Aug 09 18:05:00 2013 +0200
    28.3 @@ -68,7 +68,6 @@
    28.4      return;
    28.5    }
    28.6    // Make sure I can find the Start Node
    28.7 -  Block_Array& bbs = _cfg->_bbs;
    28.8    Block *entry = _cfg->_blocks[1];
    28.9    Block *broot = _cfg->_broot;
   28.10  
   28.11 @@ -77,8 +76,8 @@
   28.12    // Replace StartNode with prolog
   28.13    MachPrologNode *prolog = new (this) MachPrologNode();
   28.14    entry->_nodes.map( 0, prolog );
   28.15 -  bbs.map( prolog->_idx, entry );
   28.16 -  bbs.map( start->_idx, NULL ); // start is no longer in any block
   28.17 +  _cfg->map_node_to_block(prolog, entry);
   28.18 +  _cfg->unmap_node_from_block(start); // start is no longer in any block
   28.19  
   28.20    // Virtual methods need an unverified entry point
   28.21  
   28.22 @@ -117,8 +116,7 @@
   28.23        if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
   28.24          MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   28.25          b->add_inst( epilog );
   28.26 -        bbs.map(epilog->_idx, b);
   28.27 -        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
   28.28 +        _cfg->map_node_to_block(epilog, b);
   28.29        }
   28.30      }
   28.31    }
   28.32 @@ -252,7 +250,7 @@
   28.33          if (insert) {
   28.34            Node *zap = call_zap_node(n->as_MachSafePoint(), i);
   28.35            b->_nodes.insert( j, zap );
   28.36 -          _cfg->_bbs.map( zap->_idx, b );
   28.37 +          _cfg->map_node_to_block(zap, b);
   28.38            ++j;
   28.39          }
   28.40        }
   28.41 @@ -1234,7 +1232,7 @@
   28.42  #ifdef ASSERT
   28.43      if (!b->is_connector()) {
   28.44        stringStream st;
   28.45 -      b->dump_head(&_cfg->_bbs, &st);
   28.46 +      b->dump_head(_cfg, &st);
   28.47        MacroAssembler(cb).block_comment(st.as_string());
   28.48      }
   28.49      jmp_target[i] = 0;
   28.50 @@ -1310,7 +1308,7 @@
   28.51            MachNode *nop = new (this) MachNopNode(nops_cnt);
   28.52            b->_nodes.insert(j++, nop);
   28.53            last_inst++;
   28.54 -          _cfg->_bbs.map( nop->_idx, b );
   28.55 +          _cfg->map_node_to_block(nop, b);
   28.56            nop->emit(*cb, _regalloc);
   28.57            cb->flush_bundle(true);
   28.58            current_offset = cb->insts_size();
   28.59 @@ -1395,7 +1393,7 @@
   28.60                if (needs_padding && replacement->avoid_back_to_back()) {
   28.61                  MachNode *nop = new (this) MachNopNode();
   28.62                  b->_nodes.insert(j++, nop);
   28.63 -                _cfg->_bbs.map(nop->_idx, b);
   28.64 +                _cfg->map_node_to_block(nop, b);
   28.65                  last_inst++;
   28.66                  nop->emit(*cb, _regalloc);
   28.67                  cb->flush_bundle(true);
   28.68 @@ -1549,7 +1547,7 @@
   28.69        if( padding > 0 ) {
   28.70          MachNode *nop = new (this) MachNopNode(padding / nop_size);
   28.71          b->_nodes.insert( b->_nodes.size(), nop );
   28.72 -        _cfg->_bbs.map( nop->_idx, b );
   28.73 +        _cfg->map_node_to_block(nop, b);
   28.74          nop->emit(*cb, _regalloc);
   28.75          current_offset = cb->insts_size();
   28.76        }
   28.77 @@ -1737,7 +1735,6 @@
   28.78  Scheduling::Scheduling(Arena *arena, Compile &compile)
   28.79    : _arena(arena),
   28.80      _cfg(compile.cfg()),
   28.81 -    _bbs(compile.cfg()->_bbs),
   28.82      _regalloc(compile.regalloc()),
   28.83      _reg_node(arena),
   28.84      _bundle_instr_count(0),
   28.85 @@ -2085,8 +2082,9 @@
   28.86      if( def->is_Proj() )        // If this is a machine projection, then
   28.87        def = def->in(0);         // propagate usage thru to the base instruction
   28.88  
   28.89 -    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
   28.90 +    if (_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
   28.91        continue;
   28.92 +    }
   28.93  
   28.94      // Compute the latency
   28.95      uint l = _bundle_cycle_number + n->latency(i);
   28.96 @@ -2358,9 +2356,10 @@
   28.97        Node *inp = n->in(k);
   28.98        if (!inp) continue;
   28.99        assert(inp != n, "no cycles allowed" );
  28.100 -      if( _bbs[inp->_idx] == bb ) { // Block-local use?
  28.101 -        if( inp->is_Proj() )    // Skip through Proj's
  28.102 +      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
  28.103 +        if (inp->is_Proj()) { // Skip through Proj's
  28.104            inp = inp->in(0);
  28.105 +        }
  28.106          ++_uses[inp->_idx];     // Count 1 block-local use
  28.107        }
  28.108      }
  28.109 @@ -2643,7 +2642,7 @@
  28.110      return;
  28.111  
  28.112    Node *pinch = _reg_node[def_reg]; // Get pinch point
  28.113 -  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
  28.114 +  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
  28.115        is_def ) {    // Check for a true def (not a kill)
  28.116      _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
  28.117      return;
  28.118 @@ -2669,7 +2668,7 @@
  28.119        _cfg->C->record_method_not_compilable("too many D-U pinch points");
  28.120        return;
  28.121      }
  28.122 -    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
  28.123 +    _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
  28.124      _reg_node.map(def_reg,pinch); // Record pinch-point
  28.125      //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
  28.126      if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
  28.127 @@ -2713,9 +2712,9 @@
  28.128      return;
  28.129    Node *pinch = _reg_node[use_reg]; // Get pinch point
  28.130    // Check for no later def_reg/kill in block
  28.131 -  if( pinch && _bbs[pinch->_idx] == b &&
  28.132 +  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
  28.133        // Use has to be block-local as well
  28.134 -      _bbs[use->_idx] == b ) {
  28.135 +      _cfg->get_block_for_node(use) == b) {
  28.136      if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
  28.137          pinch->req() == 1 ) {   // pinch not yet in block?
  28.138        pinch->del_req(0);        // yank pointer to later-def, also set flag
  28.139 @@ -2895,7 +2894,7 @@
  28.140      int trace_cnt = 0;
  28.141      for (uint k = 0; k < _reg_node.Size(); k++) {
  28.142        Node* pinch = _reg_node[k];
  28.143 -      if (pinch != NULL && pinch->Opcode() == Op_Node &&
  28.144 +      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
   28.145           // no precedence input edges
  28.146            (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
  28.147          cleanup_pinch(pinch);
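
Note: a recurring invariant in the output.cpp hunks is that whenever a pass materializes a new node (prolog, epilog, nop padding, zap node, pinch point) and places it in a block's instruction list, it must also register the node in the CFG's node-to-block mapping. A condensed sketch of the pattern, taken from the nop-padding hunks above:

    // Creating a node and scheduling it into block b keeps two structures
    // in sync: the block's instruction list and the CFG's node mapping.
    MachNode *nop = new (this) MachNopNode();
    b->_nodes.insert(j++, nop);        // place it in the schedule
    _cfg->map_node_to_block(nop, b);   // and record its home block

The output.hpp hunk below removes Scheduling's cached _bbs reference for the same reason: with accessors on PhaseCFG, every query now routes through _cfg.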
    29.1 --- a/src/share/vm/opto/output.hpp	Fri Aug 09 01:39:11 2013 -0700
    29.2 +++ b/src/share/vm/opto/output.hpp	Fri Aug 09 18:05:00 2013 +0200
    29.3 @@ -96,9 +96,6 @@
    29.4    // List of nodes currently available for choosing for scheduling
    29.5    Node_List _available;
    29.6  
    29.7 -  // Mapping from node (index) to basic block
    29.8 -  Block_Array& _bbs;
    29.9 -
   29.10    // For each instruction beginning a bundle, the number of following
   29.11    // nodes to be bundled with it.
   29.12    Bundle *_node_bundling_base;
    30.1 --- a/src/share/vm/opto/postaloc.cpp	Fri Aug 09 01:39:11 2013 -0700
    30.2 +++ b/src/share/vm/opto/postaloc.cpp	Fri Aug 09 18:05:00 2013 +0200
    30.3 @@ -78,11 +78,13 @@
    30.4  // Helper function for yank_if_dead
    30.5  int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
    30.6    int blk_adjust=0;
    30.7 -  Block *oldb = _cfg._bbs[old->_idx];
    30.8 +  Block *oldb = _cfg.get_block_for_node(old);
    30.9    oldb->find_remove(old);
   30.10    // Count 1 if deleting an instruction from the current block
   30.11 -  if( oldb == current_block ) blk_adjust++;
   30.12 -  _cfg._bbs.map(old->_idx,NULL);
   30.13 +  if (oldb == current_block) {
   30.14 +    blk_adjust++;
   30.15 +  }
   30.16 +  _cfg.unmap_node_from_block(old);
   30.17    OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   30.18    if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
   30.19      value->map(old_reg,NULL);  // Yank from value/regnd maps
   30.20 @@ -433,7 +435,7 @@
   30.21      bool missing_some_inputs = false;
   30.22      Block *freed = NULL;
   30.23      for( j = 1; j < b->num_preds(); j++ ) {
   30.24 -      Block *pb = _cfg._bbs[b->pred(j)->_idx];
   30.25 +      Block *pb = _cfg.get_block_for_node(b->pred(j));
   30.26        // Remove copies along phi edges
   30.27        for( uint k=1; k<phi_dex; k++ )
   30.28          elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
   30.29 @@ -478,7 +480,7 @@
   30.30      } else {
   30.31        if( !freed ) {            // Didn't get a freebie prior block
   30.32          // Must clone some data
   30.33 -        freed = _cfg._bbs[b->pred(1)->_idx];
   30.34 +        freed = _cfg.get_block_for_node(b->pred(1));
   30.35          Node_List &f_value = *blk2value[freed->_pre_order];
   30.36          Node_List &f_regnd = *blk2regnd[freed->_pre_order];
   30.37          for( uint k = 0; k < (uint)_max_reg; k++ ) {
   30.38 @@ -488,7 +490,7 @@
   30.39        }
   30.40        // Merge all inputs together, setting to NULL any conflicts.
   30.41        for( j = 1; j < b->num_preds(); j++ ) {
   30.42 -        Block *pb = _cfg._bbs[b->pred(j)->_idx];
   30.43 +        Block *pb = _cfg.get_block_for_node(b->pred(j));
   30.44          if( pb == freed ) continue; // Did self already via freelist
   30.45          Node_List &p_regnd = *blk2regnd[pb->_pre_order];
   30.46          for( uint k = 0; k < (uint)_max_reg; k++ ) {
   30.47 @@ -515,8 +517,9 @@
   30.48            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
   30.49        }
   30.50        if( u != NodeSentinel ) {    // Junk Phi.  Remove
   30.51 -        b->_nodes.remove(j--); phi_dex--;
   30.52 -        _cfg._bbs.map(phi->_idx,NULL);
   30.53 +        b->_nodes.remove(j--);
   30.54 +        phi_dex--;
   30.55 +        _cfg.unmap_node_from_block(phi);
   30.56          phi->replace_by(u);
   30.57          phi->disconnect_inputs(NULL, C);
   30.58          continue;
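
Note: postaloc.cpp shows the deletion side of the same invariant. yank() removes a dead node from its block's instruction list and then clears its entry in the node-to-block mapping, where the old code wrote NULL into _bbs directly. The core of the pattern from the hunk above:

    // Deleting a node: drop it from the block, then clear its CFG mapping.
    Block *oldb = _cfg.get_block_for_node(old);  // find its home block
    oldb->find_remove(old);                      // unlink from the schedule
    _cfg.unmap_node_from_block(old);             // was: _cfg._bbs.map(old->_idx, NULL)

unmap_node_from_block also reads better than mapping to NULL, since the intent (the node no longer belongs to any block) is named at the call site.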
    31.1 --- a/src/share/vm/opto/reg_split.cpp	Fri Aug 09 01:39:11 2013 -0700
    31.2 +++ b/src/share/vm/opto/reg_split.cpp	Fri Aug 09 18:05:00 2013 +0200
    31.3 @@ -132,7 +132,7 @@
    31.4    }
    31.5  
    31.6    b->_nodes.insert(i,spill);    // Insert node in block
    31.7 -  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
    31.8 +  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
    31.9    // Adjust the point where we go hi-pressure
   31.10    if( i <= b->_ihrp_index ) b->_ihrp_index++;
   31.11    if( i <= b->_fhrp_index ) b->_fhrp_index++;
   31.12 @@ -219,7 +219,7 @@
   31.13          use->set_req(useidx, def);
   31.14        } else {
   31.15          // Block and index where the use occurs.
   31.16 -        Block *b = _cfg._bbs[use->_idx];
   31.17 +        Block *b = _cfg.get_block_for_node(use);
   31.18          // Put the clone just prior to use
   31.19          int bindex = b->find_node(use);
   31.20          // DEF is UP, so must copy it DOWN and hook in USE
   31.21 @@ -270,7 +270,7 @@
   31.22    int bindex;
   31.23    // Phi input spill-copys belong at the end of the prior block
   31.24    if( use->is_Phi() ) {
   31.25 -    b = _cfg._bbs[b->pred(useidx)->_idx];
   31.26 +    b = _cfg.get_block_for_node(b->pred(useidx));
   31.27      bindex = b->end_idx();
   31.28    } else {
   31.29      // Put the clone just prior to use
   31.30 @@ -335,7 +335,7 @@
   31.31          continue;
   31.32        }
   31.33  
   31.34 -      Block *b_def = _cfg._bbs[def->_idx];
   31.35 +      Block *b_def = _cfg.get_block_for_node(def);
   31.36        int idx_def = b_def->find_node(def);
   31.37        Node *in_spill = get_spillcopy_wide( in, def, i );
   31.38        if( !in_spill ) return 0; // Bailed out
   31.39 @@ -589,7 +589,7 @@
   31.40          UPblock[slidx] = true;
   31.41          // Record following instruction in case 'n' rematerializes and
   31.42          // kills flags
   31.43 -        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
   31.44 +        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
   31.45          continue;
   31.46        }
   31.47  
   31.48 @@ -601,7 +601,7 @@
   31.49        // Grab predecessor block header
   31.50        n1 = b->pred(1);
   31.51        // Grab the appropriate reaching def info for inpidx
   31.52 -      pred = _cfg._bbs[n1->_idx];
   31.53 +      pred = _cfg.get_block_for_node(n1);
   31.54        pidx = pred->_pre_order;
   31.55        Node **Ltmp = Reaches[pidx];
   31.56        bool  *Utmp = UP[pidx];
   31.57 @@ -616,7 +616,7 @@
   31.58          // Grab predecessor block headers
   31.59          n2 = b->pred(inpidx);
   31.60          // Grab the appropriate reaching def info for inpidx
   31.61 -        pred = _cfg._bbs[n2->_idx];
   31.62 +        pred = _cfg.get_block_for_node(n2);
   31.63          pidx = pred->_pre_order;
   31.64          Ltmp = Reaches[pidx];
   31.65          Utmp = UP[pidx];
   31.66 @@ -701,7 +701,7 @@
   31.67          // Grab predecessor block header
   31.68          n1 = b->pred(1);
   31.69          // Grab the appropriate reaching def info for k
   31.70 -        pred = _cfg._bbs[n1->_idx];
   31.71 +        pred = _cfg.get_block_for_node(n1);
   31.72          pidx = pred->_pre_order;
   31.73          Node **Ltmp = Reaches[pidx];
   31.74          bool  *Utmp = UP[pidx];
   31.75 @@ -919,7 +919,7 @@
   31.76                  return 0;
   31.77                }
   31.78                _lrg_map.extend(def->_idx, 0);
   31.79 -              _cfg._bbs.map(def->_idx,b);
   31.80 +              _cfg.map_node_to_block(def, b);
   31.81                n->set_req(inpidx, def);
   31.82                continue;
   31.83              }
   31.84 @@ -1291,7 +1291,7 @@
   31.85    for( insidx = 0; insidx < phis->size(); insidx++ ) {
   31.86      Node *phi = phis->at(insidx);
   31.87      assert(phi->is_Phi(),"This list must only contain Phi Nodes");
   31.88 -    Block *b = _cfg._bbs[phi->_idx];
   31.89 +    Block *b = _cfg.get_block_for_node(phi);
   31.90      // Grab the live range number
   31.91      uint lidx = _lrg_map.find_id(phi);
   31.92      uint slidx = lrg2reach[lidx];
   31.93 @@ -1315,7 +1315,7 @@
   31.94      // DEF has the wrong UP/DOWN value.
   31.95      for( uint i = 1; i < b->num_preds(); i++ ) {
   31.96        // Get predecessor block pre-order number
   31.97 -      Block *pred = _cfg._bbs[b->pred(i)->_idx];
   31.98 +      Block *pred = _cfg.get_block_for_node(b->pred(i));
   31.99        pidx = pred->_pre_order;
  31.100        // Grab reaching def
  31.101        Node *def = Reaches[pidx][slidx];
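
Note: the first reg_split.cpp hunk also shows the index bookkeeping that comes with inserting a spill copy: positions recorded in the block (the integer and float high-register-pressure points) must be shifted when the insertion lands at or before them. An annotated sketch of that hunk:

    b->_nodes.insert(i, spill);        // place the spill copy at position i
    _cfg.map_node_to_block(spill, b);  // register its home block
    // Everything at index >= i moved down by one, so block-local
    // indices that point past the insertion must be bumped too.
    if (i <= b->_ihrp_index) b->_ihrp_index++;  // int hi-pressure index
    if (i <= b->_fhrp_index) b->_fhrp_index++;  // float hi-pressure index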
    32.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Aug 09 01:39:11 2013 -0700
    32.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Aug 09 18:05:00 2013 +0200
    32.3 @@ -1098,7 +1098,7 @@
    32.4                                                                                                                                       \
    32.5    c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
    32.6    c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
    32.7 -  c2_nonstatic_field(PhaseCFG,           _bbs,                     Block_Array)                                                      \
    32.8 +  c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
    32.9    c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
   32.10                                                                                                                                       \
   32.11    c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
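
Note: the vmStructs table describes VM-internal fields by their literal names, so renaming PhaseCFG::_bbs to _node_to_block_mapping has to be mirrored here as well as in any out-of-process reader that locates the field by string. From memory of how these rows are consumed (a sketch, not part of this diff), each c2_nonstatic_field line expands to roughly:

    // Approximate expansion of the row above (field located by name string):
    // { "PhaseCFG", "_node_to_block_mapping", "Block_Array",
    //   /* isStatic */ 0, offset_of(PhaseCFG, _node_to_block_mapping), NULL }

which is why any debugger or agent that reads this field by name must switch to the new string at the same time as the VM.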
