Merge

author:    chegar
date:      Fri, 23 Aug 2013 22:12:18 +0100
changeset: 5878:d4fa23d6c35b
parent:    5877:7638e35cabc6
parent:    5525:c93e0a210e1b
child:     5879:07b5f47d7a18

src/os_cpu/bsd_x86/vm/bsd_x86_32.ad
src/os_cpu/bsd_x86/vm/bsd_x86_64.ad
src/os_cpu/linux_x86/vm/linux_x86_32.ad
src/os_cpu/linux_x86/vm/linux_x86_64.ad
src/os_cpu/solaris_sparc/vm/solaris_sparc.ad
src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
src/os_cpu/solaris_x86/vm/solaris_x86_64.ad
src/os_cpu/windows_x86/vm/windows_x86_32.ad
src/os_cpu/windows_x86/vm/windows_x86_64.ad
     1.1 --- a/.hgtags	Mon Aug 19 17:47:21 2013 +0200
     1.2 +++ b/.hgtags	Fri Aug 23 22:12:18 2013 +0100
     1.3 @@ -368,3 +368,5 @@
     1.4  c4697c1c448416108743b59118b4a2498b339d0c jdk8-b102
     1.5  7f55137d6aa81efc6eb0035813709f2cb6a26b8b hs25-b45
     1.6  6f9be7f87b9653e94fd8fb3070891a0cc91b15bf jdk8-b103
     1.7 +580430d131ccd475e2f2ad4006531b8c4813d102 hs25-b46
     1.8 +104743074675359cfbf7f4dcd9ab2a5974a16627 jdk8-b104
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Mon Aug 19 17:47:21 2013 +0200
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/opto/PhaseCFG.java	Fri Aug 23 22:12:18 2013 +0100
     2.3 @@ -44,7 +44,7 @@
     2.4      Type type      = db.lookupType("PhaseCFG");
     2.5      numBlocksField = new CIntField(type.getCIntegerField("_num_blocks"), 0);
     2.6      blocksField = type.getAddressField("_blocks");
     2.7 -    bbsField = type.getAddressField("_bbs");
     2.8 +    bbsField = type.getAddressField("_node_to_block_mapping");
     2.9      brootField = type.getAddressField("_broot");
    2.10    }
    2.11  
     3.1 --- a/make/bsd/makefiles/adlc.make	Mon Aug 19 17:47:21 2013 +0200
     3.2 +++ b/make/bsd/makefiles/adlc.make	Fri Aug 23 22:12:18 2013 +0100
     3.3 @@ -41,13 +41,11 @@
     3.4  
     3.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     3.6    SOURCES.AD = \
     3.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     3.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     3.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    3.10  else
    3.11    SOURCES.AD = \
    3.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    3.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    3.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    3.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    3.16  endif
    3.17  
    3.18  EXEC	= $(OUTDIR)/adlc
     4.1 --- a/make/hotspot_version	Mon Aug 19 17:47:21 2013 +0200
     4.2 +++ b/make/hotspot_version	Fri Aug 23 22:12:18 2013 +0100
     4.3 @@ -35,7 +35,7 @@
     4.4  
     4.5  HS_MAJOR_VER=25
     4.6  HS_MINOR_VER=0
     4.7 -HS_BUILD_NUMBER=45
     4.8 +HS_BUILD_NUMBER=46
     4.9  
    4.10  JDK_MAJOR_VER=1
    4.11  JDK_MINOR_VER=8
     5.1 --- a/make/linux/makefiles/adlc.make	Mon Aug 19 17:47:21 2013 +0200
     5.2 +++ b/make/linux/makefiles/adlc.make	Fri Aug 23 22:12:18 2013 +0100
     5.3 @@ -41,13 +41,11 @@
     5.4  
     5.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     5.6    SOURCES.AD = \
     5.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     5.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     5.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    5.10  else
    5.11    SOURCES.AD = \
    5.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    5.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    5.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    5.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    5.16  endif
    5.17  
    5.18  EXEC	= $(OUTDIR)/adlc
     6.1 --- a/make/solaris/makefiles/adlc.make	Mon Aug 19 17:47:21 2013 +0200
     6.2 +++ b/make/solaris/makefiles/adlc.make	Fri Aug 23 22:12:18 2013 +0100
     6.3 @@ -42,13 +42,11 @@
     6.4  
     6.5  ifeq ("${Platform_arch_model}", "${Platform_arch}")
     6.6    SOURCES.AD = \
     6.7 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
     6.8 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
     6.9 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) 
    6.10  else
    6.11    SOURCES.AD = \
    6.12    $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch_model).ad) \
    6.13 -  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) \
    6.14 -  $(call altsrc-replace,$(HS_COMMON_SRC)/os_cpu/$(OS)_$(ARCH)/vm/$(OS)_$(Platform_arch_model).ad)
    6.15 +  $(call altsrc-replace,$(HS_COMMON_SRC)/cpu/$(ARCH)/vm/$(Platform_arch).ad) 
    6.16  endif
    6.17  
    6.18  EXEC	= $(OUTDIR)/adlc
     7.1 --- a/make/solaris/makefiles/dtrace.make	Mon Aug 19 17:47:21 2013 +0200
     7.2 +++ b/make/solaris/makefiles/dtrace.make	Fri Aug 23 22:12:18 2013 +0100
     7.3 @@ -283,9 +283,9 @@
     7.4  	$(QUIETLY) $(DTRACE_PROG) $(DTRACE_OPTS) -C -I. -G -xlazyload -o $@ -s $(DTRACE).d \
     7.5       $(DTraced_Files) ||\
     7.6    STATUS=$$?;\
     7.7 -	if [ x"$$STATUS" = x"1" -a \
     7.8 -       x`uname -r` = x"5.10" -a \
     7.9 -       x`uname -p` = x"sparc" ]; then\
    7.10 +  if [ x"$$STATUS" = x"1" ]; then \
    7.11 +      if [ x`uname -r` = x"5.10" -a \
    7.12 +           x`uname -p` = x"sparc" ]; then\
    7.13      echo "*****************************************************************";\
    7.14      echo "* If you are building server compiler, and the error message is ";\
    7.15      echo "* \"incorrect ELF machine type...\", you have run into solaris bug ";\
    7.16 @@ -294,6 +294,20 @@
    7.17      echo "* environment variable HOTSPOT_DISABLE_DTRACE_PROBES to disable ";\
    7.18      echo "* dtrace probes for this build.";\
    7.19      echo "*****************************************************************";\
    7.20 +      elif [ x`uname -r` = x"5.10" ]; then\
    7.21 +    echo "*****************************************************************";\
    7.22 +    echo "* If you are seeing 'syntax error near \"umpiconninfo_t\"' on Solaris";\
    7.23 +    echo "* 10, try doing 'cd /usr/lib/dtrace && gzip mpi.d' as root, ";\
    7.24 +    echo "* or set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
    7.25 +    echo "* to disable dtrace probes for this build.";\
    7.26 +    echo "*****************************************************************";\
    7.27 +      else \
    7.28 +    echo "*****************************************************************";\
    7.29 +    echo "* If you cannot fix dtrace build issues, try to ";\
    7.30 +    echo "* set the environment variable HOTSPOT_DISABLE_DTRACE_PROBES";\
    7.31 +    echo "* to disable dtrace probes for this build.";\
    7.32 +    echo "*****************************************************************";\
    7.33 +      fi; \
    7.34    fi;\
    7.35    exit $$STATUS
    7.36    # Since some DTraced_Files are in LIBJVM.o and they are touched by this
     8.1 --- a/make/windows/create.bat	Mon Aug 19 17:47:21 2013 +0200
     8.2 +++ b/make/windows/create.bat	Fri Aug 23 22:12:18 2013 +0100
     8.3 @@ -1,6 +1,6 @@
     8.4  @echo off
     8.5  REM
     8.6 -REM Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
     8.7 +REM Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     8.8  REM DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.9  REM
    8.10  REM This code is free software; you can redistribute it and/or modify it
    8.11 @@ -148,7 +148,7 @@
    8.12  
    8.13  REM This is now safe to do.
    8.14  :copyfiles
    8.15 -for /D %%i in (compiler1, compiler2, tiered, core) do (
    8.16 +for /D %%i in (compiler1, compiler2, tiered ) do (
    8.17  if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated
    8.18  copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL
    8.19  )
    8.20 @@ -156,7 +156,7 @@
    8.21  REM force regneration of ProjectFile
    8.22  if exist %ProjectFile% del %ProjectFile%
    8.23  
    8.24 -for /D %%i in (compiler1, compiler2, tiered, core) do (
    8.25 +for /D %%i in (compiler1, compiler2, tiered ) do (
    8.26  echo -- %%i --
    8.27  echo # Generated file!                                                        >    %HotSpotBuildSpace%\%%i\local.make
    8.28  echo # Changing a variable below and then deleting %ProjectFile% will cause  >>    %HotSpotBuildSpace%\%%i\local.make
     9.1 --- a/make/windows/create_obj_files.sh	Mon Aug 19 17:47:21 2013 +0200
     9.2 +++ b/make/windows/create_obj_files.sh	Fri Aug 23 22:12:18 2013 +0100
     9.3 @@ -73,19 +73,17 @@
     9.4  
     9.5  BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
     9.6  
     9.7 -if [ -d "${ALTSRC}/share/vm/jfr" ]; then
     9.8 -  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
     9.9 +if [ -d "${ALTSRC}/share/vm/jfr/buffers" ]; then
    9.10    BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
    9.11  fi
    9.12  
    9.13  BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
    9.14  
    9.15 -CORE_PATHS="${BASE_PATHS}"
    9.16  # shared is already in BASE_PATHS. Should add vm/memory but that one is also in BASE_PATHS.
    9.17  if [ -d "${ALTSRC}/share/vm/gc_implementation" ]; then
    9.18 -  CORE_PATHS="${CORE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
    9.19 +  BASE_PATHS="${BASE_PATHS} `$FIND ${ALTSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
    9.20  fi
    9.21 -CORE_PATHS="${CORE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
    9.22 +BASE_PATHS="${BASE_PATHS} `$FIND ${COMMONSRC}/share/vm/gc_implementation ! -name gc_implementation -prune -type d \! -name shared`"
    9.23  
    9.24  if [ -d "${ALTSRC}/share/vm/c1" ]; then
    9.25    COMPILER1_PATHS="${ALTSRC}/share/vm/c1"
    9.26 @@ -104,12 +102,11 @@
    9.27  
    9.28  # Include dirs per type.
    9.29  case "${TYPE}" in
    9.30 -    "core")      Src_Dirs="${CORE_PATHS}" ;;
    9.31 -    "compiler1") Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS}" ;;
    9.32 -    "compiler2") Src_Dirs="${CORE_PATHS} ${COMPILER2_PATHS}" ;;
    9.33 -    "tiered")    Src_Dirs="${CORE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
    9.34 -    "zero")      Src_Dirs="${CORE_PATHS}" ;;
    9.35 -    "shark")     Src_Dirs="${CORE_PATHS}" ;;
    9.36 +    "compiler1") Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS}" ;;
    9.37 +    "compiler2") Src_Dirs="${BASE_PATHS} ${COMPILER2_PATHS}" ;;
    9.38 +    "tiered")    Src_Dirs="${BASE_PATHS} ${COMPILER1_PATHS} ${COMPILER2_PATHS}" ;;
    9.39 +    "zero")      Src_Dirs="${BASE_PATHS}" ;;
    9.40 +    "shark")     Src_Dirs="${BASE_PATHS}" ;;
    9.41  esac
    9.42  
    9.43  COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
    9.44 @@ -122,7 +119,6 @@
    9.45  
    9.46  # Exclude per type.
    9.47  case "${TYPE}" in
    9.48 -    "core")      Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
    9.49      "compiler1") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER2_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES} ciTypeFlow.cpp" ;;
    9.50      "compiler2") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${COMPILER1_SPECIFIC_FILES} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
    9.51      "tiered")    Src_Files_EXCLUDE="${Src_Files_EXCLUDE} ${ZERO_SPECIFIC_FILES} ${SHARK_SPECIFIC_FILES}" ;;
    9.52 @@ -149,9 +145,17 @@
    9.53     Src_Files="${Src_Files}`findsrc ${e}` "
    9.54  done 
    9.55  
    9.56 -Obj_Files=
    9.57 +Obj_Files=" "
    9.58  for e in ${Src_Files}; do
    9.59 -	Obj_Files="${Obj_Files}${e%\.[!.]*}.obj "
    9.60 +        o="${e%\.[!.]*}.obj"
    9.61 +        set +e
    9.62 +        chk=`expr "${Obj_Files}" : ".* $o"`
    9.63 +        set -e
    9.64 +        if [ "$chk" != 0 ]; then
    9.65 +             echo "# INFO: skipping duplicate $o"
    9.66 +             continue
    9.67 +        fi
    9.68 +	Obj_Files="${Obj_Files}$o "
    9.69  done
    9.70  
    9.71  echo Obj_Files=${Obj_Files}
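
The loop above now skips duplicate object files: expr "${Obj_Files}" : ".* $o" returns the number of characters matched, so any non-zero result means $o was already accumulated and the file is dropped with an INFO message. This matters because, with CORE_PATHS folded into BASE_PATHS, the same source directory can be reached through both ALTSRC and COMMONSRC. A minimal self-contained C++ analogue of the same accumulate-unless-seen pattern (illustrative names only, not part of the build):

    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      // Stand-ins for ${Src_Files}; the third entry collides with the first.
      std::vector<std::string> src_files = {"os.cpp", "vm.cpp", "os.cpp"};
      std::set<std::string> seen;
      std::string obj_files;
      for (const std::string& e : src_files) {
        std::string o = e.substr(0, e.rfind('.')) + ".obj";
        if (!seen.insert(o).second) {          // already accumulated: skip
          std::cout << "# INFO: skipping duplicate " << o << "\n";
          continue;
        }
        obj_files += o + " ";
      }
      std::cout << "Obj_Files=" << obj_files << "\n";
      return 0;
    }
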
    10.1 --- a/make/windows/makefiles/adlc.make	Mon Aug 19 17:47:21 2013 +0200
    10.2 +++ b/make/windows/makefiles/adlc.make	Fri Aug 23 22:12:18 2013 +0100
    10.3 @@ -55,13 +55,11 @@
    10.4  
    10.5  !if "$(Platform_arch_model)" == "$(Platform_arch)"
    10.6  SOURCES_AD=\
    10.7 -  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
    10.8 -  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
    10.9 +  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad 
   10.10  !else
   10.11  SOURCES_AD=\
   10.12    $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch_model).ad \
   10.13 -  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad \
   10.14 -  $(WorkSpace)/src/os_cpu/windows_$(Platform_arch)/vm/windows_$(Platform_arch_model).ad
   10.15 +  $(WorkSpace)/src/cpu/$(Platform_arch)/vm/$(Platform_arch).ad 
   10.16  !endif
   10.17  
   10.18  # NOTE! If you add any files here, you must also update GENERATED_NAMES_IN_DIR
    11.1 --- a/make/windows/makefiles/projectcreator.make	Mon Aug 19 17:47:21 2013 +0200
    11.2 +++ b/make/windows/makefiles/projectcreator.make	Fri Aug 23 22:12:18 2013 +0100
    11.3 @@ -44,10 +44,11 @@
    11.4  
    11.5  # This is only used internally
    11.6  ProjectCreatorIncludesPRIVATE=\
    11.7 -        -relativeInclude src\closed\share\vm \
    11.8 -        -relativeInclude src\closed\os\windows\vm \
    11.9 -        -relativeInclude src\closed\os_cpu\windows_$(Platform_arch)\vm \
   11.10 -        -relativeInclude src\closed\cpu\$(Platform_arch)\vm \
   11.11 +        -relativeAltSrcInclude src\closed \
   11.12 +        -altRelativeInclude share\vm \
   11.13 +        -altRelativeInclude os\windows\vm \
   11.14 +        -altRelativeInclude os_cpu\windows_$(Platform_arch)\vm \
   11.15 +        -altRelativeInclude cpu\$(Platform_arch)\vm \
   11.16          -relativeInclude src\share\vm \
   11.17          -relativeInclude src\share\vm\precompiled \
   11.18          -relativeInclude src\share\vm\prims\wbtestmethods \
   11.19 @@ -91,7 +92,7 @@
   11.20          -disablePch        getThread_windows_$(Platform_arch).cpp \
   11.21          -disablePch_compiler2     opcodes.cpp
   11.22  
   11.23 -# Common options for the IDE builds for core, c1, and c2
   11.24 +# Common options for the IDE builds for c1, and c2
   11.25  ProjectCreatorIDEOptions=\
   11.26          $(ProjectCreatorIDEOptions) \
   11.27          -sourceBase $(HOTSPOTWORKSPACE) \
   11.28 @@ -158,18 +159,10 @@
   11.29   -ignoreFile_TARGET $(Platform_arch_model).ad
   11.30  
   11.31  ##################################################
   11.32 -# Without compiler(core) specific options
   11.33 -##################################################
   11.34 -ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
   11.35 -$(ProjectCreatorIDEOptionsIgnoreCompiler1:TARGET=core) \
   11.36 -$(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=core)
   11.37 -
   11.38 -##################################################
   11.39  # Client(C1) compiler specific options
   11.40  ##################################################
   11.41  ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
   11.42   -define_compiler1 COMPILER1 \
   11.43 - -ignorePath_compiler1 core \
   11.44  $(ProjectCreatorIDEOptionsIgnoreCompiler2:TARGET=compiler1)
   11.45  
   11.46  ##################################################
   11.47 @@ -178,7 +171,6 @@
   11.48  #NOTE! This list must be kept in sync with GENERATED_NAMES in adlc.make.
   11.49  ProjectCreatorIDEOptions=$(ProjectCreatorIDEOptions) \
   11.50   -define_compiler2 COMPILER2 \
   11.51 - -ignorePath_compiler2 core \
   11.52   -additionalFile_compiler2 $(Platform_arch_model).ad \
   11.53   -additionalFile_compiler2 ad_$(Platform_arch_model).cpp \
   11.54   -additionalFile_compiler2 ad_$(Platform_arch_model).hpp \
    12.1 --- a/make/windows/makefiles/trace.make	Mon Aug 19 17:47:21 2013 +0200
    12.2 +++ b/make/windows/makefiles/trace.make	Fri Aug 23 22:12:18 2013 +0100
    12.3 @@ -90,25 +90,25 @@
    12.4  !if "$(OPENJDK)" == "true"
    12.5  
    12.6  $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
    12.7 -	@echo Generating $@
    12.8 +	@echo Generating OpenJDK $@
    12.9  	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
   12.10  
   12.11  !else
   12.12  
   12.13  $(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
   12.14 -	@echo Generating $@
   12.15 +	@echo Generating AltSrc $@
   12.16  	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
   12.17  
   12.18  $(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
   12.19 -	@echo Generating $@
   12.20 +	@echo Generating AltSrc $@
   12.21  	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
   12.22  
   12.23  $(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
   12.24 -	@echo Generating $@
   12.25 +	@echo Generating AltSrc $@
   12.26  	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
   12.27  
   12.28  $(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
   12.29 -	@echo Generating $@
   12.30 +	@echo Generating AltSrc $@
   12.31  	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
   12.32  
   12.33  !endif
    13.1 --- a/make/windows/makefiles/vm.make	Mon Aug 19 17:47:21 2013 +0200
    13.2 +++ b/make/windows/makefiles/vm.make	Fri Aug 23 22:12:18 2013 +0100
    13.3 @@ -36,10 +36,6 @@
    13.4  CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
    13.5  !endif
    13.6  
    13.7 -!if "$(Variant)" == "core"
    13.8 -# No need to define anything, CORE is defined as !COMPILER1 && !COMPILER2
    13.9 -!endif
   13.10 -
   13.11  !if "$(Variant)" == "compiler1"
   13.12  CXX_FLAGS=$(CXX_FLAGS) /D "COMPILER1"
   13.13  !endif
    14.1 --- a/make/windows/projectfiles/common/Makefile	Mon Aug 19 17:47:21 2013 +0200
    14.2 +++ b/make/windows/projectfiles/common/Makefile	Fri Aug 23 22:12:18 2013 +0100
    14.3 @@ -112,6 +112,7 @@
    14.4  ProjectCreatorIDEOptions = $(ProjectCreatorIDEOptions) $(ReleaseOptions)
    14.5  
    14.6  $(HOTSPOTBUILDSPACE)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class
    14.7 +	@if "$(MSC_VER)"=="1500" echo Make sure you have VS2008 SP1 or later, or you may see 'expanded command line too long'
    14.8  	@$(RUN_JAVA) -Djava.class.path="$(HOTSPOTBUILDSPACE)/classes" ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions)
    14.9  
   14.10  clean:
    15.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Aug 19 17:47:21 2013 +0200
    15.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Fri Aug 23 22:12:18 2013 +0100
    15.3 @@ -1887,6 +1887,27 @@
    15.4    if (ProfileInterpreter) {
    15.5      __ set_method_data_pointer_for_bcp();
    15.6    }
    15.7 +
    15.8 +#if INCLUDE_JVMTI
    15.9 +  if (EnableInvokeDynamic) {
   15.10 +    Label L_done;
   15.11 +
   15.12 +    __ ldub(Address(Lbcp, 0), G1_scratch);  // Load current bytecode
   15.13 +    __ cmp_and_br_short(G1_scratch, Bytecodes::_invokestatic, Assembler::notEqual, Assembler::pn, L_done);
   15.14 +
   15.15 +    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
   15.16 +    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
   15.17 +
   15.18 +    __ call_VM(G1_scratch, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), I0, Lmethod, Lbcp);
   15.19 +
   15.20 +    __ br_null(G1_scratch, false, Assembler::pn, L_done);
   15.21 +    __ delayed()->nop();
   15.22 +
   15.23 +    __ st_ptr(G1_scratch, Lesp, wordSize);
   15.24 +    __ bind(L_done);
   15.25 +  }
   15.26 +#endif // INCLUDE_JVMTI
   15.27 +
   15.28    // Resume bytecode interpretation at the current bcp
   15.29    __ dispatch_next(vtos);
   15.30    // end of JVMTI PopFrame support
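
The same JVMTI fix lands in all three template interpreters (SPARC above, x86_32 and x86_64 below): when PopFrame re-executes an _invokestatic that is really a MethodHandle linker call, the hidden MemberName argument must first be put back on the expression stack, and InterpreterRuntime::member_name_arg_or_null returns either that argument or NULL. A self-contained C++ sketch of the decision being made, with stand-in types and assumed predicate inputs rather than HotSpot's own:

    #include <cstdio>

    struct Oop { const char* name; };   // stand-in for a Java heap reference

    // Stand-in for InterpreterRuntime::member_name_arg_or_null: hand the
    // candidate back only when the bytecode at bcp is an invokestatic that
    // carries the trailing MemberName argument.
    Oop* member_name_arg_or_null(Oop* candidate, bool is_invokestatic,
                                 bool has_member_arg) {
      return (is_invokestatic && has_member_arg) ? candidate : nullptr;
    }

    int main() {
      Oop member = {"MemberName"};
      Oop* restore = member_name_arg_or_null(&member, true, true);
      if (restore != nullptr) {
        // Corresponds to the st_ptr/movptr store into the expression
        // stack slot in the assembly hunks.
        std::printf("restore %s to the expression stack\n", restore->name);
      }
      return 0;
    }
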
    16.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Mon Aug 19 17:47:21 2013 +0200
    16.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Aug 23 22:12:18 2013 +0100
    16.3 @@ -1920,6 +1920,29 @@
    16.4    __ get_thread(thread);
    16.5    __ movl(Address(thread, JavaThread::popframe_condition_offset()), JavaThread::popframe_inactive);
    16.6  
    16.7 +#if INCLUDE_JVMTI
    16.8 +  if (EnableInvokeDynamic) {
    16.9 +    Label L_done;
   16.10 +    const Register local0 = rdi;
   16.11 +
   16.12 +    __ cmpb(Address(rsi, 0), Bytecodes::_invokestatic);
   16.13 +    __ jcc(Assembler::notEqual, L_done);
   16.14 +
   16.15 +    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
   16.16 +    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
   16.17 +
   16.18 +    __ get_method(rdx);
   16.19 +    __ movptr(rax, Address(local0, 0));
   16.20 +    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, rsi);
   16.21 +
   16.22 +    __ testptr(rax, rax);
   16.23 +    __ jcc(Assembler::zero, L_done);
   16.24 +
   16.25 +    __ movptr(Address(rbx, 0), rax);
   16.26 +    __ bind(L_done);
   16.27 +  }
   16.28 +#endif // INCLUDE_JVMTI
   16.29 +
   16.30    __ dispatch_next(vtos);
   16.31    // end of PopFrame support
   16.32  
    17.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Mon Aug 19 17:47:21 2013 +0200
    17.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Aug 23 22:12:18 2013 +0100
    17.3 @@ -1929,6 +1929,29 @@
    17.4    __ movl(Address(r15_thread, JavaThread::popframe_condition_offset()),
    17.5            JavaThread::popframe_inactive);
    17.6  
    17.7 +#if INCLUDE_JVMTI
    17.8 +  if (EnableInvokeDynamic) {
    17.9 +    Label L_done;
   17.10 +    const Register local0 = r14;
   17.11 +
   17.12 +    __ cmpb(Address(r13, 0), Bytecodes::_invokestatic);
   17.13 +    __ jcc(Assembler::notEqual, L_done);
   17.14 +
   17.15 +    // The member name argument must be restored if _invokestatic is re-executed after a PopFrame call.
   17.16 +    // Detect such a case in the InterpreterRuntime function and return the member name argument, or NULL.
   17.17 +
   17.18 +    __ get_method(rdx);
   17.19 +    __ movptr(rax, Address(local0, 0));
   17.20 +    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::member_name_arg_or_null), rax, rdx, r13);
   17.21 +
   17.22 +    __ testptr(rax, rax);
   17.23 +    __ jcc(Assembler::zero, L_done);
   17.24 +
   17.25 +    __ movptr(Address(rbx, 0), rax);
   17.26 +    __ bind(L_done);
   17.27 +  }
   17.28 +#endif // INCLUDE_JVMTI
   17.29 +
   17.30    __ dispatch_next(vtos);
   17.31    // end of PopFrame support
   17.32  
    18.1 --- a/src/cpu/zero/vm/entryFrame_zero.hpp	Mon Aug 19 17:47:21 2013 +0200
    18.2 +++ b/src/cpu/zero/vm/entryFrame_zero.hpp	Fri Aug 23 22:12:18 2013 +0100
    18.3 @@ -58,8 +58,8 @@
    18.4                             JavaCallWrapper* call_wrapper,
    18.5                             TRAPS);
    18.6   public:
    18.7 -  JavaCallWrapper *call_wrapper() const {
    18.8 -    return (JavaCallWrapper *) value_of_word(call_wrapper_off);
    18.9 +  JavaCallWrapper **call_wrapper() const {
   18.10 +    return (JavaCallWrapper **) addr_of_word(call_wrapper_off);
   18.11    }
   18.12  
   18.13   public:
    19.1 --- a/src/cpu/zero/vm/frame_zero.inline.hpp	Mon Aug 19 17:47:21 2013 +0200
    19.2 +++ b/src/cpu/zero/vm/frame_zero.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    19.3 @@ -141,7 +141,7 @@
    19.4    return fp();
    19.5  }
    19.6  
    19.7 -inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
    19.8 +inline JavaCallWrapper** frame::entry_frame_call_wrapper_addr() const {
    19.9    return zero_entryframe()->call_wrapper();
   19.10  }
   19.11  
    20.1 --- a/src/cpu/zero/vm/stubGenerator_zero.cpp	Mon Aug 19 17:47:21 2013 +0200
    20.2 +++ b/src/cpu/zero/vm/stubGenerator_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    20.3 @@ -176,6 +176,19 @@
    20.4        StubRoutines::_oop_arraycopy;
    20.5    }
    20.6  
    20.7 +  static int SafeFetch32(int *adr, int errValue) {
    20.8 +    int value = errValue;
    20.9 +    value = *adr;
   20.10 +    return value;
   20.11 +  }
   20.12 +
   20.13 +  static intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
   20.14 +    intptr_t value = errValue;
   20.15 +    value = *adr;
   20.16 +    return value;
   20.17 +  }
   20.18 +
   20.19 +
   20.20    void generate_initial() {
   20.21      // Generates all stubs and initializes the entry points
   20.22  
   20.23 @@ -225,6 +238,15 @@
   20.24  
   20.25      // arraycopy stubs used by compilers
   20.26      generate_arraycopy_stubs();
   20.27 +
   20.28 +    // Safefetch stubs.
   20.29 +    StubRoutines::_safefetch32_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetch32);
   20.30 +    StubRoutines::_safefetch32_fault_pc = NULL;
   20.31 +    StubRoutines::_safefetch32_continuation_pc = NULL;
   20.32 +
   20.33 +    StubRoutines::_safefetchN_entry = CAST_FROM_FN_PTR(address, StubGenerator::SafeFetchN);
   20.34 +    StubRoutines::_safefetchN_fault_pc = NULL;
   20.35 +    StubRoutines::_safefetchN_continuation_pc = NULL;
   20.36    }
   20.37  
   20.38   public:
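
Zero has no signal-handling stub machinery, so the SafeFetch32/SafeFetchN entries added above are plain C++ loads registered with NULL fault and continuation PCs; on ports with real stubs, a fault inside the fetch is redirected so the call returns errValue instead of crashing. A minimal sketch of the calling convention, assuming a mapped address since the Zero variant cannot absorb a real fault:

    #include <cstdio>

    // Mirrors the Zero stub above: a plain load, no fault recovery.
    static int SafeFetch32(int* adr, int errValue) {
      int value = errValue;
      value = *adr;
      return value;
    }

    int main() {
      int word = 42;
      const int sentinel = -1;  // hypothetical "could not read" marker
      // On ports with real safefetch stubs an unmapped adr would yield
      // the sentinel; here the address must be valid.
      std::printf("fetched %d\n", SafeFetch32(&word, sentinel));
      return 0;
    }
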
    21.1 --- a/src/os/bsd/vm/attachListener_bsd.cpp	Mon Aug 19 17:47:21 2013 +0200
    21.2 +++ b/src/os/bsd/vm/attachListener_bsd.cpp	Fri Aug 23 22:12:18 2013 +0100
    21.3 @@ -445,14 +445,14 @@
    21.4  
    21.5  void AttachListener::vm_start() {
    21.6    char fn[UNIX_PATH_MAX];
    21.7 -  struct stat64 st;
    21.8 +  struct stat st;
    21.9    int ret;
   21.10  
   21.11    int n = snprintf(fn, UNIX_PATH_MAX, "%s/.java_pid%d",
   21.12             os::get_temp_directory(), os::current_process_id());
   21.13    assert(n < (int)UNIX_PATH_MAX, "java_pid file name buffer overflow");
   21.14  
   21.15 -  RESTARTABLE(::stat64(fn, &st), ret);
   21.16 +  RESTARTABLE(::stat(fn, &st), ret);
   21.17    if (ret == 0) {
   21.18      ret = ::unlink(fn);
   21.19      if (ret == -1) {
    22.1 --- a/src/os/windows/vm/os_windows.cpp	Mon Aug 19 17:47:21 2013 +0200
    22.2 +++ b/src/os/windows/vm/os_windows.cpp	Fri Aug 23 22:12:18 2013 +0100
    22.3 @@ -1642,6 +1642,8 @@
    22.4  
    22.5  void os::win32::print_windows_version(outputStream* st) {
    22.6    OSVERSIONINFOEX osvi;
    22.7 +  SYSTEM_INFO si;
    22.8 +
    22.9    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
   22.10    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
   22.11  
   22.12 @@ -1651,6 +1653,18 @@
   22.13    }
   22.14  
   22.15    int os_vers = osvi.dwMajorVersion * 1000 + osvi.dwMinorVersion;
   22.16 +
   22.17 +  ZeroMemory(&si, sizeof(SYSTEM_INFO));
   22.18 +  if (os_vers >= 5002) {
   22.19 +    // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
   22.20 +    // find out whether we are running on 64 bit processor or not.
   22.21 +    if (os::Kernel32Dll::GetNativeSystemInfoAvailable()) {
   22.22 +      os::Kernel32Dll::GetNativeSystemInfo(&si);
   22.23 +    } else {
   22.24 +      GetSystemInfo(&si);
   22.25 +    }
   22.26 +  }
   22.27 +
   22.28    if (osvi.dwPlatformId == VER_PLATFORM_WIN32_NT) {
   22.29      switch (os_vers) {
   22.30      case 3051: st->print(" Windows NT 3.51"); break;
   22.31 @@ -1658,57 +1672,48 @@
   22.32      case 5000: st->print(" Windows 2000"); break;
   22.33      case 5001: st->print(" Windows XP"); break;
   22.34      case 5002:
   22.35 -    case 6000:
   22.36 -    case 6001:
   22.37 -    case 6002: {
   22.38 -      // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could
   22.39 -      // find out whether we are running on 64 bit processor or not.
   22.40 -      SYSTEM_INFO si;
   22.41 -      ZeroMemory(&si, sizeof(SYSTEM_INFO));
   22.42 -        if (!os::Kernel32Dll::GetNativeSystemInfoAvailable()){
   22.43 -          GetSystemInfo(&si);
   22.44 +      if (osvi.wProductType == VER_NT_WORKSTATION &&
   22.45 +          si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
   22.46 +        st->print(" Windows XP x64 Edition");
   22.47        } else {
   22.48 -        os::Kernel32Dll::GetNativeSystemInfo(&si);
   22.49 -      }
   22.50 -      if (os_vers == 5002) {
   22.51 -        if (osvi.wProductType == VER_NT_WORKSTATION &&
   22.52 -            si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
   22.53 -          st->print(" Windows XP x64 Edition");
   22.54 -        else
   22.55 -            st->print(" Windows Server 2003 family");
   22.56 -      } else if (os_vers == 6000) {
   22.57 -        if (osvi.wProductType == VER_NT_WORKSTATION)
   22.58 -            st->print(" Windows Vista");
   22.59 -        else
   22.60 -            st->print(" Windows Server 2008");
   22.61 -        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
   22.62 -            st->print(" , 64 bit");
   22.63 -      } else if (os_vers == 6001) {
   22.64 -        if (osvi.wProductType == VER_NT_WORKSTATION) {
   22.65 -            st->print(" Windows 7");
   22.66 -        } else {
   22.67 -            // Unrecognized windows, print out its major and minor versions
   22.68 -            st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
   22.69 -        }
   22.70 -        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
   22.71 -            st->print(" , 64 bit");
   22.72 -      } else if (os_vers == 6002) {
   22.73 -        if (osvi.wProductType == VER_NT_WORKSTATION) {
   22.74 -            st->print(" Windows 8");
   22.75 -        } else {
   22.76 -            st->print(" Windows Server 2012");
   22.77 -        }
   22.78 -        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
   22.79 -            st->print(" , 64 bit");
   22.80 -      } else { // future os
   22.81 -        // Unrecognized windows, print out its major and minor versions
   22.82 -        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
   22.83 -        if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64)
   22.84 -            st->print(" , 64 bit");
   22.85 +        st->print(" Windows Server 2003 family");
   22.86        }
   22.87        break;
   22.88 -    }
   22.89 -    default: // future windows, print out its major and minor versions
   22.90 +
   22.91 +    case 6000:
   22.92 +      if (osvi.wProductType == VER_NT_WORKSTATION) {
   22.93 +        st->print(" Windows Vista");
   22.94 +      } else {
   22.95 +        st->print(" Windows Server 2008");
   22.96 +      }
   22.97 +      break;
   22.98 +
   22.99 +    case 6001:
  22.100 +      if (osvi.wProductType == VER_NT_WORKSTATION) {
  22.101 +        st->print(" Windows 7");
  22.102 +      } else {
  22.103 +        st->print(" Windows Server 2008 R2");
  22.104 +      }
  22.105 +      break;
  22.106 +
  22.107 +    case 6002:
  22.108 +      if (osvi.wProductType == VER_NT_WORKSTATION) {
  22.109 +        st->print(" Windows 8");
  22.110 +      } else {
  22.111 +        st->print(" Windows Server 2012");
  22.112 +      }
  22.113 +      break;
  22.114 +
  22.115 +    case 6003:
  22.116 +      if (osvi.wProductType == VER_NT_WORKSTATION) {
  22.117 +        st->print(" Windows 8.1");
  22.118 +      } else {
  22.119 +        st->print(" Windows Server 2012 R2");
  22.120 +      }
  22.121 +      break;
  22.122 +
  22.123 +    default: // future os
  22.124 +      // Unrecognized windows, print out its major and minor versions
  22.125        st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
  22.126      }
  22.127    } else {
  22.128 @@ -1720,6 +1725,11 @@
  22.129        st->print(" Windows %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion);
  22.130      }
  22.131    }
  22.132 +
  22.133 +  if (os_vers >= 6000 && si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) {
  22.134 +    st->print(" , 64 bit");
  22.135 +  }
  22.136 +
  22.137    st->print(" Build %d", osvi.dwBuildNumber);
  22.138    st->print(" %s", osvi.szCSDVersion);           // service pack
  22.139    st->cr();
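
The print_windows_version rewrite keys the switch on a single version code, major * 1000 + minor, hoists the GetNativeSystemInfo probe out of the individual cases, corrects the 6001 server branch to print "Windows Server 2008 R2", adds the 6003 case for Windows 8.1 / Server 2012 R2, and emits the ", 64 bit" suffix once after the switch for 6000 and later. A small self-contained check of that encoding:

    #include <cassert>

    // The version key used by the switch above: major * 1000 + minor.
    static int os_vers(int major, int minor) { return major * 1000 + minor; }

    int main() {
      assert(os_vers(5, 2) == 5002);  // Windows XP x64 / Server 2003
      assert(os_vers(6, 1) == 6001);  // Windows 7 / Server 2008 R2
      assert(os_vers(6, 3) == 6003);  // Windows 8.1 / Server 2012 R2 (new)
      return 0;
    }
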
    23.1 --- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.ad	Mon Aug 19 17:47:21 2013 +0200
    23.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    23.3 @@ -1,26 +0,0 @@
    23.4 -//
    23.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    23.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.7 -//
    23.8 -// This code is free software; you can redistribute it and/or modify it
    23.9 -// under the terms of the GNU General Public License version 2 only, as
   23.10 -// published by the Free Software Foundation.
   23.11 -//
   23.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   23.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   23.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   23.15 -// version 2 for more details (a copy is included in the LICENSE file that
   23.16 -// accompanied this code).
   23.17 -//
   23.18 -// You should have received a copy of the GNU General Public License version
   23.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   23.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   23.21 -//
   23.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   23.23 -// or visit www.oracle.com if you need additional information or have any
   23.24 -// questions.
   23.25 -//
   23.26 -//
   23.27 -
   23.28 -// X86 Bsd Architecture Description File
   23.29 -
    24.1 --- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.ad	Mon Aug 19 17:47:21 2013 +0200
    24.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    24.3 @@ -1,65 +0,0 @@
    24.4 -//
    24.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    24.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.7 -//
    24.8 -// This code is free software; you can redistribute it and/or modify it
    24.9 -// under the terms of the GNU General Public License version 2 only, as
   24.10 -// published by the Free Software Foundation.
   24.11 -//
   24.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   24.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   24.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   24.15 -// version 2 for more details (a copy is included in the LICENSE file that
   24.16 -// accompanied this code).
   24.17 -//
   24.18 -// You should have received a copy of the GNU General Public License version
   24.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   24.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   24.21 -//
   24.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   24.23 -// or visit www.oracle.com if you need additional information or have any
   24.24 -// questions.
   24.25 -//
   24.26 -//
   24.27 -
   24.28 -// AMD64 Bsd Architecture Description File
   24.29 -
   24.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
   24.31 -// This block specifies the encoding classes used by the compiler to
   24.32 -// output byte streams.  Encoding classes generate functions which are
   24.33 -// called by Machine Instruction Nodes in order to generate the bit
   24.34 -// encoding of the instruction.  Operands specify their base encoding
   24.35 -// interface with the interface keyword.  There are currently
   24.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
   24.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
   24.38 -// which returns its register number when queried.  CONST_INTER causes
   24.39 -// an operand to generate a function which returns the value of the
   24.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
   24.41 -// four functions which return the Base Register, the Index Register,
   24.42 -// the Scale Value, and the Offset Value of the operand when queried.
   24.43 -// COND_INTER causes an operand to generate six functions which return
   24.44 -// the encoding code (ie - encoding bits for the instruction)
   24.45 -// associated with each basic boolean condition for a conditional
   24.46 -// instruction.  Instructions specify two basic values for encoding.
   24.47 -// They use the ins_encode keyword to specify their encoding class
   24.48 -// (which must be one of the class names specified in the encoding
   24.49 -// block), and they use the opcode keyword to specify, in order, their
   24.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
   24.51 -// which a particular instruction needs for encoding need to be
   24.52 -// specified.
   24.53 -encode %{
   24.54 -  // Build emit functions for each basic byte or larger field in the intel
   24.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   24.56 -  // code in the enc_class source block.  Emit functions will live in the
   24.57 -  // main source block for now.  In future, we can generalize this by
   24.58 -  // adding a syntax that specifies the sizes of fields in an order,
   24.59 -  // so that the adlc can build the emit functions automagically
   24.60 -
   24.61 -%}
   24.62 -
   24.63 -
   24.64 -// Platform dependent source
   24.65 -
   24.66 -source %{
   24.67 -
   24.68 -%}
    25.1 --- a/src/os_cpu/linux_x86/vm/linux_x86_32.ad	Mon Aug 19 17:47:21 2013 +0200
    25.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    25.3 @@ -1,26 +0,0 @@
    25.4 -//
    25.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    25.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.7 -//
    25.8 -// This code is free software; you can redistribute it and/or modify it
    25.9 -// under the terms of the GNU General Public License version 2 only, as
   25.10 -// published by the Free Software Foundation.
   25.11 -//
   25.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   25.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   25.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   25.15 -// version 2 for more details (a copy is included in the LICENSE file that
   25.16 -// accompanied this code).
   25.17 -//
   25.18 -// You should have received a copy of the GNU General Public License version
   25.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   25.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   25.21 -//
   25.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   25.23 -// or visit www.oracle.com if you need additional information or have any
   25.24 -// questions.
   25.25 -//
   25.26 -//
   25.27 -
   25.28 -// X86 Linux Architecture Description File
   25.29 -
    26.1 --- a/src/os_cpu/linux_x86/vm/linux_x86_64.ad	Mon Aug 19 17:47:21 2013 +0200
    26.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    26.3 @@ -1,65 +0,0 @@
    26.4 -//
    26.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    26.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.7 -//
    26.8 -// This code is free software; you can redistribute it and/or modify it
    26.9 -// under the terms of the GNU General Public License version 2 only, as
   26.10 -// published by the Free Software Foundation.
   26.11 -//
   26.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   26.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   26.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   26.15 -// version 2 for more details (a copy is included in the LICENSE file that
   26.16 -// accompanied this code).
   26.17 -//
   26.18 -// You should have received a copy of the GNU General Public License version
   26.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   26.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   26.21 -//
   26.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   26.23 -// or visit www.oracle.com if you need additional information or have any
   26.24 -// questions.
   26.25 -//
   26.26 -//
   26.27 -
   26.28 -// AMD64 Linux Architecture Description File
   26.29 -
   26.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
   26.31 -// This block specifies the encoding classes used by the compiler to
   26.32 -// output byte streams.  Encoding classes generate functions which are
   26.33 -// called by Machine Instruction Nodes in order to generate the bit
   26.34 -// encoding of the instruction.  Operands specify their base encoding
   26.35 -// interface with the interface keyword.  There are currently
   26.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
   26.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
   26.38 -// which returns its register number when queried.  CONST_INTER causes
   26.39 -// an operand to generate a function which returns the value of the
   26.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
   26.41 -// four functions which return the Base Register, the Index Register,
   26.42 -// the Scale Value, and the Offset Value of the operand when queried.
   26.43 -// COND_INTER causes an operand to generate six functions which return
   26.44 -// the encoding code (ie - encoding bits for the instruction)
   26.45 -// associated with each basic boolean condition for a conditional
   26.46 -// instruction.  Instructions specify two basic values for encoding.
   26.47 -// They use the ins_encode keyword to specify their encoding class
   26.48 -// (which must be one of the class names specified in the encoding
   26.49 -// block), and they use the opcode keyword to specify, in order, their
   26.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
   26.51 -// which a particular instruction needs for encoding need to be
   26.52 -// specified.
   26.53 -encode %{
   26.54 -  // Build emit functions for each basic byte or larger field in the intel
   26.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   26.56 -  // code in the enc_class source block.  Emit functions will live in the
   26.57 -  // main source block for now.  In future, we can generalize this by
   26.58 -  // adding a syntax that specifies the sizes of fields in an order,
   26.59 -  // so that the adlc can build the emit functions automagically
   26.60 -
   26.61 -%}
   26.62 -
   26.63 -
   26.64 -// Platform dependent source
   26.65 -
   26.66 -source %{
   26.67 -
   26.68 -%}
    27.1 --- a/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Mon Aug 19 17:47:21 2013 +0200
    27.2 +++ b/src/os_cpu/linux_zero/vm/os_linux_zero.cpp	Fri Aug 23 22:12:18 2013 +0100
    27.3 @@ -410,16 +410,6 @@
    27.4    int SpinPause() {
    27.5    }
    27.6  
    27.7 -  int SafeFetch32(int *adr, int errValue) {
    27.8 -    int value = errValue;
    27.9 -    value = *adr;
   27.10 -    return value;
   27.11 -  }
   27.12 -  intptr_t SafeFetchN(intptr_t *adr, intptr_t errValue) {
   27.13 -    intptr_t value = errValue;
   27.14 -    value = *adr;
   27.15 -    return value;
   27.16 -  }
   27.17  
   27.18    void _Copy_conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) {
   27.19      if (from > to) {
    28.1 --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.ad	Mon Aug 19 17:47:21 2013 +0200
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,27 +0,0 @@
    28.4 -//
    28.5 -// Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved.
    28.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.7 -//
    28.8 -// This code is free software; you can redistribute it and/or modify it
    28.9 -// under the terms of the GNU General Public License version 2 only, as
   28.10 -// published by the Free Software Foundation.
   28.11 -//
   28.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   28.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   28.15 -// version 2 for more details (a copy is included in the LICENSE file that
   28.16 -// accompanied this code).
   28.17 -//
   28.18 -// You should have received a copy of the GNU General Public License version
   28.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   28.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   28.21 -//
   28.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   28.23 -// or visit www.oracle.com if you need additional information or have any
   28.24 -// questions.
   28.25 -//
   28.26 -
   28.27 -//
   28.28 -//
   28.29 -
   28.30 -// SPARC Solaris Architecture Description File
    29.1 --- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad	Mon Aug 19 17:47:21 2013 +0200
    29.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.3 @@ -1,26 +0,0 @@
    29.4 -//
    29.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    29.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.7 -//
    29.8 -// This code is free software; you can redistribute it and/or modify it
    29.9 -// under the terms of the GNU General Public License version 2 only, as
   29.10 -// published by the Free Software Foundation.
   29.11 -//
   29.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   29.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   29.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   29.15 -// version 2 for more details (a copy is included in the LICENSE file that
   29.16 -// accompanied this code).
   29.17 -//
   29.18 -// You should have received a copy of the GNU General Public License version
   29.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   29.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   29.21 -//
   29.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   29.23 -// or visit www.oracle.com if you need additional information or have any
   29.24 -// questions.
   29.25 -//
   29.26 -//
   29.27 -
   29.28 -// X86 Solaris Architecture Description File
   29.29 -
    30.1 --- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.ad	Mon Aug 19 17:47:21 2013 +0200
    30.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.3 @@ -1,63 +0,0 @@
    30.4 -//
    30.5 -// Copyright (c) 2004, 2012, Oracle and/or its affiliates. All rights reserved.
    30.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.7 -//
    30.8 -// This code is free software; you can redistribute it and/or modify it
    30.9 -// under the terms of the GNU General Public License version 2 only, as
   30.10 -// published by the Free Software Foundation.
   30.11 -//
   30.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   30.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   30.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   30.15 -// version 2 for more details (a copy is included in the LICENSE file that
   30.16 -// accompanied this code).
   30.17 -//
   30.18 -// You should have received a copy of the GNU General Public License version
   30.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   30.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   30.21 -//
   30.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   30.23 -// or visit www.oracle.com if you need additional information or have any
   30.24 -// questions.
   30.25 -//
   30.26 -//
   30.27 -
   30.28 -// AMD64 Solaris Architecture Description File
   30.29 -
   30.30 -//----------OS-DEPENDENT ENCODING BLOCK----------------------------------------
   30.31 -// This block specifies the encoding classes used by the compiler to
   30.32 -// output byte streams.  Encoding classes generate functions which are
   30.33 -// called by Machine Instruction Nodes in order to generate the bit
   30.34 -// encoding of the instruction.  Operands specify their base encoding
   30.35 -// interface with the interface keyword.  There are currently
   30.36 -// supported four interfaces, REG_INTER, CONST_INTER, MEMORY_INTER, &
   30.37 -// COND_INTER.  REG_INTER causes an operand to generate a function
   30.38 -// which returns its register number when queried.  CONST_INTER causes
   30.39 -// an operand to generate a function which returns the value of the
   30.40 -// constant when queried.  MEMORY_INTER causes an operand to generate
   30.41 -// four functions which return the Base Register, the Index Register,
   30.42 -// the Scale Value, and the Offset Value of the operand when queried.
   30.43 -// COND_INTER causes an operand to generate six functions which return
   30.44 -// the encoding code (ie - encoding bits for the instruction)
   30.45 -// associated with each basic boolean condition for a conditional
   30.46 -// instruction.  Instructions specify two basic values for encoding.
   30.47 -// They use the ins_encode keyword to specify their encoding class
   30.48 -// (which must be one of the class names specified in the encoding
   30.49 -// block), and they use the opcode keyword to specify, in order, their
   30.50 -// primary, secondary, and tertiary opcode.  Only the opcode sections
   30.51 -// which a particular instruction needs for encoding need to be
   30.52 -// specified.
   30.53 -encode %{
   30.54 -  // Build emit functions for each basic byte or larger field in the intel
   30.55 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   30.56 -  // code in the enc_class source block.  Emit functions will live in the
   30.57 -  // main source block for now.  In future, we can generalize this by
   30.58 -  // adding a syntax that specifies the sizes of fields in an order,
   30.59 -  // so that the adlc can build the emit functions automagically
   30.60 -%}
   30.61 -
   30.62 -
   30.63 -// Platform dependent source
   30.64 -
   30.65 -source %{
   30.66 -%}
    31.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad	Mon Aug 19 17:47:21 2013 +0200
    31.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    31.3 @@ -1,26 +0,0 @@
    31.4 -//
    31.5 -// Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    31.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.7 -//
    31.8 -// This code is free software; you can redistribute it and/or modify it
    31.9 -// under the terms of the GNU General Public License version 2 only, as
   31.10 -// published by the Free Software Foundation.
   31.11 -//
   31.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   31.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   31.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   31.15 -// version 2 for more details (a copy is included in the LICENSE file that
   31.16 -// accompanied this code).
   31.17 -//
   31.18 -// You should have received a copy of the GNU General Public License version
   31.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   31.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   31.21 -//
   31.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   31.23 -// or visit www.oracle.com if you need additional information or have any
   31.24 -// questions.
   31.25 -//
   31.26 -//
   31.27 -
   31.28 -// X86 Win32 Architecture Description File
   31.29 -
    32.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad	Mon Aug 19 17:47:21 2013 +0200
    32.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    32.3 @@ -1,63 +0,0 @@
    32.4 -//
    32.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    32.6 -// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.7 -//
    32.8 -// This code is free software; you can redistribute it and/or modify it
    32.9 -// under the terms of the GNU General Public License version 2 only, as
   32.10 -// published by the Free Software Foundation.
   32.11 -//
   32.12 -// This code is distributed in the hope that it will be useful, but WITHOUT
   32.13 -// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   32.14 -// FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   32.15 -// version 2 for more details (a copy is included in the LICENSE file that
   32.16 -// accompanied this code).
   32.17 -//
   32.18 -// You should have received a copy of the GNU General Public License version
   32.19 -// 2 along with this work; if not, write to the Free Software Foundation,
   32.20 -// Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   32.21 -//
   32.22 -// Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   32.23 -// or visit www.oracle.com if you need additional information or have any
   32.24 -// questions.
   32.25 -//
   32.26 -//
   32.27 -
   32.28 -// AMD64 Win32 Architecture Description File
   32.29 -
   32.30 -//----------OS-DEPENDENT ENCODING BLOCK-----------------------------------------------------
   32.31 -// This block specifies the encoding classes used by the compiler to output
   32.32 -// byte streams.  Encoding classes generate functions which are called by
   32.33 -// Machine Instruction Nodes in order to generate the bit encoding of the
   32.34 -// instruction.  Operands specify their base encoding interface with the
   32.35 -// interface keyword.  There are currently supported four interfaces,
   32.36 -// REG_INTER, CONST_INTER, MEMORY_INTER, & COND_INTER.  REG_INTER causes an
   32.37 -// operand to generate a function which returns its register number when
   32.38 -// queried.   CONST_INTER causes an operand to generate a function which
   32.39 -// returns the value of the constant when queried.  MEMORY_INTER causes an
   32.40 -// operand to generate four functions which return the Base Register, the
   32.41 -// Index Register, the Scale Value, and the Offset Value of the operand when
   32.42 -// queried.  COND_INTER causes an operand to generate six functions which
   32.43 -// return the encoding code (ie - encoding bits for the instruction)
   32.44 -// associated with each basic boolean condition for a conditional instruction.
   32.45 -// Instructions specify two basic values for encoding.  They use the
   32.46 -// ins_encode keyword to specify their encoding class (which must be one of
   32.47 -// the class names specified in the encoding block), and they use the
   32.48 -// opcode keyword to specify, in order, their primary, secondary, and
   32.49 -// tertiary opcode.  Only the opcode sections which a particular instruction
   32.50 -// needs for encoding need to be specified.
   32.51 -encode %{
   32.52 -  // Build emit functions for each basic byte or larger field in the intel
   32.53 -  // encoding scheme (opcode, rm, sib, immediate), and call them from C++
   32.54 -  // code in the enc_class source block.  Emit functions will live in the
   32.55 -  // main source block for now.  In future, we can generalize this by
   32.56 -  // adding a syntax that specifies the sizes of fields in an order,
   32.57 -  // so that the adlc can build the emit functions automagically
   32.58 -
   32.59 -%}
   32.60 -
   32.61 -
   32.62 -// Platform dependent source
   32.63 -
   32.64 -source %{
   32.65 -
   32.66 -%}
    33.1 --- a/src/share/tools/ProjectCreator/BuildConfig.java	Mon Aug 19 17:47:21 2013 +0200
    33.2 +++ b/src/share/tools/ProjectCreator/BuildConfig.java	Fri Aug 23 22:12:18 2013 +0100
    33.3 @@ -142,6 +142,69 @@
    33.4          return rv;
    33.5      }
    33.6  
    33.7 +    // Returns true if the specified path refers to a relative alternate
    33.8 +    // source file. RelativeAltSrcInclude is usually "src\closed".
    33.9 +    public static boolean matchesRelativeAltSrcInclude(String path) {
   33.10 +        String relativeAltSrcInclude =
   33.11 +            getFieldString(null, "RelativeAltSrcInclude");
   33.12 +        Vector<String> v = getFieldVector(null, "AltRelativeInclude");
   33.13 +        for (String pathPart : v) {
   33.14 +            if (path.contains(relativeAltSrcInclude + Util.sep + pathPart))  {
   33.15 +                return true;
   33.16 +            }
   33.17 +        }
   33.18 +        return false;
   33.19 +    }
   33.20 +
   33.21 +    // Returns the relative alternate source file for the specified path.
   33.22 +    // Null is returned if the specified path does not have a matching
   33.23 +    // alternate source file.
   33.24 +    public static String getMatchingRelativeAltSrcFile(String path) {
   33.25 +        Vector<String> v = getFieldVector(null, "RelativeAltSrcFileList");
   33.26 +        if (v == null) {
   33.27 +            return null;
   33.28 +        }
   33.29 +        for (String pathPart : v) {
   33.30 +            if (path.endsWith(pathPart)) {
   33.31 +                String relativeAltSrcInclude =
   33.32 +                    getFieldString(null, "RelativeAltSrcInclude");
   33.33 +                return relativeAltSrcInclude + Util.sep + pathPart;
   33.34 +            }
   33.35 +        }
   33.36 +        return null;
   33.37 +    }
   33.38 +
   33.39 +    // Returns true if the specified path has a matching alternate
   33.40 +    // source file.
   33.41 +    public static boolean matchesRelativeAltSrcFile(String path) {
   33.42 +        return getMatchingRelativeAltSrcFile(path) != null;
   33.43 +    }
   33.44 +
   33.45 +    // Track the specified alternate source file. The source file is
    33.46 +    // tracked without the leading .*<sep><RelativeAltSrcInclude><sep>
   33.47 +    // part to make matching regular source files easier.
   33.48 +    public static void trackRelativeAltSrcFile(String path) {
   33.49 +        String pattern = getFieldString(null, "RelativeAltSrcInclude") +
   33.50 +            Util.sep;
   33.51 +        int altSrcInd = path.indexOf(pattern);
   33.52 +        if (altSrcInd == -1) {
   33.53 +            // not an AltSrc path
   33.54 +            return;
   33.55 +        }
   33.56 +
   33.57 +        altSrcInd += pattern.length();
   33.58 +        if (altSrcInd >= path.length()) {
   33.59 +            // not a valid AltSrc path
   33.60 +            return;
   33.61 +        }
   33.62 +
   33.63 +        String altSrcFile = path.substring(altSrcInd);
   33.64 +        Vector v = getFieldVector(null, "RelativeAltSrcFileList");
   33.65 +        if (v == null || !v.contains(altSrcFile)) {
   33.66 +            addFieldVector(null, "RelativeAltSrcFileList", altSrcFile);
   33.67 +        }
   33.68 +    }
   33.69 +
   33.70      void addTo(Hashtable ht, String key, String value) {
   33.71          ht.put(expandFormat(key), expandFormat(value));
   33.72      }
   33.73 @@ -272,8 +335,19 @@
   33.74  
   33.75      private Vector getSourceIncludes() {
   33.76          Vector<String> rv = new Vector<String>();
   33.77 +        String sourceBase = getFieldString(null, "SourceBase");
   33.78 +
   33.79 +        // add relative alternate source include values:
   33.80 +        String relativeAltSrcInclude =
   33.81 +            getFieldString(null, "RelativeAltSrcInclude");
   33.82 +        Vector<String> asri = new Vector<String>();
   33.83 +        collectRelevantVectors(asri, "AltRelativeInclude");
   33.84 +        for (String f : asri) {
   33.85 +            rv.add(sourceBase + Util.sep + relativeAltSrcInclude +
   33.86 +                   Util.sep + f);
   33.87 +        }
   33.88 +
   33.89          Vector<String> ri = new Vector<String>();
   33.90 -        String sourceBase = getFieldString(null, "SourceBase");
   33.91          collectRelevantVectors(ri, "RelativeInclude");
   33.92          for (String f : ri) {
   33.93              rv.add(sourceBase + Util.sep + f);
   33.94 @@ -541,35 +615,6 @@
   33.95      }
   33.96  }
   33.97  
   33.98 -class CoreDebugConfig extends GenericDebugNonKernelConfig {
   33.99 -    String getOptFlag() {
  33.100 -        return getCI().getNoOptFlag();
  33.101 -    }
  33.102 -
  33.103 -    CoreDebugConfig() {
  33.104 -        initNames("core", "debug", "jvm.dll");
  33.105 -        init(getIncludes(), getDefines());
  33.106 -    }
  33.107 -}
  33.108 -
  33.109 -class CoreFastDebugConfig extends GenericDebugNonKernelConfig {
  33.110 -    String getOptFlag() {
  33.111 -        return getCI().getOptFlag();
  33.112 -    }
  33.113 -
  33.114 -    CoreFastDebugConfig() {
  33.115 -        initNames("core", "fastdebug", "jvm.dll");
  33.116 -        init(getIncludes(), getDefines());
  33.117 -    }
  33.118 -}
  33.119 -
  33.120 -class CoreProductConfig extends ProductConfig {
  33.121 -    CoreProductConfig() {
  33.122 -        initNames("core", "product", "jvm.dll");
  33.123 -        init(getIncludes(), getDefines());
  33.124 -    }
  33.125 -}
  33.126 -
  33.127  
  33.128  abstract class CompilerInterface {
  33.129      abstract Vector getBaseCompilerFlags(Vector defines, Vector includes, String outDir);
    34.1 --- a/src/share/tools/ProjectCreator/FileTreeCreator.java	Mon Aug 19 17:47:21 2013 +0200
    34.2 +++ b/src/share/tools/ProjectCreator/FileTreeCreator.java	Fri Aug 23 22:12:18 2013 +0100
    34.3 @@ -12,11 +12,15 @@
    34.4     final int startDirLength;
    34.5     Stack<DirAttributes> attributes = new Stack<DirAttributes>();
    34.6     Vector<BuildConfig> allConfigs;
    34.7 -   WinGammaPlatformVC10 wg;
    34.8 +   WinGammaPlatform wg;
    34.9 +   WinGammaPlatformVC10 wg10;
   34.10  
   34.11 -   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatformVC10 wg) {
   34.12 +   public FileTreeCreator(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
   34.13        super();
   34.14        this.wg = wg;
   34.15 +      if (wg instanceof WinGammaPlatformVC10) {
   34.16 +          wg10 = (WinGammaPlatformVC10)wg;
   34.17 +      }
   34.18        this.allConfigs = allConfigs;
   34.19        this.startDir = startDir;
   34.20        startDirLength = startDir.toAbsolutePath().toString().length();
    35.1 --- a/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Mon Aug 19 17:47:21 2013 +0200
    35.2 +++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC10.java	Fri Aug 23 22:12:18 2013 +0100
    35.3 @@ -1,3 +1,27 @@
    35.4 +/*
    35.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    35.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.7 + *
    35.8 + * This code is free software; you can redistribute it and/or modify it
    35.9 + * under the terms of the GNU General Public License version 2 only, as
   35.10 + * published by the Free Software Foundation.
   35.11 + *
   35.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   35.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   35.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   35.15 + * version 2 for more details (a copy is included in the LICENSE file that
   35.16 + * accompanied this code).
   35.17 + *
   35.18 + * You should have received a copy of the GNU General Public License version
   35.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   35.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   35.21 + *
   35.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   35.23 + * or visit www.oracle.com if you need additional information or have any
   35.24 + * questions.
   35.25 + *
   35.26 + */
   35.27 +
   35.28  import static java.nio.file.FileVisitResult.CONTINUE;
   35.29  
   35.30  import java.io.IOException;
   35.31 @@ -21,6 +45,8 @@
   35.32           boolean usePch = false;
   35.33           boolean disablePch = false;
   35.34           boolean useIgnore = false;
   35.35 +         boolean isAltSrc = false;  // only needed as a debugging crumb
   35.36 +         boolean isReplacedByAltSrc = false;
   35.37           String fileName = file.getFileName().toString();
   35.38  
   35.39           // TODO hideFile
   35.40 @@ -30,6 +56,26 @@
   35.41              usePch = true;
   35.42           }
   35.43  
   35.44 +         String fileLoc = vcProjLocation.relativize(file).toString();
   35.45 +
    35.46 +         // isAltSrc and isReplacedByAltSrc apply to all configs for a file
   35.47 +         if (BuildConfig.matchesRelativeAltSrcInclude(
   35.48 +               file.toAbsolutePath().toString())) {
   35.49 +            // current file is an alternate source file so track it
   35.50 +            isAltSrc = true;
   35.51 +            BuildConfig.trackRelativeAltSrcFile(
   35.52 +                file.toAbsolutePath().toString());
   35.53 +         } else if (BuildConfig.matchesRelativeAltSrcFile(
   35.54 +                    file.toAbsolutePath().toString())) {
   35.55 +            // current file is a regular file that matches an alternate
    35.56 +            // source file, so report that the regular file is being replaced
   35.57 +            isReplacedByAltSrc = true;
   35.58 +            System.out.println("INFO: alternate source file '" +
   35.59 +                               BuildConfig.getMatchingRelativeAltSrcFile(
   35.60 +                                   file.toAbsolutePath().toString()) +
   35.61 +                               "' replaces '" + fileLoc + "'");
   35.62 +         }
   35.63 +
   35.64           for (BuildConfig cfg : allConfigs) {
   35.65              if (cfg.lookupHashFieldInContext("IgnoreFile", fileName) != null) {
   35.66                 useIgnore = true;
   35.67 @@ -57,10 +103,9 @@
   35.68              }
   35.69           }
   35.70  
   35.71 -         String tagName = wg.getFileTagFromSuffix(fileName);
   35.72 -         String fileLoc = vcProjLocation.relativize(file).toString();
   35.73 +         String tagName = wg10.getFileTagFromSuffix(fileName);
   35.74  
   35.75 -         if (!useIgnore && !disablePch && !usePch) {
   35.76 +         if (!useIgnore && !disablePch && !usePch && !isReplacedByAltSrc) {
   35.77              wg.tag(tagName, new String[] { "Include", fileLoc});
   35.78           } else {
   35.79              wg.startTag(
   35.80 @@ -78,12 +123,17 @@
   35.81                 if (disablePch) {
   35.82                    wg.tag("PrecompiledHeader", "Condition", "'$(Configuration)|$(Platform)'=='" + cfg.get("Name") + "'");
   35.83                 }
   35.84 +               if (isReplacedByAltSrc) {
   35.85 +                  wg.tagData("ExcludedFromBuild", "true", "Condition",
   35.86 +                             "'$(Configuration)|$(Platform)'=='" +
   35.87 +                             cfg.get("Name") + "'");
   35.88 +               }
   35.89              }
   35.90              wg.endTag();
   35.91           }
   35.92  
   35.93           String filter = startDir.relativize(file.getParent().toAbsolutePath()).toString();
   35.94 -         wg.addFilterDependency(fileLoc, filter);
   35.95 +         wg10.addFilterDependency(fileLoc, filter);
   35.96  
   35.97           return CONTINUE;
   35.98        }
   35.99 @@ -112,7 +162,7 @@
  35.100           if (!hide) {
  35.101              String name = startDir.relativize(path.toAbsolutePath()).toString();
  35.102              if (!"".equals(name)) {
  35.103 -               wg.addFilter(name);
  35.104 +               wg10.addFilter(name);
  35.105              }
  35.106  
  35.107              attributes.push(newAttr);
  35.108 @@ -137,6 +187,4 @@
  35.109        public void writeFileTree() throws IOException {
  35.110           Files.walkFileTree(this.startDir, this);
  35.111        }
  35.112 -
  35.113 -
  35.114 -   }
  35.115 \ No newline at end of file
  35.116 +}
    36.1 --- a/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Mon Aug 19 17:47:21 2013 +0200
    36.2 +++ b/src/share/tools/ProjectCreator/FileTreeCreatorVC7.java	Fri Aug 23 22:12:18 2013 +0100
    36.3 @@ -12,7 +12,7 @@
    36.4  public class FileTreeCreatorVC7 extends FileTreeCreator {
    36.5  
    36.6        public FileTreeCreatorVC7(Path startDir, Vector<BuildConfig> allConfigs, WinGammaPlatform wg) {
    36.7 -         super(startDir, allConfigs, null);
    36.8 +         super(startDir, allConfigs, wg);
    36.9        }
   36.10  
   36.11        @Override
    37.1 --- a/src/share/tools/ProjectCreator/ProjectCreator.java	Mon Aug 19 17:47:21 2013 +0200
    37.2 +++ b/src/share/tools/ProjectCreator/ProjectCreator.java	Fri Aug 23 22:12:18 2013 +0100
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    37.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -39,10 +39,15 @@
   37.11              + "jvm.dll; no trailing slash>");
   37.12        System.err.println("  If any of the above are specified, "
   37.13              + "they must all be.");
   37.14 +      System.err.println("  Note: if '-altRelativeInclude' option below is "
   37.15 +            + "used, then the '-relativeAltSrcInclude' option must be used "
   37.16 +            + "to specify the alternate source dir, e.g., 'src\\closed'");
   37.17        System.err.println("  Additional, optional arguments, which can be "
   37.18              + "specified multiple times:");
   37.19        System.err.println("    -absoluteInclude <string containing absolute "
   37.20              + "path to include directory>");
   37.21 +      System.err.println("    -altRelativeInclude <string containing "
   37.22 +            + "alternate include directory relative to -envVar>");
   37.23        System.err.println("    -relativeInclude <string containing include "
   37.24              + "directory relative to -envVar>");
   37.25        System.err.println("    -define <preprocessor flag to be #defined "
    38.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatform.java	Mon Aug 19 17:47:21 2013 +0200
    38.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java	Fri Aug 23 22:12:18 2013 +0100
    38.3 @@ -1,5 +1,5 @@
    38.4  /*
    38.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    38.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    38.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.8   *
    38.9   * This code is free software; you can redistribute it and/or modify it
   38.10 @@ -140,10 +140,17 @@
   38.11                             "already exist>");
   38.12          System.err.println("  If any of the above are specified, "+
   38.13                             "they must all be.");
   38.14 +        System.err.println("  Note: if '-altRelativeInclude' option below " +
   38.15 +                           "is used, then the '-relativeAltSrcInclude' " +
   38.16 +                           "option must be used to specify the alternate " +
   38.17 +                           "source dir, e.g., 'src\\closed'");
   38.18          System.err.println("  Additional, optional arguments, which can be " +
   38.19                             "specified multiple times:");
   38.20          System.err.println("    -absoluteInclude <string containing absolute " +
   38.21                             "path to include directory>");
   38.22 +        System.err.println("    -altRelativeInclude <string containing " +
   38.23 +                           "alternate include directory relative to " +
   38.24 +                           "-sourceBase>");
   38.25          System.err.println("    -relativeInclude <string containing include " +
   38.26                             "directory relative to -sourceBase>");
   38.27          System.err.println("    -define <preprocessor flag to be #defined " +
   38.28 @@ -343,6 +350,12 @@
   38.29                                HsArgHandler.VECTOR
   38.30                                ),
   38.31  
   38.32 +                new HsArgRule("-altRelativeInclude",
   38.33 +                              "AltRelativeInclude",
   38.34 +                              null,
   38.35 +                              HsArgHandler.VECTOR
   38.36 +                              ),
   38.37 +
   38.38                  new HsArgRule("-relativeInclude",
   38.39                                "RelativeInclude",
   38.40                                null,
   38.41 @@ -355,6 +368,12 @@
   38.42                                HsArgHandler.VECTOR
   38.43                                ),
   38.44  
   38.45 +                new HsArgRule("-relativeAltSrcInclude",
   38.46 +                              "RelativeAltSrcInclude",
   38.47 +                              null,
   38.48 +                              HsArgHandler.STRING
   38.49 +                              ),
   38.50 +
   38.51                  new HsArgRule("-relativeSrcInclude",
   38.52                                "RelativeSrcInclude",
   38.53                                null,
   38.54 @@ -560,10 +579,6 @@
   38.55          allConfigs.add(new TieredFastDebugConfig());
   38.56          allConfigs.add(new TieredProductConfig());
   38.57  
   38.58 -        allConfigs.add(new CoreDebugConfig());
   38.59 -        allConfigs.add(new CoreFastDebugConfig());
   38.60 -        allConfigs.add(new CoreProductConfig());
   38.61 -
   38.62          return allConfigs;
   38.63      }
   38.64  
    39.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Mon Aug 19 17:47:21 2013 +0200
    39.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC10.java	Fri Aug 23 22:12:18 2013 +0100
    39.3 @@ -1,3 +1,27 @@
    39.4 +/*
    39.5 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
    39.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.7 + *
    39.8 + * This code is free software; you can redistribute it and/or modify it
    39.9 + * under the terms of the GNU General Public License version 2 only, as
   39.10 + * published by the Free Software Foundation.
   39.11 + *
   39.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   39.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   39.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   39.15 + * version 2 for more details (a copy is included in the LICENSE file that
   39.16 + * accompanied this code).
   39.17 + *
   39.18 + * You should have received a copy of the GNU General Public License version
   39.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   39.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   39.21 + *
   39.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   39.23 + * or visit www.oracle.com if you need additional information or have any
   39.24 + * questions.
   39.25 + *
   39.26 + */
   39.27 +
   39.28  import java.io.File;
   39.29  import java.io.FileNotFoundException;
   39.30  import java.io.IOException;
   39.31 @@ -24,7 +48,7 @@
   39.32      public void writeProjectFile(String projectFileName, String projectName,
   39.33              Vector<BuildConfig> allConfigs) throws IOException {
   39.34          System.out.println();
   39.35 -        System.out.print("    Writing .vcxproj file: " + projectFileName);
   39.36 +        System.out.println("    Writing .vcxproj file: " + projectFileName);
   39.37  
   39.38          String projDir = Util.normalize(new File(projectFileName).getParent());
   39.39  
   39.40 @@ -114,7 +138,7 @@
   39.41  
   39.42          endTag();
   39.43          printWriter.close();
   39.44 -        System.out.println("    Done.");
   39.45 +        System.out.println("    Done writing .vcxproj file.");
   39.46  
   39.47          writeFilterFile(projectFileName, projectName, allConfigs, projDir);
   39.48          writeUserFile(projectFileName, allConfigs);
    40.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Mon Aug 19 17:47:21 2013 +0200
    40.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java	Fri Aug 23 22:12:18 2013 +0100
    40.3 @@ -139,19 +139,22 @@
    40.4  
    40.5        tagV("Tool", cfg.getV("LinkerFlags"));
    40.6  
    40.7 -      tag("Tool",
    40.8 -            new String[] {
    40.9 -            "Name",
   40.10 -            "VCPostBuildEventTool",
   40.11 -            "Description",
   40.12 -            BuildConfig
   40.13 -            .getFieldString(null, "PostbuildDescription"),
   40.14 -            // Caution: String.replace(String,String) is available
   40.15 -            // from JDK5 onwards only
   40.16 -            "CommandLine",
   40.17 -            cfg.expandFormat(BuildConfig.getFieldString(null,
   40.18 -                  "PostbuildCommand").replace("\t",
   40.19 -                        "&#x0D;&#x0A;")) });
   40.20 +      String postBuildCmd = BuildConfig.getFieldString(null,
   40.21 +            "PostbuildCommand");
   40.22 +      if (postBuildCmd != null) {
   40.23 +         tag("Tool",
   40.24 +               new String[] {
   40.25 +               "Name",
   40.26 +               "VCPostBuildEventTool",
   40.27 +               "Description",
   40.28 +               BuildConfig
   40.29 +               .getFieldString(null, "PostbuildDescription"),
   40.30 +               // Caution: String.replace(String,String) is available
   40.31 +               // from JDK5 onwards only
   40.32 +               "CommandLine",
   40.33 +                   cfg.expandFormat(postBuildCmd.replace("\t",
   40.34 +                           "&#x0D;&#x0A;")) });
   40.35 +      }
   40.36  
   40.37        tag("Tool", new String[] { "Name", "VCPreBuildEventTool" });
   40.38  
    41.1 --- a/src/share/vm/classfile/javaClasses.cpp	Mon Aug 19 17:47:21 2013 +0200
    41.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Fri Aug 23 22:12:18 2013 +0100
    41.3 @@ -2557,6 +2557,26 @@
    41.4    *offset = value;
    41.5  }
    41.6  
    41.7 +// Support for java_lang_invoke_DirectMethodHandle
    41.8 +
    41.9 +int java_lang_invoke_DirectMethodHandle::_member_offset;
   41.10 +
   41.11 +oop java_lang_invoke_DirectMethodHandle::member(oop dmh) {
   41.12 +  oop member_name = NULL;
   41.13 +  bool is_dmh = dmh->is_oop() && java_lang_invoke_DirectMethodHandle::is_instance(dmh);
   41.14 +  assert(is_dmh, "a DirectMethodHandle oop is expected");
   41.15 +  if (is_dmh) {
   41.16 +    member_name = dmh->obj_field(member_offset_in_bytes());
   41.17 +  }
   41.18 +  return member_name;
   41.19 +}
   41.20 +
   41.21 +void java_lang_invoke_DirectMethodHandle::compute_offsets() {
   41.22 +  Klass* klass_oop = SystemDictionary::DirectMethodHandle_klass();
   41.23 +  if (klass_oop != NULL && EnableInvokeDynamic) {
   41.24 +    compute_offset(_member_offset, klass_oop, vmSymbols::member_name(), vmSymbols::java_lang_invoke_MemberName_signature());
   41.25 +  }
   41.26 +}
   41.27  
   41.28  // Support for java_lang_invoke_MethodHandle
   41.29  
   41.30 @@ -3205,6 +3225,7 @@
   41.31    java_lang_ThreadGroup::compute_offsets();
   41.32    if (EnableInvokeDynamic) {
   41.33      java_lang_invoke_MethodHandle::compute_offsets();
   41.34 +    java_lang_invoke_DirectMethodHandle::compute_offsets();
   41.35      java_lang_invoke_MemberName::compute_offsets();
   41.36      java_lang_invoke_LambdaForm::compute_offsets();
   41.37      java_lang_invoke_MethodType::compute_offsets();
    42.1 --- a/src/share/vm/classfile/javaClasses.hpp	Mon Aug 19 17:47:21 2013 +0200
    42.2 +++ b/src/share/vm/classfile/javaClasses.hpp	Fri Aug 23 22:12:18 2013 +0100
    42.3 @@ -976,6 +976,32 @@
    42.4    static int form_offset_in_bytes()             { return _form_offset; }
    42.5  };
    42.6  
    42.7 +// Interface to java.lang.invoke.DirectMethodHandle objects
    42.8 +
    42.9 +class java_lang_invoke_DirectMethodHandle: AllStatic {
   42.10 +  friend class JavaClasses;
   42.11 +
   42.12 + private:
   42.13 +  static int _member_offset;               // the MemberName of this DMH
   42.14 +
   42.15 +  static void compute_offsets();
   42.16 +
   42.17 + public:
   42.18 +  // Accessors
   42.19 +  static oop  member(oop mh);
   42.20 +
   42.21 +  // Testers
   42.22 +  static bool is_subclass(Klass* klass) {
   42.23 +    return klass->is_subclass_of(SystemDictionary::DirectMethodHandle_klass());
   42.24 +  }
   42.25 +  static bool is_instance(oop obj) {
   42.26 +    return obj != NULL && is_subclass(obj->klass());
   42.27 +  }
   42.28 +
   42.29 +  // Accessors for code generation:
   42.30 +  static int member_offset_in_bytes()           { return _member_offset; }
   42.31 +};
   42.32 +
   42.33  // Interface to java.lang.invoke.LambdaForm objects
   42.34  // (These are a private interface for managing adapter code generation.)
   42.35  
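
The new DirectMethodHandle accessors follow the usual javaClasses pattern: compute_offsets() resolves the "member" field offset once at bootstrap, and member() reads the field through member_offset_in_bytes(). A minimal usage sketch, assuming an oop already in hand (the helper name and dmh_oop are hypothetical; the guarded access mirrors the member() implementation in javaClasses.cpp above):

    // Hypothetical helper; mirrors java_lang_invoke_DirectMethodHandle::member().
    oop member_name_of(oop dmh_oop) {
      if (dmh_oop == NULL ||
          !java_lang_invoke_DirectMethodHandle::is_instance(dmh_oop)) {
        return NULL;  // not a DirectMethodHandle, so there is no MemberName to read
      }
      // Load the "member" field at the offset cached by compute_offsets().
      return dmh_oop->obj_field(
          java_lang_invoke_DirectMethodHandle::member_offset_in_bytes());
    }
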
    43.1 --- a/src/share/vm/classfile/systemDictionary.hpp	Mon Aug 19 17:47:21 2013 +0200
    43.2 +++ b/src/share/vm/classfile/systemDictionary.hpp	Fri Aug 23 22:12:18 2013 +0100
    43.3 @@ -151,6 +151,7 @@
    43.4    do_klass(reflect_CallerSensitive_klass,               sun_reflect_CallerSensitive,               Opt                 ) \
    43.5                                                                                                                           \
    43.6    /* support for dynamic typing; it's OK if these are NULL in earlier JDKs */                                            \
    43.7 +  do_klass(DirectMethodHandle_klass,                    java_lang_invoke_DirectMethodHandle,       Opt                 ) \
    43.8    do_klass(MethodHandle_klass,                          java_lang_invoke_MethodHandle,             Pre_JSR292          ) \
    43.9    do_klass(MemberName_klass,                            java_lang_invoke_MemberName,               Pre_JSR292          ) \
   43.10    do_klass(MethodHandleNatives_klass,                   java_lang_invoke_MethodHandleNatives,      Pre_JSR292          ) \
    44.1 --- a/src/share/vm/classfile/vmSymbols.hpp	Mon Aug 19 17:47:21 2013 +0200
    44.2 +++ b/src/share/vm/classfile/vmSymbols.hpp	Fri Aug 23 22:12:18 2013 +0100
    44.3 @@ -255,6 +255,7 @@
    44.4    /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */                                   \
    44.5    template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                \
    44.6    template(java_lang_invoke_ConstantCallSite,         "java/lang/invoke/ConstantCallSite")        \
    44.7 +  template(java_lang_invoke_DirectMethodHandle,       "java/lang/invoke/DirectMethodHandle")      \
    44.8    template(java_lang_invoke_MutableCallSite,          "java/lang/invoke/MutableCallSite")         \
    44.9    template(java_lang_invoke_VolatileCallSite,         "java/lang/invoke/VolatileCallSite")        \
   44.10    template(java_lang_invoke_MethodHandle,             "java/lang/invoke/MethodHandle")            \
   44.11 @@ -352,6 +353,7 @@
   44.12    template(thread_id_name,                            "tid")                                      \
   44.13    template(newInstance0_name,                         "newInstance0")                             \
   44.14    template(limit_name,                                "limit")                                    \
   44.15 +  template(member_name,                               "member")                                   \
   44.16    template(forName_name,                              "forName")                                  \
   44.17    template(forName0_name,                             "forName0")                                 \
   44.18    template(isJavaIdentifierStart_name,                "isJavaIdentifierStart")                    \
    45.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Aug 19 17:47:21 2013 +0200
    45.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Aug 23 22:12:18 2013 +0100
    45.3 @@ -50,6 +50,7 @@
    45.4  #include "memory/genMarkSweep.hpp"
    45.5  #include "memory/genOopClosures.inline.hpp"
    45.6  #include "memory/iterator.hpp"
    45.7 +#include "memory/padded.hpp"
    45.8  #include "memory/referencePolicy.hpp"
    45.9  #include "memory/resourceArea.hpp"
   45.10  #include "memory/tenuredGeneration.hpp"
    46.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Aug 19 17:47:21 2013 +0200
    46.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Aug 23 22:12:18 2013 +0100
    46.3 @@ -927,11 +927,9 @@
    46.4                                     workers->active_workers(),
    46.5                                     Threads::number_of_non_daemon_threads());
    46.6    workers->set_active_workers(active_workers);
    46.7 -  _next_gen = gch->next_gen(this);
    46.8 -  assert(_next_gen != NULL,
    46.9 -    "This must be the youngest gen, and not the only gen");
   46.10    assert(gch->n_gens() == 2,
   46.11           "Par collection currently only works with single older gen.");
   46.12 +  _next_gen = gch->next_gen(this);
   46.13    // Do we have to avoid promotion_undo?
   46.14    if (gch->collector_policy()->is_concurrent_mark_sweep_policy()) {
   46.15      set_avoid_promotion_undo(true);
    47.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Aug 19 17:47:21 2013 +0200
    47.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Aug 23 22:12:18 2013 +0100
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    47.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -29,6 +29,7 @@
   47.11  #include "gc_implementation/shared/parGCAllocBuffer.hpp"
   47.12  #include "gc_implementation/shared/copyFailedInfo.hpp"
   47.13  #include "memory/defNewGeneration.hpp"
   47.14 +#include "memory/padded.hpp"
   47.15  #include "utilities/taskqueue.hpp"
   47.16  
   47.17  class ChunkArray;
    48.1 --- a/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Mon Aug 19 17:47:21 2013 +0200
    48.2 +++ b/src/share/vm/gc_implementation/parNew/parOopClosures.hpp	Fri Aug 23 22:12:18 2013 +0100
    48.3 @@ -1,5 +1,5 @@
    48.4  /*
    48.5 - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
    48.6 + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
    48.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    48.8   *
    48.9   * This code is free software; you can redistribute it and/or modify it
   48.10 @@ -26,6 +26,7 @@
   48.11  #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PAROOPCLOSURES_HPP
   48.12  
   48.13  #include "memory/genOopClosures.hpp"
   48.14 +#include "memory/padded.hpp"
   48.15  
   48.16  // Closures for ParNewGeneration
   48.17  
    49.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Aug 19 17:47:21 2013 +0200
    49.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Aug 23 22:12:18 2013 +0100
    49.3 @@ -29,14 +29,16 @@
    49.4  #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
    49.5  #include "gc_implementation/shared/gcTrace.hpp"
    49.6  #include "gc_implementation/shared/mutableSpace.hpp"
    49.7 +#include "memory/allocation.inline.hpp"
    49.8  #include "memory/memRegion.hpp"
    49.9 +#include "memory/padded.inline.hpp"
   49.10  #include "oops/oop.inline.hpp"
   49.11  #include "oops/oop.psgc.inline.hpp"
   49.12  
   49.13 -PSPromotionManager**         PSPromotionManager::_manager_array = NULL;
   49.14 -OopStarTaskQueueSet*         PSPromotionManager::_stack_array_depth = NULL;
   49.15 -PSOldGen*                    PSPromotionManager::_old_gen = NULL;
   49.16 -MutableSpace*                PSPromotionManager::_young_space = NULL;
   49.17 +PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
   49.18 +OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
   49.19 +PSOldGen*                      PSPromotionManager::_old_gen = NULL;
   49.20 +MutableSpace*                  PSPromotionManager::_young_space = NULL;
   49.21  
   49.22  void PSPromotionManager::initialize() {
   49.23    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   49.24 @@ -45,8 +47,10 @@
   49.25    _old_gen = heap->old_gen();
   49.26    _young_space = heap->young_gen()->to_space();
   49.27  
    49.28 +  // To prevent false sharing, we pad the PSPromotionManagers
    49.29 +  // and make sure that the first instance starts on a cache-line boundary.
   49.30    assert(_manager_array == NULL, "Attempt to initialize twice");
   49.31 -  _manager_array = NEW_C_HEAP_ARRAY(PSPromotionManager*, ParallelGCThreads+1, mtGC);
   49.32 +  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
   49.33    guarantee(_manager_array != NULL, "Could not initialize promotion manager");
   49.34  
   49.35    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
   49.36 @@ -54,26 +58,21 @@
   49.37  
   49.38    // Create and register the PSPromotionManager(s) for the worker threads.
   49.39    for(uint i=0; i<ParallelGCThreads; i++) {
   49.40 -    _manager_array[i] = new PSPromotionManager();
   49.41 -    guarantee(_manager_array[i] != NULL, "Could not create PSPromotionManager");
   49.42 -    stack_array_depth()->register_queue(i, _manager_array[i]->claimed_stack_depth());
   49.43 +    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
   49.44    }
   49.45 -
   49.46    // The VMThread gets its own PSPromotionManager, which is not available
   49.47    // for work stealing.
   49.48 -  _manager_array[ParallelGCThreads] = new PSPromotionManager();
   49.49 -  guarantee(_manager_array[ParallelGCThreads] != NULL, "Could not create PSPromotionManager");
   49.50  }
   49.51  
   49.52  PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
   49.53    assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
   49.54    assert(_manager_array != NULL, "Sanity");
   49.55 -  return _manager_array[index];
   49.56 +  return &_manager_array[index];
   49.57  }
   49.58  
   49.59  PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
   49.60    assert(_manager_array != NULL, "Sanity");
   49.61 -  return _manager_array[ParallelGCThreads];
   49.62 +  return &_manager_array[ParallelGCThreads];
   49.63  }
   49.64  
   49.65  void PSPromotionManager::pre_scavenge() {
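
The change above replaces an array of PSPromotionManager pointers with one contiguous PaddedEnd<PSPromotionManager> array, so each worker's manager sits on its own cache line and the accessors hand out &_manager_array[index] instead of a stored pointer. A standalone sketch of the same idea, assuming a 64-byte cache line (the names below are illustrative, not HotSpot's):

    #include <cstddef>

    static const size_t kCacheLine = 64;        // assumed cache-line size

    struct Manager {                            // stand-in for PSPromotionManager
      size_t masked_pushes;
    };

    // alignas rounds sizeof up to a cache-line multiple, so adjacent elements
    // of an array never share a line (no false sharing between GC workers).
    struct alignas(kCacheLine) PaddedManager : Manager {};
    static_assert(sizeof(PaddedManager) % kCacheLine == 0,
                  "element size must be a whole number of cache lines");

    PaddedManager* create_manager_array(size_t n) {
      // C++17 gives over-aligned types an aligned allocation, so element i
      // is simply &array[i], matching the &_manager_array[index] accessors.
      return new PaddedManager[n];
    }
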
    50.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Aug 19 17:47:21 2013 +0200
    50.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Aug 23 22:12:18 2013 +0100
    50.3 @@ -29,6 +29,8 @@
    50.4  #include "gc_implementation/shared/gcTrace.hpp"
    50.5  #include "gc_implementation/shared/copyFailedInfo.hpp"
    50.6  #include "memory/allocation.hpp"
    50.7 +#include "memory/padded.hpp"
    50.8 +#include "utilities/globalDefinitions.hpp"
    50.9  #include "utilities/taskqueue.hpp"
   50.10  
   50.11  //
   50.12 @@ -51,14 +53,14 @@
   50.13  class PSOldGen;
   50.14  class ParCompactionManager;
   50.15  
   50.16 -class PSPromotionManager : public CHeapObj<mtGC> {
   50.17 +class PSPromotionManager VALUE_OBJ_CLASS_SPEC {
   50.18    friend class PSScavenge;
   50.19    friend class PSRefProcTaskExecutor;
   50.20   private:
   50.21 -  static PSPromotionManager**         _manager_array;
   50.22 -  static OopStarTaskQueueSet*         _stack_array_depth;
   50.23 -  static PSOldGen*                    _old_gen;
   50.24 -  static MutableSpace*                _young_space;
   50.25 +  static PaddedEnd<PSPromotionManager>* _manager_array;
   50.26 +  static OopStarTaskQueueSet*           _stack_array_depth;
   50.27 +  static PSOldGen*                      _old_gen;
   50.28 +  static MutableSpace*                  _young_space;
   50.29  
   50.30  #if TASKQUEUE_STATS
   50.31    size_t                              _masked_pushes;
    51.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Aug 19 17:47:21 2013 +0200
    51.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    51.3 @@ -32,7 +32,7 @@
    51.4  inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
    51.5    assert(_manager_array != NULL, "access of NULL manager_array");
    51.6    assert(index >= 0 && index <= (int)ParallelGCThreads, "out of range manager_array access");
    51.7 -  return _manager_array[index];
    51.8 +  return &_manager_array[index];
    51.9  }
   51.10  
   51.11  template <class T>
    52.1 --- a/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Mon Aug 19 17:47:21 2013 +0200
    52.2 +++ b/src/share/vm/gc_implementation/shared/objectCountEventSender.cpp	Fri Aug 23 22:12:18 2013 +0100
    52.3 @@ -32,6 +32,7 @@
    52.4  #if INCLUDE_SERVICES
    52.5  
    52.6  void ObjectCountEventSender::send(const KlassInfoEntry* entry, GCId gc_id, jlong timestamp) {
    52.7 +#if INCLUDE_TRACE
    52.8    assert(Tracing::is_event_enabled(EventObjectCountAfterGC::eventId),
    52.9           "Only call this method if the event is enabled");
   52.10  
   52.11 @@ -42,6 +43,7 @@
   52.12    event.set_totalSize(entry->words() * BytesPerWord);
   52.13    event.set_endtime(timestamp);
   52.14    event.commit();
   52.15 +#endif // INCLUDE_TRACE
   52.16  }
   52.17  
   52.18  bool ObjectCountEventSender::should_send_event() {
    53.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp	Mon Aug 19 17:47:21 2013 +0200
    53.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Aug 23 22:12:18 2013 +0100
    53.3 @@ -1209,3 +1209,26 @@
    53.4                         size_of_arguments * Interpreter::stackElementSize);
    53.5  IRT_END
    53.6  #endif
    53.7 +
    53.8 +#if INCLUDE_JVMTI
     53.9 +// This supports the JVMTI PopFrame interface.
    53.10 +// Make sure the call is an invokestatic of a polymorphic intrinsic that has a member_name argument,
    53.11 +// and return that argument as a vm_result so that it can be reloaded in the list of invokestatic parameters.
    53.12 +// The dmh argument is a reference to a DirectMethodHandle that has a MemberName field.
   53.13 +IRT_ENTRY(void, InterpreterRuntime::member_name_arg_or_null(JavaThread* thread, address dmh,
   53.14 +                                                            Method* method, address bcp))
   53.15 +  Bytecodes::Code code = Bytecodes::code_at(method, bcp);
   53.16 +  if (code != Bytecodes::_invokestatic) {
   53.17 +    return;
   53.18 +  }
   53.19 +  ConstantPool* cpool = method->constants();
   53.20 +  int cp_index = Bytes::get_native_u2(bcp + 1) + ConstantPool::CPCACHE_INDEX_TAG;
   53.21 +  Symbol* cname = cpool->klass_name_at(cpool->klass_ref_index_at(cp_index));
   53.22 +  Symbol* mname = cpool->name_ref_at(cp_index);
   53.23 +
   53.24 +  if (MethodHandles::has_member_arg(cname, mname)) {
   53.25 +    oop member_name = java_lang_invoke_DirectMethodHandle::member((oop)dmh);
   53.26 +    thread->set_vm_result(member_name);
   53.27 +  }
   53.28 +IRT_END
   53.29 +#endif // INCLUDE_JVMTI
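
The cp_index arithmetic in the entry above works because, by the time this code runs, the interpreter has rewritten the invokestatic operand into a two-byte constant-pool-cache index in native byte order, which must then be tagged before use with the ConstantPool query methods. A standalone sketch of the decode step, with an assumed tag value purely for illustration:

    #include <stdint.h>

    // Assumed stand-in for ConstantPool::CPCACHE_INDEX_TAG; the real value is
    // VM-internal. Bytes::get_native_u2() abstracts the byte-order handling.
    static const int kCpCacheIndexTag = 0;

    // bcp points at the invokestatic opcode; its operand is the next two bytes,
    // stored in native order for rewritten bytecodes (little-endian on x86).
    int decode_cp_cache_index(const uint8_t* bcp) {
      uint16_t operand = (uint16_t)(bcp[1] | (bcp[2] << 8));  // little-endian read
      return (int)operand + kCpCacheIndexTag;
    }
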
    54.1 --- a/src/share/vm/interpreter/interpreterRuntime.hpp	Mon Aug 19 17:47:21 2013 +0200
    54.2 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp	Fri Aug 23 22:12:18 2013 +0100
    54.3 @@ -95,6 +95,9 @@
    54.4    static void    create_exception(JavaThread* thread, char* name, char* message);
    54.5    static void    create_klass_exception(JavaThread* thread, char* name, oopDesc* obj);
    54.6    static address exception_handler_for_exception(JavaThread* thread, oopDesc* exception);
    54.7 +#if INCLUDE_JVMTI
    54.8 +  static void    member_name_arg_or_null(JavaThread* thread, address dmh, Method* m, address bcp);
    54.9 +#endif
   54.10    static void    throw_pending_exception(JavaThread* thread);
   54.11  
   54.12    // Statics & fields
    55.1 --- a/src/share/vm/memory/cardTableRS.cpp	Mon Aug 19 17:47:21 2013 +0200
    55.2 +++ b/src/share/vm/memory/cardTableRS.cpp	Fri Aug 23 22:12:18 2013 +0100
    55.3 @@ -310,46 +310,31 @@
    55.4    _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm, cl, this);
    55.5  }
    55.6  
    55.7 -void CardTableRS::clear_into_younger(Generation* gen) {
    55.8 -  GenCollectedHeap* gch = GenCollectedHeap::heap();
    55.9 -  // Generations younger than gen have been evacuated. We can clear
   55.10 -  // card table entries for gen (we know that it has no pointers
   55.11 -  // to younger gens) and for those below. The card tables for
   55.12 -  // the youngest gen need never be cleared.
   55.13 +void CardTableRS::clear_into_younger(Generation* old_gen) {
   55.14 +  assert(old_gen->level() == 1, "Should only be called for the old generation");
   55.15 +  // The card tables for the youngest gen need never be cleared.
   55.16    // There's a bit of subtlety in the clear() and invalidate()
   55.17    // methods that we exploit here and in invalidate_or_clear()
   55.18    // below to avoid missing cards at the fringes. If clear() or
   55.19    // invalidate() are changed in the future, this code should
   55.20    // be revisited. 20040107.ysr
   55.21 -  Generation* g = gen;
   55.22 -  for(Generation* prev_gen = gch->prev_gen(g);
   55.23 -      prev_gen != NULL;
   55.24 -      g = prev_gen, prev_gen = gch->prev_gen(g)) {
   55.25 -    MemRegion to_be_cleared_mr = g->prev_used_region();
   55.26 -    clear(to_be_cleared_mr);
   55.27 -  }
   55.28 +  clear(old_gen->prev_used_region());
   55.29  }
   55.30  
   55.31 -void CardTableRS::invalidate_or_clear(Generation* gen, bool younger) {
   55.32 -  GenCollectedHeap* gch = GenCollectedHeap::heap();
   55.33 -  // For each generation gen (and younger)
   55.34 -  // invalidate the cards for the currently occupied part
   55.35 -  // of that generation and clear the cards for the
   55.36 +void CardTableRS::invalidate_or_clear(Generation* old_gen) {
   55.37 +  assert(old_gen->level() == 1, "Should only be called for the old generation");
   55.38 +  // Invalidate the cards for the currently occupied part of
   55.39 +  // the old generation and clear the cards for the
   55.40    // unoccupied part of the generation (if any, making use
   55.41    // of that generation's prev_used_region to determine that
   55.42    // region). No need to do anything for the youngest
   55.43    // generation. Also see note#20040107.ysr above.
   55.44 -  Generation* g = gen;
   55.45 -  for(Generation* prev_gen = gch->prev_gen(g); prev_gen != NULL;
   55.46 -      g = prev_gen, prev_gen = gch->prev_gen(g))  {
   55.47 -    MemRegion used_mr = g->used_region();
   55.48 -    MemRegion to_be_cleared_mr = g->prev_used_region().minus(used_mr);
   55.49 -    if (!to_be_cleared_mr.is_empty()) {
   55.50 -      clear(to_be_cleared_mr);
   55.51 -    }
   55.52 -    invalidate(used_mr);
   55.53 -    if (!younger) break;
   55.54 +  MemRegion used_mr = old_gen->used_region();
   55.55 +  MemRegion to_be_cleared_mr = old_gen->prev_used_region().minus(used_mr);
   55.56 +  if (!to_be_cleared_mr.is_empty()) {
   55.57 +    clear(to_be_cleared_mr);
   55.58    }
   55.59 +  invalidate(used_mr);
   55.60  }
   55.61  
   55.62  
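
The rewritten clear_into_younger() and invalidate_or_clear() lean on one region identity: prev_used_region() is the old generation's footprint before the collection, used_region() its footprint afterwards, and prev.minus(used) is the evacuated tail whose cards can simply be cleared, while the still-occupied part must be invalidated because compaction may have left old-to-young pointers anywhere in it. A simplified sketch of that arithmetic, assuming half-open [start, end) regions that shrink from the top (Region and the helper are local stand-ins for HotSpot's MemRegion):

    #include <cassert>
    #include <cstddef>

    struct Region {                 // stand-in for MemRegion, in words
      size_t start, end;            // half-open [start, end)
      bool is_empty() const { return start >= end; }
    };

    // prev.minus(used) for the case at hand: 'used' is a shrunken prefix of
    // 'prev', so the leftover suffix is exactly the evacuated space.
    Region evacuated_suffix(Region prev, Region used) {
      assert(prev.start == used.start && used.end <= prev.end);
      return Region{used.end, prev.end};
    }

In the two-generation heap this replaces the old loop over prev_gen(), since only the old generation ever carries such cards.
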
    56.1 --- a/src/share/vm/memory/cardTableRS.hpp	Mon Aug 19 17:47:21 2013 +0200
    56.2 +++ b/src/share/vm/memory/cardTableRS.hpp	Fri Aug 23 22:12:18 2013 +0100
    56.3 @@ -142,12 +142,12 @@
    56.4    void verify_aligned_region_empty(MemRegion mr);
    56.5  
    56.6    void clear(MemRegion mr) { _ct_bs->clear(mr); }
    56.7 -  void clear_into_younger(Generation* gen);
    56.8 +  void clear_into_younger(Generation* old_gen);
    56.9  
   56.10    void invalidate(MemRegion mr, bool whole_heap = false) {
   56.11      _ct_bs->invalidate(mr, whole_heap);
   56.12    }
   56.13 -  void invalidate_or_clear(Generation* gen, bool younger);
   56.14 +  void invalidate_or_clear(Generation* old_gen);
   56.15  
   56.16    static uintx ct_max_alignment_constraint() {
   56.17      return CardTableModRefBS::ct_max_alignment_constraint();
    57.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Mon Aug 19 17:47:21 2013 +0200
    57.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Fri Aug 23 22:12:18 2013 +0100
    57.3 @@ -567,8 +567,6 @@
    57.4    gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
    57.5  
    57.6    _next_gen = gch->next_gen(this);
    57.7 -  assert(_next_gen != NULL,
    57.8 -    "This must be the youngest gen, and not the only gen");
    57.9  
   57.10    // If the next generation is too full to accommodate promotion
   57.11    // from this generation, pass on collection; let the next generation
   57.12 @@ -901,8 +899,6 @@
   57.13    if (_next_gen == NULL) {
   57.14      GenCollectedHeap* gch = GenCollectedHeap::heap();
   57.15      _next_gen = gch->next_gen(this);
   57.16 -    assert(_next_gen != NULL,
   57.17 -           "This must be the youngest gen, and not the only gen");
   57.18    }
   57.19    return _next_gen->promotion_attempt_is_safe(used());
   57.20  }
    58.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Mon Aug 19 17:47:21 2013 +0200
    58.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Fri Aug 23 22:12:18 2013 +0100
    58.3 @@ -1070,13 +1070,13 @@
    58.4  
    58.5  
    58.6  void GenCollectedHeap::prepare_for_compaction() {
    58.7 -  Generation* scanning_gen = _gens[_n_gens-1];
     58.8 +  guarantee(_n_gens == 2, "Wrong number of generations");
    58.9 +  Generation* old_gen = _gens[1];
   58.10    // Start by compacting into same gen.
   58.11 -  CompactPoint cp(scanning_gen, NULL, NULL);
   58.12 -  while (scanning_gen != NULL) {
   58.13 -    scanning_gen->prepare_for_compaction(&cp);
   58.14 -    scanning_gen = prev_gen(scanning_gen);
   58.15 -  }
   58.16 +  CompactPoint cp(old_gen, NULL, NULL);
   58.17 +  old_gen->prepare_for_compaction(&cp);
   58.18 +  Generation* young_gen = _gens[0];
   58.19 +  young_gen->prepare_for_compaction(&cp);
   58.20  }
   58.21  
   58.22  GCStats* GenCollectedHeap::gc_stats(int level) const {
   58.23 @@ -1245,27 +1245,14 @@
   58.24    generation_iterate(&ep_cl, false);
   58.25  }
   58.26  
   58.27 -oop GenCollectedHeap::handle_failed_promotion(Generation* gen,
   58.28 +oop GenCollectedHeap::handle_failed_promotion(Generation* old_gen,
   58.29                                                oop obj,
   58.30                                                size_t obj_size) {
   58.31 +  guarantee(old_gen->level() == 1, "We only get here with an old generation");
   58.32    assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
   58.33    HeapWord* result = NULL;
   58.34  
   58.35 -  // First give each higher generation a chance to allocate the promoted object.
   58.36 -  Generation* allocator = next_gen(gen);
   58.37 -  if (allocator != NULL) {
   58.38 -    do {
   58.39 -      result = allocator->allocate(obj_size, false);
   58.40 -    } while (result == NULL && (allocator = next_gen(allocator)) != NULL);
   58.41 -  }
   58.42 -
   58.43 -  if (result == NULL) {
   58.44 -    // Then give gen and higher generations a chance to expand and allocate the
   58.45 -    // object.
   58.46 -    do {
   58.47 -      result = gen->expand_and_allocate(obj_size, false);
   58.48 -    } while (result == NULL && (gen = next_gen(gen)) != NULL);
   58.49 -  }
   58.50 +  result = old_gen->expand_and_allocate(obj_size, false);
   58.51  
   58.52    if (result != NULL) {
   58.53      Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    59.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Mon Aug 19 17:47:21 2013 +0200
    59.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Fri Aug 23 22:12:18 2013 +0100
    59.3 @@ -368,25 +368,23 @@
    59.4    // collection.
    59.5    virtual bool is_maximal_no_gc() const;
    59.6  
    59.7 -  // Return the generation before "gen", or else NULL.
    59.8 +  // Return the generation before "gen".
    59.9    Generation* prev_gen(Generation* gen) const {
   59.10      int l = gen->level();
   59.11 -    if (l == 0) return NULL;
   59.12 -    else return _gens[l-1];
   59.13 +    guarantee(l > 0, "Out of bounds");
   59.14 +    return _gens[l-1];
   59.15    }
   59.16  
   59.17 -  // Return the generation after "gen", or else NULL.
   59.18 +  // Return the generation after "gen".
   59.19    Generation* next_gen(Generation* gen) const {
   59.20      int l = gen->level() + 1;
   59.21 -    if (l == _n_gens) return NULL;
   59.22 -    else return _gens[l];
   59.23 +    guarantee(l < _n_gens, "Out of bounds");
   59.24 +    return _gens[l];
   59.25    }
   59.26  
   59.27    Generation* get_gen(int i) const {
   59.28 -    if (i >= 0 && i < _n_gens)
   59.29 -      return _gens[i];
   59.30 -    else
   59.31 -      return NULL;
   59.32 +    guarantee(i >= 0 && i < _n_gens, "Out of bounds");
   59.33 +    return _gens[i];
   59.34    }
   59.35  
   59.36    int n_gens() const {
   59.37 @@ -485,9 +483,9 @@
   59.38  
   59.39    // Promotion of obj into gen failed.  Try to promote obj to higher
   59.40    // gens in ascending order; return the new location of obj if successful.
   59.41 -  // Otherwise, try expand-and-allocate for obj in each generation starting at
   59.42 -  // gen; return the new location of obj if successful.  Otherwise, return NULL.
   59.43 -  oop handle_failed_promotion(Generation* gen,
    59.44 +  // Otherwise, try expand-and-allocate for obj in the old
    59.45 +  // generation; return the new location of obj if successful.  Otherwise, return NULL.
   59.46 +  oop handle_failed_promotion(Generation* old_gen,
   59.47                                oop obj,
   59.48                                size_t obj_size);
   59.49  
    60.1 --- a/src/share/vm/memory/genMarkSweep.cpp	Mon Aug 19 17:47:21 2013 +0200
    60.2 +++ b/src/share/vm/memory/genMarkSweep.cpp	Fri Aug 23 22:12:18 2013 +0100
    60.3 @@ -52,8 +52,8 @@
    60.4  #include "utilities/copy.hpp"
    60.5  #include "utilities/events.hpp"
    60.6  
    60.7 -void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp,
    60.8 -  bool clear_all_softrefs) {
    60.9 +void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
   60.10 +  guarantee(level == 1, "We always collect both old and young.");
   60.11    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
   60.12  
   60.13    GenCollectedHeap* gch = GenCollectedHeap::heap();
   60.14 @@ -84,11 +84,6 @@
   60.15    // Capture heap size before collection for printing.
   60.16    size_t gch_prev_used = gch->used();
   60.17  
   60.18 -  // Some of the card table updates below assume that the perm gen is
   60.19 -  // also being collected.
   60.20 -  assert(level == gch->n_gens() - 1,
   60.21 -         "All generations are being collected, ergo perm gen too.");
   60.22 -
   60.23    // Capture used regions for each generation that will be
   60.24    // subject to collection, so that card table adjustments can
   60.25    // be made intelligently (see clear / invalidate further below).
   60.26 @@ -126,17 +121,15 @@
   60.27      all_empty = all_empty && gch->get_gen(i)->used() == 0;
   60.28    }
   60.29    GenRemSet* rs = gch->rem_set();
   60.30 +  Generation* old_gen = gch->get_gen(level);
   60.31    // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
   60.32    if (all_empty) {
   60.33      // We've evacuated all generations below us.
   60.34 -    Generation* g = gch->get_gen(level);
   60.35 -    rs->clear_into_younger(g);
   60.36 +    rs->clear_into_younger(old_gen);
   60.37    } else {
   60.38      // Invalidate the cards corresponding to the currently used
   60.39 -    // region and clear those corresponding to the evacuated region
   60.40 -    // of all generations just collected (i.e. level and younger).
   60.41 -    rs->invalidate_or_clear(gch->get_gen(level),
   60.42 -                            true /* younger */);
   60.43 +    // region and clear those corresponding to the evacuated region.
   60.44 +    rs->invalidate_or_clear(old_gen);
   60.45    }
   60.46  
   60.47    Threads::gc_epilogue();
    61.1 --- a/src/share/vm/memory/genRemSet.hpp	Mon Aug 19 17:47:21 2013 +0200
    61.2 +++ b/src/share/vm/memory/genRemSet.hpp	Fri Aug 23 22:12:18 2013 +0100
    61.3 @@ -135,7 +135,7 @@
    61.4    // younger than gen from generations gen and older.
    61.5    // The parameter clear_perm indicates if the perm_gen's
    61.6    // remembered set should also be processed/cleared.
    61.7 -  virtual void clear_into_younger(Generation* gen) = 0;
    61.8 +  virtual void clear_into_younger(Generation* old_gen) = 0;
    61.9  
   61.10    // Informs the RS that refs in the given "mr" may have changed
   61.11    // arbitrarily, and therefore may contain old-to-young pointers.
   61.12 @@ -146,11 +146,8 @@
   61.13  
   61.14    // Informs the RS that refs in this generation
   61.15    // may have changed arbitrarily, and therefore may contain
   61.16 -  // old-to-young pointers in arbitrary locations. The parameter
   61.17 -  // younger indicates if the same should be done for younger generations
   61.18 -  // as well. The parameter perm indicates if the same should be done for
   61.19 -  // perm gen as well.
   61.20 -  virtual void invalidate_or_clear(Generation* gen, bool younger) = 0;
   61.21 +  // old-to-young pointers in arbitrary locations.
   61.22 +  virtual void invalidate_or_clear(Generation* old_gen) = 0;
   61.23  };
   61.24  
   61.25  #endif // SHARE_VM_MEMORY_GENREMSET_HPP
    62.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    62.2 +++ b/src/share/vm/memory/padded.hpp	Fri Aug 23 22:12:18 2013 +0100
    62.3 @@ -0,0 +1,93 @@
    62.4 +/*
    62.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    62.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.7 + *
    62.8 + * This code is free software; you can redistribute it and/or modify it
    62.9 + * under the terms of the GNU General Public License version 2 only, as
   62.10 + * published by the Free Software Foundation.
   62.11 + *
   62.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   62.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   62.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   62.15 + * version 2 for more details (a copy is included in the LICENSE file that
   62.16 + * accompanied this code).
   62.17 + *
   62.18 + * You should have received a copy of the GNU General Public License version
   62.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   62.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   62.21 + *
   62.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   62.23 + * or visit www.oracle.com if you need additional information or have any
   62.24 + * questions.
   62.25 + *
   62.26 + */
   62.27 +
   62.28 +#ifndef SHARE_VM_MEMORY_PADDED_HPP
   62.29 +#define SHARE_VM_MEMORY_PADDED_HPP
   62.30 +
   62.31 +#include "memory/allocation.hpp"
   62.32 +#include "utilities/globalDefinitions.hpp"
   62.33 +
   62.34 +// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
   62.35 +// expected cache line size (a power of two).  The first addend avoids sharing
   62.36 +// when the start address is not a multiple of alignment; the second maintains
   62.37 +// alignment of starting addresses that happen to be a multiple.
   62.38 +#define PADDING_SIZE(type, alignment)                           \
   62.39 +  ((alignment) + align_size_up_(sizeof(type), alignment))
   62.40 +
   62.41 +// Templates to create a subclass padded to avoid cache line sharing.  These are
   62.42 +// effective only when applied to derived-most (leaf) classes.
   62.43 +
   62.44 +// When no args are passed to the base ctor.
   62.45 +template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   62.46 +class Padded : public T {
   62.47 + private:
   62.48 +  char _pad_buf_[PADDING_SIZE(T, alignment)];
   62.49 +};
   62.50 +
   62.51 +// When either 0 or 1 args may be passed to the base ctor.
   62.52 +template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   62.53 +class Padded01 : public T {
   62.54 + public:
   62.55 +  Padded01(): T() { }
   62.56 +  Padded01(Arg1T arg1): T(arg1) { }
   62.57 + private:
   62.58 +  char _pad_buf_[PADDING_SIZE(T, alignment)];
   62.59 +};
   62.60 +
   62.61 +// Super class of PaddedEnd when pad_size != 0.
   62.62 +template <class T, size_t pad_size>
   62.63 +class PaddedEndImpl : public T {
   62.64 + private:
   62.65 +  char _pad_buf[pad_size];
   62.66 +};
   62.67 +
   62.68 +// Super class of PaddedEnd when pad_size == 0.
   62.69 +template <class T>
   62.70 +class PaddedEndImpl<T, /*pad_size*/ 0> : public T {
   62.71 +  // No padding.
   62.72 +};
   62.73 +
   62.74 +#define PADDED_END_SIZE(type, alignment) (align_size_up_(sizeof(type), alignment) - sizeof(type))
   62.75 +
    62.76 +// A more memory-conservative implementation of Padded. The subclass adds
    62.77 +// only the minimal amount of padding needed to align the size of the objects.
    62.78 +// This helps reduce false sharing,
    62.79 +// provided the start address is a multiple of alignment.
   62.80 +template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   62.81 +class PaddedEnd : public PaddedEndImpl<T, PADDED_END_SIZE(T, alignment)> {
    62.82 +  // C++ doesn't allow zero-length arrays. The padding is put in a
    62.83 +  // superclass that is specialized for the pad_size == 0 case.
   62.84 +};
   62.85 +
   62.86 +// Helper class to create an array of PaddedEnd<T> objects. All elements will
   62.87 +// start at a multiple of alignment and the size will be aligned to alignment.
   62.88 +template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   62.89 +class PaddedArray {
   62.90 + public:
   62.91 +  // Creates an aligned padded array.
   62.92 +  // The memory can't be deleted since the raw memory chunk is not returned.
   62.93 +  static PaddedEnd<T>* create_unfreeable(uint length);
   62.94 +};
   62.95 +
   62.96 +#endif // SHARE_VM_MEMORY_PADDED_HPP
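
To make the padding arithmetic concrete, here is a minimal self-contained analogue of the PaddedEnd idea; Counter, PaddedCounter, and CACHE_LINE are made-up stand-ins for illustration, not HotSpot code:

    #include <cstddef>
    #include <cstdio>

    static const size_t CACHE_LINE = 64;  // stand-in for DEFAULT_CACHE_LINE_SIZE

    struct Counter { long value; };       // hypothetical payload type

    // Pad Counter up to the next multiple of CACHE_LINE, as PaddedEnd does.
    // The real template specializes the pad array away when it would be zero,
    // because C++ doesn't allow zero-length arrays.
    struct PaddedCounter : Counter {
      char _pad[CACHE_LINE - sizeof(Counter) % CACHE_LINE];
    };

    int main() {
      printf("sizeof(Counter) = %zu, sizeof(PaddedCounter) = %zu\n",
             sizeof(Counter), sizeof(PaddedCounter));  // typically 8 and 64
      return 0;
    }

Two threads updating adjacent PaddedCounter elements then never touch the same cache line, which is exactly the false-sharing case the header targets.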
    63.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    63.2 +++ b/src/share/vm/memory/padded.inline.hpp	Fri Aug 23 22:12:18 2013 +0100
    63.3 @@ -0,0 +1,49 @@
    63.4 +/*
    63.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    63.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.7 + *
    63.8 + * This code is free software; you can redistribute it and/or modify it
    63.9 + * under the terms of the GNU General Public License version 2 only, as
   63.10 + * published by the Free Software Foundation.
   63.11 + *
   63.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   63.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   63.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   63.15 + * version 2 for more details (a copy is included in the LICENSE file that
   63.16 + * accompanied this code).
   63.17 + *
   63.18 + * You should have received a copy of the GNU General Public License version
   63.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   63.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   63.21 + *
   63.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   63.23 + * or visit www.oracle.com if you need additional information or have any
   63.24 + * questions.
   63.25 + *
   63.26 + */
   63.27 +
   63.28 +#include "memory/allocation.inline.hpp"
   63.29 +#include "memory/padded.hpp"
   63.30 +#include "utilities/debug.hpp"
   63.31 +#include "utilities/globalDefinitions.hpp"
   63.32 +
   63.33 +// Creates an aligned padded array.
   63.34 +// The memory can't be deleted since the raw memory chunk is not returned.
   63.35 +template <class T, MEMFLAGS flags, size_t alignment>
   63.36 +PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
   63.37 +  // Check that the PaddedEnd class works as intended.
   63.38 +  STATIC_ASSERT(is_size_aligned_(sizeof(PaddedEnd<T>), alignment));
   63.39 +
   63.40 +  // Allocate a chunk of memory large enough to allow for some alignment.
   63.41 +  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);
   63.42 +
   63.43 +  // Make the initial alignment.
   63.44 +  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_pointer_up(chunk, alignment);
   63.45 +
   63.46 +  // Call the default constructor for each element.
   63.47 +  for (uint i = 0; i < length; i++) {
   63.48 +    ::new (&aligned_padded_array[i]) T();
   63.49 +  }
   63.50 +
   63.51 +  return aligned_padded_array;
   63.52 +}
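
The helper over-allocates by alignment bytes, slides the start pointer up to the next multiple of alignment, and placement-constructs each element; since the raw chunk pointer is discarded, the array is unfreeable by design. A hypothetical call site (Counter is a made-up type; mtGC is one of the existing MEMFLAGS values):

    // Eight default-constructed, cache-line-aligned elements.
    PaddedEnd<Counter>* counters =
        PaddedArray<Counter, mtGC>::create_unfreeable(8);
    // Each &counters[i] starts at a multiple of DEFAULT_CACHE_LINE_SIZE, and
    // sizeof(PaddedEnd<Counter>) is a multiple of it too, so no two elements
    // share a cache line.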
    64.1 --- a/src/share/vm/memory/universe.cpp	Mon Aug 19 17:47:21 2013 +0200
    64.2 +++ b/src/share/vm/memory/universe.cpp	Fri Aug 23 22:12:18 2013 +0100
    64.3 @@ -105,10 +105,9 @@
    64.4  Array<Klass*>* Universe::_the_array_interfaces_array = NULL;
    64.5  oop Universe::_the_null_string                        = NULL;
    64.6  oop Universe::_the_min_jint_string                   = NULL;
    64.7 -LatestMethodOopCache* Universe::_finalizer_register_cache = NULL;
    64.8 -LatestMethodOopCache* Universe::_loader_addClass_cache    = NULL;
    64.9 -LatestMethodOopCache* Universe::_pd_implies_cache         = NULL;
   64.10 -ActiveMethodOopsCache* Universe::_reflect_invoke_cache    = NULL;
   64.11 +LatestMethodCache* Universe::_finalizer_register_cache = NULL;
   64.12 +LatestMethodCache* Universe::_loader_addClass_cache    = NULL;
   64.13 +LatestMethodCache* Universe::_pd_implies_cache         = NULL;
   64.14  oop Universe::_out_of_memory_error_java_heap          = NULL;
   64.15  oop Universe::_out_of_memory_error_metaspace          = NULL;
   64.16  oop Universe::_out_of_memory_error_class_metaspace    = NULL;
   64.17 @@ -225,7 +224,6 @@
   64.18    f->do_ptr((void**)&_the_empty_klass_array);
   64.19    _finalizer_register_cache->serialize(f);
   64.20    _loader_addClass_cache->serialize(f);
   64.21 -  _reflect_invoke_cache->serialize(f);
   64.22    _pd_implies_cache->serialize(f);
   64.23  }
   64.24  
   64.25 @@ -649,10 +647,9 @@
   64.26  
   64.27    // We have a heap so create the Method* caches before
   64.28    // Metaspace::initialize_shared_spaces() tries to populate them.
   64.29 -  Universe::_finalizer_register_cache = new LatestMethodOopCache();
   64.30 -  Universe::_loader_addClass_cache    = new LatestMethodOopCache();
   64.31 -  Universe::_pd_implies_cache         = new LatestMethodOopCache();
   64.32 -  Universe::_reflect_invoke_cache     = new ActiveMethodOopsCache();
   64.33 +  Universe::_finalizer_register_cache = new LatestMethodCache();
   64.34 +  Universe::_loader_addClass_cache    = new LatestMethodCache();
   64.35 +  Universe::_pd_implies_cache         = new LatestMethodCache();
   64.36  
   64.37    if (UseSharedSpaces) {
   64.38      // Read the data structures supporting the shared spaces (shared
   64.39 @@ -1088,35 +1085,21 @@
   64.40                                    vmSymbols::register_method_name(),
   64.41                                    vmSymbols::register_method_signature());
   64.42    if (m == NULL || !m->is_static()) {
   64.43 -    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
   64.44 -      "java.lang.ref.Finalizer.register", false);
   64.45 +    tty->print_cr("Unable to link/verify Finalizer.register method");
   64.46 +    return false; // initialization failed (cannot throw exception yet)
   64.47    }
   64.48    Universe::_finalizer_register_cache->init(
   64.49 -    SystemDictionary::Finalizer_klass(), m, CHECK_false);
   64.50 -
   64.51 -  // Resolve on first use and initialize class.
   64.52 -  // Note: No race-condition here, since a resolve will always return the same result
   64.53 -
   64.54 -  // Setup method for security checks
   64.55 -  k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_reflect_Method(), true, CHECK_false);
   64.56 -  k_h = instanceKlassHandle(THREAD, k);
   64.57 -  k_h->link_class(CHECK_false);
   64.58 -  m = k_h->find_method(vmSymbols::invoke_name(), vmSymbols::object_object_array_object_signature());
   64.59 -  if (m == NULL || m->is_static()) {
   64.60 -    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
   64.61 -      "java.lang.reflect.Method.invoke", false);
   64.62 -  }
   64.63 -  Universe::_reflect_invoke_cache->init(k_h(), m, CHECK_false);
   64.64 +    SystemDictionary::Finalizer_klass(), m);
   64.65  
   64.66    // Setup method for registering loaded classes in class loader vector
   64.67    InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->link_class(CHECK_false);
   64.68    m = InstanceKlass::cast(SystemDictionary::ClassLoader_klass())->find_method(vmSymbols::addClass_name(), vmSymbols::class_void_signature());
   64.69    if (m == NULL || m->is_static()) {
   64.70 -    THROW_MSG_(vmSymbols::java_lang_NoSuchMethodException(),
   64.71 -      "java.lang.ClassLoader.addClass", false);
   64.72 +    tty->print_cr("Unable to link/verify ClassLoader.addClass method");
   64.73 +    return false; // initialization failed (cannot throw exception yet)
   64.74    }
   64.75    Universe::_loader_addClass_cache->init(
   64.76 -    SystemDictionary::ClassLoader_klass(), m, CHECK_false);
   64.77 +    SystemDictionary::ClassLoader_klass(), m);
   64.78  
   64.79    // Setup method for checking protection domain
   64.80    InstanceKlass::cast(SystemDictionary::ProtectionDomain_klass())->link_class(CHECK_false);
   64.81 @@ -1132,7 +1115,7 @@
   64.82        return false; // initialization failed
   64.83      }
   64.84      Universe::_pd_implies_cache->init(
   64.85 -      SystemDictionary::ProtectionDomain_klass(), m, CHECK_false);;
   64.86 +      SystemDictionary::ProtectionDomain_klass(), m);;
   64.87    }
   64.88  
    64.89    // The following is initializing converter functions for serialization in
   64.90 @@ -1455,7 +1438,7 @@
   64.91  }
   64.92  
   64.93  
   64.94 -void CommonMethodOopCache::init(Klass* k, Method* m, TRAPS) {
   64.95 +void LatestMethodCache::init(Klass* k, Method* m) {
   64.96    if (!UseSharedSpaces) {
   64.97      _klass = k;
   64.98    }
   64.99 @@ -1471,88 +1454,7 @@
  64.100  }
  64.101  
  64.102  
  64.103 -ActiveMethodOopsCache::~ActiveMethodOopsCache() {
  64.104 -  if (_prev_methods != NULL) {
  64.105 -    delete _prev_methods;
  64.106 -    _prev_methods = NULL;
  64.107 -  }
  64.108 -}
  64.109 -
  64.110 -
  64.111 -void ActiveMethodOopsCache::add_previous_version(Method* method) {
  64.112 -  assert(Thread::current()->is_VM_thread(),
  64.113 -    "only VMThread can add previous versions");
  64.114 -
  64.115 -  // Only append the previous method if it is executing on the stack.
  64.116 -  if (method->on_stack()) {
  64.117 -
  64.118 -    if (_prev_methods == NULL) {
  64.119 -      // This is the first previous version so make some space.
  64.120 -      // Start with 2 elements under the assumption that the class
  64.121 -      // won't be redefined much.
  64.122 -      _prev_methods = new (ResourceObj::C_HEAP, mtClass) GrowableArray<Method*>(2, true);
  64.123 -    }
  64.124 -
  64.125 -    // RC_TRACE macro has an embedded ResourceMark
  64.126 -    RC_TRACE(0x00000100,
  64.127 -      ("add: %s(%s): adding prev version ref for cached method @%d",
  64.128 -        method->name()->as_C_string(), method->signature()->as_C_string(),
  64.129 -        _prev_methods->length()));
  64.130 -
  64.131 -    _prev_methods->append(method);
  64.132 -  }
  64.133 -
  64.134 -
  64.135 -  // Since the caller is the VMThread and we are at a safepoint, this is a good
  64.136 -  // time to clear out unused method references.
  64.137 -
  64.138 -  if (_prev_methods == NULL) return;
  64.139 -
  64.140 -  for (int i = _prev_methods->length() - 1; i >= 0; i--) {
  64.141 -    Method* method = _prev_methods->at(i);
  64.142 -    assert(method != NULL, "weak method ref was unexpectedly cleared");
  64.143 -
  64.144 -    if (!method->on_stack()) {
  64.145 -      // This method isn't running anymore so remove it
  64.146 -      _prev_methods->remove_at(i);
  64.147 -      MetadataFactory::free_metadata(method->method_holder()->class_loader_data(), method);
  64.148 -    } else {
  64.149 -      // RC_TRACE macro has an embedded ResourceMark
  64.150 -      RC_TRACE(0x00000400,
  64.151 -        ("add: %s(%s): previous cached method @%d is alive",
  64.152 -         method->name()->as_C_string(), method->signature()->as_C_string(), i));
  64.153 -    }
  64.154 -  }
  64.155 -} // end add_previous_version()
  64.156 -
  64.157 -
  64.158 -bool ActiveMethodOopsCache::is_same_method(const Method* method) const {
  64.159 -  InstanceKlass* ik = InstanceKlass::cast(klass());
  64.160 -  const Method* check_method = ik->method_with_idnum(method_idnum());
  64.161 -  assert(check_method != NULL, "sanity check");
  64.162 -  if (check_method == method) {
  64.163 -    // done with the easy case
  64.164 -    return true;
  64.165 -  }
  64.166 -
  64.167 -  if (_prev_methods != NULL) {
  64.168 -    // The cached method has been redefined at least once so search
  64.169 -    // the previous versions for a match.
  64.170 -    for (int i = 0; i < _prev_methods->length(); i++) {
  64.171 -      check_method = _prev_methods->at(i);
  64.172 -      if (check_method == method) {
  64.173 -        // a previous version matches
  64.174 -        return true;
  64.175 -      }
  64.176 -    }
  64.177 -  }
  64.178 -
  64.179 -  // either no previous versions or no previous version matched
  64.180 -  return false;
  64.181 -}
  64.182 -
  64.183 -
  64.184 -Method* LatestMethodOopCache::get_Method() {
  64.185 +Method* LatestMethodCache::get_method() {
  64.186    if (klass() == NULL) return NULL;
  64.187    InstanceKlass* ik = InstanceKlass::cast(klass());
  64.188    Method* m = ik->method_with_idnum(method_idnum());
    65.1 --- a/src/share/vm/memory/universe.hpp	Mon Aug 19 17:47:21 2013 +0200
    65.2 +++ b/src/share/vm/memory/universe.hpp	Fri Aug 23 22:12:18 2013 +0100
    65.3 @@ -41,10 +41,11 @@
    65.4  class DeferredObjAllocEvent;
    65.5  
    65.6  
    65.7 -// Common parts of a Method* cache. This cache safely interacts with
    65.8 -// the RedefineClasses API.
    65.9 -//
   65.10 -class CommonMethodOopCache : public CHeapObj<mtClass> {
   65.11 +// A helper class for caching a Method* when the user of the cache
   65.12 +// only cares about the latest version of the Method*.  This cache safely
   65.13 +// interacts with the RedefineClasses API.
   65.14 +
   65.15 +class LatestMethodCache : public CHeapObj<mtClass> {
   65.16    // We save the Klass* and the idnum of Method* in order to get
   65.17    // the current cached Method*.
   65.18   private:
   65.19 @@ -52,12 +53,14 @@
   65.20    int                   _method_idnum;
   65.21  
   65.22   public:
   65.23 -  CommonMethodOopCache()   { _klass = NULL; _method_idnum = -1; }
   65.24 -  ~CommonMethodOopCache()  { _klass = NULL; _method_idnum = -1; }
   65.25 +  LatestMethodCache()   { _klass = NULL; _method_idnum = -1; }
   65.26 +  ~LatestMethodCache()  { _klass = NULL; _method_idnum = -1; }
   65.27  
   65.28 -  void     init(Klass* k, Method* m, TRAPS);
   65.29 -  Klass* klass() const         { return _klass; }
   65.30 -  int      method_idnum() const  { return _method_idnum; }
   65.31 +  void   init(Klass* k, Method* m);
   65.32 +  Klass* klass() const           { return _klass; }
   65.33 +  int    method_idnum() const    { return _method_idnum; }
   65.34 +
   65.35 +  Method* get_method();
   65.36  
   65.37    // Enhanced Class Redefinition support
   65.38    void classes_do(void f(Klass*)) {
   65.39 @@ -72,39 +75,6 @@
   65.40  };
   65.41  
   65.42  
   65.43 -// A helper class for caching a Method* when the user of the cache
   65.44 -// cares about all versions of the Method*.
   65.45 -//
   65.46 -class ActiveMethodOopsCache : public CommonMethodOopCache {
   65.47 -  // This subclass adds weak references to older versions of the
   65.48 -  // Method* and a query method for a Method*.
   65.49 -
   65.50 - private:
   65.51 -  // If the cached Method* has not been redefined, then
   65.52 -  // _prev_methods will be NULL. If all of the previous
   65.53 -  // versions of the method have been collected, then
   65.54 -  // _prev_methods can have a length of zero.
   65.55 -  GrowableArray<Method*>* _prev_methods;
   65.56 -
   65.57 - public:
   65.58 -  ActiveMethodOopsCache()   { _prev_methods = NULL; }
   65.59 -  ~ActiveMethodOopsCache();
   65.60 -
   65.61 -  void add_previous_version(Method* method);
   65.62 -  bool is_same_method(const Method* method) const;
   65.63 -};
   65.64 -
   65.65 -
   65.66 -// A helper class for caching a Method* when the user of the cache
   65.67 -// only cares about the latest version of the Method*.
   65.68 -//
   65.69 -class LatestMethodOopCache : public CommonMethodOopCache {
   65.70 -  // This subclass adds a getter method for the latest Method*.
   65.71 -
   65.72 - public:
   65.73 -  Method* get_Method();
   65.74 -};
   65.75 -
   65.76  // For UseCompressedOops and UseCompressedKlassPointers.
   65.77  struct NarrowPtrStruct {
   65.78    // Base address for oop/klass-within-java-object materialization.
   65.79 @@ -174,10 +144,10 @@
   65.80    static objArrayOop  _the_empty_class_klass_array;   // Canonicalized obj array of type java.lang.Class
   65.81    static oop          _the_null_string;               // A cache of "null" as a Java string
   65.82    static oop          _the_min_jint_string;          // A cache of "-2147483648" as a Java string
   65.83 -  static LatestMethodOopCache* _finalizer_register_cache; // static method for registering finalizable objects
   65.84 -  static LatestMethodOopCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
   65.85 -  static LatestMethodOopCache* _pd_implies_cache;         // method for checking protection domain attributes
   65.86 -  static ActiveMethodOopsCache* _reflect_invoke_cache;    // method for security checks
   65.87 +  static LatestMethodCache* _finalizer_register_cache; // static method for registering finalizable objects
   65.88 +  static LatestMethodCache* _loader_addClass_cache;    // method for registering loaded classes in class loader vector
   65.89 +  static LatestMethodCache* _pd_implies_cache;         // method for checking protection domain attributes
   65.90 +
   65.91    // preallocated error objects (no backtrace)
   65.92    static oop          _out_of_memory_error_java_heap;
   65.93    static oop          _out_of_memory_error_metaspace;
   65.94 @@ -334,11 +304,11 @@
   65.95    static Array<Klass*>* the_array_interfaces_array() { return _the_array_interfaces_array;   }
   65.96    static oop          the_null_string()               { return _the_null_string;               }
   65.97    static oop          the_min_jint_string()          { return _the_min_jint_string;          }
   65.98 -  static Method*      finalizer_register_method()     { return _finalizer_register_cache->get_Method(); }
   65.99 -  static Method*      loader_addClass_method()        { return _loader_addClass_cache->get_Method(); }
  65.100  
  65.101 -  static Method*      protection_domain_implies_method() { return _pd_implies_cache->get_Method(); }
  65.102 -  static ActiveMethodOopsCache* reflect_invoke_cache() { return _reflect_invoke_cache; }
  65.103 +  static Method*      finalizer_register_method()     { return _finalizer_register_cache->get_method(); }
  65.104 +  static Method*      loader_addClass_method()        { return _loader_addClass_cache->get_method(); }
  65.105 +
  65.106 +  static Method*      protection_domain_implies_method() { return _pd_implies_cache->get_method(); }
  65.107  
  65.108    static oop          null_ptr_exception_instance()   { return _null_ptr_exception_instance;   }
  65.109    static oop          arithmetic_exception_instance() { return _arithmetic_exception_instance; }
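
Call sites consequently never hold on to a Method*; they go through the static accessors on each use, e.g. (names from the declarations above):

    // Re-resolves through the cache, so a redefined Finalizer.register
    // is picked up automatically on the next call.
    Method* m = Universe::finalizer_register_method();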
    66.1 --- a/src/share/vm/oops/method.cpp	Mon Aug 19 17:47:21 2013 +0200
    66.2 +++ b/src/share/vm/oops/method.cpp	Fri Aug 23 22:12:18 2013 +0100
    66.3 @@ -981,7 +981,6 @@
    66.4  bool Method::is_ignored_by_security_stack_walk() const {
    66.5    const bool use_new_reflection = JDK_Version::is_gte_jdk14x_version() && UseNewReflection;
    66.6  
    66.7 -  assert(intrinsic_id() != vmIntrinsics::_invoke || Universe::reflect_invoke_cache()->is_same_method((Method*)this), "sanity");
    66.8    if (intrinsic_id() == vmIntrinsics::_invoke) {
    66.9      // This is Method.invoke() -- ignore it
   66.10      return true;
    67.1 --- a/src/share/vm/opto/block.cpp	Mon Aug 19 17:47:21 2013 +0200
    67.2 +++ b/src/share/vm/opto/block.cpp	Fri Aug 23 22:12:18 2013 +0100
    67.3 @@ -221,7 +221,7 @@
    67.4  //------------------------------is_uncommon------------------------------------
    67.5  // True if block is low enough frequency or guarded by a test which
    67.6  // mostly does not go here.
    67.7 -bool Block::is_uncommon( Block_Array &bbs ) const {
    67.8 +bool Block::is_uncommon(PhaseCFG* cfg) const {
    67.9    // Initial blocks must never be moved, so are never uncommon.
   67.10    if (head()->is_Root() || head()->is_Start())  return false;
   67.11  
   67.12 @@ -238,7 +238,7 @@
   67.13    uint uncommon_for_freq_preds = 0;
   67.14  
   67.15    for( uint i=1; i<num_preds(); i++ ) {
   67.16 -    Block* guard = bbs[pred(i)->_idx];
   67.17 +    Block* guard = cfg->get_block_for_node(pred(i));
   67.18      // Check to see if this block follows its guard 1 time out of 10000
   67.19      // or less.
   67.20      //
   67.21 @@ -285,11 +285,11 @@
   67.22    }
   67.23  }
   67.24  
   67.25 -void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
   67.26 +void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
   67.27    if (is_connector()) {
   67.28      for (uint i=1; i<num_preds(); i++) {
   67.29 -      Block *p = ((*bbs)[pred(i)->_idx]);
   67.30 -      p->dump_pred(bbs, orig, st);
   67.31 +      Block *p = cfg->get_block_for_node(pred(i));
   67.32 +      p->dump_pred(cfg, orig, st);
   67.33      }
   67.34    } else {
   67.35      dump_bidx(orig, st);
   67.36 @@ -297,7 +297,7 @@
   67.37    }
   67.38  }
   67.39  
   67.40 -void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
   67.41 +void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
   67.42    // Print the basic block
   67.43    dump_bidx(this, st);
   67.44    st->print(": #\t");
   67.45 @@ -311,26 +311,28 @@
   67.46    if( head()->is_block_start() ) {
   67.47      for (uint i=1; i<num_preds(); i++) {
   67.48        Node *s = pred(i);
   67.49 -      if (bbs) {
   67.50 -        Block *p = (*bbs)[s->_idx];
   67.51 -        p->dump_pred(bbs, p, st);
   67.52 +      if (cfg != NULL) {
   67.53 +        Block *p = cfg->get_block_for_node(s);
   67.54 +        p->dump_pred(cfg, p, st);
   67.55        } else {
   67.56          while (!s->is_block_start())
   67.57            s = s->in(0);
   67.58          st->print("N%d ", s->_idx );
   67.59        }
   67.60      }
   67.61 -  } else
   67.62 +  } else {
   67.63      st->print("BLOCK HEAD IS JUNK  ");
   67.64 +  }
   67.65  
   67.66    // Print loop, if any
   67.67    const Block *bhead = this;    // Head of self-loop
   67.68    Node *bh = bhead->head();
   67.69 -  if( bbs && bh->is_Loop() && !head()->is_Root() ) {
   67.70 +
   67.71 +  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
   67.72      LoopNode *loop = bh->as_Loop();
   67.73 -    const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
   67.74 +    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
   67.75      while (bx->is_connector()) {
   67.76 -      bx = (*bbs)[bx->pred(1)->_idx];
   67.77 +      bx = cfg->get_block_for_node(bx->pred(1));
   67.78      }
   67.79      st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
   67.80      // Dump any loop-specific bits, especially for CountedLoops.
   67.81 @@ -349,29 +351,32 @@
   67.82    st->print_cr("");
   67.83  }
   67.84  
   67.85 -void Block::dump() const { dump(NULL); }
   67.86 +void Block::dump() const {
   67.87 +  dump(NULL);
   67.88 +}
   67.89  
   67.90 -void Block::dump( const Block_Array *bbs ) const {
   67.91 -  dump_head(bbs);
   67.92 -  uint cnt = _nodes.size();
   67.93 -  for( uint i=0; i<cnt; i++ )
   67.94 +void Block::dump(const PhaseCFG* cfg) const {
   67.95 +  dump_head(cfg);
   67.96 +  for (uint i=0; i< _nodes.size(); i++) {
   67.97      _nodes[i]->dump();
   67.98 +  }
   67.99    tty->print("\n");
  67.100  }
  67.101  #endif
  67.102  
  67.103  //=============================================================================
  67.104  //------------------------------PhaseCFG---------------------------------------
  67.105 -PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
  67.106 -  Phase(CFG),
  67.107 -  _bbs(a),
  67.108 -  _root(r),
  67.109 -  _node_latency(NULL)
  67.110 +PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
  67.111 +: Phase(CFG)
  67.112 +, _block_arena(arena)
  67.113 +, _node_to_block_mapping(arena)
  67.114 +, _root(root)
  67.115 +, _node_latency(NULL)
  67.116  #ifndef PRODUCT
  67.117 -  , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
  67.118 +, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
  67.119  #endif
  67.120  #ifdef ASSERT
  67.121 -  , _raw_oops(a)
  67.122 +, _raw_oops(arena)
  67.123  #endif
  67.124  {
  67.125    ResourceMark rm;
  67.126 @@ -380,13 +385,13 @@
  67.127    // Node on demand.
  67.128    Node *x = new (C) GotoNode(NULL);
  67.129    x->init_req(0, x);
  67.130 -  _goto = m.match_tree(x);
  67.131 +  _goto = matcher.match_tree(x);
  67.132    assert(_goto != NULL, "");
  67.133    _goto->set_req(0,_goto);
  67.134  
  67.135    // Build the CFG in Reverse Post Order
  67.136    _num_blocks = build_cfg();
  67.137 -  _broot = _bbs[_root->_idx];
  67.138 +  _broot = get_block_for_node(_root);
  67.139  }
  67.140  
  67.141  //------------------------------build_cfg--------------------------------------
  67.142 @@ -440,9 +445,9 @@
  67.143        // 'p' now points to the start of this basic block
  67.144  
  67.145        // Put self in array of basic blocks
  67.146 -      Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
  67.147 -      _bbs.map(p->_idx,bb);
  67.148 -      _bbs.map(x->_idx,bb);
  67.149 +      Block *bb = new (_block_arena) Block(_block_arena, p);
  67.150 +      map_node_to_block(p, bb);
  67.151 +      map_node_to_block(x, bb);
  67.152        if( x != p ) {                // Only for root is x == p
  67.153          bb->_nodes.push((Node*)x);
  67.154        }
  67.155 @@ -473,16 +478,16 @@
   67.156       // Check if it is the first node pushed on the stack at the beginning.
  67.157        if (idx == 0) break;          // end of the build
  67.158        // Find predecessor basic block
  67.159 -      Block *pb = _bbs[x->_idx];
  67.160 +      Block *pb = get_block_for_node(x);
  67.161        // Insert into nodes array, if not already there
  67.162 -      if( !_bbs.lookup(proj->_idx) ) {
  67.163 +      if (!has_block(proj)) {
  67.164          assert( x != proj, "" );
  67.165          // Map basic block of projection
  67.166 -        _bbs.map(proj->_idx,pb);
  67.167 +        map_node_to_block(proj, pb);
  67.168          pb->_nodes.push(proj);
  67.169        }
  67.170        // Insert self as a child of my predecessor block
  67.171 -      pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
  67.172 +      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
  67.173        assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
  67.174                "too many control users, not a CFG?" );
  67.175      }
  67.176 @@ -511,15 +516,15 @@
  67.177    RegionNode* region = new (C) RegionNode(2);
  67.178    region->init_req(1, proj);
  67.179    // setup corresponding basic block
  67.180 -  Block* block = new (_bbs._arena) Block(_bbs._arena, region);
  67.181 -  _bbs.map(region->_idx, block);
  67.182 +  Block* block = new (_block_arena) Block(_block_arena, region);
  67.183 +  map_node_to_block(region, block);
  67.184    C->regalloc()->set_bad(region->_idx);
  67.185    // add a goto node
  67.186    Node* gto = _goto->clone(); // get a new goto node
  67.187    gto->set_req(0, region);
  67.188    // add it to the basic block
  67.189    block->_nodes.push(gto);
  67.190 -  _bbs.map(gto->_idx, block);
  67.191 +  map_node_to_block(gto, block);
  67.192    C->regalloc()->set_bad(gto->_idx);
  67.193    // hook up successor block
  67.194    block->_succs.map(block->_num_succs++, out);
  67.195 @@ -570,7 +575,7 @@
  67.196    gto->set_req(0, b->head());
  67.197    Node *bp = b->_nodes[end_idx];
  67.198    b->_nodes.map(end_idx,gto); // Slam over NeverBranch
  67.199 -  _bbs.map(gto->_idx, b);
  67.200 +  map_node_to_block(gto, b);
  67.201    C->regalloc()->set_bad(gto->_idx);
  67.202    b->_nodes.pop();              // Yank projections
  67.203    b->_nodes.pop();              // Yank projections
  67.204 @@ -613,7 +618,7 @@
  67.205    // If the previous block conditionally falls into bx, return false,
  67.206    // because moving bx will create an extra jump.
  67.207    for(uint k = 1; k < bx->num_preds(); k++ ) {
  67.208 -    Block* pred = _bbs[bx->pred(k)->_idx];
  67.209 +    Block* pred = get_block_for_node(bx->pred(k));
  67.210      if (pred == _blocks[bx_index-1]) {
  67.211        if (pred->_num_succs != 1) {
  67.212          return false;
  67.213 @@ -682,7 +687,7 @@
  67.214  
  67.215      // Look for uncommon blocks and move to end.
  67.216      if (!C->do_freq_based_layout()) {
  67.217 -      if( b->is_uncommon(_bbs) ) {
  67.218 +      if (b->is_uncommon(this)) {
  67.219          move_to_end(b, i);
  67.220          last--;                   // No longer check for being uncommon!
  67.221          if( no_flip_branch(b) ) { // Fall-thru case must follow?
  67.222 @@ -870,28 +875,31 @@
  67.223    } while( !p->is_block_start() );
  67.224  
  67.225    // Recursively visit
  67.226 -  for( uint i=1; i<p->req(); i++ )
  67.227 -    _dump_cfg(p->in(i),visited);
  67.228 +  for (uint i = 1; i < p->req(); i++) {
  67.229 +    _dump_cfg(p->in(i), visited);
  67.230 +  }
  67.231  
  67.232    // Dump the block
  67.233 -  _bbs[p->_idx]->dump(&_bbs);
  67.234 +  get_block_for_node(p)->dump(this);
  67.235  }
  67.236  
  67.237  void PhaseCFG::dump( ) const {
  67.238    tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
  67.239 -  if( _blocks.size() ) {        // Did we do basic-block layout?
  67.240 -    for( uint i=0; i<_num_blocks; i++ )
  67.241 -      _blocks[i]->dump(&_bbs);
  67.242 +  if (_blocks.size()) {        // Did we do basic-block layout?
  67.243 +    for (uint i = 0; i < _num_blocks; i++) {
  67.244 +      _blocks[i]->dump(this);
  67.245 +    }
  67.246    } else {                      // Else do it with a DFS
  67.247 -    VectorSet visited(_bbs._arena);
  67.248 +    VectorSet visited(_block_arena);
  67.249      _dump_cfg(_root,visited);
  67.250    }
  67.251  }
  67.252  
  67.253  void PhaseCFG::dump_headers() {
  67.254    for( uint i = 0; i < _num_blocks; i++ ) {
  67.255 -    if( _blocks[i] == NULL ) continue;
  67.256 -    _blocks[i]->dump_head(&_bbs);
  67.257 +    if (_blocks[i]) {
  67.258 +      _blocks[i]->dump_head(this);
  67.259 +    }
  67.260    }
  67.261  }
  67.262  
  67.263 @@ -904,7 +912,7 @@
  67.264      uint j;
  67.265      for (j = 0; j < cnt; j++)  {
  67.266        Node *n = b->_nodes[j];
  67.267 -      assert( _bbs[n->_idx] == b, "" );
  67.268 +      assert(get_block_for_node(n) == b, "");
  67.269        if (j >= 1 && n->is_Mach() &&
  67.270            n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
  67.271          assert(j == 1 || b->_nodes[j-1]->is_Phi(),
  67.272 @@ -913,13 +921,12 @@
  67.273        for (uint k = 0; k < n->req(); k++) {
  67.274          Node *def = n->in(k);
  67.275          if (def && def != n) {
  67.276 -          assert(_bbs[def->_idx] || def->is_Con(),
  67.277 -                 "must have block; constants for debug info ok");
  67.278 +          assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
  67.279            // Verify that instructions in the block is in correct order.
  67.280            // Uses must follow their definition if they are at the same block.
  67.281            // Mostly done to check that MachSpillCopy nodes are placed correctly
  67.282            // when CreateEx node is moved in build_ifg_physical().
  67.283 -          if (_bbs[def->_idx] == b &&
  67.284 +          if (get_block_for_node(def) == b &&
  67.285                !(b->head()->is_Loop() && n->is_Phi()) &&
  67.286                // See (+++) comment in reg_split.cpp
  67.287                !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
    68.1 --- a/src/share/vm/opto/block.hpp	Mon Aug 19 17:47:21 2013 +0200
    68.2 +++ b/src/share/vm/opto/block.hpp	Fri Aug 23 22:12:18 2013 +0100
    68.3 @@ -48,13 +48,12 @@
    68.4    friend class VMStructs;
    68.5    uint _size;                   // allocated size, as opposed to formal limit
    68.6    debug_only(uint _limit;)      // limit to formal domain
    68.7 +  Arena *_arena;                // Arena to allocate in
    68.8  protected:
    68.9    Block **_blocks;
   68.10    void grow( uint i );          // Grow array node to fit
   68.11  
   68.12  public:
   68.13 -  Arena *_arena;                // Arena to allocate in
   68.14 -
   68.15    Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
   68.16      debug_only(_limit=0);
   68.17      _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
   68.18 @@ -77,7 +76,7 @@
   68.19  public:
   68.20    uint _cnt;
   68.21    Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
   68.22 -  void push( Block *b ) { map(_cnt++,b); }
   68.23 +  void push( Block *b ) {  map(_cnt++,b); }
   68.24    Block *pop() { return _blocks[--_cnt]; }
   68.25    Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
   68.26    void remove( uint i );
   68.27 @@ -284,15 +283,15 @@
   68.28    // helper function that adds caller save registers to MachProjNode
   68.29    void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
   68.30    // Schedule a call next in the block
   68.31 -  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
   68.32 +  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);
   68.33  
   68.34    // Perform basic-block local scheduling
   68.35    Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
   68.36 -  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
   68.37 -  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
   68.38 +  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
   68.39 +  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
   68.40    bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
   68.41    // Cleanup if any code lands between a Call and his Catch
   68.42 -  void call_catch_cleanup(Block_Array &bbs, Compile *C);
   68.43 +  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
   68.44    // Detect implicit-null-check opportunities.  Basically, find NULL checks
   68.45    // with suitable memory ops nearby.  Use the memory op to do the NULL check.
   68.46    // I can generate a memory op if there is not one nearby.
   68.47 @@ -331,15 +330,15 @@
   68.48  
   68.49    // Use frequency calculations and code shape to predict if the block
   68.50    // is uncommon.
   68.51 -  bool is_uncommon( Block_Array &bbs ) const;
   68.52 +  bool is_uncommon(PhaseCFG* cfg) const;
   68.53  
   68.54  #ifndef PRODUCT
   68.55    // Debugging print of basic block
   68.56    void dump_bidx(const Block* orig, outputStream* st = tty) const;
   68.57 -  void dump_pred(const Block_Array *bbs, Block* orig, outputStream* st = tty) const;
   68.58 -  void dump_head( const Block_Array *bbs, outputStream* st = tty ) const;
   68.59 +  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
   68.60 +  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
   68.61    void dump() const;
   68.62 -  void dump( const Block_Array *bbs ) const;
   68.63 +  void dump(const PhaseCFG* cfg) const;
   68.64  #endif
   68.65  };
   68.66  
   68.67 @@ -349,6 +348,12 @@
   68.68  class PhaseCFG : public Phase {
   68.69    friend class VMStructs;
   68.70   private:
   68.71 +  // Arena for the blocks to be stored in
   68.72 +  Arena* _block_arena;
   68.73 +
   68.74 +  // Map nodes to owning basic block
   68.75 +  Block_Array _node_to_block_mapping;
   68.76 +
   68.77    // Build a proper looking cfg.  Return count of basic blocks
   68.78    uint build_cfg();
   68.79  
   68.80 @@ -371,22 +376,42 @@
   68.81  
   68.82    Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
   68.83    void verify_anti_dependences(Block* LCA, Node* load) {
   68.84 -    assert(LCA == _bbs[load->_idx], "should already be scheduled");
   68.85 +    assert(LCA == get_block_for_node(load), "should already be scheduled");
   68.86      insert_anti_dependences(LCA, load, true);
   68.87    }
   68.88  
   68.89   public:
   68.90 -  PhaseCFG( Arena *a, RootNode *r, Matcher &m );
   68.91 +  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);
   68.92  
   68.93    uint _num_blocks;             // Count of basic blocks
   68.94    Block_List _blocks;           // List of basic blocks
   68.95    RootNode *_root;              // Root of whole program
   68.96 -  Block_Array _bbs;             // Map Nodes to owning Basic Block
   68.97    Block *_broot;                // Basic block of root
   68.98    uint _rpo_ctr;
   68.99    CFGLoop* _root_loop;
  68.100    float _outer_loop_freq;       // Outmost loop frequency
  68.101  
  68.102 +
  68.103 +  // set which block this node should reside in
  68.104 +  void map_node_to_block(const Node* node, Block* block) {
  68.105 +    _node_to_block_mapping.map(node->_idx, block);
  68.106 +  }
  68.107 +
  68.108 +  // removes the mapping from a node to a block
  68.109 +  void unmap_node_from_block(const Node* node) {
  68.110 +    _node_to_block_mapping.map(node->_idx, NULL);
  68.111 +  }
  68.112 +
  68.113 +  // get the block in which this node resides
  68.114 +  Block* get_block_for_node(const Node* node) const {
  68.115 +    return _node_to_block_mapping[node->_idx];
  68.116 +  }
  68.117 +
   68.118 +  // returns true if this node resides in a block
  68.119 +  bool has_block(const Node* node) const {
  68.120 +    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  68.121 +  }
  68.122 +
  68.123    // Per node latency estimation, valid only during GCM
  68.124    GrowableArray<uint> *_node_latency;
  68.125  
  68.126 @@ -405,7 +430,7 @@
  68.127    void Estimate_Block_Frequency();
  68.128  
  68.129    // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  68.130 -  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  68.131 +  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  68.132    void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );
  68.133  
  68.134    // Compute the (backwards) latency of a node from the uses
  68.135 @@ -454,7 +479,7 @@
  68.136    // Insert a node into a block, and update the _bbs
  68.137    void insert( Block *b, uint idx, Node *n ) {
  68.138      b->_nodes.insert( idx, n );
  68.139 -    _bbs.map( n->_idx, b );
  68.140 +    map_node_to_block(n, b);
  68.141    }
  68.142  
  68.143  #ifndef PRODUCT
  68.144 @@ -543,7 +568,7 @@
  68.145      _child(NULL),
  68.146      _exit_prob(1.0f) {}
  68.147    CFGLoop* parent() { return _parent; }
  68.148 -  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  68.149 +  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  68.150    void add_member(CFGElement *s) { _members.push(s); }
  68.151    void add_nested_loop(CFGLoop* cl);
  68.152    Block* head() {
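
The bulk of the opto changes in this changeset are then mechanical rewrites from indexing the formerly public _bbs array to these accessors; the recurring before/after shapes (all names from the diff) are:

    Block* b = cfg->get_block_for_node(n);  // was: _bbs[n->_idx]
    if (!cfg->has_block(proj)) {            // was: !_bbs.lookup(proj->_idx)
      cfg->map_node_to_block(proj, b);      // was: _bbs.map(proj->_idx, b)
    }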
    69.1 --- a/src/share/vm/opto/buildOopMap.cpp	Mon Aug 19 17:47:21 2013 +0200
    69.2 +++ b/src/share/vm/opto/buildOopMap.cpp	Fri Aug 23 22:12:18 2013 +0100
    69.3 @@ -426,14 +426,16 @@
    69.4    }
    69.5    memset( live, 0, cfg->_num_blocks * (max_reg_ints<<LogBytesPerInt) );
    69.6    // Push preds onto worklist
    69.7 -  for( uint i=1; i<root->req(); i++ )
    69.8 -    worklist->push(cfg->_bbs[root->in(i)->_idx]);
    69.9 +  for (uint i = 1; i < root->req(); i++) {
   69.10 +    Block* block = cfg->get_block_for_node(root->in(i));
   69.11 +    worklist->push(block);
   69.12 +  }
   69.13  
   69.14    // ZKM.jar includes tiny infinite loops which are unreached from below.
   69.15    // If we missed any blocks, we'll retry here after pushing all missed
   69.16    // blocks on the worklist.  Normally this outer loop never trips more
   69.17    // than once.
   69.18 -  while( 1 ) {
   69.19 +  while (1) {
   69.20  
   69.21      while( worklist->size() ) { // Standard worklist algorithm
   69.22        Block *b = worklist->rpop();
   69.23 @@ -537,8 +539,10 @@
   69.24          for( l=0; l<max_reg_ints; l++ )
   69.25            old_live[l] = tmp_live[l];
   69.26          // Push preds onto worklist
   69.27 -        for( l=1; l<(int)b->num_preds(); l++ )
   69.28 -          worklist->push(cfg->_bbs[b->pred(l)->_idx]);
   69.29 +        for (l = 1; l < (int)b->num_preds(); l++) {
   69.30 +          Block* block = cfg->get_block_for_node(b->pred(l));
   69.31 +          worklist->push(block);
   69.32 +        }
   69.33        }
   69.34      }
   69.35  
   69.36 @@ -629,10 +633,9 @@
   69.37      // pred to this block.  Otherwise we have to grab a new OopFlow.
   69.38      OopFlow *flow = NULL;       // Flag for finding optimized flow
   69.39      Block *pred = (Block*)0xdeadbeef;
   69.40 -    uint j;
   69.41      // Scan this block's preds to find a done predecessor
   69.42 -    for( j=1; j<b->num_preds(); j++ ) {
   69.43 -      Block *p = _cfg->_bbs[b->pred(j)->_idx];
   69.44 +    for (uint j = 1; j < b->num_preds(); j++) {
   69.45 +      Block* p = _cfg->get_block_for_node(b->pred(j));
   69.46        OopFlow *p_flow = flows[p->_pre_order];
   69.47        if( p_flow ) {            // Predecessor is done
   69.48          assert( p_flow->_b == p, "cross check" );
    70.1 --- a/src/share/vm/opto/c2_globals.hpp	Mon Aug 19 17:47:21 2013 +0200
    70.2 +++ b/src/share/vm/opto/c2_globals.hpp	Fri Aug 23 22:12:18 2013 +0100
    70.3 @@ -179,6 +179,9 @@
    70.4    product_pd(intx,  LoopUnrollLimit,                                        \
    70.5            "Unroll loop bodies with node count less than this")              \
    70.6                                                                              \
    70.7 +  product(intx,  LoopMaxUnroll, 16,                                         \
    70.8 +          "Maximum number of unrolls for main loop")                        \
    70.9 +                                                                            \
   70.10    product(intx,  LoopUnrollMin, 4,                                          \
    70.11           "Minimum number of unroll loop bodies before checking progress "  \
   70.12            "of rounds of unroll,optimize,..")                                \
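
Being a product flag, the new LoopMaxUnroll can be set like any other -XX option; an illustrative invocation (MyApp is a placeholder):

    java -XX:LoopMaxUnroll=8 MyApp   # cap main-loop unrolling at 8 bodies (default 16)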
    71.1 --- a/src/share/vm/opto/chaitin.cpp	Mon Aug 19 17:47:21 2013 +0200
    71.2 +++ b/src/share/vm/opto/chaitin.cpp	Fri Aug 23 22:12:18 2013 +0100
    71.3 @@ -295,7 +295,7 @@
    71.4  
    71.5  
    71.6  bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
    71.7 -  Block *bcon = _cfg._bbs[con->_idx];
    71.8 +  Block* bcon = _cfg.get_block_for_node(con);
    71.9    uint cindex = bcon->find_node(con);
   71.10    Node *con_next = bcon->_nodes[cindex+1];
   71.11    if (con_next->in(0) != con || !con_next->is_MachProj()) {
   71.12 @@ -306,7 +306,7 @@
   71.13    Node *kills = con_next->clone();
   71.14    kills->set_req(0, copy);
   71.15    b->_nodes.insert(idx, kills);
   71.16 -  _cfg._bbs.map(kills->_idx, b);
   71.17 +  _cfg.map_node_to_block(kills, b);
   71.18    new_lrg(kills, max_lrg_id);
   71.19    return true;
   71.20  }
   71.21 @@ -962,8 +962,7 @@
   71.22          // AggressiveCoalesce.  This effectively pre-virtual-splits
   71.23          // around uncommon uses of common defs.
   71.24          const RegMask &rm = n->in_RegMask(k);
   71.25 -        if( !after_aggressive &&
   71.26 -          _cfg._bbs[n->in(k)->_idx]->_freq > 1000*b->_freq ) {
   71.27 +        if (!after_aggressive && _cfg.get_block_for_node(n->in(k))->_freq > 1000 * b->_freq) {
   71.28            // Since we are BEFORE aggressive coalesce, leave the register
   71.29            // mask untrimmed by the call.  This encourages more coalescing.
   71.30            // Later, AFTER aggressive, this live range will have to spill
   71.31 @@ -1709,16 +1708,15 @@
   71.32        // set control to _root and place it into Start block
   71.33        // (where top() node is placed).
   71.34        base->init_req(0, _cfg._root);
   71.35 -      Block *startb = _cfg._bbs[C->top()->_idx];
   71.36 +      Block *startb = _cfg.get_block_for_node(C->top());
   71.37        startb->_nodes.insert(startb->find_node(C->top()), base );
   71.38 -      _cfg._bbs.map( base->_idx, startb );
   71.39 +      _cfg.map_node_to_block(base, startb);
   71.40        assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
   71.41      }
   71.42      if (_lrg_map.live_range_id(base) == 0) {
   71.43        new_lrg(base, maxlrg++);
   71.44      }
   71.45 -    assert(base->in(0) == _cfg._root &&
   71.46 -           _cfg._bbs[base->_idx] == _cfg._bbs[C->top()->_idx], "base NULL should be shared");
   71.47 +    assert(base->in(0) == _cfg._root && _cfg.get_block_for_node(base) == _cfg.get_block_for_node(C->top()), "base NULL should be shared");
   71.48      derived_base_map[derived->_idx] = base;
   71.49      return base;
   71.50    }
   71.51 @@ -1754,12 +1752,12 @@
   71.52    base->as_Phi()->set_type(t);
   71.53  
   71.54    // Search the current block for an existing base-Phi
   71.55 -  Block *b = _cfg._bbs[derived->_idx];
   71.56 +  Block *b = _cfg.get_block_for_node(derived);
   71.57    for( i = 1; i <= b->end_idx(); i++ ) {// Search for matching Phi
   71.58      Node *phi = b->_nodes[i];
   71.59      if( !phi->is_Phi() ) {      // Found end of Phis with no match?
   71.60        b->_nodes.insert( i, base ); // Must insert created Phi here as base
   71.61 -      _cfg._bbs.map( base->_idx, b );
   71.62 +      _cfg.map_node_to_block(base, b);
   71.63        new_lrg(base,maxlrg++);
   71.64        break;
   71.65      }
   71.66 @@ -1815,8 +1813,8 @@
   71.67        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CmpI ) {
   71.68          Node *phi = n->in(1);
   71.69          if( phi->is_Phi() && phi->as_Phi()->region()->is_Loop() ) {
   71.70 -          Block *phi_block = _cfg._bbs[phi->_idx];
   71.71 -          if( _cfg._bbs[phi_block->pred(2)->_idx] == b ) {
   71.72 +          Block *phi_block = _cfg.get_block_for_node(phi);
   71.73 +          if (_cfg.get_block_for_node(phi_block->pred(2)) == b) {
   71.74              const RegMask *mask = C->matcher()->idealreg2spillmask[Op_RegI];
   71.75              Node *spill = new (C) MachSpillCopyNode( phi, *mask, *mask );
   71.76              insert_proj( phi_block, 1, spill, maxlrg++ );
   71.77 @@ -1870,7 +1868,7 @@
   71.78              if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
   71.79                   !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
   71.80                   (_lrg_map.live_range_id(base) > 0) && // not a constant
   71.81 -                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
   71.82 +                 _cfg.get_block_for_node(base) != b) { // base not def'd in blk)
   71.83                // Base pointer is not currently live.  Since I stretched
   71.84                // the base pointer to here and it crosses basic-block
   71.85                // boundaries, the global live info is now incorrect.
   71.86 @@ -1993,8 +1991,8 @@
   71.87    tty->print("\n");
   71.88  }
   71.89  
   71.90 -void PhaseChaitin::dump( const Block * b ) const {
   71.91 -  b->dump_head( &_cfg._bbs );
   71.92 +void PhaseChaitin::dump(const Block *b) const {
   71.93 +  b->dump_head(&_cfg);
   71.94  
   71.95    // For all instructions
   71.96    for( uint j = 0; j < b->_nodes.size(); j++ )
   71.97 @@ -2299,7 +2297,7 @@
   71.98        if (_lrg_map.find_const(n) == lidx) {
   71.99          if (!dump_once++) {
  71.100            tty->cr();
  71.101 -          b->dump_head( &_cfg._bbs );
  71.102 +          b->dump_head(&_cfg);
  71.103          }
  71.104          dump(n);
  71.105          continue;
  71.106 @@ -2314,7 +2312,7 @@
  71.107            if (_lrg_map.find_const(m) == lidx) {
  71.108              if (!dump_once++) {
  71.109                tty->cr();
  71.110 -              b->dump_head(&_cfg._bbs);
  71.111 +              b->dump_head(&_cfg);
  71.112              }
  71.113              dump(n);
  71.114            }
    72.1 --- a/src/share/vm/opto/coalesce.cpp	Mon Aug 19 17:47:21 2013 +0200
    72.2 +++ b/src/share/vm/opto/coalesce.cpp	Fri Aug 23 22:12:18 2013 +0100
    72.3 @@ -52,7 +52,7 @@
    72.4      // Print a nice block header
    72.5      tty->print("B%d: ",b->_pre_order);
    72.6      for( j=1; j<b->num_preds(); j++ )
    72.7 -      tty->print("B%d ", _phc._cfg._bbs[b->pred(j)->_idx]->_pre_order);
    72.8 +      tty->print("B%d ", _phc._cfg.get_block_for_node(b->pred(j))->_pre_order);
    72.9      tty->print("-> ");
   72.10      for( j=0; j<b->_num_succs; j++ )
   72.11        tty->print("B%d ",b->_succs[j]->_pre_order);
   72.12 @@ -208,7 +208,7 @@
   72.13      copy->set_req(idx,tmp);
   72.14      // Save source in temp early, before source is killed
   72.15      b->_nodes.insert(kill_src_idx,tmp);
   72.16 -    _phc._cfg._bbs.map( tmp->_idx, b );
   72.17 +    _phc._cfg.map_node_to_block(tmp, b);
   72.18      last_use_idx++;
   72.19    }
   72.20  
   72.21 @@ -286,7 +286,7 @@
   72.22            Node *m = n->in(j);
   72.23            uint src_name = _phc._lrg_map.find(m);
   72.24            if (src_name != phi_name) {
   72.25 -            Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
   72.26 +            Block *pred = _phc._cfg.get_block_for_node(b->pred(j));
   72.27              Node *copy;
   72.28              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
   72.29              // Rematerialize constants instead of copying them
   72.30 @@ -305,7 +305,7 @@
   72.31              }
   72.32              // Insert the copy in the use-def chain
   72.33              n->set_req(j, copy);
   72.34 -            _phc._cfg._bbs.map( copy->_idx, pred );
   72.35 +            _phc._cfg.map_node_to_block(copy, pred);
   72.36              // Extend ("register allocate") the names array for the copy.
   72.37              _phc._lrg_map.extend(copy->_idx, phi_name);
   72.38            } // End of if Phi names do not match
   72.39 @@ -343,13 +343,13 @@
   72.40              n->set_req(idx, copy);
   72.41              // Extend ("register allocate") the names array for the copy.
   72.42              _phc._lrg_map.extend(copy->_idx, name);
   72.43 -            _phc._cfg._bbs.map( copy->_idx, b );
   72.44 +            _phc._cfg.map_node_to_block(copy, b);
   72.45            }
   72.46  
   72.47          } // End of is two-adr
   72.48  
   72.49          // Insert a copy at a debug use for a lrg which has high frequency
   72.50 -        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
   72.51 +        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(&_phc._cfg)) {
   72.52            // Walk the debug inputs to the node and check for lrg freq
   72.53            JVMState* jvms = n->jvms();
   72.54            uint debug_start = jvms ? jvms->debug_start() : 999999;
   72.55 @@ -391,7 +391,7 @@
   72.56                uint max_lrg_id = _phc._lrg_map.max_lrg_id();
   72.57                _phc.new_lrg(copy, max_lrg_id);
   72.58                _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
   72.59 -              _phc._cfg._bbs.map(copy->_idx, b);
   72.60 +              _phc._cfg.map_node_to_block(copy, b);
   72.61                //tty->print_cr("Split a debug use in Aggressive Coalesce");
   72.62              }  // End of if high frequency use/def
   72.63            }  // End of for all debug inputs
   72.64 @@ -437,7 +437,10 @@
   72.65      Block *bs = b->_succs[i];
   72.66      // Find index of 'b' in 'bs' predecessors
   72.67      uint j=1;
   72.68 -    while( _phc._cfg._bbs[bs->pred(j)->_idx] != b ) j++;
   72.69 +    while (_phc._cfg.get_block_for_node(bs->pred(j)) != b) {
   72.70 +      j++;
   72.71 +    }
   72.72 +
   72.73      // Visit all the Phis in successor block
   72.74      for( uint k = 1; k<bs->_nodes.size(); k++ ) {
   72.75        Node *n = bs->_nodes[k];
   72.76 @@ -510,9 +513,9 @@
   72.77    if( bindex < b->_fhrp_index ) b->_fhrp_index--;
   72.78  
   72.79    // Stretched lr1; add it to liveness of intermediate blocks
   72.80 -  Block *b2 = _phc._cfg._bbs[src_copy->_idx];
   72.81 +  Block *b2 = _phc._cfg.get_block_for_node(src_copy);
   72.82    while( b != b2 ) {
   72.83 -    b = _phc._cfg._bbs[b->pred(1)->_idx];
   72.84 +    b = _phc._cfg.get_block_for_node(b->pred(1));
   72.85      _phc._live->live(b)->insert(lr1);
   72.86    }
   72.87  }
   72.88 @@ -532,7 +535,7 @@
   72.89      bindex2--;                  // Chain backwards 1 instruction
   72.90      while( bindex2 == 0 ) {     // At block start, find prior block
   72.91        assert( b2->num_preds() == 2, "cannot double coalesce across c-flow" );
   72.92 -      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
   72.93 +      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
   72.94        bindex2 = b2->end_idx()-1;
   72.95      }
   72.96      // Get prior instruction
   72.97 @@ -676,8 +679,8 @@
   72.98  
   72.99    if (UseFPUForSpilling && rm.is_AllStack() ) {
  72.100      // Don't coalesce when frequency difference is large
  72.101 -    Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
  72.102 -    Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
  72.103 +    Block *dst_b = _phc._cfg.get_block_for_node(dst_copy);
  72.104 +    Block *src_def_b = _phc._cfg.get_block_for_node(src_def);
  72.105      if (src_def_b->_freq > 10*dst_b->_freq )
  72.106        return false;
  72.107    }
  72.108 @@ -690,7 +693,7 @@
  72.109    // Another early bail-out test is when we are double-coalescing and the
  72.110    // 2 copies are separated by some control flow.
  72.111    if( dst_copy != src_copy ) {
  72.112 -    Block *src_b = _phc._cfg._bbs[src_copy->_idx];
  72.113 +    Block *src_b = _phc._cfg.get_block_for_node(src_copy);
  72.114      Block *b2 = b;
  72.115      while( b2 != src_b ) {
  72.116        if( b2->num_preds() > 2 ){// Found merge-point
  72.117 @@ -701,7 +704,7 @@
  72.118          //record_bias( _phc._lrgs, lr1, lr2 );
   72.119        return false;           // Too hard to find all interferences
  72.120        }
  72.121 -      b2 = _phc._cfg._bbs[b2->pred(1)->_idx];
  72.122 +      b2 = _phc._cfg.get_block_for_node(b2->pred(1));
  72.123      }
  72.124    }
  72.125  
  72.126 @@ -786,8 +789,9 @@
  72.127  // Conservative (but pessimistic) copy coalescing of a single block
  72.128  void PhaseConservativeCoalesce::coalesce( Block *b ) {
  72.129    // Bail out on infrequent blocks
  72.130 -  if( b->is_uncommon(_phc._cfg._bbs) )
  72.131 +  if (b->is_uncommon(&_phc._cfg)) {
  72.132      return;
  72.133 +  }
  72.134    // Check this block for copies.
  72.135    for( uint i = 1; i<b->end_idx(); i++ ) {
  72.136      // Check for actual copies on inputs.  Coalesce a copy into its
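
The coalesce.cpp hunks above show the pattern the rest of this changeset repeats: raw indexing into the public _bbs Block_Array is replaced by named PhaseCFG accessors. A minimal sketch of what those accessors presumably look like, inferred only from call sites in this diff such as _phc._cfg.get_block_for_node(src_copy) and _phc._cfg.map_node_to_block(copy, pred); the member name and method bodies here are assumptions, not text from the changeset:

    // Sketch only -- accessor shapes inferred from the call sites above.
    class PhaseCFG {
     private:
      Block_Array _node_to_block_mapping;   // assumed member name; Node::_idx -> Block*
     public:
      Block* get_block_for_node(const Node* node) const {
        return _node_to_block_mapping[node->_idx];
      }
      void map_node_to_block(const Node* node, Block* block) {
        _node_to_block_mapping.map(node->_idx, block);
      }
      bool has_block(const Node* node) const {
        return (_node_to_block_mapping.lookup(node->_idx) != NULL);
      }
      void unmap_node_from_block(const Node* node) {
        _node_to_block_mapping.map(node->_idx, NULL);
      }
    };

Note also that Block::is_uncommon now takes &_phc._cfg rather than the raw array, so that query stays behind the same interface.
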
    73.1 --- a/src/share/vm/opto/compile.cpp	Mon Aug 19 17:47:21 2013 +0200
    73.2 +++ b/src/share/vm/opto/compile.cpp	Fri Aug 23 22:12:18 2013 +0100
    73.3 @@ -2262,7 +2262,7 @@
    73.4        tty->print("%3.3x   ", pcs[n->_idx]);
    73.5      else
    73.6        tty->print("      ");
    73.7 -    b->dump_head( &_cfg->_bbs );
    73.8 +    b->dump_head(_cfg);
    73.9      if (b->is_connector()) {
   73.10        tty->print_cr("        # Empty connector block");
   73.11      } else if (b->num_preds() == 2 && b->pred(1)->is_CatchProj() && b->pred(1)->as_CatchProj()->_con == CatchProjNode::fall_through_index) {
   73.12 @@ -3525,7 +3525,7 @@
   73.13  }
   73.14  
   73.15  Compile::Constant Compile::ConstantTable::add(MachConstantNode* n, BasicType type, jvalue value) {
   73.16 -  Block* b = Compile::current()->cfg()->_bbs[n->_idx];
   73.17 +  Block* b = Compile::current()->cfg()->get_block_for_node(n);
   73.18    Constant con(type, value, b->_freq);
   73.19    add(con);
   73.20    return con;
    74.1 --- a/src/share/vm/opto/domgraph.cpp	Mon Aug 19 17:47:21 2013 +0200
    74.2 +++ b/src/share/vm/opto/domgraph.cpp	Fri Aug 23 22:12:18 2013 +0100
    74.3 @@ -105,8 +105,8 @@
    74.4  
    74.5      // Step 2:
    74.6      Node *whead = w->_block->head();
    74.7 -    for( uint j=1; j < whead->req(); j++ ) {
    74.8 -      Block *b = _bbs[whead->in(j)->_idx];
    74.9 +    for (uint j = 1; j < whead->req(); j++) {
   74.10 +      Block* b = get_block_for_node(whead->in(j));
   74.11        Tarjan *vx = &tarjan[b->_pre_order];
   74.12        Tarjan *u = vx->EVAL();
   74.13        if( u->_semi < w->_semi )
    75.1 --- a/src/share/vm/opto/gcm.cpp	Mon Aug 19 17:47:21 2013 +0200
    75.2 +++ b/src/share/vm/opto/gcm.cpp	Fri Aug 23 22:12:18 2013 +0100
    75.3 @@ -66,7 +66,7 @@
    75.4  // are in b also.
    75.5  void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
    75.6    // Set basic block of n, Add n to b,
    75.7 -  _bbs.map(n->_idx, b);
    75.8 +  map_node_to_block(n, b);
    75.9    b->add_inst(n);
   75.10  
   75.11    // After Matching, nearly any old Node may have projections trailing it.
   75.12 @@ -75,11 +75,12 @@
   75.13    for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
   75.14      Node*  use  = n->fast_out(i);
   75.15      if (use->is_Proj()) {
   75.16 -      Block* buse = _bbs[use->_idx];
   75.17 +      Block* buse = get_block_for_node(use);
   75.18        if (buse != b) {              // In wrong block?
   75.19 -        if (buse != NULL)
   75.20 +        if (buse != NULL) {
   75.21            buse->find_remove(use);   // Remove from wrong block
   75.22 -        _bbs.map(use->_idx, b);     // Re-insert in this block
   75.23 +        }
   75.24 +        map_node_to_block(use, b);
   75.25          b->add_inst(use);
   75.26        }
   75.27      }
   75.28 @@ -97,7 +98,7 @@
   75.29    if (p != NULL && p != n) {    // Control from a block projection?
   75.30      assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
   75.31      // Find trailing Region
   75.32 -    Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
   75.33 +    Block *pb = get_block_for_node(in0); // Block-projection already has basic block
   75.34      uint j = 0;
    75.35      if (pb->_num_succs != 1) {  // More than 1 successor?
   75.36        // Search for successor
   75.37 @@ -127,14 +128,15 @@
   75.38    while ( spstack.is_nonempty() ) {
   75.39      Node *n = spstack.pop();
   75.40      if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
   75.41 -      if( n->pinned() && !_bbs.lookup(n->_idx) ) {  // Pinned?  Nail it down!
   75.42 +      if( n->pinned() && !has_block(n)) {  // Pinned?  Nail it down!
   75.43          assert( n->in(0), "pinned Node must have Control" );
   75.44          // Before setting block replace block_proj control edge
   75.45          replace_block_proj_ctrl(n);
   75.46          Node *input = n->in(0);
   75.47 -        while( !input->is_block_start() )
   75.48 +        while (!input->is_block_start()) {
   75.49            input = input->in(0);
   75.50 -        Block *b = _bbs[input->_idx];  // Basic block of controlling input
   75.51 +        }
   75.52 +        Block *b = get_block_for_node(input); // Basic block of controlling input
   75.53          schedule_node_into_block(n, b);
   75.54        }
   75.55        for( int i = n->req() - 1; i >= 0; --i ) {  // For all inputs
   75.56 @@ -149,7 +151,7 @@
   75.57  // Assert that new input b2 is dominated by all previous inputs.
    75.58 // Check this by seeing that it is dominated by b1, the deepest
   75.59  // input observed until b2.
   75.60 -static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
   75.61 +static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
   75.62    if (b1 == NULL)  return;
   75.63    assert(b1->_dom_depth < b2->_dom_depth, "sanity");
   75.64    Block* tmp = b2;
   75.65 @@ -162,7 +164,7 @@
   75.66      for (uint j=0; j<n->len(); j++) { // For all inputs
   75.67        Node* inn = n->in(j); // Get input
   75.68        if (inn == NULL)  continue;  // Ignore NULL, missing inputs
   75.69 -      Block* inb = bbs[inn->_idx];
   75.70 +      Block* inb = cfg->get_block_for_node(inn);
   75.71        tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
   75.72                   inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
   75.73        inn->dump();
   75.74 @@ -174,20 +176,20 @@
   75.75  }
   75.76  #endif
   75.77  
   75.78 -static Block* find_deepest_input(Node* n, Block_Array &bbs) {
   75.79 +static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
   75.80    // Find the last input dominated by all other inputs.
   75.81    Block* deepb           = NULL;        // Deepest block so far
   75.82    int    deepb_dom_depth = 0;
   75.83    for (uint k = 0; k < n->len(); k++) { // For all inputs
   75.84      Node* inn = n->in(k);               // Get input
   75.85      if (inn == NULL)  continue;         // Ignore NULL, missing inputs
   75.86 -    Block* inb = bbs[inn->_idx];
   75.87 +    Block* inb = cfg->get_block_for_node(inn);
   75.88      assert(inb != NULL, "must already have scheduled this input");
   75.89      if (deepb_dom_depth < (int) inb->_dom_depth) {
   75.90        // The new inb must be dominated by the previous deepb.
   75.91        // The various inputs must be linearly ordered in the dom
   75.92        // tree, or else there will not be a unique deepest block.
   75.93 -      DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
   75.94 +      DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
   75.95        deepb = inb;                      // Save deepest block
   75.96        deepb_dom_depth = deepb->_dom_depth;
   75.97      }
   75.98 @@ -243,7 +245,7 @@
   75.99          ++i;
  75.100          if (in == NULL) continue;    // Ignore NULL, missing inputs
  75.101          int is_visited = visited.test_set(in->_idx);
  75.102 -        if (!_bbs.lookup(in->_idx)) { // Missing block selection?
  75.103 +        if (!has_block(in)) { // Missing block selection?
  75.104            if (is_visited) {
  75.105              // assert( !visited.test(in->_idx), "did not schedule early" );
  75.106              return false;
  75.107 @@ -265,9 +267,9 @@
  75.108          // any projections which depend on them.
  75.109          if (!n->pinned()) {
  75.110            // Set earliest legal block.
  75.111 -          _bbs.map(n->_idx, find_deepest_input(n, _bbs));
  75.112 +          map_node_to_block(n, find_deepest_input(n, this));
  75.113          } else {
  75.114 -          assert(_bbs[n->_idx] == _bbs[n->in(0)->_idx], "Pinned Node should be at the same block as its control edge");
  75.115 +          assert(get_block_for_node(n) == get_block_for_node(n->in(0)), "Pinned Node should be at the same block as its control edge");
  75.116          }
  75.117  
  75.118          if (nstack.is_empty()) {
  75.119 @@ -313,8 +315,8 @@
  75.120  // The definition must dominate the use, so move the LCA upward in the
  75.121  // dominator tree to dominate the use.  If the use is a phi, adjust
  75.122  // the LCA only with the phi input paths which actually use this def.
  75.123 -static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
  75.124 -  Block* buse = bbs[use->_idx];
  75.125 +static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
  75.126 +  Block* buse = cfg->get_block_for_node(use);
  75.127    if (buse == NULL)    return LCA;   // Unused killing Projs have no use block
  75.128    if (!use->is_Phi())  return buse->dom_lca(LCA);
  75.129    uint pmax = use->req();       // Number of Phi inputs
  75.130 @@ -329,7 +331,7 @@
  75.131    // more than once.
  75.132    for (uint j=1; j<pmax; j++) { // For all inputs
  75.133      if (use->in(j) == def) {    // Found matching input?
  75.134 -      Block* pred = bbs[buse->pred(j)->_idx];
  75.135 +      Block* pred = cfg->get_block_for_node(buse->pred(j));
  75.136        LCA = pred->dom_lca(LCA);
  75.137      }
  75.138    }
  75.139 @@ -342,8 +344,7 @@
  75.140  // which are marked with the given index.  Return the LCA (in the dom tree)
  75.141  // of all marked blocks.  If there are none marked, return the original
  75.142  // LCA.
  75.143 -static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
  75.144 -                                    Block* early, Block_Array &bbs) {
  75.145 +static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
  75.146    Block_List worklist;
  75.147    worklist.push(LCA);
  75.148    while (worklist.size() > 0) {
  75.149 @@ -366,7 +367,7 @@
  75.150      } else {
  75.151        // Keep searching through this block's predecessors.
  75.152        for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
  75.153 -        Block* mid_parent = bbs[ mid->pred(j)->_idx ];
  75.154 +        Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
  75.155          worklist.push(mid_parent);
  75.156        }
  75.157      }
  75.158 @@ -384,7 +385,7 @@
  75.159  // be earlier (at a shallower dom_depth) than the true schedule_early
  75.160  // point of the node. We compute this earlier block as a more permissive
  75.161  // site for anti-dependency insertion, but only if subsume_loads is enabled.
  75.162 -static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
  75.163 +static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
  75.164    Node* base;
  75.165    Node* index;
  75.166    Node* store = load->in(MemNode::Memory);
  75.167 @@ -412,12 +413,12 @@
  75.168      Block* deepb           = NULL;        // Deepest block so far
  75.169      int    deepb_dom_depth = 0;
  75.170      for (int i = 0; i < mem_inputs_length; i++) {
  75.171 -      Block* inb = bbs[mem_inputs[i]->_idx];
  75.172 +      Block* inb = cfg->get_block_for_node(mem_inputs[i]);
  75.173        if (deepb_dom_depth < (int) inb->_dom_depth) {
  75.174          // The new inb must be dominated by the previous deepb.
  75.175          // The various inputs must be linearly ordered in the dom
  75.176          // tree, or else there will not be a unique deepest block.
  75.177 -        DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
  75.178 +        DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
  75.179          deepb = inb;                      // Save deepest block
  75.180          deepb_dom_depth = deepb->_dom_depth;
  75.181        }
  75.182 @@ -488,14 +489,14 @@
  75.183    // and other inputs are first available.  (Computed by schedule_early.)
  75.184    // For normal loads, 'early' is the shallowest place (dom graph wise)
  75.185    // to look for anti-deps between this load and any store.
  75.186 -  Block* early = _bbs[load_index];
  75.187 +  Block* early = get_block_for_node(load);
  75.188  
  75.189    // If we are subsuming loads, compute an "early" block that only considers
  75.190    // memory or address inputs. This block may be different than the
  75.191    // schedule_early block in that it could be at an even shallower depth in the
  75.192    // dominator tree, and allow for a broader discovery of anti-dependences.
  75.193    if (C->subsume_loads()) {
  75.194 -    early = memory_early_block(load, early, _bbs);
  75.195 +    early = memory_early_block(load, early, this);
  75.196    }
  75.197  
  75.198    ResourceArea *area = Thread::current()->resource_area();
  75.199 @@ -619,7 +620,7 @@
  75.200      // or else observe that 'store' is all the way up in the
  75.201      // earliest legal block for 'load'.  In the latter case,
  75.202      // immediately insert an anti-dependence edge.
  75.203 -    Block* store_block = _bbs[store->_idx];
  75.204 +    Block* store_block = get_block_for_node(store);
  75.205      assert(store_block != NULL, "unused killing projections skipped above");
  75.206  
  75.207      if (store->is_Phi()) {
  75.208 @@ -637,7 +638,7 @@
  75.209        for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
  75.210          if (store->in(j) == mem) {   // Found matching input?
  75.211            DEBUG_ONLY(found_match = true);
  75.212 -          Block* pred_block = _bbs[store_block->pred(j)->_idx];
  75.213 +          Block* pred_block = get_block_for_node(store_block->pred(j));
  75.214            if (pred_block != early) {
  75.215              // If any predecessor of the Phi matches the load's "early block",
  75.216              // we do not need a precedence edge between the Phi and 'load'
  75.217 @@ -711,7 +712,7 @@
  75.218    // preventing the load from sinking past any block containing
  75.219    // a store that may invalidate the memory state required by 'load'.
  75.220    if (must_raise_LCA)
  75.221 -    LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
  75.222 +    LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
  75.223    if (LCA == early)  return LCA;
  75.224  
  75.225    // Insert anti-dependence edges from 'load' to each store
  75.226 @@ -720,7 +721,7 @@
  75.227    if (LCA->raise_LCA_mark() == load_index) {
  75.228      while (non_early_stores.size() > 0) {
  75.229        Node* store = non_early_stores.pop();
  75.230 -      Block* store_block = _bbs[store->_idx];
  75.231 +      Block* store_block = get_block_for_node(store);
  75.232        if (store_block == LCA) {
  75.233          // add anti_dependence from store to load in its own block
  75.234          assert(store != load->in(0), "dependence cycle found");
  75.235 @@ -754,7 +755,7 @@
  75.236  
  75.237  public:
  75.238    // Constructor for the iterator
  75.239 -  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
  75.240 +  Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
  75.241  
  75.242    // Postincrement operator to iterate over the nodes
  75.243    Node *next();
  75.244 @@ -762,12 +763,12 @@
  75.245  private:
  75.246    VectorSet   &_visited;
  75.247    Node_List   &_stack;
  75.248 -  Block_Array &_bbs;
  75.249 +  PhaseCFG &_cfg;
  75.250  };
  75.251  
  75.252  // Constructor for the Node_Backward_Iterator
  75.253 -Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
  75.254 -  : _visited(visited), _stack(stack), _bbs(bbs) {
  75.255 +Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
  75.256 +  : _visited(visited), _stack(stack), _cfg(cfg) {
  75.257    // The stack should contain exactly the root
  75.258    stack.clear();
  75.259    stack.push(root);
  75.260 @@ -797,8 +798,8 @@
  75.261      _visited.set(self->_idx);
  75.262  
  75.263      // Now schedule all uses as late as possible.
  75.264 -    uint src     = self->is_Proj() ? self->in(0)->_idx : self->_idx;
  75.265 -    uint src_rpo = _bbs[src]->_rpo;
  75.266 +    const Node* src = self->is_Proj() ? self->in(0) : self;
  75.267 +    uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
  75.268  
  75.269      // Schedule all nodes in a post-order visit
  75.270      Node *unvisited = NULL;  // Unvisited anti-dependent Node, if any
  75.271 @@ -814,7 +815,7 @@
  75.272  
  75.273        // do not traverse backward control edges
  75.274        Node *use = n->is_Proj() ? n->in(0) : n;
  75.275 -      uint use_rpo = _bbs[use->_idx]->_rpo;
  75.276 +      uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
  75.277  
  75.278        if ( use_rpo < src_rpo )
  75.279          continue;
  75.280 @@ -852,7 +853,7 @@
  75.281      tty->print("\n#---- ComputeLatenciesBackwards ----\n");
  75.282  #endif
  75.283  
  75.284 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  75.285 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  75.286    Node *n;
  75.287  
  75.288    // Walk over all the nodes from last to first
  75.289 @@ -883,7 +884,7 @@
  75.290  
  75.291    uint nlen = n->len();
  75.292    uint use_latency = _node_latency->at_grow(n->_idx);
  75.293 -  uint use_pre_order = _bbs[n->_idx]->_pre_order;
  75.294 +  uint use_pre_order = get_block_for_node(n)->_pre_order;
  75.295  
  75.296    for ( uint j=0; j<nlen; j++ ) {
  75.297      Node *def = n->in(j);
  75.298 @@ -903,7 +904,7 @@
  75.299  #endif
  75.300  
  75.301      // If the defining block is not known, assume it is ok
  75.302 -    Block *def_block = _bbs[def->_idx];
  75.303 +    Block *def_block = get_block_for_node(def);
  75.304      uint def_pre_order = def_block ? def_block->_pre_order : 0;
  75.305  
  75.306      if ( (use_pre_order <  def_pre_order) ||
  75.307 @@ -931,10 +932,11 @@
  75.308  // Compute the latency of a specific use
  75.309  int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
  75.310    // If self-reference, return no latency
  75.311 -  if (use == n || use->is_Root())
  75.312 +  if (use == n || use->is_Root()) {
  75.313      return 0;
  75.314 +  }
  75.315  
  75.316 -  uint def_pre_order = _bbs[def->_idx]->_pre_order;
  75.317 +  uint def_pre_order = get_block_for_node(def)->_pre_order;
  75.318    uint latency = 0;
  75.319  
  75.320    // If the use is not a projection, then it is simple...
  75.321 @@ -946,7 +948,7 @@
  75.322      }
  75.323  #endif
  75.324  
  75.325 -    uint use_pre_order = _bbs[use->_idx]->_pre_order;
  75.326 +    uint use_pre_order = get_block_for_node(use)->_pre_order;
  75.327  
  75.328      if (use_pre_order < def_pre_order)
  75.329        return 0;
  75.330 @@ -1018,7 +1020,7 @@
  75.331    uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
  75.332    uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
  75.333    bool in_latency    = (target <= start_latency);
  75.334 -  const Block* root_block = _bbs[_root->_idx];
  75.335 +  const Block* root_block = get_block_for_node(_root);
  75.336  
  75.337    // Turn off latency scheduling if scheduling is just plain off
  75.338    if (!C->do_scheduling())
  75.339 @@ -1126,12 +1128,12 @@
  75.340      tty->print("\n#---- schedule_late ----\n");
  75.341  #endif
  75.342  
  75.343 -  Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
  75.344 +  Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
  75.345    Node *self;
  75.346  
  75.347    // Walk over all the nodes from last to first
  75.348    while (self = iter.next()) {
  75.349 -    Block* early = _bbs[self->_idx];   // Earliest legal placement
  75.350 +    Block* early = get_block_for_node(self); // Earliest legal placement
  75.351  
  75.352      if (self->is_top()) {
  75.353        // Top node goes in bb #2 with other constants.
  75.354 @@ -1179,7 +1181,7 @@
  75.355        for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
  75.356          // For all uses, find LCA
  75.357          Node* use = self->fast_out(i);
  75.358 -        LCA = raise_LCA_above_use(LCA, use, self, _bbs);
  75.359 +        LCA = raise_LCA_above_use(LCA, use, self, this);
  75.360        }
  75.361      }  // (Hide defs of imax, i from rest of block.)
  75.362  
  75.363 @@ -1187,7 +1189,7 @@
  75.364      // requirement for correctness but it reduces useless
  75.365      // interference between temps and other nodes.
  75.366      if (mach != NULL && mach->is_MachTemp()) {
  75.367 -      _bbs.map(self->_idx, LCA);
  75.368 +      map_node_to_block(self, LCA);
  75.369        LCA->add_inst(self);
  75.370        continue;
  75.371      }
  75.372 @@ -1262,10 +1264,10 @@
  75.373    }
  75.374  #endif
  75.375  
  75.376 -  // Initialize the bbs.map for things on the proj_list
  75.377 -  uint i;
  75.378 -  for( i=0; i < proj_list.size(); i++ )
  75.379 -    _bbs.map(proj_list[i]->_idx, NULL);
  75.380 +  // Initialize the node to block mapping for things on the proj_list
  75.381 +  for (uint i = 0; i < proj_list.size(); i++) {
  75.382 +    unmap_node_from_block(proj_list[i]);
  75.383 +  }
  75.384  
  75.385    // Set the basic block for Nodes pinned into blocks
  75.386    Arena *a = Thread::current()->resource_area();
  75.387 @@ -1333,7 +1335,7 @@
  75.388      for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
  75.389        Node *proj = matcher._null_check_tests[i  ];
  75.390        Node *val  = matcher._null_check_tests[i+1];
  75.391 -      _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
  75.392 +      get_block_for_node(proj)->implicit_null_check(this, proj, val, allowed_reasons);
  75.393        // The implicit_null_check will only perform the transformation
  75.394        // if the null branch is truly uncommon, *and* it leads to an
  75.395        // uncommon trap.  Combined with the too_many_traps guards
  75.396 @@ -1353,7 +1355,7 @@
  75.397    uint max_idx = C->unique();
  75.398    GrowableArray<int> ready_cnt(max_idx, max_idx, -1);
  75.399    visited.Clear();
  75.400 -  for (i = 0; i < _num_blocks; i++) {
  75.401 +  for (uint i = 0; i < _num_blocks; i++) {
  75.402      if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
  75.403        if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
  75.404          C->record_method_not_compilable("local schedule failed");
  75.405 @@ -1364,8 +1366,9 @@
  75.406  
   75.407    // If we inserted any instructions between a Call and its CatchNode,
  75.408    // clone the instructions on all paths below the Catch.
  75.409 -  for( i=0; i < _num_blocks; i++ )
  75.410 -    _blocks[i]->call_catch_cleanup(_bbs, C);
  75.411 +  for (uint i = 0; i < _num_blocks; i++) {
  75.412 +    _blocks[i]->call_catch_cleanup(this, C);
  75.413 +  }
  75.414  
  75.415  #ifndef PRODUCT
  75.416    if (trace_opto_pipelining()) {
  75.417 @@ -1392,7 +1395,7 @@
  75.418      Block_List worklist;
  75.419      Block* root_blk = _blocks[0];
  75.420      for (uint i = 1; i < root_blk->num_preds(); i++) {
  75.421 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
  75.422 +      Block *pb = get_block_for_node(root_blk->pred(i));
  75.423        if (pb->has_uncommon_code()) {
  75.424          worklist.push(pb);
  75.425        }
  75.426 @@ -1401,7 +1404,7 @@
  75.427        Block* uct = worklist.pop();
  75.428        if (uct == _broot) continue;
  75.429        for (uint i = 1; i < uct->num_preds(); i++) {
  75.430 -        Block *pb = _bbs[uct->pred(i)->_idx];
  75.431 +        Block *pb = get_block_for_node(uct->pred(i));
  75.432          if (pb->_num_succs == 1) {
  75.433            worklist.push(pb);
  75.434          } else if (pb->num_fall_throughs() == 2) {
  75.435 @@ -1430,7 +1433,7 @@
  75.436      Block_List worklist;
  75.437      Block* root_blk = _blocks[0];
  75.438      for (uint i = 1; i < root_blk->num_preds(); i++) {
  75.439 -      Block *pb = _bbs[root_blk->pred(i)->_idx];
  75.440 +      Block *pb = get_block_for_node(root_blk->pred(i));
  75.441        if (pb->has_uncommon_code()) {
  75.442          worklist.push(pb);
  75.443        }
  75.444 @@ -1439,7 +1442,7 @@
  75.445        Block* uct = worklist.pop();
  75.446        uct->_freq = PROB_MIN;
  75.447        for (uint i = 1; i < uct->num_preds(); i++) {
  75.448 -        Block *pb = _bbs[uct->pred(i)->_idx];
  75.449 +        Block *pb = get_block_for_node(uct->pred(i));
  75.450          if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
  75.451            worklist.push(pb);
  75.452          }
  75.453 @@ -1499,7 +1502,7 @@
  75.454        Block* loop_head = b;
  75.455        assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  75.456        Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
  75.457 -      Block* tail = _bbs[tail_n->_idx];
  75.458 +      Block* tail = get_block_for_node(tail_n);
  75.459  
  75.460        // Defensively filter out Loop nodes for non-single-entry loops.
  75.461        // For all reasonable loops, the head occurs before the tail in RPO.
  75.462 @@ -1514,13 +1517,13 @@
  75.463          loop_head->_loop = nloop;
  75.464          // Add to nloop so push_pred() will skip over inner loops
  75.465          nloop->add_member(loop_head);
  75.466 -        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
  75.467 +        nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
  75.468  
  75.469          while (worklist.size() > 0) {
  75.470            Block* member = worklist.pop();
  75.471            if (member != loop_head) {
  75.472              for (uint j = 1; j < member->num_preds(); j++) {
  75.473 -              nloop->push_pred(member, j, worklist, _bbs);
  75.474 +              nloop->push_pred(member, j, worklist, this);
  75.475              }
  75.476            }
  75.477          }
  75.478 @@ -1557,9 +1560,9 @@
  75.479  }
  75.480  
  75.481  //------------------------------push_pred--------------------------------------
  75.482 -void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
  75.483 +void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
  75.484    Node* pred_n = blk->pred(i);
  75.485 -  Block* pred = node_to_blk[pred_n->_idx];
  75.486 +  Block* pred = cfg->get_block_for_node(pred_n);
  75.487    CFGLoop *pred_loop = pred->_loop;
  75.488    if (pred_loop == NULL) {
  75.489      // Filter out blocks for non-single-entry loops.
  75.490 @@ -1580,7 +1583,7 @@
  75.491        Block* pred_head = pred_loop->head();
  75.492        assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
  75.493        assert(pred_head != head(), "loop head in only one loop");
  75.494 -      push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
  75.495 +      push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
  75.496      } else {
  75.497        assert(pred_loop->_parent == this && _parent == NULL, "just checking");
  75.498      }
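
Beyond the member accesses, the gcm.cpp hunks migrate the file-static helpers (assert_dom, find_deepest_input, raise_LCA_above_use, raise_LCA_above_marks, memory_early_block) from taking a Block_Array& to taking a const PhaseCFG*, with call sites passing this. A self-contained illustration of that refactoring pattern (deliberately not HotSpot code; types simplified):

    #include <cassert>
    #include <vector>

    struct Node  { unsigned idx; };
    struct Block { int dom_depth; };

    class Cfg {
      std::vector<Block*> node_to_block_;   // was effectively public, like _bbs
    public:
      explicit Cfg(unsigned max_nodes) : node_to_block_(max_nodes, nullptr) {}
      Block* get_block_for_node(const Node* n) const { return node_to_block_[n->idx]; }
      void   map_node_to_block(const Node* n, Block* b) { node_to_block_[n->idx] = b; }
    };

    // Helpers now receive the owning Cfg instead of its internal array,
    // so the representation of the mapping stays private.
    static Block* deeper_block(const Node* a, const Node* b, const Cfg* cfg) {
      Block* ba = cfg->get_block_for_node(a);
      Block* bb = cfg->get_block_for_node(b);
      return (ba->dom_depth >= bb->dom_depth) ? ba : bb;
    }

    int main() {
      Cfg cfg(2);
      Block shallow{1}, deep{3};
      Node n0{0}, n1{1};
      cfg.map_node_to_block(&n0, &shallow);
      cfg.map_node_to_block(&n1, &deep);
      assert(deeper_block(&n0, &n1, &cfg) == &deep);
      return 0;
    }
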
    76.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Mon Aug 19 17:47:21 2013 +0200
    76.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Fri Aug 23 22:12:18 2013 +0100
    76.3 @@ -413,9 +413,9 @@
    76.4      print_prop("debug_idx", node->_debug_idx);
    76.5  #endif
    76.6  
    76.7 -    if(C->cfg() != NULL) {
    76.8 -      Block *block = C->cfg()->_bbs[node->_idx];
    76.9 -      if(block == NULL) {
   76.10 +    if (C->cfg() != NULL) {
   76.11 +      Block* block = C->cfg()->get_block_for_node(node);
   76.12 +      if (block == NULL) {
   76.13          print_prop("block", C->cfg()->_blocks[0]->_pre_order);
   76.14        } else {
   76.15          print_prop("block", block->_pre_order);
    77.1 --- a/src/share/vm/opto/ifg.cpp	Mon Aug 19 17:47:21 2013 +0200
    77.2 +++ b/src/share/vm/opto/ifg.cpp	Fri Aug 23 22:12:18 2013 +0100
    77.3 @@ -565,7 +565,7 @@
    77.4                lrgs(r)._def = 0;
    77.5              }
    77.6              n->disconnect_inputs(NULL, C);
    77.7 -            _cfg._bbs.map(n->_idx,NULL);
    77.8 +            _cfg.unmap_node_from_block(n);
    77.9              n->replace_by(C->top());
    77.10              // Since we yanked a Node from the block, high pressure moves up one
   77.11              hrp_index[0]--;
   77.12 @@ -607,7 +607,7 @@
   77.13            if( n->is_SpillCopy()
   77.14                && lrgs(r).is_singledef()        // MultiDef live range can still split
   77.15                && n->outcnt() == 1              // and use must be in this block
   77.16 -              && _cfg._bbs[n->unique_out()->_idx] == b ) {
   77.17 +              && _cfg.get_block_for_node(n->unique_out()) == b ) {
   77.18              // All single-use MachSpillCopy(s) that immediately precede their
   77.19              // use must color early.  If a longer live range steals their
   77.20              // color, the spill copy will split and may push another spill copy
    78.1 --- a/src/share/vm/opto/lcm.cpp	Mon Aug 19 17:47:21 2013 +0200
    78.2 +++ b/src/share/vm/opto/lcm.cpp	Fri Aug 23 22:12:18 2013 +0100
    78.3 @@ -237,7 +237,7 @@
    78.4      }
    78.5  
    78.6      // Check ctrl input to see if the null-check dominates the memory op
    78.7 -    Block *cb = cfg->_bbs[mach->_idx];
    78.8 +    Block *cb = cfg->get_block_for_node(mach);
    78.9      cb = cb->_idom;             // Always hoist at least 1 block
   78.10      if( !was_store ) {          // Stores can be hoisted only one block
   78.11        while( cb->_dom_depth > (_dom_depth + 1))
   78.12 @@ -262,7 +262,7 @@
   78.13          if( is_decoden ) continue;
   78.14        }
   78.15        // Block of memory-op input
   78.16 -      Block *inb = cfg->_bbs[mach->in(j)->_idx];
   78.17 +      Block *inb = cfg->get_block_for_node(mach->in(j));
   78.18        Block *b = this;          // Start from nul check
   78.19        while( b != inb && b->_dom_depth > inb->_dom_depth )
   78.20          b = b->_idom;           // search upwards for input
   78.21 @@ -272,7 +272,7 @@
   78.22      }
   78.23      if( j > 0 )
   78.24        continue;
   78.25 -    Block *mb = cfg->_bbs[mach->_idx];
   78.26 +    Block *mb = cfg->get_block_for_node(mach);
   78.27      // Hoisting stores requires more checks for the anti-dependence case.
   78.28      // Give up hoisting if we have to move the store past any load.
   78.29      if( was_store ) {
   78.30 @@ -291,7 +291,7 @@
   78.31            break;                // Found anti-dependent load
   78.32          // Make sure control does not do a merge (would have to check allpaths)
   78.33          if( b->num_preds() != 2 ) break;
   78.34 -        b = cfg->_bbs[b->pred(1)->_idx]; // Move up to predecessor block
   78.35 +        b = cfg->get_block_for_node(b->pred(1)); // Move up to predecessor block
   78.36        }
   78.37        if( b != this ) continue;
   78.38      }
   78.39 @@ -303,15 +303,15 @@
   78.40  
   78.41      // Found a candidate!  Pick one with least dom depth - the highest
   78.42      // in the dom tree should be closest to the null check.
   78.43 -    if( !best ||
   78.44 -        cfg->_bbs[mach->_idx]->_dom_depth < cfg->_bbs[best->_idx]->_dom_depth ) {
   78.45 +    if (best == NULL || cfg->get_block_for_node(mach)->_dom_depth < cfg->get_block_for_node(best)->_dom_depth) {
   78.46        best = mach;
   78.47        bidx = vidx;
   78.48 -
   78.49      }
   78.50    }
   78.51    // No candidate!
   78.52 -  if( !best ) return;
   78.53 +  if (best == NULL) {
   78.54 +    return;
   78.55 +  }
   78.56  
   78.57    // ---- Found an implicit null check
   78.58    extern int implicit_null_checks;
   78.59 @@ -319,29 +319,29 @@
   78.60  
   78.61    if( is_decoden ) {
   78.62      // Check if we need to hoist decodeHeapOop_not_null first.
   78.63 -    Block *valb = cfg->_bbs[val->_idx];
   78.64 +    Block *valb = cfg->get_block_for_node(val);
   78.65      if( this != valb && this->_dom_depth < valb->_dom_depth ) {
   78.66        // Hoist it up to the end of the test block.
   78.67        valb->find_remove(val);
   78.68        this->add_inst(val);
   78.69 -      cfg->_bbs.map(val->_idx,this);
   78.70 +      cfg->map_node_to_block(val, this);
   78.71        // DecodeN on x86 may kill flags. Check for flag-killing projections
   78.72        // that also need to be hoisted.
   78.73        for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
   78.74          Node* n = val->fast_out(j);
   78.75          if( n->is_MachProj() ) {
   78.76 -          cfg->_bbs[n->_idx]->find_remove(n);
   78.77 +          cfg->get_block_for_node(n)->find_remove(n);
   78.78            this->add_inst(n);
   78.79 -          cfg->_bbs.map(n->_idx,this);
   78.80 +          cfg->map_node_to_block(n, this);
   78.81          }
   78.82        }
   78.83      }
   78.84    }
   78.85    // Hoist the memory candidate up to the end of the test block.
   78.86 -  Block *old_block = cfg->_bbs[best->_idx];
   78.87 +  Block *old_block = cfg->get_block_for_node(best);
   78.88    old_block->find_remove(best);
   78.89    add_inst(best);
   78.90 -  cfg->_bbs.map(best->_idx,this);
   78.91 +  cfg->map_node_to_block(best, this);
   78.92  
   78.93    // Move the control dependence
   78.94    if (best->in(0) && best->in(0) == old_block->_nodes[0])
   78.95 @@ -352,9 +352,9 @@
   78.96    for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
   78.97      Node* n = best->fast_out(j);
   78.98      if( n->is_MachProj() ) {
   78.99 -      cfg->_bbs[n->_idx]->find_remove(n);
  78.100 +      cfg->get_block_for_node(n)->find_remove(n);
  78.101        add_inst(n);
  78.102 -      cfg->_bbs.map(n->_idx,this);
  78.103 +      cfg->map_node_to_block(n, this);
  78.104      }
  78.105    }
  78.106  
  78.107 @@ -385,7 +385,7 @@
  78.108    Node *old_tst = proj->in(0);
  78.109    MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  78.110    _nodes.map(end_idx(),nul_chk);
  78.111 -  cfg->_bbs.map(nul_chk->_idx,this);
  78.112 +  cfg->map_node_to_block(nul_chk, this);
  78.113    // Redirect users of old_test to nul_chk
  78.114    for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
  78.115      old_tst->last_out(i2)->set_req(0, nul_chk);
  78.116 @@ -468,7 +468,7 @@
  78.117          Node* use = n->fast_out(j);
  78.118  
  78.119          // The use is a conditional branch, make them adjacent
  78.120 -        if (use->is_MachIf() && cfg->_bbs[use->_idx]==this ) {
  78.121 +        if (use->is_MachIf() && cfg->get_block_for_node(use) == this) {
  78.122            found_machif = true;
  78.123            break;
  78.124          }
  78.125 @@ -529,13 +529,14 @@
  78.126  
  78.127  
  78.128  //------------------------------set_next_call----------------------------------
  78.129 -void Block::set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs ) {
  78.130 +void Block::set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg) {
  78.131    if( next_call.test_set(n->_idx) ) return;
  78.132    for( uint i=0; i<n->len(); i++ ) {
  78.133      Node *m = n->in(i);
  78.134      if( !m ) continue;  // must see all nodes in block that precede call
  78.135 -    if( bbs[m->_idx] == this )
  78.136 -      set_next_call( m, next_call, bbs );
  78.137 +    if (cfg->get_block_for_node(m) == this) {
  78.138 +      set_next_call(m, next_call, cfg);
  78.139 +    }
  78.140    }
  78.141  }
  78.142  
  78.143 @@ -545,12 +546,12 @@
  78.144  // next subroutine call get priority - basically it moves things NOT needed
  78.145  // for the next call till after the call.  This prevents me from trying to
  78.146  // carry lots of stuff live across a call.
  78.147 -void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs) {
  78.148 +void Block::needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg) {
  78.149    // Find the next control-defining Node in this block
  78.150    Node* call = NULL;
  78.151    for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
  78.152      Node* m = this_call->fast_out(i);
  78.153 -    if( bbs[m->_idx] == this && // Local-block user
   78.154 +    if (cfg->get_block_for_node(m) == this && // Local-block user
  78.155          m != this_call &&       // Not self-start node
  78.156          m->is_MachCall() )
  78.157        call = m;
  78.158 @@ -558,7 +559,7 @@
  78.159    }
  78.160    if (call == NULL)  return;    // No next call (e.g., block end is near)
  78.161    // Set next-call for all inputs to this call
  78.162 -  set_next_call(call, next_call, bbs);
  78.163 +  set_next_call(call, next_call, cfg);
  78.164  }
  78.165  
  78.166  //------------------------------add_call_kills-------------------------------------
  78.167 @@ -578,7 +579,7 @@
  78.168  
  78.169  
  78.170  //------------------------------sched_call-------------------------------------
  78.171 -uint Block::sched_call( Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  78.172 +uint Block::sched_call( Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call ) {
  78.173    RegMask regs;
  78.174  
  78.175    // Schedule all the users of the call right now.  All the users are
  78.176 @@ -597,12 +598,14 @@
  78.177      // Check for scheduling the next control-definer
  78.178      if( n->bottom_type() == Type::CONTROL )
  78.179        // Warm up next pile of heuristic bits
  78.180 -      needed_for_next_call(n, next_call, bbs);
  78.181 +      needed_for_next_call(n, next_call, cfg);
  78.182  
  78.183      // Children of projections are now all ready
  78.184      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  78.185        Node* m = n->fast_out(j); // Get user
  78.186 -      if( bbs[m->_idx] != this ) continue;
   78.187 +      if (cfg->get_block_for_node(m) != this) {
  78.188 +        continue;
  78.189 +      }
  78.190        if( m->is_Phi() ) continue;
  78.191        int m_cnt = ready_cnt.at(m->_idx)-1;
  78.192        ready_cnt.at_put(m->_idx, m_cnt);
  78.193 @@ -620,7 +623,7 @@
  78.194    uint r_cnt = mcall->tf()->range()->cnt();
  78.195    int op = mcall->ideal_Opcode();
  78.196    MachProjNode *proj = new (matcher.C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  78.197 -  bbs.map(proj->_idx,this);
  78.198 +  cfg->map_node_to_block(proj, this);
  78.199    _nodes.insert(node_cnt++, proj);
  78.200  
  78.201    // Select the right register save policy.
  78.202 @@ -708,7 +711,7 @@
  78.203        uint local = 0;
  78.204        for( uint j=0; j<cnt; j++ ) {
  78.205          Node *m = n->in(j);
  78.206 -        if( m && cfg->_bbs[m->_idx] == this && !m->is_top() )
  78.207 +        if( m && cfg->get_block_for_node(m) == this && !m->is_top() )
  78.208            local++;              // One more block-local input
  78.209        }
  78.210        ready_cnt.at_put(n->_idx, local); // Count em up
  78.211 @@ -720,7 +723,7 @@
  78.212            for (uint prec = n->req(); prec < n->len(); prec++) {
  78.213              Node* oop_store = n->in(prec);
  78.214              if (oop_store != NULL) {
  78.215 -              assert(cfg->_bbs[oop_store->_idx]->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
  78.216 +              assert(cfg->get_block_for_node(oop_store)->_dom_depth <= this->_dom_depth, "oop_store must dominate card-mark");
  78.217              }
  78.218            }
  78.219          }
  78.220 @@ -753,7 +756,7 @@
  78.221      Node *n = _nodes[i3];       // Get pre-scheduled
  78.222      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
  78.223        Node* m = n->fast_out(j);
  78.224 -      if( cfg->_bbs[m->_idx] ==this ) { // Local-block user
  78.225 +      if (cfg->get_block_for_node(m) == this) { // Local-block user
  78.226          int m_cnt = ready_cnt.at(m->_idx)-1;
  78.227          ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
  78.228        }
  78.229 @@ -786,7 +789,7 @@
  78.230    }
  78.231  
  78.232    // Warm up the 'next_call' heuristic bits
  78.233 -  needed_for_next_call(_nodes[0], next_call, cfg->_bbs);
  78.234 +  needed_for_next_call(_nodes[0], next_call, cfg);
  78.235  
  78.236  #ifndef PRODUCT
  78.237      if (cfg->trace_opto_pipelining()) {
  78.238 @@ -837,7 +840,7 @@
  78.239  #endif
  78.240      if( n->is_MachCall() ) {
  78.241        MachCallNode *mcall = n->as_MachCall();
  78.242 -      phi_cnt = sched_call(matcher, cfg->_bbs, phi_cnt, worklist, ready_cnt, mcall, next_call);
  78.243 +      phi_cnt = sched_call(matcher, cfg, phi_cnt, worklist, ready_cnt, mcall, next_call);
  78.244        continue;
  78.245      }
  78.246  
  78.247 @@ -847,7 +850,7 @@
  78.248        regs.OR(n->out_RegMask());
  78.249  
  78.250        MachProjNode *proj = new (matcher.C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
  78.251 -      cfg->_bbs.map(proj->_idx,this);
  78.252 +      cfg->map_node_to_block(proj, this);
  78.253        _nodes.insert(phi_cnt++, proj);
  78.254  
  78.255        add_call_kills(proj, regs, matcher._c_reg_save_policy, false);
  78.256 @@ -856,7 +859,9 @@
  78.257      // Children are now all ready
  78.258      for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
  78.259        Node* m = n->fast_out(i5); // Get user
  78.260 -      if( cfg->_bbs[m->_idx] != this ) continue;
  78.261 +      if (cfg->get_block_for_node(m) != this) {
  78.262 +        continue;
  78.263 +      }
  78.264        if( m->is_Phi() ) continue;
  78.265        if (m->_idx >= max_idx) { // new node, skip it
  78.266          assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
  78.267 @@ -914,7 +919,7 @@
  78.268  }
  78.269  
  78.270  //------------------------------catch_cleanup_find_cloned_def------------------
  78.271 -static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  78.272 +static Node *catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  78.273    assert( use_blk != def_blk, "Inter-block cleanup only");
  78.274  
  78.275    // The use is some block below the Catch.  Find and return the clone of the def
  78.276 @@ -940,7 +945,8 @@
   78.277      // PhiNode, the PhiNode uses from the def and ITS uses need fixup.
  78.278      Node_Array inputs = new Node_List(Thread::current()->resource_area());
  78.279      for(uint k = 1; k < use_blk->num_preds(); k++) {
  78.280 -      inputs.map(k, catch_cleanup_find_cloned_def(bbs[use_blk->pred(k)->_idx], def, def_blk, bbs, n_clone_idx));
  78.281 +      Block* block = cfg->get_block_for_node(use_blk->pred(k));
  78.282 +      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, cfg, n_clone_idx));
  78.283      }
  78.284  
  78.285      // Check to see if the use_blk already has an identical phi inserted.
  78.286 @@ -962,7 +968,7 @@
  78.287      if (fixup == NULL) {
  78.288        Node *new_phi = PhiNode::make(use_blk->head(), def);
  78.289        use_blk->_nodes.insert(1, new_phi);
  78.290 -      bbs.map(new_phi->_idx, use_blk);
  78.291 +      cfg->map_node_to_block(new_phi, use_blk);
  78.292        for (uint k = 1; k < use_blk->num_preds(); k++) {
  78.293          new_phi->set_req(k, inputs[k]);
  78.294        }
  78.295 @@ -1002,17 +1008,17 @@
  78.296  //------------------------------catch_cleanup_inter_block---------------------
  78.297  // Fix all input edges in use that reference "def".  The use is in a different
  78.298  // block than the def.
  78.299 -static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, Block_Array &bbs, int n_clone_idx) {
  78.300 +static void catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, PhaseCFG* cfg, int n_clone_idx) {
  78.301    if( !use_blk ) return;        // Can happen if the use is a precedence edge
  78.302  
  78.303 -  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, bbs, n_clone_idx);
  78.304 +  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, cfg, n_clone_idx);
  78.305    catch_cleanup_fix_all_inputs(use, def, new_def);
  78.306  }
  78.307  
  78.308  //------------------------------call_catch_cleanup-----------------------------
   78.309  // If we inserted any instructions between a Call and its CatchNode,
  78.310  // clone the instructions on all paths below the Catch.
  78.311 -void Block::call_catch_cleanup(Block_Array &bbs, Compile* C) {
  78.312 +void Block::call_catch_cleanup(PhaseCFG* cfg, Compile* C) {
  78.313  
  78.314    // End of region to clone
  78.315    uint end = end_idx();
  78.316 @@ -1037,7 +1043,7 @@
  78.317        // since clones dominate on each path.
  78.318        Node *clone = _nodes[j-1]->clone();
  78.319        sb->_nodes.insert( 1, clone );
  78.320 -      bbs.map(clone->_idx,sb);
  78.321 +      cfg->map_node_to_block(clone, sb);
  78.322      }
  78.323    }
  78.324  
  78.325 @@ -1054,18 +1060,19 @@
  78.326      uint max = out->size();
  78.327      for (uint j = 0; j < max; j++) {// For all users
  78.328        Node *use = out->pop();
  78.329 -      Block *buse = bbs[use->_idx];
  78.330 +      Block *buse = cfg->get_block_for_node(use);
  78.331        if( use->is_Phi() ) {
  78.332          for( uint k = 1; k < use->req(); k++ )
  78.333            if( use->in(k) == n ) {
  78.334 -            Node *fixup = catch_cleanup_find_cloned_def(bbs[buse->pred(k)->_idx], n, this, bbs, n_clone_idx);
  78.335 +            Block* block = cfg->get_block_for_node(buse->pred(k));
  78.336 +            Node *fixup = catch_cleanup_find_cloned_def(block, n, this, cfg, n_clone_idx);
  78.337              use->set_req(k, fixup);
  78.338            }
  78.339        } else {
  78.340          if (this == buse) {
  78.341            catch_cleanup_intra_block(use, n, this, beg, n_clone_idx);
  78.342          } else {
  78.343 -          catch_cleanup_inter_block(use, buse, n, this, bbs, n_clone_idx);
  78.344 +          catch_cleanup_inter_block(use, buse, n, this, cfg, n_clone_idx);
  78.345          }
  78.346        }
  78.347      } // End for all users
    79.1 --- a/src/share/vm/opto/live.cpp	Mon Aug 19 17:47:21 2013 +0200
    79.2 +++ b/src/share/vm/opto/live.cpp	Fri Aug 23 22:12:18 2013 +0100
    79.3 @@ -101,7 +101,7 @@
    79.4        for( uint k=1; k<cnt; k++ ) {
    79.5          Node *nk = n->in(k);
    79.6          uint nkidx = nk->_idx;
    79.7 -        if( _cfg._bbs[nkidx] != b ) {
    79.8 +        if (_cfg.get_block_for_node(nk) != b) {
    79.9            uint u = _names[nkidx];
   79.10            use->insert( u );
   79.11            DEBUG_ONLY(def_outside->insert( u );)
   79.12 @@ -121,7 +121,7 @@
   79.13  
   79.14      // Push these live-in things to predecessors
   79.15      for( uint l=1; l<b->num_preds(); l++ ) {
   79.16 -      Block *p = _cfg._bbs[b->pred(l)->_idx];
   79.17 +      Block *p = _cfg.get_block_for_node(b->pred(l));
   79.18        add_liveout( p, use, first_pass );
   79.19  
   79.20        // PhiNode uses go in the live-out set of prior blocks.
   79.21 @@ -142,8 +142,10 @@
   79.22        assert( delta->count(), "missing delta set" );
   79.23  
   79.24        // Add new-live-in to predecessors live-out sets
   79.25 -      for( uint l=1; l<b->num_preds(); l++ )
   79.26 -        add_liveout( _cfg._bbs[b->pred(l)->_idx], delta, first_pass );
   79.27 +      for (uint l = 1; l < b->num_preds(); l++) {
   79.28 +        Block* block = _cfg.get_block_for_node(b->pred(l));
   79.29 +        add_liveout(block, delta, first_pass);
   79.30 +      }
   79.31  
   79.32        freeset(b);
   79.33      } // End of while-worklist-not-empty
    80.1 --- a/src/share/vm/opto/loopTransform.cpp	Mon Aug 19 17:47:21 2013 +0200
    80.2 +++ b/src/share/vm/opto/loopTransform.cpp	Fri Aug 23 22:12:18 2013 +0100
    80.3 @@ -624,8 +624,6 @@
    80.4  }
    80.5  
    80.6  
    80.7 -#define MAX_UNROLL 16 // maximum number of unrolls for main loop
    80.8 -
    80.9  //------------------------------policy_unroll----------------------------------
   80.10  // Return TRUE or FALSE if the loop should be unrolled or not.  Unroll if
   80.11  // the loop is a CountedLoop and the body is small enough.
   80.12 @@ -642,7 +640,7 @@
   80.13    if (cl->trip_count() <= (uint)(cl->is_normal_loop() ? 2 : 1)) return false;
   80.14  
   80.15    int future_unroll_ct = cl->unrolled_count() * 2;
   80.16 -  if (future_unroll_ct > MAX_UNROLL) return false;
   80.17 +  if (future_unroll_ct > LoopMaxUnroll) return false;
   80.18  
   80.19    // Check for initial stride being a small enough constant
   80.20    if (abs(cl->stride_con()) > (1<<2)*future_unroll_ct) return false;
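
The loopTransform.cpp hunk is the one functional change in this group: the hard-coded MAX_UNROLL constant (16) becomes the LoopMaxUnroll flag, making the main-loop unroll cap tunable. Assuming the usual C2 flag machinery, the matching declaration would look roughly like this (a sketch; the actual c2_globals.hpp entry is not part of this diff):

    product(intx, LoopMaxUnroll, 16,                                        \
            "Maximum number of unrolls for main loop")                      \

With that in place, the old behavior presumably remains the default, with -XX:LoopMaxUnroll=<n> overriding it.
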
    81.1 --- a/src/share/vm/opto/node.hpp	Mon Aug 19 17:47:21 2013 +0200
    81.2 +++ b/src/share/vm/opto/node.hpp	Fri Aug 23 22:12:18 2013 +0100
    81.3 @@ -42,7 +42,6 @@
    81.4  class AllocateArrayNode;
    81.5  class AllocateNode;
    81.6  class Block;
    81.7 -class Block_Array;
    81.8  class BoolNode;
    81.9  class BoxLockNode;
   81.10  class CMoveNode;
    82.1 --- a/src/share/vm/opto/output.cpp	Mon Aug 19 17:47:21 2013 +0200
    82.2 +++ b/src/share/vm/opto/output.cpp	Fri Aug 23 22:12:18 2013 +0100
    82.3 @@ -68,7 +68,6 @@
    82.4      return;
    82.5    }
    82.6    // Make sure I can find the Start Node
    82.7 -  Block_Array& bbs = _cfg->_bbs;
    82.8    Block *entry = _cfg->_blocks[1];
    82.9    Block *broot = _cfg->_broot;
   82.10  
   82.11 @@ -77,8 +76,8 @@
   82.12    // Replace StartNode with prolog
   82.13    MachPrologNode *prolog = new (this) MachPrologNode();
   82.14    entry->_nodes.map( 0, prolog );
   82.15 -  bbs.map( prolog->_idx, entry );
   82.16 -  bbs.map( start->_idx, NULL ); // start is no longer in any block
   82.17 +  _cfg->map_node_to_block(prolog, entry);
   82.18 +  _cfg->unmap_node_from_block(start); // start is no longer in any block
   82.19  
   82.20    // Virtual methods need an unverified entry point
   82.21  
   82.22 @@ -117,8 +116,7 @@
   82.23        if( m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt ) {
   82.24          MachEpilogNode *epilog = new (this) MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
   82.25          b->add_inst( epilog );
   82.26 -        bbs.map(epilog->_idx, b);
   82.27 -        //_regalloc->set_bad(epilog->_idx); // Already initialized this way.
   82.28 +        _cfg->map_node_to_block(epilog, b);
   82.29        }
   82.30      }
   82.31    }
   82.32 @@ -252,7 +250,7 @@
   82.33          if (insert) {
   82.34            Node *zap = call_zap_node(n->as_MachSafePoint(), i);
   82.35            b->_nodes.insert( j, zap );
   82.36 -          _cfg->_bbs.map( zap->_idx, b );
   82.37 +          _cfg->map_node_to_block(zap, b);
   82.38            ++j;
   82.39          }
   82.40        }
   82.41 @@ -1234,7 +1232,7 @@
   82.42  #ifdef ASSERT
   82.43      if (!b->is_connector()) {
   82.44        stringStream st;
   82.45 -      b->dump_head(&_cfg->_bbs, &st);
   82.46 +      b->dump_head(_cfg, &st);
   82.47        MacroAssembler(cb).block_comment(st.as_string());
   82.48      }
   82.49      jmp_target[i] = 0;
   82.50 @@ -1310,7 +1308,7 @@
   82.51            MachNode *nop = new (this) MachNopNode(nops_cnt);
   82.52            b->_nodes.insert(j++, nop);
   82.53            last_inst++;
   82.54 -          _cfg->_bbs.map( nop->_idx, b );
   82.55 +          _cfg->map_node_to_block(nop, b);
   82.56            nop->emit(*cb, _regalloc);
   82.57            cb->flush_bundle(true);
   82.58            current_offset = cb->insts_size();
   82.59 @@ -1395,7 +1393,7 @@
   82.60                if (needs_padding && replacement->avoid_back_to_back()) {
   82.61                  MachNode *nop = new (this) MachNopNode();
   82.62                  b->_nodes.insert(j++, nop);
   82.63 -                _cfg->_bbs.map(nop->_idx, b);
   82.64 +                _cfg->map_node_to_block(nop, b);
   82.65                  last_inst++;
   82.66                  nop->emit(*cb, _regalloc);
   82.67                  cb->flush_bundle(true);
   82.68 @@ -1549,7 +1547,7 @@
   82.69        if( padding > 0 ) {
   82.70          MachNode *nop = new (this) MachNopNode(padding / nop_size);
   82.71          b->_nodes.insert( b->_nodes.size(), nop );
   82.72 -        _cfg->_bbs.map( nop->_idx, b );
   82.73 +        _cfg->map_node_to_block(nop, b);
   82.74          nop->emit(*cb, _regalloc);
   82.75          current_offset = cb->insts_size();
   82.76        }
   82.77 @@ -1737,7 +1735,6 @@
   82.78  Scheduling::Scheduling(Arena *arena, Compile &compile)
   82.79    : _arena(arena),
   82.80      _cfg(compile.cfg()),
   82.81 -    _bbs(compile.cfg()->_bbs),
   82.82      _regalloc(compile.regalloc()),
   82.83      _reg_node(arena),
   82.84      _bundle_instr_count(0),
   82.85 @@ -2085,8 +2082,9 @@
   82.86      if( def->is_Proj() )        // If this is a machine projection, then
   82.87        def = def->in(0);         // propagate usage thru to the base instruction
   82.88  
   82.89 -    if( _bbs[def->_idx] != bb ) // Ignore if not block-local
    82.90 +    if (_cfg->get_block_for_node(def) != bb) { // Ignore if not block-local
   82.91        continue;
   82.92 +    }
   82.93  
   82.94      // Compute the latency
   82.95      uint l = _bundle_cycle_number + n->latency(i);
   82.96 @@ -2358,9 +2356,10 @@
   82.97        Node *inp = n->in(k);
   82.98        if (!inp) continue;
   82.99        assert(inp != n, "no cycles allowed" );
  82.100 -      if( _bbs[inp->_idx] == bb ) { // Block-local use?
  82.101 -        if( inp->is_Proj() )    // Skip through Proj's
  82.102 +      if (_cfg->get_block_for_node(inp) == bb) { // Block-local use?
  82.103 +        if (inp->is_Proj()) { // Skip through Proj's
  82.104            inp = inp->in(0);
  82.105 +        }
  82.106          ++_uses[inp->_idx];     // Count 1 block-local use
  82.107        }
  82.108      }
  82.109 @@ -2643,7 +2642,7 @@
  82.110      return;
  82.111  
  82.112    Node *pinch = _reg_node[def_reg]; // Get pinch point
  82.113 -  if( !pinch || _bbs[pinch->_idx] != b || // No pinch-point yet?
  82.114 +  if ((pinch == NULL) || _cfg->get_block_for_node(pinch) != b || // No pinch-point yet?
  82.115        is_def ) {    // Check for a true def (not a kill)
  82.116      _reg_node.map(def_reg,def); // Record def/kill as the optimistic pinch-point
  82.117      return;
  82.118 @@ -2669,7 +2668,7 @@
  82.119        _cfg->C->record_method_not_compilable("too many D-U pinch points");
  82.120        return;
  82.121      }
  82.122 -    _bbs.map(pinch->_idx,b);      // Pretend it's valid in this block (lazy init)
  82.123 +    _cfg->map_node_to_block(pinch, b);      // Pretend it's valid in this block (lazy init)
  82.124      _reg_node.map(def_reg,pinch); // Record pinch-point
  82.125      //_regalloc->set_bad(pinch->_idx); // Already initialized this way.
  82.126      if( later_def->outcnt() == 0 || later_def->ideal_reg() == MachProjNode::fat_proj ) { // Distinguish def from kill
  82.127 @@ -2713,9 +2712,9 @@
  82.128      return;
  82.129    Node *pinch = _reg_node[use_reg]; // Get pinch point
  82.130    // Check for no later def_reg/kill in block
  82.131 -  if( pinch && _bbs[pinch->_idx] == b &&
  82.132 +  if ((pinch != NULL) && _cfg->get_block_for_node(pinch) == b &&
  82.133        // Use has to be block-local as well
  82.134 -      _bbs[use->_idx] == b ) {
  82.135 +      _cfg->get_block_for_node(use) == b) {
  82.136      if( pinch->Opcode() == Op_Node && // Real pinch-point (not optimistic?)
  82.137          pinch->req() == 1 ) {   // pinch not yet in block?
  82.138        pinch->del_req(0);        // yank pointer to later-def, also set flag
  82.139 @@ -2895,7 +2894,7 @@
  82.140      int trace_cnt = 0;
  82.141      for (uint k = 0; k < _reg_node.Size(); k++) {
  82.142        Node* pinch = _reg_node[k];
  82.143 -      if (pinch != NULL && pinch->Opcode() == Op_Node &&
  82.144 +      if ((pinch != NULL) && pinch->Opcode() == Op_Node &&
   82.145           // no precedence input edges
  82.146            (pinch->req() == pinch->len() || pinch->in(pinch->req()) == NULL) ) {
  82.147          cleanup_pinch(pinch);
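
The hunks above all replace direct indexing of the public _bbs Block_Array
with accessor calls on PhaseCFG. The accessor definitions live outside this
excerpt; below is a minimal sketch inferred from the call sites in this diff,
assuming the renamed _node_to_block_mapping field seen in the vmStructs.cpp
hunk backs all three methods (the bodies are an illustration, not the real
declarations from block.hpp):

    class PhaseCFG {
     private:
      // Backing store for the node->block mapping (formerly the public _bbs).
      Block_Array _node_to_block_mapping;

     public:
      // Record that 'node' now lives in 'block'.
      void map_node_to_block(Node* node, Block* block) {
        _node_to_block_mapping.map(node->_idx, block);
      }

      // Return the block a node was placed in, or NULL if it has none.
      Block* get_block_for_node(Node* node) {
        return _node_to_block_mapping[node->_idx];
      }

      // Clear the mapping when a node is yanked from its block.
      void unmap_node_from_block(Node* node) {
        _node_to_block_mapping.map(node->_idx, NULL);
      }
    };

With every touch of the mapping funneled through these methods, the output.hpp
hunk that follows can drop Scheduling's cached Block_Array& _bbs member, and
postaloc.cpp and reg_split.cpp repeat the same call-site substitution.
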
    83.1 --- a/src/share/vm/opto/output.hpp	Mon Aug 19 17:47:21 2013 +0200
    83.2 +++ b/src/share/vm/opto/output.hpp	Fri Aug 23 22:12:18 2013 +0100
    83.3 @@ -96,9 +96,6 @@
    83.4    // List of nodes currently available for choosing for scheduling
    83.5    Node_List _available;
    83.6  
    83.7 -  // Mapping from node (index) to basic block
    83.8 -  Block_Array& _bbs;
    83.9 -
   83.10    // For each instruction beginning a bundle, the number of following
   83.11    // nodes to be bundled with it.
   83.12    Bundle *_node_bundling_base;
    84.1 --- a/src/share/vm/opto/postaloc.cpp	Mon Aug 19 17:47:21 2013 +0200
    84.2 +++ b/src/share/vm/opto/postaloc.cpp	Fri Aug 23 22:12:18 2013 +0100
    84.3 @@ -78,11 +78,13 @@
    84.4  // Helper function for yank_if_dead
    84.5  int PhaseChaitin::yank( Node *old, Block *current_block, Node_List *value, Node_List *regnd ) {
    84.6    int blk_adjust=0;
    84.7 -  Block *oldb = _cfg._bbs[old->_idx];
    84.8 +  Block *oldb = _cfg.get_block_for_node(old);
    84.9    oldb->find_remove(old);
   84.10    // Count 1 if deleting an instruction from the current block
   84.11 -  if( oldb == current_block ) blk_adjust++;
   84.12 -  _cfg._bbs.map(old->_idx,NULL);
   84.13 +  if (oldb == current_block) {
   84.14 +    blk_adjust++;
   84.15 +  }
   84.16 +  _cfg.unmap_node_from_block(old);
   84.17    OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
   84.18    if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
   84.19      value->map(old_reg,NULL);  // Yank from value/regnd maps
   84.20 @@ -433,7 +435,7 @@
   84.21      bool missing_some_inputs = false;
   84.22      Block *freed = NULL;
   84.23      for( j = 1; j < b->num_preds(); j++ ) {
   84.24 -      Block *pb = _cfg._bbs[b->pred(j)->_idx];
   84.25 +      Block *pb = _cfg.get_block_for_node(b->pred(j));
   84.26        // Remove copies along phi edges
   84.27        for( uint k=1; k<phi_dex; k++ )
   84.28          elide_copy( b->_nodes[k], j, b, *blk2value[pb->_pre_order], *blk2regnd[pb->_pre_order], false );
   84.29 @@ -478,7 +480,7 @@
   84.30      } else {
   84.31        if( !freed ) {            // Didn't get a freebie prior block
   84.32          // Must clone some data
   84.33 -        freed = _cfg._bbs[b->pred(1)->_idx];
   84.34 +        freed = _cfg.get_block_for_node(b->pred(1));
   84.35          Node_List &f_value = *blk2value[freed->_pre_order];
   84.36          Node_List &f_regnd = *blk2regnd[freed->_pre_order];
   84.37          for( uint k = 0; k < (uint)_max_reg; k++ ) {
   84.38 @@ -488,7 +490,7 @@
   84.39        }
   84.40        // Merge all inputs together, setting to NULL any conflicts.
   84.41        for( j = 1; j < b->num_preds(); j++ ) {
   84.42 -        Block *pb = _cfg._bbs[b->pred(j)->_idx];
   84.43 +        Block *pb = _cfg.get_block_for_node(b->pred(j));
   84.44          if( pb == freed ) continue; // Did self already via freelist
   84.45          Node_List &p_regnd = *blk2regnd[pb->_pre_order];
   84.46          for( uint k = 0; k < (uint)_max_reg; k++ ) {
   84.47 @@ -515,8 +517,9 @@
   84.48            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
   84.49        }
   84.50        if( u != NodeSentinel ) {    // Junk Phi.  Remove
   84.51 -        b->_nodes.remove(j--); phi_dex--;
   84.52 -        _cfg._bbs.map(phi->_idx,NULL);
   84.53 +        b->_nodes.remove(j--);
   84.54 +        phi_dex--;
   84.55 +        _cfg.unmap_node_from_block(phi);
   84.56          phi->replace_by(u);
   84.57          phi->disconnect_inputs(NULL, C);
   84.58          continue;
    85.1 --- a/src/share/vm/opto/reg_split.cpp	Mon Aug 19 17:47:21 2013 +0200
    85.2 +++ b/src/share/vm/opto/reg_split.cpp	Fri Aug 23 22:12:18 2013 +0100
    85.3 @@ -132,7 +132,7 @@
    85.4    }
    85.5  
    85.6    b->_nodes.insert(i,spill);    // Insert node in block
    85.7 -  _cfg._bbs.map(spill->_idx,b); // Update node->block mapping to reflect
    85.8 +  _cfg.map_node_to_block(spill, b); // Update node->block mapping to reflect
    85.9    // Adjust the point where we go hi-pressure
   85.10    if( i <= b->_ihrp_index ) b->_ihrp_index++;
   85.11    if( i <= b->_fhrp_index ) b->_fhrp_index++;
   85.12 @@ -219,7 +219,7 @@
   85.13          use->set_req(useidx, def);
   85.14        } else {
   85.15          // Block and index where the use occurs.
   85.16 -        Block *b = _cfg._bbs[use->_idx];
   85.17 +        Block *b = _cfg.get_block_for_node(use);
   85.18          // Put the clone just prior to use
   85.19          int bindex = b->find_node(use);
   85.20          // DEF is UP, so must copy it DOWN and hook in USE
   85.21 @@ -270,7 +270,7 @@
   85.22    int bindex;
   85.23    // Phi input spill-copys belong at the end of the prior block
   85.24    if( use->is_Phi() ) {
   85.25 -    b = _cfg._bbs[b->pred(useidx)->_idx];
   85.26 +    b = _cfg.get_block_for_node(b->pred(useidx));
   85.27      bindex = b->end_idx();
   85.28    } else {
   85.29      // Put the clone just prior to use
   85.30 @@ -335,7 +335,7 @@
   85.31          continue;
   85.32        }
   85.33  
   85.34 -      Block *b_def = _cfg._bbs[def->_idx];
   85.35 +      Block *b_def = _cfg.get_block_for_node(def);
   85.36        int idx_def = b_def->find_node(def);
   85.37        Node *in_spill = get_spillcopy_wide( in, def, i );
   85.38        if( !in_spill ) return 0; // Bailed out
   85.39 @@ -589,7 +589,7 @@
   85.40          UPblock[slidx] = true;
   85.41          // Record following instruction in case 'n' rematerializes and
   85.42          // kills flags
   85.43 -        Block *pred1 = _cfg._bbs[b->pred(1)->_idx];
   85.44 +        Block *pred1 = _cfg.get_block_for_node(b->pred(1));
   85.45          continue;
   85.46        }
   85.47  
   85.48 @@ -601,7 +601,7 @@
   85.49        // Grab predecessor block header
   85.50        n1 = b->pred(1);
   85.51        // Grab the appropriate reaching def info for inpidx
   85.52 -      pred = _cfg._bbs[n1->_idx];
   85.53 +      pred = _cfg.get_block_for_node(n1);
   85.54        pidx = pred->_pre_order;
   85.55        Node **Ltmp = Reaches[pidx];
   85.56        bool  *Utmp = UP[pidx];
   85.57 @@ -616,7 +616,7 @@
   85.58          // Grab predecessor block headers
   85.59          n2 = b->pred(inpidx);
   85.60          // Grab the appropriate reaching def info for inpidx
   85.61 -        pred = _cfg._bbs[n2->_idx];
   85.62 +        pred = _cfg.get_block_for_node(n2);
   85.63          pidx = pred->_pre_order;
   85.64          Ltmp = Reaches[pidx];
   85.65          Utmp = UP[pidx];
   85.66 @@ -701,7 +701,7 @@
   85.67          // Grab predecessor block header
   85.68          n1 = b->pred(1);
   85.69          // Grab the appropriate reaching def info for k
   85.70 -        pred = _cfg._bbs[n1->_idx];
   85.71 +        pred = _cfg.get_block_for_node(n1);
   85.72          pidx = pred->_pre_order;
   85.73          Node **Ltmp = Reaches[pidx];
   85.74          bool  *Utmp = UP[pidx];
   85.75 @@ -919,7 +919,7 @@
   85.76                  return 0;
   85.77                }
   85.78                _lrg_map.extend(def->_idx, 0);
   85.79 -              _cfg._bbs.map(def->_idx,b);
   85.80 +              _cfg.map_node_to_block(def, b);
   85.81                n->set_req(inpidx, def);
   85.82                continue;
   85.83              }
   85.84 @@ -1291,7 +1291,7 @@
   85.85    for( insidx = 0; insidx < phis->size(); insidx++ ) {
   85.86      Node *phi = phis->at(insidx);
   85.87      assert(phi->is_Phi(),"This list must only contain Phi Nodes");
   85.88 -    Block *b = _cfg._bbs[phi->_idx];
   85.89 +    Block *b = _cfg.get_block_for_node(phi);
   85.90      // Grab the live range number
   85.91      uint lidx = _lrg_map.find_id(phi);
   85.92      uint slidx = lrg2reach[lidx];
   85.93 @@ -1315,7 +1315,7 @@
   85.94      // DEF has the wrong UP/DOWN value.
   85.95      for( uint i = 1; i < b->num_preds(); i++ ) {
   85.96        // Get predecessor block pre-order number
   85.97 -      Block *pred = _cfg._bbs[b->pred(i)->_idx];
   85.98 +      Block *pred = _cfg.get_block_for_node(b->pred(i));
   85.99        pidx = pred->_pre_order;
  85.100        // Grab reaching def
  85.101        Node *def = Reaches[pidx][slidx];
    86.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Mon Aug 19 17:47:21 2013 +0200
    86.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Fri Aug 23 22:12:18 2013 +0100
    86.3 @@ -3217,15 +3217,6 @@
    86.4    JvmtiBreakpoints& jvmti_breakpoints = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
    86.5    jvmti_breakpoints.clearall_in_class_at_safepoint(the_class_oop);
    86.6  
    86.7 -  if (the_class_oop == Universe::reflect_invoke_cache()->klass()) {
    86.8 -    // We are redefining java.lang.reflect.Method. Method.invoke() is
    86.9 -    // cached and users of the cache care about each active version of
   86.10 -    // the method so we have to track this previous version.
   86.11 -    // Do this before methods get switched
   86.12 -    Universe::reflect_invoke_cache()->add_previous_version(
   86.13 -      the_class->method_with_idnum(Universe::reflect_invoke_cache()->method_idnum()));
   86.14 -  }
   86.15 -
   86.16    // Deoptimize all compiled code that depends on this class
   86.17    flush_dependent_code(the_class, THREAD);
   86.18  
    87.1 --- a/src/share/vm/runtime/vmStructs.cpp	Mon Aug 19 17:47:21 2013 +0200
    87.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Aug 23 22:12:18 2013 +0100
    87.3 @@ -1098,7 +1098,7 @@
    87.4                                                                                                                                       \
    87.5    c2_nonstatic_field(PhaseCFG,           _num_blocks,              uint)                                                             \
    87.6    c2_nonstatic_field(PhaseCFG,           _blocks,                  Block_List)                                                       \
    87.7 -  c2_nonstatic_field(PhaseCFG,           _bbs,                     Block_Array)                                                      \
    87.8 +  c2_nonstatic_field(PhaseCFG,           _node_to_block_mapping,   Block_Array)                                                      \
    87.9    c2_nonstatic_field(PhaseCFG,           _broot,                   Block*)                                                           \
   87.10                                                                                                                                       \
   87.11    c2_nonstatic_field(PhaseRegAlloc,      _node_regs,               OptoRegPair*)                                                     \
    88.1 --- a/src/share/vm/utilities/debug.hpp	Mon Aug 19 17:47:21 2013 +0200
    88.2 +++ b/src/share/vm/utilities/debug.hpp	Fri Aug 23 22:12:18 2013 +0100
    88.3 @@ -225,6 +225,22 @@
    88.4  
    88.5  void warning(const char* format, ...);
    88.6  
    88.7 +#ifdef ASSERT
    88.8 +// Compile-time asserts.
    88.9 +template <bool> struct StaticAssert;
   88.10 +template <> struct StaticAssert<true> {};
   88.11 +
   88.12 +// Only StaticAssert<true> is defined, so if cond evaluates to false we get
   88.13 +// a compile time exception when trying to use StaticAssert<false>.
   88.14 +#define STATIC_ASSERT(cond)                   \
   88.15 +  do {                                        \
   88.16 +    StaticAssert<(cond)> DUMMY_STATIC_ASSERT; \
   88.17 +    (void)DUMMY_STATIC_ASSERT; /* ignore */   \
   88.18 +  } while (false)
   88.19 +#else
   88.20 +#define STATIC_ASSERT(cond)
   88.21 +#endif
   88.22 +
   88.23  // out of shared space reporting
   88.24  enum SharedSpaceType {
   88.25    SharedPermGen,
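
A usage sketch for the new compile-time assert (an illustration, not code from
this changeset): the macro declares a dummy local of type StaticAssert<cond>
inside its own do/while scope, so a true condition compiles away while a false
one tries to instantiate the never-defined StaticAssert<false> specialization
and the build fails. Because the macro is a statement, it belongs inside a
function body, and in non-ASSERT builds it expands to nothing:

    void check_assumptions() {
      STATIC_ASSERT(sizeof(jint) == 4);    // true in HotSpot, so this compiles
      // STATIC_ASSERT(sizeof(jint) == 8); // would not compile: StaticAssert<false>
      //                                   // is declared but never defined
    }
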
    89.1 --- a/src/share/vm/utilities/exceptions.hpp	Mon Aug 19 17:47:21 2013 +0200
    89.2 +++ b/src/share/vm/utilities/exceptions.hpp	Fri Aug 23 22:12:18 2013 +0100
    89.3 @@ -306,6 +306,6 @@
    89.4  // which preserves pre-existing exceptions and does not allow new
    89.5  // exceptions.
    89.6  
    89.7 -#define EXCEPTION_MARK                           Thread* THREAD; ExceptionMark __em(THREAD);
    89.8 +#define EXCEPTION_MARK                           Thread* THREAD = NULL; ExceptionMark __em(THREAD);
    89.9  
   89.10  #endif // SHARE_VM_UTILITIES_EXCEPTIONS_HPP
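
The added initializer fixes a compiler warning rather than a runtime bug:
ExceptionMark takes the thread pointer by reference and is assumed to assign
the current thread in its constructor (the constructor body is not part of
this hunk), so THREAD was never actually read before being set. A hedged
usage sketch, with a hypothetical callee standing in for real VM code:

    void vm_entry_point() {
      EXCEPTION_MARK;  // Thread* THREAD = NULL; ExceptionMark __em(THREAD);
                       // __em's constructor is assumed to point THREAD at the
                       // current thread before any code below runs
      do_work(THREAD); // hypothetical callee taking a TRAPS-style argument
    }
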
    90.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Mon Aug 19 17:47:21 2013 +0200
    90.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Fri Aug 23 22:12:18 2013 +0100
    90.3 @@ -410,6 +410,8 @@
    90.4    return align_size_down_(size, alignment);
    90.5  }
    90.6  
    90.7 +#define is_size_aligned_(size, alignment) ((size) == (align_size_up_(size, alignment)))
    90.8 +
    90.9  // Align objects by rounding up their size, in HeapWord units.
   90.10  
   90.11  #define align_object_size_(size) align_size_up_(size, MinObjAlignment)
   90.12 @@ -428,6 +430,10 @@
   90.13    return align_size_up(offset, HeapWordsPerLong);
   90.14  }
   90.15  
   90.16 +inline void* align_pointer_up(const void* addr, size_t size) {
   90.17 +  return (void*) align_size_up_((uintptr_t)addr, size);
   90.18 +}
   90.19 +
   90.20  // Clamp an address to be within a specific page
   90.21  // 1. If addr is on the page it is returned as is
   90.22  // 2. If addr is above the page_address the start of the *next* page will be returned
   90.23 @@ -449,32 +455,6 @@
   90.24  // The expected size in bytes of a cache line, used to pad data structures.
   90.25  #define DEFAULT_CACHE_LINE_SIZE 64
   90.26  
   90.27 -// Bytes needed to pad type to avoid cache-line sharing; alignment should be the
   90.28 -// expected cache line size (a power of two).  The first addend avoids sharing
   90.29 -// when the start address is not a multiple of alignment; the second maintains
   90.30 -// alignment of starting addresses that happen to be a multiple.
   90.31 -#define PADDING_SIZE(type, alignment)                           \
   90.32 -  ((alignment) + align_size_up_(sizeof(type), alignment))
   90.33 -
   90.34 -// Templates to create a subclass padded to avoid cache line sharing.  These are
   90.35 -// effective only when applied to derived-most (leaf) classes.
   90.36 -
   90.37 -// When no args are passed to the base ctor.
   90.38 -template <class T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   90.39 -class Padded: public T {
   90.40 -private:
   90.41 -  char _pad_buf_[PADDING_SIZE(T, alignment)];
   90.42 -};
   90.43 -
   90.44 -// When either 0 or 1 args may be passed to the base ctor.
   90.45 -template <class T, typename Arg1T, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
   90.46 -class Padded01: public T {
   90.47 -public:
   90.48 -  Padded01(): T() { }
   90.49 -  Padded01(Arg1T arg1): T(arg1) { }
   90.50 -private:
   90.51 -  char _pad_buf_[PADDING_SIZE(T, alignment)];
   90.52 -};
   90.53  
   90.54  //----------------------------------------------------------------------------------------------------
   90.55  // Utility macros for compilers
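
A self-contained sketch of what the two new helpers compute, assuming the
alignment is a power of two; the round-up idiom below restates the
align_size_up_ macro already defined in this file so the example compiles on
its own, and the harness is illustrative, not HotSpot code:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Same round-up-to-power-of-two idiom as HotSpot's align_size_up_.
    #define align_size_up_(size, alignment) \
      (((size) + ((alignment) - 1)) & ~((alignment) - 1))
    #define is_size_aligned_(size, alignment) \
      ((size) == (align_size_up_(size, alignment)))

    inline void* align_pointer_up(const void* addr, size_t size) {
      return (void*) align_size_up_((uintptr_t)addr, size);
    }

    int main() {
      assert(is_size_aligned_(64, 16));   // 64 is already a multiple of 16
      assert(!is_size_aligned_(65, 16));  // align_size_up_(65, 16) == 80
      // 0x1001 is bumped to the next 8-byte boundary, 0x1008.
      assert(align_pointer_up((void*)0x1001, 8) == (void*)0x1008);
      return 0;
    }
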
    91.1 --- a/test/compiler/whitebox/ClearMethodStateTest.java	Mon Aug 19 17:47:21 2013 +0200
    91.2 +++ b/test/compiler/whitebox/ClearMethodStateTest.java	Fri Aug 23 22:12:18 2013 +0100
    91.3 @@ -26,7 +26,7 @@
    91.4   * @library /testlibrary /testlibrary/whitebox
    91.5   * @build ClearMethodStateTest
    91.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    91.7 - * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ClearMethodStateTest
    91.8 + * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* ClearMethodStateTest
    91.9   * @summary testing of WB::clearMethodState()
   91.10   * @author igor.ignatyev@oracle.com
   91.11   */
    92.1 --- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Mon Aug 19 17:47:21 2013 +0200
    92.2 +++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Fri Aug 23 22:12:18 2013 +0100
    92.3 @@ -61,6 +61,9 @@
    92.4      /** Value of {@code -XX:TieredStopAtLevel} */
    92.5      protected static final int TIERED_STOP_AT_LEVEL
    92.6              = Integer.parseInt(getVMOption("TieredStopAtLevel", "0"));
    92.7 +    /** Flag for verbose output, true if {@code -Dverbose} specified */
    92.8 +    protected static final boolean IS_VERBOSE
    92.9 +            = System.getProperty("verbose") != null;
   92.10  
   92.11      /**
   92.12       * Returns value of VM option.
   92.13 @@ -268,7 +271,9 @@
   92.14              }
   92.15              result += tmp == null ? 0 : tmp;
   92.16          }
   92.17 -        System.out.println("method was invoked " + count + " times");
   92.18 +        if (IS_VERBOSE) {
   92.19 +            System.out.println("method was invoked " + count + " times");
   92.20 +        }
   92.21          return result;
   92.22      }
   92.23  }
    93.1 --- a/test/compiler/whitebox/DeoptimizeAllTest.java	Mon Aug 19 17:47:21 2013 +0200
    93.2 +++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Fri Aug 23 22:12:18 2013 +0100
    93.3 @@ -26,7 +26,7 @@
    93.4   * @library /testlibrary /testlibrary/whitebox
    93.5   * @build DeoptimizeAllTest
    93.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    93.7 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeAllTest
    93.8 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeAllTest
    93.9   * @summary testing of WB::deoptimizeAll()
   93.10   * @author igor.ignatyev@oracle.com
   93.11   */
    94.1 --- a/test/compiler/whitebox/DeoptimizeMethodTest.java	Mon Aug 19 17:47:21 2013 +0200
    94.2 +++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
    94.3 @@ -26,7 +26,7 @@
    94.4   * @library /testlibrary /testlibrary/whitebox
    94.5   * @build DeoptimizeMethodTest
    94.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    94.7 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeMethodTest
    94.8 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* DeoptimizeMethodTest
    94.9   * @summary testing of WB::deoptimizeMethod()
   94.10   * @author igor.ignatyev@oracle.com
   94.11   */
    95.1 --- a/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Mon Aug 19 17:47:21 2013 +0200
    95.2 +++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Fri Aug 23 22:12:18 2013 +0100
    95.3 @@ -26,7 +26,7 @@
    95.4   * @library /testlibrary /testlibrary/whitebox
    95.5   * @build EnqueueMethodForCompilationTest
    95.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    95.7 - * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI EnqueueMethodForCompilationTest
    95.8 + * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* EnqueueMethodForCompilationTest
    95.9   * @summary testing of WB::enqueueMethodForCompilation()
   95.10   * @author igor.ignatyev@oracle.com
   95.11   */
    96.1 --- a/test/compiler/whitebox/IsMethodCompilableTest.java	Mon Aug 19 17:47:21 2013 +0200
    96.2 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Fri Aug 23 22:12:18 2013 +0100
    96.3 @@ -27,7 +27,7 @@
    96.4   * @library /testlibrary /testlibrary/whitebox
    96.5   * @build IsMethodCompilableTest
    96.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    96.7 - * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IsMethodCompilableTest
    96.8 + * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* IsMethodCompilableTest
    96.9   * @summary testing of WB::isMethodCompilable()
   96.10   * @author igor.ignatyev@oracle.com
   96.11   */
    97.1 --- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Mon Aug 19 17:47:21 2013 +0200
    97.2 +++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Fri Aug 23 22:12:18 2013 +0100
    97.3 @@ -27,7 +27,7 @@
    97.4   * @library /testlibrary /testlibrary/whitebox
    97.5   * @build MakeMethodNotCompilableTest
    97.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    97.7 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI MakeMethodNotCompilableTest
    97.8 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* MakeMethodNotCompilableTest
    97.9   * @summary testing of WB::makeMethodNotCompilable()
   97.10   * @author igor.ignatyev@oracle.com
   97.11   */
    98.1 --- a/test/compiler/whitebox/SetDontInlineMethodTest.java	Mon Aug 19 17:47:21 2013 +0200
    98.2 +++ b/test/compiler/whitebox/SetDontInlineMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
    98.3 @@ -26,7 +26,7 @@
    98.4   * @library /testlibrary /testlibrary/whitebox
    98.5   * @build SetDontInlineMethodTest
    98.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    98.7 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetDontInlineMethodTest
    98.8 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetDontInlineMethodTest
    98.9   * @summary testing of WB::testSetDontInlineMethod()
   98.10   * @author igor.ignatyev@oracle.com
   98.11   */
    99.1 --- a/test/compiler/whitebox/SetForceInlineMethodTest.java	Mon Aug 19 17:47:21 2013 +0200
    99.2 +++ b/test/compiler/whitebox/SetForceInlineMethodTest.java	Fri Aug 23 22:12:18 2013 +0100
    99.3 @@ -26,7 +26,7 @@
    99.4   * @library /testlibrary /testlibrary/whitebox
    99.5   * @build SetForceInlineMethodTest
    99.6   * @run main ClassFileInstaller sun.hotspot.WhiteBox
    99.7 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetForceInlineMethodTest
    99.8 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:CompileCommand=compileonly,TestCase$Helper::* SetForceInlineMethodTest
    99.9   * @summary testing of WB::testSetForceInlineMethod()
   99.10   * @author igor.ignatyev@oracle.com
   99.11   */
   100.1 --- a/test/runtime/7107135/Test7107135.sh	Mon Aug 19 17:47:21 2013 +0200
   100.2 +++ b/test/runtime/7107135/Test7107135.sh	Fri Aug 23 22:12:18 2013 +0100
   100.3 @@ -53,9 +53,6 @@
   100.4      fi
   100.5      ;;
   100.6    *)
   100.7 -    NULL=NUL
   100.8 -    PS=";"
   100.9 -    FS="\\"
  100.10      echo "Test passed; only valid for Linux"
  100.11      exit 0;
  100.12      ;;
  100.13 @@ -87,14 +84,16 @@
  100.14  
  100.15  echo
  100.16  echo Test changing of stack protection:
  100.17 -echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rw
  100.18 +echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
  100.19  ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} Test test-rwx
  100.20 +JAVA_RETVAL=$?
  100.21  
  100.22 -if [ "$?" == "0" ]
  100.23 +if [ "$JAVA_RETVAL" == "0" ]
  100.24  then
  100.25    echo
  100.26    echo ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
  100.27    ${TESTJAVA}${FS}bin${FS}java -cp ${THIS_DIR} TestMT test-rwx
  100.28 +  JAVA_RETVAL=$?
  100.29  fi
  100.30  
  100.31 -exit $?
  100.32 +exit $JAVA_RETVAL
   101.1 --- a/test/runtime/RedefineObject/Agent.java	Mon Aug 19 17:47:21 2013 +0200
   101.2 +++ b/test/runtime/RedefineObject/Agent.java	Fri Aug 23 22:12:18 2013 +0100
   101.3 @@ -1,5 +1,5 @@
   101.4  /*
   101.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   101.6 + * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
   101.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   101.8   *
   101.9   * This code is free software; you can redistribute it and/or modify it
  101.10 @@ -22,6 +22,7 @@
  101.11   */
  101.12  import java.security.*;
  101.13  import java.lang.instrument.*;
  101.14 +import java.lang.reflect.*;
  101.15  
  101.16  public class Agent implements ClassFileTransformer {
  101.17      public synchronized byte[] transform(final ClassLoader classLoader,
  101.18 @@ -29,23 +30,35 @@
  101.19                                           Class<?> classBeingRedefined,
  101.20                                           ProtectionDomain protectionDomain,
  101.21                                           byte[] classfileBuffer) {
  101.22 -        //System.out.println("Transforming class " + className);
  101.23 +        System.out.println("Transforming class " + className);
  101.24          return classfileBuffer;
  101.25      }
  101.26  
  101.27 -    public static void premain(String agentArgs, Instrumentation instrumentation) {
  101.28 +    public static void redefine(String agentArgs, Instrumentation instrumentation, Class to_redefine) {
  101.29  
  101.30 -        Agent transformer = new Agent();
  101.31 -
  101.32 -        instrumentation.addTransformer(transformer, true);
  101.33 -
  101.34 -        Class c = Object.class;
  101.35          try {
  101.36 -            instrumentation.retransformClasses(c);
  101.37 +            instrumentation.retransformClasses(to_redefine);
  101.38          } catch (Exception e) {
  101.39              e.printStackTrace();
  101.40          }
  101.41  
  101.42 +    }
  101.43 +
  101.44 +    public static void premain(String agentArgs, Instrumentation instrumentation) {
  101.45 +        Agent transformer = new Agent();
  101.46 +        instrumentation.addTransformer(transformer, true);
  101.47 +
  101.48 +        // Redefine java/lang/Object and java/lang/reflect/Method.invoke and
  101.49 +        // java/lang/ClassLoader
  101.50 +        Class object_class = Object.class;
  101.51 +        redefine(agentArgs, instrumentation, object_class);
  101.52 +
  101.53 +        Class method_class = Method.class;
  101.54 +        redefine(agentArgs, instrumentation, method_class);
  101.55 +
  101.56 +        Class loader_class = ClassLoader.class;
  101.57 +        redefine(agentArgs, instrumentation, loader_class);
  101.58 +
  101.59          instrumentation.removeTransformer(transformer);
  101.60      }
  101.61  
  101.62 @@ -57,5 +70,14 @@
  101.63              System.gc();
  101.64              ba.clone();
  101.65          }
  101.66 +        try {
  101.67 +            // Use java/lang/reflect/Method.invoke to call
  101.68 +            WalkThroughInvoke a = new WalkThroughInvoke();
  101.69 +            Class aclass = WalkThroughInvoke.class;
  101.70 +            Method m = aclass.getMethod("stackWalk");
  101.71 +            m.invoke(a);
  101.72 +        } catch (Exception x) {
  101.73 +            x.printStackTrace();
  101.74 +        }
  101.75      }
  101.76  }
   102.1 --- a/test/runtime/RedefineObject/TestRedefineObject.java	Mon Aug 19 17:47:21 2013 +0200
   102.2 +++ b/test/runtime/RedefineObject/TestRedefineObject.java	Fri Aug 23 22:12:18 2013 +0100
   102.3 @@ -1,5 +1,5 @@
   102.4  /*
   102.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   102.6 + * Copyright (c) 2013, 2013, Oracle and/or its affiliates. All rights reserved.
   102.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   102.8   *
   102.9   * This code is free software; you can redistribute it and/or modify it
  102.10 @@ -26,14 +26,17 @@
  102.11  /*
  102.12   * Test to redefine java/lang/Object and verify that it doesn't crash on vtable
  102.13   * call on basic array type.
  102.14 + * Test to redefine java/lang/ClassLoader and java/lang/reflect/Method to make
  102.15 + * sure cached versions used afterward are the current version.
  102.16   *
  102.17   * @test
  102.18   * @bug 8005056
  102.19 + * @bug 8009728
  102.20   * @library /testlibrary
  102.21   * @build Agent
  102.22   * @run main ClassFileInstaller Agent
  102.23   * @run main TestRedefineObject
  102.24 - * @run main/othervm -javaagent:agent.jar Agent
  102.25 + * @run main/othervm -javaagent:agent.jar -XX:TraceRedefineClasses=5 Agent
  102.26   */
  102.27  public class TestRedefineObject {
  102.28      public static void main(String[] args) throws Exception  {
   103.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   103.2 +++ b/test/runtime/RedefineObject/WalkThroughInvoke.java	Fri Aug 23 22:12:18 2013 +0100
   103.3 @@ -0,0 +1,38 @@
   103.4 +/*
   103.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   103.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   103.7 + *
   103.8 + * This code is free software; you can redistribute it and/or modify it
   103.9 + * under the terms of the GNU General Public License version 2 only, as
  103.10 + * published by the Free Software Foundation.
  103.11 + *
  103.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  103.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  103.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  103.15 + * version 2 for more details (a copy is included in the LICENSE file that
  103.16 + * accompanied this code).
  103.17 + *
  103.18 + * You should have received a copy of the GNU General Public License version
  103.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  103.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  103.21 + *
  103.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  103.23 + * or visit www.oracle.com if you need additional information or have any
  103.24 + * questions.
  103.25 + */
  103.26 +import java.lang.reflect.*;
  103.27 +
  103.28 +public class WalkThroughInvoke {
  103.29 +  public void stackWalk() {
  103.30 +      try {
  103.31 +          Class b = Object.class;
  103.32 +          SecurityManager sm = new SecurityManager();
  103.33 +          // Walks the stack with Method.invoke in the stack (which is the
  103.34 +          // purpose of the test) before it gets an AccessControlException.
  103.35 +          sm.checkMemberAccess(b, Member.DECLARED);
  103.36 +      } catch (java.security.AccessControlException e) {
  103.37 +          // Ignoring an 'AccessControlException' exception since
  103.38 +          // it is expected as part of this test.
  103.39 +      }
  103.40 +  }
  103.41 +};
