Merge

Fri, 18 Oct 2013 12:10:44 -0700

author
jcoomes
date
Fri, 18 Oct 2013 12:10:44 -0700
changeset 5947
c51cd6af7e61
parent 5946
bf9e50c573ad
parent 5930
7114c4597ae3
child 5948
23b8db5ea31d
child 5955
8cd1abf3ecab
child 5967
ee99e1a7c5fb

Merge

src/share/vm/runtime/globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vmStructs.cpp file | annotate | diff | comparison | revisions
test/TEST.groups file | annotate | diff | comparison | revisions
test/compiler/8013496/Test8013496.sh file | annotate | diff | comparison | revisions
test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java file | annotate | diff | comparison | revisions
test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java file | annotate | diff | comparison | revisions
     1.1 --- a/make/Makefile	Thu Oct 17 06:29:58 2013 -0700
     1.2 +++ b/make/Makefile	Fri Oct 18 12:10:44 2013 -0700
     1.3 @@ -334,6 +334,11 @@
     1.4  	$(install-file)
     1.5  $(EXPORT_SERVER_DIR)/64/%.diz:    		$(C2_BUILD_DIR)/%.diz
     1.6  	$(install-file)
     1.7 +# MacOS X
     1.8 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C2_BUILD_DIR)/%.dSYM
     1.9 +	$(install-dir)
    1.10 +$(EXPORT_SERVER_DIR)/%.dSYM:       		$(C2_BUILD_DIR)/%.dSYM
    1.11 +	$(install-dir)
    1.12  endif
    1.13  
    1.14  # Client (C1)
    1.15 @@ -379,6 +384,11 @@
    1.16  	$(install-file)
    1.17  $(EXPORT_CLIENT_DIR)/64/%.diz:    		$(C1_BUILD_DIR)/%.diz
    1.18  	$(install-file)
    1.19 +# MacOS X
    1.20 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(C1_BUILD_DIR)/%.dSYM
    1.21 +	$(install-dir)
    1.22 +$(EXPORT_CLIENT_DIR)/%.dSYM:       		$(C1_BUILD_DIR)/%.dSYM
    1.23 +	$(install-dir)
    1.24  endif
    1.25  
    1.26  # Minimal1
    1.27 @@ -424,6 +434,7 @@
    1.28  	$(install-file)
    1.29  $(EXPORT_MINIMAL_DIR)/64/%.diz:			$(MINIMAL1_BUILD_DIR)/%.diz
    1.30  	$(install-file)
    1.31 +# MacOS X does not support Minimal1 config
    1.32  endif
    1.33  
    1.34  # Zero
    1.35 @@ -446,6 +457,11 @@
    1.36  	$(install-file)
    1.37  $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_BUILD_DIR)/%.diz
    1.38  	$(install-file)
    1.39 +# MacOS X
    1.40 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(ZERO_BUILD_DIR)/%.dSYM
    1.41 +	$(install-dir)
    1.42 +$(EXPORT_SERVER_DIR)/%.dSYM:			$(ZERO_BUILD_DIR)/%.dSYM
    1.43 +	$(install-dir)
    1.44  endif
    1.45  
    1.46  # Shark
    1.47 @@ -468,6 +484,11 @@
    1.48  	$(install-file)
    1.49  $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_BUILD_DIR)/%.diz
    1.50  	$(install-file)
    1.51 +# MacOS X
    1.52 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.dSYM: 		$(SHARK_BUILD_DIR)/%.dSYM
    1.53 +	$(install-dir)
    1.54 +$(EXPORT_SERVER_DIR)/%.dSYM:			$(SHARK_BUILD_DIR)/%.dSYM
    1.55 +	$(install-dir)
    1.56  endif
    1.57  
    1.58  $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
     2.1 --- a/make/bsd/Makefile	Thu Oct 17 06:29:58 2013 -0700
     2.2 +++ b/make/bsd/Makefile	Fri Oct 18 12:10:44 2013 -0700
     2.3 @@ -204,6 +204,7 @@
     2.4  BUILDTREE_MAKE    = $(GAMMADIR)/make/$(OSNAME)/makefiles/buildtree.make
     2.5  BUILDTREE_VARS    = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OSNAME) SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) LIBRARY_SUFFIX=$(LIBRARY_SUFFIX)
     2.6  BUILDTREE_VARS   += HOTSPOT_RELEASE_VERSION=$(HOTSPOT_RELEASE_VERSION) HOTSPOT_BUILD_VERSION=$(HOTSPOT_BUILD_VERSION) JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
     2.7 +BUILDTREE_VARS   += ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS) OBJCOPY=$(OBJCOPY) STRIP_POLICY=$(STRIP_POLICY) ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES) ZIPEXE=$(ZIPEXE)
     2.8  
     2.9  BUILDTREE         = $(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_VARS)
    2.10  
    2.11 @@ -337,9 +338,11 @@
    2.12  
    2.13  # Doc target.  This is the same for all build options.
    2.14  #     Hence create a docs directory beside ...$(ARCH)_[...]
    2.15 +# We specify 'BUILD_FLAVOR=product' so that the proper
    2.16 +# ENABLE_FULL_DEBUG_SYMBOLS value is used.
    2.17  docs: checks
    2.18  	$(QUIETLY) mkdir -p $(SUBDIR_DOCS)
    2.19 -	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs
    2.20 +	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
    2.21  
    2.22  # Synonyms for win32-like targets.
    2.23  compiler2:  debug product
     3.1 --- a/make/bsd/makefiles/buildtree.make	Thu Oct 17 06:29:58 2013 -0700
     3.2 +++ b/make/bsd/makefiles/buildtree.make	Fri Oct 18 12:10:44 2013 -0700
     3.3 @@ -261,6 +261,16 @@
     3.4  	echo "$(call gamma-path,commonsrc,os/posix/vm)"; \
     3.5  	[ -n "$(CFLAGS_BROWSE)" ] && \
     3.6  	    echo && echo "CFLAGS_BROWSE = $(CFLAGS_BROWSE)"; \
     3.7 +	[ -n "$(ENABLE_FULL_DEBUG_SYMBOLS)" ] && \
     3.8 +	    echo && echo "ENABLE_FULL_DEBUG_SYMBOLS = $(ENABLE_FULL_DEBUG_SYMBOLS)"; \
     3.9 +	[ -n "$(OBJCOPY)" ] && \
    3.10 +	    echo && echo "OBJCOPY = $(OBJCOPY)"; \
    3.11 +	[ -n "$(STRIP_POLICY)" ] && \
    3.12 +	    echo && echo "STRIP_POLICY = $(STRIP_POLICY)"; \
    3.13 +	[ -n "$(ZIP_DEBUGINFO_FILES)" ] && \
    3.14 +	    echo && echo "ZIP_DEBUGINFO_FILES = $(ZIP_DEBUGINFO_FILES)"; \
    3.15 +	[ -n "$(ZIPEXE)" ] && \
    3.16 +	    echo && echo "ZIPEXE = $(ZIPEXE)"; \
    3.17  	[ -n "$(HOTSPOT_EXTRA_SYSDEFS)" ] && \
    3.18  	    echo && \
    3.19  	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
     4.1 --- a/make/bsd/makefiles/defs.make	Thu Oct 17 06:29:58 2013 -0700
     4.2 +++ b/make/bsd/makefiles/defs.make	Fri Oct 18 12:10:44 2013 -0700
     4.3 @@ -136,10 +136,127 @@
     4.4    endif
     4.5  endif
     4.6  
     4.7 +OS_VENDOR:=$(shell uname -s)
     4.8 +
     4.9 +# determine if HotSpot is being built in JDK6 or earlier version
    4.10 +JDK6_OR_EARLIER=0
    4.11 +ifeq "$(shell expr \( '$(JDK_MAJOR_VERSION)' != '' \& '$(JDK_MINOR_VERSION)' != '' \& '$(JDK_MICRO_VERSION)' != '' \))" "1"
    4.12 +  # if the longer variable names (newer build style) are set, then check those
    4.13 +  ifeq "$(shell expr \( $(JDK_MAJOR_VERSION) = 1 \& $(JDK_MINOR_VERSION) \< 7 \))" "1"
    4.14 +    JDK6_OR_EARLIER=1
    4.15 +  endif
    4.16 +else
    4.17 +  # the longer variables aren't set so check the shorter variable names
    4.18 +  ifeq "$(shell expr \( '$(JDK_MAJOR_VER)' = 1 \& '$(JDK_MINOR_VER)' \< 7 \))" "1"
    4.19 +    JDK6_OR_EARLIER=1
    4.20 +  endif
    4.21 +endif
    4.22 +
    4.23 +ifeq ($(JDK6_OR_EARLIER),0)
    4.24 +  # Full Debug Symbols is supported on JDK7 or newer.
    4.25 +  # The Full Debug Symbols (FDS) default for BUILD_FLAVOR == product
    4.26 +  # builds is enabled with debug info files ZIP'ed to save space. For
    4.27 +  # BUILD_FLAVOR != product builds, FDS is always enabled, after all a
    4.28 +  # debug build without debug info isn't very useful.
    4.29 +  # The ZIP_DEBUGINFO_FILES option only has meaning when FDS is enabled.
    4.30 +  #
    4.31 +  # If you invoke a build with FULL_DEBUG_SYMBOLS=0, then FDS will be
    4.32 +  # disabled for a BUILD_FLAVOR == product build.
    4.33 +  #
    4.34 +  # Note: Use of a different variable name for the FDS override option
    4.35 +  # versus the FDS enabled check is intentional (FULL_DEBUG_SYMBOLS
    4.36 +  # versus ENABLE_FULL_DEBUG_SYMBOLS). For auto build systems that pass
    4.37 +  # in options via environment variables, use of distinct variables
    4.38 +  # prevents strange behaviours. For example, in a BUILD_FLAVOR !=
    4.39 +  # product build, the FULL_DEBUG_SYMBOLS environment variable will be
    4.40 +  # 0, but the ENABLE_FULL_DEBUG_SYMBOLS make variable will be 1. If
    4.41 +  # the same variable name is used, then different values can be picked
    4.42 +  # up by different parts of the build. Just to be clear, we only need
    4.43 +  # two variable names because the incoming option value can be
    4.44 +  # overridden in some situations, e.g., a BUILD_FLAVOR != product
    4.45 +  # build.
    4.46 +
    4.47 +  # Due to the multiple sub-make processes that occur this logic gets
    4.48 +  # executed multiple times. We reduce the noise by at least checking that
    4.49 +  # BUILD_FLAVOR has been set.
    4.50 +  ifneq ($(BUILD_FLAVOR),)
    4.51 +    ifeq ($(BUILD_FLAVOR), product)
    4.52 +      FULL_DEBUG_SYMBOLS ?= 1
    4.53 +      ENABLE_FULL_DEBUG_SYMBOLS = $(FULL_DEBUG_SYMBOLS)
    4.54 +    else
    4.55 +      # debug variants always get Full Debug Symbols (if available)
    4.56 +      ENABLE_FULL_DEBUG_SYMBOLS = 1
    4.57 +    endif
    4.58 +    _JUNK_ := $(shell \
    4.59 +      echo >&2 "INFO: ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
    4.60 +    # since objcopy is optional, we set ZIP_DEBUGINFO_FILES later
    4.61 +
    4.62 +    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    4.63 +      ifeq ($(OS_VENDOR), Darwin)
    4.64 +          # MacOS X doesn't use OBJCOPY or STRIP_POLICY
    4.65 +          OBJCOPY=
    4.66 +          STRIP_POLICY=
    4.67 +          ZIP_DEBUGINFO_FILES ?= 1
    4.68 +      else
    4.69 +        # Default OBJCOPY comes from GNU Binutils on BSD
    4.70 +        ifeq ($(CROSS_COMPILE_ARCH),)
    4.71 +          DEF_OBJCOPY=/usr/bin/objcopy
    4.72 +        else
    4.73 +          # Assume objcopy is part of the cross-compilation toolset
    4.74 +          ifneq ($(ALT_COMPILER_PATH),)
    4.75 +            DEF_OBJCOPY=$(ALT_COMPILER_PATH)/objcopy
    4.76 +          endif
    4.77 +        endif
    4.78 +        OBJCOPY=$(shell test -x $(DEF_OBJCOPY) && echo $(DEF_OBJCOPY))
    4.79 +        ifneq ($(ALT_OBJCOPY),)
    4.80 +          _JUNK_ := $(shell echo >&2 "INFO: ALT_OBJCOPY=$(ALT_OBJCOPY)")
    4.81 +          OBJCOPY=$(shell test -x $(ALT_OBJCOPY) && echo $(ALT_OBJCOPY))
    4.82 +        endif
    4.83 +
    4.84 +        ifeq ($(OBJCOPY),)
    4.85 +          _JUNK_ := $(shell \
    4.86 +            echo >&2 "INFO: no objcopy cmd found so cannot create .debuginfo" \
    4.87 +              "files. You may need to set ALT_OBJCOPY.")
    4.88 +          ENABLE_FULL_DEBUG_SYMBOLS=0
    4.89 +          _JUNK_ := $(shell \
    4.90 +            echo >&2 "INFO:" \
    4.91 +              "ENABLE_FULL_DEBUG_SYMBOLS=$(ENABLE_FULL_DEBUG_SYMBOLS)")
    4.92 +        else
    4.93 +          _JUNK_ := $(shell \
    4.94 +            echo >&2 "INFO: $(OBJCOPY) cmd found so will create .debuginfo" \
    4.95 +              "files.")
    4.96 +
    4.97 +          # Library stripping policies for .debuginfo configs:
    4.98 +          #   all_strip - strips everything from the library
    4.99 +          #   min_strip - strips most stuff from the library; leaves
   4.100 +          #               minimum symbols
   4.101 +          #   no_strip  - does not strip the library at all
   4.102 +          #
   4.103 +          # Oracle security policy requires "all_strip". A waiver was
   4.104 +          # granted on 2011.09.01 that permits using "min_strip" in the
   4.105 +          # Java JDK and Java JRE.
   4.106 +          #
   4.107 +          # Currently, STRIP_POLICY is only used when Full Debug Symbols
   4.108 +          # is enabled.
   4.109 +          #
   4.110 +          STRIP_POLICY ?= min_strip
   4.111 +
   4.112 +          _JUNK_ := $(shell \
   4.113 +            echo >&2 "INFO: STRIP_POLICY=$(STRIP_POLICY)")
   4.114 +
   4.115 +          ZIP_DEBUGINFO_FILES ?= 1
   4.116 +        endif
   4.117 +
   4.118 +        _JUNK_ := $(shell \
   4.119 +          echo >&2 "INFO: ZIP_DEBUGINFO_FILES=$(ZIP_DEBUGINFO_FILES)")
   4.120 +      endif
   4.121 +    endif # ENABLE_FULL_DEBUG_SYMBOLS=1
   4.122 +  endif # BUILD_FLAVOR
   4.123 +endif # JDK_6_OR_EARLIER
   4.124 +
   4.125  JDK_INCLUDE_SUBDIR=bsd
   4.126  
   4.127  # Library suffix
   4.128 -OS_VENDOR:=$(shell uname -s)
   4.129  ifeq ($(OS_VENDOR),Darwin)
   4.130    LIBRARY_SUFFIX=dylib
   4.131  else
   4.132 @@ -150,6 +267,19 @@
   4.133  
   4.134  # client and server subdirectories have symbolic links to ../libjsig.so
   4.135  EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX)
   4.136 +
   4.137 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.138 +  ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.139 +      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.diz
   4.140 +  else
   4.141 +    ifeq ($(OS_VENDOR), Darwin)
   4.142 +        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
   4.143 +    else
   4.144 +        EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libjsig.debuginfo
   4.145 +    endif
   4.146 +  endif
   4.147 +endif
   4.148 +
   4.149  EXPORT_SERVER_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/server
   4.150  EXPORT_CLIENT_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/client
   4.151  EXPORT_MINIMAL_DIR = $(EXPORT_JRE_LIB_ARCH_DIR)/minimal
   4.152 @@ -157,34 +287,76 @@
   4.153  ifeq ($(findstring true, $(JVM_VARIANT_SERVER) $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   4.154    EXPORT_LIST += $(EXPORT_SERVER_DIR)/Xusage.txt
   4.155    EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX)
   4.156 +
   4.157 +  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.158 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.159 +        EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.diz
   4.160 +    else
   4.161 +      ifeq ($(OS_VENDOR), Darwin)
   4.162 +          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
   4.163 +      else
   4.164 +          EXPORT_LIST += $(EXPORT_SERVER_DIR)/libjvm.debuginfo
   4.165 +      endif
   4.166 +    endif
   4.167 +  endif
   4.168  endif
   4.169  
   4.170  ifeq ($(JVM_VARIANT_CLIENT),true)
   4.171    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   4.172    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
   4.173 +
   4.174 +  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.175 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.176 +        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.diz
   4.177 +    else
   4.178 +      ifeq ($(OS_VENDOR), Darwin)
   4.179 +          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX).dSYM
   4.180 +      else
   4.181 +          EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
   4.182 +      endif
   4.183 +    endif
   4.184 +  endif
   4.185  endif
   4.186  
   4.187  ifeq ($(JVM_VARIANT_MINIMAL1),true)
   4.188    EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/Xusage.txt
   4.189    EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.$(LIBRARY_SUFFIX)
   4.190 -
   4.191 -  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.192 -    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.193 -	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.diz
   4.194 -    else
   4.195 -	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
   4.196 -    endif
   4.197 -  endif
   4.198  endif
   4.199  
   4.200  # Serviceability Binaries
   4.201  # No SA Support for PPC, IA64, ARM or zero
   4.202  ADD_SA_BINARIES/x86   = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
   4.203                          $(EXPORT_LIB_DIR)/sa-jdi.jar
   4.204 +
   4.205 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.206 +  ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.207 +      ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
   4.208 +  else
   4.209 +    ifeq ($(OS_VENDOR), Darwin)
   4.210 +        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
   4.211 +    else
   4.212 +        ADD_SA_BINARIES/x86 += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
   4.213 +    endif
   4.214 +  endif
   4.215 +endif
   4.216 +
   4.217  ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
   4.218                          $(EXPORT_LIB_DIR)/sa-jdi.jar
   4.219  ADD_SA_BINARIES/universal = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
   4.220                              $(EXPORT_LIB_DIR)/sa-jdi.jar
   4.221 +
   4.222 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.223 +  ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.224 +      ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
   4.225 +  else
   4.226 +    ifeq ($(OS_VENDOR), Darwin)
   4.227 +        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
   4.228 +    else
   4.229 +        ADD_SA_BINARIES/universal += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
   4.230 +    endif
   4.231 +  endif
   4.232 +endif
   4.233 +
   4.234  ADD_SA_BINARIES/ppc   =
   4.235  ADD_SA_BINARIES/ia64  =
   4.236  ADD_SA_BINARIES/arm   =
   4.237 @@ -225,6 +397,19 @@
   4.238      # Files to simply copy in place
   4.239      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/Xusage.txt
   4.240      UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/Xusage.txt
   4.241 +    ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   4.242 +      ifeq ($(ZIP_DEBUGINFO_FILES),1)
   4.243 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.diz
   4.244 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.diz
   4.245 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.diz
   4.246 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.diz
   4.247 +      else
   4.248 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/server/libjvm.$(LIBRARY_SUFFIX).dSYM
   4.249 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/client/libjvm.$(LIBRARY_SUFFIX).dSYM
   4.250 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libjsig.$(LIBRARY_SUFFIX).dSYM
   4.251 +          UNIVERSAL_COPY_LIST += $(EXPORT_JRE_LIB_DIR)/libsaproc.$(LIBRARY_SUFFIX).dSYM
   4.252 +      endif
   4.253 +    endif
   4.254  
   4.255    endif
   4.256  endif
     5.1 --- a/make/bsd/makefiles/dtrace.make	Thu Oct 17 06:29:58 2013 -0700
     5.2 +++ b/make/bsd/makefiles/dtrace.make	Fri Oct 18 12:10:44 2013 -0700
     5.3 @@ -39,9 +39,15 @@
     5.4  JVM_DB = libjvm_db
     5.5  LIBJVM_DB = libjvm_db.dylib
     5.6  
     5.7 +LIBJVM_DB_DEBUGINFO   = libjvm_db.dylib.dSYM
     5.8 +LIBJVM_DB_DIZ         = libjvm_db.diz
     5.9 +
    5.10  JVM_DTRACE = jvm_dtrace
    5.11  LIBJVM_DTRACE = libjvm_dtrace.dylib
    5.12  
    5.13 +LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.dylib.dSYM
    5.14 +LIBJVM_DTRACE_DIZ         = libjvm_dtrace.diz
    5.15 +
    5.16  JVMOFFS = JvmOffsets
    5.17  JVMOFFS.o = $(JVMOFFS).o
    5.18  GENOFFS = generate$(JVMOFFS)
    5.19 @@ -76,21 +82,87 @@
    5.20  # Making 64/libjvm_db.so: 64-bit version of libjvm_db.so which handles 32-bit libjvm.so
    5.21  ifneq ("${ISA}","${BUILDARCH}")
    5.22  
    5.23 -XLIBJVM_DB = 64/$(LIBJVM_DB)
    5.24 -XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
    5.25 +XLIBJVM_DIR = 64
    5.26 +XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB)
    5.27 +XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE)
    5.28  XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))
    5.29  
    5.30 +XLIBJVM_DB_DEBUGINFO       = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO)
    5.31 +XLIBJVM_DB_DIZ             = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
    5.32 +XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
    5.33 +XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)
    5.34 +
    5.35  $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
    5.36  	@echo Making $@
    5.37 -	$(QUIETLY) mkdir -p 64/ ; \
    5.38 +	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
    5.39  	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. -I$(GENERATED) \
    5.40  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc
    5.41 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    5.42 +  ifeq ($(OS_VENDOR), Darwin)
    5.43 +	$(DSYMUTIL) $@
    5.44 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    5.45 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    5.46 +        # is not in the archived name:
    5.47 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
    5.48 +	$(RM) -r $(XLIBJVM_DB_DEBUGINFO)
    5.49 +    endif
    5.50 +  else
    5.51 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DB_DEBUGINFO)
    5.52 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    5.53 +        # is not in the link name:
    5.54 +        $(QUIETLY) ( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB) )
    5.55 +    ifeq ($(STRIP_POLICY),all_strip)
    5.56 +	$(QUIETLY) $(STRIP) $@
    5.57 +    else
    5.58 +      ifeq ($(STRIP_POLICY),min_strip)
    5.59 +	$(QUIETLY) $(STRIP) -x $@
    5.60 +      # implied else here is no stripping at all
    5.61 +      endif
    5.62 +    endif
    5.63 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    5.64 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    5.65 +        # is not in the archived name:
    5.66 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
    5.67 +	$(RM) $(XLIBJVM_DB_DEBUGINFO)
    5.68 +    endif
    5.69 +  endif
    5.70 +endif
    5.71  
    5.72  $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
    5.73  	@echo Making $@
    5.74 -	$(QUIETLY) mkdir -p 64/ ; \
    5.75 +	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
    5.76  	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \
    5.77  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
    5.78 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    5.79 +  ifeq ($(OS_VENDOR), Darwin)
    5.80 +	$(DSYMUTIL) $@
    5.81 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    5.82 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    5.83 +        # is not in the archived name:
    5.84 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
    5.85 +	$(RM) -r $(XLIBJVM_DTRACE_DEBUGINFO)
    5.86 +    endif
    5.87 +  else
    5.88 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(XLIBJVM_DTRACE_DEBUGINFO)
    5.89 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
    5.90 +        # is not in the link name:
    5.91 +	( cd $(XLIBJVM_DIR) && $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE) )
    5.92 +    ifeq ($(STRIP_POLICY),all_strip)
    5.93 +	$(QUIETLY) $(STRIP) $@
    5.94 +    else
    5.95 +      ifeq ($(STRIP_POLICY),min_strip)
    5.96 +	$(QUIETLY) $(STRIP) -x $@
    5.97 +      # implied else here is no stripping at all
    5.98 +      endif
    5.99 +    endif
   5.100 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   5.101 +        # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR)
   5.102 +        # is not in the archived name:
   5.103 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
   5.104 +	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO)
   5.105 +    endif
   5.106 +  endif
   5.107 +endif
   5.108  
   5.109  endif # ifneq ("${ISA}","${BUILDARCH}")
   5.110  
   5.111 @@ -134,11 +206,59 @@
   5.112  	@echo Making $@
   5.113  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
   5.114  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc
   5.115 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   5.116 +  ifeq ($(OS_VENDOR), Darwin)
   5.117 +	$(DSYMUTIL) $@
   5.118 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   5.119 +	$(ZIPEXE) -q -r -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
   5.120 +	$(RM) -r $(LIBJVM_DB_DEBUGINFO)
   5.121 +    endif
   5.122 +  else
   5.123 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DB_DEBUGINFO)
   5.124 +	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DB_DEBUGINFO) $@
   5.125 +    ifeq ($(STRIP_POLICY),all_strip)
   5.126 +	$(QUIETLY) $(STRIP) $@
   5.127 +    else
   5.128 +      ifeq ($(STRIP_POLICY),min_strip)
   5.129 +	$(QUIETLY) $(STRIP) -x $@
   5.130 +      # implied else here is no stripping at all
   5.131 +      endif
   5.132 +    endif
   5.133 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   5.134 +	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
   5.135 +	$(RM) $(LIBJVM_DB_DEBUGINFO)
   5.136 +    endif
   5.137 +  endif
   5.138 +endif
   5.139  
   5.140  $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
   5.141  	@echo Making $@
   5.142  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
   5.143  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
   5.144 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   5.145 +  ifeq ($(OS_VENDOR), Darwin)
   5.146 +	$(DSYMUTIL) $@
   5.147 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   5.148 +	$(ZIPEXE) -q -r -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
   5.149 +	$(RM) -r $(LIBJVM_DTRACE_DEBUGINFO)
   5.150 +    endif
   5.151 +  else
   5.152 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DTRACE_DEBUGINFO)
   5.153 +	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DTRACE_DEBUGINFO) $@
   5.154 +    ifeq ($(STRIP_POLICY),all_strip)
   5.155 +	$(QUIETLY) $(STRIP) $@
   5.156 +    else
   5.157 +      ifeq ($(STRIP_POLICY),min_strip)
   5.158 +	$(QUIETLY) $(STRIP) -x $@
   5.159 +      # implied else here is no stripping at all
   5.160 +      endif
   5.161 +    endif
   5.162 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   5.163 +	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) 
   5.164 +	$(RM) $(LIBJVM_DTRACE_DEBUGINFO)
   5.165 +    endif
   5.166 +  endif
   5.167 +endif
   5.168  
   5.169  #$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
   5.170  #             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
     6.1 --- a/make/bsd/makefiles/gcc.make	Thu Oct 17 06:29:58 2013 -0700
     6.2 +++ b/make/bsd/makefiles/gcc.make	Fri Oct 18 12:10:44 2013 -0700
     6.3 @@ -83,6 +83,11 @@
     6.4    AS   = $(CC) -c 
     6.5  endif
     6.6  
     6.7 +ifeq ($(OS_VENDOR), Darwin)
     6.8 +  ifeq ($(DSYMUTIL),)
     6.9 +    DSYMUTIL=dsymutil
    6.10 +  endif
    6.11 +endif
    6.12  
    6.13  ifeq ($(USE_CLANG), true)
    6.14    CC_VER_MAJOR := $(shell $(CC) -v 2>&1 | grep version | sed "s/.*version \([0-9]*\.[0-9]*\).*/\1/" | cut -d'.' -f1)
    6.15 @@ -434,6 +439,36 @@
    6.16    ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),)
    6.17    DEBUG_CFLAGS += -gstabs
    6.18    endif
    6.19 +  
    6.20 +  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    6.21 +    FASTDEBUG_CFLAGS/ia64  = -g
    6.22 +    FASTDEBUG_CFLAGS/amd64 = -g
    6.23 +    FASTDEBUG_CFLAGS/arm   = -g
    6.24 +    FASTDEBUG_CFLAGS/ppc   = -g
    6.25 +    FASTDEBUG_CFLAGS += $(FASTDEBUG_CFLAGS/$(BUILDARCH))
    6.26 +    ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),)
    6.27 +      ifeq ($(USE_CLANG), true)
    6.28 +        # Clang doesn't understand -gstabs
    6.29 +        FASTDEBUG_CFLAGS += -g
    6.30 +      else
    6.31 +        FASTDEBUG_CFLAGS += -gstabs
    6.32 +      endif
    6.33 +    endif
    6.34 +  
    6.35 +    OPT_CFLAGS/ia64  = -g
    6.36 +    OPT_CFLAGS/amd64 = -g
    6.37 +    OPT_CFLAGS/arm   = -g
    6.38 +    OPT_CFLAGS/ppc   = -g
    6.39 +    OPT_CFLAGS += $(OPT_CFLAGS/$(BUILDARCH))
    6.40 +    ifeq ($(OPT_CFLAGS/$(BUILDARCH)),)
    6.41 +      ifeq ($(USE_CLANG), true)
    6.42 +        # Clang doesn't understand -gstabs
    6.43 +        OPT_CFLAGS += -g
    6.44 +      else
    6.45 +        OPT_CFLAGS += -gstabs
    6.46 +      endif
    6.47 +    endif
    6.48 +  endif
    6.49  endif
    6.50  
    6.51  # If we are building HEADLESS, pass on to VM
     7.1 --- a/make/bsd/makefiles/jsig.make	Thu Oct 17 06:29:58 2013 -0700
     7.2 +++ b/make/bsd/makefiles/jsig.make	Fri Oct 18 12:10:44 2013 -0700
     7.3 @@ -1,5 +1,5 @@
     7.4  #
     7.5 -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
     7.6 +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
     7.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8  #
     7.9  # This code is free software; you can redistribute it and/or modify it
    7.10 @@ -29,13 +29,21 @@
    7.11  
    7.12  ifeq ($(OS_VENDOR), Darwin)
    7.13    LIBJSIG   = lib$(JSIG).dylib
    7.14 +
    7.15 +  LIBJSIG_DEBUGINFO   = lib$(JSIG).dylib.dSYM
    7.16 +  LIBJSIG_DIZ         = lib$(JSIG).diz
    7.17  else
    7.18    LIBJSIG   = lib$(JSIG).so
    7.19 +
    7.20 +  LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
    7.21 +  LIBJSIG_DIZ         = lib$(JSIG).diz
    7.22  endif
    7.23  
    7.24  JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
    7.25  
    7.26 -DEST_JSIG  = $(JDK_LIBDIR)/$(LIBJSIG)
    7.27 +DEST_JSIG           = $(JDK_LIBDIR)/$(LIBJSIG)
    7.28 +DEST_JSIG_DEBUGINFO = $(JDK_LIBDIR)/$(LIBJSIG_DEBUGINFO)
    7.29 +DEST_JSIG_DIZ       = $(JDK_LIBDIR)/$(LIBJSIG_DIZ)
    7.30  
    7.31  LIBJSIG_MAPFILE = $(MAKEFILES_DIR)/mapfile-vers-jsig
    7.32  
    7.33 @@ -55,9 +63,42 @@
    7.34  	@echo Making signal interposition lib...
    7.35  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
    7.36                           $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
    7.37 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    7.38 +  ifeq ($(OS_VENDOR), Darwin)
    7.39 +	$(DSYMUTIL) $@
    7.40 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    7.41 +	$(ZIPEXE) -q -r -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
    7.42 +	$(RM) -r $(LIBJSIG_DEBUGINFO)
    7.43 +    endif
    7.44 +  else
    7.45 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
    7.46 +	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
    7.47 +    ifeq ($(STRIP_POLICY),all_strip)
    7.48 +	$(QUIETLY) $(STRIP) $@
    7.49 +    else
    7.50 +      ifeq ($(STRIP_POLICY),min_strip)
    7.51 +	$(QUIETLY) $(STRIP) -g $@
    7.52 +      # implied else here is no stripping at all
    7.53 +      endif
    7.54 +    endif
    7.55 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    7.56 +	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
    7.57 +	$(RM) $(LIBJSIG_DEBUGINFO)
    7.58 +    endif
    7.59 +  endif
    7.60 +endif
    7.61  
    7.62  install_jsig: $(LIBJSIG)
    7.63  	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
    7.64 +ifeq ($(OS_VENDOR), Darwin)
    7.65 +	$(QUIETLY) test -d $(LIBJSIG_DEBUGINFO) && \
    7.66 +	    cp -f -r $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
    7.67 +else
    7.68 +	$(QUIETLY) test -f $(LIBJSIG_DEBUGINFO) && \
    7.69 +	    cp -f $(LIBJSIG_DEBUGINFO) $(DEST_JSIG_DEBUGINFO)
    7.70 +endif
    7.71 +	$(QUIETLY) test -f $(LIBJSIG_DIZ) && \
    7.72 +	    cp -f $(LIBJSIG_DIZ) $(DEST_JSIG_DIZ)
    7.73  	$(QUIETLY) cp -f $(LIBJSIG) $(DEST_JSIG) && echo "Done"
    7.74  
    7.75  .PHONY: install_jsig
     8.1 --- a/make/bsd/makefiles/product.make	Thu Oct 17 06:29:58 2013 -0700
     8.2 +++ b/make/bsd/makefiles/product.make	Fri Oct 18 12:10:44 2013 -0700
     8.3 @@ -1,5 +1,5 @@
     8.4  #
     8.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     8.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     8.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8  #
     8.9  # This code is free software; you can redistribute it and/or modify it
    8.10 @@ -43,15 +43,17 @@
    8.11  SYSDEFS += -DPRODUCT
    8.12  VERSION = optimized
    8.13  
    8.14 -# use -g to strip library as -x will discard its symbol table; -x is fine for
    8.15 -# executables.
    8.16 -ifdef CROSS_COMPILE_ARCH
    8.17 -  STRIP = $(ALT_COMPILER_PATH)/strip
    8.18 -else
    8.19 -  STRIP = strip
    8.20 +ifneq ($(OS_VENDOR), Darwin)
    8.21 +  # use -g to strip library as -x will discard its symbol table; -x is fine for
    8.22 +  # executables.
    8.23 +  ifdef CROSS_COMPILE_ARCH
    8.24 +    STRIP = $(ALT_COMPILER_PATH)/strip
    8.25 +  else
    8.26 +    STRIP = strip
    8.27 +  endif
    8.28 +  STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
    8.29 +  STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
    8.30 +
    8.31 +  # Don't strip in VM build; JDK build will strip libraries later
    8.32 +  # LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
    8.33  endif
    8.34 -STRIP_LIBJVM = $(STRIP) -g $@ || exit 1;
    8.35 -STRIP_AOUT   = $(STRIP) -x $@ || exit 1;
    8.36 -
    8.37 -# Don't strip in VM build; JDK build will strip libraries later
    8.38 -# LINK_LIB.CXX/POST_HOOK += $(STRIP_$(LINK_INTO))
     9.1 --- a/make/bsd/makefiles/saproc.make	Thu Oct 17 06:29:58 2013 -0700
     9.2 +++ b/make/bsd/makefiles/saproc.make	Fri Oct 18 12:10:44 2013 -0700
     9.3 @@ -28,9 +28,15 @@
     9.4  SAPROC   = saproc
     9.5  
     9.6  ifeq ($(OS_VENDOR), Darwin)
     9.7 -  LIBSAPROC   = lib$(SAPROC).dylib
     9.8 +  LIBSAPROC           = lib$(SAPROC).dylib
     9.9 +
    9.10 +  LIBSAPROC_DEBUGINFO = lib$(SAPROC).dylib.dSYM
    9.11 +  LIBSAPROC_DIZ       = lib$(SAPROC).diz
    9.12  else
    9.13 -  LIBSAPROC   = lib$(SAPROC).so
    9.14 +  LIBSAPROC           = lib$(SAPROC).so
    9.15 +
    9.16 +  LIBSAPROC_DEBUGINFO = lib$(SAPROC).debuginfo
    9.17 +  LIBSAPROC_DIZ       = lib$(SAPROC).diz
    9.18  endif
    9.19  
    9.20  AGENT_DIR = $(GAMMADIR)/agent
    9.21 @@ -70,7 +76,9 @@
    9.22  
    9.23  SAMAPFILE = $(SASRCDIR)/mapfile
    9.24  
    9.25 -DEST_SAPROC = $(JDK_LIBDIR)/$(LIBSAPROC)
    9.26 +DEST_SAPROC           = $(JDK_LIBDIR)/$(LIBSAPROC)
    9.27 +DEST_SAPROC_DEBUGINFO = $(JDK_LIBDIR)/$(LIBSAPROC_DEBUGINFO)
    9.28 +DEST_SAPROC_DIZ       = $(JDK_LIBDIR)/$(LIBSAPROC_DIZ)
    9.29  
    9.30  # DEBUG_BINARIES overrides everything, use full -g debug information
    9.31  ifeq ($(DEBUG_BINARIES), true)
    9.32 @@ -117,11 +125,42 @@
    9.33  	           $(SA_DEBUG_CFLAGS)                                   \
    9.34  	           -o $@                                                \
    9.35  	           $(SALIBS)
    9.36 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
    9.37 +  ifeq ($(OS_VENDOR), Darwin)
    9.38 +	$(DSYMUTIL) $@
    9.39 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    9.40 +	$(ZIPEXE) -q -r -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
    9.41 +	$(RM) -r $(LIBSAPROC_DEBUGINFO)
    9.42 +    endif
    9.43 +  else
    9.44 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
    9.45 +	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
    9.46 +    ifeq ($(STRIP_POLICY),all_strip)
    9.47 +	$(QUIETLY) $(STRIP) $@
    9.48 +    else
    9.49 +      ifeq ($(STRIP_POLICY),min_strip)
    9.50 +	$(QUIETLY) $(STRIP) -g $@
    9.51 +      # implied else here is no stripping at all
    9.52 +      endif
    9.53 +    endif
    9.54 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
    9.55 +	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
    9.56 +	$(RM) $(LIBSAPROC_DEBUGINFO)
    9.57 +    endif
    9.58 +  endif
    9.59 +endif
    9.60  
    9.61  install_saproc: $(BUILDLIBSAPROC)
    9.62 -	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
    9.63 -	  echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)";     \
    9.64 -	  cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done";  \
    9.65 -	fi
    9.66 +	@echo "Copying $(LIBSAPROC) to $(DEST_SAPROC)"
    9.67 +ifeq ($(OS_VENDOR), Darwin)
    9.68 +	$(QUIETLY) test -d $(LIBSAPROC_DEBUGINFO) && \
    9.69 +	    cp -f -r $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
    9.70 +else
    9.71 +	$(QUIETLY) test -f $(LIBSAPROC_DEBUGINFO) && \
    9.72 +	    cp -f $(LIBSAPROC_DEBUGINFO) $(DEST_SAPROC_DEBUGINFO)
    9.73 +endif
    9.74 +	$(QUIETLY) test -f $(LIBSAPROC_DIZ) && \
    9.75 +	    cp -f $(LIBSAPROC_DIZ) $(DEST_SAPROC_DIZ)
    9.76 +	$(QUIETLY) cp -f $(LIBSAPROC) $(DEST_SAPROC) && echo "Done"
    9.77  
    9.78  .PHONY: install_saproc
    10.1 --- a/make/bsd/makefiles/universal.gmk	Thu Oct 17 06:29:58 2013 -0700
    10.2 +++ b/make/bsd/makefiles/universal.gmk	Fri Oct 18 12:10:44 2013 -0700
    10.3 @@ -1,5 +1,5 @@
    10.4  #
    10.5 -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
    10.6 +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
    10.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8  #
    10.9  # This code is free software; you can redistribute it and/or modify it
   10.10 @@ -19,7 +19,7 @@
   10.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   10.12  # or visit www.oracle.com if you need additional information or have any
   10.13  # questions.
   10.14 -#  
   10.15 +#
   10.16  #
   10.17  
   10.18  # macosx universal builds
   10.19 @@ -35,15 +35,15 @@
   10.20  all_product_universal:
   10.21  #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_PRODUCT_TARGETS)
   10.22  	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_PRODUCT_TARGETS)
   10.23 -	$(QUIETLY) $(MAKE) EXPORT_SUBDIR= universalize
   10.24 +	$(QUIETLY) $(MAKE) BUILD_FLAVOR=product EXPORT_SUBDIR= universalize
   10.25  all_fastdebug_universal:
   10.26  #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_FASTDEBUG_TARGETS)
   10.27  	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_FASTDEBUG_TARGETS)
   10.28 -	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/fastdebug universalize
   10.29 +	$(QUIETLY) $(MAKE) BUILD_FLAVOR=fastdebug EXPORT_SUBDIR=/fastdebug universalize
   10.30  all_debug_universal:
   10.31  #	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=32 $(COMMON_VM_DEBUG_TARGETS)
   10.32  	$(QUIETLY) $(MAKE) ARCH_DATA_MODEL=64 $(COMMON_VM_DEBUG_TARGETS)
   10.33 -	$(QUIETLY) $(MAKE) EXPORT_SUBDIR=/debug universalize
   10.34 +	$(QUIETLY) $(MAKE) BUILD_FLAVOR=debug EXPORT_SUBDIR=/debug universalize
   10.35  
   10.36  
   10.37  # Consolidate architecture builds into a single Universal binary
   10.38 @@ -57,18 +57,18 @@
   10.39  	if [ -n "$${BUILT_LIPO_FILES}" ]; then \
   10.40  	  $(MKDIR) -p $(shell dirname $@); \
   10.41  	  lipo -create -output $@ $${BUILT_LIPO_FILES}; \
   10.42 -	fi	
   10.43 +	fi
   10.44  
   10.45  
   10.46  # Copy built non-universal binaries in place
   10.47 +# - copies directories; including empty dirs
   10.48 +# - copies files, symlinks, other non-directory files
   10.49  $(UNIVERSAL_COPY_LIST):
   10.50 -	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) 2>/dev/null`"; \
   10.51 +	BUILT_COPY_FILES="`find $(EXPORT_JRE_LIB_DIR)/{i386,amd64}/$(subst $(EXPORT_JRE_LIB_DIR)/,,$@) -prune 2>/dev/null`"; \
   10.52  	if [ -n "$${BUILT_COPY_FILES}" ]; then \
   10.53  	  for i in $${BUILT_COPY_FILES}; do \
   10.54 -	    if [ -f $${i} ]; then \
   10.55 -	      $(MKDIR) -p $(shell dirname $@); \
   10.56 -	      $(CP) $${i} $@; \
   10.57 -	    fi; \
   10.58 +	    $(MKDIR) -p $(shell dirname $@); \
   10.59 +	    $(CP) -R $${i} $@; \
   10.60  	  done; \
   10.61  	fi
   10.62  
    11.1 --- a/make/bsd/makefiles/vm.make	Thu Oct 17 06:29:58 2013 -0700
    11.2 +++ b/make/bsd/makefiles/vm.make	Fri Oct 18 12:10:44 2013 -0700
    11.3 @@ -60,10 +60,16 @@
    11.4  # The order is important for the precompiled headers to work.
    11.5  INCLUDES += $(PRECOMPILED_HEADER_DIR:%=-I%) $(Src_Dirs_I:%=-I%)
    11.6  
    11.7 -ifeq (${VERSION}, debug)
    11.8 +# SYMFLAG is used by {jsig,saproc}.make
    11.9 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   11.10 +  # always build with debug info when we can create .dSYM/.debuginfo files
   11.11    SYMFLAG = -g
   11.12  else
   11.13 -  SYMFLAG =
   11.14 +  ifeq (${VERSION}, debug)
   11.15 +    SYMFLAG = -g
   11.16 +  else
   11.17 +    SYMFLAG =
   11.18 +  endif
   11.19  endif
   11.20  
   11.21  # HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
   11.22 @@ -147,8 +153,14 @@
   11.23    ifeq (${VERSION}, $(filter ${VERSION}, debug fastdebug))
   11.24      CFLAGS += -DALLOW_OPERATOR_NEW_USAGE
   11.25    endif
   11.26 +
   11.27 +  LIBJVM_DEBUGINFO   = lib$(JVM).dylib.dSYM
   11.28 +  LIBJVM_DIZ         = lib$(JVM).diz
   11.29  else
   11.30    LIBJVM   = lib$(JVM).so
   11.31 +
   11.32 +  LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
   11.33 +  LIBJVM_DIZ         = lib$(JVM).diz
   11.34  endif
   11.35  
   11.36  SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
   11.37 @@ -322,10 +334,47 @@
   11.38  	    rm -f $@.1; ln -s $@ $@.1;                                  \
   11.39  	}
   11.40  
   11.41 -DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
   11.42 +ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   11.43 +  ifeq ($(OS_VENDOR), Darwin)
   11.44 +	$(DSYMUTIL) $@
   11.45 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   11.46 +	$(ZIPEXE) -q -r -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
   11.47 +	$(RM) -r $(LIBJVM_DEBUGINFO)
   11.48 +    endif
   11.49 +  else
   11.50 +	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJVM_DEBUGINFO)
   11.51 +	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJVM_DEBUGINFO) $@
   11.52 +    ifeq ($(STRIP_POLICY),all_strip)
   11.53 +	$(QUIETLY) $(STRIP) $@
   11.54 +    else
   11.55 +      ifeq ($(STRIP_POLICY),min_strip)
   11.56 +	$(QUIETLY) $(STRIP) -g $@
   11.57 +      # implied else here is no stripping at all
   11.58 +      endif
   11.59 +    endif
   11.60 +    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   11.61 +	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
   11.62 +	$(RM) $(LIBJVM_DEBUGINFO)
   11.63 +    endif
   11.64 +  endif
   11.65 +endif
   11.66 +
   11.67 +DEST_SUBDIR        = $(JDK_LIBDIR)/$(VM_SUBDIR)
   11.68 +DEST_JVM           = $(DEST_SUBDIR)/$(LIBJVM)
   11.69 +DEST_JVM_DEBUGINFO = $(DEST_SUBDIR)/$(LIBJVM_DEBUGINFO)
   11.70 +DEST_JVM_DIZ       = $(DEST_SUBDIR)/$(LIBJVM_DIZ)
   11.71  
   11.72  install_jvm: $(LIBJVM)
   11.73  	@echo "Copying $(LIBJVM) to $(DEST_JVM)"
   11.74 +ifeq ($(OS_VENDOR), Darwin)
   11.75 +	$(QUIETLY) test -d $(LIBJVM_DEBUGINFO) && \
   11.76 +	    cp -f -r $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
   11.77 +else
   11.78 +	$(QUIETLY) test -f $(LIBJVM_DEBUGINFO) && \
   11.79 +	    cp -f $(LIBJVM_DEBUGINFO) $(DEST_JVM_DEBUGINFO)
   11.80 +endif
   11.81 +	$(QUIETLY) test -f $(LIBJVM_DIZ) && \
   11.82 +	    cp -f $(LIBJVM_DIZ) $(DEST_JVM_DIZ)
   11.83  	$(QUIETLY) cp -f $(LIBJVM) $(DEST_JVM) && echo "Done"
   11.84  
   11.85  #----------------------------------------------------------------------
   11.86 @@ -340,11 +389,8 @@
   11.87  #----------------------------------------------------------------------
   11.88  
   11.89  ifeq ($(OS_VENDOR), Darwin)
   11.90 -$(LIBJVM).dSYM: $(LIBJVM)
   11.91 -	dsymutil $(LIBJVM)
   11.92 -
   11.93  # no libjvm_db for macosx
   11.94 -build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck $(LIBJVM).dSYM
   11.95 +build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(BUILDLIBSAPROC) dtraceCheck
   11.96  	echo "Doing vm.make build:"
   11.97  else
   11.98  build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC)
    12.1 --- a/make/defs.make	Thu Oct 17 06:29:58 2013 -0700
    12.2 +++ b/make/defs.make	Fri Oct 18 12:10:44 2013 -0700
    12.3 @@ -77,6 +77,16 @@
    12.4  @$(RM) $@
    12.5  $(CP) $< $@
    12.6  endef
    12.7 +
    12.8 +# MacOS X strongly discourages 'cp -r' and provides 'cp -R' instead.
    12.9 +# May need to have a MacOS X specific definition of install-dir
   12.10 +# sometime in the future.
   12.11 +define install-dir
   12.12 +@$(MKDIR) -p $(@D)
   12.13 +@$(RM) -r $@
   12.14 +$(CP) -r $< $@
   12.15 +endef
   12.16 +
   12.17  define prep-target
   12.18  @$(MKDIR) -p $(@D)
   12.19  @$(RM) $@
    13.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Oct 17 06:29:58 2013 -0700
    13.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Fri Oct 18 12:10:44 2013 -0700
    13.3 @@ -3100,6 +3100,10 @@
    13.4    }
    13.5  }
    13.6  
    13.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
    13.8 +  fatal("Type profiling not implemented on this platform");
    13.9 +}
   13.10 +
   13.11  void LIR_Assembler::align_backward_branch_target() {
   13.12    __ align(OptoLoopAlignment);
   13.13  }
    14.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Oct 17 06:29:58 2013 -0700
    14.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Fri Oct 18 12:10:44 2013 -0700
    14.3 @@ -1076,6 +1076,25 @@
    14.4  
    14.5    __ verify_not_null_oop(Oexception);
    14.6  
    14.7 +#ifdef ASSERT
    14.8 +  // check that fields in JavaThread for exception oop and issuing pc are
    14.9 +  // empty before writing to them
   14.10 +  Label oop_empty;
   14.11 +  Register scratch = I7;  // We can use I7 here because it's overwritten later anyway.
   14.12 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_oop_offset()), scratch);
   14.13 +  __ br_null(scratch, false, Assembler::pt, oop_empty);
   14.14 +  __ delayed()->nop();
   14.15 +  __ stop("exception oop already set");
   14.16 +  __ bind(oop_empty);
   14.17 +
   14.18 +  Label pc_empty;
   14.19 +  __ ld_ptr(Address(G2_thread, JavaThread::exception_pc_offset()), scratch);
   14.20 +  __ br_null(scratch, false, Assembler::pt, pc_empty);
   14.21 +  __ delayed()->nop();
   14.22 +  __ stop("exception pc already set");
   14.23 +  __ bind(pc_empty);
   14.24 +#endif
   14.25 +
   14.26    // save the exception and issuing pc in the thread
   14.27    __ st_ptr(Oexception,  G2_thread, in_bytes(JavaThread::exception_oop_offset()));
   14.28    __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
    15.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Oct 17 06:29:58 2013 -0700
    15.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp	Fri Oct 18 12:10:44 2013 -0700
    15.3 @@ -76,6 +76,8 @@
    15.4  // GC Ergo Flags
    15.5  define_pd_global(uintx, CMSYoungGenPerWorker, 16*M);  // default max size of CMS young gen, per GC worker thread
    15.6  
    15.7 +define_pd_global(uintx, TypeProfileLevel, 0);
    15.8 +
    15.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
   15.10                                                                              \
   15.11    product(intx, UseVIS, 99,                                                 \
    16.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Thu Oct 17 06:29:58 2013 -0700
    16.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Oct 18 12:10:44 2013 -0700
    16.3 @@ -3581,6 +3581,7 @@
    16.4    // the pending exception will be picked up the interpreter.
    16.5    __ ld_ptr(G2_thread, in_bytes(JavaThread::exception_oop_offset()), Oexception);
    16.6    __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_oop_offset()));
    16.7 +  __ st_ptr(G0, G2_thread, in_bytes(JavaThread::exception_pc_offset()));
    16.8    __ bind(noException);
    16.9  
   16.10    // deallocate the deoptimization frame taking care to preserve the return values
    17.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Oct 17 06:29:58 2013 -0700
    17.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Fri Oct 18 12:10:44 2013 -0700
    17.3 @@ -3632,6 +3632,161 @@
    17.4    }
    17.5  }
    17.6  
    17.7 +void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
    17.8 +  Register obj = op->obj()->as_register();
    17.9 +  Register tmp = op->tmp()->as_pointer_register();
   17.10 +  Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
   17.11 +  ciKlass* exact_klass = op->exact_klass();
   17.12 +  intptr_t current_klass = op->current_klass();
   17.13 +  bool not_null = op->not_null();
   17.14 +  bool no_conflict = op->no_conflict();
   17.15 +
   17.16 +  Label update, next, none;
   17.17 +
   17.18 +  bool do_null = !not_null;
   17.19 +  bool exact_klass_set = exact_klass != NULL && ciTypeEntries::valid_ciklass(current_klass) == exact_klass;
   17.20 +  bool do_update = !TypeEntries::is_type_unknown(current_klass) && !exact_klass_set;
   17.21 +
   17.22 +  assert(do_null || do_update, "why are we here?");
   17.23 +  assert(!TypeEntries::was_null_seen(current_klass) || do_update, "why are we here?");
   17.24 +
   17.25 +  __ verify_oop(obj);
   17.26 +
   17.27 +  if (tmp != obj) {
   17.28 +    __ mov(tmp, obj);
   17.29 +  }
   17.30 +  if (do_null) {
   17.31 +    __ testptr(tmp, tmp);
   17.32 +    __ jccb(Assembler::notZero, update);
   17.33 +    if (!TypeEntries::was_null_seen(current_klass)) {
   17.34 +      __ orptr(mdo_addr, TypeEntries::null_seen);
   17.35 +    }
   17.36 +    if (do_update) {
   17.37 +#ifndef ASSERT
   17.38 +      __ jmpb(next);
   17.39 +    }
   17.40 +#else
   17.41 +      __ jmp(next);
   17.42 +    }
   17.43 +  } else {
   17.44 +    __ testptr(tmp, tmp);
   17.45 +    __ jccb(Assembler::notZero, update);
   17.46 +    __ stop("unexpect null obj");
   17.47 +#endif
   17.48 +  }
   17.49 +
   17.50 +  __ bind(update);
   17.51 +
   17.52 +  if (do_update) {
   17.53 +#ifdef ASSERT
   17.54 +    if (exact_klass != NULL) {
   17.55 +      Label ok;
   17.56 +      __ load_klass(tmp, tmp);
   17.57 +      __ push(tmp);
   17.58 +      __ mov_metadata(tmp, exact_klass->constant_encoding());
   17.59 +      __ cmpptr(tmp, Address(rsp, 0));
   17.60 +      __ jccb(Assembler::equal, ok);
   17.61 +      __ stop("exact klass and actual klass differ");
   17.62 +      __ bind(ok);
   17.63 +      __ pop(tmp);
   17.64 +    }
   17.65 +#endif
   17.66 +    if (!no_conflict) {
   17.67 +      if (exact_klass == NULL || TypeEntries::is_type_none(current_klass)) {
   17.68 +        if (exact_klass != NULL) {
   17.69 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
   17.70 +        } else {
   17.71 +          __ load_klass(tmp, tmp);
   17.72 +        }
   17.73 +
   17.74 +        __ xorptr(tmp, mdo_addr);
   17.75 +        __ testptr(tmp, TypeEntries::type_klass_mask);
   17.76 +        // klass seen before, nothing to do. The unknown bit may have been
   17.77 +        // set already but no need to check.
   17.78 +        __ jccb(Assembler::zero, next);
   17.79 +
   17.80 +        __ testptr(tmp, TypeEntries::type_unknown);
   17.81 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   17.82 +
   17.83 +        if (TypeEntries::is_type_none(current_klass)) {
   17.84 +          __ cmpptr(mdo_addr, 0);
   17.85 +          __ jccb(Assembler::equal, none);
   17.86 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
   17.87 +          __ jccb(Assembler::equal, none);
   17.88 +          // There is a chance that the checks above (re-reading profiling
   17.89 +          // data from memory) fail if another thread has just set the
   17.90 +          // profiling to this obj's klass
   17.91 +          __ xorptr(tmp, mdo_addr);
   17.92 +          __ testptr(tmp, TypeEntries::type_klass_mask);
   17.93 +          __ jccb(Assembler::zero, next);
   17.94 +        }
   17.95 +      } else {
   17.96 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
   17.97 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "conflict only");
   17.98 +
   17.99 +        __ movptr(tmp, mdo_addr);
  17.100 +        __ testptr(tmp, TypeEntries::type_unknown);
  17.101 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  17.102 +      }
  17.103 +
  17.104 +      // different than before. Cannot keep accurate profile.
  17.105 +      __ orptr(mdo_addr, TypeEntries::type_unknown);
  17.106 +
  17.107 +      if (TypeEntries::is_type_none(current_klass)) {
  17.108 +        __ jmpb(next);
  17.109 +
  17.110 +        __ bind(none);
  17.111 +        // first time here. Set profile type.
  17.112 +        __ movptr(mdo_addr, tmp);
  17.113 +      }
  17.114 +    } else {
  17.115 +      // There's a single possible klass at this profile point
  17.116 +      assert(exact_klass != NULL, "should be");
  17.117 +      if (TypeEntries::is_type_none(current_klass)) {
  17.118 +        __ mov_metadata(tmp, exact_klass->constant_encoding());
  17.119 +        __ xorptr(tmp, mdo_addr);
  17.120 +        __ testptr(tmp, TypeEntries::type_klass_mask);
  17.121 +#ifdef ASSERT
  17.122 +        __ jcc(Assembler::zero, next);
  17.123 +
  17.124 +        {
  17.125 +          Label ok;
  17.126 +          __ push(tmp);
  17.127 +          __ cmpptr(mdo_addr, 0);
  17.128 +          __ jcc(Assembler::equal, ok);
  17.129 +          __ cmpptr(mdo_addr, TypeEntries::null_seen);
  17.130 +          __ jcc(Assembler::equal, ok);
  17.131 +          // may have been set by another thread
  17.132 +          __ mov_metadata(tmp, exact_klass->constant_encoding());
  17.133 +          __ xorptr(tmp, mdo_addr);
  17.134 +          __ testptr(tmp, TypeEntries::type_mask);
  17.135 +          __ jcc(Assembler::zero, ok);
  17.136 +
  17.137 +          __ stop("unexpected profiling mismatch");
  17.138 +          __ bind(ok);
  17.139 +          __ pop(tmp);
  17.140 +        }
  17.141 +#else
  17.142 +        __ jccb(Assembler::zero, next);
  17.143 +#endif
  17.144 +        // first time here. Set profile type.
  17.145 +        __ movptr(mdo_addr, tmp);
  17.146 +      } else {
  17.147 +        assert(ciTypeEntries::valid_ciklass(current_klass) != NULL &&
  17.148 +               ciTypeEntries::valid_ciklass(current_klass) != exact_klass, "inconsistent");
  17.149 +
  17.150 +        __ movptr(tmp, mdo_addr);
  17.151 +        __ testptr(tmp, TypeEntries::type_unknown);
  17.152 +        __ jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
  17.153 +
  17.154 +        __ orptr(mdo_addr, TypeEntries::type_unknown);
  17.155 +      }
  17.156 +    }
  17.157 +
  17.158 +    __ bind(next);
  17.159 +  }
  17.160 +}
  17.161 +
  17.162  void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  17.163    Unimplemented();
  17.164  }
    18.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Thu Oct 17 06:29:58 2013 -0700
    18.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Fri Oct 18 12:10:44 2013 -0700
    18.3 @@ -79,6 +79,8 @@
    18.4  // GC Ergo Flags
    18.5  define_pd_global(uintx, CMSYoungGenPerWorker, 64*M);  // default max size of CMS young gen, per GC worker thread
    18.6  
    18.7 +define_pd_global(uintx, TypeProfileLevel, 11);
    18.8 +
    18.9  #define ARCH_FLAGS(develop, product, diagnostic, experimental, notproduct) \
   18.10                                                                              \
   18.11    develop(bool, IEEEPrecision, true,                                        \
    19.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Thu Oct 17 06:29:58 2013 -0700
    19.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Fri Oct 18 12:10:44 2013 -0700
    19.3 @@ -1046,6 +1046,158 @@
    19.4    }
    19.5  }
    19.6  
    19.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
    19.8 +  Label update, next, none;
    19.9 +
   19.10 +  verify_oop(obj);
   19.11 +
   19.12 +  testptr(obj, obj);
   19.13 +  jccb(Assembler::notZero, update);
   19.14 +  orptr(mdo_addr, TypeEntries::null_seen);
   19.15 +  jmpb(next);
   19.16 +
   19.17 +  bind(update);
   19.18 +  load_klass(obj, obj);
   19.19 +
   19.20 +  xorptr(obj, mdo_addr);
   19.21 +  testptr(obj, TypeEntries::type_klass_mask);
   19.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
   19.23 +                               // do. The unknown bit may have been
   19.24 +                               // set already but no need to check.
   19.25 +
   19.26 +  testptr(obj, TypeEntries::type_unknown);
   19.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   19.28 +
   19.29 +  cmpptr(mdo_addr, 0);
   19.30 +  jccb(Assembler::equal, none);
   19.31 +  cmpptr(mdo_addr, TypeEntries::null_seen);
   19.32 +  jccb(Assembler::equal, none);
   19.33 +  // There is a chance that the checks above (re-reading profiling
   19.34 +  // data from memory) fail if another thread has just set the
   19.35 +  // profiling to this obj's klass
   19.36 +  xorptr(obj, mdo_addr);
   19.37 +  testptr(obj, TypeEntries::type_klass_mask);
   19.38 +  jccb(Assembler::zero, next);
   19.39 +
   19.40 +  // different than before. Cannot keep accurate profile.
   19.41 +  orptr(mdo_addr, TypeEntries::type_unknown);
   19.42 +  jmpb(next);
   19.43 +
   19.44 +  bind(none);
   19.45 +  // first time here. Set profile type.
   19.46 +  movptr(mdo_addr, obj);
   19.47 +
   19.48 +  bind(next);
   19.49 +}
   19.50 +
   19.51 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
   19.52 +  if (!ProfileInterpreter) {
   19.53 +    return;
   19.54 +  }
   19.55 +
   19.56 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
   19.57 +    Label profile_continue;
   19.58 +
   19.59 +    test_method_data_pointer(mdp, profile_continue);
   19.60 +
   19.61 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
   19.62 +
   19.63 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
   19.64 +    jcc(Assembler::notEqual, profile_continue);
   19.65 +
   19.66 +    if (MethodData::profile_arguments()) {
   19.67 +      Label done;
   19.68 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
   19.69 +      addptr(mdp, off_to_args);
   19.70 +
   19.71 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
   19.72 +        if (i > 0 || MethodData::profile_return()) {
   19.73 +          // If return value type is profiled we may have no argument to profile
   19.74 +          movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   19.75 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
   19.76 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
   19.77 +          jcc(Assembler::less, done);
   19.78 +        }
   19.79 +        movptr(tmp, Address(callee, Method::const_offset()));
   19.80 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
   19.81 +        // stack offset o (zero based) from the start of the argument
   19.82 +        // list, for n arguments translates into offset n - o - 1 from
   19.83 +        // the end of the argument list
   19.84 +        subl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
   19.85 +        subl(tmp, 1);
   19.86 +        Address arg_addr = argument_address(tmp);
   19.87 +        movptr(tmp, arg_addr);
   19.88 +
   19.89 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
   19.90 +        profile_obj_type(tmp, mdo_arg_addr);
   19.91 +
   19.92 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
   19.93 +        addptr(mdp, to_add);
   19.94 +        off_to_args += to_add;
   19.95 +      }
   19.96 +
   19.97 +      if (MethodData::profile_return()) {
   19.98 +        movl(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   19.99 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
  19.100 +      }
  19.101 +
  19.102 +      bind(done);
  19.103 +
  19.104 +      if (MethodData::profile_return()) {
  19.105 +        // We're right after the type profile for the last
  19.106 +        // argument. tmp is the number of cells left in the
  19.107 +        // CallTypeData/VirtualCallTypeData to reach its end. Non null
  19.108 +        // if there's a return to profile.
  19.109 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
  19.110 +        shll(tmp, exact_log2(DataLayout::cell_size));
  19.111 +        addptr(mdp, tmp);
  19.112 +      }
  19.113 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
  19.114 +    } else {
  19.115 +      assert(MethodData::profile_return(), "either profile call args or call ret");
  19.116 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
  19.117 +    }
  19.118 +
  19.119 +    // mdp points right after the end of the
  19.120 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
  19.121 +    // return value type if there's one
  19.122 +
  19.123 +    bind(profile_continue);
  19.124 +  }
  19.125 +}
  19.126 +
  19.127 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  19.128 +  assert_different_registers(mdp, ret, tmp, rsi);
  19.129 +  if (ProfileInterpreter && MethodData::profile_return()) {
  19.130 +    Label profile_continue, done;
  19.131 +
  19.132 +    test_method_data_pointer(mdp, profile_continue);
  19.133 +
  19.134 +    if (MethodData::profile_return_jsr292_only()) {
  19.135 +      // If we don't profile all invoke bytecodes we must make sure
  19.136 +      // it's a bytecode we indeed profile. We can't go back to the
  19.137 +      // beginning of the ProfileData we intend to update to check its
  19.138 +      // type because we're right after it and we don't know its
  19.139 +      // length
  19.140 +      Label do_profile;
  19.141 +      cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
  19.142 +      jcc(Assembler::equal, do_profile);
  19.143 +      cmpb(Address(rsi, 0), Bytecodes::_invokehandle);
  19.144 +      jcc(Assembler::equal, do_profile);
  19.145 +      get_method(tmp);
  19.146 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
  19.147 +      jcc(Assembler::notEqual, profile_continue);
  19.148 +
  19.149 +      bind(do_profile);
  19.150 +    }
  19.151 +
  19.152 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
  19.153 +    mov(tmp, ret);
  19.154 +    profile_obj_type(tmp, mdo_ret_addr);
  19.155 +
  19.156 +    bind(profile_continue);
  19.157 +  }
  19.158 +}
  19.159  
  19.160  void InterpreterMacroAssembler::profile_call(Register mdp) {
  19.161    if (ProfileInterpreter) {
    20.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Thu Oct 17 06:29:58 2013 -0700
    20.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Fri Oct 18 12:10:44 2013 -0700
    20.3 @@ -215,6 +215,9 @@
    20.4  
    20.5    void profile_taken_branch(Register mdp, Register bumped_count);
    20.6    void profile_not_taken_branch(Register mdp);
    20.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
    20.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
    20.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
   20.10    void profile_call(Register mdp);
   20.11    void profile_final_call(Register mdp);
   20.12    void profile_virtual_call(Register receiver, Register mdp, Register scratch2,
    21.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Thu Oct 17 06:29:58 2013 -0700
    21.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Fri Oct 18 12:10:44 2013 -0700
    21.3 @@ -1067,6 +1067,159 @@
    21.4    }
    21.5  }
    21.6  
    21.7 +void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& mdo_addr) {
    21.8 +  Label update, next, none;
    21.9 +
   21.10 +  verify_oop(obj);
   21.11 +
   21.12 +  testptr(obj, obj);
   21.13 +  jccb(Assembler::notZero, update);
   21.14 +  orptr(mdo_addr, TypeEntries::null_seen);
   21.15 +  jmpb(next);
   21.16 +
   21.17 +  bind(update);
   21.18 +  load_klass(obj, obj);
   21.19 +
   21.20 +  xorptr(obj, mdo_addr);
   21.21 +  testptr(obj, TypeEntries::type_klass_mask);
   21.22 +  jccb(Assembler::zero, next); // klass seen before, nothing to
   21.23 +                               // do. The unknown bit may have been
   21.24 +                               // set already but no need to check.
   21.25 +
   21.26 +  testptr(obj, TypeEntries::type_unknown);
   21.27 +  jccb(Assembler::notZero, next); // already unknown. Nothing to do anymore.
   21.28 +
   21.29 +  // There is a chance that by the time we do these checks (re-reading
   21.31 +  // profiling data from memory) another thread has set the profiling
   21.32 +  // to this obj's klass and we set the profiling as unknown
   21.32 +  // erroneously
   21.33 +  cmpptr(mdo_addr, 0);
   21.34 +  jccb(Assembler::equal, none);
   21.35 +  cmpptr(mdo_addr, TypeEntries::null_seen);
   21.36 +  jccb(Assembler::equal, none);
   21.37 +  // There is a chance that the checks above (re-reading profiling
   21.38 +  // data from memory) fail if another thread has just set the
   21.39 +  // profiling to this obj's klass
   21.40 +  xorptr(obj, mdo_addr);
   21.41 +  testptr(obj, TypeEntries::type_klass_mask);
   21.42 +  jccb(Assembler::zero, next);
   21.43 +
   21.44 +  // different than before. Cannot keep accurate profile.
   21.45 +  orptr(mdo_addr, TypeEntries::type_unknown);
   21.46 +  jmpb(next);
   21.47 +
   21.48 +  bind(none);
   21.49 +  // first time here. Set profile type.
   21.50 +  movptr(mdo_addr, obj);
   21.51 +
   21.52 +  bind(next);
   21.53 +}
   21.54 +
   21.55 +void InterpreterMacroAssembler::profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual) {
   21.56 +  if (!ProfileInterpreter) {
   21.57 +    return;
   21.58 +  }
   21.59 +
   21.60 +  if (MethodData::profile_arguments() || MethodData::profile_return()) {
   21.61 +    Label profile_continue;
   21.62 +
   21.63 +    test_method_data_pointer(mdp, profile_continue);
   21.64 +
   21.65 +    int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
   21.66 +
   21.67 +    cmpb(Address(mdp, in_bytes(DataLayout::tag_offset()) - off_to_start), is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
   21.68 +    jcc(Assembler::notEqual, profile_continue);
   21.69 +
   21.70 +    if (MethodData::profile_arguments()) {
   21.71 +      Label done;
   21.72 +      int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
   21.73 +      addptr(mdp, off_to_args);
   21.74 +
   21.75 +      for (int i = 0; i < TypeProfileArgsLimit; i++) {
   21.76 +        if (i > 0 || MethodData::profile_return()) {
   21.77 +          // If return value type is profiled we may have no argument to profile
   21.78 +          movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
   21.79 +          subl(tmp, i*TypeStackSlotEntries::per_arg_count());
   21.80 +          cmpl(tmp, TypeStackSlotEntries::per_arg_count());
   21.81 +          jcc(Assembler::less, done);
   21.82 +        }
   21.83 +        movptr(tmp, Address(callee, Method::const_offset()));
   21.84 +        load_unsigned_short(tmp, Address(tmp, ConstMethod::size_of_parameters_offset()));
   21.85 +        subq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args));
   21.86 +        subl(tmp, 1);
   21.87 +        Address arg_addr = argument_address(tmp);
   21.88 +        movptr(tmp, arg_addr);
   21.89 +
   21.90 +        Address mdo_arg_addr(mdp, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args);
   21.91 +        profile_obj_type(tmp, mdo_arg_addr);
   21.92 +
   21.93 +        int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
   21.94 +        addptr(mdp, to_add);
   21.95 +        off_to_args += to_add;
   21.96 +      }
   21.97 +
   21.98 +      if (MethodData::profile_return()) {
   21.99 +        movq(tmp, Address(mdp, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args));
  21.100 +        subl(tmp, TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
  21.101 +      }
  21.102 +
  21.103 +      bind(done);
  21.104 +
  21.105 +      if (MethodData::profile_return()) {
   21.106 +        // We're right after the type profile for the last
   21.107 +        // argument. tmp is the number of cells left in the
   21.108 +        // CallTypeData/VirtualCallTypeData to reach its end. Non-null
   21.109 +        // if there's a return to profile.
  21.110 +        assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
  21.111 +        shll(tmp, exact_log2(DataLayout::cell_size));
  21.112 +        addptr(mdp, tmp);
  21.113 +      }
  21.114 +      movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp);
  21.115 +    } else {
  21.116 +      assert(MethodData::profile_return(), "either profile call args or call ret");
  21.117 +      update_mdp_by_constant(mdp, in_bytes(ReturnTypeEntry::size()));
  21.118 +    }
  21.119 +
  21.120 +    // mdp points right after the end of the
  21.121 +    // CallTypeData/VirtualCallTypeData, right after the cells for the
  21.122 +    // return value type if there's one
  21.123 +
  21.124 +    bind(profile_continue);
  21.125 +  }
  21.126 +}
  21.127 +
  21.128 +void InterpreterMacroAssembler::profile_return_type(Register mdp, Register ret, Register tmp) {
  21.129 +  assert_different_registers(mdp, ret, tmp, r13);
  21.130 +  if (ProfileInterpreter && MethodData::profile_return()) {
  21.131 +    Label profile_continue, done;
  21.132 +
  21.133 +    test_method_data_pointer(mdp, profile_continue);
  21.134 +
  21.135 +    if (MethodData::profile_return_jsr292_only()) {
   21.136 +      // If we don't profile all invoke bytecodes we must make sure
   21.137 +      // it's a bytecode we indeed profile. We can't go back to the
   21.138 +      // beginning of the ProfileData we intend to update to check its
   21.139 +      // type because we're right after it and we don't know its
   21.140 +      // length
  21.141 +      Label do_profile;
  21.142 +      cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
  21.143 +      jcc(Assembler::equal, do_profile);
  21.144 +      cmpb(Address(r13, 0), Bytecodes::_invokehandle);
  21.145 +      jcc(Assembler::equal, do_profile);
  21.146 +      get_method(tmp);
  21.147 +      cmpb(Address(tmp, Method::intrinsic_id_offset_in_bytes()), vmIntrinsics::_compiledLambdaForm);
  21.148 +      jcc(Assembler::notEqual, profile_continue);
  21.149 +
  21.150 +      bind(do_profile);
  21.151 +    }
  21.152 +
  21.153 +    Address mdo_ret_addr(mdp, -in_bytes(ReturnTypeEntry::size()));
  21.154 +    mov(tmp, ret);
  21.155 +    profile_obj_type(tmp, mdo_ret_addr);
  21.156 +
  21.157 +    bind(profile_continue);
  21.158 +  }
  21.159 +}
  21.160  
  21.161  void InterpreterMacroAssembler::profile_call(Register mdp) {
  21.162    if (ProfileInterpreter) {
    22.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Thu Oct 17 06:29:58 2013 -0700
    22.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Fri Oct 18 12:10:44 2013 -0700
    22.3 @@ -224,6 +224,9 @@
    22.4  
    22.5    void profile_taken_branch(Register mdp, Register bumped_count);
    22.6    void profile_not_taken_branch(Register mdp);
    22.7 +  void profile_obj_type(Register obj, const Address& mdo_addr);
    22.8 +  void profile_arguments_type(Register mdp, Register callee, Register tmp, bool is_virtual);
    22.9 +  void profile_return_type(Register mdp, Register ret, Register tmp);
   22.10    void profile_call(Register mdp);
   22.11    void profile_final_call(Register mdp);
   22.12    void profile_virtual_call(Register receiver, Register mdp,
    23.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Thu Oct 17 06:29:58 2013 -0700
    23.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Fri Oct 18 12:10:44 2013 -0700
    23.3 @@ -773,6 +773,7 @@
    23.4    void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    23.5    void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    23.6    void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
    23.7 +  void orptr(Address dst, int32_t imm32) { LP64_ONLY(orq(dst, imm32)) NOT_LP64(orl(dst, imm32)); }
    23.8  
    23.9    void testptr(Register src, int32_t imm32) {  LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
   23.10    void testptr(Register src1, Register src2);
    24.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Thu Oct 17 06:29:58 2013 -0700
    24.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Fri Oct 18 12:10:44 2013 -0700
    24.3 @@ -194,6 +194,12 @@
    24.4    __ restore_bcp();
    24.5    __ restore_locals();
    24.6  
    24.7 +  if (incoming_state == atos) {
    24.8 +    Register mdp = rbx;
    24.9 +    Register tmp = rcx;
   24.10 +    __ profile_return_type(mdp, rax, tmp);
   24.11 +  }
   24.12 +
   24.13    Label L_got_cache, L_giant_index;
   24.14    if (EnableInvokeDynamic) {
   24.15      __ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
    25.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Thu Oct 17 06:29:58 2013 -0700
    25.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Fri Oct 18 12:10:44 2013 -0700
    25.3 @@ -177,6 +177,12 @@
    25.4    __ restore_bcp();
    25.5    __ restore_locals();
    25.6  
    25.7 +  if (state == atos) {
    25.8 +    Register mdp = rbx;
    25.9 +    Register tmp = rcx;
   25.10 +    __ profile_return_type(mdp, rax, tmp);
   25.11 +  }
   25.12 +
   25.13    Label L_got_cache, L_giant_index;
   25.14    if (EnableInvokeDynamic) {
   25.15      __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
    26.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Thu Oct 17 06:29:58 2013 -0700
    26.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Fri Oct 18 12:10:44 2013 -0700
    26.3 @@ -2970,6 +2970,7 @@
    26.4  
    26.5    // profile this call
    26.6    __ profile_final_call(rax);
    26.7 +  __ profile_arguments_type(rax, method, rsi, true);
    26.8  
    26.9    __ jump_from_interpreted(method, rax);
   26.10  
   26.11 @@ -2984,6 +2985,7 @@
   26.12  
   26.13    // get target Method* & entry point
   26.14    __ lookup_virtual_method(rax, index, method);
   26.15 +  __ profile_arguments_type(rdx, method, rsi, true);
   26.16    __ jump_from_interpreted(method, rdx);
   26.17  }
   26.18  
   26.19 @@ -3013,6 +3015,7 @@
   26.20    __ null_check(rcx);
   26.21    // do the call
   26.22    __ profile_call(rax);
   26.23 +  __ profile_arguments_type(rax, rbx, rsi, false);
   26.24    __ jump_from_interpreted(rbx, rax);
   26.25  }
   26.26  
   26.27 @@ -3023,6 +3026,7 @@
   26.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   26.29    // do the call
   26.30    __ profile_call(rax);
   26.31 +  __ profile_arguments_type(rax, rbx, rsi, false);
   26.32    __ jump_from_interpreted(rbx, rax);
   26.33  }
   26.34  
   26.35 @@ -3082,6 +3086,8 @@
   26.36    __ testptr(rbx, rbx);
   26.37    __ jcc(Assembler::zero, no_such_method);
   26.38  
   26.39 +  __ profile_arguments_type(rdx, rbx, rsi, true);
   26.40 +
   26.41    // do the call
   26.42    // rcx: receiver
   26.43    // rbx,: Method*
   26.44 @@ -3138,6 +3144,7 @@
   26.45  
   26.46    // FIXME: profile the LambdaForm also
   26.47    __ profile_final_call(rax);
   26.48 +  __ profile_arguments_type(rdx, rbx_method, rsi, true);
   26.49  
   26.50    __ jump_from_interpreted(rbx_method, rdx);
   26.51  }
   26.52 @@ -3171,6 +3178,7 @@
   26.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   26.54    // profile this call
   26.55    __ profile_call(rsi);
   26.56 +  __ profile_arguments_type(rdx, rbx, rsi, false);
   26.57  
   26.58    __ verify_oop(rax_callsite);
   26.59  
    27.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Thu Oct 17 06:29:58 2013 -0700
    27.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Fri Oct 18 12:10:44 2013 -0700
    27.3 @@ -3026,6 +3026,7 @@
    27.4  
    27.5    // profile this call
    27.6    __ profile_final_call(rax);
    27.7 +  __ profile_arguments_type(rax, method, r13, true);
    27.8  
    27.9    __ jump_from_interpreted(method, rax);
   27.10  
   27.11 @@ -3040,6 +3041,7 @@
   27.12  
   27.13    // get target Method* & entry point
   27.14    __ lookup_virtual_method(rax, index, method);
   27.15 +  __ profile_arguments_type(rdx, method, r13, true);
   27.16    __ jump_from_interpreted(method, rdx);
   27.17  }
   27.18  
   27.19 @@ -3069,6 +3071,7 @@
   27.20    __ null_check(rcx);
   27.21    // do the call
   27.22    __ profile_call(rax);
   27.23 +  __ profile_arguments_type(rax, rbx, r13, false);
   27.24    __ jump_from_interpreted(rbx, rax);
   27.25  }
   27.26  
   27.27 @@ -3079,6 +3082,7 @@
   27.28    prepare_invoke(byte_no, rbx);  // get f1 Method*
   27.29    // do the call
   27.30    __ profile_call(rax);
   27.31 +  __ profile_arguments_type(rax, rbx, r13, false);
   27.32    __ jump_from_interpreted(rbx, rax);
   27.33  }
   27.34  
   27.35 @@ -3136,6 +3140,8 @@
   27.36    __ testptr(rbx, rbx);
   27.37    __ jcc(Assembler::zero, no_such_method);
   27.38  
   27.39 +  __ profile_arguments_type(rdx, rbx, r13, true);
   27.40 +
   27.41    // do the call
   27.42    // rcx: receiver
   27.43    // rbx,: Method*
   27.44 @@ -3193,6 +3199,7 @@
   27.45  
   27.46    // FIXME: profile the LambdaForm also
   27.47    __ profile_final_call(rax);
   27.48 +  __ profile_arguments_type(rdx, rbx_method, r13, true);
   27.49  
   27.50    __ jump_from_interpreted(rbx_method, rdx);
   27.51  }
   27.52 @@ -3226,6 +3233,7 @@
   27.53    // %%% should make a type profile for any invokedynamic that takes a ref argument
   27.54    // profile this call
   27.55    __ profile_call(r13);
   27.56 +  __ profile_arguments_type(rdx, rbx_method, r13, false);
   27.57  
   27.58    __ verify_oop(rax_callsite);
   27.59  
    28.1 --- a/src/os/bsd/vm/os_bsd.cpp	Thu Oct 17 06:29:58 2013 -0700
    28.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Fri Oct 18 12:10:44 2013 -0700
    28.3 @@ -159,9 +159,21 @@
    28.4    return Bsd::available_memory();
    28.5  }
    28.6  
    28.7 +// available here means free
    28.8  julong os::Bsd::available_memory() {
    28.9 -  // XXXBSD: this is just a stopgap implementation
   28.10 -  return physical_memory() >> 2;
   28.11 +  uint64_t available = physical_memory() >> 2;
   28.12 +#ifdef __APPLE__
   28.13 +  mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
   28.14 +  vm_statistics64_data_t vmstat;
   28.15 +  kern_return_t kerr = host_statistics64(mach_host_self(), HOST_VM_INFO64,
   28.16 +                                         (host_info64_t)&vmstat, &count);
   28.17 +  assert(kerr == KERN_SUCCESS,
   28.18 +         "host_statistics64 failed - check mach_host_self() and count");
   28.19 +  if (kerr == KERN_SUCCESS) {
   28.20 +    available = vmstat.free_count * os::vm_page_size();
   28.21 +  }
   28.22 +#endif
   28.23 +  return available;
   28.24  }
   28.25  
   28.26  julong os::physical_memory() {
    29.1 --- a/src/share/vm/c1/c1_Canonicalizer.cpp	Thu Oct 17 06:29:58 2013 -0700
    29.2 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Fri Oct 18 12:10:44 2013 -0700
    29.3 @@ -935,6 +935,7 @@
    29.4  void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
    29.5  void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
    29.6  void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
    29.7 +void Canonicalizer::do_ProfileReturnType(ProfileReturnType* x) {}
    29.8  void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
    29.9  void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
   29.10  void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
    30.1 --- a/src/share/vm/c1/c1_Canonicalizer.hpp	Thu Oct 17 06:29:58 2013 -0700
    30.2 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Fri Oct 18 12:10:44 2013 -0700
    30.3 @@ -104,6 +104,7 @@
    30.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    30.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    30.6    virtual void do_ProfileCall    (ProfileCall*     x);
    30.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    30.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    30.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   30.10    virtual void do_MemBar         (MemBar*          x);
    31.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Thu Oct 17 06:29:58 2013 -0700
    31.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Fri Oct 18 12:10:44 2013 -0700
    31.3 @@ -601,6 +601,17 @@
    31.4    }
    31.5  }
    31.6  
    31.7 +ciKlass* Compilation::cha_exact_type(ciType* type) {
    31.8 +  if (type != NULL && type->is_loaded() && type->is_instance_klass()) {
    31.9 +    ciInstanceKlass* ik = type->as_instance_klass();
   31.10 +    assert(ik->exact_klass() == NULL, "no cha for final klass");
   31.11 +    if (DeoptC1 && UseCHA && !(ik->has_subklass() || ik->is_interface())) {
   31.12 +      dependency_recorder()->assert_leaf_type(ik);
   31.13 +      return ik;
   31.14 +    }
   31.15 +  }
   31.16 +  return NULL;
   31.17 +}
   31.18  
   31.19  void Compilation::print_timers() {
   31.20    // tty->print_cr("    Native methods         : %6.3f s, Average : %2.3f", CompileBroker::_t_native_compilation.seconds(), CompileBroker::_t_native_compilation.seconds() / CompileBroker::_total_native_compile_count);
    32.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Thu Oct 17 06:29:58 2013 -0700
    32.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Fri Oct 18 12:10:44 2013 -0700
    32.3 @@ -246,6 +246,8 @@
    32.4        (RangeCheckElimination || UseLoopInvariantCodeMotion) &&
    32.5        method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
    32.6    }
    32.7 +
    32.8 +  ciKlass* cha_exact_type(ciType* type);
    32.9  };
   32.10  
   32.11  
    33.1 --- a/src/share/vm/c1/c1_Compiler.cpp	Thu Oct 17 06:29:58 2013 -0700
    33.2 +++ b/src/share/vm/c1/c1_Compiler.cpp	Fri Oct 18 12:10:44 2013 -0700
    33.3 @@ -42,26 +42,16 @@
    33.4  #include "runtime/interfaceSupport.hpp"
    33.5  #include "runtime/sharedRuntime.hpp"
    33.6  
    33.7 -volatile int Compiler::_runtimes = uninitialized;
    33.8  
    33.9 -Compiler::Compiler() {
   33.10 -}
   33.11 +Compiler::Compiler () {}
   33.12  
   33.13 -
   33.14 -Compiler::~Compiler() {
   33.15 -  Unimplemented();
   33.16 -}
   33.17 -
   33.18 -
   33.19 -void Compiler::initialize_all() {
   33.20 +void Compiler::init_c1_runtime() {
   33.21    BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   33.22    Arena* arena = new (mtCompiler) Arena();
   33.23    Runtime1::initialize(buffer_blob);
   33.24    FrameMap::initialize();
   33.25    // initialize data structures
   33.26    ValueType::initialize(arena);
   33.27 -  // Instruction::initialize();
   33.28 -  // BlockBegin::initialize();
   33.29    GraphBuilder::initialize();
   33.30    // note: to use more than one instance of LinearScan at a time this function call has to
   33.31    //       be moved somewhere outside of this constructor:
   33.32 @@ -70,32 +60,33 @@
   33.33  
   33.34  
   33.35  void Compiler::initialize() {
   33.36 -  if (_runtimes != initialized) {
   33.37 -    initialize_runtimes( initialize_all, &_runtimes);
   33.38 +  // Buffer blob must be allocated per C1 compiler thread at startup
   33.39 +  BufferBlob* buffer_blob = init_buffer_blob();
   33.40 +
   33.41 +  if (should_perform_init()) {
   33.42 +    if (buffer_blob == NULL) {
   33.43 +      // When we come here we are in state 'initializing'; entire C1 compilation
   33.44 +      // can be shut down.
   33.45 +      set_state(failed);
   33.46 +    } else {
   33.47 +      init_c1_runtime();
   33.48 +      set_state(initialized);
   33.49 +    }
   33.50    }
   33.51 -  mark_initialized();
   33.52  }
   33.53  
   33.54 -
   33.55 -BufferBlob* Compiler::get_buffer_blob(ciEnv* env) {
   33.56 +BufferBlob* Compiler::init_buffer_blob() {
   33.57    // Allocate buffer blob once at startup since allocation for each
   33.58    // compilation seems to be too expensive (at least on Intel win32).
   33.59 -  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   33.60 -  if (buffer_blob != NULL) {
   33.61 -    return buffer_blob;
   33.62 -  }
   33.63 +  assert (CompilerThread::current()->get_buffer_blob() == NULL, "Should initialize only once");
   33.64  
   33.65    // setup CodeBuffer.  Preallocate a BufferBlob of size
   33.66    // NMethodSizeLimit plus some extra space for constants.
   33.67    int code_buffer_size = Compilation::desired_max_code_buffer_size() +
   33.68      Compilation::desired_max_constant_size();
   33.69  
   33.70 -  buffer_blob = BufferBlob::create("Compiler1 temporary CodeBuffer",
   33.71 -                                   code_buffer_size);
   33.72 -  if (buffer_blob == NULL) {
   33.73 -    CompileBroker::handle_full_code_cache();
   33.74 -    env->record_failure("CodeCache is full");
   33.75 -  } else {
   33.76 +  BufferBlob* buffer_blob = BufferBlob::create("C1 temporary CodeBuffer", code_buffer_size);
   33.77 +  if (buffer_blob != NULL) {
   33.78      CompilerThread::current()->set_buffer_blob(buffer_blob);
   33.79    }
   33.80  
   33.81 @@ -104,15 +95,8 @@
   33.82  
   33.83  
   33.84  void Compiler::compile_method(ciEnv* env, ciMethod* method, int entry_bci) {
   33.85 -  BufferBlob* buffer_blob = Compiler::get_buffer_blob(env);
   33.86 -  if (buffer_blob == NULL) {
   33.87 -    return;
   33.88 -  }
   33.89 -
   33.90 -  if (!is_initialized()) {
   33.91 -    initialize();
   33.92 -  }
   33.93 -
   33.94 +  BufferBlob* buffer_blob = CompilerThread::current()->get_buffer_blob();
   33.95 +  assert(buffer_blob != NULL, "Must exist");
   33.96    // invoke compilation
   33.97    {
   33.98      // We are nested here because we need for the destructor
    34.1 --- a/src/share/vm/c1/c1_Compiler.hpp	Thu Oct 17 06:29:58 2013 -0700
    34.2 +++ b/src/share/vm/c1/c1_Compiler.hpp	Fri Oct 18 12:10:44 2013 -0700
    34.3 @@ -30,11 +30,9 @@
    34.4  // There is one instance of the Compiler per CompilerThread.
    34.5  
    34.6  class Compiler: public AbstractCompiler {
    34.7 -
    34.8   private:
    34.9 -
   34.10 - // Tracks whether runtime has been initialized
   34.11 - static volatile int _runtimes;
   34.12 +  static void init_c1_runtime();
   34.13 +  BufferBlob* init_buffer_blob();
   34.14  
   34.15   public:
   34.16    // Creation
   34.17 @@ -46,19 +44,12 @@
   34.18  
   34.19    virtual bool is_c1()                           { return true; };
   34.20  
   34.21 -  BufferBlob* get_buffer_blob(ciEnv* env);
   34.22 -
   34.23    // Missing feature tests
   34.24    virtual bool supports_native()                 { return true; }
   34.25    virtual bool supports_osr   ()                 { return true; }
   34.26  
   34.27 -  // Customization
   34.28 -  virtual bool needs_adapters         ()         { return false; }
   34.29 -  virtual bool needs_stubs            ()         { return false; }
   34.30 -
   34.31    // Initialization
   34.32    virtual void initialize();
   34.33 -  static  void initialize_all();
   34.34  
   34.35    // Compilation entry point for methods
   34.36    virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci);
    35.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Thu Oct 17 06:29:58 2013 -0700
    35.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Fri Oct 18 12:10:44 2013 -0700
    35.3 @@ -1466,9 +1466,22 @@
    35.4      // State at end of inlined method is the state of the caller
    35.5      // without the method parameters on stack, including the
    35.6      // return value, if any, of the inlined method on operand stack.
    35.7 +    int invoke_bci = state()->caller_state()->bci();
    35.8      set_state(state()->caller_state()->copy_for_parsing());
    35.9      if (x != NULL) {
   35.10        state()->push(x->type(), x);
   35.11 +      if (profile_calls() && MethodData::profile_return() && x->type()->is_object_kind()) {
   35.12 +        ciMethod* caller = state()->scope()->method();
   35.13 +        ciMethodData* md = caller->method_data_or_null();
   35.14 +        ciProfileData* data = md->bci_to_data(invoke_bci);
   35.15 +        if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   35.16 +          bool has_return = data->is_CallTypeData() ? ((ciCallTypeData*)data)->has_return() : ((ciVirtualCallTypeData*)data)->has_return();
   35.17 +          // May not be true in case of an inlined call through a method handle intrinsic.
   35.18 +          if (has_return) {
   35.19 +            profile_return_type(x, method(), caller, invoke_bci);
   35.20 +          }
   35.21 +        }
   35.22 +      }
   35.23      }
   35.24      Goto* goto_callee = new Goto(continuation(), false);
   35.25  
   35.26 @@ -1658,6 +1671,42 @@
   35.27    return compilation()->dependency_recorder();
   35.28  }
   35.29  
   35.30 +// How many arguments do we want to profile?
   35.31 +Values* GraphBuilder::args_list_for_profiling(int& start, bool may_have_receiver) {
   35.32 +  int n = 0;
   35.33 +  assert(start == 0, "should be initialized");
   35.34 +  if (MethodData::profile_arguments()) {
   35.35 +    ciProfileData* data = method()->method_data()->bci_to_data(bci());
   35.36 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   35.37 +      n = data->is_CallTypeData() ? data->as_CallTypeData()->number_of_arguments() : data->as_VirtualCallTypeData()->number_of_arguments();
   35.38 +      bool has_receiver = may_have_receiver && Bytecodes::has_receiver(method()->java_code_at_bci(bci()));
   35.39 +      start = has_receiver ? 1 : 0;
   35.40 +    }
   35.41 +  }
   35.42 +  if (n > 0) {
   35.43 +    return new Values(n);
   35.44 +  }
   35.45 +  return NULL;
   35.46 +}
   35.47 +
   35.48 +// Collect arguments that we want to profile in a list
   35.49 +Values* GraphBuilder::collect_args_for_profiling(Values* args, bool may_have_receiver) {
   35.50 +  int start = 0;
   35.51 +  Values* obj_args = args_list_for_profiling(start, may_have_receiver);
   35.52 +  if (obj_args == NULL) {
   35.53 +    return NULL;
   35.54 +  }
   35.55 +  int s = obj_args->size();
   35.56 +  for (int i = start, j = 0; j < s; i++) {
   35.57 +    if (args->at(i)->type()->is_object_kind()) {
   35.58 +      obj_args->push(args->at(i));
   35.59 +      j++;
   35.60 +    }
   35.61 +  }
   35.62 +  assert(s == obj_args->length(), "missed on arg?");
   35.63 +  return obj_args;
   35.64 +}
   35.65 +
   35.66  
   35.67  void GraphBuilder::invoke(Bytecodes::Code code) {
   35.68    bool will_link;
   35.69 @@ -1957,7 +2006,7 @@
   35.70        } else if (exact_target != NULL) {
   35.71          target_klass = exact_target->holder();
   35.72        }
   35.73 -      profile_call(target, recv, target_klass);
   35.74 +      profile_call(target, recv, target_klass, collect_args_for_profiling(args, false), false);
   35.75      }
   35.76    }
   35.77  
   35.78 @@ -1972,6 +2021,9 @@
   35.79        push(result_type, result);
   35.80      }
   35.81    }
   35.82 +  if (profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
   35.83 +    profile_return_type(result, target);
   35.84 +  }
   35.85  }
   35.86  
   35.87  
   35.88 @@ -3509,7 +3561,7 @@
   35.89            recv = args->at(0);
   35.90            null_check(recv);
   35.91          }
   35.92 -        profile_call(callee, recv, NULL);
   35.93 +        profile_call(callee, recv, NULL, collect_args_for_profiling(args, true), true);
   35.94        }
   35.95      }
   35.96    }
   35.97 @@ -3520,6 +3572,10 @@
   35.98    Value value = append_split(result);
   35.99    if (result_type != voidType) push(result_type, value);
  35.100  
  35.101 +  if (callee != method() && profile_calls() && MethodData::profile_return() && result_type->is_object_kind()) {
  35.102 +    profile_return_type(result, callee);
  35.103 +  }
  35.104 +
  35.105    // done
  35.106    return true;
  35.107  }
  35.108 @@ -3763,7 +3819,28 @@
  35.109      compilation()->set_would_profile(true);
  35.110  
  35.111      if (profile_calls()) {
  35.112 -      profile_call(callee, recv, holder_known ? callee->holder() : NULL);
  35.113 +      int start = 0;
  35.114 +      Values* obj_args = args_list_for_profiling(start, has_receiver);
  35.115 +      if (obj_args != NULL) {
  35.116 +        int s = obj_args->size();
  35.117 +        // if called through method handle invoke, some arguments may have been popped
  35.118 +        for (int i = args_base+start, j = 0; j < obj_args->size() && i < state()->stack_size(); ) {
  35.119 +          Value v = state()->stack_at_inc(i);
  35.120 +          if (v->type()->is_object_kind()) {
  35.121 +            obj_args->push(v);
  35.122 +            j++;
  35.123 +          }
  35.124 +        }
  35.125 +#ifdef ASSERT
  35.126 +        {
  35.127 +          bool ignored_will_link;
  35.128 +          ciSignature* declared_signature = NULL;
  35.129 +          ciMethod* real_target = method()->get_method_at_bci(bci(), ignored_will_link, &declared_signature);
  35.130 +          assert(s == obj_args->length() || real_target->is_method_handle_intrinsic(), "missed on arg?");
  35.131 +        }
  35.132 +#endif
  35.133 +      }
  35.134 +      profile_call(callee, recv, holder_known ? callee->holder() : NULL, obj_args, true);
  35.135      }
  35.136    }
  35.137  
  35.138 @@ -4251,8 +4328,23 @@
  35.139  }
  35.140  #endif // PRODUCT
  35.141  
  35.142 -void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder) {
  35.143 -  append(new ProfileCall(method(), bci(), callee, recv, known_holder));
  35.144 +void GraphBuilder::profile_call(ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined) {
  35.145 +  append(new ProfileCall(method(), bci(), callee, recv, known_holder, obj_args, inlined));
  35.146 +}
  35.147 +
  35.148 +void GraphBuilder::profile_return_type(Value ret, ciMethod* callee, ciMethod* m, int invoke_bci) {
  35.149 +  assert((m == NULL) == (invoke_bci < 0), "invalid method and invalid bci together");
  35.150 +  if (m == NULL) {
  35.151 +    m = method();
  35.152 +  }
  35.153 +  if (invoke_bci < 0) {
  35.154 +    invoke_bci = bci();
  35.155 +  }
  35.156 +  ciMethodData* md = m->method_data_or_null();
  35.157 +  ciProfileData* data = md->bci_to_data(invoke_bci);
  35.158 +  if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
  35.159 +    append(new ProfileReturnType(m , invoke_bci, callee, ret));
  35.160 +  }
  35.161  }
  35.162  
  35.163  void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state) {
    36.1 --- a/src/share/vm/c1/c1_GraphBuilder.hpp	Thu Oct 17 06:29:58 2013 -0700
    36.2 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Fri Oct 18 12:10:44 2013 -0700
    36.3 @@ -374,7 +374,8 @@
    36.4  
    36.5    void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);
    36.6  
    36.7 -  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder);
    36.8 +  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
    36.9 +  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
   36.10    void profile_invocation(ciMethod* inlinee, ValueStack* state);
   36.11  
   36.12    // Shortcuts to profiling control.
   36.13 @@ -386,6 +387,9 @@
   36.14    bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   36.15    bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
   36.16  
   36.17 +  Values* args_list_for_profiling(int& start, bool may_have_receiver);
   36.18 +  Values* collect_args_for_profiling(Values* args, bool may_have_receiver);
   36.19 +
   36.20   public:
   36.21    NOT_PRODUCT(void print_stats();)
   36.22  
    37.1 --- a/src/share/vm/c1/c1_Instruction.cpp	Thu Oct 17 06:29:58 2013 -0700
    37.2 +++ b/src/share/vm/c1/c1_Instruction.cpp	Fri Oct 18 12:10:44 2013 -0700
    37.3 @@ -104,6 +104,14 @@
    37.4    }
    37.5  }
    37.6  
    37.7 +ciType* Instruction::exact_type() const {
    37.8 +  ciType* t =  declared_type();
    37.9 +  if (t != NULL && t->is_klass()) {
   37.10 +    return t->as_klass()->exact_klass();
   37.11 +  }
   37.12 +  return NULL;
   37.13 +}
   37.14 +
   37.15  
   37.16  #ifndef PRODUCT
   37.17  void Instruction::check_state(ValueStack* state) {
   37.18 @@ -135,9 +143,7 @@
   37.19  
   37.20  // perform constant and interval tests on index value
   37.21  bool AccessIndexed::compute_needs_range_check() {
   37.22 -
   37.23    if (length()) {
   37.24 -
   37.25      Constant* clength = length()->as_Constant();
   37.26      Constant* cindex = index()->as_Constant();
   37.27      if (clength && cindex) {
   37.28 @@ -157,34 +163,8 @@
   37.29  }
   37.30  
   37.31  
   37.32 -ciType* Local::exact_type() const {
   37.33 -  ciType* type = declared_type();
   37.34 -
   37.35 -  // for primitive arrays, the declared type is the exact type
   37.36 -  if (type->is_type_array_klass()) {
   37.37 -    return type;
   37.38 -  } else if (type->is_instance_klass()) {
   37.39 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
   37.40 -    if (ik->is_loaded() && ik->is_final() && !ik->is_interface()) {
   37.41 -      return type;
   37.42 -    }
   37.43 -  } else if (type->is_obj_array_klass()) {
   37.44 -    ciObjArrayKlass* oak = (ciObjArrayKlass*)type;
   37.45 -    ciType* base = oak->base_element_type();
   37.46 -    if (base->is_instance_klass()) {
   37.47 -      ciInstanceKlass* ik = base->as_instance_klass();
   37.48 -      if (ik->is_loaded() && ik->is_final()) {
   37.49 -        return type;
   37.50 -      }
   37.51 -    } else if (base->is_primitive_type()) {
   37.52 -      return type;
   37.53 -    }
   37.54 -  }
   37.55 -  return NULL;
   37.56 -}
   37.57 -
   37.58  ciType* Constant::exact_type() const {
   37.59 -  if (type()->is_object()) {
   37.60 +  if (type()->is_object() && type()->as_ObjectType()->is_loaded()) {
   37.61      return type()->as_ObjectType()->exact_type();
   37.62    }
   37.63    return NULL;
   37.64 @@ -192,19 +172,18 @@
   37.65  
   37.66  ciType* LoadIndexed::exact_type() const {
   37.67    ciType* array_type = array()->exact_type();
   37.68 -  if (array_type == NULL) {
   37.69 -    return NULL;
   37.70 -  }
   37.71 -  assert(array_type->is_array_klass(), "what else?");
   37.72 -  ciArrayKlass* ak = (ciArrayKlass*)array_type;
   37.73 +  if (array_type != NULL) {
   37.74 +    assert(array_type->is_array_klass(), "what else?");
   37.75 +    ciArrayKlass* ak = (ciArrayKlass*)array_type;
   37.76  
   37.77 -  if (ak->element_type()->is_instance_klass()) {
   37.78 -    ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   37.79 -    if (ik->is_loaded() && ik->is_final()) {
   37.80 -      return ik;
   37.81 +    if (ak->element_type()->is_instance_klass()) {
   37.82 +      ciInstanceKlass* ik = (ciInstanceKlass*)ak->element_type();
   37.83 +      if (ik->is_loaded() && ik->is_final()) {
   37.84 +        return ik;
   37.85 +      }
   37.86      }
   37.87    }
   37.88 -  return NULL;
   37.89 +  return Instruction::exact_type();
   37.90  }
   37.91  
   37.92  
   37.93 @@ -224,22 +203,6 @@
   37.94  }
   37.95  
   37.96  
   37.97 -ciType* LoadField::exact_type() const {
   37.98 -  ciType* type = declared_type();
   37.99 -  // for primitive arrays, the declared type is the exact type
  37.100 -  if (type->is_type_array_klass()) {
  37.101 -    return type;
  37.102 -  }
  37.103 -  if (type->is_instance_klass()) {
  37.104 -    ciInstanceKlass* ik = (ciInstanceKlass*)type;
  37.105 -    if (ik->is_loaded() && ik->is_final()) {
  37.106 -      return type;
  37.107 -    }
  37.108 -  }
  37.109 -  return NULL;
  37.110 -}
  37.111 -
  37.112 -
  37.113  ciType* NewTypeArray::exact_type() const {
  37.114    return ciTypeArrayKlass::make(elt_type());
  37.115  }
  37.116 @@ -264,16 +227,6 @@
  37.117    return klass();
  37.118  }
  37.119  
  37.120 -ciType* CheckCast::exact_type() const {
  37.121 -  if (klass()->is_instance_klass()) {
  37.122 -    ciInstanceKlass* ik = (ciInstanceKlass*)klass();
  37.123 -    if (ik->is_loaded() && ik->is_final()) {
  37.124 -      return ik;
  37.125 -    }
  37.126 -  }
  37.127 -  return NULL;
  37.128 -}
  37.129 -
  37.130  // Implementation of ArithmeticOp
  37.131  
  37.132  bool ArithmeticOp::is_commutative() const {
    38.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Thu Oct 17 06:29:58 2013 -0700
    38.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Fri Oct 18 12:10:44 2013 -0700
    38.3 @@ -107,6 +107,7 @@
    38.4  class         UnsafePrefetchRead;
    38.5  class         UnsafePrefetchWrite;
    38.6  class   ProfileCall;
    38.7 +class   ProfileReturnType;
    38.8  class   ProfileInvoke;
    38.9  class   RuntimeCall;
   38.10  class   MemBar;
   38.11 @@ -211,6 +212,7 @@
   38.12    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   38.13    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   38.14    virtual void do_ProfileCall    (ProfileCall*     x) = 0;
   38.15 +  virtual void do_ProfileReturnType (ProfileReturnType*  x) = 0;
   38.16    virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   38.17    virtual void do_RuntimeCall    (RuntimeCall*     x) = 0;
   38.18    virtual void do_MemBar         (MemBar*          x) = 0;
   38.19 @@ -322,6 +324,36 @@
   38.20      _type = type;
   38.21    }
   38.22  
   38.23 +  // Helper class to keep track of which arguments need a null check
   38.24 +  class ArgsNonNullState {
   38.25 +  private:
   38.26 +    int _nonnull_state; // mask identifying which args are nonnull
   38.27 +  public:
   38.28 +    ArgsNonNullState()
   38.29 +      : _nonnull_state(AllBits) {}
   38.30 +
   38.31 +    // Does argument number i needs a null check?
   38.32 +    bool arg_needs_null_check(int i) const {
   38.33 +      // No data is kept for arguments starting at position 33 so
   38.34 +      // conservatively assume that they need a null check.
   38.35 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   38.36 +        return is_set_nth_bit(_nonnull_state, i);
   38.37 +      }
   38.38 +      return true;
   38.39 +    }
   38.40 +
   38.41 +    // Set whether argument number i needs a null check or not
   38.42 +    void set_arg_needs_null_check(int i, bool check) {
   38.43 +      if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
   38.44 +        if (check) {
   38.45 +          _nonnull_state |= nth_bit(i);
   38.46 +        } else {
   38.47 +          _nonnull_state &= ~(nth_bit(i));
   38.48 +        }
   38.49 +      }
   38.50 +    }
   38.51 +  };
   38.52 +
   38.53   public:
   38.54    void* operator new(size_t size) throw() {
   38.55      Compilation* c = Compilation::current();
   38.56 @@ -566,7 +598,7 @@
   38.57    virtual void other_values_do(ValueVisitor* f)   { /* usually no other - override on demand */ }
   38.58            void       values_do(ValueVisitor* f)   { input_values_do(f); state_values_do(f); other_values_do(f); }
   38.59  
   38.60 -  virtual ciType* exact_type() const             { return NULL; }
   38.61 +  virtual ciType* exact_type() const;
   38.62    virtual ciType* declared_type() const          { return NULL; }
   38.63  
   38.64    // hashing
   38.65 @@ -689,7 +721,6 @@
   38.66    int java_index() const                         { return _java_index; }
   38.67  
   38.68    virtual ciType* declared_type() const          { return _declared_type; }
   38.69 -  virtual ciType* exact_type() const;
   38.70  
   38.71    // generic
   38.72    virtual void input_values_do(ValueVisitor* f)   { /* no values */ }
   38.73 @@ -806,7 +837,6 @@
   38.74    {}
   38.75  
   38.76    ciType* declared_type() const;
   38.77 -  ciType* exact_type() const;
   38.78  
   38.79    // generic
   38.80    HASHING2(LoadField, !needs_patching() && !field()->is_volatile(), obj()->subst(), offset())  // cannot be eliminated if needs patching or if volatile
   38.81 @@ -1299,6 +1329,7 @@
   38.82  
   38.83    virtual bool needs_exception_state() const     { return false; }
   38.84  
   38.85 +  ciType* exact_type() const                     { return NULL; }
   38.86    ciType* declared_type() const;
   38.87  
   38.88    // generic
   38.89 @@ -1422,7 +1453,6 @@
   38.90    }
   38.91  
   38.92    ciType* declared_type() const;
   38.93 -  ciType* exact_type() const;
   38.94  };
   38.95  
   38.96  
   38.97 @@ -1490,7 +1520,7 @@
   38.98    vmIntrinsics::ID _id;
   38.99    Values*          _args;
  38.100    Value            _recv;
  38.101 -  int              _nonnull_state; // mask identifying which args are nonnull
  38.102 +  ArgsNonNullState _nonnull_state;
  38.103  
  38.104   public:
  38.105    // preserves_state can be set to true for Intrinsics
  38.106 @@ -1511,7 +1541,6 @@
  38.107    , _id(id)
  38.108    , _args(args)
  38.109    , _recv(NULL)
  38.110 -  , _nonnull_state(AllBits)
  38.111    {
  38.112      assert(args != NULL, "args must exist");
  38.113      ASSERT_VALUES
  38.114 @@ -1537,21 +1566,12 @@
  38.115    Value receiver() const                         { assert(has_receiver(), "must have receiver"); return _recv; }
  38.116    bool preserves_state() const                   { return check_flag(PreservesStateFlag); }
  38.117  
  38.118 -  bool arg_needs_null_check(int i) {
  38.119 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  38.120 -      return is_set_nth_bit(_nonnull_state, i);
  38.121 -    }
  38.122 -    return true;
  38.123 +  bool arg_needs_null_check(int i) const {
  38.124 +    return _nonnull_state.arg_needs_null_check(i);
  38.125    }
  38.126  
  38.127    void set_arg_needs_null_check(int i, bool check) {
  38.128 -    if (i >= 0 && i < (int)sizeof(_nonnull_state) * BitsPerByte) {
  38.129 -      if (check) {
  38.130 -        _nonnull_state |= nth_bit(i);
  38.131 -      } else {
  38.132 -        _nonnull_state &= ~(nth_bit(i));
  38.133 -      }
  38.134 -    }
  38.135 +    _nonnull_state.set_arg_needs_null_check(i, check);
  38.136    }
  38.137  
  38.138    // generic
  38.139 @@ -2450,34 +2470,87 @@
  38.140  
  38.141  LEAF(ProfileCall, Instruction)
  38.142   private:
  38.143 -  ciMethod* _method;
  38.144 -  int       _bci_of_invoke;
  38.145 -  ciMethod* _callee;         // the method that is called at the given bci
  38.146 -  Value     _recv;
  38.147 -  ciKlass*  _known_holder;
  38.148 +  ciMethod*        _method;
  38.149 +  int              _bci_of_invoke;
  38.150 +  ciMethod*        _callee;         // the method that is called at the given bci
  38.151 +  Value            _recv;
  38.152 +  ciKlass*         _known_holder;
  38.153 +  Values*          _obj_args;       // arguments for type profiling
  38.154 +  ArgsNonNullState _nonnull_state;  // Do we know whether some arguments are never null?
  38.155 +  bool             _inlined;        // Are we profiling a call that is inlined
  38.156  
  38.157   public:
  38.158 -  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder)
  38.159 +  ProfileCall(ciMethod* method, int bci, ciMethod* callee, Value recv, ciKlass* known_holder, Values* obj_args, bool inlined)
  38.160      : Instruction(voidType)
  38.161      , _method(method)
  38.162      , _bci_of_invoke(bci)
  38.163      , _callee(callee)
  38.164      , _recv(recv)
  38.165      , _known_holder(known_holder)
  38.166 +    , _obj_args(obj_args)
  38.167 +    , _inlined(inlined)
  38.168    {
  38.169      // The ProfileCall has side-effects and must occur precisely where located
  38.170      pin();
  38.171    }
  38.172  
  38.173 -  ciMethod* method()      { return _method; }
  38.174 -  int bci_of_invoke()     { return _bci_of_invoke; }
  38.175 -  ciMethod* callee()      { return _callee; }
  38.176 -  Value recv()            { return _recv; }
  38.177 -  ciKlass* known_holder() { return _known_holder; }
  38.178 -
  38.179 -  virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
  38.180 +  ciMethod* method()             const { return _method; }
  38.181 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  38.182 +  ciMethod* callee()             const { return _callee; }
  38.183 +  Value recv()                   const { return _recv; }
  38.184 +  ciKlass* known_holder()        const { return _known_holder; }
  38.185 +  int nb_profiled_args()         const { return _obj_args == NULL ? 0 : _obj_args->length(); }
  38.186 +  Value profiled_arg_at(int i)   const { return _obj_args->at(i); }
  38.187 +  bool arg_needs_null_check(int i) const {
  38.188 +    return _nonnull_state.arg_needs_null_check(i);
  38.189 +  }
  38.190 +  bool inlined()                 const { return _inlined; }
  38.191 +
  38.192 +  void set_arg_needs_null_check(int i, bool check) {
  38.193 +    _nonnull_state.set_arg_needs_null_check(i, check);
  38.194 +  }
  38.195 +
  38.196 +  virtual void input_values_do(ValueVisitor* f)   {
  38.197 +    if (_recv != NULL) {
  38.198 +      f->visit(&_recv);
  38.199 +    }
  38.200 +    for (int i = 0; i < nb_profiled_args(); i++) {
  38.201 +      f->visit(_obj_args->adr_at(i));
  38.202 +    }
  38.203 +  }
  38.204  };
  38.205  
  38.206 +LEAF(ProfileReturnType, Instruction)
  38.207 + private:
  38.208 +  ciMethod*        _method;
  38.209 +  ciMethod*        _callee;
  38.210 +  int              _bci_of_invoke;
  38.211 +  Value            _ret;
  38.212 +
  38.213 + public:
  38.214 +  ProfileReturnType(ciMethod* method, int bci, ciMethod* callee, Value ret)
  38.215 +    : Instruction(voidType)
  38.216 +    , _method(method)
  38.217 +    , _callee(callee)
  38.218 +    , _bci_of_invoke(bci)
  38.219 +    , _ret(ret)
  38.220 +  {
  38.221 +    set_needs_null_check(true);
  38.222 +    // The ProfileType has side-effects and must occur precisely where located
  38.223 +    pin();
  38.224 +  }
  38.225 +
  38.226 +  ciMethod* method()             const { return _method; }
  38.227 +  ciMethod* callee()             const { return _callee; }
  38.228 +  int bci_of_invoke()            const { return _bci_of_invoke; }
  38.229 +  Value ret()                    const { return _ret; }
  38.230 +
  38.231 +  virtual void input_values_do(ValueVisitor* f)   {
  38.232 +    if (_ret != NULL) {
  38.233 +      f->visit(&_ret);
  38.234 +    }
  38.235 +  }
  38.236 +};
  38.237  
  38.238  // Call some C runtime function that doesn't safepoint,
  38.239  // optionally passing the current thread as the first argument.
    39.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Thu Oct 17 06:29:58 2013 -0700
    39.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Fri Oct 18 12:10:44 2013 -0700
    39.3 @@ -892,10 +892,24 @@
    39.4    if (x->known_holder() != NULL) {
    39.5      output()->print(", ");
    39.6      print_klass(x->known_holder());
    39.7 +    output()->print(" ");
    39.8 +  }
    39.9 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   39.10 +    if (i > 0) output()->print(", ");
   39.11 +    print_value(x->profiled_arg_at(i));
   39.12 +    if (x->arg_needs_null_check(i)) {
   39.13 +      output()->print(" [NC]");
   39.14 +    }
   39.15    }
   39.16    output()->put(')');
   39.17  }
   39.18  
   39.19 +void InstructionPrinter::do_ProfileReturnType(ProfileReturnType* x) {
   39.20 +  output()->print("profile ret type ");
   39.21 +  print_value(x->ret());
   39.22 +  output()->print(" %s.%s", x->method()->holder()->name()->as_utf8(), x->method()->name()->as_utf8());
   39.23 +  output()->put(')');
   39.24 +}
   39.25  void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   39.26    output()->print("profile_invoke ");
   39.27    output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
    40.1 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Thu Oct 17 06:29:58 2013 -0700
    40.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Fri Oct 18 12:10:44 2013 -0700
    40.3 @@ -132,6 +132,7 @@
    40.4    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    40.5    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    40.6    virtual void do_ProfileCall    (ProfileCall*     x);
    40.7 +  virtual void do_ProfileReturnType (ProfileReturnType*  x);
    40.8    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
    40.9    virtual void do_RuntimeCall    (RuntimeCall*     x);
   40.10    virtual void do_MemBar         (MemBar*          x);
    41.1 --- a/src/share/vm/c1/c1_LIR.cpp	Thu Oct 17 06:29:58 2013 -0700
    41.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Fri Oct 18 12:10:44 2013 -0700
    41.3 @@ -1001,6 +1001,17 @@
    41.4        assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
    41.5        break;
    41.6      }
    41.7 +
    41.8 +// LIR_OpProfileType:
    41.9 +    case lir_profile_type: {
   41.10 +      assert(op->as_OpProfileType() != NULL, "must be");
   41.11 +      LIR_OpProfileType* opProfileType = (LIR_OpProfileType*)op;
   41.12 +
   41.13 +      do_input(opProfileType->_mdp); do_temp(opProfileType->_mdp);
   41.14 +      do_input(opProfileType->_obj);
   41.15 +      do_temp(opProfileType->_tmp);
   41.16 +      break;
   41.17 +    }
   41.18    default:
   41.19      ShouldNotReachHere();
   41.20    }
   41.21 @@ -1151,6 +1162,10 @@
   41.22    masm->emit_profile_call(this);
   41.23  }
   41.24  
   41.25 +void LIR_OpProfileType::emit_code(LIR_Assembler* masm) {
   41.26 +  masm->emit_profile_type(this);
   41.27 +}
   41.28 +
   41.29  // LIR_List
   41.30  LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   41.31    : _operations(8)
   41.32 @@ -1803,6 +1818,8 @@
   41.33       case lir_cas_int:               s = "cas_int";      break;
   41.34       // LIR_OpProfileCall
   41.35       case lir_profile_call:          s = "profile_call";  break;
   41.36 +     // LIR_OpProfileType
   41.37 +     case lir_profile_type:          s = "profile_type";  break;
   41.38       // LIR_OpAssert
   41.39  #ifdef ASSERT
   41.40       case lir_assert:                s = "assert";        break;
   41.41 @@ -2086,6 +2103,15 @@
   41.42    tmp1()->print(out);          out->print(" ");
   41.43  }
   41.44  
   41.45 +// LIR_OpProfileType
   41.46 +void LIR_OpProfileType::print_instr(outputStream* out) const {
   41.47 +  out->print("exact = "); exact_klass()->print_name_on(out);
   41.48 +  out->print("current = "); ciTypeEntries::print_ciklass(out, current_klass());
   41.49 +  mdp()->print(out);          out->print(" ");
   41.50 +  obj()->print(out);          out->print(" ");
   41.51 +  tmp()->print(out);          out->print(" ");
   41.52 +}
   41.53 +
   41.54  #endif // PRODUCT
   41.55  
   41.56  // Implementation of LIR_InsertionBuffer
    42.1 --- a/src/share/vm/c1/c1_LIR.hpp	Thu Oct 17 06:29:58 2013 -0700
    42.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Fri Oct 18 12:10:44 2013 -0700
    42.3 @@ -882,6 +882,7 @@
    42.4  class    LIR_OpTypeCheck;
    42.5  class    LIR_OpCompareAndSwap;
    42.6  class    LIR_OpProfileCall;
    42.7 +class    LIR_OpProfileType;
    42.8  #ifdef ASSERT
    42.9  class    LIR_OpAssert;
   42.10  #endif
   42.11 @@ -1005,6 +1006,7 @@
   42.12    , end_opCompareAndSwap
   42.13    , begin_opMDOProfile
   42.14      , lir_profile_call
   42.15 +    , lir_profile_type
   42.16    , end_opMDOProfile
   42.17    , begin_opAssert
   42.18      , lir_assert
   42.19 @@ -1145,6 +1147,7 @@
   42.20    virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   42.21    virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   42.22    virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
   42.23 +  virtual LIR_OpProfileType* as_OpProfileType() { return NULL; }
   42.24  #ifdef ASSERT
   42.25    virtual LIR_OpAssert* as_OpAssert() { return NULL; }
   42.26  #endif
   42.27 @@ -1925,8 +1928,8 @@
   42.28  
   42.29   public:
   42.30    // Destroys recv
   42.31 -  LIR_OpProfileCall(LIR_Code code, ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   42.32 -    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   42.33 +  LIR_OpProfileCall(ciMethod* profiled_method, int profiled_bci, ciMethod* profiled_callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* known_holder)
   42.34 +    : LIR_Op(lir_profile_call, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   42.35      , _profiled_method(profiled_method)
   42.36      , _profiled_bci(profiled_bci)
   42.37      , _profiled_callee(profiled_callee)
   42.38 @@ -1948,6 +1951,45 @@
   42.39    virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   42.40  };
   42.41  
   42.42 +// LIR_OpProfileType
   42.43 +class LIR_OpProfileType : public LIR_Op {
   42.44 + friend class LIR_OpVisitState;
   42.45 +
   42.46 + private:
   42.47 +  LIR_Opr      _mdp;
   42.48 +  LIR_Opr      _obj;
   42.49 +  LIR_Opr      _tmp;
   42.50 +  ciKlass*     _exact_klass;   // non NULL if we know the klass statically (no need to load it from _obj)
   42.51 +  intptr_t     _current_klass; // what the profiling currently reports
   42.52 +  bool         _not_null;      // true if we know statically that _obj cannot be null
   42.53 +  bool         _no_conflict;   // true if we're profling parameters, _exact_klass is not NULL and we know
   42.54 +                               // _exact_klass it the only possible type for this parameter in any context.
   42.55 +
   42.56 + public:
   42.57 +  // Destroys recv
   42.58 +  LIR_OpProfileType(LIR_Opr mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict)
   42.59 +    : LIR_Op(lir_profile_type, LIR_OprFact::illegalOpr, NULL)  // no result, no info
   42.60 +    , _mdp(mdp)
   42.61 +    , _obj(obj)
   42.62 +    , _exact_klass(exact_klass)
   42.63 +    , _current_klass(current_klass)
   42.64 +    , _tmp(tmp)
   42.65 +    , _not_null(not_null)
   42.66 +    , _no_conflict(no_conflict) { }
   42.67 +
   42.68 +  LIR_Opr      mdp()              const             { return _mdp;              }
   42.69 +  LIR_Opr      obj()              const             { return _obj;              }
   42.70 +  LIR_Opr      tmp()              const             { return _tmp;              }
   42.71 +  ciKlass*     exact_klass()      const             { return _exact_klass;      }
   42.72 +  intptr_t     current_klass()    const             { return _current_klass;    }
   42.73 +  bool         not_null()         const             { return _not_null;         }
   42.74 +  bool         no_conflict()      const             { return _no_conflict;      }
   42.75 +
   42.76 +  virtual void emit_code(LIR_Assembler* masm);
   42.77 +  virtual LIR_OpProfileType* as_OpProfileType() { return this; }
   42.78 +  virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   42.79 +};
   42.80 +
   42.81  class LIR_InsertionBuffer;
   42.82  
   42.83  //--------------------------------LIR_List---------------------------------------------------
   42.84 @@ -2247,7 +2289,10 @@
   42.85                    ciMethod* profiled_method, int profiled_bci);
   42.86    // MethodData* profiling
   42.87    void profile_call(ciMethod* method, int bci, ciMethod* callee, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
   42.88 -    append(new LIR_OpProfileCall(lir_profile_call, method, bci, callee, mdo, recv, t1, cha_klass));
   42.89 +    append(new LIR_OpProfileCall(method, bci, callee, mdo, recv, t1, cha_klass));
   42.90 +  }
   42.91 +  void profile_type(LIR_Address* mdp, LIR_Opr obj, ciKlass* exact_klass, intptr_t current_klass, LIR_Opr tmp, bool not_null, bool no_conflict) {
   42.92 +    append(new LIR_OpProfileType(LIR_OprFact::address(mdp), obj, exact_klass, current_klass, tmp, not_null, no_conflict));
   42.93    }
   42.94  
   42.95    void xadd(LIR_Opr src, LIR_Opr add, LIR_Opr res, LIR_Opr tmp) { append(new LIR_Op2(lir_xadd, src, add, res, tmp)); }
    43.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Thu Oct 17 06:29:58 2013 -0700
    43.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Fri Oct 18 12:10:44 2013 -0700
    43.3 @@ -208,6 +208,7 @@
    43.4    void emit_call(LIR_OpJavaCall* op);
    43.5    void emit_rtcall(LIR_OpRTCall* op);
    43.6    void emit_profile_call(LIR_OpProfileCall* op);
    43.7 +  void emit_profile_type(LIR_OpProfileType* op);
    43.8    void emit_delay(LIR_OpDelay* op);
    43.9  
   43.10    void arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr dest, CodeEmitInfo* info, bool pop_fpu_stack);
    44.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Thu Oct 17 06:29:58 2013 -0700
    44.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Fri Oct 18 12:10:44 2013 -0700
    44.3 @@ -2571,6 +2571,78 @@
    44.4  }
    44.5  
    44.6  
    44.7 +ciKlass* LIRGenerator::profile_arg_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k) {
    44.8 +  ciKlass* result = NULL;
    44.9 +  bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
   44.10 +  bool do_update = !TypeEntries::is_type_unknown(profiled_k);
   44.11 +  // known not to be null or null bit already set and already set to
   44.12 +  // unknown: nothing we can do to improve profiling
   44.13 +  if (!do_null && !do_update) {
   44.14 +    return result;
   44.15 +  }
   44.16 +
   44.17 +  ciKlass* exact_klass = NULL;
   44.18 +  Compilation* comp = Compilation::current();
   44.19 +  if (do_update) {
   44.20 +    // try to find exact type, using CHA if possible, so that loading
   44.21 +    // the klass from the object can be avoided
   44.22 +    ciType* type = arg->exact_type();
   44.23 +    if (type == NULL) {
   44.24 +      type = arg->declared_type();
   44.25 +      type = comp->cha_exact_type(type);
   44.26 +    }
   44.27 +    assert(type == NULL || type->is_klass(), "type should be class");
   44.28 +    exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
   44.29 +
   44.30 +    do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   44.31 +  }
   44.32 +
   44.33 +  if (!do_null && !do_update) {
   44.34 +    return result;
   44.35 +  }
   44.36 +
   44.37 +  ciKlass* exact_signature_k = NULL;
   44.38 +  if (do_update) {
   44.39 +    // Is the type from the signature exact (the only one possible)?
   44.40 +    exact_signature_k = signature_k->exact_klass();
   44.41 +    if (exact_signature_k == NULL) {
   44.42 +      exact_signature_k = comp->cha_exact_type(signature_k);
   44.43 +    } else {
   44.44 +      result = exact_signature_k;
   44.45 +      do_update = false;
   44.46 +      // Known statically. No need to emit any code: prevent
   44.47 +      // LIR_Assembler::emit_profile_type() from emitting useless code
   44.48 +      profiled_k = ciTypeEntries::with_status(result, profiled_k);
   44.49 +    }
   44.50 +    if (exact_signature_k != NULL && exact_klass != exact_signature_k) {
   44.51 +      assert(exact_klass == NULL, "arg and signature disagree?");
   44.52 +      // sometimes the type of the signature is better than the best type
   44.53 +      // the compiler has
   44.54 +      exact_klass = exact_signature_k;
   44.55 +      do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
   44.56 +    }
   44.57 +  }
   44.58 +
   44.59 +  if (!do_null && !do_update) {
   44.60 +    return result;
   44.61 +  }
   44.62 +
   44.63 +  if (mdp == LIR_OprFact::illegalOpr) {
   44.64 +    mdp = new_register(T_METADATA);
   44.65 +    __ metadata2reg(md->constant_encoding(), mdp);
   44.66 +    if (md_base_offset != 0) {
   44.67 +      LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
   44.68 +      mdp = new_pointer_register();
   44.69 +      __ leal(LIR_OprFact::address(base_type_address), mdp);
   44.70 +    }
   44.71 +  }
   44.72 +  LIRItem value(arg, this);
   44.73 +  value.load_item();
   44.74 +  __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
   44.75 +                  value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
   44.76 +  return result;
   44.77 +}
   44.78 +
   44.79  void LIRGenerator::do_Base(Base* x) {
   44.80    __ std_entry(LIR_OprFact::illegalOpr);
   44.81    // Emit moves from physical registers / stack slots to virtual registers
   44.82 @@ -3004,12 +3076,52 @@
   44.83    }
   44.84  }
   44.85  
   44.86 +void LIRGenerator::profile_arguments(ProfileCall* x) {
   44.87 +  if (MethodData::profile_arguments()) {
   44.88 +    int bci = x->bci_of_invoke();
   44.89 +    ciMethodData* md = x->method()->method_data_or_null();
   44.90 +    ciProfileData* data = md->bci_to_data(bci);
   44.91 +    if (data->is_CallTypeData() || data->is_VirtualCallTypeData()) {
   44.92 +      ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
   44.93 +      int base_offset = md->byte_offset_of_slot(data, extra);
   44.94 +      LIR_Opr mdp = LIR_OprFact::illegalOpr;
   44.95 +      ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
   44.96 +
   44.97 +      Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
   44.98 +      int start = 0;
   44.99 +      int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
  44.100 +      if (x->nb_profiled_args() < stop) {
  44.101 +        // if called through method handle invoke, some arguments may have been popped
  44.102 +        stop = x->nb_profiled_args();
  44.103 +      }
  44.104 +      ciSignature* sig = x->callee()->signature();
  44.105 +      // method handle call to virtual method
  44.106 +      bool has_receiver = x->inlined() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
  44.107 +      ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
  44.108 +      for (int i = 0; i < stop; i++) {
  44.109 +        int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
  44.110 +        ciKlass* exact = profile_arg_type(md, base_offset, off,
  44.111 +                                          args->type(i), x->profiled_arg_at(i+start), mdp,
  44.112 +                                          !x->arg_needs_null_check(i+start), sig_stream.next_klass());
  44.113 +        if (exact != NULL) {
  44.114 +          md->set_argument_type(bci, i, exact);
  44.115 +        }
  44.116 +      }
  44.117 +    }
  44.118 +  }
  44.119 +}
  44.120 +
  44.121  void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  44.122    // Need recv in a temporary register so it interferes with the other temporaries
  44.123    LIR_Opr recv = LIR_OprFact::illegalOpr;
  44.124    LIR_Opr mdo = new_register(T_OBJECT);
  44.125    // tmp is used to hold the counters on SPARC
  44.126    LIR_Opr tmp = new_pointer_register();
  44.127 +
  44.128 +  if (x->nb_profiled_args() > 0) {
  44.129 +    profile_arguments(x);
  44.130 +  }
  44.131 +
  44.132    if (x->recv() != NULL) {
  44.133      LIRItem value(x->recv(), this);
  44.134      value.load_item();
  44.135 @@ -3019,6 +3131,21 @@
  44.136    __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
  44.137  }
  44.138  
  44.139 +void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
  44.140 +  int bci = x->bci_of_invoke();
  44.141 +  ciMethodData* md = x->method()->method_data_or_null();
  44.142 +  ciProfileData* data = md->bci_to_data(bci);
  44.143 +  assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
  44.144 +  ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
  44.145 +  LIR_Opr mdp = LIR_OprFact::illegalOpr;
  44.146 +  ciKlass* exact = profile_arg_type(md, 0, md->byte_offset_of_slot(data, ret->type_offset()),
  44.147 +                                    ret->type(), x->ret(), mdp,
  44.148 +                                    !x->needs_null_check(), x->callee()->signature()->return_type()->as_klass());
  44.149 +  if (exact != NULL) {
  44.150 +    md->set_return_type(bci, exact);
  44.151 +  }
  44.152 +}
  44.153 +
  44.154  void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  44.155    // We can safely ignore accessors here, since c2 will inline them anyway,
  44.156    // accessors are also always mature.
  44.157 @@ -3053,7 +3180,11 @@
  44.158    int offset = -1;
  44.159    LIR_Opr counter_holder;
  44.160    if (level == CompLevel_limited_profile) {
  44.161 -    address counters_adr = method->ensure_method_counters();
  44.162 +    MethodCounters* counters_adr = method->ensure_method_counters();
  44.163 +    if (counters_adr == NULL) {
  44.164 +      bailout("method counters allocation failed");
  44.165 +      return;
  44.166 +    }
  44.167      counter_holder = new_pointer_register();
  44.168      __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
  44.169      offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
    45.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Thu Oct 17 06:29:58 2013 -0700
    45.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Fri Oct 18 12:10:44 2013 -0700
    45.3 @@ -434,6 +434,8 @@
    45.4    void do_ThreadIDIntrinsic(Intrinsic* x);
    45.5    void do_ClassIDIntrinsic(Intrinsic* x);
    45.6  #endif
    45.7 +  ciKlass* profile_arg_type(ciMethodData* md, int md_first_offset, int md_offset, intptr_t profiled_k, Value arg, LIR_Opr& mdp, bool not_null, ciKlass* signature_k);
    45.8 +  void profile_arguments(ProfileCall* x);
    45.9  
   45.10   public:
   45.11    Compilation*  compilation() const              { return _compilation; }
   45.12 @@ -534,6 +536,7 @@
   45.13    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   45.14    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   45.15    virtual void do_ProfileCall    (ProfileCall*     x);
   45.16 +  virtual void do_ProfileReturnType (ProfileReturnType* x);
   45.17    virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   45.18    virtual void do_RuntimeCall    (RuntimeCall*     x);
   45.19    virtual void do_MemBar         (MemBar*          x);
    46.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Thu Oct 17 06:29:58 2013 -0700
    46.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Fri Oct 18 12:10:44 2013 -0700
    46.3 @@ -531,6 +531,7 @@
    46.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
    46.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
    46.6    void do_ProfileCall    (ProfileCall*     x);
    46.7 +  void do_ProfileReturnType (ProfileReturnType*  x);
    46.8    void do_ProfileInvoke  (ProfileInvoke*   x);
    46.9    void do_RuntimeCall    (RuntimeCall*     x);
   46.10    void do_MemBar         (MemBar*          x);
   46.11 @@ -657,6 +658,8 @@
   46.12    void handle_Intrinsic       (Intrinsic* x);
   46.13    void handle_ExceptionObject (ExceptionObject* x);
   46.14    void handle_Phi             (Phi* x);
   46.15 +  void handle_ProfileCall     (ProfileCall* x);
   46.16 +  void handle_ProfileReturnType (ProfileReturnType* x);
   46.17  };
   46.18  
   46.19  
   46.20 @@ -715,7 +718,9 @@
   46.21  void NullCheckVisitor::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {}
   46.22  void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
   46.23  void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
   46.24 -void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
   46.25 +void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check();
   46.26 +                                                                nce()->handle_ProfileCall(x); }
   46.27 +void NullCheckVisitor::do_ProfileReturnType (ProfileReturnType* x) { nce()->handle_ProfileReturnType(x); }
   46.28  void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
   46.29  void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
   46.30  void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
   46.31 @@ -1134,6 +1139,15 @@
   46.32    }
   46.33  }
   46.34  
   46.35 +void NullCheckEliminator::handle_ProfileCall(ProfileCall* x) {
   46.36 +  for (int i = 0; i < x->nb_profiled_args(); i++) {
   46.37 +    x->set_arg_needs_null_check(i, !set_contains(x->profiled_arg_at(i)));
   46.38 +  }
   46.39 +}
   46.40 +
   46.41 +void NullCheckEliminator::handle_ProfileReturnType(ProfileReturnType* x) {
   46.42 +  x->set_needs_null_check(!set_contains(x->ret()));
   46.43 +}
   46.44  
   46.45  void Optimizer::eliminate_null_checks() {
   46.46    ResourceMark rm;
    47.1 --- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Thu Oct 17 06:29:58 2013 -0700
    47.2 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Fri Oct 18 12:10:44 2013 -0700
    47.3 @@ -162,7 +162,8 @@
    47.4      void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ };
    47.5      void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ };
    47.6      void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ };
    47.7 -    void do_ProfileInvoke  (ProfileInvoke*  x)  { /* nothing to do */ };
    47.8 +    void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ };
    47.9 +    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   47.10      void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   47.11      void do_MemBar         (MemBar*          x) { /* nothing to do */ };
   47.12      void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    48.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Thu Oct 17 06:29:58 2013 -0700
    48.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Fri Oct 18 12:10:44 2013 -0700
    48.3 @@ -542,8 +542,7 @@
    48.4      // exception handler can cause class loading, which might throw an
    48.5      // exception and those fields are expected to be clear during
    48.6      // normal bytecode execution.
    48.7 -    thread->set_exception_oop(NULL);
    48.8 -    thread->set_exception_pc(NULL);
    48.9 +    thread->clear_exception_oop_and_pc();
   48.10  
   48.11      continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
   48.12      // If an exception was thrown during exception dispatch, the exception oop may have changed
    49.1 --- a/src/share/vm/c1/c1_ValueMap.hpp	Thu Oct 17 06:29:58 2013 -0700
    49.2 +++ b/src/share/vm/c1/c1_ValueMap.hpp	Fri Oct 18 12:10:44 2013 -0700
    49.3 @@ -203,6 +203,7 @@
    49.4    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
    49.5    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
    49.6    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
    49.7 +  void do_ProfileReturnType (ProfileReturnType*  x) { /* nothing to do */ }
    49.8    void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
    49.9    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
   49.10    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    50.1 --- a/src/share/vm/ci/ciClassList.hpp	Thu Oct 17 06:29:58 2013 -0700
    50.2 +++ b/src/share/vm/ci/ciClassList.hpp	Fri Oct 18 12:10:44 2013 -0700
    50.3 @@ -102,6 +102,7 @@
    50.4  friend class ciMethodHandle;           \
    50.5  friend class ciMethodType;             \
    50.6  friend class ciReceiverTypeData;       \
    50.7 +friend class ciTypeEntries;            \
    50.8  friend class ciSymbol;                 \
    50.9  friend class ciArray;                  \
   50.10  friend class ciObjArray;               \
    51.1 --- a/src/share/vm/ci/ciEnv.cpp	Thu Oct 17 06:29:58 2013 -0700
    51.2 +++ b/src/share/vm/ci/ciEnv.cpp	Fri Oct 18 12:10:44 2013 -0700
    51.3 @@ -1154,9 +1154,12 @@
    51.4    GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
    51.5  }
    51.6  
    51.7 -void ciEnv::dump_replay_data(outputStream* out) {
    51.8 -  VM_ENTRY_MARK;
    51.9 -  MutexLocker ml(Compile_lock);
   51.10 +// ------------------------------------------------------------------
   51.11 +// ciEnv::dump_replay_data*
   51.12 +
   51.13 +// Don't change thread state and acquire any locks.
   51.14 +// Safe to call from VM error reporter.
   51.15 +void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   51.16    ResourceMark rm;
   51.17  #if INCLUDE_JVMTI
   51.18    out->print_cr("JvmtiExport can_access_local_variables %d",     _jvmti_can_access_local_variables);
   51.19 @@ -1181,3 +1184,10 @@
   51.20                  entry_bci, comp_level);
   51.21    out->flush();
   51.22  }
   51.23 +
   51.24 +void ciEnv::dump_replay_data(outputStream* out) {
   51.25 +  GUARDED_VM_ENTRY(
   51.26 +    MutexLocker ml(Compile_lock);
   51.27 +    dump_replay_data_unsafe(out);
   51.28 +  )
   51.29 +}
    52.1 --- a/src/share/vm/ci/ciEnv.hpp	Thu Oct 17 06:29:58 2013 -0700
    52.2 +++ b/src/share/vm/ci/ciEnv.hpp	Fri Oct 18 12:10:44 2013 -0700
    52.3 @@ -452,6 +452,7 @@
    52.4  
    52.5    // Dump the compilation replay data for the ciEnv to the stream.
    52.6    void dump_replay_data(outputStream* out);
    52.7 +  void dump_replay_data_unsafe(outputStream* out);
    52.8  };
    52.9  
   52.10  #endif // SHARE_VM_CI_CIENV_HPP
    53.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp	Thu Oct 17 06:29:58 2013 -0700
    53.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp	Fri Oct 18 12:10:44 2013 -0700
    53.3 @@ -671,7 +671,6 @@
    53.4  
    53.5  
    53.6  void ciInstanceKlass::dump_replay_data(outputStream* out) {
    53.7 -  ASSERT_IN_VM;
    53.8    ResourceMark rm;
    53.9  
   53.10    InstanceKlass* ik = get_instanceKlass();
    54.1 --- a/src/share/vm/ci/ciInstanceKlass.hpp	Thu Oct 17 06:29:58 2013 -0700
    54.2 +++ b/src/share/vm/ci/ciInstanceKlass.hpp	Fri Oct 18 12:10:44 2013 -0700
    54.3 @@ -235,6 +235,13 @@
    54.4    bool is_instance_klass() const { return true; }
    54.5    bool is_java_klass() const     { return true; }
    54.6  
    54.7 +  virtual ciKlass* exact_klass() {
    54.8 +    if (is_loaded() && is_final() && !is_interface()) {
    54.9 +      return this;
   54.10 +    }
   54.11 +    return NULL;
   54.12 +  }
   54.13 +
   54.14    // Dump the current state of this klass for compilation replay.
   54.15    virtual void dump_replay_data(outputStream* out);
   54.16  };
    55.1 --- a/src/share/vm/ci/ciKlass.cpp	Thu Oct 17 06:29:58 2013 -0700
    55.2 +++ b/src/share/vm/ci/ciKlass.cpp	Fri Oct 18 12:10:44 2013 -0700
    55.3 @@ -66,7 +66,9 @@
    55.4  // ------------------------------------------------------------------
    55.5  // ciKlass::is_subtype_of
    55.6  bool ciKlass::is_subtype_of(ciKlass* that) {
    55.7 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
    55.8 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
    55.9 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   55.10 +
   55.11    // Check to see if the klasses are identical.
   55.12    if (this == that) {
   55.13      return true;
   55.14 @@ -83,8 +85,8 @@
   55.15  // ------------------------------------------------------------------
   55.16  // ciKlass::is_subclass_of
   55.17  bool ciKlass::is_subclass_of(ciKlass* that) {
   55.18 -  assert(is_loaded() && that->is_loaded(), "must be loaded");
   55.19 -  // Check to see if the klasses are identical.
   55.20 +  assert(this->is_loaded(), err_msg("must be loaded: %s", this->name()->as_quoted_ascii()));
   55.21 +  assert(that->is_loaded(), err_msg("must be loaded: %s", that->name()->as_quoted_ascii()));
   55.22  
   55.23    VM_ENTRY_MARK;
   55.24    Klass* this_klass = get_Klass();
    56.1 --- a/src/share/vm/ci/ciKlass.hpp	Thu Oct 17 06:29:58 2013 -0700
    56.2 +++ b/src/share/vm/ci/ciKlass.hpp	Fri Oct 18 12:10:44 2013 -0700
    56.3 @@ -41,6 +41,7 @@
    56.4    friend class ciEnv;
    56.5    friend class ciField;
    56.6    friend class ciMethod;
    56.7 +  friend class ciMethodData;
    56.8    friend class ciObjArrayKlass;
    56.9  
   56.10  private:
   56.11 @@ -121,6 +122,8 @@
   56.12    // What kind of ciObject is this?
   56.13    bool is_klass() const { return true; }
   56.14  
   56.15 +  virtual ciKlass* exact_klass() = 0;
   56.16 +
   56.17    void print_name_on(outputStream* st);
   56.18  };
   56.19  
    57.1 --- a/src/share/vm/ci/ciMethod.cpp	Thu Oct 17 06:29:58 2013 -0700
    57.2 +++ b/src/share/vm/ci/ciMethod.cpp	Fri Oct 18 12:10:44 2013 -0700
    57.3 @@ -846,7 +846,9 @@
    57.4  // Return true if allocation was successful or no MDO is required.
    57.5  bool ciMethod::ensure_method_data(methodHandle h_m) {
    57.6    EXCEPTION_CONTEXT;
    57.7 -  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
    57.8 +  if (is_native() || is_abstract() || h_m()->is_accessor()) {
    57.9 +    return true;
   57.10 +  }
   57.11    if (h_m()->method_data() == NULL) {
   57.12      Method::build_interpreter_method_data(h_m, THREAD);
   57.13      if (HAS_PENDING_EXCEPTION) {
   57.14 @@ -903,22 +905,21 @@
   57.15  // NULL otherwise.
   57.16  ciMethodData* ciMethod::method_data_or_null() {
   57.17    ciMethodData *md = method_data();
   57.18 -  if (md->is_empty()) return NULL;
   57.19 +  if (md->is_empty()) {
   57.20 +    return NULL;
   57.21 +  }
   57.22    return md;
   57.23  }
   57.24  
   57.25  // ------------------------------------------------------------------
   57.26  // ciMethod::ensure_method_counters
   57.27  //
   57.28 -address ciMethod::ensure_method_counters() {
   57.29 +MethodCounters* ciMethod::ensure_method_counters() {
   57.30    check_is_loaded();
   57.31    VM_ENTRY_MARK;
   57.32    methodHandle mh(THREAD, get_Method());
   57.33 -  MethodCounters *counter = mh->method_counters();
   57.34 -  if (counter == NULL) {
   57.35 -    counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
   57.36 -  }
   57.37 -  return (address)counter;
   57.38 +  MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
   57.39 +  return method_counters;
   57.40  }
   57.41  
   57.42  // ------------------------------------------------------------------
   57.43 @@ -1247,7 +1248,6 @@
   57.44  #undef FETCH_FLAG_FROM_VM
   57.45  
   57.46  void ciMethod::dump_replay_data(outputStream* st) {
   57.47 -  ASSERT_IN_VM;
   57.48    ResourceMark rm;
   57.49    Method* method = get_Method();
   57.50    MethodCounters* mcs = method->method_counters();
    58.1 --- a/src/share/vm/ci/ciMethod.hpp	Thu Oct 17 06:29:58 2013 -0700
    58.2 +++ b/src/share/vm/ci/ciMethod.hpp	Fri Oct 18 12:10:44 2013 -0700
    58.3 @@ -265,7 +265,7 @@
    58.4    bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
    58.5    bool check_call(int refinfo_index, bool is_static) const;
    58.6    bool ensure_method_data();  // make sure it exists in the VM also
    58.7 -  address ensure_method_counters();
    58.8 +  MethodCounters* ensure_method_counters();
    58.9    int instructions_size();
   58.10    int scale_count(int count, float prof_factor = 1.);  // make MDO count commensurate with IIC
   58.11  
    59.1 --- a/src/share/vm/ci/ciMethodData.cpp	Thu Oct 17 06:29:58 2013 -0700
    59.2 +++ b/src/share/vm/ci/ciMethodData.cpp	Fri Oct 18 12:10:44 2013 -0700
    59.3 @@ -78,7 +78,9 @@
    59.4  
    59.5  void ciMethodData::load_data() {
    59.6    MethodData* mdo = get_MethodData();
    59.7 -  if (mdo == NULL) return;
    59.8 +  if (mdo == NULL) {
    59.9 +    return;
   59.10 +  }
   59.11  
   59.12    // To do: don't copy the data if it is not "ripe" -- require a minimum #
   59.13    // of invocations.
   59.14 @@ -123,7 +125,7 @@
   59.15  #endif
   59.16  }
   59.17  
   59.18 -void ciReceiverTypeData::translate_receiver_data_from(ProfileData* data) {
   59.19 +void ciReceiverTypeData::translate_receiver_data_from(const ProfileData* data) {
   59.20    for (uint row = 0; row < row_limit(); row++) {
   59.21      Klass* k = data->as_ReceiverTypeData()->receiver(row);
   59.22      if (k != NULL) {
   59.23 @@ -134,6 +136,18 @@
   59.24  }
   59.25  
   59.26  
   59.27 +void ciTypeStackSlotEntries::translate_type_data_from(const TypeStackSlotEntries* entries) {
   59.28 +  for (int i = 0; i < _number_of_entries; i++) {
   59.29 +    intptr_t k = entries->type(i);
   59.30 +    TypeStackSlotEntries::set_type(i, translate_klass(k));
   59.31 +  }
   59.32 +}
   59.33 +
   59.34 +void ciReturnTypeEntry::translate_type_data_from(const ReturnTypeEntry* ret) {
   59.35 +  intptr_t k = ret->type();
   59.36 +  set_type(translate_klass(k));
   59.37 +}
   59.38 +
   59.39  // Get the data at an arbitrary (sort of) data index.
   59.40  ciProfileData* ciMethodData::data_at(int data_index) {
   59.41    if (out_of_bounds(data_index)) {
   59.42 @@ -164,6 +178,10 @@
   59.43      return new ciMultiBranchData(data_layout);
   59.44    case DataLayout::arg_info_data_tag:
   59.45      return new ciArgInfoData(data_layout);
   59.46 +  case DataLayout::call_type_data_tag:
   59.47 +    return new ciCallTypeData(data_layout);
   59.48 +  case DataLayout::virtual_call_type_data_tag:
   59.49 +    return new ciVirtualCallTypeData(data_layout);
   59.50    };
   59.51  }
   59.52  
   59.53 @@ -286,6 +304,34 @@
   59.54    }
   59.55  }
   59.56  
   59.57 +void ciMethodData::set_argument_type(int bci, int i, ciKlass* k) {
   59.58 +  VM_ENTRY_MARK;
   59.59 +  MethodData* mdo = get_MethodData();
   59.60 +  if (mdo != NULL) {
   59.61 +    ProfileData* data = mdo->bci_to_data(bci);
   59.62 +    if (data->is_CallTypeData()) {
   59.63 +      data->as_CallTypeData()->set_argument_type(i, k->get_Klass());
   59.64 +    } else {
   59.65 +      assert(data->is_VirtualCallTypeData(), "no arguments!");
   59.66 +      data->as_VirtualCallTypeData()->set_argument_type(i, k->get_Klass());
   59.67 +    }
   59.68 +  }
   59.69 +}
   59.70 +
   59.71 +void ciMethodData::set_return_type(int bci, ciKlass* k) {
   59.72 +  VM_ENTRY_MARK;
   59.73 +  MethodData* mdo = get_MethodData();
   59.74 +  if (mdo != NULL) {
   59.75 +    ProfileData* data = mdo->bci_to_data(bci);
   59.76 +    if (data->is_CallTypeData()) {
   59.77 +      data->as_CallTypeData()->set_return_type(k->get_Klass());
   59.78 +    } else {
   59.79 +      assert(data->is_VirtualCallTypeData(), "no arguments!");
   59.80 +      data->as_VirtualCallTypeData()->set_return_type(k->get_Klass());
   59.81 +    }
   59.82 +  }
   59.83 +}
   59.84 +
   59.85  bool ciMethodData::has_escape_info() {
   59.86    return eflag_set(MethodData::estimated);
   59.87  }
   59.88 @@ -373,7 +419,6 @@
   59.89  }
   59.90  
   59.91  void ciMethodData::dump_replay_data(outputStream* out) {
   59.92 -  ASSERT_IN_VM;
   59.93    ResourceMark rm;
   59.94    MethodData* mdo = get_MethodData();
   59.95    Method* method = mdo->method();
   59.96 @@ -477,7 +522,50 @@
   59.97    }
   59.98  }
   59.99  
  59.100 -void ciReceiverTypeData::print_receiver_data_on(outputStream* st) {
  59.101 +void ciTypeEntries::print_ciklass(outputStream* st, intptr_t k) {
  59.102 +  if (TypeEntries::is_type_none(k)) {
  59.103 +    st->print("none");
  59.104 +  } else if (TypeEntries::is_type_unknown(k)) {
  59.105 +    st->print("unknown");
  59.106 +  } else {
  59.107 +    valid_ciklass(k)->print_name_on(st);
  59.108 +  }
  59.109 +  if (TypeEntries::was_null_seen(k)) {
  59.110 +    st->print(" (null seen)");
  59.111 +  }
  59.112 +}
  59.113 +
  59.114 +void ciTypeStackSlotEntries::print_data_on(outputStream* st) const {
  59.115 +  for (int i = 0; i < _number_of_entries; i++) {
  59.116 +    _pd->tab(st);
  59.117 +    st->print("%d: stack (%u) ", i, stack_slot(i));
  59.118 +    print_ciklass(st, type(i));
  59.119 +    st->cr();
  59.120 +  }
  59.121 +}
  59.122 +
  59.123 +void ciReturnTypeEntry::print_data_on(outputStream* st) const {
  59.124 +  _pd->tab(st);
  59.125 +  st->print("ret ");
  59.126 +  print_ciklass(st, type());
  59.127 +  st->cr();
  59.128 +}
  59.129 +
  59.130 +void ciCallTypeData::print_data_on(outputStream* st) const {
  59.131 +  print_shared(st, "ciCallTypeData");
  59.132 +  if (has_arguments()) {
  59.133 +    tab(st, true);
  59.134 +    st->print("argument types");
  59.135 +    args()->print_data_on(st);
  59.136 +  }
  59.137 +  if (has_return()) {
  59.138 +    tab(st, true);
  59.139 +    st->print("return type");
  59.140 +    ret()->print_data_on(st);
  59.141 +  }
  59.142 +}
  59.143 +
  59.144 +void ciReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  59.145    uint row;
  59.146    int entries = 0;
  59.147    for (row = 0; row < row_limit(); row++) {
  59.148 @@ -493,13 +581,28 @@
  59.149    }
  59.150  }
  59.151  
  59.152 -void ciReceiverTypeData::print_data_on(outputStream* st) {
  59.153 +void ciReceiverTypeData::print_data_on(outputStream* st) const {
  59.154    print_shared(st, "ciReceiverTypeData");
  59.155    print_receiver_data_on(st);
  59.156  }
  59.157  
  59.158 -void ciVirtualCallData::print_data_on(outputStream* st) {
  59.159 +void ciVirtualCallData::print_data_on(outputStream* st) const {
  59.160    print_shared(st, "ciVirtualCallData");
  59.161    rtd_super()->print_receiver_data_on(st);
  59.162  }
  59.163 +
  59.164 +void ciVirtualCallTypeData::print_data_on(outputStream* st) const {
  59.165 +  print_shared(st, "ciVirtualCallTypeData");
  59.166 +  rtd_super()->print_receiver_data_on(st);
  59.167 +  if (has_arguments()) {
  59.168 +    tab(st, true);
  59.169 +    st->print("argument types");
  59.170 +    args()->print_data_on(st);
  59.171 +  }
  59.172 +  if (has_return()) {
  59.173 +    tab(st, true);
  59.174 +    st->print("return type");
  59.175 +    ret()->print_data_on(st);
  59.176 +  }
  59.177 +}
  59.178  #endif
    60.1 --- a/src/share/vm/ci/ciMethodData.hpp	Thu Oct 17 06:29:58 2013 -0700
    60.2 +++ b/src/share/vm/ci/ciMethodData.hpp	Fri Oct 18 12:10:44 2013 -0700
    60.3 @@ -41,6 +41,8 @@
    60.4  class ciArrayData;
    60.5  class ciMultiBranchData;
    60.6  class ciArgInfoData;
    60.7 +class ciCallTypeData;
    60.8 +class ciVirtualCallTypeData;
    60.9  
   60.10  typedef ProfileData ciProfileData;
   60.11  
   60.12 @@ -59,6 +61,103 @@
   60.13    ciJumpData(DataLayout* layout) : JumpData(layout) {};
   60.14  };
   60.15  
   60.16 +class ciTypeEntries {
   60.17 +protected:
   60.18 +  static intptr_t translate_klass(intptr_t k) {
   60.19 +    Klass* v = TypeEntries::valid_klass(k);
   60.20 +    if (v != NULL) {
   60.21 +      ciKlass* klass = CURRENT_ENV->get_klass(v);
   60.22 +      return with_status(klass, k);
   60.23 +    }
   60.24 +    return with_status(NULL, k);
   60.25 +  }
   60.26 +
   60.27 +public:
   60.28 +  static ciKlass* valid_ciklass(intptr_t k) {
   60.29 +    if (!TypeEntries::is_type_none(k) &&
   60.30 +        !TypeEntries::is_type_unknown(k)) {
   60.31 +      return (ciKlass*)TypeEntries::klass_part(k);
   60.32 +    } else {
   60.33 +      return NULL;
   60.34 +    }
   60.35 +  }
   60.36 +
   60.37 +  static intptr_t with_status(ciKlass* k, intptr_t in) {
   60.38 +    return TypeEntries::with_status((intptr_t)k, in);
   60.39 +  }
   60.40 +
   60.41 +#ifndef PRODUCT
   60.42 +  static void print_ciklass(outputStream* st, intptr_t k);
   60.43 +#endif
   60.44 +};
   60.45 +
   60.46 +class ciTypeStackSlotEntries : public TypeStackSlotEntries, ciTypeEntries {
   60.47 +public:
   60.48 +  void translate_type_data_from(const TypeStackSlotEntries* args);
   60.49 +
   60.50 +  ciKlass* valid_type(int i) const {
   60.51 +    return valid_ciklass(type(i));
   60.52 +  }
   60.53 +
   60.54 +#ifndef PRODUCT
   60.55 +  void print_data_on(outputStream* st) const;
   60.56 +#endif
   60.57 +};
   60.58 +
   60.59 +class ciReturnTypeEntry : public ReturnTypeEntry, ciTypeEntries {
   60.60 +public:
   60.61 +  void translate_type_data_from(const ReturnTypeEntry* ret);
   60.62 +
   60.63 +  ciKlass* valid_type() const {
   60.64 +    return valid_ciklass(type());
   60.65 +  }
   60.66 +
   60.67 +#ifndef PRODUCT
   60.68 +  void print_data_on(outputStream* st) const;
   60.69 +#endif
   60.70 +};
   60.71 +
   60.72 +class ciCallTypeData : public CallTypeData {
   60.73 +public:
   60.74 +  ciCallTypeData(DataLayout* layout) : CallTypeData(layout) {}
   60.75 +
   60.76 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)CallTypeData::args(); }
   60.77 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)CallTypeData::ret(); }
   60.78 +
   60.79 +  void translate_type_data_from(const ProfileData* data) {
   60.80 +    if (has_arguments()) {
   60.81 +      args()->translate_type_data_from(data->as_CallTypeData()->args());
   60.82 +    }
   60.83 +    if (has_return()) {
   60.84 +      ret()->translate_type_data_from(data->as_CallTypeData()->ret());
   60.85 +    }
   60.86 +  }
   60.87 +
   60.88 +  intptr_t argument_type(int i) const {
   60.89 +    assert(has_arguments(), "no arg type profiling data");
   60.90 +    return args()->type(i);
   60.91 +  }
   60.92 +
   60.93 +  ciKlass* valid_argument_type(int i) const {
   60.94 +    assert(has_arguments(), "no arg type profiling data");
   60.95 +    return args()->valid_type(i);
   60.96 +  }
   60.97 +
   60.98 +  intptr_t return_type() const {
   60.99 +    assert(has_return(), "no ret type profiling data");
  60.100 +    return ret()->type();
  60.101 +  }
  60.102 +
  60.103 +  ciKlass* valid_return_type() const {
  60.104 +    assert(has_return(), "no ret type profiling data");
  60.105 +    return ret()->valid_type();
  60.106 +  }
  60.107 +
  60.108 +#ifndef PRODUCT
  60.109 +  void print_data_on(outputStream* st) const;
  60.110 +#endif
  60.111 +};
  60.112 +
  60.113  class ciReceiverTypeData : public ReceiverTypeData {
  60.114  public:
  60.115    ciReceiverTypeData(DataLayout* layout) : ReceiverTypeData(layout) {};
  60.116 @@ -69,7 +168,7 @@
  60.117                    (intptr_t) recv);
  60.118    }
  60.119  
  60.120 -  ciKlass* receiver(uint row) {
  60.121 +  ciKlass* receiver(uint row) const {
  60.122      assert((uint)row < row_limit(), "oob");
  60.123      ciKlass* recv = (ciKlass*)intptr_at(receiver0_offset + row * receiver_type_row_cell_count);
  60.124      assert(recv == NULL || recv->is_klass(), "wrong type");
  60.125 @@ -77,19 +176,19 @@
  60.126    }
  60.127  
  60.128    // Copy & translate from oop based ReceiverTypeData
  60.129 -  virtual void translate_from(ProfileData* data) {
  60.130 +  virtual void translate_from(const ProfileData* data) {
  60.131      translate_receiver_data_from(data);
  60.132    }
  60.133 -  void translate_receiver_data_from(ProfileData* data);
  60.134 +  void translate_receiver_data_from(const ProfileData* data);
  60.135  #ifndef PRODUCT
  60.136 -  void print_data_on(outputStream* st);
  60.137 -  void print_receiver_data_on(outputStream* st);
  60.138 +  void print_data_on(outputStream* st) const;
  60.139 +  void print_receiver_data_on(outputStream* st) const;
  60.140  #endif
  60.141  };
  60.142  
  60.143  class ciVirtualCallData : public VirtualCallData {
  60.144    // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  60.145 -  ciReceiverTypeData* rtd_super() { return (ciReceiverTypeData*) this; }
  60.146 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  60.147  
  60.148  public:
  60.149    ciVirtualCallData(DataLayout* layout) : VirtualCallData(layout) {};
  60.150 @@ -103,11 +202,65 @@
  60.151    }
  60.152  
  60.153    // Copy & translate from oop based VirtualCallData
  60.154 -  virtual void translate_from(ProfileData* data) {
  60.155 +  virtual void translate_from(const ProfileData* data) {
  60.156      rtd_super()->translate_receiver_data_from(data);
  60.157    }
  60.158  #ifndef PRODUCT
  60.159 -  void print_data_on(outputStream* st);
  60.160 +  void print_data_on(outputStream* st) const;
  60.161 +#endif
  60.162 +};
  60.163 +
  60.164 +class ciVirtualCallTypeData : public VirtualCallTypeData {
  60.165 +private:
  60.166 +  // Fake multiple inheritance...  It's a ciReceiverTypeData also.
  60.167 +  ciReceiverTypeData* rtd_super() const { return (ciReceiverTypeData*) this; }
  60.168 +public:
  60.169 +  ciVirtualCallTypeData(DataLayout* layout) : VirtualCallTypeData(layout) {}
  60.170 +
  60.171 +  void set_receiver(uint row, ciKlass* recv) {
  60.172 +    rtd_super()->set_receiver(row, recv);
  60.173 +  }
  60.174 +
  60.175 +  ciKlass* receiver(uint row) const {
  60.176 +    return rtd_super()->receiver(row);
  60.177 +  }
  60.178 +
  60.179 +  ciTypeStackSlotEntries* args() const { return (ciTypeStackSlotEntries*)VirtualCallTypeData::args(); }
  60.180 +  ciReturnTypeEntry* ret() const { return (ciReturnTypeEntry*)VirtualCallTypeData::ret(); }
  60.181 +
  60.182 +  // Copy & translate from oop based VirtualCallData
  60.183 +  virtual void translate_from(const ProfileData* data) {
  60.184 +    rtd_super()->translate_receiver_data_from(data);
  60.185 +    if (has_arguments()) {
  60.186 +      args()->translate_type_data_from(data->as_VirtualCallTypeData()->args());
  60.187 +    }
  60.188 +    if (has_return()) {
  60.189 +      ret()->translate_type_data_from(data->as_VirtualCallTypeData()->ret());
  60.190 +    }
  60.191 +  }
  60.192 +
  60.193 +  intptr_t argument_type(int i) const {
  60.194 +    assert(has_arguments(), "no arg type profiling data");
  60.195 +    return args()->type(i);
  60.196 +  }
  60.197 +
  60.198 +  ciKlass* valid_argument_type(int i) const {
  60.199 +    assert(has_arguments(), "no arg type profiling data");
  60.200 +    return args()->valid_type(i);
  60.201 +  }
  60.202 +
  60.203 +  intptr_t return_type() const {
  60.204 +    assert(has_return(), "no ret type profiling data");
  60.205 +    return ret()->type();
  60.206 +  }
  60.207 +
  60.208 +  ciKlass* valid_return_type() const {
  60.209 +    assert(has_return(), "no ret type profiling data");
  60.210 +    return ret()->valid_type();
  60.211 +  }
  60.212 +
  60.213 +#ifndef PRODUCT
  60.214 +  void print_data_on(outputStream* st) const;
  60.215  #endif
  60.216  };
  60.217  
  60.218 @@ -232,8 +385,6 @@
  60.219  public:
  60.220    bool is_method_data() const { return true; }
  60.221  
  60.222 -  void set_mature() { _state = mature_state; }
  60.223 -
  60.224    bool is_empty()  { return _state == empty_state; }
  60.225    bool is_mature() { return _state == mature_state; }
  60.226  
  60.227 @@ -249,6 +400,10 @@
  60.228    // Also set the numer of loops and blocks in the method.
  60.229    // Again, this is used to determine if a method is trivial.
  60.230    void set_compilation_stats(short loops, short blocks);
  60.231 +  // If the compiler finds a profiled type that is known statically
  60.232 +  // for sure, set it in the MethodData
  60.233 +  void set_argument_type(int bci, int i, ciKlass* k);
  60.234 +  void set_return_type(int bci, ciKlass* k);
  60.235  
  60.236    void load_data();
  60.237  
    61.1 --- a/src/share/vm/ci/ciObjArrayKlass.cpp	Thu Oct 17 06:29:58 2013 -0700
    61.2 +++ b/src/share/vm/ci/ciObjArrayKlass.cpp	Fri Oct 18 12:10:44 2013 -0700
    61.3 @@ -179,3 +179,16 @@
    61.4  ciObjArrayKlass* ciObjArrayKlass::make(ciKlass* element_klass) {
    61.5    GUARDED_VM_ENTRY(return make_impl(element_klass);)
    61.6  }
    61.7 +
    61.8 +ciKlass* ciObjArrayKlass::exact_klass() {
    61.9 +  ciType* base = base_element_type();
   61.10 +  if (base->is_instance_klass()) {
   61.11 +    ciInstanceKlass* ik = base->as_instance_klass();
   61.12 +    if (ik->exact_klass() != NULL) {
   61.13 +      return this;
   61.14 +    }
   61.15 +  } else if (base->is_primitive_type()) {
   61.16 +    return this;
   61.17 +  }
   61.18 +  return NULL;
   61.19 +}
    62.1 --- a/src/share/vm/ci/ciObjArrayKlass.hpp	Thu Oct 17 06:29:58 2013 -0700
    62.2 +++ b/src/share/vm/ci/ciObjArrayKlass.hpp	Fri Oct 18 12:10:44 2013 -0700
    62.3 @@ -73,6 +73,8 @@
    62.4    bool is_obj_array_klass() const { return true; }
    62.5  
    62.6    static ciObjArrayKlass* make(ciKlass* element_klass);
    62.7 +
    62.8 +  virtual ciKlass* exact_klass();
    62.9  };
   62.10  
   62.11  #endif // SHARE_VM_CI_CIOBJARRAYKLASS_HPP
    63.1 --- a/src/share/vm/ci/ciReplay.cpp	Thu Oct 17 06:29:58 2013 -0700
    63.2 +++ b/src/share/vm/ci/ciReplay.cpp	Fri Oct 18 12:10:44 2013 -0700
    63.3 @@ -965,14 +965,12 @@
    63.4      tty->cr();
    63.5    } else {
    63.6      EXCEPTION_CONTEXT;
    63.7 -    MethodCounters* mcs = method->method_counters();
    63.8      // m->_instructions_size = rec->instructions_size;
    63.9      m->_instructions_size = -1;
   63.10      m->_interpreter_invocation_count = rec->interpreter_invocation_count;
   63.11      m->_interpreter_throwout_count = rec->interpreter_throwout_count;
   63.12 -    if (mcs == NULL) {
   63.13 -      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
   63.14 -    }
   63.15 +    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
   63.16 +    guarantee(mcs != NULL, "method counters allocation failed");
   63.17      mcs->invocation_counter()->_counter = rec->invocation_counter;
   63.18      mcs->backedge_counter()->_counter = rec->backedge_counter;
   63.19    }
    64.1 --- a/src/share/vm/ci/ciStreams.hpp	Thu Oct 17 06:29:58 2013 -0700
    64.2 +++ b/src/share/vm/ci/ciStreams.hpp	Fri Oct 18 12:10:44 2013 -0700
    64.3 @@ -277,11 +277,14 @@
    64.4  class ciSignatureStream : public StackObj {
    64.5  private:
    64.6    ciSignature* _sig;
    64.7 -  int    _pos;
    64.8 +  int          _pos;
    64.9 +  // holder is a method's holder
   64.10 +  ciKlass*     _holder;
   64.11  public:
   64.12 -  ciSignatureStream(ciSignature* signature) {
   64.13 +  ciSignatureStream(ciSignature* signature, ciKlass* holder = NULL) {
   64.14      _sig = signature;
   64.15      _pos = 0;
   64.16 +    _holder = holder;
   64.17    }
   64.18  
   64.19    bool at_return_type() { return _pos == _sig->count(); }
   64.20 @@ -301,6 +304,23 @@
   64.21        return _sig->type_at(_pos);
   64.22      }
   64.23    }
   64.24 +
   64.25 +  // next klass in the signature
   64.26 +  ciKlass* next_klass() {
   64.27 +    ciKlass* sig_k;
   64.28 +    if (_holder != NULL) {
   64.29 +      sig_k = _holder;
   64.30 +      _holder = NULL;
   64.31 +    } else {
   64.32 +      while (!type()->is_klass()) {
   64.33 +        next();
   64.34 +      }
   64.35 +      assert(!at_return_type(), "passed end of signature");
   64.36 +      sig_k = type()->as_klass();
   64.37 +      next();
   64.38 +    }
   64.39 +    return sig_k;
   64.40 +  }
   64.41  };
   64.42  
   64.43  
    65.1 --- a/src/share/vm/ci/ciTypeArrayKlass.hpp	Thu Oct 17 06:29:58 2013 -0700
    65.2 +++ b/src/share/vm/ci/ciTypeArrayKlass.hpp	Fri Oct 18 12:10:44 2013 -0700
    65.3 @@ -57,6 +57,10 @@
    65.4  
    65.5    // Make an array klass corresponding to the specified primitive type.
    65.6    static ciTypeArrayKlass* make(BasicType type);
    65.7 +
    65.8 +  virtual ciKlass* exact_klass() {
    65.9 +    return this;
   65.10 +  }
   65.11  };
   65.12  
   65.13  #endif // SHARE_VM_CI_CITYPEARRAYKLASS_HPP
    66.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 17 06:29:58 2013 -0700
    66.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Fri Oct 18 12:10:44 2013 -0700
    66.3 @@ -857,7 +857,6 @@
    66.4    m->set_max_locals(params);
    66.5    m->constMethod()->set_stackmap_data(NULL);
    66.6    m->set_code(code_start);
    66.7 -  m->set_force_inline(true);
    66.8  
    66.9    return m;
   66.10  }
    67.1 --- a/src/share/vm/classfile/verifier.cpp	Thu Oct 17 06:29:58 2013 -0700
    67.2 +++ b/src/share/vm/classfile/verifier.cpp	Fri Oct 18 12:10:44 2013 -0700
    67.3 @@ -2439,19 +2439,19 @@
    67.4               && !ref_class_type.equals(current_type())
    67.5               && !ref_class_type.equals(VerificationType::reference_type(
    67.6                    current_class()->super()->name()))) {
    67.7 -    bool subtype = ref_class_type.is_assignable_from(
    67.8 -      current_type(), this, CHECK_VERIFY(this));
    67.9 +    bool subtype = false;
   67.10 +    if (!current_class()->is_anonymous()) {
   67.11 +      subtype = ref_class_type.is_assignable_from(
   67.12 +                 current_type(), this, CHECK_VERIFY(this));
   67.13 +    } else {
   67.14 +      subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
   67.15 +                 current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
   67.16 +    }
   67.17      if (!subtype) {
   67.18 -      if (current_class()->is_anonymous()) {
   67.19 -        subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
   67.20 -                   current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
   67.21 -      }
   67.22 -      if (!subtype) {
   67.23 -        verify_error(ErrorContext::bad_code(bci),
   67.24 -            "Bad invokespecial instruction: "
   67.25 -            "current class isn't assignable to reference class.");
   67.26 -         return;
   67.27 -      }
   67.28 +      verify_error(ErrorContext::bad_code(bci),
   67.29 +          "Bad invokespecial instruction: "
   67.30 +          "current class isn't assignable to reference class.");
   67.31 +       return;
   67.32      }
   67.33    }
   67.34    // Match method descriptor with operand stack
   67.35 @@ -2470,17 +2470,13 @@
   67.36          if (!current_class()->is_anonymous()) {
   67.37            current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
   67.38          } else {
   67.39 -          // anonymous class invokespecial calls: either the
   67.40 -          // operand stack/objectref  is a subtype of the current class OR
   67.41 -          // the objectref is a subtype of the host_klass of the current class
   67.42 +          // anonymous class invokespecial calls: check if the
   67.43 +          // objectref is a subtype of the host_klass of the current class
   67.44            // to allow an anonymous class to reference methods in the host_klass
   67.45            VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
   67.46 -          bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
   67.47 -          if (!subtype) {
   67.48 -            VerificationType hosttype =
   67.49 -              VerificationType::reference_type(current_class()->host_klass()->name());
   67.50 -            subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
   67.51 -          }
   67.52 +          VerificationType hosttype =
   67.53 +            VerificationType::reference_type(current_class()->host_klass()->name());
   67.54 +          bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
   67.55            if (!subtype) {
   67.56              verify_error( ErrorContext::bad_type(current_frame->offset(),
   67.57                current_frame->stack_top_ctx(),
    68.1 --- a/src/share/vm/code/codeBlob.cpp	Thu Oct 17 06:29:58 2013 -0700
    68.2 +++ b/src/share/vm/code/codeBlob.cpp	Fri Oct 18 12:10:44 2013 -0700
    68.3 @@ -245,8 +245,8 @@
    68.4  }
    68.5  
    68.6  
    68.7 -void* BufferBlob::operator new(size_t s, unsigned size) throw() {
    68.8 -  void* p = CodeCache::allocate(size);
    68.9 +void* BufferBlob::operator new(size_t s, unsigned size, bool is_critical) throw() {
   68.10 +  void* p = CodeCache::allocate(size, is_critical);
   68.11    return p;
   68.12  }
   68.13  
   68.14 @@ -277,7 +277,10 @@
   68.15    unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
   68.16    {
   68.17      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   68.18 -    blob = new (size) AdapterBlob(size, cb);
   68.19 +    // The parameter 'true' indicates a critical memory allocation.
   68.20 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   68.21 +    const bool is_critical = true;
   68.22 +    blob = new (size, is_critical) AdapterBlob(size, cb);
   68.23    }
   68.24    // Track memory usage statistic after releasing CodeCache_lock
   68.25    MemoryService::track_code_cache_memory_usage();
   68.26 @@ -299,7 +302,10 @@
   68.27    size += round_to(buffer_size, oopSize);
   68.28    {
   68.29      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   68.30 -    blob = new (size) MethodHandlesAdapterBlob(size);
   68.31 +    // The parameter 'true' indicates a critical memory allocation.
   68.32 +    // This means that CodeCacheMinimumFreeSpace is used, if necessary
   68.33 +    const bool is_critical = true;
   68.34 +    blob = new (size, is_critical) MethodHandlesAdapterBlob(size);
   68.35    }
   68.36    // Track memory usage statistic after releasing CodeCache_lock
   68.37    MemoryService::track_code_cache_memory_usage();
    69.1 --- a/src/share/vm/code/codeBlob.hpp	Thu Oct 17 06:29:58 2013 -0700
    69.2 +++ b/src/share/vm/code/codeBlob.hpp	Fri Oct 18 12:10:44 2013 -0700
    69.3 @@ -209,7 +209,7 @@
    69.4    BufferBlob(const char* name, int size);
    69.5    BufferBlob(const char* name, int size, CodeBuffer* cb);
    69.6  
    69.7 -  void* operator new(size_t s, unsigned size) throw();
    69.8 +  void* operator new(size_t s, unsigned size, bool is_critical = false) throw();
    69.9  
   69.10   public:
   69.11    // Creation
   69.12 @@ -253,7 +253,6 @@
   69.13  class MethodHandlesAdapterBlob: public BufferBlob {
   69.14  private:
   69.15    MethodHandlesAdapterBlob(int size)                 : BufferBlob("MethodHandles adapters", size) {}
   69.16 -  MethodHandlesAdapterBlob(int size, CodeBuffer* cb) : BufferBlob("MethodHandles adapters", size, cb) {}
   69.17  
   69.18  public:
   69.19    // Creation
    70.1 --- a/src/share/vm/compiler/abstractCompiler.cpp	Thu Oct 17 06:29:58 2013 -0700
    70.2 +++ b/src/share/vm/compiler/abstractCompiler.cpp	Fri Oct 18 12:10:44 2013 -0700
    70.3 @@ -24,41 +24,42 @@
    70.4  
    70.5  #include "precompiled.hpp"
    70.6  #include "compiler/abstractCompiler.hpp"
    70.7 +#include "compiler/compileBroker.hpp"
    70.8  #include "runtime/mutexLocker.hpp"
    70.9 -void AbstractCompiler::initialize_runtimes(initializer f, volatile int* state) {
   70.10 -  if (*state != initialized) {
   70.11  
   70.12 -    // We are thread in native here...
   70.13 -    CompilerThread* thread = CompilerThread::current();
   70.14 -    bool do_initialization = false;
   70.15 -    {
   70.16 -      ThreadInVMfromNative tv(thread);
   70.17 -      ResetNoHandleMark rnhm;
   70.18 -      MutexLocker only_one(CompileThread_lock, thread);
   70.19 -      if ( *state == uninitialized) {
   70.20 -        do_initialization = true;
   70.21 -        *state = initializing;
   70.22 -      } else {
   70.23 -        while (*state == initializing ) {
   70.24 -          CompileThread_lock->wait();
   70.25 -        }
   70.26 +bool AbstractCompiler::should_perform_init() {
   70.27 +  if (_compiler_state != initialized) {
   70.28 +    MutexLocker only_one(CompileThread_lock);
   70.29 +
   70.30 +    if (_compiler_state == uninitialized) {
   70.31 +      _compiler_state = initializing;
   70.32 +      return true;
   70.33 +    } else {
   70.34 +      while (_compiler_state == initializing) {
   70.35 +        CompileThread_lock->wait();
   70.36        }
   70.37      }
   70.38 -    if (do_initialization) {
   70.39 -      // We can not hold any locks here since JVMTI events may call agents
   70.40 +  }
   70.41 +  return false;
   70.42 +}
   70.43  
   70.44 -      // Compiler(s) run as native
   70.45 +bool AbstractCompiler::should_perform_shutdown() {
   70.46 +  // Since this method can be called by multiple threads, the lock ensures atomicity of
   70.47 +  // decrementing '_num_compiler_threads' and the following operations.
   70.48 +  MutexLocker only_one(CompileThread_lock);
   70.49 +  _num_compiler_threads--;
   70.50 +  assert (CompileBroker::is_compilation_disabled_forever(), "Must be set, otherwise thread waits forever");
   70.51  
   70.52 -      (*f)();
   70.53 +  // Only the last thread will perform shutdown operations
   70.54 +  if (_num_compiler_threads == 0) {
   70.55 +    return true;
   70.56 +  }
   70.57 +  return false;
   70.58 +}
   70.59  
   70.60 -      // To in_vm so we can use the lock
   70.61 -
   70.62 -      ThreadInVMfromNative tv(thread);
   70.63 -      ResetNoHandleMark rnhm;
   70.64 -      MutexLocker only_one(CompileThread_lock, thread);
   70.65 -      assert(*state == initializing, "wrong state");
   70.66 -      *state = initialized;
   70.67 -      CompileThread_lock->notify_all();
   70.68 -    }
   70.69 -  }
   70.70 +void AbstractCompiler::set_state(int state) {
   70.71 +  // Ensure that ste is only set by one thread at a time
   70.72 +  MutexLocker only_one(CompileThread_lock);
   70.73 +  _compiler_state =  state;
   70.74 +  CompileThread_lock->notify_all();
   70.75  }
    71.1 --- a/src/share/vm/compiler/abstractCompiler.hpp	Thu Oct 17 06:29:58 2013 -0700
    71.2 +++ b/src/share/vm/compiler/abstractCompiler.hpp	Fri Oct 18 12:10:44 2013 -0700
    71.3 @@ -27,22 +27,25 @@
    71.4  
    71.5  #include "ci/compilerInterface.hpp"
    71.6  
    71.7 -typedef void (*initializer)(void);
    71.8 -
    71.9  class AbstractCompiler : public CHeapObj<mtCompiler> {
   71.10   private:
   71.11 -  bool _is_initialized; // Mark whether compiler object is initialized
   71.12 +  volatile int _num_compiler_threads;
   71.13  
   71.14   protected:
   71.15 +  volatile int _compiler_state;
   71.16    // Used for tracking global state of compiler runtime initialization
   71.17 -  enum { uninitialized, initializing, initialized };
   71.18 +  enum { uninitialized, initializing, initialized, failed, shut_down };
   71.19  
   71.20 -  // This method will call the initialization method "f" once (per compiler class/subclass)
   71.21 -  // and do so without holding any locks
   71.22 -  void initialize_runtimes(initializer f, volatile int* state);
   71.23 +  // This method returns true for the first compiler thread that reaches that methods.
   71.24 +  // This thread will initialize the compiler runtime.
   71.25 +  bool should_perform_init();
   71.26  
   71.27   public:
   71.28 -  AbstractCompiler() : _is_initialized(false)    {}
   71.29 +  AbstractCompiler() : _compiler_state(uninitialized), _num_compiler_threads(0) {}
   71.30 +
   71.31 +  // This function determines the compiler thread that will perform the
   71.32 +  // shutdown of the corresponding compiler runtime.
   71.33 +  bool should_perform_shutdown();
   71.34  
   71.35    // Name of this compiler
   71.36    virtual const char* name() = 0;
   71.37 @@ -74,17 +77,18 @@
   71.38  #endif // TIERED
   71.39  
   71.40    // Customization
   71.41 -  virtual bool needs_stubs            ()         = 0;
   71.42 +  virtual void initialize () = 0;
   71.43  
   71.44 -  void mark_initialized()                        { _is_initialized = true; }
   71.45 -  bool is_initialized()                          { return _is_initialized; }
   71.46 +  void set_num_compiler_threads(int num) { _num_compiler_threads = num;  }
   71.47 +  int num_compiler_threads()             { return _num_compiler_threads; }
   71.48  
   71.49 -  virtual void initialize()                      = 0;
   71.50 -
   71.51 +  // Get/set state of compiler objects
   71.52 +  bool is_initialized()           { return _compiler_state == initialized; }
   71.53 +  bool is_failed     ()           { return _compiler_state == failed;}
   71.54 +  void set_state     (int state);
   71.55 +  void set_shut_down ()           { set_state(shut_down); }
   71.56    // Compilation entry point for methods
   71.57 -  virtual void compile_method(ciEnv* env,
   71.58 -                              ciMethod* target,
   71.59 -                              int entry_bci) {
   71.60 +  virtual void compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   71.61      ShouldNotReachHere();
   71.62    }
   71.63  
    72.1 --- a/src/share/vm/compiler/compileBroker.cpp	Thu Oct 17 06:29:58 2013 -0700
    72.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Fri Oct 18 12:10:44 2013 -0700
    72.3 @@ -186,7 +186,7 @@
    72.4  CompileQueue* CompileBroker::_c1_method_queue    = NULL;
    72.5  CompileTask*  CompileBroker::_task_free_list     = NULL;
    72.6  
    72.7 -GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
    72.8 +GrowableArray<CompilerThread*>* CompileBroker::_compiler_threads = NULL;
    72.9  
   72.10  
   72.11  class CompilationLog : public StringEventLog {
   72.12 @@ -587,9 +587,6 @@
   72.13  
   72.14  
   72.15  
   72.16 -// ------------------------------------------------------------------
   72.17 -// CompileQueue::add
   72.18 -//
   72.19  // Add a CompileTask to a CompileQueue
   72.20  void CompileQueue::add(CompileTask* task) {
   72.21    assert(lock()->owned_by_self(), "must own lock");
   72.22 @@ -626,6 +623,16 @@
   72.23    lock()->notify_all();
   72.24  }
   72.25  
   72.26 +void CompileQueue::delete_all() {
   72.27 +  assert(lock()->owned_by_self(), "must own lock");
   72.28 +  if (_first != NULL) {
   72.29 +    for (CompileTask* task = _first; task != NULL; task = task->next()) {
   72.30 +      delete task;
   72.31 +    }
   72.32 +    _first = NULL;
   72.33 +  }
   72.34 +}
   72.35 +
   72.36  // ------------------------------------------------------------------
   72.37  // CompileQueue::get
   72.38  //
   72.39 @@ -640,6 +647,11 @@
   72.40    // case we perform code cache sweeps to free memory such that we can re-enable
   72.41    // compilation.
   72.42    while (_first == NULL) {
   72.43 +    // Exit loop if compilation is disabled forever
   72.44 +    if (CompileBroker::is_compilation_disabled_forever()) {
   72.45 +      return NULL;
   72.46 +    }
   72.47 +
   72.48      if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs()) {
   72.49        // Wait a certain amount of time to possibly do another sweep.
   72.50        // We must wait until stack scanning has happened so that we can
   72.51 @@ -664,9 +676,17 @@
   72.52        // remains unchanged. This behavior is desired, since we want to keep
   72.53        // the stable state, i.e., we do not want to evict methods from the
   72.54        // code cache if it is unnecessary.
   72.55 -      lock()->wait();
   72.56 +      // We need a timed wait here, since compiler threads can exit if compilation
   72.57 +      // is disabled forever. We use 5 seconds wait time; the exiting of compiler threads
   72.58 +      // is not critical and we do not want idle compiler threads to wake up too often.
   72.59 +      lock()->wait(!Mutex::_no_safepoint_check_flag, 5*1000);
   72.60      }
   72.61    }
   72.62 +
   72.63 +  if (CompileBroker::is_compilation_disabled_forever()) {
   72.64 +    return NULL;
   72.65 +  }
   72.66 +
   72.67    CompileTask* task = CompilationPolicy::policy()->select_task(this);
   72.68    remove(task);
   72.69    return task;
   72.70 @@ -891,10 +911,8 @@
   72.71  }
   72.72  
   72.73  
   72.74 -
   72.75 -// ------------------------------------------------------------------
   72.76 -// CompileBroker::make_compiler_thread
   72.77 -CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS) {
   72.78 +CompilerThread* CompileBroker::make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters,
   72.79 +                                                    AbstractCompiler* comp, TRAPS) {
   72.80    CompilerThread* compiler_thread = NULL;
   72.81  
   72.82    Klass* k =
   72.83 @@ -961,6 +979,7 @@
   72.84      java_lang_Thread::set_daemon(thread_oop());
   72.85  
   72.86      compiler_thread->set_threadObj(thread_oop());
   72.87 +    compiler_thread->set_compiler(comp);
   72.88      Threads::add(compiler_thread);
   72.89      Thread::start(compiler_thread);
   72.90    }
   72.91 @@ -972,25 +991,24 @@
   72.92  }
   72.93  
   72.94  
   72.95 -// ------------------------------------------------------------------
   72.96 -// CompileBroker::init_compiler_threads
   72.97 -//
   72.98 -// Initialize the compilation queue
   72.99  void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
  72.100    EXCEPTION_MARK;
  72.101  #if !defined(ZERO) && !defined(SHARK)
  72.102    assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
  72.103  #endif // !ZERO && !SHARK
  72.104 +  // Initialize the compilation queue
  72.105    if (c2_compiler_count > 0) {
  72.106      _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
  72.107 +    _compilers[1]->set_num_compiler_threads(c2_compiler_count);
  72.108    }
  72.109    if (c1_compiler_count > 0) {
  72.110      _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
  72.111 +    _compilers[0]->set_num_compiler_threads(c1_compiler_count);
  72.112    }
  72.113  
  72.114    int compiler_count = c1_compiler_count + c2_compiler_count;
  72.115  
  72.116 -  _method_threads =
  72.117 +  _compiler_threads =
  72.118      new (ResourceObj::C_HEAP, mtCompiler) GrowableArray<CompilerThread*>(compiler_count, true);
  72.119  
  72.120    char name_buffer[256];
  72.121 @@ -998,21 +1016,22 @@
  72.122      // Create a name for our thread.
  72.123      sprintf(name_buffer, "C2 CompilerThread%d", i);
  72.124      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  72.125 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
  72.126 -    _method_threads->append(new_thread);
  72.127 +    // Shark and C2
  72.128 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, _compilers[1], CHECK);
  72.129 +    _compiler_threads->append(new_thread);
  72.130    }
  72.131  
  72.132    for (int i = c2_compiler_count; i < compiler_count; i++) {
  72.133      // Create a name for our thread.
  72.134      sprintf(name_buffer, "C1 CompilerThread%d", i);
  72.135      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  72.136 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
  72.137 -    _method_threads->append(new_thread);
  72.138 +    // C1
  72.139 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, _compilers[0], CHECK);
  72.140 +    _compiler_threads->append(new_thread);
  72.141    }
  72.142  
  72.143    if (UsePerfData) {
  72.144 -    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
  72.145 -                                     compiler_count, CHECK);
  72.146 +    PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes, compiler_count, CHECK);
  72.147    }
  72.148  }
  72.149  
  72.150 @@ -1029,27 +1048,6 @@
  72.151  }
  72.152  
  72.153  // ------------------------------------------------------------------
  72.154 -// CompileBroker::is_idle
  72.155 -bool CompileBroker::is_idle() {
  72.156 -  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
  72.157 -    return false;
  72.158 -  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
  72.159 -    return false;
  72.160 -  } else {
  72.161 -    int num_threads = _method_threads->length();
  72.162 -    for (int i=0; i<num_threads; i++) {
  72.163 -      if (_method_threads->at(i)->task() != NULL) {
  72.164 -        return false;
  72.165 -      }
  72.166 -    }
  72.167 -
  72.168 -    // No pending or active compilations.
  72.169 -    return true;
  72.170 -  }
  72.171 -}
  72.172 -
  72.173 -
  72.174 -// ------------------------------------------------------------------
  72.175  // CompileBroker::compile_method
  72.176  //
  72.177  // Request compilation of a method.
  72.178 @@ -1551,6 +1549,101 @@
  72.179    free_task(task);
  72.180  }
  72.181  
  72.182 +// Initialize compiler thread(s) + compiler object(s). The postcondition
  72.183 +// of this function is that the compiler runtimes are initialized and that
  72.184 +//compiler threads can start compiling.
  72.185 +bool CompileBroker::init_compiler_runtime() {
  72.186 +  CompilerThread* thread = CompilerThread::current();
  72.187 +  AbstractCompiler* comp = thread->compiler();
  72.188 +  // Final sanity check - the compiler object must exist
  72.189 +  guarantee(comp != NULL, "Compiler object must exist");
  72.190 +
  72.191 +  int system_dictionary_modification_counter;
  72.192 +  {
  72.193 +    MutexLocker locker(Compile_lock, thread);
  72.194 +    system_dictionary_modification_counter = SystemDictionary::number_of_modifications();
  72.195 +  }
  72.196 +
  72.197 +  {
  72.198 +    // Must switch to native to allocate ci_env
  72.199 +    ThreadToNativeFromVM ttn(thread);
  72.200 +    ciEnv ci_env(NULL, system_dictionary_modification_counter);
  72.201 +    // Cache Jvmti state
  72.202 +    ci_env.cache_jvmti_state();
  72.203 +    // Cache DTrace flags
  72.204 +    ci_env.cache_dtrace_flags();
  72.205 +
  72.206 +    // Switch back to VM state to do compiler initialization
  72.207 +    ThreadInVMfromNative tv(thread);
  72.208 +    ResetNoHandleMark rnhm;
  72.209 +
  72.210 +
  72.211 +    if (!comp->is_shark()) {
  72.212 +      // Perform per-thread and global initializations
  72.213 +      comp->initialize();
  72.214 +    }
  72.215 +  }
  72.216 +
  72.217 +  if (comp->is_failed()) {
  72.218 +    disable_compilation_forever();
  72.219 +    // If compiler initialization failed, no compiler thread that is specific to a
  72.220 +    // particular compiler runtime will ever start to compile methods.
  72.221 +
  72.222 +    shutdown_compiler_runtime(comp, thread);
  72.223 +    return false;
  72.224 +  }
  72.225 +
  72.226 +  // C1 specific check
  72.227 +  if (comp->is_c1() && (thread->get_buffer_blob() == NULL)) {
  72.228 +    warning("Initialization of %s thread failed (no space to run compilers)", thread->name());
  72.229 +    return false;
  72.230 +  }
  72.231 +
  72.232 +  return true;
  72.233 +}
  72.234 +
  72.235 +// If C1 and/or C2 initialization failed, we shut down all compilation.
  72.236 +// We do this to keep things simple. This can be changed if it ever turns out to be
  72.237 +// a problem.
  72.238 +void CompileBroker::shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread) {
  72.239 +  // Free buffer blob, if allocated
  72.240 +  if (thread->get_buffer_blob() != NULL) {
  72.241 +    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  72.242 +    CodeCache::free(thread->get_buffer_blob());
  72.243 +  }
  72.244 +
  72.245 +  if (comp->should_perform_shutdown()) {
  72.246 +    // There are two reasons for shutting down the compiler
  72.247 +    // 1) compiler runtime initialization failed
  72.248 +    // 2) The code cache is full and the following flag is set: -XX:-UseCodeCacheFlushing
  72.249 +    warning("Shutting down compiler %s (no space to run compilers)", comp->name());
  72.250 +
  72.251 +    // Only one thread per compiler runtime object enters here
  72.252 +    // Set state to shut down
  72.253 +    comp->set_shut_down();
  72.254 +
  72.255 +    MutexLocker mu(MethodCompileQueue_lock, thread);
  72.256 +    CompileQueue* queue;
  72.257 +    if (_c1_method_queue != NULL) {
  72.258 +      _c1_method_queue->delete_all();
  72.259 +      queue = _c1_method_queue;
  72.260 +      _c1_method_queue = NULL;
  72.261 +      delete _c1_method_queue;
  72.262 +    }
  72.263 +
  72.264 +    if (_c2_method_queue != NULL) {
  72.265 +      _c2_method_queue->delete_all();
  72.266 +      queue = _c2_method_queue;
  72.267 +      _c2_method_queue = NULL;
  72.268 +      delete _c2_method_queue;
  72.269 +    }
  72.270 +
  72.271 +    // We could delete compiler runtimes also. However, there are references to
  72.272 +    // the compiler runtime(s) (e.g.,  nmethod::is_compiled_by_c1()) which then
  72.273 +    // fail. This can be done later if necessary.
  72.274 +  }
  72.275 +}
  72.276 +
  72.277  // ------------------------------------------------------------------
  72.278  // CompileBroker::compiler_thread_loop
  72.279  //
  72.280 @@ -1558,7 +1651,6 @@
  72.281  void CompileBroker::compiler_thread_loop() {
  72.282    CompilerThread* thread = CompilerThread::current();
  72.283    CompileQueue* queue = thread->queue();
  72.284 -
  72.285    // For the thread that initializes the ciObjectFactory
  72.286    // this resource mark holds all the shared objects
  72.287    ResourceMark rm;
  72.288 @@ -1587,65 +1679,78 @@
  72.289      log->end_elem();
  72.290    }
  72.291  
  72.292 -  while (true) {
  72.293 -    {
  72.294 -      // We need this HandleMark to avoid leaking VM handles.
  72.295 -      HandleMark hm(thread);
  72.296 +  // If compiler thread/runtime initialization fails, exit the compiler thread
  72.297 +  if (!init_compiler_runtime()) {
  72.298 +    return;
  72.299 +  }
  72.300  
  72.301 -      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  72.302 -        // the code cache is really full
  72.303 -        handle_full_code_cache();
  72.304 -      }
  72.305 +  // Poll for new compilation tasks as long as the JVM runs. Compilation
  72.306 +  // should only be disabled if something went wrong while initializing the
  72.307 +  // compiler runtimes. This, in turn, should not happen. The only known case
  72.308 +  // when compiler runtime initialization fails is if there is not enough free
  72.309 +  // space in the code cache to generate the necessary stubs, etc.
  72.310 +  while (!is_compilation_disabled_forever()) {
  72.311 +    // We need this HandleMark to avoid leaking VM handles.
  72.312 +    HandleMark hm(thread);
  72.313  
  72.314 -      CompileTask* task = queue->get();
  72.315 +    if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
  72.316 +      // the code cache is really full
  72.317 +      handle_full_code_cache();
  72.318 +    }
  72.319  
  72.320 -      // Give compiler threads an extra quanta.  They tend to be bursty and
  72.321 -      // this helps the compiler to finish up the job.
  72.322 -      if( CompilerThreadHintNoPreempt )
  72.323 -        os::hint_no_preempt();
  72.324 +    CompileTask* task = queue->get();
  72.325 +    if (task == NULL) {
  72.326 +      continue;
  72.327 +    }
  72.328  
  72.329 -      // trace per thread time and compile statistics
  72.330 -      CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  72.331 -      PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  72.332 +    // Give compiler threads an extra quanta.  They tend to be bursty and
  72.333 +    // this helps the compiler to finish up the job.
  72.334 +    if( CompilerThreadHintNoPreempt )
  72.335 +      os::hint_no_preempt();
  72.336  
  72.337 -      // Assign the task to the current thread.  Mark this compilation
  72.338 -      // thread as active for the profiler.
  72.339 -      CompileTaskWrapper ctw(task);
  72.340 -      nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  72.341 -      task->set_code_handle(&result_handle);
  72.342 -      methodHandle method(thread, task->method());
  72.343 +    // trace per thread time and compile statistics
  72.344 +    CompilerCounters* counters = ((CompilerThread*)thread)->counters();
  72.345 +    PerfTraceTimedEvent(counters->time_counter(), counters->compile_counter());
  72.346  
  72.347 -      // Never compile a method if breakpoints are present in it
  72.348 -      if (method()->number_of_breakpoints() == 0) {
  72.349 -        // Compile the method.
  72.350 -        if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  72.351 +    // Assign the task to the current thread.  Mark this compilation
  72.352 +    // thread as active for the profiler.
  72.353 +    CompileTaskWrapper ctw(task);
  72.354 +    nmethodLocker result_handle;  // (handle for the nmethod produced by this task)
  72.355 +    task->set_code_handle(&result_handle);
  72.356 +    methodHandle method(thread, task->method());
  72.357 +
  72.358 +    // Never compile a method if breakpoints are present in it
  72.359 +    if (method()->number_of_breakpoints() == 0) {
  72.360 +      // Compile the method.
  72.361 +      if ((UseCompiler || AlwaysCompileLoopMethods) && CompileBroker::should_compile_new_jobs()) {
  72.362  #ifdef COMPILER1
  72.363 -          // Allow repeating compilations for the purpose of benchmarking
  72.364 -          // compile speed. This is not useful for customers.
  72.365 -          if (CompilationRepeat != 0) {
  72.366 -            int compile_count = CompilationRepeat;
  72.367 -            while (compile_count > 0) {
  72.368 -              invoke_compiler_on_method(task);
  72.369 -              nmethod* nm = method->code();
  72.370 -              if (nm != NULL) {
  72.371 -                nm->make_zombie();
  72.372 -                method->clear_code();
  72.373 -              }
  72.374 -              compile_count--;
  72.375 +        // Allow repeating compilations for the purpose of benchmarking
  72.376 +        // compile speed. This is not useful for customers.
  72.377 +        if (CompilationRepeat != 0) {
  72.378 +          int compile_count = CompilationRepeat;
  72.379 +          while (compile_count > 0) {
  72.380 +            invoke_compiler_on_method(task);
  72.381 +            nmethod* nm = method->code();
  72.382 +            if (nm != NULL) {
  72.383 +              nm->make_zombie();
  72.384 +              method->clear_code();
  72.385              }
  72.386 +            compile_count--;
  72.387            }
  72.388 +        }
  72.389  #endif /* COMPILER1 */
  72.390 -          invoke_compiler_on_method(task);
  72.391 -        } else {
  72.392 -          // After compilation is disabled, remove remaining methods from queue
  72.393 -          method->clear_queued_for_compilation();
  72.394 -        }
  72.395 +        invoke_compiler_on_method(task);
  72.396 +      } else {
  72.397 +        // After compilation is disabled, remove remaining methods from queue
  72.398 +        method->clear_queued_for_compilation();
  72.399        }
  72.400      }
  72.401    }
  72.402 +
  72.403 +  // Shut down compiler runtime
  72.404 +  shutdown_compiler_runtime(thread->compiler(), thread);
  72.405  }
  72.406  
  72.407 -
  72.408  // ------------------------------------------------------------------
  72.409  // CompileBroker::init_compiler_thread_log
  72.410  //
  72.411 @@ -1953,11 +2058,14 @@
  72.412        // Since code cache is full, immediately stop new compiles
  72.413        if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
  72.414          NMethodSweeper::log_sweep("disable_compiler");
  72.415 +
  72.416 +        // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
  72.417 +        // without having to consider the state in which the current thread is.
  72.418 +        ThreadInVMfromUnknown in_vm;
  72.419          NMethodSweeper::possibly_sweep();
  72.420        }
  72.421      } else {
  72.422 -      UseCompiler               = false;
  72.423 -      AlwaysCompileLoopMethods  = false;
  72.424 +      disable_compilation_forever();
  72.425      }
  72.426    }
  72.427    codecache_print(/* detailed= */ true);
    73.1 --- a/src/share/vm/compiler/compileBroker.hpp	Thu Oct 17 06:29:58 2013 -0700
    73.2 +++ b/src/share/vm/compiler/compileBroker.hpp	Fri Oct 18 12:10:44 2013 -0700
    73.3 @@ -213,8 +213,12 @@
    73.4  
    73.5    // Redefine Classes support
    73.6    void mark_on_stack();
    73.7 +  void delete_all();
    73.8 +  void         print();
    73.9  
   73.10 -  void         print();
   73.11 +  ~CompileQueue() {
   73.12 +    assert (is_empty(), " Compile Queue must be empty");
   73.13 +  }
   73.14  };
   73.15  
   73.16  // CompileTaskWrapper
   73.17 @@ -266,7 +270,7 @@
   73.18    static CompileQueue* _c1_method_queue;
   73.19    static CompileTask* _task_free_list;
   73.20  
   73.21 -  static GrowableArray<CompilerThread*>* _method_threads;
   73.22 +  static GrowableArray<CompilerThread*>* _compiler_threads;
   73.23  
   73.24    // performance counters
   73.25    static PerfCounter* _perf_total_compilation;
   73.26 @@ -311,7 +315,7 @@
   73.27    static int _sum_nmethod_code_size;
   73.28    static long _peak_compilation_time;
   73.29  
   73.30 -  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
   73.31 +  static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, AbstractCompiler* comp, TRAPS);
   73.32    static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   73.33    static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
   73.34    static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
   73.35 @@ -351,6 +355,9 @@
   73.36      if (is_c1_compile(comp_level)) return _c1_method_queue;
   73.37      return NULL;
   73.38    }
   73.39 +  static bool init_compiler_runtime();
   73.40 +  static void shutdown_compiler_runtime(AbstractCompiler* comp, CompilerThread* thread);
   73.41 +
   73.42   public:
   73.43    enum {
   73.44      // The entry bci used for non-OSR compilations.
   73.45 @@ -378,9 +385,7 @@
   73.46                                   const char* comment, Thread* thread);
   73.47  
   73.48    static void compiler_thread_loop();
   73.49 -
   73.50    static uint get_compilation_id() { return _compilation_id; }
   73.51 -  static bool is_idle();
   73.52  
   73.53    // Set _should_block.
   73.54    // Call this from the VM, with Threads_lock held and a safepoint requested.
   73.55 @@ -391,8 +396,9 @@
   73.56  
   73.57    enum {
   73.58      // Flags for toggling compiler activity
   73.59 -    stop_compilation = 0,
   73.60 -    run_compilation  = 1
   73.61 +    stop_compilation    = 0,
   73.62 +    run_compilation     = 1,
   73.63 +    shutdown_compilaton = 2
   73.64    };
   73.65  
   73.66    static bool should_compile_new_jobs() { return UseCompiler && (_should_compile_new_jobs == run_compilation); }
   73.67 @@ -401,6 +407,16 @@
   73.68      jint old = Atomic::cmpxchg(new_state, &_should_compile_new_jobs, 1-new_state);
   73.69      return (old == (1-new_state));
   73.70    }
   73.71 +
   73.72 +  static void disable_compilation_forever() {
   73.73 +    UseCompiler               = false;
   73.74 +    AlwaysCompileLoopMethods  = false;
   73.75 +    Atomic::xchg(shutdown_compilaton, &_should_compile_new_jobs);
   73.76 +  }
   73.77 +
   73.78 +  static bool is_compilation_disabled_forever() {
   73.79 +    return _should_compile_new_jobs == shutdown_compilaton;
   73.80 +  }
   73.81    static void handle_full_code_cache();
   73.82  
   73.83    // Return total compilation ticks
    74.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 17 06:29:58 2013 -0700
    74.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Fri Oct 18 12:10:44 2013 -0700
    74.3 @@ -1,5 +1,6 @@
    74.4  /*
    74.5   * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    74.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    74.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.8   *
    74.9   * This code is free software; you can redistribute it and/or modify it
   74.10 @@ -158,6 +159,22 @@
   74.11      index = vt->index_of_miranda(resolved_method->name(),
   74.12                                   resolved_method->signature());
   74.13      kind = CallInfo::vtable_call;
   74.14 +  } else if (resolved_method->has_vtable_index()) {
   74.15 +    // Can occur if an interface redeclares a method of Object.
   74.16 +
   74.17 +#ifdef ASSERT
   74.18 +    // Ensure that this is really the case.
   74.19 +    KlassHandle object_klass = SystemDictionary::Object_klass();
   74.20 +    Method * object_resolved_method = object_klass()->vtable()->method_at(index);
   74.21 +    assert(object_resolved_method->name() == resolved_method->name(),
   74.22 +      err_msg("Object and interface method names should match at vtable index %d, %s != %s",
   74.23 +      index, object_resolved_method->name()->as_C_string(), resolved_method->name()->as_C_string()));
   74.24 +    assert(object_resolved_method->signature() == resolved_method->signature(),
   74.25 +      err_msg("Object and interface method signatures should match at vtable index %d, %s != %s",
   74.26 +      index, object_resolved_method->signature()->as_C_string(), resolved_method->signature()->as_C_string()));
   74.27 +#endif // ASSERT
   74.28 +
   74.29 +    kind = CallInfo::vtable_call;
   74.30    } else {
   74.31      // A regular interface call.
   74.32      kind = CallInfo::itable_call;
   74.33 @@ -454,7 +471,7 @@
   74.34      Symbol* method_name = vmSymbols::invoke_name();
   74.35      Symbol* method_signature = pool->signature_ref_at(index);
   74.36      KlassHandle  current_klass(THREAD, pool->pool_holder());
   74.37 -    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
   74.38 +    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   74.39      return;
   74.40    }
   74.41  
   74.42 @@ -476,22 +493,34 @@
   74.43  
   74.44    if (code == Bytecodes::_invokeinterface) {
   74.45      resolve_interface_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
   74.46 +  } else if (code == Bytecodes::_invokevirtual) {
   74.47 +    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, true, CHECK);
   74.48    } else {
   74.49 -    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, CHECK);
   74.50 +    resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, true, false, CHECK);
   74.51    }
   74.52  }
   74.53  
   74.54  void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle resolved_klass,
   74.55                                    Symbol* method_name, Symbol* method_signature,
   74.56 -                                  KlassHandle current_klass, bool check_access, TRAPS) {
   74.57 +                                  KlassHandle current_klass, bool check_access,
   74.58 +                                  bool require_methodref, TRAPS) {
   74.59  
   74.60    Handle nested_exception;
   74.61  
   74.62 -  // 1. lookup method in resolved klass and its super klasses
   74.63 +  // 1. check if methodref required, that resolved_klass is not interfacemethodref
   74.64 +  if (require_methodref && resolved_klass->is_interface()) {
   74.65 +    ResourceMark rm(THREAD);
   74.66 +    char buf[200];
   74.67 +    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
   74.68 +        resolved_klass()->external_name());
   74.69 +    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   74.70 +  }
   74.71 +
   74.72 +  // 2. lookup method in resolved klass and its super klasses
   74.73    lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, CHECK);
   74.74  
   74.75    if (resolved_method.is_null()) { // not found in the class hierarchy
   74.76 -    // 2. lookup method in all the interfaces implemented by the resolved klass
   74.77 +    // 3. lookup method in all the interfaces implemented by the resolved klass
   74.78      lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
   74.79  
   74.80      if (resolved_method.is_null()) {
   74.81 @@ -505,7 +534,7 @@
   74.82      }
   74.83  
   74.84      if (resolved_method.is_null()) {
   74.85 -      // 3. method lookup failed
   74.86 +      // 4. method lookup failed
   74.87        ResourceMark rm(THREAD);
   74.88        THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
   74.89                        Method::name_and_sig_as_C_string(resolved_klass(),
   74.90 @@ -515,15 +544,6 @@
   74.91      }
   74.92    }
   74.93  
   74.94 -  // 4. check if klass is not interface
   74.95 -  if (resolved_klass->is_interface() && resolved_method->is_abstract()) {
   74.96 -    ResourceMark rm(THREAD);
   74.97 -    char buf[200];
   74.98 -    jio_snprintf(buf, sizeof(buf), "Found interface %s, but class was expected",
   74.99 -        resolved_klass()->external_name());
  74.100 -    THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
  74.101 -  }
  74.102 -
  74.103    // 5. check if method is concrete
  74.104    if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
  74.105      ResourceMark rm(THREAD);
  74.106 @@ -833,7 +853,7 @@
  74.107                                                    Symbol* method_name, Symbol* method_signature,
  74.108                                                    KlassHandle current_klass, bool check_access, TRAPS) {
  74.109  
  74.110 -  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
  74.111 +  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
  74.112    assert(resolved_method->name() != vmSymbols::class_initializer_name(), "should have been checked in verifier");
  74.113  
  74.114    // check if static
  74.115 @@ -867,7 +887,7 @@
  74.116    // and the selected method is recalculated relative to the direct superclass
  74.117    // superinterface.method, which explicitly does not check shadowing
  74.118  
  74.119 -  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
  74.120 +  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, false, CHECK);
  74.121  
  74.122    // check if method name is <init>, that it is found in same klass as static type
  74.123    if (resolved_method->name() == vmSymbols::object_initializer_name() &&
  74.124 @@ -1013,7 +1033,7 @@
  74.125                                                     Symbol* method_name, Symbol* method_signature,
  74.126                                                     KlassHandle current_klass, bool check_access, TRAPS) {
  74.127    // normal method resolution
  74.128 -  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, CHECK);
  74.129 +  resolve_method(resolved_method, resolved_klass, method_name, method_signature, current_klass, check_access, true, CHECK);
  74.130  
  74.131    assert(resolved_method->name() != vmSymbols::object_initializer_name(), "should have been checked in verifier");
  74.132    assert(resolved_method->name() != vmSymbols::class_initializer_name (), "should have been checked in verifier");
    75.1 --- a/src/share/vm/interpreter/linkResolver.hpp	Thu Oct 17 06:29:58 2013 -0700
    75.2 +++ b/src/share/vm/interpreter/linkResolver.hpp	Fri Oct 18 12:10:44 2013 -0700
    75.3 @@ -136,7 +136,7 @@
    75.4    static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
    75.5  
    75.6    static void resolve_interface_method(methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
    75.7 -  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
    75.8 +  static void resolve_method          (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, bool require_methodref, TRAPS);
    75.9  
   75.10    static void linktime_resolve_static_method    (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
   75.11    static void linktime_resolve_special_method   (methodHandle& resolved_method, KlassHandle resolved_klass, Symbol* method_name, Symbol* method_signature, KlassHandle current_klass, bool check_access, TRAPS);
    76.1 --- a/src/share/vm/memory/metadataFactory.hpp	Thu Oct 17 06:29:58 2013 -0700
    76.2 +++ b/src/share/vm/memory/metadataFactory.hpp	Fri Oct 18 12:10:44 2013 -0700
    76.3 @@ -1,5 +1,5 @@
    76.4  /*
    76.5 - * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
    76.6 + * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
    76.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.8   *
    76.9   * This code is free software; you can redistribute it and/or modify it
   76.10 @@ -65,6 +65,7 @@
   76.11    static void free_array(ClassLoaderData* loader_data, Array<T>* data) {
   76.12      if (data != NULL) {
   76.13        assert(loader_data != NULL, "shouldn't pass null");
   76.14 +      assert(!data->is_shared(), "cannot deallocate array in shared spaces");
   76.15        int size = data->size();
   76.16        if (DumpSharedSpaces) {
   76.17          loader_data->ro_metaspace()->deallocate((MetaWord*)data, size, false);
   76.18 @@ -83,6 +84,7 @@
   76.19        // Call metadata's deallocate function which will call deallocate fields
   76.20        assert(!DumpSharedSpaces, "cannot deallocate metadata when dumping CDS archive");
   76.21        assert(!md->on_stack(), "can't deallocate things on stack");
   76.22 +      assert(!md->is_shared(), "cannot deallocate if in shared spaces");
   76.23        md->deallocate_contents(loader_data);
   76.24        loader_data->metaspace_non_null()->deallocate((MetaWord*)md, size, md->is_klass());
   76.25      }
    77.1 --- a/src/share/vm/oops/constantPool.cpp	Thu Oct 17 06:29:58 2013 -0700
    77.2 +++ b/src/share/vm/oops/constantPool.cpp	Fri Oct 18 12:10:44 2013 -0700
    77.3 @@ -869,18 +869,9 @@
    77.4  bool ConstantPool::compare_entry_to(int index1, constantPoolHandle cp2,
    77.5         int index2, TRAPS) {
    77.6  
    77.7 -  jbyte t1 = tag_at(index1).value();
    77.8 -  jbyte t2 = cp2->tag_at(index2).value();
    77.9 -
   77.10 -
   77.11 -  // JVM_CONSTANT_UnresolvedClassInError is equal to JVM_CONSTANT_UnresolvedClass
   77.12 -  // when comparing
   77.13 -  if (t1 == JVM_CONSTANT_UnresolvedClassInError) {
   77.14 -    t1 = JVM_CONSTANT_UnresolvedClass;
   77.15 -  }
   77.16 -  if (t2 == JVM_CONSTANT_UnresolvedClassInError) {
   77.17 -    t2 = JVM_CONSTANT_UnresolvedClass;
   77.18 -  }
   77.19 +  // The error tags are equivalent to non-error tags when comparing
   77.20 +  jbyte t1 = tag_at(index1).non_error_value();
   77.21 +  jbyte t2 = cp2->tag_at(index2).non_error_value();
   77.22  
   77.23    if (t1 != t2) {
   77.24      // Not the same entry type so there is nothing else to check. Note
   77.25 @@ -1001,8 +992,8 @@
   77.26  
   77.27    case JVM_CONSTANT_MethodType:
   77.28    {
   77.29 -    int k1 = method_type_index_at(index1);
   77.30 -    int k2 = cp2->method_type_index_at(index2);
   77.31 +    int k1 = method_type_index_at_error_ok(index1);
   77.32 +    int k2 = cp2->method_type_index_at_error_ok(index2);
   77.33      bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
   77.34      if (match) {
   77.35        return true;
   77.36 @@ -1011,11 +1002,11 @@
   77.37  
   77.38    case JVM_CONSTANT_MethodHandle:
   77.39    {
   77.40 -    int k1 = method_handle_ref_kind_at(index1);
   77.41 -    int k2 = cp2->method_handle_ref_kind_at(index2);
   77.42 +    int k1 = method_handle_ref_kind_at_error_ok(index1);
   77.43 +    int k2 = cp2->method_handle_ref_kind_at_error_ok(index2);
   77.44      if (k1 == k2) {
   77.45 -      int i1 = method_handle_index_at(index1);
   77.46 -      int i2 = cp2->method_handle_index_at(index2);
   77.47 +      int i1 = method_handle_index_at_error_ok(index1);
   77.48 +      int i2 = cp2->method_handle_index_at_error_ok(index2);
   77.49        bool match = compare_entry_to(i1, cp2, i2, CHECK_false);
   77.50        if (match) {
   77.51          return true;
   77.52 @@ -1329,14 +1320,6 @@
   77.53      }
   77.54    } break;
   77.55  
   77.56 -  case JVM_CONSTANT_UnresolvedClassInError:
   77.57 -  {
   77.58 -    Symbol* k = from_cp->unresolved_klass_at(from_i);
   77.59 -    to_cp->unresolved_klass_at_put(to_i, k);
   77.60 -    to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
   77.61 -  } break;
   77.62 -
   77.63 -
   77.64    case JVM_CONSTANT_String:
   77.65    {
   77.66      Symbol* s = from_cp->unresolved_string_at(from_i);
   77.67 @@ -1352,15 +1335,17 @@
   77.68    } break;
   77.69  
   77.70    case JVM_CONSTANT_MethodType:
   77.71 +  case JVM_CONSTANT_MethodTypeInError:
   77.72    {
   77.73 -    jint k = from_cp->method_type_index_at(from_i);
   77.74 +    jint k = from_cp->method_type_index_at_error_ok(from_i);
   77.75      to_cp->method_type_index_at_put(to_i, k);
   77.76    } break;
   77.77  
   77.78    case JVM_CONSTANT_MethodHandle:
   77.79 +  case JVM_CONSTANT_MethodHandleInError:
   77.80    {
   77.81 -    int k1 = from_cp->method_handle_ref_kind_at(from_i);
   77.82 -    int k2 = from_cp->method_handle_index_at(from_i);
   77.83 +    int k1 = from_cp->method_handle_ref_kind_at_error_ok(from_i);
   77.84 +    int k2 = from_cp->method_handle_index_at_error_ok(from_i);
   77.85      to_cp->method_handle_index_at_put(to_i, k1, k2);
   77.86    } break;
   77.87  
    78.1 --- a/src/share/vm/oops/instanceKlass.cpp	Thu Oct 17 06:29:58 2013 -0700
    78.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Fri Oct 18 12:10:44 2013 -0700
    78.3 @@ -320,7 +320,8 @@
    78.4  
    78.5  void InstanceKlass::deallocate_methods(ClassLoaderData* loader_data,
    78.6                                         Array<Method*>* methods) {
    78.7 -  if (methods != NULL && methods != Universe::the_empty_method_array()) {
    78.8 +  if (methods != NULL && methods != Universe::the_empty_method_array() &&
    78.9 +      !methods->is_shared()) {
   78.10      for (int i = 0; i < methods->length(); i++) {
   78.11        Method* method = methods->at(i);
   78.12        if (method == NULL) continue;  // maybe null if error processing
   78.13 @@ -344,13 +345,14 @@
   78.14      // check that the interfaces don't come from super class
   78.15      Array<Klass*>* sti = (super_klass == NULL) ? NULL :
   78.16                      InstanceKlass::cast(super_klass)->transitive_interfaces();
   78.17 -    if (ti != sti) {
   78.18 +    if (ti != sti && ti != NULL && !ti->is_shared()) {
   78.19        MetadataFactory::free_array<Klass*>(loader_data, ti);
   78.20      }
   78.21    }
   78.22  
   78.23    // local interfaces can be empty
   78.24 -  if (local_interfaces != Universe::the_empty_klass_array()) {
   78.25 +  if (local_interfaces != Universe::the_empty_klass_array() &&
   78.26 +      local_interfaces != NULL && !local_interfaces->is_shared()) {
   78.27      MetadataFactory::free_array<Klass*>(loader_data, local_interfaces);
   78.28    }
   78.29  }
   78.30 @@ -380,21 +382,25 @@
   78.31    deallocate_methods(loader_data, methods());
   78.32    set_methods(NULL);
   78.33  
   78.34 -  if (method_ordering() != Universe::the_empty_int_array()) {
   78.35 +  if (method_ordering() != NULL &&
   78.36 +      method_ordering() != Universe::the_empty_int_array() &&
   78.37 +      !method_ordering()->is_shared()) {
   78.38      MetadataFactory::free_array<int>(loader_data, method_ordering());
   78.39    }
   78.40    set_method_ordering(NULL);
   78.41  
   78.42    // default methods can be empty
   78.43    if (default_methods() != NULL &&
   78.44 -      default_methods() != Universe::the_empty_method_array()) {
   78.45 +      default_methods() != Universe::the_empty_method_array() &&
   78.46 +      !default_methods()->is_shared()) {
   78.47      MetadataFactory::free_array<Method*>(loader_data, default_methods());
   78.48    }
   78.49    // Do NOT deallocate the default methods, they are owned by superinterfaces.
   78.50    set_default_methods(NULL);
   78.51  
   78.52    // default methods vtable indices can be empty
   78.53 -  if (default_vtable_indices() != NULL) {
   78.54 +  if (default_vtable_indices() != NULL &&
   78.55 +      !default_vtable_indices()->is_shared()) {
   78.56      MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
   78.57    }
   78.58    set_default_vtable_indices(NULL);
   78.59 @@ -403,8 +409,10 @@
   78.60    // This array is in Klass, but remove it with the InstanceKlass since
   78.61    // this place would be the only caller and it can share memory with transitive
   78.62    // interfaces.
   78.63 -  if (secondary_supers() != Universe::the_empty_klass_array() &&
   78.64 -      secondary_supers() != transitive_interfaces()) {
   78.65 +  if (secondary_supers() != NULL &&
   78.66 +      secondary_supers() != Universe::the_empty_klass_array() &&
   78.67 +      secondary_supers() != transitive_interfaces() &&
   78.68 +      !secondary_supers()->is_shared()) {
   78.69      MetadataFactory::free_array<Klass*>(loader_data, secondary_supers());
   78.70    }
   78.71    set_secondary_supers(NULL);
   78.72 @@ -413,24 +421,32 @@
   78.73    set_transitive_interfaces(NULL);
   78.74    set_local_interfaces(NULL);
   78.75  
   78.76 -  MetadataFactory::free_array<jushort>(loader_data, fields());
   78.77 +  if (fields() != NULL && !fields()->is_shared()) {
   78.78 +    MetadataFactory::free_array<jushort>(loader_data, fields());
   78.79 +  }
   78.80    set_fields(NULL, 0);
   78.81  
   78.82    // If a method from a redefined class is using this constant pool, don't
   78.83    // delete it, yet.  The new class's previous version will point to this.
   78.84    if (constants() != NULL) {
   78.85      assert (!constants()->on_stack(), "shouldn't be called if anything is onstack");
   78.86 -    MetadataFactory::free_metadata(loader_data, constants());
   78.87 +    if (!constants()->is_shared()) {
   78.88 +      MetadataFactory::free_metadata(loader_data, constants());
   78.89 +    }
   78.90      set_constants(NULL);
   78.91    }
   78.92  
   78.93 -  if (inner_classes() != Universe::the_empty_short_array()) {
   78.94 +  if (inner_classes() != NULL &&
   78.95 +      inner_classes() != Universe::the_empty_short_array() &&
   78.96 +      !inner_classes()->is_shared()) {
   78.97      MetadataFactory::free_array<jushort>(loader_data, inner_classes());
   78.98    }
   78.99    set_inner_classes(NULL);
  78.100  
  78.101 -  // We should deallocate the Annotations instance
  78.102 -  MetadataFactory::free_metadata(loader_data, annotations());
  78.103 +  // We should deallocate the Annotations instance if it's not in shared spaces.
  78.104 +  if (annotations() != NULL && !annotations()->is_shared()) {
  78.105 +    MetadataFactory::free_metadata(loader_data, annotations());
  78.106 +  }
  78.107    set_annotations(NULL);
  78.108  }
  78.109  
    79.1 --- a/src/share/vm/oops/method.hpp	Thu Oct 17 06:29:58 2013 -0700
    79.2 +++ b/src/share/vm/oops/method.hpp	Fri Oct 18 12:10:44 2013 -0700
    79.3 @@ -805,6 +805,7 @@
    79.4   private:
    79.5    void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);
    79.6  
    79.7 + public:
    79.8    MethodCounters* get_method_counters(TRAPS) {
    79.9      if (_method_counters == NULL) {
   79.10        build_method_counters(this, CHECK_AND_CLEAR_NULL);
   79.11 @@ -812,7 +813,6 @@
   79.12      return _method_counters;
   79.13    }
   79.14  
   79.15 - public:
   79.16    bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
   79.17    void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
   79.18    void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
    80.1 --- a/src/share/vm/oops/methodData.cpp	Thu Oct 17 06:29:58 2013 -0700
    80.2 +++ b/src/share/vm/oops/methodData.cpp	Fri Oct 18 12:10:44 2013 -0700
    80.3 @@ -56,6 +56,11 @@
    80.4    if (needs_array_len(tag)) {
    80.5      set_cell_at(ArrayData::array_len_off_set, cell_count - 1); // -1 for header.
    80.6    }
    80.7 +  if (tag == call_type_data_tag) {
    80.8 +    CallTypeData::initialize(this, cell_count);
    80.9 +  } else if (tag == virtual_call_type_data_tag) {
   80.10 +    VirtualCallTypeData::initialize(this, cell_count);
   80.11 +  }
   80.12  }
   80.13  
   80.14  void DataLayout::clean_weak_klass_links(BoolObjectClosure* cl) {
   80.15 @@ -76,7 +81,7 @@
   80.16  }
   80.17  
   80.18  #ifndef PRODUCT
   80.19 -void ProfileData::print_shared(outputStream* st, const char* name) {
   80.20 +void ProfileData::print_shared(outputStream* st, const char* name) const {
   80.21    st->print("bci: %d", bci());
   80.22    st->fill_to(tab_width_one);
   80.23    st->print("%s", name);
   80.24 @@ -91,8 +96,8 @@
   80.25      st->print("flags(%d) ", flags);
   80.26  }
   80.27  
   80.28 -void ProfileData::tab(outputStream* st) {
   80.29 -  st->fill_to(tab_width_two);
   80.30 +void ProfileData::tab(outputStream* st, bool first) const {
   80.31 +  st->fill_to(first ? tab_width_one : tab_width_two);
   80.32  }
   80.33  #endif // !PRODUCT
   80.34  
   80.35 @@ -104,7 +109,7 @@
   80.36  
   80.37  
   80.38  #ifndef PRODUCT
   80.39 -void BitData::print_data_on(outputStream* st) {
   80.40 +void BitData::print_data_on(outputStream* st) const {
   80.41    print_shared(st, "BitData");
   80.42  }
   80.43  #endif // !PRODUCT
   80.44 @@ -115,7 +120,7 @@
   80.45  // A CounterData corresponds to a simple counter.
   80.46  
   80.47  #ifndef PRODUCT
   80.48 -void CounterData::print_data_on(outputStream* st) {
   80.49 +void CounterData::print_data_on(outputStream* st) const {
   80.50    print_shared(st, "CounterData");
   80.51    st->print_cr("count(%u)", count());
   80.52  }
   80.53 @@ -145,12 +150,207 @@
   80.54  }
   80.55  
   80.56  #ifndef PRODUCT
   80.57 -void JumpData::print_data_on(outputStream* st) {
   80.58 +void JumpData::print_data_on(outputStream* st) const {
   80.59    print_shared(st, "JumpData");
   80.60    st->print_cr("taken(%u) displacement(%d)", taken(), displacement());
   80.61  }
   80.62  #endif // !PRODUCT
   80.63  
   80.64 +int TypeStackSlotEntries::compute_cell_count(Symbol* signature, int max) {
   80.65 +  ResourceMark rm;
   80.66 +  SignatureStream ss(signature);
   80.67 +  int args_count = MIN2(ss.reference_parameter_count(), max);
   80.68 +  return args_count * per_arg_cell_count;
   80.69 +}
   80.70 +
   80.71 +int TypeEntriesAtCall::compute_cell_count(BytecodeStream* stream) {
   80.72 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
   80.73 +  assert(TypeStackSlotEntries::per_arg_count() > ReturnTypeEntry::static_cell_count(), "code to test for arguments/results broken");
   80.74 +  Bytecode_invoke inv(stream->method(), stream->bci());
   80.75 +  int args_cell = 0;
   80.76 +  if (arguments_profiling_enabled()) {
   80.77 +    args_cell = TypeStackSlotEntries::compute_cell_count(inv.signature(), TypeProfileArgsLimit);
   80.78 +  }
   80.79 +  int ret_cell = 0;
   80.80 +  if (return_profiling_enabled() && (inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY)) {
   80.81 +    ret_cell = ReturnTypeEntry::static_cell_count();
   80.82 +  }
   80.83 +  int header_cell = 0;
   80.84 +  if (args_cell + ret_cell > 0) {
   80.85 +    header_cell = header_cell_count();
   80.86 +  }
   80.87 +
   80.88 +  return header_cell + args_cell + ret_cell;
   80.89 +}
   80.90 +
   80.91 +class ArgumentOffsetComputer : public SignatureInfo {
   80.92 +private:
   80.93 +  int _max;
   80.94 +  GrowableArray<int> _offsets;
   80.95 +
   80.96 +  void set(int size, BasicType type) { _size += size; }
   80.97 +  void do_object(int begin, int end) {
   80.98 +    if (_offsets.length() < _max) {
   80.99 +      _offsets.push(_size);
  80.100 +    }
  80.101 +    SignatureInfo::do_object(begin, end);
  80.102 +  }
  80.103 +  void do_array (int begin, int end) {
  80.104 +    if (_offsets.length() < _max) {
  80.105 +      _offsets.push(_size);
  80.106 +    }
  80.107 +    SignatureInfo::do_array(begin, end);
  80.108 +  }
  80.109 +
  80.110 +public:
  80.111 +  ArgumentOffsetComputer(Symbol* signature, int max)
  80.112 +    : SignatureInfo(signature), _max(max), _offsets(Thread::current(), max) {
  80.113 +  }
  80.114 +
  80.115 +  int total() { lazy_iterate_parameters(); return _size; }
  80.116 +
  80.117 +  int off_at(int i) const { return _offsets.at(i); }
  80.118 +};
  80.119 +
  80.120 +void TypeStackSlotEntries::post_initialize(Symbol* signature, bool has_receiver) {
  80.121 +  ResourceMark rm;
  80.122 +  ArgumentOffsetComputer aos(signature, _number_of_entries);
  80.123 +  aos.total();
  80.124 +  for (int i = 0; i < _number_of_entries; i++) {
  80.125 +    set_stack_slot(i, aos.off_at(i) + (has_receiver ? 1 : 0));
  80.126 +    set_type(i, type_none());
  80.127 +  }
  80.128 +}
  80.129 +
  80.130 +void CallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  80.131 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  80.132 +  Bytecode_invoke inv(stream->method(), stream->bci());
  80.133 +
  80.134 +  SignatureStream ss(inv.signature());
  80.135 +  if (has_arguments()) {
  80.136 +#ifdef ASSERT
  80.137 +    ResourceMark rm;
  80.138 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  80.139 +    assert(count > 0, "room for args type but none found?");
  80.140 +    check_number_of_arguments(count);
  80.141 +#endif
  80.142 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  80.143 +  }
  80.144 +
  80.145 +  if (has_return()) {
  80.146 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  80.147 +    _ret.post_initialize();
  80.148 +  }
  80.149 +}
  80.150 +
  80.151 +void VirtualCallTypeData::post_initialize(BytecodeStream* stream, MethodData* mdo) {
  80.152 +  assert(Bytecodes::is_invoke(stream->code()), "should be invoke");
  80.153 +  Bytecode_invoke inv(stream->method(), stream->bci());
  80.154 +
  80.155 +  if (has_arguments()) {
  80.156 +#ifdef ASSERT
  80.157 +    ResourceMark rm;
  80.158 +    SignatureStream ss(inv.signature());
  80.159 +    int count = MIN2(ss.reference_parameter_count(), (int)TypeProfileArgsLimit);
  80.160 +    assert(count > 0, "room for args type but none found?");
  80.161 +    check_number_of_arguments(count);
  80.162 +#endif
  80.163 +    _args.post_initialize(inv.signature(), inv.has_receiver());
  80.164 +  }
  80.165 +
  80.166 +  if (has_return()) {
  80.167 +    assert(inv.result_type() == T_OBJECT || inv.result_type() == T_ARRAY, "room for a ret type but doesn't return obj?");
  80.168 +    _ret.post_initialize();
  80.169 +  }
  80.170 +}
  80.171 +
  80.172 +bool TypeEntries::is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p) {
  80.173 +  return !is_type_none(p) &&
  80.174 +    !((Klass*)klass_part(p))->is_loader_alive(is_alive_cl);
  80.175 +}
  80.176 +
  80.177 +void TypeStackSlotEntries::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  80.178 +  for (int i = 0; i < _number_of_entries; i++) {
  80.179 +    intptr_t p = type(i);
  80.180 +    if (is_loader_alive(is_alive_cl, p)) {
  80.181 +      set_type(i, type_none());
  80.182 +    }
  80.183 +  }
  80.184 +}
  80.185 +
  80.186 +void ReturnTypeEntry::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  80.187 +  intptr_t p = type();
  80.188 +  if (is_loader_alive(is_alive_cl, p)) {
  80.189 +    set_type(type_none());
  80.190 +  }
  80.191 +}
  80.192 +
  80.193 +bool TypeEntriesAtCall::return_profiling_enabled() {
  80.194 +  return MethodData::profile_return();
  80.195 +}
  80.196 +
  80.197 +bool TypeEntriesAtCall::arguments_profiling_enabled() {
  80.198 +  return MethodData::profile_arguments();
  80.199 +}
  80.200 +
  80.201 +#ifndef PRODUCT
  80.202 +void TypeEntries::print_klass(outputStream* st, intptr_t k) {
  80.203 +  if (is_type_none(k)) {
  80.204 +    st->print("none");
  80.205 +  } else if (is_type_unknown(k)) {
  80.206 +    st->print("unknown");
  80.207 +  } else {
  80.208 +    valid_klass(k)->print_value_on(st);
  80.209 +  }
  80.210 +  if (was_null_seen(k)) {
  80.211 +    st->print(" (null seen)");
  80.212 +  }
  80.213 +}
  80.214 +
  80.215 +void TypeStackSlotEntries::print_data_on(outputStream* st) const {
  80.216 +  for (int i = 0; i < _number_of_entries; i++) {
  80.217 +    _pd->tab(st);
  80.218 +    st->print("%d: stack(%u) ", i, stack_slot(i));
  80.219 +    print_klass(st, type(i));
  80.220 +    st->cr();
  80.221 +  }
  80.222 +}
  80.223 +
  80.224 +void ReturnTypeEntry::print_data_on(outputStream* st) const {
  80.225 +  _pd->tab(st);
  80.226 +  print_klass(st, type());
  80.227 +  st->cr();
  80.228 +}
  80.229 +
  80.230 +void CallTypeData::print_data_on(outputStream* st) const {
  80.231 +  CounterData::print_data_on(st);
  80.232 +  if (has_arguments()) {
  80.233 +    tab(st, true);
  80.234 +    st->print("argument types");
  80.235 +    _args.print_data_on(st);
  80.236 +  }
  80.237 +  if (has_return()) {
  80.238 +    tab(st, true);
  80.239 +    st->print("return type");
  80.240 +    _ret.print_data_on(st);
  80.241 +  }
  80.242 +}
  80.243 +
  80.244 +void VirtualCallTypeData::print_data_on(outputStream* st) const {
  80.245 +  VirtualCallData::print_data_on(st);
  80.246 +  if (has_arguments()) {
  80.247 +    tab(st, true);
  80.248 +    st->print("argument types");
  80.249 +    _args.print_data_on(st);
  80.250 +  }
  80.251 +  if (has_return()) {
  80.252 +    tab(st, true);
  80.253 +    st->print("return type");
  80.254 +    _ret.print_data_on(st);
  80.255 +  }
  80.256 +}
  80.257 +#endif
  80.258 +
  80.259  // ==================================================================
  80.260  // ReceiverTypeData
  80.261  //
  80.262 @@ -169,7 +369,7 @@
  80.263  }
  80.264  
  80.265  #ifndef PRODUCT
  80.266 -void ReceiverTypeData::print_receiver_data_on(outputStream* st) {
  80.267 +void ReceiverTypeData::print_receiver_data_on(outputStream* st) const {
  80.268    uint row;
  80.269    int entries = 0;
  80.270    for (row = 0; row < row_limit(); row++) {
  80.271 @@ -190,11 +390,11 @@
  80.272      }
  80.273    }
  80.274  }
  80.275 -void ReceiverTypeData::print_data_on(outputStream* st) {
  80.276 +void ReceiverTypeData::print_data_on(outputStream* st) const {
  80.277    print_shared(st, "ReceiverTypeData");
  80.278    print_receiver_data_on(st);
  80.279  }
  80.280 -void VirtualCallData::print_data_on(outputStream* st) {
  80.281 +void VirtualCallData::print_data_on(outputStream* st) const {
  80.282    print_shared(st, "VirtualCallData");
  80.283    print_receiver_data_on(st);
  80.284  }
  80.285 @@ -246,7 +446,7 @@
  80.286  
  80.287  
  80.288  #ifndef PRODUCT
  80.289 -void RetData::print_data_on(outputStream* st) {
  80.290 +void RetData::print_data_on(outputStream* st) const {
  80.291    print_shared(st, "RetData");
  80.292    uint row;
  80.293    int entries = 0;
  80.294 @@ -281,7 +481,7 @@
  80.295  }
  80.296  
  80.297  #ifndef PRODUCT
  80.298 -void BranchData::print_data_on(outputStream* st) {
  80.299 +void BranchData::print_data_on(outputStream* st) const {
  80.300    print_shared(st, "BranchData");
  80.301    st->print_cr("taken(%u) displacement(%d)",
  80.302                 taken(), displacement());
  80.303 @@ -355,7 +555,7 @@
  80.304  }
  80.305  
  80.306  #ifndef PRODUCT
  80.307 -void MultiBranchData::print_data_on(outputStream* st) {
  80.308 +void MultiBranchData::print_data_on(outputStream* st) const {
  80.309    print_shared(st, "MultiBranchData");
  80.310    st->print_cr("default_count(%u) displacement(%d)",
  80.311                 default_count(), default_displacement());
  80.312 @@ -369,7 +569,7 @@
  80.313  #endif
  80.314  
  80.315  #ifndef PRODUCT
  80.316 -void ArgInfoData::print_data_on(outputStream* st) {
  80.317 +void ArgInfoData::print_data_on(outputStream* st) const {
  80.318    print_shared(st, "ArgInfoData");
  80.319    int nargs = number_of_args();
  80.320    for (int i = 0; i < nargs; i++) {
  80.321 @@ -407,7 +607,11 @@
  80.322      }
  80.323    case Bytecodes::_invokespecial:
  80.324    case Bytecodes::_invokestatic:
  80.325 -    return CounterData::static_cell_count();
  80.326 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  80.327 +      return variable_cell_count;
  80.328 +    } else {
  80.329 +      return CounterData::static_cell_count();
  80.330 +    }
  80.331    case Bytecodes::_goto:
  80.332    case Bytecodes::_goto_w:
  80.333    case Bytecodes::_jsr:
  80.334 @@ -415,9 +619,17 @@
  80.335      return JumpData::static_cell_count();
  80.336    case Bytecodes::_invokevirtual:
  80.337    case Bytecodes::_invokeinterface:
  80.338 -    return VirtualCallData::static_cell_count();
  80.339 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  80.340 +      return variable_cell_count;
  80.341 +    } else {
  80.342 +      return VirtualCallData::static_cell_count();
  80.343 +    }
  80.344    case Bytecodes::_invokedynamic:
  80.345 -    return CounterData::static_cell_count();
  80.346 +    if (MethodData::profile_arguments() || MethodData::profile_return()) {
  80.347 +      return variable_cell_count;
  80.348 +    } else {
  80.349 +      return CounterData::static_cell_count();
  80.350 +    }
  80.351    case Bytecodes::_ret:
  80.352      return RetData::static_cell_count();
  80.353    case Bytecodes::_ifeq:
  80.354 @@ -453,7 +665,36 @@
  80.355      return 0;
  80.356    }
  80.357    if (cell_count == variable_cell_count) {
  80.358 -    cell_count = MultiBranchData::compute_cell_count(stream);
  80.359 +    switch (stream->code()) {
  80.360 +    case Bytecodes::_lookupswitch:
  80.361 +    case Bytecodes::_tableswitch:
  80.362 +      cell_count = MultiBranchData::compute_cell_count(stream);
  80.363 +      break;
  80.364 +    case Bytecodes::_invokespecial:
  80.365 +    case Bytecodes::_invokestatic:
  80.366 +    case Bytecodes::_invokedynamic:
  80.367 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  80.368 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  80.369 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  80.370 +        cell_count = CallTypeData::compute_cell_count(stream);
  80.371 +      } else {
  80.372 +        cell_count = CounterData::static_cell_count();
  80.373 +      }
  80.374 +      break;
  80.375 +    case Bytecodes::_invokevirtual:
  80.376 +    case Bytecodes::_invokeinterface: {
  80.377 +      assert(MethodData::profile_arguments() || MethodData::profile_return(), "should be collecting args profile");
  80.378 +      if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  80.379 +          profile_return_for_invoke(stream->method(), stream->bci())) {
  80.380 +        cell_count = VirtualCallTypeData::compute_cell_count(stream);
  80.381 +      } else {
  80.382 +        cell_count = VirtualCallData::static_cell_count();
  80.383 +      }
  80.384 +      break;
  80.385 +    }
  80.386 +    default:
  80.387 +      fatal("unexpected bytecode for var length profile data");
  80.388 +    }
  80.389    }
  80.390    // Note:  cell_count might be zero, meaning that there is just
  80.391    //        a DataLayout header, with no extra cells.
  80.392 @@ -499,6 +740,7 @@
  80.393    // Add a cell to record information about modified arguments.
  80.394    int arg_size = method->size_of_parameters();
  80.395    object_size += DataLayout::compute_size_in_bytes(arg_size+1);
  80.396 +
  80.397    return object_size;
  80.398  }
  80.399  
  80.400 @@ -534,10 +776,21 @@
  80.401      }
  80.402      break;
  80.403    case Bytecodes::_invokespecial:
  80.404 -  case Bytecodes::_invokestatic:
  80.405 -    cell_count = CounterData::static_cell_count();
  80.406 -    tag = DataLayout::counter_data_tag;
  80.407 +  case Bytecodes::_invokestatic: {
  80.408 +    int counter_data_cell_count = CounterData::static_cell_count();
  80.409 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  80.410 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  80.411 +      cell_count = CallTypeData::compute_cell_count(stream);
  80.412 +    } else {
  80.413 +      cell_count = counter_data_cell_count;
  80.414 +    }
  80.415 +    if (cell_count > counter_data_cell_count) {
  80.416 +      tag = DataLayout::call_type_data_tag;
  80.417 +    } else {
  80.418 +      tag = DataLayout::counter_data_tag;
  80.419 +    }
  80.420      break;
  80.421 +  }
  80.422    case Bytecodes::_goto:
  80.423    case Bytecodes::_goto_w:
  80.424    case Bytecodes::_jsr:
  80.425 @@ -546,15 +799,37 @@
  80.426      tag = DataLayout::jump_data_tag;
  80.427      break;
  80.428    case Bytecodes::_invokevirtual:
  80.429 -  case Bytecodes::_invokeinterface:
  80.430 -    cell_count = VirtualCallData::static_cell_count();
  80.431 -    tag = DataLayout::virtual_call_data_tag;
  80.432 +  case Bytecodes::_invokeinterface: {
  80.433 +    int virtual_call_data_cell_count = VirtualCallData::static_cell_count();
  80.434 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  80.435 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  80.436 +      cell_count = VirtualCallTypeData::compute_cell_count(stream);
  80.437 +    } else {
  80.438 +      cell_count = virtual_call_data_cell_count;
  80.439 +    }
  80.440 +    if (cell_count > virtual_call_data_cell_count) {
  80.441 +      tag = DataLayout::virtual_call_type_data_tag;
  80.442 +    } else {
  80.443 +      tag = DataLayout::virtual_call_data_tag;
  80.444 +    }
  80.445      break;
  80.446 -  case Bytecodes::_invokedynamic:
  80.447 +  }
  80.448 +  case Bytecodes::_invokedynamic: {
  80.449      // %%% should make a type profile for any invokedynamic that takes a ref argument
  80.450 -    cell_count = CounterData::static_cell_count();
  80.451 -    tag = DataLayout::counter_data_tag;
  80.452 +    int counter_data_cell_count = CounterData::static_cell_count();
  80.453 +    if (profile_arguments_for_invoke(stream->method(), stream->bci()) ||
  80.454 +        profile_return_for_invoke(stream->method(), stream->bci())) {
  80.455 +      cell_count = CallTypeData::compute_cell_count(stream);
  80.456 +    } else {
  80.457 +      cell_count = counter_data_cell_count;
  80.458 +    }
  80.459 +    if (cell_count > counter_data_cell_count) {
  80.460 +      tag = DataLayout::call_type_data_tag;
  80.461 +    } else {
  80.462 +      tag = DataLayout::counter_data_tag;
  80.463 +    }
  80.464      break;
  80.465 +  }
  80.466    case Bytecodes::_ret:
  80.467      cell_count = RetData::static_cell_count();
  80.468      tag = DataLayout::ret_data_tag;
  80.469 @@ -585,6 +860,11 @@
  80.470      break;
  80.471    }
  80.472    assert(tag == DataLayout::multi_branch_data_tag ||
  80.473 +         ((MethodData::profile_arguments() || MethodData::profile_return()) &&
  80.474 +          (tag == DataLayout::call_type_data_tag ||
  80.475 +           tag == DataLayout::counter_data_tag ||
  80.476 +           tag == DataLayout::virtual_call_type_data_tag ||
  80.477 +           tag == DataLayout::virtual_call_data_tag)) ||
  80.478           cell_count == bytecode_cell_count(c), "cell counts must agree");
  80.479    if (cell_count >= 0) {
  80.480      assert(tag != DataLayout::no_tag, "bad tag");
  80.481 @@ -631,6 +911,10 @@
  80.482      return new MultiBranchData(this);
  80.483    case DataLayout::arg_info_data_tag:
  80.484      return new ArgInfoData(this);
  80.485 +  case DataLayout::call_type_data_tag:
  80.486 +    return new CallTypeData(this);
  80.487 +  case DataLayout::virtual_call_type_data_tag:
  80.488 +    return new VirtualCallTypeData(this);
  80.489    };
  80.490  }
  80.491  
  80.492 @@ -898,3 +1182,70 @@
  80.493    NEEDS_CLEANUP;
  80.494    // not yet implemented.
  80.495  }
  80.496 +
  80.497 +bool MethodData::profile_jsr292(methodHandle m, int bci) {
  80.498 +  if (m->is_compiled_lambda_form()) {
  80.499 +    return true;
  80.500 +  }
  80.501 +
  80.502 +  Bytecode_invoke inv(m , bci);
  80.503 +  return inv.is_invokedynamic() || inv.is_invokehandle();
  80.504 +}
  80.505 +
  80.506 +int MethodData::profile_arguments_flag() {
  80.507 +  return TypeProfileLevel % 10;
  80.508 +}
  80.509 +
  80.510 +bool MethodData::profile_arguments() {
  80.511 +  return profile_arguments_flag() > no_type_profile && profile_arguments_flag() <= type_profile_all;
  80.512 +}
  80.513 +
  80.514 +bool MethodData::profile_arguments_jsr292_only() {
  80.515 +  return profile_arguments_flag() == type_profile_jsr292;
  80.516 +}
  80.517 +
  80.518 +bool MethodData::profile_all_arguments() {
  80.519 +  return profile_arguments_flag() == type_profile_all;
  80.520 +}
  80.521 +
  80.522 +bool MethodData::profile_arguments_for_invoke(methodHandle m, int bci) {
  80.523 +  if (!profile_arguments()) {
  80.524 +    return false;
  80.525 +  }
  80.526 +
  80.527 +  if (profile_all_arguments()) {
  80.528 +    return true;
  80.529 +  }
  80.530 +
  80.531 +  assert(profile_arguments_jsr292_only(), "inconsistent");
  80.532 +  return profile_jsr292(m, bci);
  80.533 +}
  80.534 +
  80.535 +int MethodData::profile_return_flag() {
  80.536 +  return TypeProfileLevel / 10;
  80.537 +}
  80.538 +
  80.539 +bool MethodData::profile_return() {
  80.540 +  return profile_return_flag() > no_type_profile && profile_return_flag() <= type_profile_all;
  80.541 +}
  80.542 +
  80.543 +bool MethodData::profile_return_jsr292_only() {
  80.544 +  return profile_return_flag() == type_profile_jsr292;
  80.545 +}
  80.546 +
  80.547 +bool MethodData::profile_all_return() {
  80.548 +  return profile_return_flag() == type_profile_all;
  80.549 +}
  80.550 +
  80.551 +bool MethodData::profile_return_for_invoke(methodHandle m, int bci) {
  80.552 +  if (!profile_return()) {
  80.553 +    return false;
  80.554 +  }
  80.555 +
  80.556 +  if (profile_all_return()) {
  80.557 +    return true;
  80.558 +  }
  80.559 +
  80.560 +  assert(profile_return_jsr292_only(), "inconsistent");
  80.561 +  return profile_jsr292(m, bci);
  80.562 +}
    81.1 --- a/src/share/vm/oops/methodData.hpp	Thu Oct 17 06:29:58 2013 -0700
    81.2 +++ b/src/share/vm/oops/methodData.hpp	Fri Oct 18 12:10:44 2013 -0700
    81.3 @@ -117,7 +117,9 @@
    81.4      ret_data_tag,
    81.5      branch_data_tag,
    81.6      multi_branch_data_tag,
    81.7 -    arg_info_data_tag
    81.8 +    arg_info_data_tag,
    81.9 +    call_type_data_tag,
   81.10 +    virtual_call_type_data_tag
   81.11    };
   81.12  
   81.13    enum {
   81.14 @@ -165,7 +167,7 @@
   81.15    // occurred, and the MDO shows N occurrences of X, we make the
   81.16    // simplifying assumption that all N occurrences can be blamed
   81.17    // on that BCI.
   81.18 -  int trap_state() {
   81.19 +  int trap_state() const {
   81.20      return ((_header._struct._flags >> trap_shift) & trap_mask);
   81.21    }
   81.22  
   81.23 @@ -175,11 +177,11 @@
   81.24      _header._struct._flags = (new_state << trap_shift) | old_flags;
   81.25    }
   81.26  
   81.27 -  u1 flags() {
   81.28 +  u1 flags() const {
   81.29      return _header._struct._flags;
   81.30    }
   81.31  
   81.32 -  u2 bci() {
   81.33 +  u2 bci() const {
   81.34      return _header._struct._bci;
   81.35    }
   81.36  
   81.37 @@ -198,7 +200,7 @@
   81.38    void release_set_cell_at(int index, intptr_t value) {
   81.39      OrderAccess::release_store_ptr(&_cells[index], value);
   81.40    }
   81.41 -  intptr_t cell_at(int index) {
   81.42 +  intptr_t cell_at(int index) const {
   81.43      return _cells[index];
   81.44    }
   81.45  
   81.46 @@ -206,7 +208,7 @@
   81.47      assert(flag_number < flag_limit, "oob");
   81.48      _header._struct._flags |= (0x1 << flag_number);
   81.49    }
   81.50 -  bool flag_at(int flag_number) {
   81.51 +  bool flag_at(int flag_number) const {
   81.52      assert(flag_number < flag_limit, "oob");
   81.53      return (_header._struct._flags & (0x1 << flag_number)) != 0;
   81.54    }
   81.55 @@ -254,19 +256,23 @@
   81.56  class     CounterData;
   81.57  class       ReceiverTypeData;
   81.58  class         VirtualCallData;
   81.59 +class           VirtualCallTypeData;
   81.60  class       RetData;
   81.61 +class       CallTypeData;
   81.62  class   JumpData;
   81.63  class     BranchData;
   81.64  class   ArrayData;
   81.65  class     MultiBranchData;
   81.66  class     ArgInfoData;
   81.67  
   81.68 -
   81.69  // ProfileData
   81.70  //
   81.71  // A ProfileData object is created to refer to a section of profiling
   81.72  // data in a structured way.
   81.73  class ProfileData : public ResourceObj {
   81.74 +  friend class TypeEntries;
   81.75 +  friend class ReturnTypeEntry;
   81.76 +  friend class TypeStackSlotEntries;
   81.77  private:
   81.78  #ifndef PRODUCT
   81.79    enum {
   81.80 @@ -280,6 +286,7 @@
   81.81  
   81.82  protected:
   81.83    DataLayout* data() { return _data; }
   81.84 +  const DataLayout* data() const { return _data; }
   81.85  
   81.86    enum {
   81.87      cell_size = DataLayout::cell_size
   81.88 @@ -287,7 +294,7 @@
   81.89  
   81.90  public:
   81.91    // How many cells are in this?
   81.92 -  virtual int cell_count() {
   81.93 +  virtual int cell_count() const {
   81.94      ShouldNotReachHere();
   81.95      return -1;
   81.96    }
   81.97 @@ -307,7 +314,7 @@
   81.98      assert(0 <= index && index < cell_count(), "oob");
   81.99      data()->release_set_cell_at(index, value);
  81.100    }
  81.101 -  intptr_t intptr_at(int index) {
  81.102 +  intptr_t intptr_at(int index) const {
  81.103      assert(0 <= index && index < cell_count(), "oob");
  81.104      return data()->cell_at(index);
  81.105    }
  81.106 @@ -317,7 +324,7 @@
  81.107    void release_set_uint_at(int index, uint value) {
  81.108      release_set_intptr_at(index, (intptr_t) value);
  81.109    }
  81.110 -  uint uint_at(int index) {
  81.111 +  uint uint_at(int index) const {
  81.112      return (uint)intptr_at(index);
  81.113    }
  81.114    void set_int_at(int index, int value) {
  81.115 @@ -326,23 +333,23 @@
  81.116    void release_set_int_at(int index, int value) {
  81.117      release_set_intptr_at(index, (intptr_t) value);
  81.118    }
  81.119 -  int int_at(int index) {
  81.120 +  int int_at(int index) const {
  81.121      return (int)intptr_at(index);
  81.122    }
  81.123 -  int int_at_unchecked(int index) {
  81.124 +  int int_at_unchecked(int index) const {
  81.125      return (int)data()->cell_at(index);
  81.126    }
  81.127    void set_oop_at(int index, oop value) {
  81.128      set_intptr_at(index, cast_from_oop<intptr_t>(value));
  81.129    }
  81.130 -  oop oop_at(int index) {
  81.131 +  oop oop_at(int index) const {
  81.132      return cast_to_oop(intptr_at(index));
  81.133    }
  81.134  
  81.135    void set_flag_at(int flag_number) {
  81.136      data()->set_flag_at(flag_number);
  81.137    }
  81.138 -  bool flag_at(int flag_number) {
  81.139 +  bool flag_at(int flag_number) const {
  81.140      return data()->flag_at(flag_number);
  81.141    }
  81.142  
  81.143 @@ -362,7 +369,7 @@
  81.144    // Constructor for invalid ProfileData.
  81.145    ProfileData();
  81.146  
  81.147 -  u2 bci() {
  81.148 +  u2 bci() const {
  81.149      return data()->bci();
  81.150    }
  81.151  
  81.152 @@ -370,7 +377,7 @@
  81.153      return (address)_data;
  81.154    }
  81.155  
  81.156 -  int trap_state() {
  81.157 +  int trap_state() const {
  81.158      return data()->trap_state();
  81.159    }
  81.160    void set_trap_state(int new_state) {
  81.161 @@ -378,58 +385,68 @@
  81.162    }
  81.163  
  81.164    // Type checking
  81.165 -  virtual bool is_BitData()         { return false; }
  81.166 -  virtual bool is_CounterData()     { return false; }
  81.167 -  virtual bool is_JumpData()        { return false; }
  81.168 -  virtual bool is_ReceiverTypeData(){ return false; }
  81.169 -  virtual bool is_VirtualCallData() { return false; }
  81.170 -  virtual bool is_RetData()         { return false; }
  81.171 -  virtual bool is_BranchData()      { return false; }
  81.172 -  virtual bool is_ArrayData()       { return false; }
  81.173 -  virtual bool is_MultiBranchData() { return false; }
  81.174 -  virtual bool is_ArgInfoData()     { return false; }
  81.175 +  virtual bool is_BitData()         const { return false; }
  81.176 +  virtual bool is_CounterData()     const { return false; }
  81.177 +  virtual bool is_JumpData()        const { return false; }
  81.178 +  virtual bool is_ReceiverTypeData()const { return false; }
  81.179 +  virtual bool is_VirtualCallData() const { return false; }
  81.180 +  virtual bool is_RetData()         const { return false; }
  81.181 +  virtual bool is_BranchData()      const { return false; }
  81.182 +  virtual bool is_ArrayData()       const { return false; }
  81.183 +  virtual bool is_MultiBranchData() const { return false; }
  81.184 +  virtual bool is_ArgInfoData()     const { return false; }
  81.185 +  virtual bool is_CallTypeData()    const { return false; }
  81.186 +  virtual bool is_VirtualCallTypeData()const { return false; }
  81.187  
  81.188  
  81.189 -  BitData* as_BitData() {
  81.190 +  BitData* as_BitData() const {
  81.191      assert(is_BitData(), "wrong type");
  81.192      return is_BitData()         ? (BitData*)        this : NULL;
  81.193    }
  81.194 -  CounterData* as_CounterData() {
  81.195 +  CounterData* as_CounterData() const {
  81.196      assert(is_CounterData(), "wrong type");
  81.197      return is_CounterData()     ? (CounterData*)    this : NULL;
  81.198    }
  81.199 -  JumpData* as_JumpData() {
  81.200 +  JumpData* as_JumpData() const {
  81.201      assert(is_JumpData(), "wrong type");
  81.202      return is_JumpData()        ? (JumpData*)       this : NULL;
  81.203    }
  81.204 -  ReceiverTypeData* as_ReceiverTypeData() {
  81.205 +  ReceiverTypeData* as_ReceiverTypeData() const {
  81.206      assert(is_ReceiverTypeData(), "wrong type");
  81.207      return is_ReceiverTypeData() ? (ReceiverTypeData*)this : NULL;
  81.208    }
  81.209 -  VirtualCallData* as_VirtualCallData() {
  81.210 +  VirtualCallData* as_VirtualCallData() const {
  81.211      assert(is_VirtualCallData(), "wrong type");
  81.212      return is_VirtualCallData() ? (VirtualCallData*)this : NULL;
  81.213    }
  81.214 -  RetData* as_RetData() {
  81.215 +  RetData* as_RetData() const {
  81.216      assert(is_RetData(), "wrong type");
  81.217      return is_RetData()         ? (RetData*)        this : NULL;
  81.218    }
  81.219 -  BranchData* as_BranchData() {
  81.220 +  BranchData* as_BranchData() const {
  81.221      assert(is_BranchData(), "wrong type");
  81.222      return is_BranchData()      ? (BranchData*)     this : NULL;
  81.223    }
  81.224 -  ArrayData* as_ArrayData() {
  81.225 +  ArrayData* as_ArrayData() const {
  81.226      assert(is_ArrayData(), "wrong type");
  81.227      return is_ArrayData()       ? (ArrayData*)      this : NULL;
  81.228    }
  81.229 -  MultiBranchData* as_MultiBranchData() {
  81.230 +  MultiBranchData* as_MultiBranchData() const {
  81.231      assert(is_MultiBranchData(), "wrong type");
  81.232      return is_MultiBranchData() ? (MultiBranchData*)this : NULL;
  81.233    }
  81.234 -  ArgInfoData* as_ArgInfoData() {
  81.235 +  ArgInfoData* as_ArgInfoData() const {
  81.236      assert(is_ArgInfoData(), "wrong type");
  81.237      return is_ArgInfoData() ? (ArgInfoData*)this : NULL;
  81.238    }
  81.239 +  CallTypeData* as_CallTypeData() const {
  81.240 +    assert(is_CallTypeData(), "wrong type");
  81.241 +    return is_CallTypeData() ? (CallTypeData*)this : NULL;
  81.242 +  }
  81.243 +  VirtualCallTypeData* as_VirtualCallTypeData() const {
  81.244 +    assert(is_VirtualCallTypeData(), "wrong type");
  81.245 +    return is_VirtualCallTypeData() ? (VirtualCallTypeData*)this : NULL;
  81.246 +  }
  81.247  
  81.248  
  81.249    // Subclass specific initialization
  81.250 @@ -443,15 +460,15 @@
  81.251    // an oop in a ProfileData to the ci equivalent. Generally speaking,
  81.252    // most ProfileData don't require any translation, so we provide the null
  81.253    // translation here, and the required translators are in the ci subclasses.
  81.254 -  virtual void translate_from(ProfileData* data) {}
  81.255 +  virtual void translate_from(const ProfileData* data) {}
  81.256  
  81.257 -  virtual void print_data_on(outputStream* st) {
  81.258 +  virtual void print_data_on(outputStream* st) const {
  81.259      ShouldNotReachHere();
  81.260    }
  81.261  
  81.262  #ifndef PRODUCT
  81.263 -  void print_shared(outputStream* st, const char* name);
  81.264 -  void tab(outputStream* st);
  81.265 +  void print_shared(outputStream* st, const char* name) const;
  81.266 +  void tab(outputStream* st, bool first = false) const;
  81.267  #endif
  81.268  };
  81.269  
  81.270 @@ -470,13 +487,13 @@
  81.271    BitData(DataLayout* layout) : ProfileData(layout) {
  81.272    }
  81.273  
  81.274 -  virtual bool is_BitData() { return true; }
  81.275 +  virtual bool is_BitData() const { return true; }
  81.276  
  81.277    static int static_cell_count() {
  81.278      return bit_cell_count;
  81.279    }
  81.280  
  81.281 -  virtual int cell_count() {
  81.282 +  virtual int cell_count() const {
  81.283      return static_cell_count();
  81.284    }
  81.285  
  81.286 @@ -498,7 +515,7 @@
  81.287    }
  81.288  
  81.289  #ifndef PRODUCT
  81.290 -  void print_data_on(outputStream* st);
  81.291 +  void print_data_on(outputStream* st) const;
  81.292  #endif
  81.293  };
  81.294  
  81.295 @@ -514,18 +531,18 @@
  81.296  public:
  81.297    CounterData(DataLayout* layout) : BitData(layout) {}
  81.298  
  81.299 -  virtual bool is_CounterData() { return true; }
  81.300 +  virtual bool is_CounterData() const { return true; }
  81.301  
  81.302    static int static_cell_count() {
  81.303      return counter_cell_count;
  81.304    }
  81.305  
  81.306 -  virtual int cell_count() {
  81.307 +  virtual int cell_count() const {
  81.308      return static_cell_count();
  81.309    }
  81.310  
  81.311    // Direct accessor
  81.312 -  uint count() {
  81.313 +  uint count() const {
  81.314      return uint_at(count_off);
  81.315    }
  81.316  
  81.317 @@ -542,7 +559,7 @@
  81.318    }
  81.319  
  81.320  #ifndef PRODUCT
  81.321 -  void print_data_on(outputStream* st);
  81.322 +  void print_data_on(outputStream* st) const;
  81.323  #endif
  81.324  };
  81.325  
  81.326 @@ -570,18 +587,18 @@
  81.327        layout->tag() == DataLayout::branch_data_tag, "wrong type");
  81.328    }
  81.329  
  81.330 -  virtual bool is_JumpData() { return true; }
  81.331 +  virtual bool is_JumpData() const { return true; }
  81.332  
  81.333    static int static_cell_count() {
  81.334      return jump_cell_count;
  81.335    }
  81.336  
  81.337 -  virtual int cell_count() {
  81.338 +  virtual int cell_count() const {
  81.339      return static_cell_count();
  81.340    }
  81.341  
  81.342    // Direct accessor
  81.343 -  uint taken() {
  81.344 +  uint taken() const {
  81.345      return uint_at(taken_off_set);
  81.346    }
  81.347  
  81.348 @@ -598,7 +615,7 @@
  81.349      return cnt;
  81.350    }
  81.351  
  81.352 -  int displacement() {
  81.353 +  int displacement() const {
  81.354      return int_at(displacement_off_set);
  81.355    }
  81.356  
  81.357 @@ -615,7 +632,418 @@
  81.358    void post_initialize(BytecodeStream* stream, MethodData* mdo);
  81.359  
  81.360  #ifndef PRODUCT
  81.361 -  void print_data_on(outputStream* st);
  81.362 +  void print_data_on(outputStream* st) const;
  81.363 +#endif
  81.364 +};
  81.365 +
  81.366 +// Entries in a ProfileData object to record types: it can either be
  81.367 +// none (no profile), unknown (conflicting profile data) or a klass if
  81.368 +// a single one is seen. Whether a null reference was seen is also
  81.369 +// recorded. No counter is associated with the type and a single type
  81.370 +// is tracked (unlike VirtualCallData).
  81.371 +class TypeEntries {
  81.372 +
  81.373 +public:
  81.374 +
  81.375 +  // A single cell is used to record information for a type:
  81.376 +  // - the cell is initialized to 0
  81.377 +  // - when a type is discovered it is stored in the cell
  81.378 +  // - bit zero of the cell is used to record whether a null reference
  81.379 +  // was encountered or not
  81.380 +  // - bit 1 is set to record a conflict in the type information
  81.381 +
  81.382 +  enum {
  81.383 +    null_seen = 1,
  81.384 +    type_mask = ~null_seen,
  81.385 +    type_unknown = 2,
  81.386 +    status_bits = null_seen | type_unknown,
  81.387 +    type_klass_mask = ~status_bits
  81.388 +  };
  81.389 +
  81.390 +  // what to initialize a cell to
  81.391 +  static intptr_t type_none() {
  81.392 +    return 0;
  81.393 +  }
  81.394 +
  81.395 +  // null seen = bit 0 set?
  81.396 +  static bool was_null_seen(intptr_t v) {
  81.397 +    return (v & null_seen) != 0;
  81.398 +  }
  81.399 +
  81.400 +  // conflicting type information = bit 1 set?
  81.401 +  static bool is_type_unknown(intptr_t v) {
  81.402 +    return (v & type_unknown) != 0;
  81.403 +  }
  81.404 +
  81.405 +  // not type information yet = all bits cleared, ignoring bit 0?
  81.406 +  static bool is_type_none(intptr_t v) {
  81.407 +    return (v & type_mask) == 0;
  81.408 +  }
  81.409 +
  81.410 +  // recorded type: cell without bit 0 and 1
  81.411 +  static intptr_t klass_part(intptr_t v) {
  81.412 +    intptr_t r = v & type_klass_mask;
  81.413 +    assert (r != 0, "invalid");
  81.414 +    return r;
  81.415 +  }
  81.416 +
  81.417 +  // type recorded
  81.418 +  static Klass* valid_klass(intptr_t k) {
  81.419 +    if (!is_type_none(k) &&
  81.420 +        !is_type_unknown(k)) {
  81.421 +      return (Klass*)klass_part(k);
  81.422 +    } else {
  81.423 +      return NULL;
  81.424 +    }
  81.425 +  }
  81.426 +
  81.427 +  static intptr_t with_status(intptr_t k, intptr_t in) {
  81.428 +    return k | (in & status_bits);
  81.429 +  }
  81.430 +
  81.431 +  static intptr_t with_status(Klass* k, intptr_t in) {
  81.432 +    return with_status((intptr_t)k, in);
  81.433 +  }
  81.434 +
  81.435 +#ifndef PRODUCT
  81.436 +  static void print_klass(outputStream* st, intptr_t k);
  81.437 +#endif
  81.438 +
  81.439 +  // GC support
  81.440 +  static bool is_loader_alive(BoolObjectClosure* is_alive_cl, intptr_t p);
  81.441 +
  81.442 +protected:
  81.443 +  // ProfileData object these entries are part of
  81.444 +  ProfileData* _pd;
  81.445 +  // offset within the ProfileData object where the entries start
  81.446 +  const int _base_off;
  81.447 +
  81.448 +  TypeEntries(int base_off)
  81.449 +    : _base_off(base_off), _pd(NULL) {}
  81.450 +
  81.451 +  void set_intptr_at(int index, intptr_t value) {
  81.452 +    _pd->set_intptr_at(index, value);
  81.453 +  }
  81.454 +
  81.455 +  intptr_t intptr_at(int index) const {
  81.456 +    return _pd->intptr_at(index);
  81.457 +  }
  81.458 +
  81.459 +public:
  81.460 +  void set_profile_data(ProfileData* pd) {
  81.461 +    _pd = pd;
  81.462 +  }
  81.463 +};
  81.464 +
  81.465 +// Type entries used for arguments passed at a call and parameters on
  81.466 +// method entry. 2 cells per entry: one for the type encoded as in
  81.467 +// TypeEntries and one initialized with the stack slot where the
  81.468 +// profiled object is to be found so that the interpreter can locate
  81.469 +// it quickly.
  81.470 +class TypeStackSlotEntries : public TypeEntries {
  81.471 +
  81.472 +private:
  81.473 +  enum {
  81.474 +    stack_slot_entry,
  81.475 +    type_entry,
  81.476 +    per_arg_cell_count
  81.477 +  };
  81.478 +
  81.479 +  // offset of cell for stack slot for entry i within ProfileData object
  81.480 +  int stack_slot_offset(int i) const {
  81.481 +    return _base_off + stack_slot_local_offset(i);
  81.482 +  }
  81.483 +
  81.484 +protected:
  81.485 +  const int _number_of_entries;
  81.486 +
  81.487 +  // offset of cell for type for entry i within ProfileData object
  81.488 +  int type_offset(int i) const {
  81.489 +    return _base_off + type_local_offset(i);
  81.490 +  }
  81.491 +
  81.492 +public:
  81.493 +
  81.494 +  TypeStackSlotEntries(int base_off, int nb_entries)
  81.495 +    : TypeEntries(base_off), _number_of_entries(nb_entries) {}
  81.496 +
  81.497 +  static int compute_cell_count(Symbol* signature, int max);
  81.498 +
  81.499 +  void post_initialize(Symbol* signature, bool has_receiver);
  81.500 +
  81.501 +  // offset of cell for stack slot for entry i within this block of cells for a TypeStackSlotEntries
  81.502 +  static int stack_slot_local_offset(int i) {
  81.503 +    return i * per_arg_cell_count + stack_slot_entry;
  81.504 +  }
  81.505 +
  81.506 +  // offset of cell for type for entry i within this block of cells for a TypeStackSlotEntries
  81.507 +  static int type_local_offset(int i) {
  81.508 +    return i * per_arg_cell_count + type_entry;
  81.509 +  }
  81.510 +
  81.511 +  // stack slot for entry i
  81.512 +  uint stack_slot(int i) const {
  81.513 +    assert(i >= 0 && i < _number_of_entries, "oob");
  81.514 +    return _pd->uint_at(stack_slot_offset(i));
  81.515 +  }
  81.516 +
  81.517 +  // set stack slot for entry i
  81.518 +  void set_stack_slot(int i, uint num) {
  81.519 +    assert(i >= 0 && i < _number_of_entries, "oob");
  81.520 +    _pd->set_uint_at(stack_slot_offset(i), num);
  81.521 +  }
  81.522 +
  81.523 +  // type for entry i
  81.524 +  intptr_t type(int i) const {
  81.525 +    assert(i >= 0 && i < _number_of_entries, "oob");
  81.526 +    return _pd->intptr_at(type_offset(i));
  81.527 +  }
  81.528 +
  81.529 +  // set type for entry i
  81.530 +  void set_type(int i, intptr_t k) {
  81.531 +    assert(i >= 0 && i < _number_of_entries, "oob");
  81.532 +    _pd->set_intptr_at(type_offset(i), k);
  81.533 +  }
  81.534 +
  81.535 +  static ByteSize per_arg_size() {
  81.536 +    return in_ByteSize(per_arg_cell_count * DataLayout::cell_size);
  81.537 +  }
  81.538 +
  81.539 +  static int per_arg_count() {
  81.540 +    return per_arg_cell_count ;
  81.541 +  }
  81.542 +
  81.543 +  // GC support
  81.544 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  81.545 +
  81.546 +#ifndef PRODUCT
  81.547 +  void print_data_on(outputStream* st) const;
  81.548 +#endif
  81.549 +};
  81.550 +
  81.551 +// Type entry used for return from a call. A single cell to record the
  81.552 +// type.
  81.553 +class ReturnTypeEntry : public TypeEntries {
  81.554 +
  81.555 +private:
  81.556 +  enum {
  81.557 +    cell_count = 1
  81.558 +  };
  81.559 +
  81.560 +public:
  81.561 +  ReturnTypeEntry(int base_off)
  81.562 +    : TypeEntries(base_off) {}
  81.563 +
  81.564 +  void post_initialize() {
  81.565 +    set_type(type_none());
  81.566 +  }
  81.567 +
  81.568 +  intptr_t type() const {
  81.569 +    return _pd->intptr_at(_base_off);
  81.570 +  }
  81.571 +
  81.572 +  void set_type(intptr_t k) {
  81.573 +    _pd->set_intptr_at(_base_off, k);
  81.574 +  }
  81.575 +
  81.576 +  static int static_cell_count() {
  81.577 +    return cell_count;
  81.578 +  }
  81.579 +
  81.580 +  static ByteSize size() {
  81.581 +    return in_ByteSize(cell_count * DataLayout::cell_size);
  81.582 +  }
  81.583 +
  81.584 +  ByteSize type_offset() {
  81.585 +    return DataLayout::cell_offset(_base_off);
  81.586 +  }
  81.587 +
  81.588 +  // GC support
  81.589 +  void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  81.590 +
  81.591 +#ifndef PRODUCT
  81.592 +  void print_data_on(outputStream* st) const;
  81.593 +#endif
  81.594 +};
  81.595 +
  81.596 +// Entries to collect type information at a call: contains arguments
  81.597 +// (TypeStackSlotEntries), a return type (ReturnTypeEntry) and a
  81.598 +// number of cells. Because the number of cells for the return type is
  81.599 +// smaller than the number of cells for the type of an arguments, the
  81.600 +// number of cells is used to tell how many arguments are profiled and
  81.601 +// whether a return value is profiled. See has_arguments() and
  81.602 +// has_return().
  81.603 +class TypeEntriesAtCall {
  81.604 +private:
  81.605 +  static int stack_slot_local_offset(int i) {
  81.606 +    return header_cell_count() + TypeStackSlotEntries::stack_slot_local_offset(i);
  81.607 +  }
  81.608 +
  81.609 +  static int argument_type_local_offset(int i) {
  81.610 +    return header_cell_count() + TypeStackSlotEntries::type_local_offset(i);;
  81.611 +  }
  81.612 +
  81.613 +public:
  81.614 +
  81.615 +  static int header_cell_count() {
  81.616 +    return 1;
  81.617 +  }
  81.618 +
  81.619 +  static int cell_count_local_offset() {
  81.620 +    return 0;
  81.621 +  }
  81.622 +
  81.623 +  static int compute_cell_count(BytecodeStream* stream);
  81.624 +
  81.625 +  static void initialize(DataLayout* dl, int base, int cell_count) {
  81.626 +    int off = base + cell_count_local_offset();
  81.627 +    dl->set_cell_at(off, cell_count - base - header_cell_count());
  81.628 +  }
  81.629 +
  81.630 +  static bool arguments_profiling_enabled();
  81.631 +  static bool return_profiling_enabled();
  81.632 +
  81.633 +  // Code generation support
  81.634 +  static ByteSize cell_count_offset() {
  81.635 +    return in_ByteSize(cell_count_local_offset() * DataLayout::cell_size);
  81.636 +  }
  81.637 +
  81.638 +  static ByteSize args_data_offset() {
  81.639 +    return in_ByteSize(header_cell_count() * DataLayout::cell_size);
  81.640 +  }
  81.641 +
  81.642 +  static ByteSize stack_slot_offset(int i) {
  81.643 +    return in_ByteSize(stack_slot_local_offset(i) * DataLayout::cell_size);
  81.644 +  }
  81.645 +
  81.646 +  static ByteSize argument_type_offset(int i) {
  81.647 +    return in_ByteSize(argument_type_local_offset(i) * DataLayout::cell_size);
  81.648 +  }
  81.649 +};
  81.650 +
  81.651 +// CallTypeData
  81.652 +//
  81.653 +// A CallTypeData is used to access profiling information about a non
  81.654 +// virtual call for which we collect type information about arguments
  81.655 +// and return value.
  81.656 +class CallTypeData : public CounterData {
  81.657 +private:
  81.658 +  // entries for arguments if any
  81.659 +  TypeStackSlotEntries _args;
  81.660 +  // entry for return type if any
  81.661 +  ReturnTypeEntry _ret;
  81.662 +
  81.663 +  int cell_count_global_offset() const {
  81.664 +    return CounterData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  81.665 +  }
  81.666 +
  81.667 +  // number of cells not counting the header
  81.668 +  int cell_count_no_header() const {
  81.669 +    return uint_at(cell_count_global_offset());
  81.670 +  }
  81.671 +
  81.672 +  void check_number_of_arguments(int total) {
  81.673 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  81.674 +  }
  81.675 +
  81.676 +protected:
  81.677 +  // An entry for a return value takes less space than an entry for an
  81.678 +  // argument so if the number of cells exceeds the number of cells
  81.679 +  // needed for an argument, this object contains type information for
  81.680 +  // at least one argument.
  81.681 +  bool has_arguments() const {
  81.682 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
  81.683 +    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  81.684 +    return res;
  81.685 +  }
  81.686 +
  81.687 +public:
  81.688 +  CallTypeData(DataLayout* layout) :
  81.689 +    CounterData(layout),
  81.690 +    _args(CounterData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  81.691 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  81.692 +  {
  81.693 +    assert(layout->tag() == DataLayout::call_type_data_tag, "wrong type");
  81.694 +    // Some compilers (VC++) don't want this passed in member initialization list
  81.695 +    _args.set_profile_data(this);
  81.696 +    _ret.set_profile_data(this);
  81.697 +  }
  81.698 +
  81.699 +  const TypeStackSlotEntries* args() const {
  81.700 +    assert(has_arguments(), "no profiling of arguments");
  81.701 +    return &_args;
  81.702 +  }
  81.703 +
  81.704 +  const ReturnTypeEntry* ret() const {
  81.705 +    assert(has_return(), "no profiling of return value");
  81.706 +    return &_ret;
  81.707 +  }
  81.708 +
  81.709 +  virtual bool is_CallTypeData() const { return true; }
  81.710 +
  81.711 +  static int static_cell_count() {
  81.712 +    return -1;
  81.713 +  }
  81.714 +
  81.715 +  static int compute_cell_count(BytecodeStream* stream) {
  81.716 +    return CounterData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  81.717 +  }
  81.718 +
  81.719 +  static void initialize(DataLayout* dl, int cell_count) {
  81.720 +    TypeEntriesAtCall::initialize(dl, CounterData::static_cell_count(), cell_count);
  81.721 +  }
  81.722 +
  81.723 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  81.724 +
  81.725 +  virtual int cell_count() const {
  81.726 +    return CounterData::static_cell_count() +
  81.727 +      TypeEntriesAtCall::header_cell_count() +
  81.728 +      int_at_unchecked(cell_count_global_offset());
  81.729 +  }
  81.730 +
  81.731 +  int number_of_arguments() const {
  81.732 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  81.733 +  }
  81.734 +
  81.735 +  void set_argument_type(int i, Klass* k) {
  81.736 +    assert(has_arguments(), "no arguments!");
  81.737 +    intptr_t current = _args.type(i);
  81.738 +    _args.set_type(i, TypeEntries::with_status(k, current));
  81.739 +  }
  81.740 +
  81.741 +  void set_return_type(Klass* k) {
  81.742 +    assert(has_return(), "no return!");
  81.743 +    intptr_t current = _ret.type();
  81.744 +    _ret.set_type(TypeEntries::with_status(k, current));
  81.745 +  }
  81.746 +
  81.747 +  // An entry for a return value takes less space than an entry for an
  81.748 +  // argument, so if the remainder of the number of cells divided by
  81.749 +  // the number of cells for an argument is not null, a return value
  81.750 +  // is profiled in this object.
  81.751 +  bool has_return() const {
  81.752 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
  81.753 +    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  81.754 +    return res;
  81.755 +  }
  81.756 +
  81.757 +  // Code generation support
  81.758 +  static ByteSize args_data_offset() {
  81.759 +    return cell_offset(CounterData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  81.760 +  }
  81.761 +
  81.762 +  // GC support
  81.763 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  81.764 +    if (has_arguments()) {
  81.765 +      _args.clean_weak_klass_links(is_alive_closure);
  81.766 +    }
  81.767 +    if (has_return()) {
  81.768 +      _ret.clean_weak_klass_links(is_alive_closure);
  81.769 +    }
  81.770 +  }
  81.771 +
  81.772 +#ifndef PRODUCT
  81.773 +  virtual void print_data_on(outputStream* st) const;
  81.774  #endif
  81.775  };
  81.776  
  81.777 @@ -636,16 +1064,17 @@
  81.778  public:
  81.779    ReceiverTypeData(DataLayout* layout) : CounterData(layout) {
  81.780      assert(layout->tag() == DataLayout::receiver_type_data_tag ||
  81.781 -           layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  81.782 +           layout->tag() == DataLayout::virtual_call_data_tag ||
  81.783 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  81.784    }
  81.785  
  81.786 -  virtual bool is_ReceiverTypeData() { return true; }
  81.787 +  virtual bool is_ReceiverTypeData() const { return true; }
  81.788  
  81.789    static int static_cell_count() {
  81.790      return counter_cell_count + (uint) TypeProfileWidth * receiver_type_row_cell_count;
  81.791    }
  81.792  
  81.793 -  virtual int cell_count() {
  81.794 +  virtual int cell_count() const {
  81.795      return static_cell_count();
  81.796    }
  81.797  
  81.798 @@ -660,7 +1089,7 @@
  81.799      return count0_offset + row * receiver_type_row_cell_count;
  81.800    }
  81.801  
  81.802 -  Klass* receiver(uint row) {
  81.803 +  Klass* receiver(uint row) const {
  81.804      assert(row < row_limit(), "oob");
  81.805  
  81.806      Klass* recv = (Klass*)intptr_at(receiver_cell_index(row));
  81.807 @@ -673,7 +1102,7 @@
  81.808      set_intptr_at(receiver_cell_index(row), (uintptr_t)k);
  81.809    }
  81.810  
  81.811 -  uint receiver_count(uint row) {
  81.812 +  uint receiver_count(uint row) const {
  81.813      assert(row < row_limit(), "oob");
  81.814      return uint_at(receiver_count_cell_index(row));
  81.815    }
  81.816 @@ -721,8 +1150,8 @@
  81.817    virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure);
  81.818  
  81.819  #ifndef PRODUCT
  81.820 -  void print_receiver_data_on(outputStream* st);
  81.821 -  void print_data_on(outputStream* st);
  81.822 +  void print_receiver_data_on(outputStream* st) const;
  81.823 +  void print_data_on(outputStream* st) const;
  81.824  #endif
  81.825  };
  81.826  
  81.827 @@ -733,10 +1162,11 @@
  81.828  class VirtualCallData : public ReceiverTypeData {
  81.829  public:
  81.830    VirtualCallData(DataLayout* layout) : ReceiverTypeData(layout) {
  81.831 -    assert(layout->tag() == DataLayout::virtual_call_data_tag, "wrong type");
  81.832 +    assert(layout->tag() == DataLayout::virtual_call_data_tag ||
  81.833 +           layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  81.834    }
  81.835  
  81.836 -  virtual bool is_VirtualCallData() { return true; }
  81.837 +  virtual bool is_VirtualCallData() const { return true; }
  81.838  
  81.839    static int static_cell_count() {
  81.840      // At this point we could add more profile state, e.g., for arguments.
  81.841 @@ -744,7 +1174,7 @@
  81.842      return ReceiverTypeData::static_cell_count();
  81.843    }
  81.844  
  81.845 -  virtual int cell_count() {
  81.846 +  virtual int cell_count() const {
  81.847      return static_cell_count();
  81.848    }
  81.849  
  81.850 @@ -754,7 +1184,134 @@
  81.851    }
  81.852  
  81.853  #ifndef PRODUCT
  81.854 -  void print_data_on(outputStream* st);
  81.855 +  void print_data_on(outputStream* st) const;
  81.856 +#endif
  81.857 +};
  81.858 +
  81.859 +// VirtualCallTypeData
  81.860 +//
  81.861 +// A VirtualCallTypeData is used to access profiling information about
  81.862 +// a virtual call for which we collect type information about
  81.863 +// arguments and return value.
  81.864 +class VirtualCallTypeData : public VirtualCallData {
  81.865 +private:
  81.866 +  // entries for arguments if any
  81.867 +  TypeStackSlotEntries _args;
  81.868 +  // entry for return type if any
  81.869 +  ReturnTypeEntry _ret;
  81.870 +
  81.871 +  int cell_count_global_offset() const {
  81.872 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::cell_count_local_offset();
  81.873 +  }
  81.874 +
  81.875 +  // number of cells not counting the header
  81.876 +  int cell_count_no_header() const {
  81.877 +    return uint_at(cell_count_global_offset());
  81.878 +  }
  81.879 +
  81.880 +  void check_number_of_arguments(int total) {
  81.881 +    assert(number_of_arguments() == total, "should be set in DataLayout::initialize");
  81.882 +  }
  81.883 +
  81.884 +protected:
  81.885 +  // An entry for a return value takes less space than an entry for an
  81.886 +  // argument so if the number of cells exceeds the number of cells
  81.887 +  // needed for an argument, this object contains type information for
  81.888 +  // at least one argument.
  81.889 +  bool has_arguments() const {
  81.890 +    bool res = cell_count_no_header() >= TypeStackSlotEntries::per_arg_count();
  81.891 +    assert (!res || TypeEntriesAtCall::arguments_profiling_enabled(), "no profiling of arguments");
  81.892 +    return res;
  81.893 +  }
  81.894 +
  81.895 +public:
  81.896 +  VirtualCallTypeData(DataLayout* layout) :
  81.897 +    VirtualCallData(layout),
  81.898 +    _args(VirtualCallData::static_cell_count()+TypeEntriesAtCall::header_cell_count(), number_of_arguments()),
  81.899 +    _ret(cell_count() - ReturnTypeEntry::static_cell_count())
  81.900 +  {
  81.901 +    assert(layout->tag() == DataLayout::virtual_call_type_data_tag, "wrong type");
  81.902 +    // Some compilers (VC++) don't want this passed in member initialization list
  81.903 +    _args.set_profile_data(this);
  81.904 +    _ret.set_profile_data(this);
  81.905 +  }
  81.906 +
  81.907 +  const TypeStackSlotEntries* args() const {
  81.908 +    assert(has_arguments(), "no profiling of arguments");
  81.909 +    return &_args;
  81.910 +  }
  81.911 +
  81.912 +  const ReturnTypeEntry* ret() const {
  81.913 +    assert(has_return(), "no profiling of return value");
  81.914 +    return &_ret;
  81.915 +  }
  81.916 +
  81.917 +  virtual bool is_VirtualCallTypeData() const { return true; }
  81.918 +
  81.919 +  static int static_cell_count() {
  81.920 +    return -1;
  81.921 +  }
  81.922 +
  81.923 +  static int compute_cell_count(BytecodeStream* stream) {
  81.924 +    return VirtualCallData::static_cell_count() + TypeEntriesAtCall::compute_cell_count(stream);
  81.925 +  }
  81.926 +
  81.927 +  static void initialize(DataLayout* dl, int cell_count) {
  81.928 +    TypeEntriesAtCall::initialize(dl, VirtualCallData::static_cell_count(), cell_count);
  81.929 +  }
  81.930 +
  81.931 +  virtual void post_initialize(BytecodeStream* stream, MethodData* mdo);
  81.932 +
  81.933 +  virtual int cell_count() const {
  81.934 +    return VirtualCallData::static_cell_count() +
  81.935 +      TypeEntriesAtCall::header_cell_count() +
  81.936 +      int_at_unchecked(cell_count_global_offset());
  81.937 +  }
  81.938 +
  81.939 +  int number_of_arguments() const {
  81.940 +    return cell_count_no_header() / TypeStackSlotEntries::per_arg_count();
  81.941 +  }
  81.942 +
  81.943 +  void set_argument_type(int i, Klass* k) {
  81.944 +    assert(has_arguments(), "no arguments!");
  81.945 +    intptr_t current = _args.type(i);
  81.946 +    _args.set_type(i, TypeEntries::with_status(k, current));
  81.947 +  }
  81.948 +
  81.949 +  void set_return_type(Klass* k) {
  81.950 +    assert(has_return(), "no return!");
  81.951 +    intptr_t current = _ret.type();
  81.952 +    _ret.set_type(TypeEntries::with_status(k, current));
  81.953 +  }
  81.954 +
  81.955 +  // An entry for a return value takes less space than an entry for an
  81.956 +  // argument, so if the remainder of the number of cells divided by
  81.957 +  // the number of cells for an argument is not null, a return value
  81.958 +  // is profiled in this object.
  81.959 +  bool has_return() const {
  81.960 +    bool res = (cell_count_no_header() % TypeStackSlotEntries::per_arg_count()) != 0;
  81.961 +    assert (!res || TypeEntriesAtCall::return_profiling_enabled(), "no profiling of return values");
  81.962 +    return res;
  81.963 +  }
  81.964 +
  81.965 +  // Code generation support
  81.966 +  static ByteSize args_data_offset() {
  81.967 +    return cell_offset(VirtualCallData::static_cell_count()) + TypeEntriesAtCall::args_data_offset();
  81.968 +  }
  81.969 +
  81.970 +  // GC support
  81.971 +  virtual void clean_weak_klass_links(BoolObjectClosure* is_alive_closure) {
  81.972 +    ReceiverTypeData::clean_weak_klass_links(is_alive_closure);
  81.973 +    if (has_arguments()) {
  81.974 +      _args.clean_weak_klass_links(is_alive_closure);
  81.975 +    }
  81.976 +    if (has_return()) {
  81.977 +      _ret.clean_weak_klass_links(is_alive_closure);
  81.978 +    }
  81.979 +  }
  81.980 +
  81.981 +#ifndef PRODUCT
  81.982 +  virtual void print_data_on(outputStream* st) const;
  81.983  #endif
  81.984  };
  81.985  
  81.986 @@ -797,7 +1354,7 @@
  81.987      assert(layout->tag() == DataLayout::ret_data_tag, "wrong type");
  81.988    }
  81.989  
  81.990 -  virtual bool is_RetData() { return true; }
  81.991 +  virtual bool is_RetData() const { return true; }
  81.992  
  81.993    enum {
  81.994      no_bci = -1 // value of bci when bci1/2 are not in use.
  81.995 @@ -807,7 +1364,7 @@
  81.996      return counter_cell_count + (uint) BciProfileWidth * ret_row_cell_count;
  81.997    }
  81.998  
  81.999 -  virtual int cell_count() {
 81.1000 +  virtual int cell_count() const {
 81.1001      return static_cell_count();
 81.1002    }
 81.1003  
 81.1004 @@ -825,13 +1382,13 @@
 81.1005    }
 81.1006  
 81.1007    // Direct accessors
 81.1008 -  int bci(uint row) {
 81.1009 +  int bci(uint row) const {
 81.1010      return int_at(bci_cell_index(row));
 81.1011    }
 81.1012 -  uint bci_count(uint row) {
 81.1013 +  uint bci_count(uint row) const {
 81.1014      return uint_at(bci_count_cell_index(row));
 81.1015    }
 81.1016 -  int bci_displacement(uint row) {
 81.1017 +  int bci_displacement(uint row) const {
 81.1018      return int_at(bci_displacement_cell_index(row));
 81.1019    }
 81.1020  
 81.1021 @@ -853,7 +1410,7 @@
 81.1022    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 81.1023  
 81.1024  #ifndef PRODUCT
 81.1025 -  void print_data_on(outputStream* st);
 81.1026 +  void print_data_on(outputStream* st) const;
 81.1027  #endif
 81.1028  };
 81.1029  
 81.1030 @@ -878,18 +1435,18 @@
 81.1031      assert(layout->tag() == DataLayout::branch_data_tag, "wrong type");
 81.1032    }
 81.1033  
 81.1034 -  virtual bool is_BranchData() { return true; }
 81.1035 +  virtual bool is_BranchData() const { return true; }
 81.1036  
 81.1037    static int static_cell_count() {
 81.1038      return branch_cell_count;
 81.1039    }
 81.1040  
 81.1041 -  virtual int cell_count() {
 81.1042 +  virtual int cell_count() const {
 81.1043      return static_cell_count();
 81.1044    }
 81.1045  
 81.1046    // Direct accessor
 81.1047 -  uint not_taken() {
 81.1048 +  uint not_taken() const {
 81.1049      return uint_at(not_taken_off_set);
 81.1050    }
 81.1051  
 81.1052 @@ -917,7 +1474,7 @@
 81.1053    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 81.1054  
 81.1055  #ifndef PRODUCT
 81.1056 -  void print_data_on(outputStream* st);
 81.1057 +  void print_data_on(outputStream* st) const;
 81.1058  #endif
 81.1059  };
 81.1060  
 81.1061 @@ -935,15 +1492,15 @@
 81.1062      array_start_off_set
 81.1063    };
 81.1064  
 81.1065 -  uint array_uint_at(int index) {
 81.1066 +  uint array_uint_at(int index) const {
 81.1067      int aindex = index + array_start_off_set;
 81.1068      return uint_at(aindex);
 81.1069    }
 81.1070 -  int array_int_at(int index) {
 81.1071 +  int array_int_at(int index) const {
 81.1072      int aindex = index + array_start_off_set;
 81.1073      return int_at(aindex);
 81.1074    }
 81.1075 -  oop array_oop_at(int index) {
 81.1076 +  oop array_oop_at(int index) const {
 81.1077      int aindex = index + array_start_off_set;
 81.1078      return oop_at(aindex);
 81.1079    }
 81.1080 @@ -960,17 +1517,17 @@
 81.1081  public:
 81.1082    ArrayData(DataLayout* layout) : ProfileData(layout) {}
 81.1083  
 81.1084 -  virtual bool is_ArrayData() { return true; }
 81.1085 +  virtual bool is_ArrayData() const { return true; }
 81.1086  
 81.1087    static int static_cell_count() {
 81.1088      return -1;
 81.1089    }
 81.1090  
 81.1091 -  int array_len() {
 81.1092 +  int array_len() const {
 81.1093      return int_at_unchecked(array_len_off_set);
 81.1094    }
 81.1095  
 81.1096 -  virtual int cell_count() {
 81.1097 +  virtual int cell_count() const {
 81.1098      return array_len() + 1;
 81.1099    }
 81.1100  
 81.1101 @@ -1017,29 +1574,29 @@
 81.1102      assert(layout->tag() == DataLayout::multi_branch_data_tag, "wrong type");
 81.1103    }
 81.1104  
 81.1105 -  virtual bool is_MultiBranchData() { return true; }
 81.1106 +  virtual bool is_MultiBranchData() const { return true; }
 81.1107  
 81.1108    static int compute_cell_count(BytecodeStream* stream);
 81.1109  
 81.1110 -  int number_of_cases() {
 81.1111 +  int number_of_cases() const {
 81.1112      int alen = array_len() - 2; // get rid of default case here.
 81.1113      assert(alen % per_case_cell_count == 0, "must be even");
 81.1114      return (alen / per_case_cell_count);
 81.1115    }
 81.1116  
 81.1117 -  uint default_count() {
 81.1118 +  uint default_count() const {
 81.1119      return array_uint_at(default_count_off_set);
 81.1120    }
 81.1121 -  int default_displacement() {
 81.1122 +  int default_displacement() const {
 81.1123      return array_int_at(default_disaplacement_off_set);
 81.1124    }
 81.1125  
 81.1126 -  uint count_at(int index) {
 81.1127 +  uint count_at(int index) const {
 81.1128      return array_uint_at(case_array_start +
 81.1129                           index * per_case_cell_count +
 81.1130                           relative_count_off_set);
 81.1131    }
 81.1132 -  int displacement_at(int index) {
 81.1133 +  int displacement_at(int index) const {
 81.1134      return array_int_at(case_array_start +
 81.1135                          index * per_case_cell_count +
 81.1136                          relative_displacement_off_set);
 81.1137 @@ -1074,7 +1631,7 @@
 81.1138    void post_initialize(BytecodeStream* stream, MethodData* mdo);
 81.1139  
 81.1140  #ifndef PRODUCT
 81.1141 -  void print_data_on(outputStream* st);
 81.1142 +  void print_data_on(outputStream* st) const;
 81.1143  #endif
 81.1144  };
 81.1145  
 81.1146 @@ -1085,14 +1642,14 @@
 81.1147      assert(layout->tag() == DataLayout::arg_info_data_tag, "wrong type");
 81.1148    }
 81.1149  
 81.1150 -  virtual bool is_ArgInfoData() { return true; }
 81.1151 +  virtual bool is_ArgInfoData() const { return true; }
 81.1152  
 81.1153  
 81.1154 -  int number_of_args() {
 81.1155 +  int number_of_args() const {
 81.1156      return array_len();
 81.1157    }
 81.1158  
 81.1159 -  uint arg_modified(int arg) {
 81.1160 +  uint arg_modified(int arg) const {
 81.1161      return array_uint_at(arg);
 81.1162    }
 81.1163  
 81.1164 @@ -1101,7 +1658,7 @@
 81.1165    }
 81.1166  
 81.1167  #ifndef PRODUCT
 81.1168 -  void print_data_on(outputStream* st);
 81.1169 +  void print_data_on(outputStream* st) const;
 81.1170  #endif
 81.1171  };
 81.1172  
 81.1173 @@ -1271,6 +1828,21 @@
 81.1174    // return the argument info cell
 81.1175    ArgInfoData *arg_info();
 81.1176  
 81.1177 +  enum {
 81.1178 +    no_type_profile = 0,
 81.1179 +    type_profile_jsr292 = 1,
 81.1180 +    type_profile_all = 2
 81.1181 +  };
 81.1182 +
 81.1183 +  static bool profile_jsr292(methodHandle m, int bci);
 81.1184 +  static int profile_arguments_flag();
 81.1185 +  static bool profile_arguments_jsr292_only();
 81.1186 +  static bool profile_all_arguments();
 81.1187 +  static bool profile_arguments_for_invoke(methodHandle m, int bci);
 81.1188 +  static int profile_return_flag();
 81.1189 +  static bool profile_all_return();
 81.1190 +  static bool profile_return_for_invoke(methodHandle m, int bci);
 81.1191 +
 81.1192  public:
 81.1193    static int header_size() {
 81.1194      return sizeof(MethodData)/wordSize;
 81.1195 @@ -1510,6 +2082,10 @@
 81.1196    // verification
 81.1197    void verify_on(outputStream* st);
 81.1198    void verify_data_on(outputStream* st);
 81.1199 +
 81.1200 +  static bool profile_arguments();
 81.1201 +  static bool profile_return();
 81.1202 +  static bool profile_return_jsr292_only();
 81.1203  };
 81.1204  
 81.1205  #endif // SHARE_VM_OOPS_METHODDATAOOP_HPP
    82.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Thu Oct 17 06:29:58 2013 -0700
    82.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Fri Oct 18 12:10:44 2013 -0700
    82.3 @@ -197,6 +197,7 @@
    82.4  // negative filter: should callee NOT be inlined?
    82.5  bool InlineTree::should_not_inline(ciMethod *callee_method,
    82.6                                     ciMethod* caller_method,
    82.7 +                                   JVMState* jvms,
    82.8                                     WarmCallInfo* wci_result) {
    82.9  
   82.10    const char* fail_msg = NULL;
   82.11 @@ -226,7 +227,7 @@
   82.12      // don't inline exception code unless the top method belongs to an
   82.13      // exception class
   82.14      if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   82.15 -      ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
   82.16 +      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
   82.17        if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   82.18          wci_result->set_profit(wci_result->profit() * 0.1);
   82.19        }
   82.20 @@ -328,7 +329,7 @@
   82.21  // return true if ok
   82.22  // Relocated from "InliningClosure::try_to_inline"
   82.23  bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
   82.24 -                               int caller_bci, ciCallProfile& profile,
   82.25 +                               int caller_bci, JVMState* jvms, ciCallProfile& profile,
   82.26                                 WarmCallInfo* wci_result, bool& should_delay) {
   82.27  
   82.28     // Old algorithm had funny accumulating BC-size counters
   82.29 @@ -346,7 +347,7 @@
   82.30                       wci_result)) {
   82.31      return false;
   82.32    }
   82.33 -  if (should_not_inline(callee_method, caller_method, wci_result)) {
   82.34 +  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
   82.35      return false;
   82.36    }
   82.37  
   82.38 @@ -397,24 +398,35 @@
   82.39    }
   82.40  
   82.41    // detect direct and indirect recursive inlining
   82.42 -  if (!callee_method->is_compiled_lambda_form()) {
   82.43 +  {
   82.44      // count the current method and the callee
   82.45 -    int inline_level = (method() == callee_method) ? 1 : 0;
   82.46 -    if (inline_level > MaxRecursiveInlineLevel) {
   82.47 -      set_msg("recursively inlining too deep");
   82.48 -      return false;
   82.49 +    const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
   82.50 +    int inline_level = 0;
   82.51 +    if (!is_compiled_lambda_form) {
   82.52 +      if (method() == callee_method) {
   82.53 +        inline_level++;
   82.54 +      }
   82.55      }
   82.56      // count callers of current method and callee
   82.57 -    JVMState* jvms = caller_jvms();
   82.58 -    while (jvms != NULL && jvms->has_method()) {
   82.59 -      if (jvms->method() == callee_method) {
   82.60 -        inline_level++;
   82.61 -        if (inline_level > MaxRecursiveInlineLevel) {
   82.62 -          set_msg("recursively inlining too deep");
   82.63 -          return false;
   82.64 +    Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
   82.65 +    for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
   82.66 +      if (j->method() == callee_method) {
   82.67 +        if (is_compiled_lambda_form) {
   82.68 +          // Since compiled lambda forms are heavily reused we allow recursive inlining.  If it is truly
   82.69 +          // a recursion (using the same "receiver") we limit inlining otherwise we can easily blow the
   82.70 +          // compiler stack.
   82.71 +          Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
   82.72 +          if (caller_argument0 == callee_argument0) {
   82.73 +            inline_level++;
   82.74 +          }
   82.75 +        } else {
   82.76 +          inline_level++;
   82.77          }
   82.78        }
   82.79 -      jvms = jvms->caller();
   82.80 +    }
   82.81 +    if (inline_level > MaxRecursiveInlineLevel) {
   82.82 +      set_msg("recursive inlining is too deep");
   82.83 +      return false;
   82.84      }
   82.85    }
   82.86  
   82.87 @@ -536,7 +548,7 @@
   82.88    // Check if inlining policy says no.
   82.89    WarmCallInfo wci = *(initial_wci);
   82.90    bool success = try_to_inline(callee_method, caller_method, caller_bci,
   82.91 -                               profile, &wci, should_delay);
   82.92 +                               jvms, profile, &wci, should_delay);
   82.93  
   82.94  #ifndef PRODUCT
   82.95    if (UseOldInlining && InlineWarmCalls
    83.1 --- a/src/share/vm/opto/c2compiler.cpp	Thu Oct 17 06:29:58 2013 -0700
    83.2 +++ b/src/share/vm/opto/c2compiler.cpp	Fri Oct 18 12:10:44 2013 -0700
    83.3 @@ -44,9 +44,6 @@
    83.4  # include "adfiles/ad_ppc.hpp"
    83.5  #endif
    83.6  
    83.7 -
    83.8 -volatile int C2Compiler::_runtimes = uninitialized;
    83.9 -
   83.10  // register information defined by ADLC
   83.11  extern const char register_save_policy[];
   83.12  extern const int  register_save_type[];
   83.13 @@ -57,7 +54,7 @@
   83.14  const char* C2Compiler::retry_no_escape_analysis() {
   83.15    return "retry without escape analysis";
   83.16  }
   83.17 -void C2Compiler::initialize_runtime() {
   83.18 +bool C2Compiler::init_c2_runtime() {
   83.19  
   83.20    // Check assumptions used while running ADLC
   83.21    Compile::adlc_verification();
   83.22 @@ -90,41 +87,31 @@
   83.23  
   83.24    CompilerThread* thread = CompilerThread::current();
   83.25  
   83.26 -  HandleMark  handle_mark(thread);
   83.27 -
   83.28 -  OptoRuntime::generate(thread->env());
   83.29 -
   83.30 +  HandleMark handle_mark(thread);
   83.31 +  return OptoRuntime::generate(thread->env());
   83.32  }
   83.33  
   83.34  
   83.35  void C2Compiler::initialize() {
   83.36 -
   83.37 -  // This method can only be called once per C2Compiler object
   83.38    // The first compiler thread that gets here will initialize the
   83.39 -  // small amount of global state (and runtime stubs) that c2 needs.
   83.40 +  // small amount of global state (and runtime stubs) that C2 needs.
   83.41  
   83.42    // There is a race possible once at startup and then we're fine
   83.43  
   83.44    // Note that this is being called from a compiler thread not the
   83.45    // main startup thread.
   83.46 -
   83.47 -  if (_runtimes != initialized) {
   83.48 -    initialize_runtimes( initialize_runtime, &_runtimes);
   83.49 +  if (should_perform_init()) {
   83.50 +    bool successful = C2Compiler::init_c2_runtime();
   83.51 +    int new_state = (successful) ? initialized : failed;
   83.52 +    set_state(new_state);
   83.53    }
   83.54 -
   83.55 -  // Mark this compiler object as ready to roll
   83.56 -  mark_initialized();
   83.57  }
   83.58  
   83.59 -void C2Compiler::compile_method(ciEnv* env,
   83.60 -                                ciMethod* target,
   83.61 -                                int entry_bci) {
   83.62 -  if (!is_initialized()) {
   83.63 -    initialize();
   83.64 -  }
   83.65 +void C2Compiler::compile_method(ciEnv* env, ciMethod* target, int entry_bci) {
   83.66 +  assert(is_initialized(), "Compiler thread must be initialized");
   83.67 +
   83.68    bool subsume_loads = SubsumeLoads;
   83.69 -  bool do_escape_analysis = DoEscapeAnalysis &&
   83.70 -    !env->jvmti_can_access_local_variables();
   83.71 +  bool do_escape_analysis = DoEscapeAnalysis && !env->jvmti_can_access_local_variables();
   83.72    bool eliminate_boxing = EliminateAutoBox;
   83.73    while (!env->failing()) {
   83.74      // Attempt to compile while subsuming loads into machine instructions.
    84.1 --- a/src/share/vm/opto/c2compiler.hpp	Thu Oct 17 06:29:58 2013 -0700
    84.2 +++ b/src/share/vm/opto/c2compiler.hpp	Fri Oct 18 12:10:44 2013 -0700
    84.3 @@ -28,24 +28,17 @@
    84.4  #include "compiler/abstractCompiler.hpp"
    84.5  
    84.6  class C2Compiler : public AbstractCompiler {
    84.7 -private:
    84.8 -
    84.9 -  static void initialize_runtime();
   84.10 + private:
   84.11 +  static bool init_c2_runtime();
   84.12  
   84.13  public:
   84.14    // Name
   84.15    const char *name() { return "C2"; }
   84.16  
   84.17 -  static volatile int _runtimes;
   84.18 -
   84.19  #ifdef TIERED
   84.20    virtual bool is_c2() { return true; };
   84.21  #endif // TIERED
   84.22  
   84.23 -  // Customization
   84.24 -  bool needs_adapters         () { return true; }
   84.25 -  bool needs_stubs            () { return true; }
   84.26 -
   84.27    void initialize();
   84.28  
   84.29    // Compilation entry point for methods
    85.1 --- a/src/share/vm/opto/chaitin.hpp	Thu Oct 17 06:29:58 2013 -0700
    85.2 +++ b/src/share/vm/opto/chaitin.hpp	Fri Oct 18 12:10:44 2013 -0700
    85.3 @@ -52,6 +52,7 @@
    85.4  class LRG : public ResourceObj {
    85.5    friend class VMStructs;
    85.6  public:
    85.7 +  static const uint AllStack_size = 0xFFFFF; // This mask size is used to tell that the mask of this LRG supports stack positions
    85.8    enum { SPILL_REG=29999 };     // Register number of a spilled LRG
    85.9  
   85.10    double _cost;                 // 2 for loads/1 for stores times block freq
   85.11 @@ -80,14 +81,21 @@
   85.12  private:
   85.13    uint _eff_degree;             // Effective degree: Sum of neighbors _num_regs
   85.14  public:
   85.15 -  int degree() const { assert( _degree_valid, "" ); return _eff_degree; }
   85.16 +  int degree() const { assert( _degree_valid , "" ); return _eff_degree; }
   85.17    // Degree starts not valid and any change to the IFG neighbor
   85.18    // set makes it not valid.
   85.19 -  void set_degree( uint degree ) { _eff_degree = degree; debug_only(_degree_valid = 1;) }
   85.20 +  void set_degree( uint degree ) {
   85.21 +    _eff_degree = degree;
   85.22 +    debug_only(_degree_valid = 1;)
   85.23 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   85.24 +  }
   85.25    // Made a change that hammered degree
   85.26    void invalid_degree() { debug_only(_degree_valid=0;) }
   85.27    // Incrementally modify degree.  If it was correct, it should remain correct
   85.28 -  void inc_degree( uint mod ) { _eff_degree += mod; }
   85.29 +  void inc_degree( uint mod ) {
   85.30 +    _eff_degree += mod;
   85.31 +    assert(!_mask.is_AllStack() || (_mask.is_AllStack() && lo_degree()), "_eff_degree can't be bigger than AllStack_size - _num_regs if the mask supports stack registers");
   85.32 +  }
   85.33    // Compute the degree between 2 live ranges
   85.34    int compute_degree( LRG &l ) const;
   85.35  
   85.36 @@ -95,9 +103,9 @@
   85.37    RegMask _mask;                // Allowed registers for this LRG
   85.38    uint _mask_size;              // cache of _mask.Size();
   85.39  public:
   85.40 -  int compute_mask_size() const { return _mask.is_AllStack() ? 65535 : _mask.Size(); }
   85.41 +  int compute_mask_size() const { return _mask.is_AllStack() ? AllStack_size : _mask.Size(); }
   85.42    void set_mask_size( int size ) {
   85.43 -    assert((size == 65535) || (size == (int)_mask.Size()), "");
   85.44 +    assert((size == (int)AllStack_size) || (size == (int)_mask.Size()), "");
   85.45      _mask_size = size;
   85.46  #ifdef ASSERT
   85.47      _msize_valid=1;
    86.1 --- a/src/share/vm/opto/compile.cpp	Thu Oct 17 06:29:58 2013 -0700
    86.2 +++ b/src/share/vm/opto/compile.cpp	Fri Oct 18 12:10:44 2013 -0700
    86.3 @@ -47,6 +47,7 @@
    86.4  #include "opto/machnode.hpp"
    86.5  #include "opto/macro.hpp"
    86.6  #include "opto/matcher.hpp"
    86.7 +#include "opto/mathexactnode.hpp"
    86.8  #include "opto/memnode.hpp"
    86.9  #include "opto/mulnode.hpp"
   86.10  #include "opto/node.hpp"
   86.11 @@ -2986,6 +2987,32 @@
   86.12        n->set_req(MemBarNode::Precedent, top());
   86.13      }
   86.14      break;
   86.15 +    // Must set a control edge on all nodes that produce a FlagsProj
   86.16 +    // so they can't escape the block that consumes the flags.
   86.17 +    // Must also set the non throwing branch as the control
   86.18 +    // for all nodes that depends on the result. Unless the node
   86.19 +    // already have a control that isn't the control of the
   86.20 +    // flag producer
   86.21 +  case Op_FlagsProj:
   86.22 +    {
   86.23 +      MathExactNode* math = (MathExactNode*)  n->in(0);
   86.24 +      Node* ctrl = math->control_node();
   86.25 +      Node* non_throwing = math->non_throwing_branch();
   86.26 +      math->set_req(0, ctrl);
   86.27 +
   86.28 +      Node* result = math->result_node();
   86.29 +      if (result != NULL) {
   86.30 +        for (DUIterator_Fast jmax, j = result->fast_outs(jmax); j < jmax; j++) {
   86.31 +          Node* out = result->fast_out(j);
   86.32 +          if (out->in(0) == NULL) {
   86.33 +            out->set_req(0, non_throwing);
   86.34 +          } else if (out->in(0) == ctrl) {
   86.35 +            out->set_req(0, non_throwing);
   86.36 +          }
   86.37 +        }
   86.38 +      }
   86.39 +    }
   86.40 +    break;
   86.41    default:
   86.42      assert( !n->is_Call(), "" );
   86.43      assert( !n->is_Mem(), "" );
    87.1 --- a/src/share/vm/opto/escape.cpp	Thu Oct 17 06:29:58 2013 -0700
    87.2 +++ b/src/share/vm/opto/escape.cpp	Fri Oct 18 12:10:44 2013 -0700
    87.3 @@ -780,6 +780,7 @@
    87.4        }
    87.5      } else {  // Allocate instance
    87.6        if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
    87.7 +          cik->is_subclass_of(_compile->env()->Reference_klass()) ||
    87.8           !cik->is_instance_klass() || // StressReflectiveCode
    87.9            cik->as_instance_klass()->has_finalizer()) {
   87.10          es = PointsToNode::GlobalEscape;
    88.1 --- a/src/share/vm/opto/graphKit.cpp	Thu Oct 17 06:29:58 2013 -0700
    88.2 +++ b/src/share/vm/opto/graphKit.cpp	Fri Oct 18 12:10:44 2013 -0700
    88.3 @@ -2122,7 +2122,7 @@
    88.4  // Null check oop.  Set null-path control into Region in slot 3.
    88.5  // Make a cast-not-nullness use the other not-null control.  Return cast.
    88.6  Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
    88.7 -                               bool never_see_null) {
    88.8 +                               bool never_see_null, bool safe_for_replace) {
    88.9    // Initial NULL check taken path
   88.10    (*null_control) = top();
   88.11    Node* cast = null_check_common(value, T_OBJECT, false, null_control);
   88.12 @@ -2140,6 +2140,9 @@
   88.13                    Deoptimization::Action_make_not_entrant);
   88.14      (*null_control) = top();    // NULL path is dead
   88.15    }
   88.16 +  if ((*null_control) == top() && safe_for_replace) {
   88.17 +    replace_in_map(value, cast);
   88.18 +  }
   88.19  
   88.20    // Cast away null-ness on the result
   88.21    return cast;
   88.22 @@ -2634,15 +2637,17 @@
   88.23    C->set_has_split_ifs(true); // Has chance for split-if optimization
   88.24  
   88.25    ciProfileData* data = NULL;
   88.26 +  bool safe_for_replace = false;
   88.27    if (java_bc() == Bytecodes::_instanceof) {  // Only for the bytecode
   88.28      data = method()->method_data()->bci_to_data(bci());
   88.29 +    safe_for_replace = true;
   88.30    }
   88.31    bool never_see_null = (ProfileDynamicTypes  // aggressive use of profile
   88.32                           && seems_never_null(obj, data));
   88.33  
   88.34    // Null check; get casted pointer; set region slot 3
   88.35    Node* null_ctl = top();
   88.36 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   88.37 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   88.38  
   88.39    // If not_null_obj is dead, only null-path is taken
   88.40    if (stopped()) {              // Doing instance-of on a NULL?
   88.41 @@ -2723,11 +2728,13 @@
   88.42    }
   88.43  
   88.44    ciProfileData* data = NULL;
   88.45 +  bool safe_for_replace = false;
   88.46    if (failure_control == NULL) {        // use MDO in regular case only
   88.47      assert(java_bc() == Bytecodes::_aastore ||
   88.48             java_bc() == Bytecodes::_checkcast,
   88.49             "interpreter profiles type checks only for these BCs");
   88.50      data = method()->method_data()->bci_to_data(bci());
   88.51 +    safe_for_replace = true;
   88.52    }
   88.53  
   88.54    // Make the merge point
   88.55 @@ -2742,7 +2749,7 @@
   88.56  
   88.57    // Null check; get casted pointer; set region slot 3
   88.58    Node* null_ctl = top();
   88.59 -  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
   88.60 +  Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
   88.61  
   88.62    // If not_null_obj is dead, only null-path is taken
   88.63    if (stopped()) {              // Doing instance-of on a NULL?
   88.64 @@ -3608,7 +3615,7 @@
   88.65    Node* marking = __ load(__ ctrl(), marking_adr, TypeInt::INT, active_type, Compile::AliasIdxRaw);
   88.66  
   88.67    // if (!marking)
   88.68 -  __ if_then(marking, BoolTest::ne, zero); {
   88.69 +  __ if_then(marking, BoolTest::ne, zero, unlikely); {
   88.70      BasicType index_bt = TypeX_X->basic_type();
   88.71      assert(sizeof(size_t) == type2aelembytes(index_bt), "Loading G1 PtrQueue::_index with wrong size.");
   88.72      Node* index   = __ load(__ ctrl(), index_adr, TypeX_X, index_bt, Compile::AliasIdxRaw);
    89.1 --- a/src/share/vm/opto/graphKit.hpp	Thu Oct 17 06:29:58 2013 -0700
    89.2 +++ b/src/share/vm/opto/graphKit.hpp	Fri Oct 18 12:10:44 2013 -0700
    89.3 @@ -378,8 +378,10 @@
    89.4    // Return a cast-not-null node which depends on the not-null control.
    89.5    // If never_see_null, use an uncommon trap (*null_control sees a top).
    89.6    // The cast is not valid along the null path; keep a copy of the original.
    89.7 +  // If safe_for_replace, then we can replace the value with the cast
    89.8 +  // in the parsing map (the cast is guaranteed to dominate the map)
    89.9    Node* null_check_oop(Node* value, Node* *null_control,
   89.10 -                       bool never_see_null = false);
   89.11 +                       bool never_see_null = false, bool safe_for_replace = false);
   89.12  
   89.13    // Check the null_seen bit.
   89.14    bool seems_never_null(Node* obj, ciProfileData* data);
    90.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Thu Oct 17 06:29:58 2013 -0700
    90.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Fri Oct 18 12:10:44 2013 -0700
    90.3 @@ -616,7 +616,11 @@
    90.4        buffer[0] = 0;
    90.5        _chaitin->dump_register(node, buffer);
    90.6        print_prop("reg", buffer);
    90.7 -      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
    90.8 +      uint lrg_id = 0;
    90.9 +      if (node->_idx < _chaitin->_lrg_map.size()) {
   90.10 +        lrg_id = _chaitin->_lrg_map.live_range_id(node);
   90.11 +      }
   90.12 +      print_prop("lrg", lrg_id);
   90.13      }
   90.14  
   90.15      node->_in_dump_cnt--;
    91.1 --- a/src/share/vm/opto/ifg.cpp	Thu Oct 17 06:29:58 2013 -0700
    91.2 +++ b/src/share/vm/opto/ifg.cpp	Fri Oct 18 12:10:44 2013 -0700
    91.3 @@ -677,7 +677,7 @@
    91.4              } else {            // Common case: size 1 bound removal
    91.5                if( lrg.mask().Member(r_reg) ) {
    91.6                  lrg.Remove(r_reg);
    91.7 -                lrg.set_mask_size(lrg.mask().is_AllStack() ? 65535:old_size-1);
    91.8 +                lrg.set_mask_size(lrg.mask().is_AllStack() ? LRG::AllStack_size : old_size - 1);
    91.9                }
   91.10              }
   91.11              // If 'l' goes completely dry, it must spill.
    92.1 --- a/src/share/vm/opto/ifnode.cpp	Thu Oct 17 06:29:58 2013 -0700
    92.2 +++ b/src/share/vm/opto/ifnode.cpp	Fri Oct 18 12:10:44 2013 -0700
    92.3 @@ -689,6 +689,7 @@
    92.4          ctrl->in(0)->in(1)->is_Bool() &&
    92.5          ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
    92.6          ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
    92.7 +        ctrl->in(0)->in(1)->in(1)->in(2) != phase->C->top() &&
    92.8          ctrl->in(0)->in(1)->in(1)->in(1) == n) {
    92.9        IfNode* dom_iff = ctrl->in(0)->as_If();
   92.10        Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
    93.1 --- a/src/share/vm/opto/mathexactnode.cpp	Thu Oct 17 06:29:58 2013 -0700
    93.2 +++ b/src/share/vm/opto/mathexactnode.cpp	Fri Oct 18 12:10:44 2013 -0700
    93.3 @@ -25,9 +25,10 @@
    93.4  #include "precompiled.hpp"
    93.5  #include "memory/allocation.inline.hpp"
    93.6  #include "opto/addnode.hpp"
    93.7 +#include "opto/cfgnode.hpp"
    93.8  #include "opto/machnode.hpp"
    93.9 +#include "opto/matcher.hpp"
   93.10  #include "opto/mathexactnode.hpp"
   93.11 -#include "opto/matcher.hpp"
   93.12  #include "opto/subnode.hpp"
   93.13  
   93.14  MathExactNode::MathExactNode(Node* ctrl, Node* n1, Node* n2) : MultiNode(3) {
   93.15 @@ -36,6 +37,33 @@
   93.16    init_req(2, n2);
   93.17  }
   93.18  
   93.19 +BoolNode* MathExactNode::bool_node() const {
   93.20 +  Node* flags = flags_node();
   93.21 +  BoolNode* boolnode = flags->unique_out()->as_Bool();
   93.22 +  assert(boolnode != NULL, "must have BoolNode");
   93.23 +  return boolnode;
   93.24 +}
   93.25 +
   93.26 +IfNode* MathExactNode::if_node() const {
   93.27 +  BoolNode* boolnode = bool_node();
   93.28 +  IfNode* ifnode = boolnode->unique_out()->as_If();
   93.29 +  assert(ifnode != NULL, "must have IfNode");
   93.30 +  return ifnode;
   93.31 +}
   93.32 +
   93.33 +Node* MathExactNode::control_node() const {
   93.34 +  IfNode* ifnode = if_node();
   93.35 +  return ifnode->in(0);
   93.36 +}
   93.37 +
   93.38 +Node* MathExactNode::non_throwing_branch() const {
   93.39 +  IfNode* ifnode = if_node();
   93.40 +  if (bool_node()->_test._test == BoolTest::overflow) {
   93.41 +    return ifnode->proj_out(0);
   93.42 +  }
   93.43 +  return ifnode->proj_out(1);
   93.44 +}
   93.45 +
   93.46  Node* AddExactINode::match(const ProjNode* proj, const Matcher* m) {
   93.47    uint ideal_reg = proj->ideal_reg();
   93.48    RegMask rm;
   93.49 @@ -62,15 +90,15 @@
   93.50      }
   93.51  
   93.52      if (flags != NULL) {
   93.53 -      BoolNode* bolnode = (BoolNode *) flags->unique_out();
   93.54 -      switch (bolnode->_test._test) {
   93.55 +      BoolNode* boolnode = bool_node();
   93.56 +      switch (boolnode->_test._test) {
   93.57          case BoolTest::overflow:
   93.58            // if the check is for overflow - never taken
   93.59 -          igvn->replace_node(bolnode, phase->intcon(0));
   93.60 +          igvn->replace_node(boolnode, phase->intcon(0));
   93.61            break;
   93.62          case BoolTest::no_overflow:
   93.63            // if the check is for no overflow - always taken
   93.64 -          igvn->replace_node(bolnode, phase->intcon(1));
   93.65 +          igvn->replace_node(boolnode, phase->intcon(1));
   93.66            break;
   93.67          default:
   93.68            fatal("Unexpected value of BoolTest");
    94.1 --- a/src/share/vm/opto/mathexactnode.hpp	Thu Oct 17 06:29:58 2013 -0700
    94.2 +++ b/src/share/vm/opto/mathexactnode.hpp	Fri Oct 18 12:10:44 2013 -0700
    94.3 @@ -27,8 +27,11 @@
    94.4  
    94.5  #include "opto/multnode.hpp"
    94.6  #include "opto/node.hpp"
    94.7 +#include "opto/subnode.hpp"
    94.8  #include "opto/type.hpp"
    94.9  
   94.10 +class BoolNode;
   94.11 +class IfNode;
   94.12  class Node;
   94.13  
   94.14  class PhaseGVN;
   94.15 @@ -49,9 +52,13 @@
   94.16    virtual bool is_CFG() const { return false; }
   94.17    virtual uint ideal_reg() const { return NotAMachineReg; }
   94.18  
   94.19 -  ProjNode* result_node() { return proj_out(result_proj_node); }
   94.20 -  ProjNode* flags_node() { return proj_out(flags_proj_node); }
   94.21 +  ProjNode* result_node() const { return proj_out(result_proj_node); }
   94.22 +  ProjNode* flags_node() const { return proj_out(flags_proj_node); }
   94.23 +  Node* control_node() const;
   94.24 +  Node* non_throwing_branch() const;
   94.25  protected:
   94.26 +  IfNode* if_node() const;
   94.27 +  BoolNode* bool_node() const;
   94.28    Node* no_overflow(PhaseGVN *phase, Node* new_result);
   94.29  };
   94.30  
    95.1 --- a/src/share/vm/opto/parse.hpp	Thu Oct 17 06:29:58 2013 -0700
    95.2 +++ b/src/share/vm/opto/parse.hpp	Fri Oct 18 12:10:44 2013 -0700
    95.3 @@ -73,6 +73,7 @@
    95.4    bool        try_to_inline(ciMethod* callee_method,
    95.5                              ciMethod* caller_method,
    95.6                              int caller_bci,
    95.7 +                            JVMState* jvms,
    95.8                              ciCallProfile& profile,
    95.9                              WarmCallInfo* wci_result,
   95.10                              bool& should_delay);
   95.11 @@ -83,6 +84,7 @@
   95.12                              WarmCallInfo* wci_result);
   95.13    bool        should_not_inline(ciMethod* callee_method,
   95.14                                  ciMethod* caller_method,
   95.15 +                                JVMState* jvms,
   95.16                                  WarmCallInfo* wci_result);
   95.17    void        print_inlining(ciMethod* callee_method, int caller_bci,
   95.18                               bool success) const;
    96.1 --- a/src/share/vm/opto/parse2.cpp	Thu Oct 17 06:29:58 2013 -0700
    96.2 +++ b/src/share/vm/opto/parse2.cpp	Fri Oct 18 12:10:44 2013 -0700
    96.3 @@ -268,7 +268,7 @@
    96.4      return adjoinRange(value, value, dest, table_index);
    96.5    }
    96.6  
    96.7 -  void print(ciEnv* env) {
    96.8 +  void print() {
    96.9      if (is_singleton())
   96.10        tty->print(" {%d}=>%d", lo(), dest());
   96.11      else if (lo() == min_jint)
   96.12 @@ -471,8 +471,8 @@
   96.13    // These are the switch destinations hanging off the jumpnode
   96.14    int i = 0;
   96.15    for (SwitchRange* r = lo; r <= hi; r++) {
   96.16 -    for (int j = r->lo(); j <= r->hi(); j++, i++) {
   96.17 -      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
   96.18 +    for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
   96.19 +      Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
   96.20        {
   96.21          PreserveJVMState pjvms(this);
   96.22          set_control(input);
   96.23 @@ -632,7 +632,7 @@
   96.24      }
   96.25      tty->print("   ");
   96.26      for( r = lo; r <= hi; r++ ) {
   96.27 -      r->print(env());
   96.28 +      r->print();
   96.29      }
   96.30      tty->print_cr("");
   96.31    }
    97.1 --- a/src/share/vm/opto/parseHelper.cpp	Thu Oct 17 06:29:58 2013 -0700
    97.2 +++ b/src/share/vm/opto/parseHelper.cpp	Fri Oct 18 12:10:44 2013 -0700
    97.3 @@ -343,10 +343,14 @@
    97.4  
    97.5    // Get the Method* node.
    97.6    ciMethod* m = method();
    97.7 -  address counters_adr = m->ensure_method_counters();
    97.8 +  MethodCounters* counters_adr = m->ensure_method_counters();
    97.9 +  if (counters_adr == NULL) {
   97.10 +    C->record_failure("method counters allocation failed");
   97.11 +    return;
   97.12 +  }
   97.13  
   97.14    Node* ctrl = control();
   97.15 -  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
   97.16 +  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   97.17    Node *counters_node = makecon(adr_type);
   97.18    Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
   97.19      MethodCounters::interpreter_invocation_counter_offset_in_bytes());
    98.1 --- a/src/share/vm/opto/reg_split.cpp	Thu Oct 17 06:29:58 2013 -0700
    98.2 +++ b/src/share/vm/opto/reg_split.cpp	Fri Oct 18 12:10:44 2013 -0700
    98.3 @@ -375,6 +375,7 @@
    98.4        }
    98.5  
    98.6        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
    98.7 +        assert(Reachblock != NULL, "Reachblock must be non-NULL");
    98.8          Node *rdef = Reachblock[lrg2reach[lidx]];
    98.9          if (rdef) {
   98.10            spill->set_req(i, rdef);
   98.11 @@ -1336,7 +1337,8 @@
   98.12                 _lrg_map.find(pred->get_node(insert - 1)) >= lrgs_before_phi_split) {
   98.13            insert--;
   98.14          }
   98.15 -        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
   98.16 +        // since the def cannot contain any live range input, we can pass in NULL as Reachlock parameter
   98.17 +        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, NULL, false);
   98.18          if (!def) {
   98.19            return 0;    // Bail out
   98.20          }
    99.1 --- a/src/share/vm/opto/runtime.cpp	Thu Oct 17 06:29:58 2013 -0700
    99.2 +++ b/src/share/vm/opto/runtime.cpp	Fri Oct 18 12:10:44 2013 -0700
    99.3 @@ -138,9 +138,10 @@
    99.4  
    99.5  
    99.6  #define gen(env, var, type_func_gen, c_func, fancy_jump, pass_tls, save_arg_regs, return_pc) \
    99.7 -  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc)
    99.8 +  var = generate_stub(env, type_func_gen, CAST_FROM_FN_PTR(address, c_func), #var, fancy_jump, pass_tls, save_arg_regs, return_pc); \
    99.9 +  if (var == NULL) { return false; }
   99.10  
   99.11 -void OptoRuntime::generate(ciEnv* env) {
   99.12 +bool OptoRuntime::generate(ciEnv* env) {
   99.13  
   99.14    generate_exception_blob();
   99.15  
   99.16 @@ -158,7 +159,7 @@
   99.17    gen(env, _multianewarrayN_Java           , multianewarrayN_Type         , multianewarrayN_C               ,    0 , true , false, false);
   99.18    gen(env, _g1_wb_pre_Java                 , g1_wb_pre_Type               , SharedRuntime::g1_wb_pre        ,    0 , false, false, false);
   99.19    gen(env, _g1_wb_post_Java                , g1_wb_post_Type              , SharedRuntime::g1_wb_post       ,    0 , false, false, false);
   99.20 -  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C      ,    0 , false, false, false);
   99.21 +  gen(env, _complete_monitor_locking_Java  , complete_monitor_enter_Type  , SharedRuntime::complete_monitor_locking_C, 0, false, false, false);
   99.22    gen(env, _rethrow_Java                   , rethrow_Type                 , rethrow_C                       ,    2 , true , false, true );
   99.23  
   99.24    gen(env, _slow_arraycopy_Java            , slow_arraycopy_Type          , SharedRuntime::slow_arraycopy_C ,    0 , false, false, false);
   99.25 @@ -168,7 +169,7 @@
   99.26    gen(env, _zap_dead_Java_locals_Java      , zap_dead_locals_Type         , zap_dead_Java_locals_C          ,    0 , false, true , false );
   99.27    gen(env, _zap_dead_native_locals_Java    , zap_dead_locals_Type         , zap_dead_native_locals_C        ,    0 , false, true , false );
   99.28  # endif
   99.29 -
   99.30 +  return true;
   99.31  }
   99.32  
   99.33  #undef gen
   99.34 @@ -976,30 +977,36 @@
   99.35    address handler_address = NULL;
   99.36  
   99.37    Handle exception(thread, thread->exception_oop());
   99.38 +  address pc = thread->exception_pc();
   99.39 +
   99.40 +  // Clear out the exception oop and pc since looking up an
   99.41 +  // exception handler can cause class loading, which might throw an
   99.42 +  // exception and those fields are expected to be clear during
   99.43 +  // normal bytecode execution.
   99.44 +  thread->clear_exception_oop_and_pc();
   99.45  
   99.46    if (TraceExceptions) {
   99.47 -    trace_exception(exception(), thread->exception_pc(), "");
   99.48 +    trace_exception(exception(), pc, "");
   99.49    }
   99.50 +
   99.51    // for AbortVMOnException flag
   99.52    NOT_PRODUCT(Exceptions::debug_check_abort(exception));
   99.53  
   99.54 -  #ifdef ASSERT
   99.55 -    if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   99.56 -      // should throw an exception here
   99.57 -      ShouldNotReachHere();
   99.58 -    }
   99.59 -  #endif
   99.60 -
   99.61 +#ifdef ASSERT
   99.62 +  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
   99.63 +    // should throw an exception here
   99.64 +    ShouldNotReachHere();
   99.65 +  }
   99.66 +#endif
   99.67  
   99.68    // new exception handling: this method is entered only from adapters
   99.69    // exceptions from compiled java methods are handled in compiled code
   99.70    // using rethrow node
   99.71  
   99.72 -  address pc = thread->exception_pc();
   99.73    nm = CodeCache::find_nmethod(pc);
   99.74    assert(nm != NULL, "No NMethod found");
   99.75    if (nm->is_native_method()) {
   99.76 -    fatal("Native mathod should not have path to exception handling");
   99.77 +    fatal("Native method should not have path to exception handling");
   99.78    } else {
   99.79      // we are switching to old paradigm: search for exception handler in caller_frame
   99.80      // instead in exception handler of caller_frame.sender()
   99.81 @@ -1346,7 +1353,8 @@
   99.82    tty->print(" in ");
   99.83    CodeBlob* blob = CodeCache::find_blob(exception_pc);
   99.84    if (blob->is_nmethod()) {
   99.85 -    ((nmethod*)blob)->method()->print_value();
   99.86 +    nmethod* nm = blob->as_nmethod_or_null();
   99.87 +    nm->method()->print_value();
   99.88    } else if (blob->is_runtime_stub()) {
   99.89      tty->print("<runtime-stub>");
   99.90    } else {
   100.1 --- a/src/share/vm/opto/runtime.hpp	Thu Oct 17 06:29:58 2013 -0700
   100.2 +++ b/src/share/vm/opto/runtime.hpp	Fri Oct 18 12:10:44 2013 -0700
   100.3 @@ -203,8 +203,10 @@
   100.4  
   100.5    static bool is_callee_saved_register(MachRegisterNumbers reg);
   100.6  
   100.7 -  // One time only generate runtime code stubs
   100.8 -  static void generate(ciEnv* env);
   100.9 +  // One time only generate runtime code stubs. Returns true
  100.10 +  // when runtime stubs have been generated successfully and
  100.11 +  // false otherwise.
  100.12 +  static bool generate(ciEnv* env);
  100.13  
  100.14    // Returns the name of a stub
  100.15    static const char* stub_name(address entry);
   101.1 --- a/src/share/vm/opto/stringopts.cpp	Thu Oct 17 06:29:58 2013 -0700
   101.2 +++ b/src/share/vm/opto/stringopts.cpp	Fri Oct 18 12:10:44 2013 -0700
   101.3 @@ -1,5 +1,5 @@
   101.4  /*
   101.5 - * Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
   101.6 + * Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
   101.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   101.8   *
   101.9   * This code is free software; you can redistribute it and/or modify it
  101.10 @@ -50,10 +50,11 @@
  101.11    Node*               _arguments;      // The list of arguments to be concatenated
  101.12    GrowableArray<int>  _mode;           // into a String along with a mode flag
  101.13                                         // indicating how to treat the value.
  101.14 -
  101.15 +  Node_List           _constructors;   // List of constructors (many in case of stacked concat)
  101.16    Node_List           _control;        // List of control nodes that will be deleted
  101.17    Node_List           _uncommon_traps; // Uncommon traps that needs to be rewritten
  101.18                                         // to restart at the initial JVMState.
  101.19 +
  101.20   public:
  101.21    // Mode for converting arguments to Strings
  101.22    enum {
  101.23 @@ -73,6 +74,7 @@
  101.24      _arguments->del_req(0);
  101.25    }
  101.26  
  101.27 +  bool validate_mem_flow();
  101.28    bool validate_control_flow();
  101.29  
  101.30    void merge_add() {
  101.31 @@ -189,6 +191,10 @@
  101.32      assert(!_control.contains(ctrl), "only push once");
  101.33      _control.push(ctrl);
  101.34    }
  101.35 +  void add_constructor(Node* init) {
  101.36 +    assert(!_constructors.contains(init), "only push once");
  101.37 +    _constructors.push(init);
  101.38 +  }
  101.39    CallStaticJavaNode* end() { return _end; }
  101.40    AllocateNode* begin() { return _begin; }
  101.41    Node* string_alloc() { return _string_alloc; }
  101.42 @@ -301,6 +307,12 @@
  101.43      }
  101.44    }
  101.45    result->set_allocation(other->_begin);
  101.46 +  for (uint i = 0; i < _constructors.size(); i++) {
  101.47 +    result->add_constructor(_constructors.at(i));
  101.48 +  }
  101.49 +  for (uint i = 0; i < other->_constructors.size(); i++) {
  101.50 +    result->add_constructor(other->_constructors.at(i));
  101.51 +  }
  101.52    result->_multiple = true;
  101.53    return result;
  101.54  }
  101.55 @@ -510,7 +522,8 @@
  101.56        sc->add_control(constructor);
  101.57        sc->add_control(alloc);
  101.58        sc->set_allocation(alloc);
  101.59 -      if (sc->validate_control_flow()) {
  101.60 +      sc->add_constructor(constructor);
  101.61 +      if (sc->validate_control_flow() && sc->validate_mem_flow()) {
  101.62          return sc;
  101.63        } else {
  101.64          return NULL;
  101.65 @@ -620,7 +633,7 @@
  101.66  #endif
  101.67  
  101.68              StringConcat* merged = sc->merge(other, arg);
  101.69 -            if (merged->validate_control_flow()) {
  101.70 +            if (merged->validate_control_flow() && merged->validate_mem_flow()) {
  101.71  #ifndef PRODUCT
  101.72                if (PrintOptimizeStringConcat) {
  101.73                  tty->print_cr("stacking would succeed");
  101.74 @@ -708,6 +721,139 @@
  101.75  }
  101.76  
  101.77  
  101.78 +bool StringConcat::validate_mem_flow() {
  101.79 +  Compile* C = _stringopts->C;
  101.80 +
  101.81 +  for (uint i = 0; i < _control.size(); i++) {
  101.82 +#ifndef PRODUCT
  101.83 +    Node_List path;
  101.84 +#endif
  101.85 +    Node* curr = _control.at(i);
  101.86 +    if (curr->is_Call() && curr != _begin) { // For all calls except the first allocation
  101.87 +      // Now here's the main invariant in our case:
  101.88 +      // For memory between the constructor, and appends, and toString we should only see bottom memory,
  101.89 +      // produced by the previous call we know about.
  101.90 +      if (!_constructors.contains(curr)) {
  101.91 +        NOT_PRODUCT(path.push(curr);)
  101.92 +        Node* mem = curr->in(TypeFunc::Memory);
  101.93 +        assert(mem != NULL, "calls should have memory edge");
  101.94 +        assert(!mem->is_Phi(), "should be handled by control flow validation");
  101.95 +        NOT_PRODUCT(path.push(mem);)
  101.96 +        while (mem->is_MergeMem()) {
  101.97 +          for (uint i = 1; i < mem->req(); i++) {
  101.98 +            if (i != Compile::AliasIdxBot && mem->in(i) != C->top()) {
  101.99 +#ifndef PRODUCT
 101.100 +              if (PrintOptimizeStringConcat) {
 101.101 +                tty->print("fusion has incorrect memory flow (side effects) for ");
 101.102 +                _begin->jvms()->dump_spec(tty); tty->cr();
 101.103 +                path.dump();
 101.104 +              }
 101.105 +#endif
 101.106 +              return false;
 101.107 +            }
 101.108 +          }
 101.109 +          // skip through a potential MergeMem chain, linked through Bot
 101.110 +          mem = mem->in(Compile::AliasIdxBot);
 101.111 +          NOT_PRODUCT(path.push(mem);)
 101.112 +        }
 101.113 +        // now let it fall through, and see if we have a projection
 101.114 +        if (mem->is_Proj()) {
 101.115 +          // Should point to a previous known call
 101.116 +          Node *prev = mem->in(0);
 101.117 +          NOT_PRODUCT(path.push(prev);)
 101.118 +          if (!prev->is_Call() || !_control.contains(prev)) {
 101.119 +#ifndef PRODUCT
 101.120 +            if (PrintOptimizeStringConcat) {
 101.121 +              tty->print("fusion has incorrect memory flow (unknown call) for ");
 101.122 +              _begin->jvms()->dump_spec(tty); tty->cr();
 101.123 +              path.dump();
 101.124 +            }
 101.125 +#endif
 101.126 +            return false;
 101.127 +          }
 101.128 +        } else {
 101.129 +          assert(mem->is_Store() || mem->is_LoadStore(), err_msg_res("unexpected node type: %s", mem->Name()));
 101.130 +#ifndef PRODUCT
 101.131 +          if (PrintOptimizeStringConcat) {
 101.132 +            tty->print("fusion has incorrect memory flow (unexpected source) for ");
 101.133 +            _begin->jvms()->dump_spec(tty); tty->cr();
 101.134 +            path.dump();
 101.135 +          }
 101.136 +#endif
 101.137 +          return false;
 101.138 +        }
 101.139 +      } else {
 101.140 +        // For memory that feeds into constructors it's more complicated.
 101.141 +        // However the advantage is that any side effect that happens between the Allocate/Initialize and
 101.142 +        // the constructor will have to be control-dependent on Initialize.
 101.143 +        // So we actually don't have to do anything, since it's going to be caught by the control flow
 101.144 +        // analysis.
 101.145 +#ifdef ASSERT
 101.146 +        // Do a quick verification of the control pattern between the constructor and the initialize node
 101.147 +        assert(curr->is_Call(), "constructor should be a call");
 101.148 +        // Go up the control starting from the constructor call
 101.149 +        Node* ctrl = curr->in(0);
 101.150 +        IfNode* iff = NULL;
 101.151 +        RegionNode* copy = NULL;
 101.152 +
 101.153 +        while (true) {
 101.154 +          // skip known check patterns
 101.155 +          if (ctrl->is_Region()) {
 101.156 +            if (ctrl->as_Region()->is_copy()) {
 101.157 +              copy = ctrl->as_Region();
 101.158 +              ctrl = copy->is_copy();
 101.159 +            } else { // a cast
 101.160 +              assert(ctrl->req() == 3 &&
 101.161 +                     ctrl->in(1) != NULL && ctrl->in(1)->is_Proj() &&
 101.162 +                     ctrl->in(2) != NULL && ctrl->in(2)->is_Proj() &&
 101.163 +                     ctrl->in(1)->in(0) == ctrl->in(2)->in(0) &&
 101.164 +                     ctrl->in(1)->in(0) != NULL && ctrl->in(1)->in(0)->is_If(),
 101.165 +                     "must be a simple diamond");
 101.166 +              Node* true_proj = ctrl->in(1)->is_IfTrue() ? ctrl->in(1) : ctrl->in(2);
 101.167 +              for (SimpleDUIterator i(true_proj); i.has_next(); i.next()) {
 101.168 +                Node* use = i.get();
 101.169 +                assert(use == ctrl || use->is_ConstraintCast(),
 101.170 +                       err_msg_res("unexpected user: %s", use->Name()));
 101.171 +              }
 101.172 +
 101.173 +              iff = ctrl->in(1)->in(0)->as_If();
 101.174 +              ctrl = iff->in(0);
 101.175 +            }
 101.176 +          } else if (ctrl->is_IfTrue()) { // null checks, class checks
 101.177 +            iff = ctrl->in(0)->as_If();
 101.178 +            assert(iff->is_If(), "must be if");
 101.179 +            // Verify that the other arm is an uncommon trap
 101.180 +            Node* otherproj = iff->proj_out(1 - ctrl->as_Proj()->_con);
 101.181 +            CallStaticJavaNode* call = otherproj->unique_out()->isa_CallStaticJava();
 101.182 +            assert(strcmp(call->_name, "uncommon_trap") == 0, "must be uncommond trap");
 101.183 +            ctrl = iff->in(0);
 101.184 +          } else {
 101.185 +            break;
 101.186 +          }
 101.187 +        }
 101.188 +
 101.189 +        assert(ctrl->is_Proj(), "must be a projection");
 101.190 +        assert(ctrl->in(0)->is_Initialize(), "should be initialize");
 101.191 +        for (SimpleDUIterator i(ctrl); i.has_next(); i.next()) {
 101.192 +          Node* use = i.get();
 101.193 +          assert(use == copy || use == iff || use == curr || use->is_CheckCastPP() || use->is_Load(),
 101.194 +                 err_msg_res("unexpected user: %s", use->Name()));
 101.195 +        }
 101.196 +#endif // ASSERT
 101.197 +      }
 101.198 +    }
 101.199 +  }
 101.200 +
 101.201 +#ifndef PRODUCT
 101.202 +  if (PrintOptimizeStringConcat) {
 101.203 +    tty->print("fusion has correct memory flow for ");
 101.204 +    _begin->jvms()->dump_spec(tty); tty->cr();
 101.205 +    tty->cr();
 101.206 +  }
 101.207 +#endif
 101.208 +  return true;
 101.209 +}
 101.210 +
 101.211  bool StringConcat::validate_control_flow() {
 101.212    // We found all the calls and arguments now lets see if it's
 101.213    // safe to transform the graph as we would expect.
 101.214 @@ -753,7 +899,7 @@
 101.215      }
 101.216    }
 101.217  
 101.218 -  // Skip backwards through the control checking for unexpected contro flow
 101.219 +  // Skip backwards through the control checking for unexpected control flow
 101.220    Node* ptr = _end;
 101.221    bool fail = false;
 101.222    while (ptr != _begin) {
 101.223 @@ -936,7 +1082,7 @@
 101.224    if (PrintOptimizeStringConcat && !fail) {
 101.225      ttyLocker ttyl;
 101.226      tty->cr();
 101.227 -    tty->print("fusion would succeed (%d %d) for ", null_check_count, _uncommon_traps.size());
 101.228 +    tty->print("fusion has correct control flow (%d %d) for ", null_check_count, _uncommon_traps.size());
 101.229      _begin->jvms()->dump_spec(tty); tty->cr();
 101.230      for (int i = 0; i < num_arguments(); i++) {
 101.231        argument(i)->dump();
   102.1 --- a/src/share/vm/runtime/globals.hpp	Thu Oct 17 06:29:58 2013 -0700
   102.2 +++ b/src/share/vm/runtime/globals.hpp	Fri Oct 18 12:10:44 2013 -0700
   102.3 @@ -2670,6 +2670,14 @@
   102.4    product(bool, AggressiveOpts, false,                                      \
   102.5            "Enable aggressive optimizations - see arguments.cpp")            \
   102.6                                                                              \
   102.7 +  product_pd(uintx, TypeProfileLevel,                                       \
   102.8 +          "=XY, with Y, Type profiling of arguments at call"                \
   102.9 +          "          X, Type profiling of return value at call"             \
  102.10 +          "X and Y in 0->off ; 1->js292 only; 2->all methods")              \
  102.11 +                                                                            \
  102.12 +  product(intx, TypeProfileArgsLimit,     2,                                \
  102.13 +          "max number of call arguments to consider for type profiling")    \
  102.14 +                                                                            \
  102.15    /* statistics */                                                          \
  102.16    develop(bool, CountCompiledCalls, false,                                  \
  102.17            "Count method invocations")                                       \
  102.18 @@ -3820,7 +3828,6 @@
  102.19    product(bool, UseLockedTracing, false,                                    \
  102.20            "Use locked-tracing when doing event-based tracing")
  102.21  
  102.22 -
  102.23  /*
  102.24   *  Macros for factoring of globals
  102.25   */
   103.1 --- a/src/share/vm/runtime/java.cpp	Thu Oct 17 06:29:58 2013 -0700
   103.2 +++ b/src/share/vm/runtime/java.cpp	Fri Oct 18 12:10:44 2013 -0700
   103.3 @@ -183,6 +183,7 @@
   103.4    collected_profiled_methods->sort(&compare_methods);
   103.5  
   103.6    int count = collected_profiled_methods->length();
   103.7 +  int total_size = 0;
   103.8    if (count > 0) {
   103.9      for (int index = 0; index < count; index++) {
  103.10        Method* m = collected_profiled_methods->at(index);
  103.11 @@ -190,10 +191,13 @@
  103.12        tty->print_cr("------------------------------------------------------------------------");
  103.13        //m->print_name(tty);
  103.14        m->print_invocation_count();
  103.15 +      tty->print_cr("  mdo size: %d bytes", m->method_data()->size_in_bytes());
  103.16        tty->cr();
  103.17        m->print_codes();
  103.18 +      total_size += m->method_data()->size_in_bytes();
  103.19      }
  103.20      tty->print_cr("------------------------------------------------------------------------");
  103.21 +    tty->print_cr("Total MDO size: %d bytes", total_size);
  103.22    }
  103.23  }
  103.24  
   104.1 --- a/src/share/vm/runtime/signature.cpp	Thu Oct 17 06:29:58 2013 -0700
   104.2 +++ b/src/share/vm/runtime/signature.cpp	Fri Oct 18 12:10:44 2013 -0700
   104.3 @@ -378,6 +378,16 @@
   104.4    return result;
   104.5  }
   104.6  
   104.7 +int SignatureStream::reference_parameter_count() {
   104.8 +  int args_count = 0;
   104.9 +  for ( ; !at_return_type(); next()) {
  104.10 +    if (is_object()) {
  104.11 +      args_count++;
  104.12 +    }
  104.13 +  }
  104.14 +  return args_count;
  104.15 +}
  104.16 +
  104.17  bool SignatureVerifier::is_valid_signature(Symbol* sig) {
  104.18    const char* signature = (const char*)sig->bytes();
  104.19    ssize_t len = sig->utf8_length();
   105.1 --- a/src/share/vm/runtime/signature.hpp	Thu Oct 17 06:29:58 2013 -0700
   105.2 +++ b/src/share/vm/runtime/signature.hpp	Fri Oct 18 12:10:44 2013 -0700
   105.3 @@ -401,6 +401,9 @@
   105.4  
   105.5    // return same as_symbol except allocation of new symbols is avoided.
   105.6    Symbol* as_symbol_or_null();
   105.7 +
   105.8 +  // count the number of references in the signature
   105.9 +  int reference_parameter_count();
  105.10  };
  105.11  
  105.12  class SignatureVerifier : public StackObj {
   106.1 --- a/src/share/vm/runtime/thread.cpp	Thu Oct 17 06:29:58 2013 -0700
   106.2 +++ b/src/share/vm/runtime/thread.cpp	Fri Oct 18 12:10:44 2013 -0700
   106.3 @@ -1454,7 +1454,6 @@
   106.4    _interp_only_mode    = 0;
   106.5    _special_runtime_exit_condition = _no_async_condition;
   106.6    _pending_async_exception = NULL;
   106.7 -  _is_compiling = false;
   106.8    _thread_stat = NULL;
   106.9    _thread_stat = new ThreadStatistics();
  106.10    _blocked_on_compilation = false;
  106.11 @@ -1815,7 +1814,8 @@
  106.12      // Call Thread.exit(). We try 3 times in case we got another Thread.stop during
  106.13      // the execution of the method. If that is not enough, then we don't really care. Thread.stop
  106.14      // is deprecated anyhow.
  106.15 -    { int count = 3;
  106.16 +    if (!is_Compiler_thread()) {
  106.17 +      int count = 3;
  106.18        while (java_lang_Thread::threadGroup(threadObj()) != NULL && (count-- > 0)) {
  106.19          EXCEPTION_MARK;
  106.20          JavaValue result(T_VOID);
  106.21 @@ -1828,7 +1828,6 @@
  106.22          CLEAR_PENDING_EXCEPTION;
  106.23        }
  106.24      }
  106.25 -
  106.26      // notify JVMTI
  106.27      if (JvmtiExport::should_post_thread_life()) {
  106.28        JvmtiExport::post_thread_end(this);
  106.29 @@ -3239,6 +3238,7 @@
  106.30    _counters = counters;
  106.31    _buffer_blob = NULL;
  106.32    _scanned_nmethod = NULL;
  106.33 +  _compiler = NULL;
  106.34  
  106.35  #ifndef PRODUCT
  106.36    _ideal_graph_printer = NULL;
  106.37 @@ -3255,6 +3255,7 @@
  106.38    }
  106.39  }
  106.40  
  106.41 +
  106.42  // ======= Threads ========
  106.43  
  106.44  // The Threads class links together all active threads, and provides
  106.45 @@ -3275,8 +3276,6 @@
  106.46  // All JavaThreads
  106.47  #define ALL_JAVA_THREADS(X) for (JavaThread* X = _thread_list; X; X = X->next())
  106.48  
  106.49 -void os_stream();
  106.50 -
  106.51  // All JavaThreads + all non-JavaThreads (i.e., every thread in the system)
  106.52  void Threads::threads_do(ThreadClosure* tc) {
  106.53    assert_locked_or_safepoint(Threads_lock);
   107.1 --- a/src/share/vm/runtime/thread.hpp	Thu Oct 17 06:29:58 2013 -0700
   107.2 +++ b/src/share/vm/runtime/thread.hpp	Fri Oct 18 12:10:44 2013 -0700
   107.3 @@ -923,9 +923,6 @@
   107.4    volatile address _exception_handler_pc;        // PC for handler of exception
   107.5    volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
   107.6  
   107.7 -  // support for compilation
   107.8 -  bool    _is_compiling;                         // is true if a compilation is active inthis thread (one compilation per thread possible)
   107.9 -
  107.10    // support for JNI critical regions
  107.11    jint    _jni_active_critical;                  // count of entries into JNI critical region
  107.12  
  107.13 @@ -1005,10 +1002,6 @@
  107.14    // Testers
  107.15    virtual bool is_Java_thread() const            { return true;  }
  107.16  
  107.17 -  // compilation
  107.18 -  void set_is_compiling(bool f)                  { _is_compiling = f; }
  107.19 -  bool is_compiling() const                      { return _is_compiling; }
  107.20 -
  107.21    // Thread chain operations
  107.22    JavaThread* next() const                       { return _next; }
  107.23    void set_next(JavaThread* p)                   { _next = p; }
  107.24 @@ -1283,6 +1276,11 @@
  107.25    void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
  107.26    void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
  107.27  
  107.28 +  void clear_exception_oop_and_pc() {
  107.29 +    set_exception_oop(NULL);
  107.30 +    set_exception_pc(NULL);
  107.31 +  }
  107.32 +
  107.33    // Stack overflow support
  107.34    inline size_t stack_available(address cur_sp);
  107.35    address stack_yellow_zone_base()
  107.36 @@ -1811,13 +1809,14 @@
  107.37   private:
  107.38    CompilerCounters* _counters;
  107.39  
  107.40 -  ciEnv*        _env;
  107.41 -  CompileLog*   _log;
  107.42 -  CompileTask*  _task;
  107.43 -  CompileQueue* _queue;
  107.44 -  BufferBlob*   _buffer_blob;
  107.45 +  ciEnv*            _env;
  107.46 +  CompileLog*       _log;
  107.47 +  CompileTask*      _task;
  107.48 +  CompileQueue*     _queue;
  107.49 +  BufferBlob*       _buffer_blob;
  107.50  
  107.51 -  nmethod*      _scanned_nmethod;  // nmethod being scanned by the sweeper
  107.52 +  nmethod*          _scanned_nmethod;  // nmethod being scanned by the sweeper
  107.53 +  AbstractCompiler* _compiler;
  107.54  
  107.55   public:
  107.56  
  107.57 @@ -1829,14 +1828,17 @@
  107.58    // Hide this compiler thread from external view.
  107.59    bool is_hidden_from_external_view() const      { return true; }
  107.60  
  107.61 -  CompileQueue* queue()                          { return _queue; }
  107.62 -  CompilerCounters* counters()                   { return _counters; }
  107.63 +  void set_compiler(AbstractCompiler* c)         { _compiler = c; }
  107.64 +  AbstractCompiler* compiler() const             { return _compiler; }
  107.65 +
  107.66 +  CompileQueue* queue()        const             { return _queue; }
  107.67 +  CompilerCounters* counters() const             { return _counters; }
  107.68  
  107.69    // Get/set the thread's compilation environment.
  107.70    ciEnv*        env()                            { return _env; }
  107.71    void          set_env(ciEnv* env)              { _env = env; }
  107.72  
  107.73 -  BufferBlob*   get_buffer_blob()                { return _buffer_blob; }
  107.74 +  BufferBlob*   get_buffer_blob() const          { return _buffer_blob; }
  107.75    void          set_buffer_blob(BufferBlob* b)   { _buffer_blob = b; };
  107.76  
  107.77    // Get/set the thread's logging information
   108.1 --- a/src/share/vm/runtime/vmStructs.cpp	Thu Oct 17 06:29:58 2013 -0700
   108.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Fri Oct 18 12:10:44 2013 -0700
   108.3 @@ -917,7 +917,6 @@
   108.4    volatile_nonstatic_field(JavaThread,         _exception_oop,                                oop)                                   \
   108.5    volatile_nonstatic_field(JavaThread,         _exception_pc,                                 address)                               \
   108.6    volatile_nonstatic_field(JavaThread,         _is_method_handle_return,                      int)                                   \
   108.7 -  nonstatic_field(JavaThread,                  _is_compiling,                                 bool)                                  \
   108.8    nonstatic_field(JavaThread,                  _special_runtime_exit_condition,               JavaThread::AsyncRequests)             \
   108.9    nonstatic_field(JavaThread,                  _saved_exception_pc,                           address)                               \
  108.10     volatile_nonstatic_field(JavaThread,        _thread_state,                                 JavaThreadState)                       \
   109.1 --- a/src/share/vm/services/heapDumper.cpp	Thu Oct 17 06:29:58 2013 -0700
   109.2 +++ b/src/share/vm/services/heapDumper.cpp	Fri Oct 18 12:10:44 2013 -0700
   109.3 @@ -1545,7 +1545,9 @@
   109.4  
   109.5  // writes a HPROF_GC_CLASS_DUMP record for the given class
   109.6  void VM_HeapDumper::do_class_dump(Klass* k) {
   109.7 -  DumperSupport::dump_class_and_array_classes(writer(), k);
   109.8 +  if (k->oop_is_instance()) {
   109.9 +    DumperSupport::dump_class_and_array_classes(writer(), k);
  109.10 +  }
  109.11  }
  109.12  
  109.13  // writes a HPROF_GC_CLASS_DUMP records for a given basic type
  109.14 @@ -1722,7 +1724,7 @@
  109.15    SymbolTable::symbols_do(&sym_dumper);
  109.16  
  109.17    // write HPROF_LOAD_CLASS records
  109.18 -  SystemDictionary::classes_do(&do_load_class);
  109.19 +  ClassLoaderDataGraph::classes_do(&do_load_class);
  109.20    Universe::basic_type_classes_do(&do_load_class);
  109.21  
  109.22    // write HPROF_FRAME and HPROF_TRACE records
  109.23 @@ -1733,7 +1735,7 @@
  109.24    write_dump_header();
  109.25  
  109.26    // Writes HPROF_GC_CLASS_DUMP records
  109.27 -  SystemDictionary::classes_do(&do_class_dump);
  109.28 +  ClassLoaderDataGraph::classes_do(&do_class_dump);
  109.29    Universe::basic_type_classes_do(&do_basic_type_array_class_dump);
  109.30    check_segment_length();
  109.31  
   110.1 --- a/src/share/vm/services/runtimeService.cpp	Thu Oct 17 06:29:58 2013 -0700
   110.2 +++ b/src/share/vm/services/runtimeService.cpp	Fri Oct 18 12:10:44 2013 -0700
   110.3 @@ -1,5 +1,5 @@
   110.4  /*
   110.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
   110.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
   110.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   110.8   *
   110.9   * This code is free software; you can redistribute it and/or modify it
  110.10 @@ -119,7 +119,7 @@
  110.11  #endif /* USDT2 */
  110.12  
  110.13    // Print the time interval in which the app was executing
  110.14 -  if (PrintGCApplicationConcurrentTime) {
  110.15 +  if (PrintGCApplicationConcurrentTime && _app_timer.is_updated()) {
  110.16      gclog_or_tty->date_stamp(PrintGCDateStamps);
  110.17      gclog_or_tty->stamp(PrintGCTimeStamps);
  110.18      gclog_or_tty->print_cr("Application time: %3.7f seconds",
   111.1 --- a/src/share/vm/shark/sharkCompiler.cpp	Thu Oct 17 06:29:58 2013 -0700
   111.2 +++ b/src/share/vm/shark/sharkCompiler.cpp	Fri Oct 18 12:10:44 2013 -0700
   111.3 @@ -133,11 +133,10 @@
   111.4      exit(1);
   111.5    }
   111.6  
   111.7 -  execution_engine()->addModule(
   111.8 -    _native_context->module());
   111.9 +  execution_engine()->addModule(_native_context->module());
  111.10  
  111.11    // All done
  111.12 -  mark_initialized();
  111.13 +  set_state(initialized);
  111.14  }
  111.15  
  111.16  void SharkCompiler::initialize() {
   112.1 --- a/src/share/vm/shark/sharkCompiler.hpp	Thu Oct 17 06:29:58 2013 -0700
   112.2 +++ b/src/share/vm/shark/sharkCompiler.hpp	Fri Oct 18 12:10:44 2013 -0700
   112.3 @@ -50,10 +50,6 @@
   112.4      return ! (method->is_method_handle_intrinsic() || method->is_compiled_lambda_form());
   112.5    }
   112.6  
   112.7 -  // Customization
   112.8 -  bool needs_adapters()  { return false; }
   112.9 -  bool needs_stubs()     { return false; }
  112.10 -
  112.11    // Initialization
  112.12    void initialize();
  112.13  
   113.1 --- a/src/share/vm/utilities/constantTag.cpp	Thu Oct 17 06:29:58 2013 -0700
   113.2 +++ b/src/share/vm/utilities/constantTag.cpp	Fri Oct 18 12:10:44 2013 -0700
   113.3 @@ -51,7 +51,9 @@
   113.4      case JVM_CONSTANT_ClassIndex :
   113.5      case JVM_CONSTANT_StringIndex :
   113.6      case JVM_CONSTANT_MethodHandle :
   113.7 +    case JVM_CONSTANT_MethodHandleInError :
   113.8      case JVM_CONSTANT_MethodType :
   113.9 +    case JVM_CONSTANT_MethodTypeInError :
  113.10        return T_OBJECT;
  113.11      default:
  113.12        ShouldNotReachHere();
  113.13 @@ -60,6 +62,19 @@
  113.14  }
  113.15  
  113.16  
  113.17 +jbyte constantTag::non_error_value() const {
  113.18 +  switch (_tag) {
  113.19 +  case JVM_CONSTANT_UnresolvedClassInError:
  113.20 +    return JVM_CONSTANT_UnresolvedClass;
  113.21 +  case JVM_CONSTANT_MethodHandleInError:
  113.22 +    return JVM_CONSTANT_MethodHandle;
  113.23 +  case JVM_CONSTANT_MethodTypeInError:
  113.24 +    return JVM_CONSTANT_MethodType;
  113.25 +  default:
  113.26 +    return _tag;
  113.27 +  }
  113.28 +}
  113.29 +
  113.30  
  113.31  const char* constantTag::internal_name() const {
  113.32    switch (_tag) {
   114.1 --- a/src/share/vm/utilities/constantTag.hpp	Thu Oct 17 06:29:58 2013 -0700
   114.2 +++ b/src/share/vm/utilities/constantTag.hpp	Fri Oct 18 12:10:44 2013 -0700
   114.3 @@ -108,7 +108,8 @@
   114.4      _tag = tag;
   114.5    }
   114.6  
   114.7 -  jbyte value()                      { return _tag; }
   114.8 +  jbyte value() const                { return _tag; }
   114.9 +  jbyte non_error_value() const;
  114.10  
  114.11    BasicType basic_type() const;        // if used with ldc, what kind of value gets pushed?
  114.12  
   115.1 --- a/src/share/vm/utilities/ostream.cpp	Thu Oct 17 06:29:58 2013 -0700
   115.2 +++ b/src/share/vm/utilities/ostream.cpp	Fri Oct 18 12:10:44 2013 -0700
   115.3 @@ -465,7 +465,7 @@
   115.4  }
   115.5  
   115.6  // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
   115.7 -// in log_name, %p => pipd1234 and
   115.8 +// in log_name, %p => pid1234 and
   115.9  //              %t => YYYY-MM-DD_HH-MM-SS
  115.10  static const char* make_log_name(const char* log_name, const char* force_directory) {
  115.11    char timestr[32];
  115.12 @@ -792,7 +792,7 @@
  115.13  
  115.14  void defaultStream::init_log() {
  115.15    // %%% Need a MutexLocker?
  115.16 -  const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
  115.17 +  const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
  115.18    const char* try_name = make_log_name(log_name, NULL);
  115.19    fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
  115.20    if (!file->is_open()) {
   116.1 --- a/src/share/vm/utilities/vmError.cpp	Thu Oct 17 06:29:58 2013 -0700
   116.2 +++ b/src/share/vm/utilities/vmError.cpp	Fri Oct 18 12:10:44 2013 -0700
   116.3 @@ -1050,7 +1050,7 @@
   116.4          FILE* replay_data_file = os::open(fd, "w");
   116.5          if (replay_data_file != NULL) {
   116.6            fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
   116.7 -          env->dump_replay_data(&replay_data_stream);
   116.8 +          env->dump_replay_data_unsafe(&replay_data_stream);
   116.9            out.print_raw("#\n# Compiler replay data is saved as:\n# ");
  116.10            out.print_raw_cr(buffer);
  116.11          } else {
   117.1 --- a/test/TEST.groups	Thu Oct 17 06:29:58 2013 -0700
   117.2 +++ b/test/TEST.groups	Fri Oct 18 12:10:44 2013 -0700
   117.3 @@ -27,7 +27,7 @@
   117.4  # - compact1, compact2, compact3, full JRE, JDK
   117.5  #
   117.6  # In addition they support testing of the minimal VM on compact1 and compact2.
   117.7 -# Essentially this defines groups based around the specified API's and VM 
   117.8 +# Essentially this defines groups based around the specified API's and VM
   117.9  # services available in the runtime.
  117.10  #
  117.11  # The groups are defined hierarchically in two forms:
  117.12 @@ -44,9 +44,9 @@
  117.13  # by listing the top-level test directories.
  117.14  #
  117.15  # To use a group simply list it on the jtreg command line eg:
  117.16 -#   jtreg :jdk    
  117.17 +#   jtreg :jdk
  117.18  # runs all tests. While
  117.19 -#   jtreg :compact2  
  117.20 +#   jtreg :compact2
  117.21  # runs those tests that only require compact1 and compact2 API's.
  117.22  #
  117.23  
  117.24 @@ -69,6 +69,7 @@
  117.25    runtime/7107135/Test7107135.sh \
  117.26    runtime/7158988/FieldMonitor.java \
  117.27    runtime/7194254/Test7194254.java \
  117.28 +  runtime/8026365/InvokeSpecialAnonTest.java \
  117.29    runtime/jsig/Test8017498.sh \
  117.30    runtime/Metaspace/FragmentMetaspace.java \
  117.31    runtime/NMT/BaselineWithParameter.java \
  117.32 @@ -140,7 +141,7 @@
  117.33   -:needs_jdk
  117.34  
  117.35  # Tests that require compact2 API's and a full VM
  117.36 -#  
  117.37 +#
  117.38  needs_full_vm_compact2 =
  117.39  
  117.40  # Compact 1 adds full VM tests
   118.1 --- a/test/compiler/8013496/Test8013496.sh	Thu Oct 17 06:29:58 2013 -0700
   118.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   118.3 @@ -1,55 +0,0 @@
   118.4 -#!/bin/sh
   118.5 -# 
   118.6 -# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   118.7 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   118.8 -# 
   118.9 -# This code is free software; you can redistribute it and/or modify it
  118.10 -# under the terms of the GNU General Public License version 2 only, as
  118.11 -# published by the Free Software Foundation.
  118.12 -# 
  118.13 -# This code is distributed in the hope that it will be useful, but WITHOUT
  118.14 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  118.15 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  118.16 -# version 2 for more details (a copy is included in the LICENSE file that
  118.17 -# accompanied this code).
  118.18 -# 
  118.19 -# You should have received a copy of the GNU General Public License version
  118.20 -# 2 along with this work; if not, write to the Free Software Foundation,
  118.21 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  118.22 -# 
  118.23 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  118.24 -# or visit www.oracle.com if you need additional information or have any
  118.25 -# questions.
  118.26 -# 
  118.27 -#
  118.28 -# @test
  118.29 -# @bug 8013496
  118.30 -# @summary Test checks that the order in which ReversedCodeCacheSize and 
  118.31 -#          InitialCodeCacheSize are passed to the VM is irrelevant.  
  118.32 -# @run shell Test8013496.sh
  118.33 -#
  118.34 -#
  118.35 -## some tests require path to find test source dir
  118.36 -if [ "${TESTSRC}" = "" ]
  118.37 -then
  118.38 -  TESTSRC=${PWD}
  118.39 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  118.40 -fi
  118.41 -echo "TESTSRC=${TESTSRC}"
  118.42 -## Adding common setup Variables for running shell tests.
  118.43 -. ${TESTSRC}/../../test_env.sh
  118.44 -set -x
  118.45 -
  118.46 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
  118.47 -${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
  118.48 -
  118.49 -diff 1.out 2.out
  118.50 -
  118.51 -result=$?
  118.52 -if [ $result -eq 0 ] ; then  
  118.53 -  echo "Test Passed"
  118.54 -  exit 0
  118.55 -else
  118.56 -  echo "Test Failed"
  118.57 -  exit 1
  118.58 -fi
   119.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   119.2 +++ b/test/compiler/codecache/CheckReservedInitialCodeCacheSizeArgOrder.java	Fri Oct 18 12:10:44 2013 -0700
   119.3 @@ -0,0 +1,53 @@
   119.4 +/*
   119.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   119.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   119.7 + *
   119.8 + * This code is free software; you can redistribute it and/or modify it
   119.9 + * under the terms of the GNU General Public License version 2 only, as
  119.10 + * published by the Free Software Foundation.
  119.11 + *
  119.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  119.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  119.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  119.15 + * version 2 for more details (a copy is included in the LICENSE file that
  119.16 + * accompanied this code).
  119.17 + *
  119.18 + * You should have received a copy of the GNU General Public License version
  119.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  119.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  119.21 + *
  119.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  119.23 + * or visit www.oracle.com if you need additional information or have any
  119.24 + * questions.
  119.25 + */
  119.26 +
  119.27 +/*
  119.28 + * @test
  119.29 + * @bug 8013496
  119.30 + * @summary Test checks that the order in which ReversedCodeCacheSize and
  119.31 + *          InitialCodeCacheSize are passed to the VM is irrelevant.
  119.32 + * @library /testlibrary
  119.33 + *
  119.34 + */
  119.35 +import com.oracle.java.testlibrary.*;
  119.36 +
  119.37 +public class CheckReservedInitialCodeCacheSizeArgOrder {
  119.38 +  public static void main(String[] args) throws Exception {
  119.39 +    ProcessBuilder pb1,  pb2;
  119.40 +    OutputAnalyzer out1, out2;
  119.41 +
  119.42 +    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
  119.43 +    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
  119.44 +
  119.45 +    out1 = new OutputAnalyzer(pb1.start());
  119.46 +    out2 = new OutputAnalyzer(pb2.start());
  119.47 +
  119.48 +    // Check that the outputs are equal
  119.49 +    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
  119.50 +      throw new RuntimeException("Test failed");
  119.51 +    }
  119.52 +
  119.53 +    out1.shouldHaveExitValue(0);
  119.54 +    out2.shouldHaveExitValue(0);
  119.55 +  }
  119.56 +}
   120.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   120.2 +++ b/test/compiler/intrinsics/mathexact/RepeatTest.java	Fri Oct 18 12:10:44 2013 -0700
   120.3 @@ -0,0 +1,107 @@
   120.4 +/*
   120.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   120.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   120.7 + *
   120.8 + * This code is free software; you can redistribute it and/or modify it
   120.9 + * under the terms of the GNU General Public License version 2 only, as
  120.10 + * published by the Free Software Foundation.
  120.11 + *
  120.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  120.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  120.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  120.15 + * version 2 for more details (a copy is included in the LICENSE file that
  120.16 + * accompanied this code).
  120.17 + *
  120.18 + * You should have received a copy of the GNU General Public License version
  120.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  120.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  120.21 + *
  120.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  120.23 + * or visit www.oracle.com if you need additional information or have any
  120.24 + * questions.
  120.25 + */
  120.26 +
  120.27 +/*
  120.28 + * @test
  120.29 + * @bug 8025657
  120.30 + * @summary Test repeating addExact
  120.31 + * @compile RepeatTest.java
  120.32 + * @run main RepeatTest
  120.33 + *
  120.34 + */
  120.35 +
  120.36 +import java.lang.ArithmeticException;
  120.37 +
  120.38 +public class RepeatTest {
  120.39 +  public static void main(String[] args) {
  120.40 +    java.util.Random rnd = new java.util.Random();
  120.41 +    for (int i = 0; i < 50000; ++i) {
  120.42 +      int x = Integer.MAX_VALUE - 10;
  120.43 +      int y = Integer.MAX_VALUE - 10 + rnd.nextInt(5); //rnd.nextInt() / 2;
  120.44 +
  120.45 +      int c = rnd.nextInt() / 2;
  120.46 +      int d = rnd.nextInt() / 2;
  120.47 +
  120.48 +      int a = addExact(x, y);
  120.49 +
  120.50 +      if (a != 36) {
  120.51 +          throw new RuntimeException("a != 0 : " + a);
  120.52 +      }
  120.53 +
  120.54 +      int b = nonExact(c, d);
  120.55 +      int n = addExact2(c, d);
  120.56 +
  120.57 +
  120.58 +      if (n != b) {
  120.59 +        throw new RuntimeException("n != b : " + n + " != " + b);
  120.60 +      }
  120.61 +    }
  120.62 +  }
  120.63 +
  120.64 +  public static int addExact2(int x, int y) {
  120.65 +      int result = 0;
  120.66 +      result += java.lang.Math.addExact(x, y);
  120.67 +      result += java.lang.Math.addExact(x, y);
  120.68 +      result += java.lang.Math.addExact(x, y);
  120.69 +      result += java.lang.Math.addExact(x, y);
  120.70 +      return result;
  120.71 +  }
  120.72 +
  120.73 +  public static int addExact(int x, int y) {
  120.74 +    int result = 0;
  120.75 +    try {
  120.76 +        result += 5;
  120.77 +        result = java.lang.Math.addExact(x, y);
  120.78 +    } catch (ArithmeticException e) {
  120.79 +        result += 1;
  120.80 +    }
  120.81 +    try {
  120.82 +        result += 6;
  120.83 +
  120.84 +        result += java.lang.Math.addExact(x, y);
  120.85 +    } catch (ArithmeticException e) {
  120.86 +        result += 2;
  120.87 +    }
  120.88 +    try {
  120.89 +        result += 7;
  120.90 +        result += java.lang.Math.addExact(x, y);
  120.91 +    } catch (ArithmeticException e) {
  120.92 +        result += 3;
  120.93 +    }
  120.94 +    try {
  120.95 +        result += 8;
  120.96 +        result += java.lang.Math.addExact(x, y);
  120.97 +    } catch (ArithmeticException e) {
  120.98 +        result += 4;
  120.99 +    }
 120.100 +    return result;
 120.101 +  }
 120.102 +
 120.103 +  public static int nonExact(int x, int y) {
 120.104 +    int result = x + y;
 120.105 +    result += x + y;
 120.106 +    result += x + y;
 120.107 +    result += x + y;
 120.108 +    return result;
 120.109 +  }
 120.110 +}
   121.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   121.2 +++ b/test/compiler/jsr292/CreatesInterfaceDotEqualsCallInfo.java	Fri Oct 18 12:10:44 2013 -0700
   121.3 @@ -0,0 +1,40 @@
   121.4 +/*
   121.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   121.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   121.7 + *
   121.8 + * This code is free software; you can redistribute it and/or modify it
   121.9 + * under the terms of the GNU General Public License version 2 only, as
  121.10 + * published by the Free Software Foundation.
  121.11 + *
  121.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  121.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  121.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  121.15 + * version 2 for more details (a copy is included in the LICENSE file that
  121.16 + * accompanied this code).
  121.17 + *
  121.18 + * You should have received a copy of the GNU General Public License version
  121.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  121.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  121.21 + *
  121.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  121.23 + * or visit www.oracle.com if you need additional information or have any
  121.24 + * questions.
  121.25 + *
  121.26 + */
  121.27 +
  121.28 +/**
  121.29 + * @test
  121.30 + * @bug 8026124
  121.31 + * @summary Javascript file provoked assertion failure in linkResolver.cpp
  121.32 + *
  121.33 + * @run main/othervm CreatesInterfaceDotEqualsCallInfo
  121.34 + */
  121.35 +
  121.36 +public class CreatesInterfaceDotEqualsCallInfo {
  121.37 +  public static void main(String[] args) throws java.io.IOException {
  121.38 +    String[] jsargs = { System.getProperty("test.src", ".") +
  121.39 +                        "/createsInterfaceDotEqualsCallInfo.js" };
  121.40 +    jdk.nashorn.tools.Shell.main(System.in, System.out, System.err, jsargs);
  121.41 +    System.out.println("PASS, did not crash running Javascript");
  121.42 +  }
  121.43 +}
   122.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   122.2 +++ b/test/compiler/jsr292/createsInterfaceDotEqualsCallInfo.js	Fri Oct 18 12:10:44 2013 -0700
   122.3 @@ -0,0 +1,26 @@
   122.4 +/*
   122.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   122.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   122.7 + *
   122.8 + * This code is free software; you can redistribute it and/or modify it
   122.9 + * under the terms of the GNU General Public License version 2 only, as
  122.10 + * published by the Free Software Foundation.
  122.11 + *
  122.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  122.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  122.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  122.15 + * version 2 for more details (a copy is included in the LICENSE file that
  122.16 + * accompanied this code).
  122.17 + *
  122.18 + * You should have received a copy of the GNU General Public License version
  122.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  122.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  122.21 + *
  122.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  122.23 + * or visit www.oracle.com if you need additional information or have any
  122.24 + * questions.
  122.25 + *
  122.26 + */
  122.27 +
  122.28 +var path = new java.io.File("/Users/someone").toPath();
  122.29 +path.toString();
   123.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   123.2 +++ b/test/compiler/startup/SmallCodeCacheStartup.java	Fri Oct 18 12:10:44 2013 -0700
   123.3 @@ -0,0 +1,43 @@
   123.4 +/*
   123.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   123.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   123.7 + *
   123.8 + * This code is free software; you can redistribute it and/or modify it
   123.9 + * under the terms of the GNU General Public License version 2 only, as
  123.10 + * published by the Free Software Foundation.
  123.11 + *
  123.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  123.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  123.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  123.15 + * version 2 for more details (a copy is included in the LICENSE file that
  123.16 + * accompanied this code).
  123.17 + *
  123.18 + * You should have received a copy of the GNU General Public License version
  123.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  123.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  123.21 + *
  123.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  123.23 + * or visit www.oracle.com if you need additional information or have any
  123.24 + * questions.
  123.25 + */
  123.26 +
  123.27 +/*
  123.28 + * @test
  123.29 + * @bug 8023014
  123.30 + * @summary Test ensures that there is no crash when compiler initialization fails
  123.31 + * @library /testlibrary
  123.32 + *
  123.33 + */
  123.34 +import com.oracle.java.testlibrary.*;
  123.35 +
  123.36 +public class SmallCodeCacheStartup {
  123.37 +  public static void main(String[] args) throws Exception {
  123.38 +    ProcessBuilder pb;
  123.39 +    OutputAnalyzer out;
  123.40 +
  123.41 +    pb = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=3m", "-XX:CICompilerCount=64", "-version");
  123.42 +    out = new OutputAnalyzer(pb.start());
  123.43 +    out.shouldContain("no space to run compiler");
  123.44 +    out.shouldHaveExitValue(0);
  123.45 +  }
  123.46 +}
   124.1 --- a/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Thu Oct 17 06:29:58 2013 -0700
   124.2 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Fri Oct 18 12:10:44 2013 -0700
   124.3 @@ -107,7 +107,7 @@
   124.4              System.out.println("Extracted pid: " + pid);
   124.5  
   124.6              JDKToolLauncher jMapLauncher = JDKToolLauncher
   124.7 -                    .create("jmap", false);
   124.8 +                    .createUsingTestJDK("jmap");
   124.9              jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-"
  124.10                      + HEAP_DUMP_FILE_NAME);
  124.11              jMapLauncher.addToolArg(String.valueOf(pid));
   125.1 --- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Thu Oct 17 06:29:58 2013 -0700
   125.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Fri Oct 18 12:10:44 2013 -0700
   125.3 @@ -74,17 +74,15 @@
   125.4      }
   125.5  
   125.6      /**
   125.7 -     * Creates a new JDKToolLauncher for the specified tool.
   125.8 +     * Creates a new JDKToolLauncher for the specified tool in the Tested JDK.
   125.9       *
  125.10       * @param tool
  125.11       *            The name of the tool
  125.12 -     * @param useCompilerPath
  125.13 -     *            If true use the compiler JDK path, otherwise use the tested
  125.14 -     *            JDK path.
  125.15 +     *
  125.16       * @return A new JDKToolLauncher
  125.17       */
  125.18 -    public static JDKToolLauncher create(String tool, boolean useCompilerJDK) {
  125.19 -        return new JDKToolLauncher(tool, useCompilerJDK);
  125.20 +    public static JDKToolLauncher createUsingTestJDK(String tool) {
  125.21 +        return new JDKToolLauncher(tool, false);
  125.22      }
  125.23  
  125.24      /**

mercurial