Merge

Wed, 24 Apr 2013 21:11:02 -0400

author
dlong
date
Wed, 24 Apr 2013 21:11:02 -0400
changeset 5001
e10e43e58e92
parent 5000
a6e09d6dd8e5
parent 4957
f78763f49817
child 5002
3c0584fec1e6
child 5003
78603aa58b1e

Merge

make/bsd/makefiles/jvmg.make file | annotate | diff | comparison | revisions
make/bsd/makefiles/profiled.make file | annotate | diff | comparison | revisions
make/linux/makefiles/jvmg.make file | annotate | diff | comparison | revisions
make/linux/makefiles/profiled.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/jvmg.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/profiled.make file | annotate | diff | comparison | revisions
src/cpu/sparc/vm/sparc.ad file | annotate | diff | comparison | revisions
src/cpu/x86/vm/x86_32.ad file | annotate | diff | comparison | revisions
src/cpu/x86/vm/x86_64.ad file | annotate | diff | comparison | revisions
src/os/bsd/vm/chaitin_bsd.cpp file | annotate | diff | comparison | revisions
src/os/linux/vm/chaitin_linux.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/chaitin_solaris.cpp file | annotate | diff | comparison | revisions
src/os/windows/vm/chaitin_windows.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/output.cpp file | annotate | diff | comparison | revisions
test/gc/6941923/test6941923.sh file | annotate | diff | comparison | revisions
test/gc/TestVerifyBeforeGCDuringStartup.java file | annotate | diff | comparison | revisions
test/runtime/NMT/AllocTestType.java file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Wed Apr 24 20:55:28 2013 -0400
     1.2 +++ b/.hgtags	Wed Apr 24 21:11:02 2013 -0400
     1.3 @@ -330,3 +330,8 @@
     1.4  8d0f263a370c5f3e61791bb06054560804117288 hs25-b25
     1.5  af788b85010ebabbc1e8f52c6766e08c7a95cf99 jdk8-b84
     1.6  a947f40fb536e5b9e0aa210cf26abb430f80887a hs25-b26
     1.7 +42fe530cd478744a4d12a0cbf803f0fc804bab1a jdk8-b85
     1.8 +09b0d3e9ba6cdf7da07d4010d2d1df14596f6864 hs25-b27
     1.9 +6d88a566d369f6a1f86912cad7d0912686b2fda1 hs25-b28
    1.10 +86db4847f195c0ecceea646431f1ff22d56282e8 jdk8-b86
    1.11 +01d5f04e64dc2d64625b2db2056f5ed4de918a45 hs25-b29
     2.1 --- a/make/Makefile	Wed Apr 24 20:55:28 2013 -0400
     2.2 +++ b/make/Makefile	Wed Apr 24 21:11:02 2013 -0400
     2.3 @@ -19,7 +19,7 @@
     2.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
     2.5  # or visit www.oracle.com if you need additional information or have any
     2.6  # questions.
     2.7 -#  
     2.8 +#
     2.9  #
    2.10  
    2.11  # Top level gnumake file for hotspot builds
    2.12 @@ -85,15 +85,15 @@
    2.13  endif
    2.14  
    2.15  # Typical C1/C2 targets made available with this Makefile
    2.16 -C1_VM_TARGETS=product1 fastdebug1 optimized1 jvmg1
    2.17 -C2_VM_TARGETS=product  fastdebug  optimized  jvmg
    2.18 -ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero jvmgzero
    2.19 -SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark jvmgshark
    2.20 -MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 jvmgminimal1
    2.21 +C1_VM_TARGETS=product1 fastdebug1 optimized1 debug1
    2.22 +C2_VM_TARGETS=product  fastdebug  optimized  debug
    2.23 +ZERO_VM_TARGETS=productzero fastdebugzero optimizedzero debugzero
    2.24 +SHARK_VM_TARGETS=productshark fastdebugshark optimizedshark debugshark
    2.25 +MINIMAL1_VM_TARGETS=productminimal1 fastdebugminimal1 debugminimal1
    2.26  
    2.27  COMMON_VM_PRODUCT_TARGETS=product product1 docs export_product
    2.28  COMMON_VM_FASTDEBUG_TARGETS=fastdebug fastdebug1 docs export_fastdebug
    2.29 -COMMON_VM_DEBUG_TARGETS=jvmg jvmg1 docs export_debug
    2.30 +COMMON_VM_DEBUG_TARGETS=debug debug1 docs export_debug
    2.31  
    2.32  # JDK directory list
    2.33  JDK_DIRS=bin include jre lib demo
    2.34 @@ -103,13 +103,13 @@
    2.35  ifeq ($(JVM_VARIANT_MINIMAL1),true)
    2.36  all_product:	productminimal1
    2.37  all_fastdebug:	fastdebugminimal1
    2.38 -all_debug:	jvmgminimal1
    2.39 +all_debug:	debugminimal1
    2.40  endif
    2.41  
    2.42  ifdef BUILD_CLIENT_ONLY
    2.43  all_product:   product1 docs export_product
    2.44  all_fastdebug: fastdebug1 docs export_fastdebug
    2.45 -all_debug:     jvmg1 docs export_debug
    2.46 +all_debug:     debug1 docs export_debug
    2.47  else
    2.48  ifeq ($(MACOSX_UNIVERSAL),true)
    2.49  all_product:   universal_product
    2.50 @@ -127,13 +127,13 @@
    2.51  allzero:           all_productzero all_fastdebugzero
    2.52  all_productzero:   productzero docs export_product
    2.53  all_fastdebugzero: fastdebugzero docs export_fastdebug
    2.54 -all_debugzero:     jvmgzero docs export_debug
    2.55 +all_debugzero:     debugzero docs export_debug
    2.56  all_optimizedzero: optimizedzero docs export_optimized
    2.57  
    2.58  allshark:           all_productshark all_fastdebugshark
    2.59  all_productshark:   productshark docs export_product
    2.60  all_fastdebugshark: fastdebugshark docs export_fastdebug
    2.61 -all_debugshark:     jvmgshark docs export_debug
    2.62 +all_debugshark:     debugshark docs export_debug
    2.63  all_optimizedshark: optimizedshark docs export_optimized
    2.64  
    2.65  # Do everything
    2.66 @@ -227,7 +227,7 @@
    2.67  	$(MKDIR) -p $(OUTPUTDIR)
    2.68  	$(CD) $(OUTPUTDIR); \
    2.69  		$(MAKE) -f $(ABS_OS_MAKEFILE) \
    2.70 -			$(MAKE_ARGS) $(VM_TARGET) 
    2.71 +			$(MAKE_ARGS) $(VM_TARGET)
    2.72  
    2.73  generic_buildminimal1:
    2.74  ifeq ($(JVM_VARIANT_MINIMAL1),true)
    2.75 @@ -260,7 +260,7 @@
    2.76  	  EXPORT_SUBDIR=/$(@:export_%=%) \
    2.77  	  generic_export
    2.78  export_debug:
    2.79 -	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=${VM_DEBUG} \
    2.80 +	$(MAKE) BUILD_FLAVOR=$(@:export_%=%) VM_SUBDIR=$(@:export_%=%) \
    2.81  	  EXPORT_SUBDIR=/$(@:export_%=%) \
    2.82  	  generic_export
    2.83  export_optimized:
    2.84 @@ -281,192 +281,197 @@
    2.85  	  ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \
    2.86  	  generic_export
    2.87  export_debug_jdk::
    2.88 -	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=${VM_DEBUG} \
    2.89 +	$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) VM_SUBDIR=$(@:export_%_jdk=%) \
    2.90  	  ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) \
    2.91  	  generic_export
    2.92  
    2.93  # Export file copy rules
    2.94  XUSAGE=$(HS_SRC_DIR)/share/vm/Xusage.txt
    2.95 -DOCS_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_docs
    2.96 -C1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1
    2.97 -C2_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2
    2.98 -ZERO_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_zero
    2.99 -SHARK_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_shark
   2.100 -C1_DIR=$(C1_BASE_DIR)/$(VM_SUBDIR)
   2.101 -C2_DIR=$(C2_BASE_DIR)/$(VM_SUBDIR)
   2.102 -ZERO_DIR=$(ZERO_BASE_DIR)/$(VM_SUBDIR)
   2.103 -SHARK_DIR=$(SHARK_BASE_DIR)/$(VM_SUBDIR)
   2.104 -MINIMAL1_BASE_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1
   2.105 -MINIMAL1_DIR=$(MINIMAL1_BASE_DIR)/$(VM_SUBDIR)
   2.106 +DOCS_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_docs
   2.107 +C1_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler1/$(VM_SUBDIR)
   2.108 +C2_DIR      =$(OUTPUTDIR)/$(VM_PLATFORM)_compiler2/$(VM_SUBDIR)
   2.109 +MINIMAL1_DIR=$(OUTPUTDIR)/$(VM_PLATFORM)_minimal1/$(VM_SUBDIR)
   2.110 +ZERO_DIR    =$(OUTPUTDIR)/$(VM_PLATFORM)_zero/$(VM_SUBDIR)
   2.111 +SHARK_DIR   =$(OUTPUTDIR)/$(VM_PLATFORM)_shark/$(VM_SUBDIR)
   2.112  
   2.113 +# Server (C2)
   2.114  ifeq ($(JVM_VARIANT_SERVER), true)
   2.115 -    MISC_DIR=$(C2_DIR)
   2.116 -    GEN_DIR=$(C2_BASE_DIR)/generated
   2.117 -endif
   2.118 -ifeq ($(JVM_VARIANT_CLIENT), true)
   2.119 -    MISC_DIR=$(C1_DIR)
   2.120 -    GEN_DIR=$(C1_BASE_DIR)/generated
   2.121 -endif
   2.122 -ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   2.123 -    MISC_DIR=$(SHARK_DIR)
   2.124 -    GEN_DIR=$(SHARK_BASE_DIR)/generated
   2.125 -endif
   2.126 -ifeq ($(JVM_VARIANT_ZERO), true)
   2.127 -    MISC_DIR=$(ZERO_DIR)
   2.128 -    GEN_DIR=$(ZERO_BASE_DIR)/generated
   2.129 -endif
   2.130 -ifeq ($(JVM_VARIANT_MINIMAL1), true)
   2.131 -    MISC_DIR=$(MINIMAL1_DIR)
   2.132 -    GEN_DIR=$(MINIMAL1_BASE_DIR)/generated
   2.133 -endif
   2.134 -
   2.135 -# Bin files (windows)
   2.136 -ifeq ($(OSNAME),windows)
   2.137 -
   2.138 -# Get jvm.lib 
   2.139 -$(EXPORT_LIB_DIR)/%.lib:  $(MISC_DIR)/%.lib
   2.140 +# Common
   2.141 +$(EXPORT_SERVER_DIR)/%.diz:       		$(C2_DIR)/%.diz
   2.142  	$(install-file)
   2.143 -
   2.144 -# Other libraries (like SA)
   2.145 -$(EXPORT_JRE_BIN_DIR)/%.diz: $(MISC_DIR)/%.diz
   2.146 +$(EXPORT_LIB_DIR)/%.jar:			$(C2_DIR)/../generated/%.jar
   2.147  	$(install-file)
   2.148 -$(EXPORT_JRE_BIN_DIR)/%.dll: $(MISC_DIR)/%.dll
   2.149 +$(EXPORT_INCLUDE_DIR)/%:			$(C2_DIR)/../generated/jvmtifiles/%
   2.150  	$(install-file)
   2.151 -$(EXPORT_JRE_BIN_DIR)/%.pdb: $(MISC_DIR)/%.pdb
   2.152 +# Windows
   2.153 +$(EXPORT_SERVER_DIR)/%.dll:			$(C2_DIR)/%.dll
   2.154  	$(install-file)
   2.155 -$(EXPORT_JRE_BIN_DIR)/%.map: $(MISC_DIR)/%.map
   2.156 +$(EXPORT_SERVER_DIR)/%.pdb:			$(C2_DIR)/%.pdb
   2.157  	$(install-file)
   2.158 -
   2.159 -# Client files always come from C1 area
   2.160 -$(EXPORT_CLIENT_DIR)/%.diz:  $(C1_DIR)/%.diz
   2.161 +$(EXPORT_SERVER_DIR)/%.map:			$(C2_DIR)/%.map
   2.162  	$(install-file)
   2.163 -$(EXPORT_CLIENT_DIR)/%.dll:  $(C1_DIR)/%.dll
   2.164 +$(EXPORT_LIB_DIR)/%.lib:			$(C2_DIR)/%.lib
   2.165  	$(install-file)
   2.166 -$(EXPORT_CLIENT_DIR)/%.pdb:  $(C1_DIR)/%.pdb
   2.167 +$(EXPORT_JRE_BIN_DIR)/%.diz:			$(C2_DIR)/%.diz
   2.168  	$(install-file)
   2.169 -$(EXPORT_CLIENT_DIR)/%.map:  $(C1_DIR)/%.map
   2.170 +$(EXPORT_JRE_BIN_DIR)/%.dll:			$(C2_DIR)/%.dll
   2.171  	$(install-file)
   2.172 -
   2.173 -# Server files always come from C2 area
   2.174 -$(EXPORT_SERVER_DIR)/%.diz:  $(C2_DIR)/%.diz
   2.175 +$(EXPORT_JRE_BIN_DIR)/%.pdb:			$(C2_DIR)/%.pdb
   2.176  	$(install-file)
   2.177 -$(EXPORT_SERVER_DIR)/%.dll:  $(C2_DIR)/%.dll
   2.178 +$(EXPORT_JRE_BIN_DIR)/%.map:			$(C2_DIR)/%.map
   2.179  	$(install-file)
   2.180 -$(EXPORT_SERVER_DIR)/%.pdb:  $(C2_DIR)/%.pdb
   2.181 +# Unix
   2.182 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.183  	$(install-file)
   2.184 -$(EXPORT_SERVER_DIR)/%.map:  $(C2_DIR)/%.map
   2.185 +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.186 +	$(install-file)
   2.187 +$(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.188 +	$(install-file)
   2.189 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 	$(C2_DIR)/%.debuginfo
   2.190 +	$(install-file)
   2.191 +$(EXPORT_SERVER_DIR)/%.debuginfo:       	$(C2_DIR)/%.debuginfo
   2.192 +	$(install-file)
   2.193 +$(EXPORT_SERVER_DIR)/64/%.debuginfo:    	$(C2_DIR)/%.debuginfo
   2.194 +	$(install-file)
   2.195 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: 		$(C2_DIR)/%.diz
   2.196 +	$(install-file)
   2.197 +$(EXPORT_SERVER_DIR)/64/%.diz:    		$(C2_DIR)/%.diz
   2.198  	$(install-file)
   2.199  endif
   2.200  
   2.201 -# Minimal JVM files always come from minimal area
   2.202 -$(EXPORT_MINIMAL_DIR)/%.diz:  $(MINIMAL1_DIR)/%.diz
   2.203 +# Client (C1)
   2.204 +ifeq ($(JVM_VARIANT_CLIENT), true)
   2.205 +# Common
   2.206 +$(EXPORT_CLIENT_DIR)/%.diz:       		$(C1_DIR)/%.diz
   2.207  	$(install-file)
   2.208 -$(EXPORT_MINIMAL_DIR)/%.dll:  $(MINIMAL1_DIR)/%.dll
   2.209 +$(EXPORT_LIB_DIR)/%.jar:			$(C1_DIR)/../generated/%.jar
   2.210  	$(install-file)
   2.211 -$(EXPORT_MINIMAL_DIR)/%.pdb:  $(MINIMAL1_DIR)/%.pdb
   2.212 +$(EXPORT_INCLUDE_DIR)/%:			$(C1_DIR)/../generated/jvmtifiles/%
   2.213  	$(install-file)
   2.214 -$(EXPORT_MINIMAL_DIR)/%.map:  $(MINIMAL1_DIR)/%.map
   2.215 +# Windows
   2.216 +$(EXPORT_CLIENT_DIR)/%.dll:			$(C1_DIR)/%.dll
   2.217  	$(install-file)
   2.218 -
   2.219 -# Shared Library
   2.220 -ifneq ($(OSNAME),windows)
   2.221 -    ifeq ($(JVM_VARIANT_SERVER), true)
   2.222 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.223 -		$(install-file)
   2.224 -        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.225 -		$(install-file)
   2.226 -        $(EXPORT_SERVER_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C2_DIR)/%.$(LIBRARY_SUFFIX)
   2.227 -		$(install-file)
   2.228 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 		$(C2_DIR)/%.debuginfo
   2.229 -		$(install-file)
   2.230 -        $(EXPORT_SERVER_DIR)/%.debuginfo:       		$(C2_DIR)/%.debuginfo
   2.231 -		$(install-file)
   2.232 -        $(EXPORT_SERVER_DIR)/64/%.debuginfo:    		$(C2_DIR)/%.debuginfo
   2.233 -		$(install-file)
   2.234 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: 			$(C2_DIR)/%.diz
   2.235 -		$(install-file)
   2.236 -        $(EXPORT_SERVER_DIR)/%.diz:       			$(C2_DIR)/%.diz
   2.237 -		$(install-file)
   2.238 -        $(EXPORT_SERVER_DIR)/64/%.diz:    			$(C2_DIR)/%.diz
   2.239 -		$(install-file)
   2.240 -    endif
   2.241 -    ifeq ($(JVM_VARIANT_CLIENT), true)
   2.242 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.243 -		$(install-file)
   2.244 -        $(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX):       $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.245 -		$(install-file)
   2.246 -        $(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.247 -		$(install-file)
   2.248 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 		$(C1_DIR)/%.debuginfo
   2.249 -		$(install-file)
   2.250 -        $(EXPORT_CLIENT_DIR)/%.debuginfo:       		$(C1_DIR)/%.debuginfo
   2.251 -		$(install-file)
   2.252 -        $(EXPORT_CLIENT_DIR)/64/%.debuginfo:    		$(C1_DIR)/%.debuginfo
   2.253 -		$(install-file)
   2.254 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: 			$(C1_DIR)/%.diz
   2.255 -		$(install-file)
   2.256 -        $(EXPORT_CLIENT_DIR)/%.diz:       			$(C1_DIR)/%.diz
   2.257 -		$(install-file)
   2.258 -        $(EXPORT_CLIENT_DIR)/64/%.diz:    			$(C1_DIR)/%.diz
   2.259 -		$(install-file)
   2.260 -    endif
   2.261 -    ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   2.262 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
   2.263 -		$(install-file)
   2.264 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo):	$(SHARK_DIR)/%.debuginfo
   2.265 -		$(install-file)
   2.266 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(SHARK_DIR)/%.diz
   2.267 -		$(install-file)
   2.268 -        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
   2.269 -		$(install-file)
   2.270 -        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(SHARK_DIR)/%.debuginfo
   2.271 -		$(install-file)
   2.272 -        $(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_DIR)/%.diz
   2.273 -		$(install-file)
   2.274 -    endif
   2.275 -    ifeq ($(JVM_VARIANT_ZERO), true)
   2.276 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
   2.277 -		$(install-file)
   2.278 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
   2.279 -		$(install-file)
   2.280 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(ZERO_DIR)/%.diz
   2.281 -		$(install-file)
   2.282 -        $(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
   2.283 -		$(install-file)
   2.284 -        $(EXPORT_SERVER_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
   2.285 -		$(install-file)
   2.286 -        $(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_DIR)/%.diz
   2.287 -		$(install-file)
   2.288 -    endif
   2.289 -    ifeq ($(JVM_VARIANT_MINIMAL1), true)
   2.290 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.291 -		$(install-file)
   2.292 -        $(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.293 -		$(install-file)
   2.294 -        $(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.295 -		$(install-file)
   2.296 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.297 -		$(install-file)
   2.298 -        $(EXPORT_MINIMAL_DIR)/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.299 -		$(install-file)
   2.300 -        $(EXPORT_MINIMAL_DIR)/64/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.301 -		$(install-file)
   2.302 -        $(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(MINIMAL1_DIR)/%.diz
   2.303 -		$(install-file)
   2.304 -        $(EXPORT_MINIMAL_DIR)/%.diz:			$(MINIMAL1_DIR)/%.diz
   2.305 -		$(install-file)
   2.306 -        $(EXPORT_MINIMAL_DIR)/64/%.diz:			$(MINIMAL1_DIR)/%.diz
   2.307 -		$(install-file)
   2.308 -    endif
   2.309 +$(EXPORT_CLIENT_DIR)/%.pdb:			$(C1_DIR)/%.pdb
   2.310 +	$(install-file)
   2.311 +$(EXPORT_CLIENT_DIR)/%.map:			$(C1_DIR)/%.map
   2.312 +	$(install-file)
   2.313 +$(EXPORT_LIB_DIR)/%.lib:			$(C1_DIR)/%.lib
   2.314 +	$(install-file)
   2.315 +$(EXPORT_JRE_BIN_DIR)/%.diz:			$(C1_DIR)/%.diz
   2.316 +	$(install-file)
   2.317 +$(EXPORT_JRE_BIN_DIR)/%.dll:			$(C1_DIR)/%.dll
   2.318 +	$(install-file)
   2.319 +$(EXPORT_JRE_BIN_DIR)/%.pdb:			$(C1_DIR)/%.pdb
   2.320 +	$(install-file)
   2.321 +$(EXPORT_JRE_BIN_DIR)/%.map:			$(C1_DIR)/%.map
   2.322 +	$(install-file)
   2.323 +# Unix
   2.324 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.325 +	$(install-file)
   2.326 +$(EXPORT_CLIENT_DIR)/%.$(LIBRARY_SUFFIX):       $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.327 +	$(install-file)
   2.328 +$(EXPORT_CLIENT_DIR)/64/%.$(LIBRARY_SUFFIX):    $(C1_DIR)/%.$(LIBRARY_SUFFIX)
   2.329 +	$(install-file)
   2.330 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo: 	$(C1_DIR)/%.debuginfo
   2.331 +	$(install-file)
   2.332 +$(EXPORT_CLIENT_DIR)/%.debuginfo:       	$(C1_DIR)/%.debuginfo
   2.333 +	$(install-file)
   2.334 +$(EXPORT_CLIENT_DIR)/64/%.debuginfo:    	$(C1_DIR)/%.debuginfo
   2.335 +	$(install-file)
   2.336 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz: 		$(C1_DIR)/%.diz
   2.337 +	$(install-file)
   2.338 +$(EXPORT_CLIENT_DIR)/64/%.diz:    		$(C1_DIR)/%.diz
   2.339 +	$(install-file)
   2.340  endif
   2.341  
   2.342 -# Jar file (sa-jdi.jar)
   2.343 -$(EXPORT_LIB_DIR)/%.jar: $(GEN_DIR)/%.jar
   2.344 +# Minimal1
   2.345 +ifeq ($(JVM_VARIANT_MINIMAL1), true)
   2.346 +# Common
   2.347 +$(EXPORT_MINIMAL_DIR)/%.diz:			$(MINIMAL1_DIR)/%.diz
   2.348  	$(install-file)
   2.349 +$(EXPORT_LIB_DIR)/%.jar:			$(MINIMAL1_DIR)/../generated/%.jar
   2.350 +	$(install-file)
   2.351 +$(EXPORT_INCLUDE_DIR)/%:			$(MINIMAL1_DIR)/../generated/jvmtifiles/%
   2.352 +	$(install-file)
   2.353 +# Windows
   2.354 +$(EXPORT_MINIMAL_DIR)/%.dll:			$(MINIMAL1_DIR)/%.dll
   2.355 +	$(install-file)
   2.356 +$(EXPORT_MINIMAL_DIR)/%.pdb:			$(MINIMAL1_DIR)/%.pdb
   2.357 +	$(install-file)
   2.358 +$(EXPORT_MINIMAL_DIR)/%.map:			$(MINIMAL1_DIR)/%.map
   2.359 +	$(install-file)
   2.360 +$(EXPORT_LIB_DIR)/%.lib:			$(MINIMAL1_DIR)/%.lib
   2.361 +	$(install-file)
   2.362 +$(EXPORT_JRE_BIN_DIR)/%.diz:			$(MINIMAL1_DIR)/%.diz
   2.363 +	$(install-file)
   2.364 +$(EXPORT_JRE_BIN_DIR)/%.dll:			$(MINIMAL1_DIR)/%.dll
   2.365 +	$(install-file)
   2.366 +$(EXPORT_JRE_BIN_DIR)/%.pdb:			$(MINIMAL1_DIR)/%.pdb
   2.367 +	$(install-file)
   2.368 +$(EXPORT_JRE_BIN_DIR)/%.map:			$(MINIMAL1_DIR)/%.map
   2.369 +	$(install-file)
   2.370 +# Unix
   2.371 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.372 +	$(install-file)
   2.373 +$(EXPORT_MINIMAL_DIR)/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.374 +	$(install-file)
   2.375 +$(EXPORT_MINIMAL_DIR)/64/%.$(LIBRARY_SUFFIX):	$(MINIMAL1_DIR)/%.$(LIBRARY_SUFFIX)
   2.376 +	$(install-file)
   2.377 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.378 +	$(install-file)
   2.379 +$(EXPORT_MINIMAL_DIR)/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.380 +	$(install-file)
   2.381 +$(EXPORT_MINIMAL_DIR)/64/%.debuginfo:		$(MINIMAL1_DIR)/%.debuginfo
   2.382 +	$(install-file)
   2.383 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(MINIMAL1_DIR)/%.diz
   2.384 +	$(install-file)
   2.385 +$(EXPORT_MINIMAL_DIR)/64/%.diz:			$(MINIMAL1_DIR)/%.diz
   2.386 +	$(install-file)
   2.387 +endif
   2.388  
   2.389 -# Include files (jvmti.h, jvmticmlr.h, jni.h, $(JDK_INCLUDE_SUBDIR)/jni_md.h, jmm.h, jfr.h)
   2.390 -$(EXPORT_INCLUDE_DIR)/%: $(GEN_DIR)/jvmtifiles/%
   2.391 +# Zero
   2.392 +ifeq ($(JVM_VARIANT_ZERO), true)
   2.393 +# Common
   2.394 +$(EXPORT_LIB_DIR)/%.jar:			$(ZERO_DIR)/../generated/%.jar
   2.395  	$(install-file)
   2.396 +$(EXPORT_INCLUDE_DIR)/%:			$(ZERO_DIR)/../generated/jvmtifiles/%
   2.397 +	$(install-file)
   2.398 +# Unix
   2.399 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
   2.400 +	$(install-file)
   2.401 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
   2.402 +	$(install-file)
   2.403 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(ZERO_DIR)/%.diz
   2.404 +	$(install-file)
   2.405 +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(ZERO_DIR)/%.$(LIBRARY_SUFFIX)
   2.406 +	$(install-file)
   2.407 +$(EXPORT_SERVER_DIR)/%.debuginfo:		$(ZERO_DIR)/%.debuginfo
   2.408 +	$(install-file)
   2.409 +$(EXPORT_SERVER_DIR)/%.diz:			$(ZERO_DIR)/%.diz
   2.410 +	$(install-file)
   2.411 +endif
   2.412 +
   2.413 +# Shark
   2.414 +ifeq ($(JVM_VARIANT_ZEROSHARK), true)
   2.415 +# Common
   2.416 +$(EXPORT_LIB_DIR)/%.jar:			$(SHARK_DIR)/../generated/%.jar
   2.417 +	$(install-file)
   2.418 +$(EXPORT_INCLUDE_DIR)/%:			$(SHARK_DIR)/../generated/jvmtifiles/%
   2.419 +	$(install-file)
   2.420 +# Unix
   2.421 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.$(LIBRARY_SUFFIX): $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
   2.422 +	$(install-file)
   2.423 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.debuginfo):	$(SHARK_DIR)/%.debuginfo
   2.424 +	$(install-file)
   2.425 +$(EXPORT_JRE_LIB_ARCH_DIR)/%.diz:		$(SHARK_DIR)/%.diz
   2.426 +	$(install-file)
   2.427 +$(EXPORT_SERVER_DIR)/%.$(LIBRARY_SUFFIX):       $(SHARK_DIR)/%.$(LIBRARY_SUFFIX)
   2.428 +	$(install-file)
   2.429 +$(EXPORT_SERVER_DIR)/%.debuginfo:		$(SHARK_DIR)/%.debuginfo
   2.430 +	$(install-file)
   2.431 +$(EXPORT_SERVER_DIR)/%.diz:			$(SHARK_DIR)/%.diz
   2.432 +	$(install-file)
   2.433 +endif
   2.434  
   2.435  $(EXPORT_INCLUDE_DIR)/%: $(HS_SRC_DIR)/share/vm/code/%
   2.436  	$(install-file)
   2.437 @@ -541,11 +546,11 @@
   2.438  	@$(RUN_JVM) -XXaltjvm=$(ALTJVM_DIR) -showversion -help
   2.439  
   2.440  # C2 test targets
   2.441 -test_product test_optimized test_fastdebug test_jvmg:
   2.442 +test_product test_optimized test_fastdebug test_debug:
   2.443  	@$(MAKE) generic_test ALTJVM_DIR="$(C2_DIR)/$(@:test_%=%)"
   2.444  
   2.445  # C1 test targets
   2.446 -test_product1 test_optimized1 test_fastdebug1 test_jvmg1:
   2.447 +test_product1 test_optimized1 test_fastdebug1 test_debug1:
   2.448    ifeq ($(ARCH_DATA_MODEL), 32)
   2.449  	@$(MAKE) generic_test ALTJVM_DIR="$(C1_DIR)/$(@:test_%1=%)"
   2.450    else
   2.451 @@ -553,15 +558,15 @@
   2.452    endif
   2.453  
   2.454  # Zero test targets
   2.455 -test_productzero test_optimizedzero test_fastdebugzero test_jvmgzero:
   2.456 +test_productzero test_optimizedzero test_fastdebugzero test_debugzero:
   2.457  	@$(MAKE) generic_test ALTJVM_DIR="$(ZERO_DIR)/$(@:test_%zero=%)"
   2.458  
   2.459  # Shark test targets
   2.460 -test_productshark test_optimizedshark test_fastdebugshark test_jvmgshark:
   2.461 +test_productshark test_optimizedshark test_fastdebugshark test_debugshark:
   2.462  	@$(MAKE) generic_test ALTJVM_DIR="$(SHARK_DIR)/$(@:test_%shark=%)"
   2.463  
   2.464  # Minimal1 test targets
   2.465 -test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_jvmgminimal1:
   2.466 +test_productminimal1 test_optimizedminimal1 test_fastdebugminimal1 test_debugminimal1:
   2.467  	@$(MAKE) generic_test ALTJVM_DIR="$(MINIMAL1_DIR)/$(@:test_%minimal1=%)"
   2.468  
   2.469  
   2.470 @@ -626,7 +631,7 @@
   2.471  # Intro help message
   2.472  intro_help:
   2.473  	@$(ECHO) \
   2.474 -"Makefile for the Hotspot workspace." 
   2.475 +"Makefile for the Hotspot workspace."
   2.476  	@$(ECHO) \
   2.477  "Default behavior is to build and create an export area for the j2se builds."
   2.478  
   2.479 @@ -637,7 +642,7 @@
   2.480  	@$(ECHO) "world:            Same as: all create_jdk"
   2.481  	@$(ECHO) "all_product:      Same as: product product1 export_product"
   2.482  	@$(ECHO) "all_fastdebug:    Same as: fastdebug fastdebug1 export_fastdebug"
   2.483 -	@$(ECHO) "all_debug:        Same as: jvmg jvmg1 export_debug"
   2.484 +	@$(ECHO) "all_debug:        Same as: debug debug1 export_debug"
   2.485  	@$(ECHO) "all_optimized:    Same as: optimized optimized1 export_optimized"
   2.486  	@$(ECHO) "clean:            Clean all areas"
   2.487  	@$(ECHO) "export_product:   Export product files to EXPORT_PATH"
   2.488 @@ -730,7 +735,7 @@
   2.489  	@$(ECHO) \
   2.490  "  $(MAKE) world"
   2.491  	@$(ECHO) \
   2.492 -"  $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)" 
   2.493 +"  $(MAKE) ALT_BOOTDIR=/opt/java/jdk$(PREVIOUS_JDK_VERSION)"
   2.494  	@$(ECHO) \
   2.495  "  $(MAKE) ALT_JDK_IMPORT_PATH=/opt/java/jdk$(JDK_VERSION)"
   2.496  
   2.497 @@ -741,6 +746,23 @@
   2.498  endif
   2.499  endif
   2.500  
   2.501 +# Compatibility for transition to new naming
   2.502 +warn_jvmg_deprecated:
   2.503 +	echo "Warning: The jvmg target has been replaced with debug"
   2.504 +	echo "Warning: Please update your usage"
   2.505 +
   2.506 +jvmg: warn_jvmg_deprecated debug
   2.507 +
   2.508 +jvmg1: warn_jvmg_deprecated debug1
   2.509 +
   2.510 +jvmgminimal1: warn_jvmg_deprecated debugminimal1
   2.511 +
   2.512 +jvmgcore: warn_jvmg_deprecated debugcore
   2.513 +
   2.514 +jvmgzero: warn_jvmg_deprecated debugzero
   2.515 +
   2.516 +jvmgshark: warn_jvmg_deprecated debugshark
   2.517 +
   2.518  # JPRT rule to build this workspace
   2.519  include $(GAMMADIR)/make/jprt.gmk
   2.520  
     3.1 --- a/make/bsd/Makefile	Wed Apr 24 20:55:28 2013 -0400
     3.2 +++ b/make/bsd/Makefile	Wed Apr 24 21:11:02 2013 -0400
     3.3 @@ -1,5 +1,5 @@
     3.4  #
     3.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     3.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     3.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.8  #
     3.9  # This code is free software; you can redistribute it and/or modify it
    3.10 @@ -142,55 +142,43 @@
    3.11  #
    3.12  #       debug           compiler2       <os>_<arch>_compiler2/debug
    3.13  #       fastdebug       compiler2       <os>_<arch>_compiler2/fastdebug
    3.14 -#       jvmg            compiler2       <os>_<arch>_compiler2/jvmg
    3.15  #       optimized       compiler2       <os>_<arch>_compiler2/optimized
    3.16 -#       profiled        compiler2       <os>_<arch>_compiler2/profiled
    3.17  #       product         compiler2       <os>_<arch>_compiler2/product
    3.18  #
    3.19  #       debug1          compiler1       <os>_<arch>_compiler1/debug
    3.20  #       fastdebug1      compiler1       <os>_<arch>_compiler1/fastdebug
    3.21 -#       jvmg1           compiler1       <os>_<arch>_compiler1/jvmg
    3.22  #       optimized1      compiler1       <os>_<arch>_compiler1/optimized
    3.23 -#       profiled1       compiler1       <os>_<arch>_compiler1/profiled
    3.24  #       product1        compiler1       <os>_<arch>_compiler1/product
    3.25  #
    3.26  #       debugcore       core            <os>_<arch>_core/debug
    3.27  #       fastdebugcore   core            <os>_<arch>_core/fastdebug
    3.28 -#       jvmgcore        core            <os>_<arch>_core/jvmg
    3.29  #       optimizedcore   core            <os>_<arch>_core/optimized
    3.30 -#       profiledcore    core            <os>_<arch>_core/profiled
    3.31  #       productcore     core            <os>_<arch>_core/product
    3.32  #
    3.33  #       debugzero       zero            <os>_<arch>_zero/debug
    3.34  #       fastdebugzero   zero            <os>_<arch>_zero/fastdebug
    3.35 -#       jvmgzero        zero            <os>_<arch>_zero/jvmg
    3.36  #       optimizedzero   zero            <os>_<arch>_zero/optimized
    3.37 -#       profiledzero    zero            <os>_<arch>_zero/profiled
    3.38  #       productzero     zero            <os>_<arch>_zero/product
    3.39  #
    3.40  #       debugshark      shark           <os>_<arch>_shark/debug
    3.41  #       fastdebugshark  shark           <os>_<arch>_shark/fastdebug
    3.42 -#       jvmgshark       shark           <os>_<arch>_shark/jvmg
    3.43  #       optimizedshark  shark           <os>_<arch>_shark/optimized
    3.44 -#       profiledshark   shark           <os>_<arch>_shark/profiled
    3.45  #       productshark    shark           <os>_<arch>_shark/product
    3.46  #
    3.47  #       fastdebugminimal1 minimal1      <os>_<arch>_minimal1/fastdebug
    3.48 -#       jvmgminimal1      minimal1      <os>_<arch>_minimal1/jvmg
    3.49 +#       debugminimal1     minimal1      <os>_<arch>_minimal1/debug
    3.50  #       productminimal1   minimal1      <os>_<arch>_minimal1/product
    3.51  #
    3.52  # What you get with each target:
    3.53  #
    3.54 -# debug*     - "thin" libjvm - debug info linked into the gamma launcher
    3.55 +# debug*     - debug compile with asserts enabled
    3.56  # fastdebug* - optimized compile, but with asserts enabled
    3.57 -# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
    3.58  # optimized* - optimized compile, no asserts
    3.59 -# profiled*  - gprof
    3.60  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
    3.61  
    3.62  # This target list needs to be coordinated with the usage message
    3.63  # in the build.sh script:
    3.64 -TARGETS           = debug jvmg fastdebug optimized profiled product
    3.65 +TARGETS           = debug fastdebug optimized product
    3.66  
    3.67  ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
    3.68    SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
    3.69 @@ -354,15 +342,29 @@
    3.70  	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) jvmtidocs
    3.71  
    3.72  # Synonyms for win32-like targets.
    3.73 -compiler2:  jvmg product
    3.74 +compiler2:  debug product
    3.75  
    3.76 -compiler1:  jvmg1 product1
    3.77 +compiler1:  debug1 product1
    3.78  
    3.79 -core: jvmgcore productcore
    3.80 +core: debugcore productcore
    3.81  
    3.82 -zero: jvmgzero productzero
    3.83 +zero: debugzero productzero
    3.84  
    3.85 -shark: jvmgshark productshark
    3.86 +shark: debugshark productshark
    3.87 +
    3.88 +warn_jvmg_deprecated:
    3.89 +	echo "Warning: The jvmg target has been replaced with debug"
    3.90 +	echo "Warning: Please update your usage"
    3.91 +
    3.92 +jvmg: warn_jvmg_deprecated debug
    3.93 +
    3.94 +jvmg1: warn_jvmg_deprecated debug1
    3.95 +
    3.96 +jvmgcore: warn_jvmg_deprecated debugcore
    3.97 +
    3.98 +jvmgzero: warn_jvmg_deprecated debugzero
    3.99 +
   3.100 +jvmgshark: warn_jvmg_deprecated debugshark
   3.101  
   3.102  clean_docs:
   3.103  	rm -rf $(SUBDIR_DOCS)
     4.1 --- a/make/bsd/makefiles/buildtree.make	Wed Apr 24 20:55:28 2013 -0400
     4.2 +++ b/make/bsd/makefiles/buildtree.make	Wed Apr 24 21:11:02 2013 -0400
     4.3 @@ -19,7 +19,7 @@
     4.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
     4.5  # or visit www.oracle.com if you need additional information or have any
     4.6  # questions.
     4.7 -#  
     4.8 +#
     4.9  #
    4.10  
    4.11  # Usage:
    4.12 @@ -46,11 +46,11 @@
    4.13  # Makefile	- for "make foo"
    4.14  # flags.make	- with macro settings
    4.15  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
    4.16 -# adlc.make	- 
    4.17 +# adlc.make	-
    4.18  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
    4.19  # sa.make	- generate SA jar file and natives
    4.20  # env.[ck]sh	- environment settings
    4.21 -# 
    4.22 +#
    4.23  # The makefiles are split this way so that "make foo" will run faster by not
    4.24  # having to read the dependency files for the vm.
    4.25  
    4.26 @@ -122,7 +122,7 @@
    4.27  	$(PLATFORM_DIR)/generated/jvmtifiles \
    4.28  	$(PLATFORM_DIR)/generated/dtracefiles
    4.29  
    4.30 -TARGETS      = debug fastdebug jvmg optimized product profiled
    4.31 +TARGETS      = debug fastdebug optimized product
    4.32  SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
    4.33  
    4.34  # For dependencies and recursive makes.
    4.35 @@ -186,8 +186,8 @@
    4.36  	$(QUIETLY) mkdir -p $@
    4.37  
    4.38  # Convenience macro which takes a source relative path, applies $(1) to the
    4.39 -# absolute path, and then replaces $(GAMMADIR) in the result with a 
    4.40 -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
    4.41 +# absolute path, and then replaces $(GAMMADIR) in the result with a
    4.42 +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
    4.43  gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
    4.44  
    4.45  # This bit is needed to enable local rebuilds.
    4.46 @@ -279,8 +279,6 @@
    4.47  	$(QUIETLY) ( \
    4.48  	$(BUILDTREE_COMMENT); \
    4.49  	echo; \
    4.50 -	[ "$(TARGET)" = profiled ] && \
    4.51 -	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \
    4.52  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
    4.53  	) > $@
    4.54  
    4.55 @@ -381,7 +379,7 @@
    4.56  	$(QUIETLY) ( \
    4.57  	$(BUILDTREE_COMMENT); \
    4.58  	echo "JDK=${JAVA_HOME}"; \
    4.59 -	) > $@	   
    4.60 +	) > $@
    4.61  
    4.62  FORCE:
    4.63  
     5.1 --- a/make/bsd/makefiles/debug.make	Wed Apr 24 20:55:28 2013 -0400
     5.2 +++ b/make/bsd/makefiles/debug.make	Wed Apr 24 21:11:02 2013 -0400
     5.3 @@ -1,5 +1,5 @@
     5.4  #
     5.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     5.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     5.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.8  #
     5.9  # This code is free software; you can redistribute it and/or modify it
    5.10 @@ -19,7 +19,7 @@
    5.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.12  # or visit www.oracle.com if you need additional information or have any
    5.13  # questions.
    5.14 -#  
    5.15 +#
    5.16  #
    5.17  
    5.18  # Sets make macros for making debug version of VM
    5.19 @@ -27,17 +27,16 @@
    5.20  # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
    5.21  DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
    5.22  DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
    5.23 -CFLAGS += $(DEBUG_CFLAGS/BYFILE)
    5.24 +
    5.25 +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
    5.26 +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
    5.27 +
    5.28 +# Set the environment variable HOTSPARC_GENERIC to "true"
    5.29 +# to inhibit the effect of the previous line on CFLAGS.
    5.30  
    5.31  # Linker mapfile
    5.32  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    5.33  
    5.34 -_JUNK_ := $(shell echo -e >&2 ""\
    5.35 - "----------------------------------------------------------------------\n" \
    5.36 - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \
    5.37 - "Please use 'make jvmg' to build debug JVM.                            \n" \
    5.38 - "----------------------------------------------------------------------\n")
    5.39 -
    5.40  VERSION = debug
    5.41 -SYSDEFS += -DASSERT -DDEBUG
    5.42 +SYSDEFS += -DASSERT
    5.43  PICFLAGS = DEFAULT
     6.1 --- a/make/bsd/makefiles/defs.make	Wed Apr 24 20:55:28 2013 -0400
     6.2 +++ b/make/bsd/makefiles/defs.make	Wed Apr 24 21:11:02 2013 -0400
     6.3 @@ -1,5 +1,5 @@
     6.4  #
     6.5 -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
     6.6 +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
     6.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8  #
     6.9  # This code is free software; you can redistribute it and/or modify it
    6.10 @@ -19,7 +19,7 @@
    6.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.12  # or visit www.oracle.com if you need additional information or have any
    6.13  # questions.
    6.14 -#  
    6.15 +#
    6.16  #
    6.17  
    6.18  # The common definitions for hotspot bsd builds.
    6.19 @@ -86,7 +86,7 @@
    6.20      VM_PLATFORM     = bsd_i486
    6.21      HS_ARCH         = x86
    6.22      # We have to reset ARCH to i386 since SRCARCH relies on it
    6.23 -    ARCH            = i386   
    6.24 +    ARCH            = i386
    6.25    endif
    6.26  endif
    6.27  
    6.28 @@ -146,9 +146,6 @@
    6.29    LIBRARY_SUFFIX=so
    6.30  endif
    6.31  
    6.32 -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
    6.33 -VM_DEBUG=jvmg
    6.34 -
    6.35  EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
    6.36  
    6.37  # client and server subdirectories have symbolic links to ../libjsig.so
    6.38 @@ -177,7 +174,7 @@
    6.39      else
    6.40  	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
    6.41      endif
    6.42 -  endif 
    6.43 +  endif
    6.44  endif
    6.45  
    6.46  # Serviceability Binaries
     7.1 --- a/make/bsd/makefiles/fastdebug.make	Wed Apr 24 20:55:28 2013 -0400
     7.2 +++ b/make/bsd/makefiles/fastdebug.make	Wed Apr 24 21:11:02 2013 -0400
     7.3 @@ -1,5 +1,5 @@
     7.4  #
     7.5 -# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
     7.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     7.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8  #
     7.9  # This code is free software; you can redistribute it and/or modify it
    7.10 @@ -19,7 +19,7 @@
    7.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    7.12  # or visit www.oracle.com if you need additional information or have any
    7.13  # questions.
    7.14 -#  
    7.15 +#
    7.16  #
    7.17  
    7.18  # Sets make macros for making debug version of VM
    7.19 @@ -59,5 +59,5 @@
    7.20  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    7.21  
    7.22  VERSION = optimized
    7.23 -SYSDEFS += -DASSERT -DFASTDEBUG
    7.24 +SYSDEFS += -DASSERT
    7.25  PICFLAGS = DEFAULT
     8.1 --- a/make/bsd/makefiles/jvmg.make	Wed Apr 24 20:55:28 2013 -0400
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,42 +0,0 @@
     8.4 -#
     8.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     8.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.7 -#
     8.8 -# This code is free software; you can redistribute it and/or modify it
     8.9 -# under the terms of the GNU General Public License version 2 only, as
    8.10 -# published by the Free Software Foundation.
    8.11 -#
    8.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
    8.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.15 -# version 2 for more details (a copy is included in the LICENSE file that
    8.16 -# accompanied this code).
    8.17 -#
    8.18 -# You should have received a copy of the GNU General Public License version
    8.19 -# 2 along with this work; if not, write to the Free Software Foundation,
    8.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 -#
    8.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.23 -# or visit www.oracle.com if you need additional information or have any
    8.24 -# questions.
    8.25 -#  
    8.26 -#
    8.27 -
    8.28 -# Sets make macros for making debug version of VM
    8.29 -
    8.30 -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
    8.31 -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
    8.32 -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
    8.33 -
    8.34 -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
    8.35 -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
    8.36 -
    8.37 -# Set the environment variable HOTSPARC_GENERIC to "true"
    8.38 -# to inhibit the effect of the previous line on CFLAGS.
    8.39 -
    8.40 -# Linker mapfile
    8.41 -MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    8.42 -
    8.43 -VERSION = debug
    8.44 -SYSDEFS += -DASSERT -DDEBUG
    8.45 -PICFLAGS = DEFAULT
     9.1 --- a/make/bsd/makefiles/profiled.make	Wed Apr 24 20:55:28 2013 -0400
     9.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     9.3 @@ -1,30 +0,0 @@
     9.4 -#
     9.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
     9.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.7 -#
     9.8 -# This code is free software; you can redistribute it and/or modify it
     9.9 -# under the terms of the GNU General Public License version 2 only, as
    9.10 -# published by the Free Software Foundation.
    9.11 -#
    9.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
    9.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    9.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    9.15 -# version 2 for more details (a copy is included in the LICENSE file that
    9.16 -# accompanied this code).
    9.17 -#
    9.18 -# You should have received a copy of the GNU General Public License version
    9.19 -# 2 along with this work; if not, write to the Free Software Foundation,
    9.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    9.21 -#
    9.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    9.23 -# or visit www.oracle.com if you need additional information or have any
    9.24 -# questions.
    9.25 -#  
    9.26 -#
    9.27 -
    9.28 -# Sets make macros for making profiled version of Gamma VM
    9.29 -# (It is also optimized.)
    9.30 -
    9.31 -CFLAGS += -pg
    9.32 -AOUT_FLAGS += -pg
    9.33 -LDNOMAP = true
    10.1 --- a/make/bsd/makefiles/vm.make	Wed Apr 24 20:55:28 2013 -0400
    10.2 +++ b/make/bsd/makefiles/vm.make	Wed Apr 24 21:11:02 2013 -0400
    10.3 @@ -187,7 +187,7 @@
    10.4  Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
    10.5  Src_Dirs := $(Src_Dirs/$(TYPE))
    10.6  
    10.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
    10.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
    10.9  COMPILER1_SPECIFIC_FILES := c1_\*
   10.10  SHARK_SPECIFIC_FILES     := shark
   10.11  ZERO_SPECIFIC_FILES      := zero
    11.1 --- a/make/hotspot_version	Wed Apr 24 20:55:28 2013 -0400
    11.2 +++ b/make/hotspot_version	Wed Apr 24 21:11:02 2013 -0400
    11.3 @@ -35,7 +35,7 @@
    11.4  
    11.5  HS_MAJOR_VER=25
    11.6  HS_MINOR_VER=0
    11.7 -HS_BUILD_NUMBER=27
    11.8 +HS_BUILD_NUMBER=30
    11.9  
   11.10  JDK_MAJOR_VER=1
   11.11  JDK_MINOR_VER=8
    12.1 --- a/make/jprt.properties	Wed Apr 24 20:55:28 2013 -0400
    12.2 +++ b/make/jprt.properties	Wed Apr 24 21:11:02 2013 -0400
    12.3 @@ -133,15 +133,15 @@
    12.4  # Standard list of jprt build targets for this source tree
    12.5  
    12.6  jprt.build.targets.standard= \
    12.7 -    ${jprt.my.solaris.sparc}-{product|fastdebug|debug}, \
    12.8 -    ${jprt.my.solaris.sparcv9}-{product|fastdebug|debug}, \
    12.9 -    ${jprt.my.solaris.i586}-{product|fastdebug|debug}, \
   12.10 -    ${jprt.my.solaris.x64}-{product|fastdebug|debug}, \
   12.11 -    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
   12.12 +    ${jprt.my.solaris.sparc}-{product|fastdebug}, \
   12.13 +    ${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
   12.14 +    ${jprt.my.solaris.i586}-{product|fastdebug}, \
   12.15 +    ${jprt.my.solaris.x64}-{product|fastdebug}, \
   12.16 +    ${jprt.my.linux.i586}-{product|fastdebug}, \
   12.17      ${jprt.my.linux.x64}-{product|fastdebug}, \
   12.18 -    ${jprt.my.macosx.x64}-{product|fastdebug|debug}, \
   12.19 -    ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
   12.20 -    ${jprt.my.windows.x64}-{product|fastdebug|debug}, \
   12.21 +    ${jprt.my.macosx.x64}-{product|fastdebug}, \
   12.22 +    ${jprt.my.windows.i586}-{product|fastdebug}, \
   12.23 +    ${jprt.my.windows.x64}-{product|fastdebug}, \
   12.24      ${jprt.my.linux.armvh}-{product|fastdebug}
   12.25  
   12.26  jprt.build.targets.open= \
   12.27 @@ -150,7 +150,7 @@
   12.28      ${jprt.my.linux.x64}-{productOpen}
   12.29  
   12.30  jprt.build.targets.embedded= \
   12.31 -    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
   12.32 +    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb}, \
   12.33      ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
   12.34      ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
   12.35      ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
   12.36 @@ -174,21 +174,18 @@
   12.37      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-scimark, \
   12.38      ${jprt.my.solaris.sparc}-product-{c1|c2}-runThese, \
   12.39      ${jprt.my.solaris.sparc}-fastdebug-c1-runThese_Xshare, \
   12.40 -    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_default, \
   12.41      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
   12.42      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
   12.43      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
   12.44      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
   12.45      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
   12.46      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
   12.47 -    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_default, \
   12.48      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_SerialGC, \
   12.49      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParallelGC, \
   12.50      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParNewGC, \
   12.51      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_CMS, \
   12.52      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_G1, \
   12.53      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-GCOld_ParOldGC, \
   12.54 -    ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_default, \
   12.55      ${jprt.my.solaris.sparc}-{product|fastdebug}-c2-jbb_default_nontiered, \
   12.56      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_SerialGC, \
   12.57      ${jprt.my.solaris.sparc}-{product|fastdebug}-{c1|c2}-jbb_ParallelGC, \
   12.58 @@ -201,21 +198,18 @@
   12.59      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jvm98_nontiered, \
   12.60      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-scimark, \
   12.61      ${jprt.my.solaris.sparcv9}-product-c2-runThese, \
   12.62 -    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_default, \
   12.63      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_SerialGC, \
   12.64      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
   12.65      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
   12.66      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_CMS, \
   12.67      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_G1, \
   12.68      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
   12.69 -    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_default, \
   12.70      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_SerialGC, \
   12.71      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParallelGC, \
   12.72      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParNewGC, \
   12.73      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_CMS, \
   12.74      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_G1, \
   12.75      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-GCOld_ParOldGC, \
   12.76 -    ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default, \
   12.77      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_default_nontiered, \
   12.78      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_SerialGC, \
   12.79      ${jprt.my.solaris.sparcv9}-{product|fastdebug}-c2-jbb_ParallelGC, \
   12.80 @@ -229,21 +223,18 @@
   12.81      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-scimark, \
   12.82      ${jprt.my.solaris.x64}-product-c2-runThese, \
   12.83      ${jprt.my.solaris.x64}-product-c2-runThese_Xcomp, \
   12.84 -    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_default, \
   12.85      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
   12.86      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
   12.87      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
   12.88      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
   12.89      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_G1, \
   12.90      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
   12.91 -    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_default, \
   12.92      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
   12.93      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
   12.94      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
   12.95      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_CMS, \
   12.96      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_G1, \
   12.97      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
   12.98 -    ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default, \
   12.99      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
  12.100      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_SerialGC, \
  12.101      ${jprt.my.solaris.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
  12.102 @@ -258,28 +249,24 @@
  12.103      ${jprt.my.solaris.i586}-product-{c1|c2}-runThese_Xcomp, \
  12.104      ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xcomp, \
  12.105      ${jprt.my.solaris.i586}-fastdebug-c1-runThese_Xshare, \
  12.106 -    ${jprt.my.solaris.i586}-product-c1-GCBasher_default, \
  12.107      ${jprt.my.solaris.i586}-product-c1-GCBasher_SerialGC, \
  12.108      ${jprt.my.solaris.i586}-product-c1-GCBasher_ParallelGC, \
  12.109      ${jprt.my.solaris.i586}-product-c1-GCBasher_ParNewGC, \
  12.110      ${jprt.my.solaris.i586}-product-c1-GCBasher_CMS, \
  12.111      ${jprt.my.solaris.i586}-product-c1-GCBasher_G1, \
  12.112      ${jprt.my.solaris.i586}-product-c1-GCBasher_ParOldGC, \
  12.113 -    ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_default, \
  12.114      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_SerialGC, \
  12.115      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParallelGC, \
  12.116      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParNewGC, \
  12.117      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_CMS, \
  12.118      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_G1, \
  12.119      ${jprt.my.solaris.i586}-fastdebug-c2-GCBasher_ParOldGC, \
  12.120 -    ${jprt.my.solaris.i586}-product-c1-GCOld_default, \
  12.121      ${jprt.my.solaris.i586}-product-c1-GCOld_SerialGC, \
  12.122      ${jprt.my.solaris.i586}-product-c1-GCOld_ParallelGC, \
  12.123      ${jprt.my.solaris.i586}-product-c1-GCOld_ParNewGC, \
  12.124      ${jprt.my.solaris.i586}-product-c1-GCOld_CMS, \
  12.125      ${jprt.my.solaris.i586}-product-c1-GCOld_G1, \
  12.126      ${jprt.my.solaris.i586}-product-c1-GCOld_ParOldGC, \
  12.127 -    ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default, \
  12.128      ${jprt.my.solaris.i586}-fastdebug-c2-jbb_default_nontiered, \
  12.129      ${jprt.my.solaris.i586}-fastdebug-c2-jbb_ParallelGC, \
  12.130      ${jprt.my.solaris.i586}-fastdebug-c2-jbb_CMS, \
  12.131 @@ -293,21 +280,19 @@
  12.132      ${jprt.my.linux.i586}-product-c1-runThese_Xcomp, \
  12.133      ${jprt.my.linux.i586}-fastdebug-c1-runThese_Xshare, \
  12.134      ${jprt.my.linux.i586}-fastdebug-c2-runThese_Xcomp, \
  12.135 -    ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \
  12.136      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
  12.137      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
  12.138      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
  12.139      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
  12.140      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
  12.141      ${jprt.my.linux.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
  12.142 -    ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_default, \
  12.143      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_SerialGC, \
  12.144      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParallelGC, \
  12.145      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParNewGC, \
  12.146      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_CMS, \
  12.147      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_G1, \
  12.148      ${jprt.my.linux.i586}-product-{c1|c2}-GCOld_ParOldGC, \
  12.149 -    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_default, \
  12.150 +    ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_SerialGC, \
  12.151      ${jprt.my.linux.i586}-{product|fastdebug}-c2-jbb_default_nontiered, \
  12.152      ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_ParallelGC, \
  12.153      ${jprt.my.linux.i586}-{product|fastdebug}-c1-jbb_CMS, \
  12.154 @@ -318,21 +303,18 @@
  12.155      ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98, \
  12.156      ${jprt.my.linux.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
  12.157      ${jprt.my.linux.x64}-{product|fastdebug}-c2-scimark, \
  12.158 -    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_default, \
  12.159      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
  12.160      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
  12.161      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
  12.162      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
  12.163      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_G1, \
  12.164      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
  12.165 -    ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_default, \
  12.166      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
  12.167      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
  12.168      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
  12.169      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_CMS, \
  12.170      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_G1, \
  12.171      ${jprt.my.linux.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
  12.172 -    ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default, \
  12.173      ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
  12.174      ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
  12.175      ${jprt.my.linux.x64}-{product|fastdebug}-c2-jbb_G1, \
  12.176 @@ -342,21 +324,18 @@
  12.177      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98, \
  12.178      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jvm98_nontiered, \
  12.179      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-scimark, \
  12.180 -    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_default, \
  12.181      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
  12.182      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
  12.183      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
  12.184      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
  12.185      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_G1, \
  12.186      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
  12.187 -    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_default, \
  12.188      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
  12.189      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
  12.190      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
  12.191      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_CMS, \
  12.192      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_G1, \
  12.193      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-GCOld_ParOldGC, \
  12.194 -    ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default, \
  12.195      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_default_nontiered, \
  12.196      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_ParallelGC, \
  12.197      ${jprt.my.macosx.x64}-{product|fastdebug}-c2-jbb_G1, \
  12.198 @@ -369,14 +348,12 @@
  12.199      ${jprt.my.windows.i586}-product-{c1|c2}-runThese, \
  12.200      ${jprt.my.windows.i586}-product-{c1|c2}-runThese_Xcomp, \
  12.201      ${jprt.my.windows.i586}-fastdebug-c1-runThese_Xshare, \
  12.202 -    ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_default, \
  12.203      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_SerialGC, \
  12.204      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParallelGC, \
  12.205      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParNewGC, \
  12.206      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_CMS, \
  12.207      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_G1, \
  12.208      ${jprt.my.windows.i586}-{product|fastdebug}-{c1|c2}-GCBasher_ParOldGC, \
  12.209 -    ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_default, \
  12.210      ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_SerialGC, \
  12.211      ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParallelGC, \
  12.212      ${jprt.my.windows.i586}-product-{c1|c2}-GCOld_ParNewGC, \
  12.213 @@ -396,14 +373,12 @@
  12.214      ${jprt.my.windows.x64}-{product|fastdebug}-c2-scimark, \
  12.215      ${jprt.my.windows.x64}-product-c2-runThese, \
  12.216      ${jprt.my.windows.x64}-product-c2-runThese_Xcomp, \
  12.217 -    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_default, \
  12.218      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_SerialGC, \
  12.219      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParallelGC, \
  12.220      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParNewGC, \
  12.221      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_CMS, \
  12.222      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_G1, \
  12.223      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCBasher_ParOldGC, \
  12.224 -    ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_default, \
  12.225      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_SerialGC, \
  12.226      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParallelGC, \
  12.227      ${jprt.my.windows.x64}-{product|fastdebug}-c2-GCOld_ParNewGC, \
  12.228 @@ -419,7 +394,7 @@
  12.229  
  12.230  # Some basic "smoke" tests for OpenJDK builds
  12.231  jprt.test.targets.open = \
  12.232 -    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98, \
  12.233 +    ${jprt.my.solaris.x64}-{productOpen|fastdebugOpen}-c2-jvm98, \
  12.234      ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98, \
  12.235      ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98
  12.236  
  12.237 @@ -520,5 +495,5 @@
  12.238  jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
  12.239  
  12.240  # 7155453: Work-around to prevent popups on OSX from blocking test completion
  12.241 -# but the work-around is added to all platforms to be consistent 
  12.242 +# but the work-around is added to all platforms to be consistent
  12.243  jprt.jbb.options=-Djava.awt.headless=true
    13.1 --- a/make/linux/Makefile	Wed Apr 24 20:55:28 2013 -0400
    13.2 +++ b/make/linux/Makefile	Wed Apr 24 21:11:02 2013 -0400
    13.3 @@ -1,5 +1,5 @@
    13.4  #
    13.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    13.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8  #
    13.9  # This code is free software; you can redistribute it and/or modify it
   13.10 @@ -142,55 +142,42 @@
   13.11  #
   13.12  #       debug           compiler2       <os>_<arch>_compiler2/debug
   13.13  #       fastdebug       compiler2       <os>_<arch>_compiler2/fastdebug
   13.14 -#       jvmg            compiler2       <os>_<arch>_compiler2/jvmg
   13.15  #       optimized       compiler2       <os>_<arch>_compiler2/optimized
   13.16 -#       profiled        compiler2       <os>_<arch>_compiler2/profiled
   13.17  #       product         compiler2       <os>_<arch>_compiler2/product
   13.18  #
   13.19  #       debug1          compiler1       <os>_<arch>_compiler1/debug
   13.20  #       fastdebug1      compiler1       <os>_<arch>_compiler1/fastdebug
   13.21 -#       jvmg1           compiler1       <os>_<arch>_compiler1/jvmg
   13.22  #       optimized1      compiler1       <os>_<arch>_compiler1/optimized
   13.23 -#       profiled1       compiler1       <os>_<arch>_compiler1/profiled
   13.24  #       product1        compiler1       <os>_<arch>_compiler1/product
   13.25  #
   13.26  #       debugcore       core            <os>_<arch>_core/debug
   13.27  #       fastdebugcore   core            <os>_<arch>_core/fastdebug
   13.28 -#       jvmgcore        core            <os>_<arch>_core/jvmg
   13.29  #       optimizedcore   core            <os>_<arch>_core/optimized
   13.30 -#       profiledcore    core            <os>_<arch>_core/profiled
   13.31  #       productcore     core            <os>_<arch>_core/product
   13.32  #
   13.33  #       debugzero       zero            <os>_<arch>_zero/debug
   13.34  #       fastdebugzero   zero            <os>_<arch>_zero/fastdebug
   13.35 -#       jvmgzero        zero            <os>_<arch>_zero/jvmg
   13.36  #       optimizedzero   zero            <os>_<arch>_zero/optimized
   13.37 -#       profiledzero    zero            <os>_<arch>_zero/profiled
   13.38  #       productzero     zero            <os>_<arch>_zero/product
   13.39  #
   13.40  #       debugshark      shark           <os>_<arch>_shark/debug
   13.41  #       fastdebugshark  shark           <os>_<arch>_shark/fastdebug
   13.42 -#       jvmgshark       shark           <os>_<arch>_shark/jvmg
   13.43  #       optimizedshark  shark           <os>_<arch>_shark/optimized
   13.44 -#       profiledshark   shark           <os>_<arch>_shark/profiled
   13.45  #       productshark    shark           <os>_<arch>_shark/product
   13.46  #
   13.47  #       fastdebugminimal1 minimal1      <os>_<arch>_minimal1/fastdebug
   13.48 -#       jvmgminimal1      minimal1      <os>_<arch>_minimal1/jvmg
   13.49  #       productminimal1   minimal1      <os>_<arch>_minimal1/product
   13.50  #
   13.51  # What you get with each target:
   13.52  #
   13.53 -# debug*     - "thin" libjvm - debug info linked into the gamma launcher
   13.54 +# debug*     - debug compile with asserts enabled
   13.55  # fastdebug* - optimized compile, but with asserts enabled
   13.56 -# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
   13.57  # optimized* - optimized compile, no asserts
   13.58 -# profiled*  - gprof
   13.59  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
   13.60  
   13.61  # This target list needs to be coordinated with the usage message
   13.62  # in the build.sh script:
   13.63 -TARGETS           = debug jvmg fastdebug optimized profiled product
   13.64 +TARGETS           = debug fastdebug optimized product
   13.65  
   13.66  ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
   13.67    SUBDIR_DOCS     = $(OSNAME)_$(VARIANTARCH)_docs
   13.68 @@ -357,15 +344,29 @@
   13.69  	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
   13.70  
   13.71  # Synonyms for win32-like targets.
   13.72 -compiler2:  jvmg product
   13.73 +compiler2:  debug product
   13.74  
   13.75 -compiler1:  jvmg1 product1
   13.76 +compiler1:  debug1 product1
   13.77  
   13.78 -core: jvmgcore productcore
   13.79 +core: debugcore productcore
   13.80  
   13.81 -zero: jvmgzero productzero
   13.82 +zero: debugzero productzero
   13.83  
   13.84 -shark: jvmgshark productshark
   13.85 +shark: debugshark productshark
   13.86 +
   13.87 +warn_jvmg_deprecated:
   13.88 +	echo "Warning: The jvmg target has been replaced with debug"
   13.89 +	echo "Warning: Please update your usage"
   13.90 +
   13.91 +jvmg: warn_jvmg_deprecated debug
   13.92 +
   13.93 +jvmg1: warn_jvmg_deprecated debug1
   13.94 +
   13.95 +jvmgcore: warn_jvmg_deprecated debugcore
   13.96 +
   13.97 +jvmgzero: warn_jvmg_deprecated debugzero
   13.98 +
   13.99 +jvmgshark: warn_jvmg_deprecated debugshark
  13.100  
  13.101  clean_docs:
  13.102  	rm -rf $(SUBDIR_DOCS)
    14.1 --- a/make/linux/makefiles/buildtree.make	Wed Apr 24 20:55:28 2013 -0400
    14.2 +++ b/make/linux/makefiles/buildtree.make	Wed Apr 24 21:11:02 2013 -0400
    14.3 @@ -19,7 +19,7 @@
    14.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    14.5  # or visit www.oracle.com if you need additional information or have any
    14.6  # questions.
    14.7 -#  
    14.8 +#
    14.9  #
   14.10  
   14.11  # Usage:
   14.12 @@ -46,11 +46,11 @@
   14.13  # Makefile	- for "make foo"
   14.14  # flags.make	- with macro settings
   14.15  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
   14.16 -# adlc.make	- 
   14.17 +# adlc.make	-
   14.18  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
   14.19  # sa.make	- generate SA jar file and natives
   14.20  # env.[ck]sh	- environment settings
   14.21 -# 
   14.22 +#
   14.23  # The makefiles are split this way so that "make foo" will run faster by not
   14.24  # having to read the dependency files for the vm.
   14.25  
   14.26 @@ -117,7 +117,7 @@
   14.27  	$(PLATFORM_DIR)/generated/adfiles \
   14.28  	$(PLATFORM_DIR)/generated/jvmtifiles
   14.29  
   14.30 -TARGETS      = debug fastdebug jvmg optimized product profiled
   14.31 +TARGETS      = debug fastdebug optimized product
   14.32  SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
   14.33  
   14.34  # For dependencies and recursive makes.
   14.35 @@ -179,8 +179,8 @@
   14.36  	$(QUIETLY) mkdir -p $@
   14.37  
   14.38  # Convenience macro which takes a source relative path, applies $(1) to the
   14.39 -# absolute path, and then replaces $(GAMMADIR) in the result with a 
   14.40 -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
   14.41 +# absolute path, and then replaces $(GAMMADIR) in the result with a
   14.42 +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
   14.43  gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
   14.44  
   14.45  # This bit is needed to enable local rebuilds.
   14.46 @@ -284,8 +284,6 @@
   14.47  	$(QUIETLY) ( \
   14.48  	$(BUILDTREE_COMMENT); \
   14.49  	echo; \
   14.50 -	[ "$(TARGET)" = profiled ] && \
   14.51 -	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \
   14.52  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
   14.53  	) > $@
   14.54  
   14.55 @@ -376,7 +374,7 @@
   14.56  	$(QUIETLY) ( \
   14.57  	$(BUILDTREE_COMMENT); \
   14.58  	echo "JDK=${JAVA_HOME}"; \
   14.59 -	) > $@	   
   14.60 +	) > $@
   14.61  
   14.62  FORCE:
   14.63  
    15.1 --- a/make/linux/makefiles/debug.make	Wed Apr 24 20:55:28 2013 -0400
    15.2 +++ b/make/linux/makefiles/debug.make	Wed Apr 24 21:11:02 2013 -0400
    15.3 @@ -1,5 +1,5 @@
    15.4  #
    15.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    15.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    15.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.8  #
    15.9  # This code is free software; you can redistribute it and/or modify it
   15.10 @@ -19,7 +19,7 @@
   15.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   15.12  # or visit www.oracle.com if you need additional information or have any
   15.13  # questions.
   15.14 -#  
   15.15 +#
   15.16  #
   15.17  
   15.18  # Sets make macros for making debug version of VM
   15.19 @@ -27,17 +27,16 @@
   15.20  # Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
   15.21  DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
   15.22  DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
   15.23 -CFLAGS += $(DEBUG_CFLAGS/BYFILE)
   15.24 +
   15.25 +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
   15.26 +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
   15.27 +
   15.28 +# Set the environment variable HOTSPARC_GENERIC to "true"
   15.29 +# to inhibit the effect of the previous line on CFLAGS.
   15.30  
   15.31  # Linker mapfile
   15.32  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
   15.33  
   15.34 -_JUNK_ := $(shell echo -e >&2 ""\
   15.35 - "----------------------------------------------------------------------\n" \
   15.36 - "WARNING: 'make debug' is deprecated. It will be removed in the future.\n" \
   15.37 - "Please use 'make jvmg' to build debug JVM.                            \n" \
   15.38 - "----------------------------------------------------------------------\n")
   15.39 -
   15.40  VERSION = debug
   15.41 -SYSDEFS += -DASSERT -DDEBUG
   15.42 +SYSDEFS += -DASSERT
   15.43  PICFLAGS = DEFAULT
    16.1 --- a/make/linux/makefiles/defs.make	Wed Apr 24 20:55:28 2013 -0400
    16.2 +++ b/make/linux/makefiles/defs.make	Wed Apr 24 21:11:02 2013 -0400
    16.3 @@ -1,5 +1,5 @@
    16.4  #
    16.5 -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
    16.6 +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
    16.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.8  #
    16.9  # This code is free software; you can redistribute it and/or modify it
   16.10 @@ -19,7 +19,7 @@
   16.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   16.12  # or visit www.oracle.com if you need additional information or have any
   16.13  # questions.
   16.14 -#  
   16.15 +#
   16.16  #
   16.17  
   16.18  # The common definitions for hotspot linux builds.
   16.19 @@ -92,7 +92,7 @@
   16.20      VM_PLATFORM     = linux_i486
   16.21      HS_ARCH         = x86
   16.22      # We have to reset ARCH to i686 since SRCARCH relies on it
   16.23 -    ARCH            = i686   
   16.24 +    ARCH            = i686
   16.25    endif
   16.26  endif
   16.27  
   16.28 @@ -240,9 +240,6 @@
   16.29  # Library suffix
   16.30  LIBRARY_SUFFIX=so
   16.31  
   16.32 -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
   16.33 -VM_DEBUG=jvmg
   16.34 -
   16.35  EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
   16.36  
   16.37  # client and server subdirectories have symbolic links to ../libjsig.so
   16.38 @@ -279,7 +276,7 @@
   16.39      else
   16.40        EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.debuginfo
   16.41      endif
   16.42 -  endif 
   16.43 +  endif
   16.44  endif
   16.45  
   16.46  ifeq ($(JVM_VARIANT_MINIMAL1),true)
   16.47 @@ -292,15 +289,15 @@
   16.48      else
   16.49  	EXPORT_LIST += $(EXPORT_MINIMAL_DIR)/libjvm.debuginfo
   16.50      endif
   16.51 -  endif 
   16.52 +  endif
   16.53  endif
   16.54  
   16.55  # Serviceability Binaries
   16.56  # No SA Support for PPC, IA64, ARM or zero
   16.57  ADD_SA_BINARIES/x86   = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
   16.58 -                        $(EXPORT_LIB_DIR)/sa-jdi.jar 
   16.59 +                        $(EXPORT_LIB_DIR)/sa-jdi.jar
   16.60  ADD_SA_BINARIES/sparc = $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.$(LIBRARY_SUFFIX) \
   16.61 -                        $(EXPORT_LIB_DIR)/sa-jdi.jar 
   16.62 +                        $(EXPORT_LIB_DIR)/sa-jdi.jar
   16.63  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   16.64    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   16.65      ADD_SA_BINARIES/x86   += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.diz
   16.66 @@ -310,10 +307,10 @@
   16.67      ADD_SA_BINARIES/sparc += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
   16.68    endif
   16.69  endif
   16.70 -ADD_SA_BINARIES/ppc   = 
   16.71 -ADD_SA_BINARIES/ia64  = 
   16.72 -ADD_SA_BINARIES/arm   = 
   16.73 -ADD_SA_BINARIES/zero  = 
   16.74 +ADD_SA_BINARIES/ppc   =
   16.75 +ADD_SA_BINARIES/ia64  =
   16.76 +ADD_SA_BINARIES/arm   =
   16.77 +ADD_SA_BINARIES/zero  =
   16.78  
   16.79  -include $(HS_ALT_MAKE)/linux/makefiles/defs.make
   16.80  
    17.1 --- a/make/linux/makefiles/fastdebug.make	Wed Apr 24 20:55:28 2013 -0400
    17.2 +++ b/make/linux/makefiles/fastdebug.make	Wed Apr 24 21:11:02 2013 -0400
    17.3 @@ -1,5 +1,5 @@
    17.4  #
    17.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    17.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    17.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.8  #
    17.9  # This code is free software; you can redistribute it and/or modify it
   17.10 @@ -19,7 +19,7 @@
   17.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   17.12  # or visit www.oracle.com if you need additional information or have any
   17.13  # questions.
   17.14 -#  
   17.15 +#
   17.16  #
   17.17  
   17.18  # Sets make macros for making debug version of VM
   17.19 @@ -59,5 +59,5 @@
   17.20  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
   17.21  
   17.22  VERSION = optimized
   17.23 -SYSDEFS += -DASSERT -DFASTDEBUG
   17.24 +SYSDEFS += -DASSERT
   17.25  PICFLAGS = DEFAULT
    18.1 --- a/make/linux/makefiles/gcc.make	Wed Apr 24 20:55:28 2013 -0400
    18.2 +++ b/make/linux/makefiles/gcc.make	Wed Apr 24 21:11:02 2013 -0400
    18.3 @@ -126,14 +126,12 @@
    18.4  # Compiler warnings are treated as errors
    18.5  WARNINGS_ARE_ERRORS = -Werror
    18.6  
    18.7 -# Except for a few acceptable ones
    18.8 +WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
    18.9 +
   18.10  # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
   18.11 -# conversions which might affect the values. To avoid that, we need to turn
   18.12 -# it off explicitly. 
   18.13 -ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
   18.14 -WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef
   18.15 -else
   18.16 -WARNING_FLAGS = -Wpointer-arith -Wconversion -Wsign-compare -Wundef
   18.17 +# conversions which might affect the values. Only enable it in earlier versions.
   18.18 +ifeq "$(shell expr \( $(CC_VER_MAJOR) \> 4 \) \| \( \( $(CC_VER_MAJOR) = 4 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0"
   18.19 +WARNING_FLAGS += -Wconversion
   18.20  endif
   18.21  
   18.22  CFLAGS_WARN/DEFAULT = $(WARNINGS_ARE_ERRORS) $(WARNING_FLAGS)
    19.1 --- a/make/linux/makefiles/jvmg.make	Wed Apr 24 20:55:28 2013 -0400
    19.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    19.3 @@ -1,42 +0,0 @@
    19.4 -#
    19.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    19.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    19.7 -#
    19.8 -# This code is free software; you can redistribute it and/or modify it
    19.9 -# under the terms of the GNU General Public License version 2 only, as
   19.10 -# published by the Free Software Foundation.
   19.11 -#
   19.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
   19.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   19.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   19.15 -# version 2 for more details (a copy is included in the LICENSE file that
   19.16 -# accompanied this code).
   19.17 -#
   19.18 -# You should have received a copy of the GNU General Public License version
   19.19 -# 2 along with this work; if not, write to the Free Software Foundation,
   19.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   19.21 -#
   19.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   19.23 -# or visit www.oracle.com if you need additional information or have any
   19.24 -# questions.
   19.25 -#  
   19.26 -#
   19.27 -
   19.28 -# Sets make macros for making debug version of VM
   19.29 -
   19.30 -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
   19.31 -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
   19.32 -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
   19.33 -
   19.34 -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
   19.35 -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
   19.36 -
   19.37 -# Set the environment variable HOTSPARC_GENERIC to "true"
   19.38 -# to inhibit the effect of the previous line on CFLAGS.
   19.39 -
   19.40 -# Linker mapfile
   19.41 -MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
   19.42 -
   19.43 -VERSION = debug
   19.44 -SYSDEFS += -DASSERT -DDEBUG
   19.45 -PICFLAGS = DEFAULT
    20.1 --- a/make/linux/makefiles/profiled.make	Wed Apr 24 20:55:28 2013 -0400
    20.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    20.3 @@ -1,30 +0,0 @@
    20.4 -#
    20.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    20.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.7 -#
    20.8 -# This code is free software; you can redistribute it and/or modify it
    20.9 -# under the terms of the GNU General Public License version 2 only, as
   20.10 -# published by the Free Software Foundation.
   20.11 -#
   20.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
   20.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   20.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   20.15 -# version 2 for more details (a copy is included in the LICENSE file that
   20.16 -# accompanied this code).
   20.17 -#
   20.18 -# You should have received a copy of the GNU General Public License version
   20.19 -# 2 along with this work; if not, write to the Free Software Foundation,
   20.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   20.21 -#
   20.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   20.23 -# or visit www.oracle.com if you need additional information or have any
   20.24 -# questions.
   20.25 -#  
   20.26 -#
   20.27 -
   20.28 -# Sets make macros for making profiled version of Gamma VM
   20.29 -# (It is also optimized.)
   20.30 -
   20.31 -CFLAGS += -pg
   20.32 -AOUT_FLAGS += -pg
   20.33 -LDNOMAP = true
    21.1 --- a/make/linux/makefiles/vm.make	Wed Apr 24 20:55:28 2013 -0400
    21.2 +++ b/make/linux/makefiles/vm.make	Wed Apr 24 21:11:02 2013 -0400
    21.3 @@ -189,7 +189,7 @@
    21.4  Src_Dirs/SHARK     := $(CORE_PATHS) $(SHARK_PATHS)
    21.5  Src_Dirs := $(Src_Dirs/$(TYPE))
    21.6  
    21.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
    21.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
    21.9  COMPILER1_SPECIFIC_FILES := c1_\*
   21.10  SHARK_SPECIFIC_FILES     := shark
   21.11  ZERO_SPECIFIC_FILES      := zero
    22.1 --- a/make/solaris/Makefile	Wed Apr 24 20:55:28 2013 -0400
    22.2 +++ b/make/solaris/Makefile	Wed Apr 24 21:11:02 2013 -0400
    22.3 @@ -120,37 +120,29 @@
    22.4  #
    22.5  #       debug           compiler2       <os>_<arch>_compiler2/debug
    22.6  #       fastdebug       compiler2       <os>_<arch>_compiler2/fastdebug
    22.7 -#       jvmg            compiler2       <os>_<arch>_compiler2/jvmg
    22.8  #       optimized       compiler2       <os>_<arch>_compiler2/optimized
    22.9 -#       profiled        compiler2       <os>_<arch>_compiler2/profiled
   22.10  #       product         compiler2       <os>_<arch>_compiler2/product
   22.11  #
   22.12  #       debug1          compiler1       <os>_<arch>_compiler1/debug
   22.13  #       fastdebug1      compiler1       <os>_<arch>_compiler1/fastdebug
   22.14 -#       jvmg1           compiler1       <os>_<arch>_compiler1/jvmg
   22.15  #       optimized1      compiler1       <os>_<arch>_compiler1/optimized
   22.16 -#       profiled1       compiler1       <os>_<arch>_compiler1/profiled
   22.17  #       product1        compiler1       <os>_<arch>_compiler1/product
   22.18  #
   22.19  #       debugcore       core            <os>_<arch>_core/debug
   22.20  #       fastdebugcore   core            <os>_<arch>_core/fastdebug
   22.21 -#       jvmgcore        core            <os>_<arch>_core/jvmg
   22.22  #       optimizedcore   core            <os>_<arch>_core/optimized
   22.23 -#       profiledcore    core            <os>_<arch>_core/profiled
   22.24  #       productcore     core            <os>_<arch>_core/product
   22.25  #
   22.26  # What you get with each target:
   22.27  #
   22.28 -# debug*     - "thin" libjvm - debug info linked into the gamma launcher
   22.29 +# debug*     - debug compile with asserts enabled
   22.30  # fastdebug* - optimized compile, but with asserts enabled
   22.31 -# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
   22.32  # optimized* - optimized compile, no asserts
   22.33 -# profiled*  - gprof
   22.34  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
   22.35  
   22.36  # This target list needs to be coordinated with the usage message
   22.37  # in the build.sh script:
   22.38 -TARGETS           = debug jvmg fastdebug optimized profiled product
   22.39 +TARGETS           = debug fastdebug optimized product
   22.40  
   22.41  SUBDIR_DOCS       = $(OSNAME)_$(BUILDARCH)_docs
   22.42  SUBDIRS_C1        = $(addprefix $(OSNAME)_$(BUILDARCH)_compiler1/,$(TARGETS))
   22.43 @@ -267,11 +259,21 @@
   22.44  	$(MAKE) -f $(GAMMADIR)/make/$(OSNAME)/makefiles/jvmti.make $(MFLAGS) $(BUILDTREE_VARS) JvmtiOutDir=$(SUBDIR_DOCS) BUILD_FLAVOR=product jvmtidocs
   22.45  
   22.46  # Synonyms for win32-like targets.
   22.47 -compiler2:  jvmg product
   22.48 +compiler2:  debug product
   22.49  
   22.50 -compiler1:  jvmg1 product1
   22.51 +compiler1:  debug1 product1
   22.52  
   22.53 -core: jvmgcore productcore
   22.54 +core: debugcore productcore
   22.55 +
   22.56 +warn_jvmg_deprecated:
   22.57 +	echo "Warning: The jvmg target has been replaced with debug"
   22.58 +	echo "Warning: Please update your usage"
   22.59 +
   22.60 +jvmg: warn_jvmg_deprecated debug
   22.61 +
   22.62 +jvmg1: warn_jvmg_deprecated debug1
   22.63 +
   22.64 +jvmgcore: warn_jvmg_deprecated debugcore
   22.65  
   22.66  clean_docs:
   22.67  	rm -rf $(SUBDIR_DOCS)
    23.1 --- a/make/solaris/makefiles/buildtree.make	Wed Apr 24 20:55:28 2013 -0400
    23.2 +++ b/make/solaris/makefiles/buildtree.make	Wed Apr 24 21:11:02 2013 -0400
    23.3 @@ -19,7 +19,7 @@
    23.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    23.5  # or visit www.oracle.com if you need additional information or have any
    23.6  # questions.
    23.7 -#  
    23.8 +#
    23.9  #
   23.10  
   23.11  # Usage:
   23.12 @@ -46,11 +46,11 @@
   23.13  # Makefile	- for "make foo"
   23.14  # flags.make	- with macro settings
   23.15  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
   23.16 -# adlc.make	- 
   23.17 +# adlc.make	-
   23.18  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
   23.19  # sa.make	- generate SA jar file and natives
   23.20  # env.[ck]sh	- environment settings
   23.21 -# 
   23.22 +#
   23.23  # The makefiles are split this way so that "make foo" will run faster by not
   23.24  # having to read the dependency files for the vm.
   23.25  
   23.26 @@ -69,7 +69,7 @@
   23.27  GCC_LIB         = /usr/local/lib
   23.28  else
   23.29  PLATFORM_FILE	= $(GAMMADIR)/make/$(OS_FAMILY)/platform_$(BUILDARCH)
   23.30 -GCC_LIB         = 
   23.31 +GCC_LIB         =
   23.32  endif
   23.33  
   23.34  ifdef FORCE_TIERED
   23.35 @@ -110,7 +110,7 @@
   23.36  	$(PLATFORM_DIR)/generated/adfiles \
   23.37  	$(PLATFORM_DIR)/generated/jvmtifiles
   23.38  
   23.39 -TARGETS      = debug fastdebug jvmg optimized product profiled
   23.40 +TARGETS      = debug fastdebug optimized product
   23.41  SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
   23.42  
   23.43  # For dependencies and recursive makes.
   23.44 @@ -153,7 +153,7 @@
   23.45    endif
   23.46  endif
   23.47  
   23.48 -BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION) 
   23.49 +BUILDTREE_VARS += HOTSPOT_RELEASE_VERSION=$(HS_BUILD_VER) HOTSPOT_BUILD_VERSION= JRE_RELEASE_VERSION=$(JRE_RELEASE_VERSION)
   23.50  
   23.51  BUILDTREE	= \
   23.52  	$(MAKE) -f $(BUILDTREE_MAKE) $(BUILDTREE_TARGETS) $(BUILDTREE_VARS)
   23.53 @@ -172,8 +172,8 @@
   23.54  	$(QUIETLY) mkdir -p $@
   23.55  
   23.56  # Convenience macro which takes a source relative path, applies $(1) to the
   23.57 -# absolute path, and then replaces $(GAMMADIR) in the result with a 
   23.58 -# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.  
   23.59 +# absolute path, and then replaces $(GAMMADIR) in the result with a
   23.60 +# literal "$(GAMMADIR)/" suitable for inclusion in a Makefile.
   23.61  gamma-path=$(subst $(GAMMADIR),\$$(GAMMADIR),$(call $(1),$(HS_COMMON_SRC)/$(2)))
   23.62  
   23.63  # This bit is needed to enable local rebuilds.
   23.64 @@ -274,8 +274,6 @@
   23.65  	$(QUIETLY) ( \
   23.66  	$(BUILDTREE_COMMENT); \
   23.67  	echo; \
   23.68 -	[ "$(TARGET)" = profiled ] && \
   23.69 -	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/optimized.make"; \
   23.70  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(TARGET).make"; \
   23.71  	) > $@
   23.72  
   23.73 @@ -366,7 +364,7 @@
   23.74  	$(QUIETLY) ( \
   23.75  	$(BUILDTREE_COMMENT); \
   23.76  	echo "JDK=${JAVA_HOME}"; \
   23.77 -	) > $@	   
   23.78 +	) > $@
   23.79  
   23.80  FORCE:
   23.81  
    24.1 --- a/make/solaris/makefiles/debug.make	Wed Apr 24 20:55:28 2013 -0400
    24.2 +++ b/make/solaris/makefiles/debug.make	Wed Apr 24 21:11:02 2013 -0400
    24.3 @@ -1,5 +1,5 @@
    24.4  #
    24.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    24.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    24.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.8  #
    24.9  # This code is free software; you can redistribute it and/or modify it
   24.10 @@ -19,7 +19,7 @@
   24.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   24.12  # or visit www.oracle.com if you need additional information or have any
   24.13  # questions.
   24.14 -#  
   24.15 +#
   24.16  #
   24.17  
   24.18  # Sets make macros for making debug version of VM
   24.19 @@ -37,22 +37,20 @@
   24.20  endif
   24.21  endif
   24.22  
   24.23 -CFLAGS += $(DEBUG_CFLAGS/BYFILE)
   24.24 +# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
   24.25 +CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
   24.26 +
   24.27 +# Set the environment variable HOTSPARC_GENERIC to "true"
   24.28 +# to inhibit the effect of the previous line on CFLAGS.
   24.29  
   24.30  # Linker mapfiles
   24.31  MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
   24.32            $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
   24.33  
   24.34 -# This mapfile is only needed when compiling with dtrace support, 
   24.35 +# This mapfile is only needed when compiling with dtrace support,
   24.36  # and mustn't be otherwise.
   24.37  MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
   24.38  
   24.39 -_JUNK_ := $(shell echo >&2 ""\
   24.40 - "-------------------------------------------------------------------------\n" \
   24.41 - "WARNING: 'gnumake debug' is deprecated. It will be removed in the future.\n" \
   24.42 - "Please use 'gnumake jvmg' to build debug JVM.                            \n" \
   24.43 - "-------------------------------------------------------------------------\n")
   24.44 -
   24.45  VERSION = debug
   24.46 -SYSDEFS += -DASSERT -DDEBUG
   24.47 +SYSDEFS += -DASSERT
   24.48  PICFLAGS = DEFAULT
    25.1 --- a/make/solaris/makefiles/defs.make	Wed Apr 24 20:55:28 2013 -0400
    25.2 +++ b/make/solaris/makefiles/defs.make	Wed Apr 24 21:11:02 2013 -0400
    25.3 @@ -1,5 +1,5 @@
    25.4  #
    25.5 -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
    25.6 +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
    25.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8  #
    25.9  # This code is free software; you can redistribute it and/or modify it
   25.10 @@ -19,7 +19,7 @@
   25.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   25.12  # or visit www.oracle.com if you need additional information or have any
   25.13  # questions.
   25.14 -#  
   25.15 +#
   25.16  #
   25.17  
   25.18  # The common definitions for hotspot solaris builds.
   25.19 @@ -172,9 +172,6 @@
   25.20  # Library suffix
   25.21  LIBRARY_SUFFIX=so
   25.22  
   25.23 -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
   25.24 -VM_DEBUG=jvmg
   25.25 -
   25.26  EXPORT_LIST += $(EXPORT_DOCS_DIR)/platform/jvmti/jvmti.html
   25.27  
   25.28  # client and server subdirectories have symbolic links to ../libjsig.$(LIBRARY_SUFFIX)
   25.29 @@ -221,8 +218,8 @@
   25.30  endif
   25.31  ifeq ($(JVM_VARIANT_CLIENT),true)
   25.32    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/Xusage.txt
   25.33 -  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX) 
   25.34 -  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX) 
   25.35 +  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm.$(LIBRARY_SUFFIX)
   25.36 +  EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_db.$(LIBRARY_SUFFIX)
   25.37    EXPORT_LIST += $(EXPORT_CLIENT_DIR)/libjvm_dtrace.$(LIBRARY_SUFFIX)
   25.38    ifeq ($(ARCH_DATA_MODEL),32)
   25.39      EXPORT_LIST += $(EXPORT_CLIENT_DIR)/64/libjvm_db.$(LIBRARY_SUFFIX)
   25.40 @@ -257,4 +254,4 @@
   25.41      EXPORT_LIST += $(EXPORT_JRE_LIB_ARCH_DIR)/libsaproc.debuginfo
   25.42    endif
   25.43  endif
   25.44 -EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar 
   25.45 +EXPORT_LIST += $(EXPORT_LIB_DIR)/sa-jdi.jar
    26.1 --- a/make/solaris/makefiles/fastdebug.make	Wed Apr 24 20:55:28 2013 -0400
    26.2 +++ b/make/solaris/makefiles/fastdebug.make	Wed Apr 24 21:11:02 2013 -0400
    26.3 @@ -1,5 +1,5 @@
    26.4  #
    26.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    26.6 +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    26.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8  #
    26.9  # This code is free software; you can redistribute it and/or modify it
   26.10 @@ -19,7 +19,7 @@
   26.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   26.12  # or visit www.oracle.com if you need additional information or have any
   26.13  # questions.
   26.14 -#  
   26.15 +#
   26.16  #
   26.17  
   26.18  # Sets make macros for making debug version of VM
   26.19 @@ -118,10 +118,10 @@
   26.20  MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
   26.21  	  $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
   26.22  
   26.23 -# This mapfile is only needed when compiling with dtrace support, 
   26.24 +# This mapfile is only needed when compiling with dtrace support,
   26.25  # and mustn't be otherwise.
   26.26  MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
   26.27  
   26.28  VERSION = optimized
   26.29 -SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
   26.30 +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
   26.31  PICFLAGS = DEFAULT
    27.1 --- a/make/solaris/makefiles/jvmg.make	Wed Apr 24 20:55:28 2013 -0400
    27.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.3 @@ -1,56 +0,0 @@
    27.4 -#
    27.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    27.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.7 -#
    27.8 -# This code is free software; you can redistribute it and/or modify it
    27.9 -# under the terms of the GNU General Public License version 2 only, as
   27.10 -# published by the Free Software Foundation.
   27.11 -#
   27.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
   27.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   27.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   27.15 -# version 2 for more details (a copy is included in the LICENSE file that
   27.16 -# accompanied this code).
   27.17 -#
   27.18 -# You should have received a copy of the GNU General Public License version
   27.19 -# 2 along with this work; if not, write to the Free Software Foundation,
   27.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   27.21 -#
   27.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   27.23 -# or visit www.oracle.com if you need additional information or have any
   27.24 -# questions.
   27.25 -#  
   27.26 -#
   27.27 -
   27.28 -# Sets make macros for making debug version of VM
   27.29 -
   27.30 -# Compiler specific DEBUG_CFLAGS are passed in from gcc.make, sparcWorks.make
   27.31 -DEBUG_CFLAGS/DEFAULT= $(DEBUG_CFLAGS)
   27.32 -DEBUG_CFLAGS/BYFILE = $(DEBUG_CFLAGS/$@)$(DEBUG_CFLAGS/DEFAULT$(DEBUG_CFLAGS/$@))
   27.33 -
   27.34 -ifeq ("${Platform_compiler}", "sparcWorks")
   27.35 -
   27.36 -ifeq ($(COMPILER_REV_NUMERIC),508)
   27.37 -  # SS11 SEGV when compiling with -g and -xarch=v8, using different backend
   27.38 -  DEBUG_CFLAGS/compileBroker.o = $(DEBUG_CFLAGS) -xO0
   27.39 -  DEBUG_CFLAGS/jvmtiTagMap.o   = $(DEBUG_CFLAGS) -xO0
   27.40 -endif
   27.41 -endif
   27.42 -
   27.43 -# _NMT_NOINLINE_ informs NMT that no inlining by Compiler
   27.44 -CFLAGS += $(DEBUG_CFLAGS/BYFILE) -D_NMT_NOINLINE_
   27.45 -
   27.46 -# Set the environment variable HOTSPARC_GENERIC to "true"
   27.47 -# to inhibit the effect of the previous line on CFLAGS.
   27.48 -
   27.49 -# Linker mapfiles
   27.50 -MAPFILE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers \
   27.51 -          $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-debug
   27.52 -
   27.53 -# This mapfile is only needed when compiling with dtrace support,
   27.54 -# and mustn't be otherwise.
   27.55 -MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
   27.56 -
   27.57 -VERSION = debug
   27.58 -SYSDEFS += -DASSERT -DDEBUG
   27.59 -PICFLAGS = DEFAULT
    28.1 --- a/make/solaris/makefiles/profiled.make	Wed Apr 24 20:55:28 2013 -0400
    28.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.3 @@ -1,44 +0,0 @@
    28.4 -#
    28.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    28.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.7 -#
    28.8 -# This code is free software; you can redistribute it and/or modify it
    28.9 -# under the terms of the GNU General Public License version 2 only, as
   28.10 -# published by the Free Software Foundation.
   28.11 -#
   28.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
   28.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   28.15 -# version 2 for more details (a copy is included in the LICENSE file that
   28.16 -# accompanied this code).
   28.17 -#
   28.18 -# You should have received a copy of the GNU General Public License version
   28.19 -# 2 along with this work; if not, write to the Free Software Foundation,
   28.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   28.21 -#
   28.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   28.23 -# or visit www.oracle.com if you need additional information or have any
   28.24 -# questions.
   28.25 -#  
   28.26 -#
   28.27 -
   28.28 -# Sets make macros for making profiled version of Gamma VM
   28.29 -# (It is also optimized.)
   28.30 -
   28.31 -CFLAGS += -pg
   28.32 -
   28.33 -# On x86 Solaris 2.6, 7, and 8 if LD_LIBRARY_PATH has /usr/lib in it then
   28.34 -# adlc linked with -pg puts out empty header files. To avoid linking adlc
   28.35 -# with -pg the profile flag is split out separately and used in rules.make
   28.36 -
   28.37 -PROF_AOUT_FLAGS += -pg
   28.38 -
   28.39 -# To do a profiled build of the product, such as for generating the
   28.40 -# reordering file, set PROFILE_PRODUCT.  Otherwise the reordering file will
   28.41 -# contain references to functions which are not defined in the PRODUCT build.
   28.42 -
   28.43 -ifdef PROFILE_PRODUCT
   28.44 -  SYSDEFS += -DPRODUCT
   28.45 -endif
   28.46 -
   28.47 -LDNOMAP = true
    29.1 --- a/make/solaris/makefiles/vm.make	Wed Apr 24 20:55:28 2013 -0400
    29.2 +++ b/make/solaris/makefiles/vm.make	Wed Apr 24 21:11:02 2013 -0400
    29.3 @@ -202,7 +202,7 @@
    29.4  Src_Dirs/SHARK     := $(CORE_PATHS)
    29.5  Src_Dirs := $(Src_Dirs/$(TYPE))
    29.6  
    29.7 -COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp chaitin\* c2_\* runtime_\*
    29.8 +COMPILER2_SPECIFIC_FILES := opto libadt bcEscapeAnalyzer.cpp c2_\* runtime_\*
    29.9  COMPILER1_SPECIFIC_FILES := c1_\*
   29.10  SHARK_SPECIFIC_FILES     := shark
   29.11  ZERO_SPECIFIC_FILES      := zero
    30.1 --- a/make/windows/build.make	Wed Apr 24 20:55:28 2013 -0400
    30.2 +++ b/make/windows/build.make	Wed Apr 24 21:11:02 2013 -0400
    30.3 @@ -1,5 +1,5 @@
    30.4  #
    30.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    30.6 +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    30.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.8  #
    30.9  # This code is free software; you can redistribute it and/or modify it
   30.10 @@ -235,18 +235,14 @@
   30.11  	cd $(variantDir)
   30.12  	nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product ARCH=$(ARCH)
   30.13  
   30.14 -# The debug or jvmg (all the same thing) is an optional build
   30.15 -debug jvmg: checks $(variantDir) $(variantDir)\local.make sanity
   30.16 +# The debug build is an optional build
   30.17 +debug: checks $(variantDir) $(variantDir)\local.make sanity
   30.18  	cd $(variantDir)
   30.19  	nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=debug ARCH=$(ARCH)
   30.20  fastdebug: checks $(variantDir) $(variantDir)\local.make sanity
   30.21  	cd $(variantDir)
   30.22  	nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=fastdebug ARCH=$(ARCH)
   30.23  
   30.24 -develop: checks $(variantDir) $(variantDir)\local.make sanity
   30.25 -	cd $(variantDir)
   30.26 -	nmake -nologo -f $(WorkSpace)\make\windows\makefiles\top.make BUILD_FLAVOR=product DEVELOP=1 ARCH=$(ARCH)
   30.27 -
   30.28  # target to create just the directory structure
   30.29  tree: checks $(variantDir) $(variantDir)\local.make sanity
   30.30  	mkdir $(variantDir)\product
    31.1 --- a/make/windows/create_obj_files.sh	Wed Apr 24 20:55:28 2013 -0400
    31.2 +++ b/make/windows/create_obj_files.sh	Wed Apr 24 21:11:02 2013 -0400
    31.3 @@ -114,7 +114,7 @@
    31.4      "shark")     Src_Dirs="${CORE_PATHS}" ;;
    31.5  esac
    31.6  
    31.7 -COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp chaitin* c2_* runtime_*"
    31.8 +COMPILER2_SPECIFIC_FILES="opto libadt bcEscapeAnalyzer.cpp c2_* runtime_*"
    31.9  COMPILER1_SPECIFIC_FILES="c1_*"
   31.10  SHARK_SPECIFIC_FILES="shark"
   31.11  ZERO_SPECIFIC_FILES="zero"
    32.1 --- a/make/windows/makefiles/defs.make	Wed Apr 24 20:55:28 2013 -0400
    32.2 +++ b/make/windows/makefiles/defs.make	Wed Apr 24 21:11:02 2013 -0400
    32.3 @@ -19,7 +19,7 @@
    32.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    32.5  # or visit www.oracle.com if you need additional information or have any
    32.6  # questions.
    32.7 -#  
    32.8 +#
    32.9  #
   32.10  
   32.11  # The common definitions for hotspot windows builds.
   32.12 @@ -209,8 +209,6 @@
   32.13  ifneq (,$(findstring MINGW,$(SYSTEM_UNAME)))
   32.14    USING_MINGW=true
   32.15  endif
   32.16 -# FIXUP: The subdirectory for a debug build is NOT the same on all platforms
   32.17 -VM_DEBUG=debug
   32.18  
   32.19  # Windows wants particular paths due to nmake (must be after macros defined)
   32.20  #   It is important that gnumake invokes nmake with C:\\...\\  formated
   32.21 @@ -292,7 +290,7 @@
   32.22    MAKE_ARGS += BUILD_WIN_SA=1
   32.23  endif
   32.24  
   32.25 -# Propagate compiler and tools paths from configure to nmake. 
   32.26 +# Propagate compiler and tools paths from configure to nmake.
   32.27  # Need to make sure they contain \\ and not /.
   32.28  ifneq ($(SPEC),)
   32.29    ifeq ($(USING_CYGWIN), true)
    33.1 --- a/make/windows/makefiles/vm.make	Wed Apr 24 20:55:28 2013 -0400
    33.2 +++ b/make/windows/makefiles/vm.make	Wed Apr 24 21:11:02 2013 -0400
    33.3 @@ -31,11 +31,7 @@
    33.4  ALTSRC=$(WorkSpace)\src\closed
    33.5  
    33.6  !ifdef RELEASE
    33.7 -!ifdef DEVELOP
    33.8 -CXX_FLAGS=$(CXX_FLAGS) /D "DEBUG"
    33.9 -!else
   33.10  CXX_FLAGS=$(CXX_FLAGS) /D "PRODUCT"
   33.11 -!endif
   33.12  !else
   33.13  CXX_FLAGS=$(CXX_FLAGS) /D "ASSERT"
   33.14  !endif
   33.15 @@ -186,7 +182,7 @@
   33.16  
   33.17  # Special case files not using precompiled header files.
   33.18  
   33.19 -c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp 
   33.20 +c1_RInfo_$(Platform_arch).obj: $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
   33.21  	 $(CXX) $(CXX_FLAGS) $(CXX_DONT_USE_PCH) /c $(WorkSpace)\src\cpu\$(Platform_arch)\vm\c1_RInfo_$(Platform_arch).cpp
   33.22  
   33.23  os_windows.obj: $(WorkSpace)\src\os\windows\vm\os_windows.cpp
    34.1 --- a/make/windows/projectfiles/compiler2/ADLCompiler.dsp	Wed Apr 24 20:55:28 2013 -0400
    34.2 +++ b/make/windows/projectfiles/compiler2/ADLCompiler.dsp	Wed Apr 24 21:11:02 2013 -0400
    34.3 @@ -67,7 +67,7 @@
    34.4  # PROP Ignore_Export_Lib 0
    34.5  # PROP Target_Dir ""
    34.6  # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
    34.7 -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c
    34.8 +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c
    34.9  # ADD BASE RSC /l 0x409
   34.10  # ADD RSC /l 0x409
   34.11  BSC32=bscmake.exe
    35.1 --- a/make/windows/projectfiles/tiered/ADLCompiler.dsp	Wed Apr 24 20:55:28 2013 -0400
    35.2 +++ b/make/windows/projectfiles/tiered/ADLCompiler.dsp	Wed Apr 24 21:11:02 2013 -0400
    35.3 @@ -67,7 +67,7 @@
    35.4  # PROP Ignore_Export_Lib 0
    35.5  # PROP Target_Dir ""
    35.6  # ADD BASE CPP /nologo /W3 /Gm /GX /Zi /Od /D "WIN32" /D "_CONSOLE" /D "_MBCS" /YX /FD /c
    35.7 -# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "DEBUG" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c
    35.8 +# ADD CPP /nologo /ML /W3 /WX /Gm /GX /Zi /Od /I "." /I "$(HotSpotWorkSpace)\src\share\vm\opto" /I "$(HotSpotWorkSpace)\src\share\vm\prims" /I "$(HotSpotWorkSpace)\src\share\vm\lookup" /I "$(HotSpotWorkSpace)\src\share\vm\interpreter" /I "$(HotSpotWorkSpace)\src\share\vm\asm" /I "$(HotSpotWorkSpace)\src\share\vm\compiler" /I "$(HotSpotWorkSpace)\src\share\vm\utilities" /I "$(HotSpotWorkSpace)\src\share\vm\code" /I "$(HotSpotWorkSpace)\src\share\vm\oops" /I "$(HotSpotWorkSpace)\src\share\vm\runtime" /I "$(HotSpotWorkSpace)\src\share\vm\memory" /I "$(HotSpotWorkSpace)\src\share\vm\libadt" /I "$(HotSpotWorkSpace)\src\cpu\i486\vm" /I "$(HotSpotWorkSpace)\src\os\win32\vm" /D "WIN32" /D "_WINDOWS" /D "ASSERT" /Fr /FD /c
    35.9  # ADD BASE RSC /l 0x409
   35.10  # ADD RSC /l 0x409
   35.11  BSC32=bscmake.exe
    36.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Apr 24 20:55:28 2013 -0400
    36.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Apr 24 21:11:02 2013 -0400
    36.3 @@ -1000,9 +1000,10 @@
    36.4          DeoptimizationBlob* deopt_blob = SharedRuntime::deopt_blob();
    36.5          assert(deopt_blob != NULL, "deoptimization blob must have been created");
    36.6          restore_live_registers(sasm);
    36.7 -        __ restore();
    36.8 -        __ br(Assembler::always, false, Assembler::pt, deopt_blob->unpack_with_reexecution(), relocInfo::runtime_call_type);
    36.9 -        __ delayed()->nop();
   36.10 +
   36.11 +        AddressLiteral dest(deopt_blob->unpack_with_reexecution());
   36.12 +        __ jump_to(dest, O0);
   36.13 +        __ delayed()->restore();
   36.14        }
   36.15        break;
   36.16  
    37.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 24 20:55:28 2013 -0400
    37.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 24 21:11:02 2013 -0400
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    37.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -304,7 +304,7 @@
   37.11  
   37.12      // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
   37.13      // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier
   37.14 -    // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the
   37.15 +    // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the
   37.16      // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
   37.17      // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
   37.18      // that initial frame and retrying.
    38.1 --- a/src/cpu/sparc/vm/sparc.ad	Wed Apr 24 20:55:28 2013 -0400
    38.2 +++ b/src/cpu/sparc/vm/sparc.ad	Wed Apr 24 21:11:02 2013 -0400
    38.3 @@ -1,5 +1,5 @@
    38.4  //
    38.5 -// Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    38.6 +// Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    38.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.8  //
    38.9  // This code is free software; you can redistribute it and/or modify it
   38.10 @@ -8176,10 +8176,25 @@
   38.11    format %{ "SUBcc  $p,$q,$p\t! p' = p-q\n\t"
   38.12              "ADD    $p,$y,$tmp\t! g3=p-q+y\n\t"
   38.13              "MOVlt  $tmp,$p\t! p' < 0 ? p'+y : p'" %}
   38.14 -  ins_encode( enc_cadd_cmpLTMask(p, q, y, tmp) );
   38.15 -  ins_pipe( cadd_cmpltmask );
   38.16 -%}
   38.17 -
   38.18 +  ins_encode(enc_cadd_cmpLTMask(p, q, y, tmp));
   38.19 +  ins_pipe(cadd_cmpltmask);
   38.20 +%}
   38.21 +
   38.22 +instruct and_cmpLTMask(iRegI p, iRegI q, iRegI y, flagsReg ccr) %{
   38.23 +  match(Set p (AndI (CmpLTMask p q) y));
   38.24 +  effect(KILL ccr);
   38.25 +  ins_cost(DEFAULT_COST*3);
   38.26 +
   38.27 +  format %{ "CMP  $p,$q\n\t"
   38.28 +            "MOV  $y,$p\n\t"
   38.29 +            "MOVge G0,$p" %}
   38.30 +  ins_encode %{
   38.31 +    __ cmp($p$$Register, $q$$Register);
   38.32 +    __ mov($y$$Register, $p$$Register);
   38.33 +    __ movcc(Assembler::greaterEqual, false, Assembler::icc, G0, $p$$Register);
   38.34 +  %}
   38.35 +  ins_pipe(ialu_reg_reg_ialu);
   38.36 +%}
   38.37  
   38.38  //-----------------------------------------------------------------
   38.39  // Direct raw moves between float and general registers using VIS3.
    39.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Apr 24 20:55:28 2013 -0400
    39.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Apr 24 21:11:02 2013 -0400
    39.3 @@ -63,6 +63,13 @@
    39.4                                  noreg /* pre_val */,
    39.5                                  tmp, true /*preserve_o_regs*/);
    39.6  
    39.7 +        // G1 barrier needs uncompressed oop for region cross check.
    39.8 +        Register new_val = val;
    39.9 +        if (UseCompressedOops && val != G0) {
   39.10 +          new_val = tmp;
   39.11 +          __ mov(val, new_val);
   39.12 +        }
   39.13 +
   39.14          if (index == noreg ) {
   39.15            assert(Assembler::is_simm13(offset), "fix this code");
   39.16            __ store_heap_oop(val, base, offset);
   39.17 @@ -79,7 +86,7 @@
   39.18                __ add(base, index, base);
   39.19              }
   39.20            }
   39.21 -          __ g1_write_barrier_post(base, val, tmp);
   39.22 +          __ g1_write_barrier_post(base, new_val, tmp);
   39.23          }
   39.24        }
   39.25        break;
    40.1 --- a/src/cpu/x86/vm/assembler_x86.cpp	Wed Apr 24 20:55:28 2013 -0400
    40.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed Apr 24 21:11:02 2013 -0400
    40.3 @@ -214,14 +214,6 @@
    40.4    return enc;
    40.5  }
    40.6  
    40.7 -static int encode(XMMRegister r) {
    40.8 -  int enc = r->encoding();
    40.9 -  if (enc >= 8) {
   40.10 -    enc -= 8;
   40.11 -  }
   40.12 -  return enc;
   40.13 -}
   40.14 -
   40.15  void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
   40.16    assert(dst->has_byte_register(), "must have byte register");
   40.17    assert(isByte(op1) && isByte(op2), "wrong opcode");
    41.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Apr 24 20:55:28 2013 -0400
    41.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Apr 24 21:11:02 2013 -0400
    41.3 @@ -41,11 +41,6 @@
    41.4  
    41.5  #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
    41.6  
    41.7 -// Workaround for C++ overloading nastiness on '0' for RegisterOrConstant.
    41.8 -static RegisterOrConstant constant(int value) {
    41.9 -  return RegisterOrConstant(value);
   41.10 -}
   41.11 -
   41.12  void MethodHandles::load_klass_from_Class(MacroAssembler* _masm, Register klass_reg) {
   41.13    if (VerifyMethodHandles)
   41.14      verify_klass(_masm, klass_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_Class),
    42.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Apr 24 20:55:28 2013 -0400
    42.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Apr 24 21:11:02 2013 -0400
    42.3 @@ -158,14 +158,19 @@
    42.4          if (val == noreg) {
    42.5            __ store_heap_oop_null(Address(rdx, 0));
    42.6          } else {
    42.7 +          // G1 barrier needs uncompressed oop for region cross check.
    42.8 +          Register new_val = val;
    42.9 +          if (UseCompressedOops) {
   42.10 +            new_val = rbx;
   42.11 +            __ movptr(new_val, val);
   42.12 +          }
   42.13            __ store_heap_oop(Address(rdx, 0), val);
   42.14            __ g1_write_barrier_post(rdx /* store_adr */,
   42.15 -                                   val /* new_val */,
   42.16 +                                   new_val /* new_val */,
   42.17                                     r15_thread /* thread */,
   42.18                                     r8 /* tmp */,
   42.19                                     rbx /* tmp2 */);
   42.20          }
   42.21 -
   42.22        }
   42.23        break;
   42.24  #endif // INCLUDE_ALL_GCS
    43.1 --- a/src/cpu/x86/vm/x86_32.ad	Wed Apr 24 20:55:28 2013 -0400
    43.2 +++ b/src/cpu/x86/vm/x86_32.ad	Wed Apr 24 21:11:02 2013 -0400
    43.3 @@ -1,5 +1,5 @@
    43.4  //
    43.5 -// Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    43.6 +// Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    43.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.8  //
    43.9  // This code is free software; you can redistribute it and/or modify it
   43.10 @@ -2280,30 +2280,6 @@
   43.11      emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
   43.12    %}
   43.13  
   43.14 -  enc_class enc_cmpLTP_mem(rRegI p, rRegI q, memory mem, eCXRegI tmp) %{    // cadd_cmpLT
   43.15 -    int tmpReg = $tmp$$reg;
   43.16 -
   43.17 -    // SUB $p,$q
   43.18 -    emit_opcode(cbuf,0x2B);
   43.19 -    emit_rm(cbuf, 0x3, $p$$reg, $q$$reg);
   43.20 -    // SBB $tmp,$tmp
   43.21 -    emit_opcode(cbuf,0x1B);
   43.22 -    emit_rm(cbuf, 0x3, tmpReg, tmpReg);
   43.23 -    // AND $tmp,$y
   43.24 -    cbuf.set_insts_mark();       // Mark start of opcode for reloc info in mem operand
   43.25 -    emit_opcode(cbuf,0x23);
   43.26 -    int reg_encoding = tmpReg;
   43.27 -    int base  = $mem$$base;
   43.28 -    int index = $mem$$index;
   43.29 -    int scale = $mem$$scale;
   43.30 -    int displace = $mem$$disp;
   43.31 -    relocInfo::relocType disp_reloc = $mem->disp_reloc();
   43.32 -    encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_reloc);
   43.33 -    // ADD $p,$tmp
   43.34 -    emit_opcode(cbuf,0x03);
   43.35 -    emit_rm(cbuf, 0x3, $p$$reg, tmpReg);
   43.36 -  %}
   43.37 -
   43.38    enc_class shift_left_long( eRegL dst, eCXRegI shift ) %{
   43.39      // TEST shift,32
   43.40      emit_opcode(cbuf,0xF7);
   43.41 @@ -8885,9 +8861,9 @@
   43.42    %}
   43.43  %}
   43.44  
   43.45 -instruct cmpLTMask( eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr ) %{
   43.46 +instruct cmpLTMask(eCXRegI dst, ncxRegI p, ncxRegI q, eFlagsReg cr) %{
   43.47    match(Set dst (CmpLTMask p q));
   43.48 -  effect( KILL cr );
   43.49 +  effect(KILL cr);
   43.50    ins_cost(400);
   43.51  
   43.52    // SETlt can only use low byte of EAX,EBX, ECX, or EDX as destination
   43.53 @@ -8895,50 +8871,83 @@
   43.54              "CMP    $p,$q\n\t"
   43.55              "SETlt  $dst\n\t"
   43.56              "NEG    $dst" %}
   43.57 -  ins_encode( OpcRegReg(0x33,dst,dst),
   43.58 -              OpcRegReg(0x3B,p,q),
   43.59 -              setLT_reg(dst), neg_reg(dst) );
   43.60 -  ins_pipe( pipe_slow );
   43.61 -%}
   43.62 -
   43.63 -instruct cmpLTMask0( rRegI dst, immI0 zero, eFlagsReg cr ) %{
   43.64 +  ins_encode %{
   43.65 +    Register Rp = $p$$Register;
   43.66 +    Register Rq = $q$$Register;
   43.67 +    Register Rd = $dst$$Register;
   43.68 +    Label done;
   43.69 +    __ xorl(Rd, Rd);
   43.70 +    __ cmpl(Rp, Rq);
   43.71 +    __ setb(Assembler::less, Rd);
   43.72 +    __ negl(Rd);
   43.73 +  %}
   43.74 +
   43.75 +  ins_pipe(pipe_slow);
   43.76 +%}
   43.77 +
   43.78 +instruct cmpLTMask0(rRegI dst, immI0 zero, eFlagsReg cr) %{
   43.79    match(Set dst (CmpLTMask dst zero));
   43.80 -  effect( DEF dst, KILL cr );
   43.81 +  effect(DEF dst, KILL cr);
   43.82    ins_cost(100);
   43.83  
   43.84 -  format %{ "SAR    $dst,31" %}
   43.85 -  opcode(0xC1, 0x7);  /* C1 /7 ib */
   43.86 -  ins_encode( RegOpcImm( dst, 0x1F ) );
   43.87 -  ins_pipe( ialu_reg );
   43.88 -%}
   43.89 -
   43.90 -
   43.91 -instruct cadd_cmpLTMask( ncxRegI p, ncxRegI q, ncxRegI y, eCXRegI tmp, eFlagsReg cr ) %{
   43.92 +  format %{ "SAR    $dst,31\t# cmpLTMask0" %}
   43.93 +  ins_encode %{
   43.94 +  __ sarl($dst$$Register, 31);
   43.95 +  %}
   43.96 +  ins_pipe(ialu_reg);
   43.97 +%}
   43.98 +
   43.99 +/* better to save a register than avoid a branch */
  43.100 +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
  43.101    match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
  43.102 -  effect( KILL tmp, KILL cr );
  43.103 +  effect(KILL cr);
  43.104    ins_cost(400);
  43.105 -  // annoyingly, $tmp has no edges so you cant ask for it in
  43.106 -  // any format or encoding
  43.107 -  format %{ "SUB    $p,$q\n\t"
  43.108 -            "SBB    ECX,ECX\n\t"
  43.109 -            "AND    ECX,$y\n\t"
  43.110 -            "ADD    $p,ECX" %}
  43.111 -  ins_encode( enc_cmpLTP(p,q,y,tmp) );
  43.112 -  ins_pipe( pipe_cmplt );
  43.113 +  format %{ "SUB    $p,$q\t# cadd_cmpLTMask\n\t"
  43.114 +            "JGE    done\n\t"
  43.115 +            "ADD    $p,$y\n"
  43.116 +            "done:  " %}
  43.117 +  ins_encode %{
  43.118 +    Register Rp = $p$$Register;
  43.119 +    Register Rq = $q$$Register;
  43.120 +    Register Ry = $y$$Register;
  43.121 +    Label done;
  43.122 +    __ subl(Rp, Rq);
  43.123 +    __ jccb(Assembler::greaterEqual, done);
  43.124 +    __ addl(Rp, Ry);
  43.125 +    __ bind(done);
  43.126 +  %}
  43.127 +
  43.128 +  ins_pipe(pipe_cmplt);
  43.129 +%}
  43.130 +
  43.131 +/* better to save a register than avoid a branch */
  43.132 +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, eFlagsReg cr) %{
  43.133 +  match(Set y (AndI (CmpLTMask p q) y));
  43.134 +  effect(KILL cr);
  43.135 +
  43.136 +  ins_cost(300);
  43.137 +
  43.138 +  format %{ "CMPL     $p, $q\t# and_cmpLTMask\n\t"
  43.139 +            "JLT      done\n\t"
  43.140 +            "XORL     $y, $y\n"
  43.141 +            "done:  " %}
  43.142 +  ins_encode %{
  43.143 +    Register Rp = $p$$Register;
  43.144 +    Register Rq = $q$$Register;
  43.145 +    Register Ry = $y$$Register;
  43.146 +    Label done;
  43.147 +    __ cmpl(Rp, Rq);
  43.148 +    __ jccb(Assembler::less, done);
  43.149 +    __ xorl(Ry, Ry);
  43.150 +    __ bind(done);
  43.151 +  %}
  43.152 +
  43.153 +  ins_pipe(pipe_cmplt);
  43.154  %}
  43.155  
  43.156  /* If I enable this, I encourage spilling in the inner loop of compress.
  43.157 -instruct cadd_cmpLTMask_mem( ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr ) %{
  43.158 +instruct cadd_cmpLTMask_mem(ncxRegI p, ncxRegI q, memory y, eCXRegI tmp, eFlagsReg cr) %{
  43.159    match(Set p (AddI (AndI (CmpLTMask p q) (LoadI y)) (SubI p q)));
  43.160 -  effect( USE_KILL tmp, KILL cr );
  43.161 -  ins_cost(400);
  43.162 -
  43.163 -  format %{ "SUB    $p,$q\n\t"
  43.164 -            "SBB    ECX,ECX\n\t"
  43.165 -            "AND    ECX,$y\n\t"
  43.166 -            "ADD    $p,ECX" %}
  43.167 -  ins_encode( enc_cmpLTP_mem(p,q,y,tmp) );
  43.168 -%}
  43.169  */
  43.170  
  43.171  //----------Long Instructions------------------------------------------------
    44.1 --- a/src/cpu/x86/vm/x86_64.ad	Wed Apr 24 20:55:28 2013 -0400
    44.2 +++ b/src/cpu/x86/vm/x86_64.ad	Wed Apr 24 21:11:02 2013 -0400
    44.3 @@ -1,5 +1,5 @@
    44.4  //
    44.5 -// Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    44.6 +// Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    44.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.8  //
    44.9  // This code is free software; you can redistribute it and/or modify it
   44.10 @@ -1651,17 +1651,6 @@
   44.11    return PTR_RBP_REG_mask();
   44.12  }
   44.13  
   44.14 -static Address build_address(int b, int i, int s, int d) {
   44.15 -  Register index = as_Register(i);
   44.16 -  Address::ScaleFactor scale = (Address::ScaleFactor)s;
   44.17 -  if (index == rsp) {
   44.18 -    index = noreg;
   44.19 -    scale = Address::no_scale;
   44.20 -  }
   44.21 -  Address addr(as_Register(b), index, scale, d);
   44.22 -  return addr;
   44.23 -}
   44.24 -
   44.25  %}
   44.26  
   44.27  //----------ENCODING BLOCK-----------------------------------------------------
   44.28 @@ -9403,7 +9392,7 @@
   44.29    match(Set dst (CmpLTMask p q));
   44.30    effect(KILL cr);
   44.31  
   44.32 -  ins_cost(400); // XXX
   44.33 +  ins_cost(400);
   44.34    format %{ "cmpl    $p, $q\t# cmpLTMask\n\t"
   44.35              "setlt   $dst\n\t"
   44.36              "movzbl  $dst, $dst\n\t"
   44.37 @@ -9421,37 +9410,63 @@
   44.38    match(Set dst (CmpLTMask dst zero));
   44.39    effect(KILL cr);
   44.40  
   44.41 -  ins_cost(100); // XXX
   44.42 +  ins_cost(100);
   44.43    format %{ "sarl    $dst, #31\t# cmpLTMask0" %}
   44.44 -  opcode(0xC1, 0x7);  /* C1 /7 ib */
   44.45 -  ins_encode(reg_opc_imm(dst, 0x1F));
   44.46 +  ins_encode %{
   44.47 +  __ sarl($dst$$Register, 31);
   44.48 +  %}
   44.49    ins_pipe(ialu_reg);
   44.50  %}
   44.51  
   44.52 -
   44.53 -instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rRegI tmp, rFlagsReg cr)
   44.54 +/* Better to save a register than avoid a branch */
   44.55 +instruct cadd_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr)
   44.56  %{
   44.57    match(Set p (AddI (AndI (CmpLTMask p q) y) (SubI p q)));
   44.58 -  effect(TEMP tmp, KILL cr);
   44.59 -
   44.60 -  ins_cost(400); // XXX
   44.61 -  format %{ "subl    $p, $q\t# cadd_cmpLTMask1\n\t"
   44.62 -            "sbbl    $tmp, $tmp\n\t"
   44.63 -            "andl    $tmp, $y\n\t"
   44.64 -            "addl    $p, $tmp" %}
   44.65 +  effect(KILL cr);
   44.66 +  ins_cost(300);
   44.67 +  format %{ "subl   $p,$q\t# cadd_cmpLTMask\n\t"
   44.68 +            "jge    done\n\t"
   44.69 +            "addl   $p,$y\n"
   44.70 +            "done:  " %}
   44.71    ins_encode %{
   44.72      Register Rp = $p$$Register;
   44.73      Register Rq = $q$$Register;
   44.74      Register Ry = $y$$Register;
   44.75 -    Register Rt = $tmp$$Register;
   44.76 +    Label done;
   44.77      __ subl(Rp, Rq);
   44.78 -    __ sbbl(Rt, Rt);
   44.79 -    __ andl(Rt, Ry);
   44.80 -    __ addl(Rp, Rt);
   44.81 +    __ jccb(Assembler::greaterEqual, done);
   44.82 +    __ addl(Rp, Ry);
   44.83 +    __ bind(done);
   44.84    %}
   44.85    ins_pipe(pipe_cmplt);
   44.86  %}
   44.87  
   44.88 +/* Better to save a register than avoid a branch */
   44.89 +instruct and_cmpLTMask(rRegI p, rRegI q, rRegI y, rFlagsReg cr)
   44.90 +%{
   44.91 +  match(Set y (AndI (CmpLTMask p q) y));
   44.92 +  effect(KILL cr);
   44.93 +
   44.94 +  ins_cost(300);
   44.95 +
   44.96 +  format %{ "cmpl     $p, $q\t# and_cmpLTMask\n\t"
   44.97 +            "jlt      done\n\t"
   44.98 +            "xorl     $y, $y\n"
   44.99 +            "done:  " %}
  44.100 +  ins_encode %{
  44.101 +    Register Rp = $p$$Register;
  44.102 +    Register Rq = $q$$Register;
  44.103 +    Register Ry = $y$$Register;
  44.104 +    Label done;
  44.105 +    __ cmpl(Rp, Rq);
  44.106 +    __ jccb(Assembler::less, done);
  44.107 +    __ xorl(Ry, Ry);
  44.108 +    __ bind(done);
  44.109 +  %}
  44.110 +  ins_pipe(pipe_cmplt);
  44.111 +%}
  44.112 +
  44.113 +
  44.114  //---------- FP Instructions------------------------------------------------
  44.115  
  44.116  instruct cmpF_cc_reg(rFlagsRegU cr, regF src1, regF src2)
    45.1 --- a/src/os/bsd/dtrace/generateJvmOffsets.cpp	Wed Apr 24 20:55:28 2013 -0400
    45.2 +++ b/src/os/bsd/dtrace/generateJvmOffsets.cpp	Wed Apr 24 21:11:02 2013 -0400
    45.3 @@ -1,5 +1,5 @@
    45.4  /*
    45.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    45.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    45.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    45.8   *
    45.9   * This code is free software; you can redistribute it and/or modify it
   45.10 @@ -60,7 +60,7 @@
   45.11  #define PR_MODEL_LP64  2
   45.12  
   45.13  #ifdef COMPILER1
   45.14 -#if defined(DEBUG) || defined(FASTDEBUG)
   45.15 +#ifdef ASSERT
   45.16  
   45.17  /*
   45.18   * To avoid the most part of potential link errors
   45.19 @@ -84,7 +84,7 @@
   45.20  
   45.21  StubQueue* AbstractInterpreter::_code = NULL;
   45.22  
   45.23 -#endif /* defined(DEBUG) || defined(FASTDEBUG) */
   45.24 +#endif /* ASSERT */
   45.25  #endif /* COMPILER1 */
   45.26  
   45.27  #define GEN_OFFS(Type,Name)                             \
    46.1 --- a/src/os/bsd/vm/chaitin_bsd.cpp	Wed Apr 24 20:55:28 2013 -0400
    46.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    46.3 @@ -1,42 +0,0 @@
    46.4 -/*
    46.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    46.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.7 - *
    46.8 - * This code is free software; you can redistribute it and/or modify it
    46.9 - * under the terms of the GNU General Public License version 2 only, as
   46.10 - * published by the Free Software Foundation.
   46.11 - *
   46.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   46.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   46.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   46.15 - * version 2 for more details (a copy is included in the LICENSE file that
   46.16 - * accompanied this code).
   46.17 - *
   46.18 - * You should have received a copy of the GNU General Public License version
   46.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   46.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   46.21 - *
   46.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   46.23 - * or visit www.oracle.com if you need additional information or have any
   46.24 - * questions.
   46.25 - *
   46.26 - */
   46.27 -
   46.28 -#include "precompiled.hpp"
   46.29 -#include "opto/chaitin.hpp"
   46.30 -#include "opto/machnode.hpp"
   46.31 -
   46.32 -void PhaseRegAlloc::pd_preallocate_hook() {
   46.33 -  // no action
   46.34 -}
   46.35 -
   46.36 -#ifdef ASSERT
   46.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
   46.38 -  // no action
   46.39 -}
   46.40 -#endif
   46.41 -
   46.42 -
   46.43 -// Reconciliation History
   46.44 -// chaitin_solaris.cpp  1.7 99/07/12 23:54:22
   46.45 -// End
    47.1 --- a/src/os/bsd/vm/os_bsd.cpp	Wed Apr 24 20:55:28 2013 -0400
    47.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Wed Apr 24 21:11:02 2013 -0400
    47.3 @@ -152,7 +152,6 @@
    47.4  // utility functions
    47.5  
    47.6  static int SR_initialize();
    47.7 -static int SR_finalize();
    47.8  
    47.9  julong os::available_memory() {
   47.10    return Bsd::available_memory();
   47.11 @@ -1200,6 +1199,9 @@
   47.12    } else if (strchr(pname, *os::path_separator()) != NULL) {
   47.13      int n;
   47.14      char** pelements = split_path(pname, &n);
   47.15 +    if (pelements == NULL) {
   47.16 +      return false;
   47.17 +    }
   47.18      for (int i = 0 ; i < n ; i++) {
   47.19        // Really shouldn't be NULL, but check can't hurt
   47.20        if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
   47.21 @@ -2766,10 +2768,6 @@
   47.22    return 0;
   47.23  }
   47.24  
   47.25 -static int SR_finalize() {
   47.26 -  return 0;
   47.27 -}
   47.28 -
   47.29  
   47.30  // returns true on success and false on error - really an error is fatal
   47.31  // but this seems the normal response to library errors
   47.32 @@ -3578,16 +3576,6 @@
   47.33  ////////////////////////////////////////////////////////////////////////////////
   47.34  // debug support
   47.35  
   47.36 -static address same_page(address x, address y) {
   47.37 -  int page_bits = -os::vm_page_size();
   47.38 -  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
   47.39 -    return x;
   47.40 -  else if (x > y)
   47.41 -    return (address)(intptr_t(y) | ~page_bits) + 1;
   47.42 -  else
   47.43 -    return (address)(intptr_t(y) & page_bits);
   47.44 -}
   47.45 -
   47.46  bool os::find(address addr, outputStream* st) {
   47.47    Dl_info dlinfo;
   47.48    memset(&dlinfo, 0, sizeof(dlinfo));
   47.49 @@ -3611,8 +3599,8 @@
   47.50  
   47.51      if (Verbose) {
   47.52        // decode some bytes around the PC
   47.53 -      address begin = same_page(addr-40, addr);
   47.54 -      address end   = same_page(addr+40, addr);
   47.55 +      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
   47.56 +      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
   47.57        address       lowest = (address) dlinfo.dli_sname;
   47.58        if (!lowest)  lowest = (address) dlinfo.dli_fbase;
   47.59        if (begin < lowest)  begin = lowest;
    48.1 --- a/src/os/bsd/vm/perfMemory_bsd.cpp	Wed Apr 24 20:55:28 2013 -0400
    48.2 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Wed Apr 24 21:11:02 2013 -0400
    48.3 @@ -672,15 +672,15 @@
    48.4    RESTARTABLE(::open(filename, oflags), result);
    48.5    if (result == OS_ERR) {
    48.6      if (errno == ENOENT) {
    48.7 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
    48.8 -                  "Process not found");
    48.9 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   48.10 +                  "Process not found", OS_ERR);
   48.11      }
   48.12      else if (errno == EACCES) {
   48.13 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
   48.14 -                  "Permission denied");
   48.15 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   48.16 +                  "Permission denied", OS_ERR);
   48.17      }
   48.18      else {
   48.19 -      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
   48.20 +      THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
   48.21      }
   48.22    }
   48.23  
   48.24 @@ -828,7 +828,7 @@
   48.25    char* mapAddress;
   48.26    int result;
   48.27    int fd;
   48.28 -  size_t size;
   48.29 +  size_t size = 0;
   48.30    const char* luser = NULL;
   48.31  
   48.32    int mmap_prot;
   48.33 @@ -899,9 +899,12 @@
   48.34  
   48.35    if (*sizep == 0) {
   48.36      size = sharedmem_filesize(fd, CHECK);
   48.37 -    assert(size != 0, "unexpected size");
   48.38 +  } else {
   48.39 +    size = *sizep;
   48.40    }
   48.41  
   48.42 +  assert(size > 0, "unexpected size <= 0");
   48.43 +
   48.44    mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
   48.45  
   48.46    // attempt to close the file - restart if it gets interrupted,
    49.1 --- a/src/os/linux/vm/chaitin_linux.cpp	Wed Apr 24 20:55:28 2013 -0400
    49.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.3 @@ -1,42 +0,0 @@
    49.4 -/*
    49.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    49.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.7 - *
    49.8 - * This code is free software; you can redistribute it and/or modify it
    49.9 - * under the terms of the GNU General Public License version 2 only, as
   49.10 - * published by the Free Software Foundation.
   49.11 - *
   49.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   49.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   49.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   49.15 - * version 2 for more details (a copy is included in the LICENSE file that
   49.16 - * accompanied this code).
   49.17 - *
   49.18 - * You should have received a copy of the GNU General Public License version
   49.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   49.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   49.21 - *
   49.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   49.23 - * or visit www.oracle.com if you need additional information or have any
   49.24 - * questions.
   49.25 - *
   49.26 - */
   49.27 -
   49.28 -#include "precompiled.hpp"
   49.29 -#include "opto/chaitin.hpp"
   49.30 -#include "opto/machnode.hpp"
   49.31 -
   49.32 -void PhaseRegAlloc::pd_preallocate_hook() {
   49.33 -  // no action
   49.34 -}
   49.35 -
   49.36 -#ifdef ASSERT
   49.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
   49.38 -  // no action
   49.39 -}
   49.40 -#endif
   49.41 -
   49.42 -
   49.43 -// Reconciliation History
   49.44 -// chaitin_solaris.cpp  1.7 99/07/12 23:54:22
   49.45 -// End
    50.1 --- a/src/os/linux/vm/os_linux.cpp	Wed Apr 24 20:55:28 2013 -0400
    50.2 +++ b/src/os/linux/vm/os_linux.cpp	Wed Apr 24 21:11:02 2013 -0400
    50.3 @@ -176,7 +176,6 @@
    50.4  // utility functions
    50.5  
    50.6  static int SR_initialize();
    50.7 -static int SR_finalize();
    50.8  
    50.9  julong os::available_memory() {
   50.10    return Linux::available_memory();
   50.11 @@ -1633,6 +1632,9 @@
   50.12    } else if (strchr(pname, *os::path_separator()) != NULL) {
   50.13      int n;
   50.14      char** pelements = split_path(pname, &n);
   50.15 +    if (pelements == NULL) {
   50.16 +      return false;
   50.17 +    }
   50.18      for (int i = 0 ; i < n ; i++) {
   50.19        // Really shouldn't be NULL, but check can't hurt
   50.20        if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
   50.21 @@ -3655,10 +3657,6 @@
   50.22    return 0;
   50.23  }
   50.24  
   50.25 -static int SR_finalize() {
   50.26 -  return 0;
   50.27 -}
   50.28 -
   50.29  
   50.30  // returns true on success and false on error - really an error is fatal
   50.31  // but this seems the normal response to library errors
   50.32 @@ -4500,16 +4498,6 @@
   50.33  ////////////////////////////////////////////////////////////////////////////////
   50.34  // debug support
   50.35  
   50.36 -static address same_page(address x, address y) {
   50.37 -  int page_bits = -os::vm_page_size();
   50.38 -  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
   50.39 -    return x;
   50.40 -  else if (x > y)
   50.41 -    return (address)(intptr_t(y) | ~page_bits) + 1;
   50.42 -  else
   50.43 -    return (address)(intptr_t(y) & page_bits);
   50.44 -}
   50.45 -
   50.46  bool os::find(address addr, outputStream* st) {
   50.47    Dl_info dlinfo;
   50.48    memset(&dlinfo, 0, sizeof(dlinfo));
   50.49 @@ -4533,8 +4521,8 @@
   50.50  
   50.51      if (Verbose) {
   50.52        // decode some bytes around the PC
   50.53 -      address begin = same_page(addr-40, addr);
   50.54 -      address end   = same_page(addr+40, addr);
   50.55 +      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
   50.56 +      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
   50.57        address       lowest = (address) dlinfo.dli_sname;
   50.58        if (!lowest)  lowest = (address) dlinfo.dli_fbase;
   50.59        if (begin < lowest)  begin = lowest;
    51.1 --- a/src/os/linux/vm/perfMemory_linux.cpp	Wed Apr 24 20:55:28 2013 -0400
    51.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp	Wed Apr 24 21:11:02 2013 -0400
    51.3 @@ -672,15 +672,15 @@
    51.4    RESTARTABLE(::open(filename, oflags), result);
    51.5    if (result == OS_ERR) {
    51.6      if (errno == ENOENT) {
    51.7 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
    51.8 -                  "Process not found");
    51.9 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   51.10 +                  "Process not found", OS_ERR);
   51.11      }
   51.12      else if (errno == EACCES) {
   51.13 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
   51.14 -                  "Permission denied");
   51.15 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   51.16 +                  "Permission denied", OS_ERR);
   51.17      }
   51.18      else {
   51.19 -      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
   51.20 +      THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
   51.21      }
   51.22    }
   51.23  
   51.24 @@ -828,7 +828,7 @@
   51.25    char* mapAddress;
   51.26    int result;
   51.27    int fd;
   51.28 -  size_t size;
   51.29 +  size_t size = 0;
   51.30    const char* luser = NULL;
   51.31  
   51.32    int mmap_prot;
   51.33 @@ -899,9 +899,12 @@
   51.34  
   51.35    if (*sizep == 0) {
   51.36      size = sharedmem_filesize(fd, CHECK);
   51.37 -    assert(size != 0, "unexpected size");
   51.38 +  } else {
   51.39 +    size = *sizep;
   51.40    }
   51.41  
   51.42 +  assert(size > 0, "unexpected size <= 0");
   51.43 +
   51.44    mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
   51.45  
   51.46    // attempt to close the file - restart if it gets interrupted,
    52.1 --- a/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Apr 24 20:55:28 2013 -0400
    52.2 +++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp	Wed Apr 24 21:11:02 2013 -0400
    52.3 @@ -1,5 +1,5 @@
    52.4  /*
    52.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    52.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    52.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.8   *
    52.9   * This code is free software; you can redistribute it and/or modify it
   52.10 @@ -55,14 +55,14 @@
   52.11  #include "utilities/accessFlags.hpp"
   52.12  #include "utilities/globalDefinitions.hpp"
   52.13  #ifdef COMPILER1
   52.14 -#if defined(DEBUG) || defined(FASTDEBUG)
   52.15 +#ifdef ASSERT
   52.16  
   52.17  /*
   52.18   * To avoid the most part of potential link errors
   52.19   * we link this program with -z nodefs .
   52.20   *
   52.21   * But for 'debug1' and 'fastdebug1' we still have to provide
   52.22 - * a particular workaround for the following symbols bellow.
   52.23 + * a particular workaround for the following symbols below.
   52.24   * It will be good to find out a generic way in the future.
   52.25   */
   52.26  
   52.27 @@ -79,7 +79,7 @@
   52.28  
   52.29  StubQueue* AbstractInterpreter::_code = NULL;
   52.30  
   52.31 -#endif /* defined(DEBUG) || defined(FASTDEBUG) */
   52.32 +#endif /* ASSERT */
   52.33  #endif /* COMPILER1 */
   52.34  
   52.35  #define GEN_OFFS(Type,Name)                             \
    53.1 --- a/src/os/solaris/vm/chaitin_solaris.cpp	Wed Apr 24 20:55:28 2013 -0400
    53.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    53.3 @@ -1,46 +0,0 @@
    53.4 -/*
    53.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    53.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    53.7 - *
    53.8 - * This code is free software; you can redistribute it and/or modify it
    53.9 - * under the terms of the GNU General Public License version 2 only, as
   53.10 - * published by the Free Software Foundation.
   53.11 - *
   53.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   53.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   53.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   53.15 - * version 2 for more details (a copy is included in the LICENSE file that
   53.16 - * accompanied this code).
   53.17 - *
   53.18 - * You should have received a copy of the GNU General Public License version
   53.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   53.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   53.21 - *
   53.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   53.23 - * or visit www.oracle.com if you need additional information or have any
   53.24 - * questions.
   53.25 - *
   53.26 - */
   53.27 -
   53.28 -#include "precompiled.hpp"
   53.29 -#include "opto/chaitin.hpp"
   53.30 -#include "opto/machnode.hpp"
   53.31 -
   53.32 -void PhaseRegAlloc::pd_preallocate_hook() {
   53.33 -  // no action
   53.34 -}
   53.35 -
   53.36 -#ifdef ASSERT
   53.37 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
   53.38 -  // no action
   53.39 -}
   53.40 -#endif
   53.41 -
   53.42 -
   53.43 -//Reconciliation History
   53.44 -// 1.1 99/02/12 15:35:26 chaitin_win32.cpp
   53.45 -// 1.2 99/02/18 15:38:56 chaitin_win32.cpp
   53.46 -// 1.4 99/03/09 10:37:48 chaitin_win32.cpp
   53.47 -// 1.6 99/03/25 11:07:44 chaitin_win32.cpp
   53.48 -// 1.8 99/06/22 16:38:58 chaitin_win32.cpp
   53.49 -//End
    54.1 --- a/src/os/solaris/vm/os_solaris.cpp	Wed Apr 24 20:55:28 2013 -0400
    54.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Wed Apr 24 21:11:02 2013 -0400
    54.3 @@ -1885,6 +1885,9 @@
    54.4    } else if (strchr(pname, *os::path_separator()) != NULL) {
    54.5      int n;
    54.6      char** pelements = split_path(pname, &n);
    54.7 +    if (pelements == NULL) {
    54.8 +      return false;
    54.9 +    }
   54.10      for (int i = 0 ; i < n ; i++) {
   54.11        // really shouldn't be NULL but what the heck, check can't hurt
   54.12        if (pelements[i] == NULL || strlen(pelements[i]) == 0) {
   54.13 @@ -5787,16 +5790,6 @@
   54.14  
   54.15  //---------------------------------------------------------------------------------
   54.16  
   54.17 -static address same_page(address x, address y) {
   54.18 -  intptr_t page_bits = -os::vm_page_size();
   54.19 -  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits))
   54.20 -    return x;
   54.21 -  else if (x > y)
   54.22 -    return (address)(intptr_t(y) | ~page_bits) + 1;
   54.23 -  else
   54.24 -    return (address)(intptr_t(y) & page_bits);
   54.25 -}
   54.26 -
   54.27  bool os::find(address addr, outputStream* st) {
   54.28    Dl_info dlinfo;
   54.29    memset(&dlinfo, 0, sizeof(dlinfo));
   54.30 @@ -5822,8 +5815,8 @@
   54.31  
   54.32      if (Verbose) {
   54.33        // decode some bytes around the PC
   54.34 -      address begin = same_page(addr-40, addr);
   54.35 -      address end   = same_page(addr+40, addr);
   54.36 +      address begin = clamp_address_in_page(addr-40, addr, os::vm_page_size());
   54.37 +      address end   = clamp_address_in_page(addr+40, addr, os::vm_page_size());
   54.38        address       lowest = (address) dlinfo.dli_sname;
   54.39        if (!lowest)  lowest = (address) dlinfo.dli_fbase;
   54.40        if (begin < lowest)  begin = lowest;
    55.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Apr 24 20:55:28 2013 -0400
    55.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Wed Apr 24 21:11:02 2013 -0400
    55.3 @@ -687,15 +687,15 @@
    55.4    RESTARTABLE(::open(filename, oflags), result);
    55.5    if (result == OS_ERR) {
    55.6      if (errno == ENOENT) {
    55.7 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
    55.8 -                  "Process not found");
    55.9 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   55.10 +                  "Process not found", OS_ERR);
   55.11      }
   55.12      else if (errno == EACCES) {
   55.13 -      THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(),
   55.14 -                  "Permission denied");
   55.15 +      THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
   55.16 +                  "Permission denied", OS_ERR);
   55.17      }
   55.18      else {
   55.19 -      THROW_MSG_0(vmSymbols::java_io_IOException(), strerror(errno));
   55.20 +      THROW_MSG_(vmSymbols::java_io_IOException(), strerror(errno), OS_ERR);
   55.21      }
   55.22    }
   55.23  
   55.24 @@ -843,7 +843,7 @@
   55.25    char* mapAddress;
   55.26    int result;
   55.27    int fd;
   55.28 -  size_t size;
   55.29 +  size_t size = 0;
   55.30    const char* luser = NULL;
   55.31  
   55.32    int mmap_prot;
   55.33 @@ -914,9 +914,12 @@
   55.34  
   55.35    if (*sizep == 0) {
   55.36      size = sharedmem_filesize(fd, CHECK);
   55.37 -    assert(size != 0, "unexpected size");
   55.38 +  } else {
   55.39 +    size = *sizep;
   55.40    }
   55.41  
   55.42 +  assert(size > 0, "unexpected size <= 0");
   55.43 +
   55.44    mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
   55.45  
   55.46    // attempt to close the file - restart if it gets interrupted,
    56.1 --- a/src/os/windows/vm/chaitin_windows.cpp	Wed Apr 24 20:55:28 2013 -0400
    56.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    56.3 @@ -1,78 +0,0 @@
    56.4 -/*
    56.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    56.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    56.7 - *
    56.8 - * This code is free software; you can redistribute it and/or modify it
    56.9 - * under the terms of the GNU General Public License version 2 only, as
   56.10 - * published by the Free Software Foundation.
   56.11 - *
   56.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   56.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   56.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   56.15 - * version 2 for more details (a copy is included in the LICENSE file that
   56.16 - * accompanied this code).
   56.17 - *
   56.18 - * You should have received a copy of the GNU General Public License version
   56.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   56.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   56.21 - *
   56.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   56.23 - * or visit www.oracle.com if you need additional information or have any
   56.24 - * questions.
   56.25 - *
   56.26 - */
   56.27 -
   56.28 -#include "precompiled.hpp"
   56.29 -#include "opto/chaitin.hpp"
   56.30 -#include "opto/machnode.hpp"
   56.31 -
   56.32 -// Disallow the use of the frame pointer (EBP) for implicit null exceptions
   56.33 -// on win95/98.  If we do not do this, the OS gets confused and gives a stack
   56.34 -// error.
   56.35 -void PhaseRegAlloc::pd_preallocate_hook() {
   56.36 -#ifndef _WIN64
   56.37 -  if (ImplicitNullChecks && !os::win32::is_nt()) {
   56.38 -    for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) {
   56.39 -      Block *block = _cfg._blocks[block_num];
   56.40 -
   56.41 -      Node *block_end = block->end();
   56.42 -      if (block_end->is_MachNullCheck() &&
   56.43 -          block_end->as_Mach()->ideal_Opcode() != Op_Con) {
   56.44 -        // The last instruction in the block is an implicit null check.
   56.45 -        // Fix its input so that it does not load into the frame pointer.
   56.46 -        _matcher.pd_implicit_null_fixup(block_end->in(1)->as_Mach(),
   56.47 -                                        block_end->as_MachNullCheck()->_vidx);
   56.48 -      }
   56.49 -    }
   56.50 -  }
   56.51 -#else
   56.52 -  // WIN64==itanium on XP
   56.53 -#endif
   56.54 -}
   56.55 -
   56.56 -#ifdef ASSERT
   56.57 -// Verify that no implicit null check uses the frame pointer (EBP) as
   56.58 -// its register on win95/98.  Use of the frame pointer in an implicit
   56.59 -// null check confuses the OS, yielding a stack error.
   56.60 -void PhaseRegAlloc::pd_postallocate_verify_hook() {
   56.61 -#ifndef _WIN64
   56.62 -  if (ImplicitNullChecks && !os::win32::is_nt()) {
   56.63 -    for (uint block_num=1; block_num<_cfg._num_blocks; block_num++) {
   56.64 -      Block *block = _cfg._blocks[block_num];
   56.65 -
   56.66 -      Node *block_end = block->_nodes[block->_nodes.size()-1];
   56.67 -      if (block_end->is_MachNullCheck() && block_end->as_Mach()->ideal_Opcode() != Op_Con) {
   56.68 -        // The last instruction in the block is an implicit
   56.69 -        // null check.  Verify that this instruction does not
   56.70 -        // use the frame pointer.
   56.71 -        int reg = get_reg_first(block_end->in(1)->in(block_end->as_MachNullCheck()->_vidx));
   56.72 -        assert(reg != EBP_num,
   56.73 -               "implicit null check using frame pointer on win95/98");
   56.74 -      }
   56.75 -    }
   56.76 -  }
   56.77 -#else
   56.78 -  // WIN64==itanium on XP
   56.79 -#endif
   56.80 -}
   56.81 -#endif
    57.1 --- a/src/os/windows/vm/os_windows.cpp	Wed Apr 24 20:55:28 2013 -0400
    57.2 +++ b/src/os/windows/vm/os_windows.cpp	Wed Apr 24 21:11:02 2013 -0400
    57.3 @@ -1182,6 +1182,9 @@
    57.4    } else if (strchr(pname, *os::path_separator()) != NULL) {
    57.5      int n;
    57.6      char** pelements = split_path(pname, &n);
    57.7 +    if (pelements == NULL) {
    57.8 +      return false;
    57.9 +    }
   57.10      for (int i = 0 ; i < n ; i++) {
   57.11        char* path = pelements[i];
   57.12        // Really shouldn't be NULL, but check can't hurt
   57.13 @@ -4235,9 +4238,6 @@
   57.14            path[3] = '\0';
   57.15    }
   57.16  
   57.17 -  #ifdef DEBUG
   57.18 -    jio_fprintf(stderr, "sysNativePath: %s\n", path);
   57.19 -  #endif DEBUG
   57.20    return path;
   57.21  }
   57.22  
    58.1 --- a/src/os/windows/vm/perfMemory_windows.cpp	Wed Apr 24 20:55:28 2013 -0400
    58.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp	Wed Apr 24 21:11:02 2013 -0400
    58.3 @@ -1581,7 +1581,7 @@
    58.4    ResourceMark rm;
    58.5  
    58.6    void *mapAddress = 0;
    58.7 -  size_t size;
    58.8 +  size_t size = 0;
    58.9    HANDLE fmh;
   58.10    DWORD ofm_access;
   58.11    DWORD mv_access;
   58.12 @@ -1652,9 +1652,12 @@
   58.13  
   58.14    if (*sizep == 0) {
   58.15      size = sharedmem_filesize(rfilename, CHECK);
   58.16 -    assert(size != 0, "unexpected size");
   58.17 +  } else {
   58.18 +    size = *sizep;
   58.19    }
   58.20  
   58.21 +  assert(size > 0, "unexpected size <= 0");
   58.22 +
   58.23    // Open the file mapping object with the given name
   58.24    fmh = open_sharedmem_object(robjectname, ofm_access, CHECK);
   58.25  
    59.1 --- a/src/share/tools/hsdis/Makefile	Wed Apr 24 20:55:28 2013 -0400
    59.2 +++ b/src/share/tools/hsdis/Makefile	Wed Apr 24 21:11:02 2013 -0400
    59.3 @@ -1,5 +1,5 @@
    59.4  #
    59.5 -# Copyright (c) 2008, 2012, Oracle and/or its affiliates. All rights reserved.
    59.6 +# Copyright (c) 2008, 2013, Oracle and/or its affiliates. All rights reserved.
    59.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.8  #
    59.9  # This code is free software; you can redistribute it and/or modify it
   59.10 @@ -19,7 +19,7 @@
   59.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   59.12  # or visit www.oracle.com if you need additional information or have any
   59.13  # questions.
   59.14 -#  
   59.15 +#
   59.16  #
   59.17  
   59.18  # Single gnu makefile for solaris, linux and windows (windows requires cygwin and mingw)
   59.19 @@ -66,7 +66,7 @@
   59.20  endif
   59.21  CC 		= $(MINGW)-gcc
   59.22  CONFIGURE_ARGS= --host=$(MINGW) --target=$(MINGW)
   59.23 -else   #linux 
   59.24 +else   #linux
   59.25  CPU             = $(shell uname -m)
   59.26  ARCH1=$(CPU:x86_64=amd64)
   59.27  ARCH=$(ARCH1:i686=i386)
   59.28 @@ -116,7 +116,6 @@
   59.29  else #Windows
   59.30  OS		= windows
   59.31  CC		= gcc
   59.32 -#CPPFLAGS	+= /D"WIN32" /D"_WINDOWS" /D"DEBUG" /D"NDEBUG"
   59.33  CFLAGS		+=  /nologo /MD /W3 /WX /O2 /Fo$(@:.dll=.obj) /Gi-
   59.34  CFLAGS		+= LIBARCH=\"$(LIBARCH)\"
   59.35  DLDFLAGS	+= /dll /subsystem:windows /incremental:no \
    60.1 --- a/src/share/tools/launcher/wildcard.c	Wed Apr 24 20:55:28 2013 -0400
    60.2 +++ b/src/share/tools/launcher/wildcard.c	Wed Apr 24 21:11:02 2013 -0400
    60.3 @@ -368,8 +368,10 @@
    60.4      const char *basename;
    60.5      FileList fl = FileList_new(16);
    60.6      WildcardIterator it = WildcardIterator_for(wildcard);
    60.7 -    if (it == NULL)
    60.8 +    if (it == NULL) {
    60.9 +        FileList_free(fl);
   60.10          return NULL;
   60.11 +    }
   60.12      while ((basename = WildcardIterator_next(it)) != NULL)
   60.13          if (isJarFileName(basename))
   60.14              FileList_add(fl, wildcardConcat(wildcard, basename));
    61.1 --- a/src/share/vm/adlc/output_c.cpp	Wed Apr 24 20:55:28 2013 -0400
    61.2 +++ b/src/share/vm/adlc/output_c.cpp	Wed Apr 24 21:11:02 2013 -0400
    61.3 @@ -63,11 +63,10 @@
    61.4      RegDef *reg_def = NULL;
    61.5      RegDef *next = NULL;
    61.6      registers->reset_RegDefs();
    61.7 -    for( reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next ) {
    61.8 +    for (reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next) {
    61.9        next = registers->iter_RegDefs();
   61.10        const char *comma = (next != NULL) ? "," : " // no trailing comma";
   61.11 -      fprintf(fp,"  \"%s\"%s\n",
   61.12 -                 reg_def->_regname, comma );
   61.13 +      fprintf(fp,"  \"%s\"%s\n", reg_def->_regname, comma);
   61.14      }
   61.15  
   61.16      // Finish defining enumeration
   61.17 @@ -79,10 +78,10 @@
   61.18      reg_def = NULL;
   61.19      next = NULL;
   61.20      registers->reset_RegDefs();
   61.21 -    for( reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next ) {
   61.22 +    for (reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next) {
   61.23        next = registers->iter_RegDefs();
   61.24        const char *comma = (next != NULL) ? "," : " // no trailing comma";
   61.25 -      fprintf(fp,"\t%s%s\n", reg_def->_concrete, comma );
   61.26 +      fprintf(fp,"\t%s%s\n", reg_def->_concrete, comma);
   61.27      }
   61.28      // Finish defining array
   61.29      fprintf(fp,"\t};\n");
   61.30 @@ -104,19 +103,17 @@
   61.31      RegDef *reg_def = NULL;
   61.32      RegDef *next    = NULL;
   61.33      registers->reset_RegDefs();
   61.34 -    for( reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next ) {
   61.35 +    for (reg_def = registers->iter_RegDefs(); reg_def != NULL; reg_def = next) {
   61.36        next = registers->iter_RegDefs();
   61.37        const char* register_encode = reg_def->register_encode();
   61.38        const char *comma = (next != NULL) ? "," : " // no trailing comma";
   61.39        int encval;
   61.40        if (!ADLParser::is_int_token(register_encode, encval)) {
   61.41 -        fprintf(fp,"  %s%s  // %s\n",
   61.42 -                register_encode, comma, reg_def->_regname );
   61.43 +        fprintf(fp,"  %s%s  // %s\n", register_encode, comma, reg_def->_regname);
   61.44        } else {
   61.45          // Output known constants in hex char format (backward compatibility).
   61.46          assert(encval < 256, "Exceeded supported width for register encoding");
   61.47 -        fprintf(fp,"  (unsigned char)'\\x%X'%s  // %s\n",
   61.48 -                encval,          comma, reg_def->_regname );
   61.49 +        fprintf(fp,"  (unsigned char)'\\x%X'%s  // %s\n", encval, comma, reg_def->_regname);
   61.50        }
   61.51      }
   61.52      // Finish defining enumeration
   61.53 @@ -133,9 +130,10 @@
   61.54      fprintf(fp,"// Enumeration of register class names\n");
   61.55      fprintf(fp, "enum machRegisterClass {\n");
   61.56      registers->_rclasses.reset();
   61.57 -    for( const char *class_name = NULL;
   61.58 -         (class_name = registers->_rclasses.iter()) != NULL; ) {
   61.59 -      fprintf(fp,"  %s,\n", toUpper( class_name ));
   61.60 +    for (const char *class_name = NULL; (class_name = registers->_rclasses.iter()) != NULL;) {
   61.61 +      const char * class_name_to_upper = toUpper(class_name);
   61.62 +      fprintf(fp,"  %s,\n", class_name_to_upper);
   61.63 +      delete[] class_name_to_upper;
   61.64      }
   61.65      // Finish defining enumeration
   61.66      fprintf(fp, "  _last_Mach_Reg_Class\n");
   61.67 @@ -148,7 +146,7 @@
   61.68  void ArchDesc::declare_register_masks(FILE *fp_hpp) {
   61.69    const char  *rc_name;
   61.70  
   61.71 -  if( _register ) {
   61.72 +  if (_register) {
   61.73      // Build enumeration of user-defined register classes.
   61.74      defineRegClassEnum(fp_hpp, _register);
   61.75  
   61.76 @@ -156,24 +154,27 @@
   61.77      fprintf(fp_hpp,"\n");
   61.78      fprintf(fp_hpp,"// Register masks, one for each register class.\n");
   61.79      _register->_rclasses.reset();
   61.80 -    for( rc_name = NULL;
   61.81 -         (rc_name = _register->_rclasses.iter()) != NULL; ) {
   61.82 -      const char *prefix    = "";
   61.83 -      RegClass   *reg_class = _register->getRegClass(rc_name);
   61.84 -      assert( reg_class, "Using an undefined register class");
   61.85 +    for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
   61.86 +      const char *prefix = "";
   61.87 +      RegClass *reg_class = _register->getRegClass(rc_name);
   61.88 +      assert(reg_class, "Using an undefined register class");
   61.89 +
   61.90 +      const char* rc_name_to_upper = toUpper(rc_name);
   61.91  
   61.92        if (reg_class->_user_defined == NULL) {
   61.93 -        fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix, toUpper( rc_name ) );
   61.94 -        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ));
   61.95 +        fprintf(fp_hpp, "extern const RegMask _%s%s_mask;\n", prefix,  rc_name_to_upper);
   61.96 +        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { return _%s%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
   61.97        } else {
   61.98 -        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, toUpper( rc_name ), reg_class->_user_defined);
   61.99 +        fprintf(fp_hpp, "inline const RegMask &%s%s_mask() { %s }\n", prefix, rc_name_to_upper, reg_class->_user_defined);
  61.100        }
  61.101  
  61.102 -      if( reg_class->_stack_or_reg ) {
  61.103 +      if (reg_class->_stack_or_reg) {
  61.104          assert(reg_class->_user_defined == NULL, "no user defined reg class here");
  61.105 -        fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, toUpper( rc_name ) );
  61.106 -        fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, toUpper( rc_name ), prefix, toUpper( rc_name ) );
  61.107 +        fprintf(fp_hpp, "extern const RegMask _%sSTACK_OR_%s_mask;\n", prefix, rc_name_to_upper);
  61.108 +        fprintf(fp_hpp, "inline const RegMask &%sSTACK_OR_%s_mask() { return _%sSTACK_OR_%s_mask; }\n", prefix, rc_name_to_upper, prefix, rc_name_to_upper);
  61.109        }
  61.110 +      delete[] rc_name_to_upper;
  61.111 +
  61.112      }
  61.113    }
  61.114  }
  61.115 @@ -183,34 +184,41 @@
  61.116  void ArchDesc::build_register_masks(FILE *fp_cpp) {
  61.117    const char  *rc_name;
  61.118  
  61.119 -  if( _register ) {
  61.120 +  if (_register) {
  61.121      // Generate a list of register masks, one for each class.
  61.122      fprintf(fp_cpp,"\n");
  61.123      fprintf(fp_cpp,"// Register masks, one for each register class.\n");
  61.124      _register->_rclasses.reset();
  61.125 -    for( rc_name = NULL;
  61.126 -         (rc_name = _register->_rclasses.iter()) != NULL; ) {
  61.127 -      const char *prefix    = "";
  61.128 -      RegClass   *reg_class = _register->getRegClass(rc_name);
  61.129 -      assert( reg_class, "Using an undefined register class");
  61.130 -
  61.131 -      if (reg_class->_user_defined != NULL) continue;
  61.132 +    for (rc_name = NULL; (rc_name = _register->_rclasses.iter()) != NULL;) {
  61.133 +      const char *prefix = "";
  61.134 +      RegClass *reg_class = _register->getRegClass(rc_name);
  61.135 +      assert(reg_class, "Using an undefined register class");
  61.136 +
  61.137 +      if (reg_class->_user_defined != NULL) {
  61.138 +        continue;
  61.139 +      }
  61.140  
  61.141        int len = RegisterForm::RegMask_Size();
  61.142 -      fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, toUpper( rc_name ) );
  61.143 -      { int i;
  61.144 -        for( i = 0; i < len-1; i++ )
  61.145 -          fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,false));
  61.146 -        fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,false));
  61.147 +      const char* rc_name_to_upper = toUpper(rc_name);
  61.148 +      fprintf(fp_cpp, "const RegMask _%s%s_mask(", prefix, rc_name_to_upper);
  61.149 +
  61.150 +      {
  61.151 +        int i;
  61.152 +        for(i = 0; i < len - 1; i++) {
  61.153 +          fprintf(fp_cpp," 0x%x,", reg_class->regs_in_word(i, false));
  61.154 +        }
  61.155 +        fprintf(fp_cpp," 0x%x );\n", reg_class->regs_in_word(i, false));
  61.156        }
  61.157  
  61.158 -      if( reg_class->_stack_or_reg ) {
  61.159 +      if (reg_class->_stack_or_reg) {
  61.160          int i;
  61.161 -        fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, toUpper( rc_name ) );
  61.162 -        for( i = 0; i < len-1; i++ )
  61.163 -          fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i,true));
  61.164 -        fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i,true));
  61.165 +        fprintf(fp_cpp, "const RegMask _%sSTACK_OR_%s_mask(", prefix, rc_name_to_upper);
  61.166 +        for(i = 0; i < len - 1; i++) {
  61.167 +          fprintf(fp_cpp," 0x%x,",reg_class->regs_in_word(i, true));
  61.168 +        }
  61.169 +        fprintf(fp_cpp," 0x%x );\n",reg_class->regs_in_word(i, true));
  61.170        }
  61.171 +      delete[] rc_name_to_upper;
  61.172      }
  61.173    }
  61.174  }
  61.175 @@ -2676,7 +2684,9 @@
  61.176        if (strcmp(first_reg_class, "stack_slots") == 0) {
  61.177          fprintf(fp,"  return &(Compile::current()->FIRST_STACK_mask());\n");
  61.178        } else {
  61.179 -        fprintf(fp,"  return &%s_mask();\n", toUpper(first_reg_class));
  61.180 +        const char* first_reg_class_to_upper = toUpper(first_reg_class);
  61.181 +        fprintf(fp,"  return &%s_mask();\n", first_reg_class_to_upper);
  61.182 +        delete[] first_reg_class_to_upper;
  61.183        }
  61.184      } else {
  61.185        // Build a switch statement to return the desired mask.
  61.186 @@ -2688,7 +2698,9 @@
  61.187          if( !strcmp(reg_class, "stack_slots") ) {
  61.188            fprintf(fp, "  case %d: return &(Compile::current()->FIRST_STACK_mask());\n", index);
  61.189          } else {
  61.190 -          fprintf(fp, "  case %d: return &%s_mask();\n", index, toUpper(reg_class));
  61.191 +          const char* reg_class_to_upper = toUpper(reg_class);
  61.192 +          fprintf(fp, "  case %d: return &%s_mask();\n", index, reg_class_to_upper);
  61.193 +          delete[] reg_class_to_upper;
  61.194          }
  61.195        }
  61.196        fprintf(fp,"  }\n");
    62.1 --- a/src/share/vm/adlc/output_h.cpp	Wed Apr 24 20:55:28 2013 -0400
    62.2 +++ b/src/share/vm/adlc/output_h.cpp	Wed Apr 24 21:11:02 2013 -0400
    62.3 @@ -2069,9 +2069,21 @@
    62.4    void closing()     { fprintf(_cpp, "  _LAST_MACH_OPER\n");
    62.5                         OutputMap::closing();
    62.6    }
    62.7 -  void map(OpClassForm &opc)  { fprintf(_cpp, "  %s", _AD.machOperEnum(opc._ident) ); }
    62.8 -  void map(OperandForm &oper) { fprintf(_cpp, "  %s", _AD.machOperEnum(oper._ident) ); }
    62.9 -  void map(char        *name) { fprintf(_cpp, "  %s", _AD.machOperEnum(name)); }
   62.10 +  void map(OpClassForm &opc)  {
   62.11 +    const char* opc_ident_to_upper = _AD.machOperEnum(opc._ident);
   62.12 +    fprintf(_cpp, "  %s", opc_ident_to_upper);
   62.13 +    delete[] opc_ident_to_upper;
   62.14 +  }
   62.15 +  void map(OperandForm &oper) {
   62.16 +    const char* oper_ident_to_upper = _AD.machOperEnum(oper._ident);
   62.17 +    fprintf(_cpp, "  %s", oper_ident_to_upper);
   62.18 +    delete[] oper_ident_to_upper;
   62.19 +  }
   62.20 +  void map(char *name) {
   62.21 +    const char* name_to_upper = _AD.machOperEnum(name);
   62.22 +    fprintf(_cpp, "  %s", name_to_upper);
   62.23 +    delete[] name_to_upper;
   62.24 +  }
   62.25  
   62.26    bool do_instructions()      { return false; }
   62.27    void map(InstructForm &inst){ assert( false, "ShouldNotCallThis()"); }
    63.1 --- a/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Apr 24 20:55:28 2013 -0400
    63.2 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Apr 24 21:11:02 2013 -0400
    63.3 @@ -938,5 +938,7 @@
    63.4  void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
    63.5  void Canonicalizer::do_RuntimeCall(RuntimeCall* x) {}
    63.6  void Canonicalizer::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
    63.7 +#ifdef ASSERT
    63.8  void Canonicalizer::do_Assert(Assert* x) {}
    63.9 +#endif
   63.10  void Canonicalizer::do_MemBar(MemBar* x) {}
    64.1 --- a/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Apr 24 20:55:28 2013 -0400
    64.2 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Apr 24 21:11:02 2013 -0400
    64.3 @@ -108,7 +108,9 @@
    64.4    virtual void do_RuntimeCall    (RuntimeCall*     x);
    64.5    virtual void do_MemBar         (MemBar*          x);
    64.6    virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
    64.7 +#ifdef ASSERT
    64.8    virtual void do_Assert         (Assert*          x);
    64.9 +#endif
   64.10  };
   64.11  
   64.12  #endif // SHARE_VM_C1_C1_CANONICALIZER_HPP
    65.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Wed Apr 24 20:55:28 2013 -0400
    65.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Wed Apr 24 21:11:02 2013 -0400
    65.3 @@ -111,7 +111,9 @@
    65.4  class   RuntimeCall;
    65.5  class   MemBar;
    65.6  class   RangeCheckPredicate;
    65.7 +#ifdef ASSERT
    65.8  class   Assert;
    65.9 +#endif
   65.10  
   65.11  // A Value is a reference to the instruction creating the value
   65.12  typedef Instruction* Value;
    66.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Apr 24 20:55:28 2013 -0400
    66.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Apr 24 21:11:02 2013 -0400
    66.3 @@ -871,12 +871,14 @@
    66.4    }
    66.5  }
    66.6  
    66.7 +#ifdef ASSERT
    66.8  void InstructionPrinter::do_Assert(Assert* x) {
    66.9    output()->print("assert ");
   66.10    print_value(x->x());
   66.11    output()->print(" %s ", cond_name(x->cond()));
   66.12    print_value(x->y());
   66.13  }
   66.14 +#endif
   66.15  
   66.16  void InstructionPrinter::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
   66.17    print_unsafe_object_op(x, "UnsafePrefetchWrite");
    67.1 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Apr 24 20:55:28 2013 -0400
    67.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Apr 24 21:11:02 2013 -0400
    67.3 @@ -136,7 +136,9 @@
    67.4    virtual void do_RuntimeCall    (RuntimeCall*     x);
    67.5    virtual void do_MemBar         (MemBar*          x);
    67.6    virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
    67.7 +#ifdef ASSERT
    67.8    virtual void do_Assert         (Assert*          x);
    67.9 +#endif
   67.10  };
   67.11  #endif // PRODUCT
   67.12  
    68.1 --- a/src/share/vm/c1/c1_LIR.cpp	Wed Apr 24 20:55:28 2013 -0400
    68.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Wed Apr 24 21:11:02 2013 -0400
    68.3 @@ -1778,7 +1778,9 @@
    68.4       // LIR_OpProfileCall
    68.5       case lir_profile_call:          s = "profile_call";  break;
    68.6       // LIR_OpAssert
    68.7 +#ifdef ASSERT
    68.8       case lir_assert:                s = "assert";        break;
    68.9 +#endif
   68.10       case lir_none:                  ShouldNotReachHere();break;
   68.11      default:                         s = "illegal_op";    break;
   68.12    }
   68.13 @@ -2025,12 +2027,14 @@
   68.14    out->print("[lbl:0x%x]", stub()->entry());
   68.15  }
   68.16  
   68.17 +#ifdef ASSERT
   68.18  void LIR_OpAssert::print_instr(outputStream* out) const {
   68.19    print_condition(out, condition()); out->print(" ");
   68.20    in_opr1()->print(out);             out->print(" ");
   68.21    in_opr2()->print(out);             out->print(", \"");
   68.22    out->print(msg());                 out->print("\"");
   68.23  }
   68.24 +#endif
   68.25  
   68.26  
   68.27  void LIR_OpDelay::print_instr(outputStream* out) const {
    69.1 --- a/src/share/vm/c1/c1_LIR.hpp	Wed Apr 24 20:55:28 2013 -0400
    69.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Wed Apr 24 21:11:02 2013 -0400
    69.3 @@ -881,8 +881,9 @@
    69.4  class    LIR_OpTypeCheck;
    69.5  class    LIR_OpCompareAndSwap;
    69.6  class    LIR_OpProfileCall;
    69.7 +#ifdef ASSERT
    69.8  class    LIR_OpAssert;
    69.9 -
   69.10 +#endif
   69.11  
   69.12  // LIR operation codes
   69.13  enum LIR_Code {
   69.14 @@ -1139,7 +1140,9 @@
   69.15    virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; }
   69.16    virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; }
   69.17    virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; }
   69.18 +#ifdef ASSERT
   69.19    virtual LIR_OpAssert* as_OpAssert() { return NULL; }
   69.20 +#endif
   69.21  
   69.22    virtual void verify() const {}
   69.23  };
    70.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Apr 24 20:55:28 2013 -0400
    70.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Apr 24 21:11:02 2013 -0400
    70.3 @@ -711,25 +711,6 @@
    70.4    }
    70.5  }
    70.6  
    70.7 -static Value maxvalue(IfOp* ifop) {
    70.8 -  switch (ifop->cond()) {
    70.9 -    case If::eql: return NULL;
   70.10 -    case If::neq: return NULL;
   70.11 -    case If::lss: // x <  y ? x : y
   70.12 -    case If::leq: // x <= y ? x : y
   70.13 -      if (ifop->x() == ifop->tval() &&
   70.14 -          ifop->y() == ifop->fval()) return ifop->y();
   70.15 -      return NULL;
   70.16 -
   70.17 -    case If::gtr: // x >  y ? y : x
   70.18 -    case If::geq: // x >= y ? y : x
   70.19 -      if (ifop->x() == ifop->tval() &&
   70.20 -          ifop->y() == ifop->fval()) return ifop->y();
   70.21 -      return NULL;
   70.22 -
   70.23 -  }
   70.24 -}
   70.25 -
   70.26  static ciType* phi_declared_type(Phi* phi) {
   70.27    ciType* t = phi->operand_at(0)->declared_type();
   70.28    if (t == NULL) {
   70.29 @@ -3123,8 +3104,8 @@
   70.30    }
   70.31  }
   70.32  
   70.33 +#ifdef ASSERT
   70.34  void LIRGenerator::do_Assert(Assert *x) {
   70.35 -#ifdef ASSERT
   70.36    ValueTag tag = x->x()->type()->tag();
   70.37    If::Condition cond = x->cond();
   70.38  
   70.39 @@ -3144,9 +3125,8 @@
   70.40    LIR_Opr right = yin->result();
   70.41  
   70.42    __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
   70.43 +}
   70.44  #endif
   70.45 -}
   70.46 -
   70.47  
   70.48  void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
   70.49  
    71.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Apr 24 20:55:28 2013 -0400
    71.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Apr 24 21:11:02 2013 -0400
    71.3 @@ -537,7 +537,9 @@
    71.4    virtual void do_RuntimeCall    (RuntimeCall*     x);
    71.5    virtual void do_MemBar         (MemBar*          x);
    71.6    virtual void do_RangeCheckPredicate(RangeCheckPredicate* x);
    71.7 +#ifdef ASSERT
    71.8    virtual void do_Assert         (Assert*          x);
    71.9 +#endif
   71.10  };
   71.11  
   71.12  
    72.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Wed Apr 24 20:55:28 2013 -0400
    72.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Wed Apr 24 21:11:02 2013 -0400
    72.3 @@ -535,7 +535,9 @@
    72.4    void do_RuntimeCall    (RuntimeCall*     x);
    72.5    void do_MemBar         (MemBar*          x);
    72.6    void do_RangeCheckPredicate(RangeCheckPredicate* x);
    72.7 +#ifdef ASSERT
    72.8    void do_Assert         (Assert*          x);
    72.9 +#endif
   72.10  };
   72.11  
   72.12  
   72.13 @@ -718,8 +720,9 @@
   72.14  void NullCheckVisitor::do_RuntimeCall    (RuntimeCall*     x) {}
   72.15  void NullCheckVisitor::do_MemBar         (MemBar*          x) {}
   72.16  void NullCheckVisitor::do_RangeCheckPredicate(RangeCheckPredicate* x) {}
   72.17 +#ifdef ASSERT
   72.18  void NullCheckVisitor::do_Assert         (Assert*          x) {}
   72.19 -
   72.20 +#endif
   72.21  
   72.22  void NullCheckEliminator::visit(Value* p) {
   72.23    assert(*p != NULL, "should not find NULL instructions");
    73.1 --- a/src/share/vm/c1/c1_RangeCheckElimination.hpp	Wed Apr 24 20:55:28 2013 -0400
    73.2 +++ b/src/share/vm/c1/c1_RangeCheckElimination.hpp	Wed Apr 24 21:11:02 2013 -0400
    73.3 @@ -166,7 +166,9 @@
    73.4      void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
    73.5      void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    73.6      void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    73.7 +#ifdef ASSERT
    73.8      void do_Assert         (Assert*          x) { /* nothing to do */ };
    73.9 +#endif
   73.10    };
   73.11  
   73.12  #ifdef ASSERT
    74.1 --- a/src/share/vm/c1/c1_ValueMap.cpp	Wed Apr 24 20:55:28 2013 -0400
    74.2 +++ b/src/share/vm/c1/c1_ValueMap.cpp	Wed Apr 24 21:11:02 2013 -0400
    74.3 @@ -316,6 +316,7 @@
    74.4    ShortLoopOptimizer*   _short_loop_optimizer;
    74.5    Instruction*          _insertion_point;
    74.6    ValueStack *          _state;
    74.7 +  bool                  _insert_is_pred;
    74.8  
    74.9    void set_invariant(Value v) const    { _gvn->set_processed(v); }
   74.10    bool is_invariant(Value v) const     { return _gvn->is_processed(v); }
   74.11 @@ -339,6 +340,7 @@
   74.12  
   74.13    assert(insertion_block->end()->as_Base() == NULL, "cannot insert into entry block");
   74.14    _insertion_point = insertion_block->end()->prev();
   74.15 +  _insert_is_pred = loop_header->is_predecessor(insertion_block);
   74.16  
   74.17    BlockEnd *block_end = insertion_block->end();
   74.18    _state = block_end->state_before();
   74.19 @@ -379,13 +381,13 @@
   74.20      } else if (cur->as_LoadField() != NULL) {
   74.21        LoadField* lf = (LoadField*)cur;
   74.22        // deoptimizes on NullPointerException
   74.23 -      cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj());
   74.24 +      cur_invariant = !lf->needs_patching() && !lf->field()->is_volatile() && !_short_loop_optimizer->has_field_store(lf->field()->type()->basic_type()) && is_invariant(lf->obj()) && _insert_is_pred;
   74.25      } else if (cur->as_ArrayLength() != NULL) {
   74.26        ArrayLength *length = cur->as_ArrayLength();
   74.27        cur_invariant = is_invariant(length->array());
   74.28      } else if (cur->as_LoadIndexed() != NULL) {
   74.29        LoadIndexed *li = (LoadIndexed *)cur->as_LoadIndexed();
   74.30 -      cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index());
   74.31 +      cur_invariant = !_short_loop_optimizer->has_indexed_store(as_BasicType(cur->type())) && is_invariant(li->array()) && is_invariant(li->index()) && _insert_is_pred;
   74.32      }
   74.33  
   74.34      if (cur_invariant) {
    75.1 --- a/src/share/vm/c1/c1_ValueMap.hpp	Wed Apr 24 20:55:28 2013 -0400
    75.2 +++ b/src/share/vm/c1/c1_ValueMap.hpp	Wed Apr 24 21:11:02 2013 -0400
    75.3 @@ -207,7 +207,9 @@
    75.4    void do_RuntimeCall    (RuntimeCall*     x) { /* nothing to do */ };
    75.5    void do_MemBar         (MemBar*          x) { /* nothing to do */ };
    75.6    void do_RangeCheckPredicate(RangeCheckPredicate* x) { /* nothing to do */ };
    75.7 +#ifdef ASSERT
    75.8    void do_Assert         (Assert*          x) { /* nothing to do */ };
    75.9 +#endif
   75.10  };
   75.11  
   75.12  
    76.1 --- a/src/share/vm/classfile/classFileParser.cpp	Wed Apr 24 20:55:28 2013 -0400
    76.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Wed Apr 24 21:11:02 2013 -0400
    76.3 @@ -1723,9 +1723,6 @@
    76.4        } else {
    76.5          coll->set_contended_group(0); // default contended group
    76.6        }
    76.7 -      coll->set_contended(true);
    76.8 -    } else {
    76.9 -      coll->set_contended(false);
   76.10      }
   76.11    }
   76.12  }
    77.1 --- a/src/share/vm/classfile/classFileParser.hpp	Wed Apr 24 20:55:28 2013 -0400
    77.2 +++ b/src/share/vm/classfile/classFileParser.hpp	Wed Apr 24 21:11:02 2013 -0400
    77.3 @@ -150,7 +150,6 @@
    77.4      void set_contended_group(u2 group) { _contended_group = group; }
    77.5      u2 contended_group() { return _contended_group; }
    77.6  
    77.7 -    void set_contended(bool contended) { set_annotation(_sun_misc_Contended); }
    77.8      bool is_contended() { return has_annotation(_sun_misc_Contended); }
    77.9    };
   77.10  
    78.1 --- a/src/share/vm/classfile/classLoader.cpp	Wed Apr 24 20:55:28 2013 -0400
    78.2 +++ b/src/share/vm/classfile/classLoader.cpp	Wed Apr 24 21:11:02 2013 -0400
    78.3 @@ -1274,13 +1274,16 @@
    78.4    Handle system_class_loader (THREAD, SystemDictionary::java_system_loader());
    78.5    // Iterate over all bootstrap class path entries
    78.6    ClassPathEntry* e = _first_entry;
    78.7 +  jlong start = os::javaTimeMillis();
    78.8    while (e != NULL) {
    78.9      // We stop at rt.jar, unless it is the first bootstrap path entry
   78.10      if (e->is_rt_jar() && e != _first_entry) break;
   78.11      e->compile_the_world(system_class_loader, CATCH);
   78.12      e = e->next();
   78.13    }
   78.14 -  tty->print_cr("CompileTheWorld : Done");
   78.15 +  jlong end = os::javaTimeMillis();
   78.16 +  tty->print_cr("CompileTheWorld : Done (%d classes, %d methods, %d ms)",
   78.17 +                _compile_the_world_class_counter, _compile_the_world_method_counter, (end - start));
   78.18    {
   78.19      // Print statistics as if before normal exit:
   78.20      extern void print_statistics();
   78.21 @@ -1289,7 +1292,8 @@
   78.22    vm_exit(0);
   78.23  }
   78.24  
   78.25 -int ClassLoader::_compile_the_world_counter = 0;
   78.26 +int ClassLoader::_compile_the_world_class_counter = 0;
   78.27 +int ClassLoader::_compile_the_world_method_counter = 0;
   78.28  static int _codecache_sweep_counter = 0;
   78.29  
   78.30  // Filter out all exceptions except OOMs
   78.31 @@ -1311,8 +1315,8 @@
   78.32      // If the file has a period after removing .class, it's not really a
   78.33      // valid class file.  The class loader will check everything else.
   78.34      if (strchr(buffer, '.') == NULL) {
   78.35 -      _compile_the_world_counter++;
   78.36 -      if (_compile_the_world_counter > CompileTheWorldStopAt) return;
   78.37 +      _compile_the_world_class_counter++;
   78.38 +      if (_compile_the_world_class_counter > CompileTheWorldStopAt) return;
   78.39  
   78.40        // Construct name without extension
   78.41        TempNewSymbol sym = SymbolTable::new_symbol(buffer, CHECK);
   78.42 @@ -1329,16 +1333,16 @@
   78.43          if (HAS_PENDING_EXCEPTION) {
   78.44            // If something went wrong in preloading we just ignore it
   78.45            clear_pending_exception_if_not_oom(CHECK);
   78.46 -          tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_counter, buffer);
   78.47 +          tty->print_cr("Preloading failed for (%d) %s", _compile_the_world_class_counter, buffer);
   78.48          }
   78.49        }
   78.50  
   78.51 -      if (_compile_the_world_counter >= CompileTheWorldStartAt) {
   78.52 +      if (_compile_the_world_class_counter >= CompileTheWorldStartAt) {
   78.53          if (k.is_null() || exception_occurred) {
   78.54            // If something went wrong (e.g. ExceptionInInitializerError) we skip this class
   78.55 -          tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer);
   78.56 +          tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_class_counter, buffer);
   78.57          } else {
   78.58 -          tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_counter, buffer);
   78.59 +          tty->print_cr("CompileTheWorld (%d) : %s", _compile_the_world_class_counter, buffer);
   78.60            // Preload all classes to get around uncommon traps
   78.61            // Iterate over all methods in class
   78.62            for (int n = 0; n < k->methods()->length(); n++) {
   78.63 @@ -1356,7 +1360,9 @@
   78.64                                              methodHandle(), 0, "CTW", THREAD);
   78.65                if (HAS_PENDING_EXCEPTION) {
   78.66                  clear_pending_exception_if_not_oom(CHECK);
   78.67 -                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
   78.68 +                tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
   78.69 +              } else {
   78.70 +                _compile_the_world_method_counter++;
   78.71                }
   78.72                if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
   78.73                  // Clobber the first compile and force second tier compilation
   78.74 @@ -1370,7 +1376,9 @@
   78.75                                                methodHandle(), 0, "CTW", THREAD);
   78.76                  if (HAS_PENDING_EXCEPTION) {
   78.77                    clear_pending_exception_if_not_oom(CHECK);
   78.78 -                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_counter, m->name()->as_C_string());
   78.79 +                  tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name()->as_C_string());
   78.80 +                } else {
   78.81 +                  _compile_the_world_method_counter++;
   78.82                  }
   78.83                }
   78.84              }
    79.1 --- a/src/share/vm/classfile/classLoader.hpp	Wed Apr 24 20:55:28 2013 -0400
    79.2 +++ b/src/share/vm/classfile/classLoader.hpp	Wed Apr 24 21:11:02 2013 -0400
    79.3 @@ -340,11 +340,12 @@
    79.4    // Force compilation of all methods in all classes in bootstrap class path (stress test)
    79.5  #ifndef PRODUCT
    79.6   private:
    79.7 -  static int _compile_the_world_counter;
    79.8 +  static int _compile_the_world_class_counter;
    79.9 +  static int _compile_the_world_method_counter;
   79.10   public:
   79.11    static void compile_the_world();
   79.12    static void compile_the_world_in(char* name, Handle loader, TRAPS);
   79.13 -  static int  compile_the_world_counter() { return _compile_the_world_counter; }
   79.14 +  static int  compile_the_world_counter() { return _compile_the_world_class_counter; }
   79.15  #endif //PRODUCT
   79.16  };
   79.17  
    80.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Wed Apr 24 20:55:28 2013 -0400
    80.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Wed Apr 24 21:11:02 2013 -0400
    80.3 @@ -70,15 +70,19 @@
    80.4    _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
    80.5    _metaspace(NULL), _unloading(false), _klasses(NULL),
    80.6    _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
    80.7 -  _next(NULL), _dependencies(NULL),
    80.8 +  _next(NULL), _dependencies(),
    80.9    _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
   80.10      // empty
   80.11  }
   80.12  
   80.13  void ClassLoaderData::init_dependencies(TRAPS) {
   80.14 +  _dependencies.init(CHECK);
   80.15 +}
   80.16 +
   80.17 +void ClassLoaderData::Dependencies::init(TRAPS) {
   80.18    // Create empty dependencies array to add to. CMS requires this to be
   80.19    // an oop so that it can track additions via card marks.  We think.
   80.20 -  _dependencies = (oop)oopFactory::new_objectArray(2, CHECK);
   80.21 +  _list_head = oopFactory::new_objectArray(2, CHECK);
   80.22  }
   80.23  
   80.24  bool ClassLoaderData::claim() {
   80.25 @@ -95,13 +99,17 @@
   80.26    }
   80.27  
   80.28    f->do_oop(&_class_loader);
   80.29 -  f->do_oop(&_dependencies);
   80.30 +  _dependencies.oops_do(f);
   80.31    _handles->oops_do(f);
   80.32    if (klass_closure != NULL) {
   80.33      classes_do(klass_closure);
   80.34    }
   80.35  }
   80.36  
   80.37 +void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
   80.38 +  f->do_oop((oop*)&_list_head);
   80.39 +}
   80.40 +
   80.41  void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
   80.42    for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
   80.43      klass_closure->do_klass(k);
   80.44 @@ -154,14 +162,14 @@
   80.45    // It's a dependency we won't find through GC, add it. This is relatively rare
   80.46    // Must handle over GC point.
   80.47    Handle dependency(THREAD, to);
   80.48 -  from_cld->add_dependency(dependency, CHECK);
   80.49 +  from_cld->_dependencies.add(dependency, CHECK);
   80.50  }
   80.51  
   80.52  
   80.53 -void ClassLoaderData::add_dependency(Handle dependency, TRAPS) {
   80.54 +void ClassLoaderData::Dependencies::add(Handle dependency, TRAPS) {
   80.55    // Check first if this dependency is already in the list.
   80.56    // Save a pointer to the last to add to under the lock.
   80.57 -  objArrayOop ok = (objArrayOop)_dependencies;
   80.58 +  objArrayOop ok = _list_head;
   80.59    objArrayOop last = NULL;
   80.60    while (ok != NULL) {
   80.61      last = ok;
   80.62 @@ -184,16 +192,17 @@
   80.63    objArrayHandle new_dependency(THREAD, deps);
   80.64  
   80.65    // Add the dependency under lock
   80.66 -  locked_add_dependency(last_handle, new_dependency);
   80.67 +  locked_add(last_handle, new_dependency, THREAD);
   80.68  }
   80.69  
   80.70 -void ClassLoaderData::locked_add_dependency(objArrayHandle last_handle,
   80.71 -                                            objArrayHandle new_dependency) {
   80.72 +void ClassLoaderData::Dependencies::locked_add(objArrayHandle last_handle,
   80.73 +                                               objArrayHandle new_dependency,
   80.74 +                                               Thread* THREAD) {
   80.75  
   80.76    // Have to lock and put the new dependency on the end of the dependency
   80.77    // array so the card mark for CMS sees that this dependency is new.
   80.78    // Can probably do this lock free with some effort.
   80.79 -  MutexLockerEx ml(metaspace_lock(),  Mutex::_no_safepoint_check_flag);
   80.80 +  ObjectLocker ol(Handle(THREAD, _list_head), THREAD);
   80.81  
   80.82    oop loader_or_mirror = new_dependency->obj_at(0);
   80.83  
    81.1 --- a/src/share/vm/classfile/classLoaderData.hpp	Wed Apr 24 20:55:28 2013 -0400
    81.2 +++ b/src/share/vm/classfile/classLoaderData.hpp	Wed Apr 24 21:11:02 2013 -0400
    81.3 @@ -93,6 +93,18 @@
    81.4  class ClassLoaderData : public CHeapObj<mtClass> {
    81.5    friend class VMStructs;
    81.6   private:
    81.7 +  class Dependencies VALUE_OBJ_CLASS_SPEC {
    81.8 +    objArrayOop _list_head;
    81.9 +    void locked_add(objArrayHandle last,
   81.10 +                    objArrayHandle new_dependency,
   81.11 +                    Thread* THREAD);
   81.12 +   public:
   81.13 +    Dependencies() : _list_head(NULL) {}
   81.14 +    void add(Handle dependency, TRAPS);
   81.15 +    void init(TRAPS);
   81.16 +    void oops_do(OopClosure* f);
   81.17 +  };
   81.18 +
   81.19    friend class ClassLoaderDataGraph;
   81.20    friend class ClassLoaderDataGraphMetaspaceIterator;
   81.21    friend class MetaDataFactory;
   81.22 @@ -100,10 +112,11 @@
   81.23  
   81.24    static ClassLoaderData * _the_null_class_loader_data;
   81.25  
   81.26 -  oop _class_loader;       // oop used to uniquely identify a class loader
   81.27 -                           // class loader or a canonical class path
   81.28 -  oop _dependencies;       // oop to hold dependencies from this class loader
   81.29 -                           // data to others.
   81.30 +  oop _class_loader;          // oop used to uniquely identify a class loader
   81.31 +                              // class loader or a canonical class path
   81.32 +  Dependencies _dependencies; // holds dependencies from this class loader
   81.33 +                              // data to others.
   81.34 +
   81.35    Metaspace * _metaspace;  // Meta-space where meta-data defined by the
   81.36                             // classes in the class loader are allocated.
   81.37    Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
   81.38 @@ -134,9 +147,6 @@
   81.39    static Metaspace* _ro_metaspace;
   81.40    static Metaspace* _rw_metaspace;
   81.41  
   81.42 -  void add_dependency(Handle dependency, TRAPS);
   81.43 -  void locked_add_dependency(objArrayHandle last, objArrayHandle new_dependency);
   81.44 -
   81.45    void set_next(ClassLoaderData* next) { _next = next; }
   81.46    ClassLoaderData* next() const        { return _next; }
   81.47  
    82.1 --- a/src/share/vm/classfile/stackMapFrame.hpp	Wed Apr 24 20:55:28 2013 -0400
    82.2 +++ b/src/share/vm/classfile/stackMapFrame.hpp	Wed Apr 24 21:11:02 2013 -0400
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    82.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -175,14 +175,14 @@
   82.11        ErrorContext* ctx, TRAPS) const;
   82.12  
   82.13    inline void set_mark() {
   82.14 -#ifdef DEBUG
   82.15 +#ifdef ASSERT
   82.16      // Put bogus type to indicate it's no longer valid.
   82.17      if (_stack_mark != -1) {
   82.18        for (int i = _stack_mark - 1; i >= _stack_size; --i) {
   82.19          _stack[i] = VerificationType::bogus_type();
   82.20        }
   82.21      }
   82.22 -#endif // def DEBUG
   82.23 +#endif // def ASSERT
   82.24      _stack_mark = _stack_size;
   82.25    }
   82.26  
    83.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Wed Apr 24 20:55:28 2013 -0400
    83.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Apr 24 21:11:02 2013 -0400
    83.3 @@ -1592,9 +1592,10 @@
    83.4  // Used for assertions and verification only
    83.5  Klass* SystemDictionary::find_class(Symbol* class_name, ClassLoaderData* loader_data) {
    83.6    #ifndef ASSERT
    83.7 -  guarantee(VerifyBeforeGC   ||
    83.8 -            VerifyDuringGC   ||
    83.9 -            VerifyBeforeExit ||
   83.10 +  guarantee(VerifyBeforeGC      ||
   83.11 +            VerifyDuringGC      ||
   83.12 +            VerifyBeforeExit    ||
   83.13 +            VerifyDuringStartup ||
   83.14              VerifyAfterGC, "too expensive");
   83.15    #endif
   83.16    assert_locked_or_safepoint(SystemDictionary_lock);
    84.1 --- a/src/share/vm/classfile/verifier.cpp	Wed Apr 24 20:55:28 2013 -0400
    84.2 +++ b/src/share/vm/classfile/verifier.cpp	Wed Apr 24 21:11:02 2013 -0400
    84.3 @@ -63,6 +63,7 @@
    84.4  
    84.5  #define NOFAILOVER_MAJOR_VERSION                       51
    84.6  #define NONZERO_PADDING_BYTES_IN_SWITCH_MAJOR_VERSION  51
    84.7 +#define STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION       52
    84.8  
    84.9  // Access to external entry for VerifyClassCodes - old byte code verifier
   84.10  
   84.11 @@ -2320,6 +2321,11 @@
   84.12        types = (1 << JVM_CONSTANT_InterfaceMethodref) |
   84.13                (1 << JVM_CONSTANT_Methodref);
   84.14        break;
   84.15 +    case Bytecodes::_invokestatic:
   84.16 +      types = (_klass->major_version() < STATIC_METHOD_IN_INTERFACE_MAJOR_VERSION) ?
   84.17 +        (1 << JVM_CONSTANT_Methodref) :
   84.18 +        ((1 << JVM_CONSTANT_InterfaceMethodref) | (1 << JVM_CONSTANT_Methodref));
   84.19 +      break;
   84.20      default:
   84.21        types = 1 << JVM_CONSTANT_Methodref;
   84.22    }
    85.1 --- a/src/share/vm/code/codeBlob.cpp	Wed Apr 24 20:55:28 2013 -0400
    85.2 +++ b/src/share/vm/code/codeBlob.cpp	Wed Apr 24 21:11:02 2013 -0400
    85.3 @@ -348,14 +348,14 @@
    85.4  
    85.5  
    85.6  void* RuntimeStub::operator new(size_t s, unsigned size) {
    85.7 -  void* p = CodeCache::allocate(size);
    85.8 +  void* p = CodeCache::allocate(size, true);
    85.9    if (!p) fatal("Initial size of CodeCache is too small");
   85.10    return p;
   85.11  }
   85.12  
   85.13  // operator new shared by all singletons:
   85.14  void* SingletonBlob::operator new(size_t s, unsigned size) {
   85.15 -  void* p = CodeCache::allocate(size);
   85.16 +  void* p = CodeCache::allocate(size, true);
   85.17    if (!p) fatal("Initial size of CodeCache is too small");
   85.18    return p;
   85.19  }
    86.1 --- a/src/share/vm/code/codeCache.cpp	Wed Apr 24 20:55:28 2013 -0400
    86.2 +++ b/src/share/vm/code/codeCache.cpp	Wed Apr 24 21:11:02 2013 -0400
    86.3 @@ -172,7 +172,7 @@
    86.4  
    86.5  static size_t maxCodeCacheUsed = 0;
    86.6  
    86.7 -CodeBlob* CodeCache::allocate(int size) {
    86.8 +CodeBlob* CodeCache::allocate(int size, bool is_critical) {
    86.9    // Do not seize the CodeCache lock here--if the caller has not
   86.10    // already done so, we are going to lose bigtime, since the code
   86.11    // cache will contain a garbage CodeBlob until the caller can
   86.12 @@ -183,7 +183,7 @@
   86.13    CodeBlob* cb = NULL;
   86.14    _number_of_blobs++;
   86.15    while (true) {
   86.16 -    cb = (CodeBlob*)_heap->allocate(size);
   86.17 +    cb = (CodeBlob*)_heap->allocate(size, is_critical);
   86.18      if (cb != NULL) break;
   86.19      if (!_heap->expand_by(CodeCacheExpansionSize)) {
   86.20        // Expansion failed
   86.21 @@ -192,8 +192,8 @@
   86.22      if (PrintCodeCacheExtension) {
   86.23        ResourceMark rm;
   86.24        tty->print_cr("code cache extended to [" INTPTR_FORMAT ", " INTPTR_FORMAT "] (%d bytes)",
   86.25 -                    (intptr_t)_heap->begin(), (intptr_t)_heap->end(),
   86.26 -                    (address)_heap->end() - (address)_heap->begin());
   86.27 +                    (intptr_t)_heap->low_boundary(), (intptr_t)_heap->high(),
   86.28 +                    (address)_heap->high() - (address)_heap->low_boundary());
   86.29      }
   86.30    }
   86.31    maxCodeCacheUsed = MAX2(maxCodeCacheUsed, ((address)_heap->high_boundary() -
   86.32 @@ -608,13 +608,13 @@
   86.33  
   86.34  address CodeCache::first_address() {
   86.35    assert_locked_or_safepoint(CodeCache_lock);
   86.36 -  return (address)_heap->begin();
   86.37 +  return (address)_heap->low_boundary();
   86.38  }
   86.39  
   86.40  
   86.41  address CodeCache::last_address() {
   86.42    assert_locked_or_safepoint(CodeCache_lock);
   86.43 -  return (address)_heap->end();
   86.44 +  return (address)_heap->high();
   86.45  }
   86.46  
   86.47  
   86.48 @@ -996,10 +996,9 @@
   86.49  void CodeCache::print_summary(outputStream* st, bool detailed) {
   86.50    size_t total = (_heap->high_boundary() - _heap->low_boundary());
   86.51    st->print_cr("CodeCache: size=" SIZE_FORMAT "Kb used=" SIZE_FORMAT
   86.52 -               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT
   86.53 -               "Kb max_free_chunk=" SIZE_FORMAT "Kb",
   86.54 +               "Kb max_used=" SIZE_FORMAT "Kb free=" SIZE_FORMAT "Kb",
   86.55                 total/K, (total - unallocated_capacity())/K,
   86.56 -               maxCodeCacheUsed/K, unallocated_capacity()/K, largest_free_block()/K);
   86.57 +               maxCodeCacheUsed/K, unallocated_capacity()/K);
   86.58  
   86.59    if (detailed) {
   86.60      st->print_cr(" bounds [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT "]",
   86.61 @@ -1018,19 +1017,8 @@
   86.62  
   86.63  void CodeCache::log_state(outputStream* st) {
   86.64    st->print(" total_blobs='" UINT32_FORMAT "' nmethods='" UINT32_FORMAT "'"
   86.65 -            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'"
   86.66 -            " largest_free_block='" SIZE_FORMAT "'",
   86.67 +            " adapters='" UINT32_FORMAT "' free_code_cache='" SIZE_FORMAT "'",
   86.68              nof_blobs(), nof_nmethods(), nof_adapters(),
   86.69 -            unallocated_capacity(), largest_free_block());
   86.70 +            unallocated_capacity());
   86.71  }
   86.72  
   86.73 -size_t CodeCache::largest_free_block() {
   86.74 -  // This is called both with and without CodeCache_lock held so
   86.75 -  // handle both cases.
   86.76 -  if (CodeCache_lock->owned_by_self()) {
   86.77 -    return _heap->largest_free_block();
   86.78 -  } else {
   86.79 -    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   86.80 -    return _heap->largest_free_block();
   86.81 -  }
   86.82 -}
    87.1 --- a/src/share/vm/code/codeCache.hpp	Wed Apr 24 20:55:28 2013 -0400
    87.2 +++ b/src/share/vm/code/codeCache.hpp	Wed Apr 24 21:11:02 2013 -0400
    87.3 @@ -70,7 +70,7 @@
    87.4    static void initialize();
    87.5  
    87.6    // Allocation/administration
    87.7 -  static CodeBlob* allocate(int size);              // allocates a new CodeBlob
    87.8 +  static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
    87.9    static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
   87.10    static int alignment_unit();                      // guaranteed alignment of all CodeBlobs
   87.11    static int alignment_offset();                    // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header)
   87.12 @@ -156,19 +156,13 @@
   87.13    static address  low_bound()                    { return (address) _heap->low_boundary(); }
   87.14    static address  high_bound()                   { return (address) _heap->high_boundary(); }
   87.15  
   87.16 -  static bool has_space(int size) {
   87.17 -    // Always leave some room in the CodeCache for I2C/C2I adapters
   87.18 -    return largest_free_block() > (CodeCacheMinimumFreeSpace + size);
   87.19 -  }
   87.20 -
   87.21    // Profiling
   87.22    static address first_address();                // first address used for CodeBlobs
   87.23    static address last_address();                 // last  address used for CodeBlobs
   87.24    static size_t  capacity()                      { return _heap->capacity(); }
   87.25    static size_t  max_capacity()                  { return _heap->max_capacity(); }
   87.26    static size_t  unallocated_capacity()          { return _heap->unallocated_capacity(); }
   87.27 -  static size_t  largest_free_block();
   87.28 -  static bool    needs_flushing()                { return largest_free_block() < CodeCacheFlushingMinimumFreeSpace; }
   87.29 +  static bool    needs_flushing()                { return unallocated_capacity() < CodeCacheFlushingMinimumFreeSpace; }
   87.30  
   87.31    static bool needs_cache_clean()                { return _needs_cache_clean; }
   87.32    static void set_needs_cache_clean(bool v)      { _needs_cache_clean = v;    }
    88.1 --- a/src/share/vm/code/nmethod.cpp	Wed Apr 24 20:55:28 2013 -0400
    88.2 +++ b/src/share/vm/code/nmethod.cpp	Wed Apr 24 21:11:02 2013 -0400
    88.3 @@ -501,18 +501,17 @@
    88.4    {
    88.5      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    88.6      int native_nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
    88.7 -    if (CodeCache::has_space(native_nmethod_size)) {
    88.8 -      CodeOffsets offsets;
    88.9 -      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
   88.10 -      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
   88.11 -      nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
   88.12 -                                             compile_id, &offsets,
   88.13 -                                             code_buffer, frame_size,
   88.14 -                                             basic_lock_owner_sp_offset,
   88.15 -                                             basic_lock_sp_offset, oop_maps);
   88.16 -      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
   88.17 -      if (PrintAssembly && nm != NULL)
   88.18 -        Disassembler::decode(nm);
   88.19 +    CodeOffsets offsets;
   88.20 +    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
   88.21 +    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
   88.22 +    nm = new (native_nmethod_size) nmethod(method(), native_nmethod_size,
   88.23 +                                            compile_id, &offsets,
   88.24 +                                            code_buffer, frame_size,
   88.25 +                                            basic_lock_owner_sp_offset,
   88.26 +                                            basic_lock_sp_offset, oop_maps);
   88.27 +    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
   88.28 +    if (PrintAssembly && nm != NULL) {
   88.29 +      Disassembler::decode(nm);
   88.30      }
   88.31    }
   88.32    // verify nmethod
   88.33 @@ -538,18 +537,17 @@
   88.34    {
   88.35      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   88.36      int nmethod_size = allocation_size(code_buffer, sizeof(nmethod));
   88.37 -    if (CodeCache::has_space(nmethod_size)) {
   88.38 -      CodeOffsets offsets;
   88.39 -      offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
   88.40 -      offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
   88.41 -      offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
   88.42 +    CodeOffsets offsets;
   88.43 +    offsets.set_value(CodeOffsets::Verified_Entry, vep_offset);
   88.44 +    offsets.set_value(CodeOffsets::Dtrace_trap, trap_offset);
   88.45 +    offsets.set_value(CodeOffsets::Frame_Complete, frame_complete);
   88.46  
   88.47 -      nm = new (nmethod_size) nmethod(method(), nmethod_size,
   88.48 -                                      &offsets, code_buffer, frame_size);
   88.49 +    nm = new (nmethod_size) nmethod(method(), nmethod_size,
   88.50 +                                    &offsets, code_buffer, frame_size);
   88.51  
   88.52 -      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
   88.53 -      if (PrintAssembly && nm != NULL)
   88.54 -        Disassembler::decode(nm);
   88.55 +    NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
   88.56 +    if (PrintAssembly && nm != NULL) {
   88.57 +      Disassembler::decode(nm);
   88.58      }
   88.59    }
   88.60    // verify nmethod
   88.61 @@ -591,16 +589,16 @@
   88.62        + round_to(handler_table->size_in_bytes(), oopSize)
   88.63        + round_to(nul_chk_table->size_in_bytes(), oopSize)
   88.64        + round_to(debug_info->data_size()       , oopSize);
   88.65 -    if (CodeCache::has_space(nmethod_size)) {
   88.66 -      nm = new (nmethod_size)
   88.67 -      nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
   88.68 -              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
   88.69 -              oop_maps,
   88.70 -              handler_table,
   88.71 -              nul_chk_table,
   88.72 -              compiler,
   88.73 -              comp_level);
   88.74 -    }
   88.75 +
   88.76 +    nm = new (nmethod_size)
   88.77 +    nmethod(method(), nmethod_size, compile_id, entry_bci, offsets,
   88.78 +            orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
   88.79 +            oop_maps,
   88.80 +            handler_table,
   88.81 +            nul_chk_table,
   88.82 +            compiler,
   88.83 +            comp_level);
   88.84 +
   88.85      if (nm != NULL) {
   88.86        // To make dependency checking during class loading fast, record
   88.87        // the nmethod dependencies in the classes it is dependent on.
   88.88 @@ -612,15 +610,18 @@
   88.89        // classes the slow way is too slow.
   88.90        for (Dependencies::DepStream deps(nm); deps.next(); ) {
   88.91          Klass* klass = deps.context_type();
   88.92 -        if (klass == NULL)  continue;  // ignore things like evol_method
   88.93 +        if (klass == NULL) {
   88.94 +          continue;  // ignore things like evol_method
   88.95 +        }
   88.96  
   88.97          // record this nmethod as dependent on this klass
   88.98          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
   88.99        }
  88.100      }
  88.101      NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_nmethod(nm));
  88.102 -    if (PrintAssembly && nm != NULL)
  88.103 +    if (PrintAssembly && nm != NULL) {
  88.104        Disassembler::decode(nm);
  88.105 +    }
  88.106    }
  88.107  
  88.108    // verify nmethod
  88.109 @@ -798,13 +799,11 @@
  88.110  }
  88.111  #endif // def HAVE_DTRACE_H
  88.112  
  88.113 -void* nmethod::operator new(size_t size, int nmethod_size) {
  88.114 -  void*  alloc = CodeCache::allocate(nmethod_size);
  88.115 -  guarantee(alloc != NULL, "CodeCache should have enough space");
  88.116 -  return alloc;
  88.117 +void* nmethod::operator new(size_t size, int nmethod_size) throw () {
  88.118 +  // Not critical, may return null if there is too little continuous memory
  88.119 +  return CodeCache::allocate(nmethod_size);
  88.120  }
  88.121  
  88.122 -
  88.123  nmethod::nmethod(
  88.124    Method* method,
  88.125    int nmethod_size,
    89.1 --- a/src/share/vm/compiler/compileBroker.cpp	Wed Apr 24 20:55:28 2013 -0400
    89.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Wed Apr 24 21:11:02 2013 -0400
    89.3 @@ -1206,11 +1206,8 @@
    89.4    assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
    89.5    assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
    89.6    assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
    89.7 -
    89.8 -  if (!TieredCompilation) {
    89.9 -    comp_level = CompLevel_highest_tier;
   89.10 -  }
   89.11 -
   89.12 +  // allow any levels for WhiteBox
   89.13 +  assert(WhiteBoxAPI || TieredCompilation || comp_level == CompLevel_highest_tier, "only CompLevel_highest_tier must be used in non-tiered");
   89.14    // return quickly if possible
   89.15  
   89.16    // lock, make sure that the compilation
   89.17 @@ -1584,7 +1581,7 @@
   89.18        // We need this HandleMark to avoid leaking VM handles.
   89.19        HandleMark hm(thread);
   89.20  
   89.21 -      if (CodeCache::largest_free_block() < CodeCacheMinimumFreeSpace) {
   89.22 +      if (CodeCache::unallocated_capacity() < CodeCacheMinimumFreeSpace) {
   89.23          // the code cache is really full
   89.24          handle_full_code_cache();
   89.25        } else if (UseCodeCacheFlushing && CodeCache::needs_flushing()) {
    90.1 --- a/src/share/vm/compiler/compileLog.cpp	Wed Apr 24 20:55:28 2013 -0400
    90.2 +++ b/src/share/vm/compiler/compileLog.cpp	Wed Apr 24 21:11:02 2013 -0400
    90.3 @@ -60,28 +60,6 @@
    90.4  }
    90.5  
    90.6  
    90.7 -// Advance kind up to a null or space, return this tail.
    90.8 -// Make sure kind is null-terminated, not space-terminated.
    90.9 -// Use the buffer if necessary.
   90.10 -static const char* split_attrs(const char* &kind, char* buffer) {
   90.11 -  const char* attrs = strchr(kind, ' ');
   90.12 -  // Tease apart the first word from the rest:
   90.13 -  if (attrs == NULL) {
   90.14 -    return "";  // no attrs, no split
   90.15 -  } else if (kind == buffer) {
   90.16 -    ((char*) attrs)[-1] = 0;
   90.17 -    return attrs;
   90.18 -  } else {
   90.19 -    // park it in the buffer, so we can put a null on the end
   90.20 -    assert(!(kind >= buffer && kind < buffer+100), "not obviously in buffer");
   90.21 -    int klen = attrs - kind;
   90.22 -    strncpy(buffer, kind, klen);
   90.23 -    buffer[klen] = 0;
   90.24 -    kind = buffer;  // return by reference
   90.25 -    return attrs;
   90.26 -  }
   90.27 -}
   90.28 -
   90.29  // see_tag, pop_tag:  Override the default do-nothing methods on xmlStream.
   90.30  // These methods provide a hook for managing the the extra context markup.
   90.31  void CompileLog::see_tag(const char* tag, bool push) {
    91.1 --- a/src/share/vm/compiler/compilerOracle.cpp	Wed Apr 24 20:55:28 2013 -0400
    91.2 +++ b/src/share/vm/compiler/compilerOracle.cpp	Wed Apr 24 21:11:02 2013 -0400
    91.3 @@ -237,13 +237,6 @@
    91.4    "help"
    91.5  };
    91.6  
    91.7 -static const char * command_name(OracleCommand command) {
    91.8 -  if (command < OracleFirstCommand || command >= OracleCommandCount) {
    91.9 -    return "unknown command";
   91.10 -  }
   91.11 -  return command_names[command];
   91.12 -}
   91.13 -
   91.14  class MethodMatcher;
   91.15  static MethodMatcher* lists[OracleCommandCount] = { 0, };
   91.16  
    92.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Apr 24 20:55:28 2013 -0400
    92.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Apr 24 21:11:02 2013 -0400
    92.3 @@ -48,6 +48,7 @@
    92.4  #include "memory/iterator.hpp"
    92.5  #include "memory/referencePolicy.hpp"
    92.6  #include "memory/resourceArea.hpp"
    92.7 +#include "memory/tenuredGeneration.hpp"
    92.8  #include "oops/oop.inline.hpp"
    92.9  #include "prims/jvmtiExport.hpp"
   92.10  #include "runtime/globals_extension.hpp"
   92.11 @@ -916,7 +917,31 @@
   92.12      return;
   92.13    }
   92.14  
   92.15 -  size_t expand_bytes = 0;
   92.16 +  // Compute some numbers about the state of the heap.
   92.17 +  const size_t used_after_gc = used();
   92.18 +  const size_t capacity_after_gc = capacity();
   92.19 +
   92.20 +  CardGeneration::compute_new_size();
   92.21 +
   92.22 +  // Reset again after a possible resizing
   92.23 +  cmsSpace()->reset_after_compaction();
   92.24 +
   92.25 +  assert(used() == used_after_gc && used_after_gc <= capacity(),
   92.26 +         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
   92.27 +         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
   92.28 +}
   92.29 +
   92.30 +void ConcurrentMarkSweepGeneration::compute_new_size_free_list() {
   92.31 +  assert_locked_or_safepoint(Heap_lock);
   92.32 +
   92.33 +  // If incremental collection failed, we just want to expand
   92.34 +  // to the limit.
   92.35 +  if (incremental_collection_failed()) {
   92.36 +    clear_incremental_collection_failed();
   92.37 +    grow_to_reserved();
   92.38 +    return;
   92.39 +  }
   92.40 +
   92.41    double free_percentage = ((double) free()) / capacity();
   92.42    double desired_free_percentage = (double) MinHeapFreeRatio / 100;
   92.43    double maximum_free_percentage = (double) MaxHeapFreeRatio / 100;
   92.44 @@ -925,9 +950,7 @@
   92.45    if (free_percentage < desired_free_percentage) {
   92.46      size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
   92.47      assert(desired_capacity >= capacity(), "invalid expansion size");
   92.48 -    expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
   92.49 -  }
   92.50 -  if (expand_bytes > 0) {
   92.51 +    size_t expand_bytes = MAX2(desired_capacity - capacity(), MinHeapDeltaBytes);
   92.52      if (PrintGCDetails && Verbose) {
   92.53        size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
   92.54        gclog_or_tty->print_cr("\nFrom compute_new_size: ");
   92.55 @@ -961,6 +984,14 @@
   92.56        gclog_or_tty->print_cr("  Expanded free fraction %f",
   92.57          ((double) free()) / capacity());
   92.58      }
   92.59 +  } else {
   92.60 +    size_t desired_capacity = (size_t)(used() / ((double) 1 - desired_free_percentage));
   92.61 +    assert(desired_capacity <= capacity(), "invalid expansion size");
   92.62 +    size_t shrink_bytes = capacity() - desired_capacity;
   92.63 +    // Don't shrink unless the delta is greater than the minimum shrink we want
   92.64 +    if (shrink_bytes >= MinHeapDeltaBytes) {
   92.65 +      shrink_free_list_by(shrink_bytes);
   92.66 +    }
   92.67    }
   92.68  }
   92.69  
   92.70 @@ -1872,7 +1903,7 @@
   92.71    assert_locked_or_safepoint(Heap_lock);
   92.72    FreelistLocker z(this);
   92.73    MetaspaceGC::compute_new_size();
   92.74 -  _cmsGen->compute_new_size();
   92.75 +  _cmsGen->compute_new_size_free_list();
   92.76  }
   92.77  
   92.78  // A work method used by foreground collection to determine
   92.79 @@ -2601,6 +2632,10 @@
   92.80  }
   92.81  
   92.82  void ConcurrentMarkSweepGeneration::gc_prologue(bool full) {
   92.83 +
   92.84 +  _capacity_at_prologue = capacity();
   92.85 +  _used_at_prologue = used();
   92.86 +
   92.87    // Delegate to CMScollector which knows how to coordinate between
   92.88    // this and any other CMS generations that it is responsible for
   92.89    // collecting.
   92.90 @@ -2774,6 +2809,23 @@
   92.91    }
   92.92  }
   92.93  
   92.94 +
   92.95 +void
   92.96 +CMSCollector::print_on_error(outputStream* st) {
   92.97 +  CMSCollector* collector = ConcurrentMarkSweepGeneration::_collector;
   92.98 +  if (collector != NULL) {
   92.99 +    CMSBitMap* bitmap = &collector->_markBitMap;
  92.100 +    st->print_cr("Marking Bits: (CMSBitMap*) " PTR_FORMAT, bitmap);
  92.101 +    bitmap->print_on_error(st, " Bits: ");
  92.102 +
  92.103 +    st->cr();
  92.104 +
  92.105 +    CMSBitMap* mut_bitmap = &collector->_modUnionTable;
  92.106 +    st->print_cr("Mod Union Table: (CMSBitMap*) " PTR_FORMAT, mut_bitmap);
  92.107 +    mut_bitmap->print_on_error(st, " Bits: ");
  92.108 +  }
  92.109 +}
  92.110 +
  92.111  ////////////////////////////////////////////////////////
  92.112  // CMS Verification Support
  92.113  ////////////////////////////////////////////////////////
  92.114 @@ -3300,6 +3352,26 @@
  92.115  }
  92.116  
  92.117  
  92.118 +void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  92.119 +  assert_locked_or_safepoint(ExpandHeap_lock);
  92.120 +  // Shrink committed space
  92.121 +  _virtual_space.shrink_by(bytes);
  92.122 +  // Shrink space; this also shrinks the space's BOT
  92.123 +  _cmsSpace->set_end((HeapWord*) _virtual_space.high());
  92.124 +  size_t new_word_size = heap_word_size(_cmsSpace->capacity());
  92.125 +  // Shrink the shared block offset array
  92.126 +  _bts->resize(new_word_size);
  92.127 +  MemRegion mr(_cmsSpace->bottom(), new_word_size);
  92.128 +  // Shrink the card table
  92.129 +  Universe::heap()->barrier_set()->resize_covered_region(mr);
  92.130 +
  92.131 +  if (Verbose && PrintGC) {
  92.132 +    size_t new_mem_size = _virtual_space.committed_size();
  92.133 +    size_t old_mem_size = new_mem_size + bytes;
  92.134 +    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
  92.135 +                  name(), old_mem_size/K, new_mem_size/K);
  92.136 +  }
  92.137 +}
  92.138  
  92.139  void ConcurrentMarkSweepGeneration::shrink(size_t bytes) {
  92.140    assert_locked_or_safepoint(Heap_lock);
  92.141 @@ -3351,7 +3423,7 @@
  92.142    return success;
  92.143  }
  92.144  
  92.145 -void ConcurrentMarkSweepGeneration::shrink_by(size_t bytes) {
  92.146 +void ConcurrentMarkSweepGeneration::shrink_free_list_by(size_t bytes) {
  92.147    assert_locked_or_safepoint(Heap_lock);
  92.148    assert_lock_strong(freelistLock());
  92.149    // XXX Fix when compaction is implemented.
  92.150 @@ -6476,6 +6548,10 @@
  92.151    }
  92.152  }
  92.153  
  92.154 +void CMSBitMap::print_on_error(outputStream* st, const char* prefix) const {
  92.155 +  _bm.print_on_error(st, prefix);
  92.156 +}
  92.157 +
  92.158  #ifndef PRODUCT
  92.159  void CMSBitMap::assert_locked() const {
  92.160    CMSLockVerifier::assert_locked(lock());
  92.161 @@ -6845,7 +6921,7 @@
  92.162            size = CompactibleFreeListSpace::adjustObjectSize(
  92.163                     p->oop_iterate(_scanningClosure));
  92.164          }
  92.165 -        #ifdef DEBUG
  92.166 +        #ifdef ASSERT
  92.167            size_t direct_size =
  92.168              CompactibleFreeListSpace::adjustObjectSize(p->size());
  92.169            assert(size == direct_size, "Inconsistency in size");
  92.170 @@ -6857,7 +6933,7 @@
  92.171              assert(_bitMap->isMarked(addr+size-1),
  92.172                     "inconsistent Printezis mark");
  92.173            }
  92.174 -        #endif // DEBUG
  92.175 +        #endif // ASSERT
  92.176      } else {
  92.177        // an unitialized object
  92.178        assert(_bitMap->isMarked(addr+1), "missing Printezis mark?");
  92.179 @@ -6999,14 +7075,14 @@
  92.180    HeapWord* addr = (HeapWord*)p;
  92.181    assert(_span.contains(addr), "we are scanning the CMS generation");
  92.182    bool is_obj_array = false;
  92.183 -  #ifdef DEBUG
  92.184 +  #ifdef ASSERT
  92.185      if (!_parallel) {
  92.186        assert(_mark_stack->isEmpty(), "pre-condition (eager drainage)");
  92.187        assert(_collector->overflow_list_is_empty(),
  92.188               "overflow list should be empty");
  92.189  
  92.190      }
  92.191 -  #endif // DEBUG
  92.192 +  #endif // ASSERT
  92.193    if (_bit_map->isMarked(addr)) {
  92.194      // Obj arrays are precisely marked, non-arrays are not;
  92.195      // so we scan objArrays precisely and non-arrays in their
  92.196 @@ -7026,14 +7102,14 @@
  92.197        }
  92.198      }
  92.199    }
  92.200 -  #ifdef DEBUG
  92.201 +  #ifdef ASSERT
  92.202      if (!_parallel) {
  92.203        assert(_mark_stack->isEmpty(), "post-condition (eager drainage)");
  92.204        assert(_collector->overflow_list_is_empty(),
  92.205               "overflow list should be empty");
  92.206  
  92.207      }
  92.208 -  #endif // DEBUG
  92.209 +  #endif // ASSERT
  92.210    return is_obj_array;
  92.211  }
  92.212  
  92.213 @@ -8244,7 +8320,7 @@
  92.214      assert(size == CompactibleFreeListSpace::adjustObjectSize(size),
  92.215             "alignment problem");
  92.216  
  92.217 -#ifdef DEBUG
  92.218 +#ifdef ASSERT
  92.219        if (oop(addr)->klass_or_null() != NULL) {
  92.220          // Ignore mark word because we are running concurrent with mutators
  92.221          assert(oop(addr)->is_oop(true), "live block should be an oop");
  92.222 @@ -9074,51 +9150,6 @@
  92.223    }
  92.224  }
  92.225  
  92.226 -// The desired expansion delta is computed so that:
  92.227 -// . desired free percentage or greater is used
  92.228 -void ASConcurrentMarkSweepGeneration::compute_new_size() {
  92.229 -  assert_locked_or_safepoint(Heap_lock);
  92.230 -
  92.231 -  GenCollectedHeap* gch = (GenCollectedHeap*) GenCollectedHeap::heap();
  92.232 -
  92.233 -  // If incremental collection failed, we just want to expand
  92.234 -  // to the limit.
  92.235 -  if (incremental_collection_failed()) {
  92.236 -    clear_incremental_collection_failed();
  92.237 -    grow_to_reserved();
  92.238 -    return;
  92.239 -  }
  92.240 -
  92.241 -  assert(UseAdaptiveSizePolicy, "Should be using adaptive sizing");
  92.242 -
  92.243 -  assert(gch->kind() == CollectedHeap::GenCollectedHeap,
  92.244 -    "Wrong type of heap");
  92.245 -  int prev_level = level() - 1;
  92.246 -  assert(prev_level >= 0, "The cms generation is the lowest generation");
  92.247 -  Generation* prev_gen = gch->get_gen(prev_level);
  92.248 -  assert(prev_gen->kind() == Generation::ASParNew,
  92.249 -    "Wrong type of young generation");
  92.250 -  ParNewGeneration* younger_gen = (ParNewGeneration*) prev_gen;
  92.251 -  size_t cur_eden = younger_gen->eden()->capacity();
  92.252 -  CMSAdaptiveSizePolicy* size_policy = cms_size_policy();
  92.253 -  size_t cur_promo = free();
  92.254 -  size_policy->compute_tenured_generation_free_space(cur_promo,
  92.255 -                                                       max_available(),
  92.256 -                                                       cur_eden);
  92.257 -  resize(cur_promo, size_policy->promo_size());
  92.258 -
  92.259 -  // Record the new size of the space in the cms generation
  92.260 -  // that is available for promotions.  This is temporary.
  92.261 -  // It should be the desired promo size.
  92.262 -  size_policy->avg_cms_promo()->sample(free());
  92.263 -  size_policy->avg_old_live()->sample(used());
  92.264 -
  92.265 -  if (UsePerfData) {
  92.266 -    CMSGCAdaptivePolicyCounters* counters = gc_adaptive_policy_counters();
  92.267 -    counters->update_cms_capacity_counter(capacity());
  92.268 -  }
  92.269 -}
  92.270 -
  92.271  void ASConcurrentMarkSweepGeneration::shrink_by(size_t desired_bytes) {
  92.272    assert_locked_or_safepoint(Heap_lock);
  92.273    assert_lock_strong(freelistLock());
    93.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Apr 24 20:55:28 2013 -0400
    93.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Apr 24 21:11:02 2013 -0400
    93.3 @@ -60,6 +60,7 @@
    93.4  class FreeChunk;
    93.5  class PromotionInfo;
    93.6  class ScanMarkedObjectsAgainCarefullyClosure;
    93.7 +class TenuredGeneration;
    93.8  
    93.9  // A generic CMS bit map. It's the basis for both the CMS marking bit map
   93.10  // as well as for the mod union table (in each case only a subset of the
   93.11 @@ -150,6 +151,8 @@
   93.12    size_t    heapWordToOffset(HeapWord* addr) const;
   93.13    size_t    heapWordDiffToOffsetDiff(size_t diff) const;
   93.14  
   93.15 +  void print_on_error(outputStream* st, const char* prefix) const;
   93.16 +
   93.17    // debugging
   93.18    // is this address range covered by the bit-map?
   93.19    NOT_PRODUCT(
   93.20 @@ -810,9 +813,6 @@
   93.21    // used regions of each generation to limit the extent of sweep
   93.22    void save_sweep_limits();
   93.23  
   93.24 -  // Resize the generations included in the collector.
   93.25 -  void compute_new_size();
   93.26 -
   93.27    // A work method used by foreground collection to determine
   93.28    // what type of collection (compacting or not, continuing or fresh)
   93.29    // it should do.
   93.30 @@ -909,6 +909,9 @@
   93.31    void releaseFreelistLocks() const;
   93.32    bool haveFreelistLocks() const;
   93.33  
   93.34 +  // Adjust size of underlying generation
   93.35 +  void compute_new_size();
   93.36 +
   93.37    // GC prologue and epilogue
   93.38    void gc_prologue(bool full);
   93.39    void gc_epilogue(bool full);
   93.40 @@ -983,6 +986,8 @@
   93.41    CMSAdaptiveSizePolicy* size_policy();
   93.42    CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
   93.43  
   93.44 +  static void print_on_error(outputStream* st);
   93.45 +
   93.46    // debugging
   93.47    void verify();
   93.48    bool verify_after_remark();
   93.49 @@ -1082,7 +1087,7 @@
   93.50  
   93.51   protected:
   93.52    // Shrink generation by specified size (returns false if unable to shrink)
   93.53 -  virtual void shrink_by(size_t bytes);
   93.54 +  void shrink_free_list_by(size_t bytes);
   93.55  
   93.56    // Update statistics for GC
   93.57    virtual void update_gc_stats(int level, bool full);
   93.58 @@ -1233,6 +1238,7 @@
   93.59      CMSExpansionCause::Cause cause);
   93.60    virtual bool expand(size_t bytes, size_t expand_bytes);
   93.61    void shrink(size_t bytes);
   93.62 +  void shrink_by(size_t bytes);
   93.63    HeapWord* expand_and_par_lab_allocate(CMSParGCThreadState* ps, size_t word_sz);
   93.64    bool expand_and_ensure_spooling_space(PromotionInfo* promo);
   93.65  
   93.66 @@ -1293,7 +1299,13 @@
   93.67    bool must_be_youngest() const { return false; }
   93.68    bool must_be_oldest()   const { return true; }
   93.69  
   93.70 -  void compute_new_size();
   93.71 +  // Resize the generation after a compacting GC.  The
   93.72 +  // generation can be treated as a contiguous space
   93.73 +  // after the compaction.
   93.74 +  virtual void compute_new_size();
   93.75 +  // Resize the generation after a non-compacting
   93.76 +  // collection.
   93.77 +  void compute_new_size_free_list();
   93.78  
   93.79    CollectionTypes debug_collection_type() { return _debug_collection_type; }
   93.80    void rotate_debug_collection_type();
   93.81 @@ -1315,7 +1327,6 @@
   93.82    virtual void shrink_by(size_t bytes);
   93.83  
   93.84   public:
   93.85 -  virtual void compute_new_size();
   93.86    ASConcurrentMarkSweepGeneration(ReservedSpace rs, size_t initial_byte_size,
   93.87                                    int level, CardTableRS* ct,
   93.88                                    bool use_adaptive_freelists,
    94.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 24 20:55:28 2013 -0400
    94.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed Apr 24 21:11:02 2013 -0400
    94.3 @@ -101,6 +101,10 @@
    94.4  }
    94.5  #endif
    94.6  
    94.7 +void CMBitMapRO::print_on_error(outputStream* st, const char* prefix) const {
    94.8 +  _bm.print_on_error(st, prefix);
    94.9 +}
   94.10 +
   94.11  bool CMBitMap::allocate(ReservedSpace heap_rs) {
   94.12    _bmStartWord = (HeapWord*)(heap_rs.base());
   94.13    _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
   94.14 @@ -3277,6 +3281,13 @@
   94.15    }
   94.16  }
   94.17  
   94.18 +void ConcurrentMark::print_on_error(outputStream* st) const {
   94.19 +  st->print_cr("Marking Bits (Prev, Next): (CMBitMap*) " PTR_FORMAT ", (CMBitMap*) " PTR_FORMAT,
   94.20 +      _prevMarkBitMap, _nextMarkBitMap);
   94.21 +  _prevMarkBitMap->print_on_error(st, " Prev Bits: ");
   94.22 +  _nextMarkBitMap->print_on_error(st, " Next Bits: ");
   94.23 +}
   94.24 +
   94.25  // We take a break if someone is trying to stop the world.
   94.26  bool ConcurrentMark::do_yield_check(uint worker_id) {
   94.27    if (should_yield()) {
    95.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 24 20:55:28 2013 -0400
    95.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed Apr 24 21:11:02 2013 -0400
    95.3 @@ -113,6 +113,8 @@
    95.4      return res;
    95.5    }
    95.6  
    95.7 +  void print_on_error(outputStream* st, const char* prefix) const;
    95.8 +
    95.9    // debugging
   95.10    NOT_PRODUCT(bool covers(ReservedSpace rs) const;)
   95.11  };
   95.12 @@ -829,6 +831,8 @@
   95.13  
   95.14    void print_worker_threads_on(outputStream* st) const;
   95.15  
   95.16 +  void print_on_error(outputStream* st) const;
   95.17 +
   95.18    // The following indicate whether a given verbose level has been
   95.19    // set. Notice that anything above stats is conditional to
   95.20    // _MARKING_VERBOSE_ having been set to 1
    96.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 24 20:55:28 2013 -0400
    96.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed Apr 24 21:11:02 2013 -0400
    96.3 @@ -1,5 +1,5 @@
    96.4  /*
    96.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    96.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    96.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.8   *
    96.9   * This code is free software; you can redistribute it and/or modify it
   96.10 @@ -1322,233 +1322,239 @@
   96.11      gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   96.12      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   96.13  
   96.14 -    TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
   96.15 -    TraceCollectorStats tcs(g1mm()->full_collection_counters());
   96.16 -    TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
   96.17 -
   96.18 -    double start = os::elapsedTime();
   96.19 -    g1_policy()->record_full_collection_start();
   96.20 -
   96.21 -    // Note: When we have a more flexible GC logging framework that
   96.22 -    // allows us to add optional attributes to a GC log record we
   96.23 -    // could consider timing and reporting how long we wait in the
   96.24 -    // following two methods.
   96.25 -    wait_while_free_regions_coming();
   96.26 -    // If we start the compaction before the CM threads finish
   96.27 -    // scanning the root regions we might trip them over as we'll
   96.28 -    // be moving objects / updating references. So let's wait until
   96.29 -    // they are done. By telling them to abort, they should complete
   96.30 -    // early.
   96.31 -    _cm->root_regions()->abort();
   96.32 -    _cm->root_regions()->wait_until_scan_finished();
   96.33 -    append_secondary_free_list_if_not_empty_with_lock();
   96.34 -
   96.35 -    gc_prologue(true);
   96.36 -    increment_total_collections(true /* full gc */);
   96.37 -    increment_old_marking_cycles_started();
   96.38 -
   96.39 -    size_t g1h_prev_used = used();
   96.40 -    assert(used() == recalculate_used(), "Should be equal");
   96.41 -
   96.42 -    verify_before_gc();
   96.43 -
   96.44 -    pre_full_gc_dump();
   96.45 -
   96.46 -    COMPILER2_PRESENT(DerivedPointerTable::clear());
   96.47 -
   96.48 -    // Disable discovery and empty the discovered lists
   96.49 -    // for the CM ref processor.
   96.50 -    ref_processor_cm()->disable_discovery();
   96.51 -    ref_processor_cm()->abandon_partial_discovery();
   96.52 -    ref_processor_cm()->verify_no_references_recorded();
   96.53 -
   96.54 -    // Abandon current iterations of concurrent marking and concurrent
   96.55 -    // refinement, if any are in progress. We have to do this before
   96.56 -    // wait_until_scan_finished() below.
   96.57 -    concurrent_mark()->abort();
   96.58 -
   96.59 -    // Make sure we'll choose a new allocation region afterwards.
   96.60 -    release_mutator_alloc_region();
   96.61 -    abandon_gc_alloc_regions();
   96.62 -    g1_rem_set()->cleanupHRRS();
   96.63 -
   96.64 -    // We should call this after we retire any currently active alloc
   96.65 -    // regions so that all the ALLOC / RETIRE events are generated
   96.66 -    // before the start GC event.
   96.67 -    _hr_printer.start_gc(true /* full */, (size_t) total_collections());
   96.68 -
   96.69 -    // We may have added regions to the current incremental collection
   96.70 -    // set between the last GC or pause and now. We need to clear the
   96.71 -    // incremental collection set and then start rebuilding it afresh
   96.72 -    // after this full GC.
   96.73 -    abandon_collection_set(g1_policy()->inc_cset_head());
   96.74 -    g1_policy()->clear_incremental_cset();
   96.75 -    g1_policy()->stop_incremental_cset_building();
   96.76 -
   96.77 -    tear_down_region_sets(false /* free_list_only */);
   96.78 -    g1_policy()->set_gcs_are_young(true);
   96.79 -
   96.80 -    // See the comments in g1CollectedHeap.hpp and
   96.81 -    // G1CollectedHeap::ref_processing_init() about
   96.82 -    // how reference processing currently works in G1.
   96.83 -
   96.84 -    // Temporarily make discovery by the STW ref processor single threaded (non-MT).
   96.85 -    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
   96.86 -
   96.87 -    // Temporarily clear the STW ref processor's _is_alive_non_header field.
   96.88 -    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
   96.89 -
   96.90 -    ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
   96.91 -    ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
   96.92 -
   96.93 -    // Do collection work
   96.94      {
   96.95 -      HandleMark hm;  // Discard invalid handles created during gc
   96.96 -      G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
   96.97 +      TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
   96.98 +      TraceCollectorStats tcs(g1mm()->full_collection_counters());
   96.99 +      TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  96.100 +
  96.101 +      double start = os::elapsedTime();
  96.102 +      g1_policy()->record_full_collection_start();
  96.103 +
  96.104 +      // Note: When we have a more flexible GC logging framework that
  96.105 +      // allows us to add optional attributes to a GC log record we
  96.106 +      // could consider timing and reporting how long we wait in the
  96.107 +      // following two methods.
  96.108 +      wait_while_free_regions_coming();
  96.109 +      // If we start the compaction before the CM threads finish
  96.110 +      // scanning the root regions we might trip them over as we'll
  96.111 +      // be moving objects / updating references. So let's wait until
  96.112 +      // they are done. By telling them to abort, they should complete
  96.113 +      // early.
  96.114 +      _cm->root_regions()->abort();
  96.115 +      _cm->root_regions()->wait_until_scan_finished();
  96.116 +      append_secondary_free_list_if_not_empty_with_lock();
  96.117 +
  96.118 +      gc_prologue(true);
  96.119 +      increment_total_collections(true /* full gc */);
  96.120 +      increment_old_marking_cycles_started();
  96.121 +
  96.122 +      assert(used() == recalculate_used(), "Should be equal");
  96.123 +
  96.124 +      verify_before_gc();
  96.125 +
  96.126 +      pre_full_gc_dump();
  96.127 +
  96.128 +      COMPILER2_PRESENT(DerivedPointerTable::clear());
  96.129 +
  96.130 +      // Disable discovery and empty the discovered lists
  96.131 +      // for the CM ref processor.
  96.132 +      ref_processor_cm()->disable_discovery();
  96.133 +      ref_processor_cm()->abandon_partial_discovery();
  96.134 +      ref_processor_cm()->verify_no_references_recorded();
  96.135 +
  96.136 +      // Abandon current iterations of concurrent marking and concurrent
  96.137 +      // refinement, if any are in progress. We have to do this before
  96.138 +      // wait_until_scan_finished() below.
  96.139 +      concurrent_mark()->abort();
  96.140 +
  96.141 +      // Make sure we'll choose a new allocation region afterwards.
  96.142 +      release_mutator_alloc_region();
  96.143 +      abandon_gc_alloc_regions();
  96.144 +      g1_rem_set()->cleanupHRRS();
  96.145 +
  96.146 +      // We should call this after we retire any currently active alloc
  96.147 +      // regions so that all the ALLOC / RETIRE events are generated
  96.148 +      // before the start GC event.
  96.149 +      _hr_printer.start_gc(true /* full */, (size_t) total_collections());
  96.150 +
  96.151 +      // We may have added regions to the current incremental collection
  96.152 +      // set between the last GC or pause and now. We need to clear the
  96.153 +      // incremental collection set and then start rebuilding it afresh
  96.154 +      // after this full GC.
  96.155 +      abandon_collection_set(g1_policy()->inc_cset_head());
  96.156 +      g1_policy()->clear_incremental_cset();
  96.157 +      g1_policy()->stop_incremental_cset_building();
  96.158 +
  96.159 +      tear_down_region_sets(false /* free_list_only */);
  96.160 +      g1_policy()->set_gcs_are_young(true);
  96.161 +
  96.162 +      // See the comments in g1CollectedHeap.hpp and
  96.163 +      // G1CollectedHeap::ref_processing_init() about
  96.164 +      // how reference processing currently works in G1.
  96.165 +
  96.166 +      // Temporarily make discovery by the STW ref processor single threaded (non-MT).
  96.167 +      ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
  96.168 +
  96.169 +      // Temporarily clear the STW ref processor's _is_alive_non_header field.
  96.170 +      ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
  96.171 +
  96.172 +      ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  96.173 +      ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
  96.174 +
  96.175 +      // Do collection work
  96.176 +      {
  96.177 +        HandleMark hm;  // Discard invalid handles created during gc
  96.178 +        G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
  96.179 +      }
  96.180 +
  96.181 +      assert(free_regions() == 0, "we should not have added any free regions");
  96.182 +      rebuild_region_sets(false /* free_list_only */);
  96.183 +
  96.184 +      // Enqueue any discovered reference objects that have
  96.185 +      // not been removed from the discovered lists.
  96.186 +      ref_processor_stw()->enqueue_discovered_references();
  96.187 +
  96.188 +      COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  96.189 +
  96.190 +      MemoryService::track_memory_usage();
  96.191 +
  96.192 +      verify_after_gc();
  96.193 +
  96.194 +      assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  96.195 +      ref_processor_stw()->verify_no_references_recorded();
  96.196 +
  96.197 +      // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  96.198 +      ClassLoaderDataGraph::purge();
  96.199 +
  96.200 +      // Note: since we've just done a full GC, concurrent
  96.201 +      // marking is no longer active. Therefore we need not
  96.202 +      // re-enable reference discovery for the CM ref processor.
  96.203 +      // That will be done at the start of the next marking cycle.
  96.204 +      assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  96.205 +      ref_processor_cm()->verify_no_references_recorded();
  96.206 +
  96.207 +      reset_gc_time_stamp();
  96.208 +      // Since everything potentially moved, we will clear all remembered
  96.209 +      // sets, and clear all cards.  Later we will rebuild remebered
  96.210 +      // sets. We will also reset the GC time stamps of the regions.
  96.211 +      clear_rsets_post_compaction();
  96.212 +      check_gc_time_stamps();
  96.213 +
  96.214 +      // Resize the heap if necessary.
  96.215 +      resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  96.216 +
  96.217 +      if (_hr_printer.is_active()) {
  96.218 +        // We should do this after we potentially resize the heap so
  96.219 +        // that all the COMMIT / UNCOMMIT events are generated before
  96.220 +        // the end GC event.
  96.221 +
  96.222 +        print_hrs_post_compaction();
  96.223 +        _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  96.224 +      }
  96.225 +
  96.226 +      if (_cg1r->use_cache()) {
  96.227 +        _cg1r->clear_and_record_card_counts();
  96.228 +        _cg1r->clear_hot_cache();
  96.229 +      }
  96.230 +
  96.231 +      // Rebuild remembered sets of all regions.
  96.232 +      if (G1CollectedHeap::use_parallel_gc_threads()) {
  96.233 +        uint n_workers =
  96.234 +          AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  96.235 +                                                  workers()->active_workers(),
  96.236 +                                                  Threads::number_of_non_daemon_threads());
  96.237 +        assert(UseDynamicNumberOfGCThreads ||
  96.238 +               n_workers == workers()->total_workers(),
  96.239 +               "If not dynamic should be using all the  workers");
  96.240 +        workers()->set_active_workers(n_workers);
  96.241 +        // Set parallel threads in the heap (_n_par_threads) only
  96.242 +        // before a parallel phase and always reset it to 0 after
  96.243 +        // the phase so that the number of parallel threads does
  96.244 +        // no get carried forward to a serial phase where there
  96.245 +        // may be code that is "possibly_parallel".
  96.246 +        set_par_threads(n_workers);
  96.247 +
  96.248 +        ParRebuildRSTask rebuild_rs_task(this);
  96.249 +        assert(check_heap_region_claim_values(
  96.250 +               HeapRegion::InitialClaimValue), "sanity check");
  96.251 +        assert(UseDynamicNumberOfGCThreads ||
  96.252 +               workers()->active_workers() == workers()->total_workers(),
  96.253 +               "Unless dynamic should use total workers");
  96.254 +        // Use the most recent number of  active workers
  96.255 +        assert(workers()->active_workers() > 0,
  96.256 +               "Active workers not properly set");
  96.257 +        set_par_threads(workers()->active_workers());
  96.258 +        workers()->run_task(&rebuild_rs_task);
  96.259 +        set_par_threads(0);
  96.260 +        assert(check_heap_region_claim_values(
  96.261 +               HeapRegion::RebuildRSClaimValue), "sanity check");
  96.262 +        reset_heap_region_claim_values();
  96.263 +      } else {
  96.264 +        RebuildRSOutOfRegionClosure rebuild_rs(this);
  96.265 +        heap_region_iterate(&rebuild_rs);
  96.266 +      }
  96.267 +
  96.268 +      if (true) { // FIXME
  96.269 +        MetaspaceGC::compute_new_size();
  96.270 +      }
  96.271 +
  96.272 +#ifdef TRACESPINNING
  96.273 +      ParallelTaskTerminator::print_termination_counts();
  96.274 +#endif
  96.275 +
  96.276 +      // Discard all rset updates
  96.277 +      JavaThread::dirty_card_queue_set().abandon_logs();
  96.278 +      assert(!G1DeferredRSUpdate
  96.279 +             || (G1DeferredRSUpdate &&
  96.280 +                (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  96.281 +
  96.282 +      _young_list->reset_sampled_info();
  96.283 +      // At this point there should be no regions in the
  96.284 +      // entire heap tagged as young.
  96.285 +      assert(check_young_list_empty(true /* check_heap */),
  96.286 +             "young list should be empty at this point");
  96.287 +
  96.288 +      // Update the number of full collections that have been completed.
  96.289 +      increment_old_marking_cycles_completed(false /* concurrent */);
  96.290 +
  96.291 +      _hrs.verify_optional();
  96.292 +      verify_region_sets_optional();
  96.293 +
  96.294 +      // Start a new incremental collection set for the next pause
  96.295 +      assert(g1_policy()->collection_set() == NULL, "must be");
  96.296 +      g1_policy()->start_incremental_cset_building();
  96.297 +
  96.298 +      // Clear the _cset_fast_test bitmap in anticipation of adding
  96.299 +      // regions to the incremental collection set for the next
  96.300 +      // evacuation pause.
  96.301 +      clear_cset_fast_test();
  96.302 +
  96.303 +      init_mutator_alloc_region();
  96.304 +
  96.305 +      double end = os::elapsedTime();
  96.306 +      g1_policy()->record_full_collection_end();
  96.307 +
  96.308 +      if (G1Log::fine()) {
  96.309 +        g1_policy()->print_heap_transition();
  96.310 +      }
  96.311 +
  96.312 +      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  96.313 +      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  96.314 +      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  96.315 +      // before any GC notifications are raised.
  96.316 +      g1mm()->update_sizes();
  96.317 +
  96.318 +      gc_epilogue(true);
  96.319      }
  96.320  
  96.321 -    assert(free_regions() == 0, "we should not have added any free regions");
  96.322 -    rebuild_region_sets(false /* free_list_only */);
  96.323 -
  96.324 -    // Enqueue any discovered reference objects that have
  96.325 -    // not been removed from the discovered lists.
  96.326 -    ref_processor_stw()->enqueue_discovered_references();
  96.327 -
  96.328 -    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  96.329 -
  96.330 -    MemoryService::track_memory_usage();
  96.331 -
  96.332 -    verify_after_gc();
  96.333 -
  96.334 -    assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
  96.335 -    ref_processor_stw()->verify_no_references_recorded();
  96.336 -
  96.337 -    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  96.338 -    ClassLoaderDataGraph::purge();
  96.339 -
  96.340 -    // Note: since we've just done a full GC, concurrent
  96.341 -    // marking is no longer active. Therefore we need not
  96.342 -    // re-enable reference discovery for the CM ref processor.
  96.343 -    // That will be done at the start of the next marking cycle.
  96.344 -    assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
  96.345 -    ref_processor_cm()->verify_no_references_recorded();
  96.346 -
  96.347 -    reset_gc_time_stamp();
  96.348 -    // Since everything potentially moved, we will clear all remembered
  96.349 -    // sets, and clear all cards.  Later we will rebuild remebered
  96.350 -    // sets. We will also reset the GC time stamps of the regions.
  96.351 -    clear_rsets_post_compaction();
  96.352 -    check_gc_time_stamps();
  96.353 -
  96.354 -    // Resize the heap if necessary.
  96.355 -    resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
  96.356 -
  96.357 -    if (_hr_printer.is_active()) {
  96.358 -      // We should do this after we potentially resize the heap so
  96.359 -      // that all the COMMIT / UNCOMMIT events are generated before
  96.360 -      // the end GC event.
  96.361 -
  96.362 -      print_hrs_post_compaction();
  96.363 -      _hr_printer.end_gc(true /* full */, (size_t) total_collections());
  96.364 +    if (G1Log::finer()) {
  96.365 +      g1_policy()->print_detailed_heap_transition();
  96.366      }
  96.367  
  96.368 -    if (_cg1r->use_cache()) {
  96.369 -      _cg1r->clear_and_record_card_counts();
  96.370 -      _cg1r->clear_hot_cache();
  96.371 -    }
  96.372 -
  96.373 -    // Rebuild remembered sets of all regions.
  96.374 -    if (G1CollectedHeap::use_parallel_gc_threads()) {
  96.375 -      uint n_workers =
  96.376 -        AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
  96.377 -                                       workers()->active_workers(),
  96.378 -                                       Threads::number_of_non_daemon_threads());
  96.379 -      assert(UseDynamicNumberOfGCThreads ||
  96.380 -             n_workers == workers()->total_workers(),
  96.381 -             "If not dynamic should be using all the  workers");
  96.382 -      workers()->set_active_workers(n_workers);
  96.383 -      // Set parallel threads in the heap (_n_par_threads) only
  96.384 -      // before a parallel phase and always reset it to 0 after
  96.385 -      // the phase so that the number of parallel threads does
  96.386 -      // no get carried forward to a serial phase where there
  96.387 -      // may be code that is "possibly_parallel".
  96.388 -      set_par_threads(n_workers);
  96.389 -
  96.390 -      ParRebuildRSTask rebuild_rs_task(this);
  96.391 -      assert(check_heap_region_claim_values(
  96.392 -             HeapRegion::InitialClaimValue), "sanity check");
  96.393 -      assert(UseDynamicNumberOfGCThreads ||
  96.394 -             workers()->active_workers() == workers()->total_workers(),
  96.395 -        "Unless dynamic should use total workers");
  96.396 -      // Use the most recent number of  active workers
  96.397 -      assert(workers()->active_workers() > 0,
  96.398 -        "Active workers not properly set");
  96.399 -      set_par_threads(workers()->active_workers());
  96.400 -      workers()->run_task(&rebuild_rs_task);
  96.401 -      set_par_threads(0);
  96.402 -      assert(check_heap_region_claim_values(
  96.403 -             HeapRegion::RebuildRSClaimValue), "sanity check");
  96.404 -      reset_heap_region_claim_values();
  96.405 -    } else {
  96.406 -      RebuildRSOutOfRegionClosure rebuild_rs(this);
  96.407 -      heap_region_iterate(&rebuild_rs);
  96.408 -    }
  96.409 -
  96.410 -    if (G1Log::fine()) {
  96.411 -      print_size_transition(gclog_or_tty, g1h_prev_used, used(), capacity());
  96.412 -    }
  96.413 -
  96.414 -    if (true) { // FIXME
  96.415 -      MetaspaceGC::compute_new_size();
  96.416 -    }
  96.417 -
  96.418 -    // Start a new incremental collection set for the next pause
  96.419 -    assert(g1_policy()->collection_set() == NULL, "must be");
  96.420 -    g1_policy()->start_incremental_cset_building();
  96.421 -
  96.422 -    // Clear the _cset_fast_test bitmap in anticipation of adding
  96.423 -    // regions to the incremental collection set for the next
  96.424 -    // evacuation pause.
  96.425 -    clear_cset_fast_test();
  96.426 -
  96.427 -    init_mutator_alloc_region();
  96.428 -
  96.429 -    double end = os::elapsedTime();
  96.430 -    g1_policy()->record_full_collection_end();
  96.431 -
  96.432 -#ifdef TRACESPINNING
  96.433 -    ParallelTaskTerminator::print_termination_counts();
  96.434 -#endif
  96.435 -
  96.436 -    gc_epilogue(true);
  96.437 -
  96.438 -    // Discard all rset updates
  96.439 -    JavaThread::dirty_card_queue_set().abandon_logs();
  96.440 -    assert(!G1DeferredRSUpdate
  96.441 -           || (G1DeferredRSUpdate && (dirty_card_queue_set().completed_buffers_num() == 0)), "Should not be any");
  96.442 -
  96.443 -    _young_list->reset_sampled_info();
  96.444 -    // At this point there should be no regions in the
  96.445 -    // entire heap tagged as young.
  96.446 -    assert( check_young_list_empty(true /* check_heap */),
  96.447 -      "young list should be empty at this point");
  96.448 -
  96.449 -    // Update the number of full collections that have been completed.
  96.450 -    increment_old_marking_cycles_completed(false /* concurrent */);
  96.451 -
  96.452 -    _hrs.verify_optional();
  96.453 -    verify_region_sets_optional();
  96.454 -
  96.455      print_heap_after_gc();
  96.456  
  96.457 -    // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  96.458 -    // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  96.459 -    // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  96.460 -    // before any GC notifications are raised.
  96.461 -    g1mm()->update_sizes();
  96.462 -  }
  96.463 -
  96.464 -  post_full_gc_dump();
  96.465 +    post_full_gc_dump();
  96.466 +  }
  96.467  
  96.468    return true;
  96.469  }
  96.470 @@ -3427,6 +3433,15 @@
  96.471    heap_region_iterate(&blk);
  96.472  }
  96.473  
  96.474 +void G1CollectedHeap::print_on_error(outputStream* st) const {
  96.475 +  this->CollectedHeap::print_on_error(st);
  96.476 +
  96.477 +  if (_cm != NULL) {
  96.478 +    st->cr();
  96.479 +    _cm->print_on_error(st);
  96.480 +  }
  96.481 +}
  96.482 +
  96.483  void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
  96.484    if (G1CollectedHeap::use_parallel_gc_threads()) {
  96.485      workers()->print_worker_threads_on(st);
  96.486 @@ -3829,7 +3844,6 @@
  96.487          // The elapsed time induced by the start time below deliberately elides
  96.488          // the possible verification above.
  96.489          double sample_start_time_sec = os::elapsedTime();
  96.490 -        size_t start_used_bytes = used();
  96.491  
  96.492  #if YOUNG_LIST_VERBOSE
  96.493          gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
  96.494 @@ -3837,8 +3851,7 @@
  96.495          g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  96.496  #endif // YOUNG_LIST_VERBOSE
  96.497  
  96.498 -        g1_policy()->record_collection_pause_start(sample_start_time_sec,
  96.499 -                                                   start_used_bytes);
  96.500 +        g1_policy()->record_collection_pause_start(sample_start_time_sec);
  96.501  
  96.502          double scan_wait_start = os::elapsedTime();
  96.503          // We have to wait until the CM threads finish scanning the
    97.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 24 20:55:28 2013 -0400
    97.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed Apr 24 21:11:02 2013 -0400
    97.3 @@ -1575,6 +1575,7 @@
    97.4    virtual void verify(bool silent);
    97.5    virtual void print_on(outputStream* st) const;
    97.6    virtual void print_extended_on(outputStream* st) const;
    97.7 +  virtual void print_on_error(outputStream* st) const;
    97.8  
    97.9    virtual void print_gc_threads_on(outputStream* st) const;
   97.10    virtual void gc_threads_do(ThreadClosure* tc) const;
    98.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 24 20:55:28 2013 -0400
    98.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Wed Apr 24 21:11:02 2013 -0400
    98.3 @@ -1,5 +1,5 @@
    98.4  /*
    98.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    98.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    98.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    98.8   *
    98.9   * This code is free software; you can redistribute it and/or modify it
   98.10 @@ -406,7 +406,6 @@
   98.11    }
   98.12    _free_regions_at_end_of_collection = _g1->free_regions();
   98.13    update_young_list_target_length();
   98.14 -  _prev_eden_capacity = _young_list_target_length * HeapRegion::GrainBytes;
   98.15  
   98.16    // We may immediately start allocating regions and placing them on the
   98.17    // collection set list. Initialize the per-collection set info
   98.18 @@ -746,6 +745,7 @@
   98.19  
   98.20  void G1CollectorPolicy::record_full_collection_start() {
   98.21    _full_collection_start_sec = os::elapsedTime();
   98.22 +  record_heap_size_info_at_start();
   98.23    // Release the future to-space so that it is available for compaction into.
   98.24    _g1->set_full_collection();
   98.25  }
   98.26 @@ -788,8 +788,7 @@
   98.27    _stop_world_start = os::elapsedTime();
   98.28  }
   98.29  
   98.30 -void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
   98.31 -                                                      size_t start_used) {
   98.32 +void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
   98.33    // We only need to do this here as the policy will only be applied
   98.34    // to the GC we're about to start. so, no point is calculating this
   98.35    // every time we calculate / recalculate the target young length.
   98.36 @@ -803,19 +802,14 @@
   98.37    _trace_gen0_time_data.record_start_collection(s_w_t_ms);
   98.38    _stop_world_start = 0.0;
   98.39  
   98.40 +  record_heap_size_info_at_start();
   98.41 +
   98.42    phase_times()->record_cur_collection_start_sec(start_time_sec);
   98.43 -  _cur_collection_pause_used_at_start_bytes = start_used;
   98.44 -  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   98.45    _pending_cards = _g1->pending_card_num();
   98.46  
   98.47    _collection_set_bytes_used_before = 0;
   98.48    _bytes_copied_during_gc = 0;
   98.49  
   98.50 -  YoungList* young_list = _g1->young_list();
   98.51 -  _eden_bytes_before_gc = young_list->eden_used_bytes();
   98.52 -  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
   98.53 -  _capacity_before_gc = _g1->capacity();
   98.54 -
   98.55    _last_gc_was_young = false;
   98.56  
   98.57    // do that for any other surv rate groups
   98.58 @@ -1153,6 +1147,21 @@
   98.59    byte_size_in_proper_unit((double)(bytes)),                    \
   98.60    proper_unit_for_byte_size((bytes))
   98.61  
   98.62 +void G1CollectorPolicy::record_heap_size_info_at_start() {
   98.63 +  YoungList* young_list = _g1->young_list();
   98.64 +  _eden_bytes_before_gc = young_list->eden_used_bytes();
   98.65 +  _survivor_bytes_before_gc = young_list->survivor_used_bytes();
   98.66 +  _capacity_before_gc = _g1->capacity();
   98.67 +
   98.68 +  _cur_collection_pause_used_at_start_bytes = _g1->used();
   98.69 +  _cur_collection_pause_used_regions_at_start = _g1->used_regions();
   98.70 +
   98.71 +  size_t eden_capacity_before_gc =
   98.72 +         (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_bytes_before_gc;
   98.73 +
   98.74 +  _prev_eden_capacity = eden_capacity_before_gc;
   98.75 +}
   98.76 +
   98.77  void G1CollectorPolicy::print_heap_transition() {
   98.78    _g1->print_size_transition(gclog_or_tty,
   98.79      _cur_collection_pause_used_at_start_bytes, _g1->used(), _g1->capacity());
   98.80 @@ -1183,8 +1192,6 @@
   98.81        EXT_SIZE_PARAMS(_capacity_before_gc),
   98.82        EXT_SIZE_PARAMS(used),
   98.83        EXT_SIZE_PARAMS(capacity));
   98.84 -
   98.85 -    _prev_eden_capacity = eden_capacity;
   98.86  }
   98.87  
   98.88  void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
   98.89 @@ -1359,18 +1366,6 @@
   98.90  #endif // PRODUCT
   98.91  }
   98.92  
   98.93 -#ifndef PRODUCT
   98.94 -// for debugging, bit of a hack...
   98.95 -static char*
   98.96 -region_num_to_mbs(int length) {
   98.97 -  static char buffer[64];
   98.98 -  double bytes = (double) (length * HeapRegion::GrainBytes);
   98.99 -  double mbs = bytes / (double) (1024 * 1024);
  98.100 -  sprintf(buffer, "%7.2lfMB", mbs);
  98.101 -  return buffer;
  98.102 -}
  98.103 -#endif // PRODUCT
  98.104 -
  98.105  uint G1CollectorPolicy::max_regions(int purpose) {
  98.106    switch (purpose) {
  98.107      case GCAllocForSurvived:
    99.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 24 20:55:28 2013 -0400
    99.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Wed Apr 24 21:11:02 2013 -0400
    99.3 @@ -1,5 +1,5 @@
    99.4  /*
    99.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    99.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    99.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.8   *
    99.9   * This code is free software; you can redistribute it and/or modify it
   99.10 @@ -671,34 +671,36 @@
   99.11  
   99.12    bool need_to_start_conc_mark(const char* source, size_t alloc_word_size = 0);
   99.13  
   99.14 -  // Update the heuristic info to record a collection pause of the given
   99.15 -  // start time, where the given number of bytes were used at the start.
   99.16 -  // This may involve changing the desired size of a collection set.
   99.17 +  // Record the start and end of an evacuation pause.
   99.18 +  void record_collection_pause_start(double start_time_sec);
   99.19 +  void record_collection_pause_end(double pause_time_ms);
   99.20  
   99.21 -  void record_stop_world_start();
   99.22 -
   99.23 -  void record_collection_pause_start(double start_time_sec, size_t start_used);
   99.24 +  // Record the start and end of a full collection.
   99.25 +  void record_full_collection_start();
   99.26 +  void record_full_collection_end();
   99.27  
   99.28    // Must currently be called while the world is stopped.
   99.29 -  void record_concurrent_mark_init_end(double
   99.30 -                                           mark_init_elapsed_time_ms);
   99.31 +  void record_concurrent_mark_init_end(double mark_init_elapsed_time_ms);
   99.32  
   99.33 +  // Record start and end of remark.
   99.34    void record_concurrent_mark_remark_start();
   99.35    void record_concurrent_mark_remark_end();
   99.36  
   99.37 +  // Record start, end, and completion of cleanup.
   99.38    void record_concurrent_mark_cleanup_start();
   99.39    void record_concurrent_mark_cleanup_end(int no_of_gc_threads);
   99.40    void record_concurrent_mark_cleanup_completed();
   99.41  
   99.42 -  void record_concurrent_pause();
   99.43 +  // Records the information about the heap size for reporting in
   99.44 +  // print_detailed_heap_transition
   99.45 +  void record_heap_size_info_at_start();
   99.46  
   99.47 -  void record_collection_pause_end(double pause_time);
   99.48 +  // Print heap sizing transition (with less and more detail).
   99.49    void print_heap_transition();
   99.50    void print_detailed_heap_transition();
   99.51  
   99.52 -  // Record the fact that a full collection occurred.
   99.53 -  void record_full_collection_start();
   99.54 -  void record_full_collection_end();
   99.55 +  void record_stop_world_start();
   99.56 +  void record_concurrent_pause();
   99.57  
   99.58    // Record how much space we copied during a GC. This is typically
   99.59    // called when a GC alloc region is being retired.
   100.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Apr 24 20:55:28 2013 -0400
   100.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.cpp	Wed Apr 24 21:11:02 2013 -0400
   100.3 @@ -53,15 +53,6 @@
   100.4  }
   100.5  
   100.6  
   100.7 -static int byte_index_to_index(int ind) {
   100.8 -  assert((ind % oopSize) == 0, "Invariant.");
   100.9 -  return ind / oopSize;
  100.10 -}
  100.11 -
  100.12 -static int index_to_byte_index(int byte_ind) {
  100.13 -  return byte_ind * oopSize;
  100.14 -}
  100.15 -
  100.16  void PtrQueue::enqueue_known_active(void* ptr) {
  100.17    assert(0 <= _index && _index <= _sz, "Invariant.");
  100.18    assert(_index == 0 || _buf != NULL, "invariant");
   101.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Wed Apr 24 20:55:28 2013 -0400
   101.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parMarkBitMap.hpp	Wed Apr 24 21:11:02 2013 -0400
   101.3 @@ -173,6 +173,12 @@
   101.4    void reset_counters();
   101.5  #endif  // #ifndef PRODUCT
   101.6  
   101.7 +  void print_on_error(outputStream* st) const {
   101.8 +    st->print_cr("Marking Bits: (ParMarkBitMap*) " PTR_FORMAT, this);
   101.9 +    _beg_bits.print_on_error(st, " Begin Bits: ");
  101.10 +    _end_bits.print_on_error(st, " End Bits:   ");
  101.11 +  }
  101.12 +
  101.13  #ifdef  ASSERT
  101.14    void verify_clear() const;
  101.15    inline void verify_bit(idx_t bit) const;
   102.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Apr 24 20:55:28 2013 -0400
   102.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed Apr 24 21:11:02 2013 -0400
   102.3 @@ -648,6 +648,15 @@
   102.4    MetaspaceAux::print_on(st);
   102.5  }
   102.6  
   102.7 +void ParallelScavengeHeap::print_on_error(outputStream* st) const {
   102.8 +  this->CollectedHeap::print_on_error(st);
   102.9 +
  102.10 +  if (UseParallelOldGC) {
  102.11 +    st->cr();
  102.12 +    PSParallelCompact::print_on_error(st);
  102.13 +  }
  102.14 +}
  102.15 +
  102.16  void ParallelScavengeHeap::gc_threads_do(ThreadClosure* tc) const {
  102.17    PSScavenge::gc_task_manager()->threads_do(tc);
  102.18  }
   103.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Apr 24 20:55:28 2013 -0400
   103.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed Apr 24 21:11:02 2013 -0400
   103.3 @@ -220,6 +220,7 @@
   103.4  
   103.5    void prepare_for_verify();
   103.6    virtual void print_on(outputStream* st) const;
   103.7 +  virtual void print_on_error(outputStream* st) const;
   103.8    virtual void print_gc_threads_on(outputStream* st) const;
   103.9    virtual void gc_threads_do(ThreadClosure* tc) const;
  103.10    virtual void print_tracing_info() const;
   104.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Apr 24 20:55:28 2013 -0400
   104.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Wed Apr 24 21:11:02 2013 -0400
   104.3 @@ -165,6 +165,10 @@
   104.4  #endif  // #ifdef ASSERT
   104.5  
   104.6  
   104.7 +void PSParallelCompact::print_on_error(outputStream* st) {
   104.8 +  _mark_bitmap.print_on_error(st);
   104.9 +}
  104.10 +
  104.11  #ifndef PRODUCT
  104.12  const char* PSParallelCompact::space_names[] = {
  104.13    "old ", "eden", "from", "to  "
   105.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Apr 24 20:55:28 2013 -0400
   105.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Wed Apr 24 21:11:02 2013 -0400
   105.3 @@ -1163,6 +1163,8 @@
   105.4    // Time since last full gc (in milliseconds).
   105.5    static jlong millis_since_last_gc();
   105.6  
   105.7 +  static void print_on_error(outputStream* st);
   105.8 +
   105.9  #ifndef PRODUCT
  105.10    // Debugging support.
  105.11    static const char* space_names[last_space_id];
   106.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Wed Apr 24 20:55:28 2013 -0400
   106.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed Apr 24 21:11:02 2013 -0400
   106.3 @@ -567,6 +567,14 @@
   106.4      print_on(st);
   106.5    }
   106.6  
   106.7 +  virtual void print_on_error(outputStream* st) const {
   106.8 +    st->print_cr("Heap:");
   106.9 +    print_extended_on(st);
  106.10 +    st->cr();
  106.11 +
  106.12 +    _barrier_set->print_on(st);
  106.13 +  }
  106.14 +
  106.15    // Print all GC threads (other than the VM thread)
  106.16    // used by this heap.
  106.17    virtual void print_gc_threads_on(outputStream* st) const = 0;
   107.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Apr 24 20:55:28 2013 -0400
   107.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Apr 24 21:11:02 2013 -0400
   107.3 @@ -557,11 +557,6 @@
   107.4  // be shared by method invocation and synchronized blocks.
   107.5  //%note synchronization_3
   107.6  
   107.7 -static void trace_locking(Handle& h_locking_obj, bool is_locking) {
   107.8 -  ObjectSynchronizer::trace_locking(h_locking_obj, false, true, is_locking);
   107.9 -}
  107.10 -
  107.11 -
  107.12  //%note monitor_1
  107.13  IRT_ENTRY_NO_ASYNC(void, InterpreterRuntime::monitorenter(JavaThread* thread, BasicObjectLock* elem))
  107.14  #ifdef ASSERT
   108.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Wed Apr 24 20:55:28 2013 -0400
   108.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Apr 24 21:11:02 2013 -0400
   108.3 @@ -217,6 +217,7 @@
   108.4                                               TRAPS) {
   108.5    vmIntrinsics::ID iid = MethodHandles::signature_polymorphic_name_id(name);
   108.6    if (TraceMethodHandles) {
   108.7 +    ResourceMark rm(THREAD);
   108.8      tty->print_cr("lookup_polymorphic_method iid=%s %s.%s%s",
   108.9                    vmIntrinsics::name_at(iid), klass->external_name(),
  108.10                    name->as_C_string(), full_signature->as_C_string());
  108.11 @@ -231,6 +232,7 @@
  108.12        TempNewSymbol basic_signature =
  108.13          MethodHandles::lookup_basic_type_signature(full_signature, keep_last_arg, CHECK);
  108.14        if (TraceMethodHandles) {
  108.15 +        ResourceMark rm(THREAD);
  108.16          tty->print_cr("lookup_polymorphic_method %s %s => basic %s",
  108.17                        name->as_C_string(),
  108.18                        full_signature->as_C_string(),
  108.19 @@ -283,6 +285,8 @@
  108.20        }
  108.21        if (result.not_null()) {
  108.22  #ifdef ASSERT
  108.23 +        ResourceMark rm(THREAD);
  108.24 +
  108.25          TempNewSymbol basic_signature =
  108.26            MethodHandles::lookup_basic_type_signature(full_signature, CHECK);
  108.27          int actual_size_of_params = result->size_of_parameters();
  108.28 @@ -1222,8 +1226,10 @@
  108.29    Symbol* method_signature = NULL;
  108.30    KlassHandle  current_klass;
  108.31    resolve_pool(resolved_klass, method_name,  method_signature, current_klass, pool, index, CHECK);
  108.32 -  if (TraceMethodHandles)
  108.33 +  if (TraceMethodHandles) {
  108.34 +    ResourceMark rm(THREAD);
  108.35      tty->print_cr("resolve_invokehandle %s %s", method_name->as_C_string(), method_signature->as_C_string());
  108.36 +  }
  108.37    resolve_handle_call(result, resolved_klass, method_name, method_signature, current_klass, CHECK);
  108.38  }
  108.39  
   109.1 --- a/src/share/vm/memory/allocation.hpp	Wed Apr 24 20:55:28 2013 -0400
   109.2 +++ b/src/share/vm/memory/allocation.hpp	Wed Apr 24 21:11:02 2013 -0400
   109.3 @@ -1,5 +1,5 @@
   109.4  /*
   109.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   109.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   109.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   109.8   *
   109.9   * This code is free software; you can redistribute it and/or modify it
  109.10 @@ -178,7 +178,7 @@
  109.11  #endif // INCLUDE_NMT
  109.12  
  109.13  // debug build does not inline
  109.14 -#if defined(_DEBUG_)
  109.15 +#if defined(_NMT_NOINLINE_)
  109.16    #define CURRENT_PC       (NMT_track_callsite ? os::get_caller_pc(1) : 0)
  109.17    #define CALLER_PC        (NMT_track_callsite ? os::get_caller_pc(2) : 0)
  109.18    #define CALLER_CALLER_PC (NMT_track_callsite ? os::get_caller_pc(3) : 0)
  109.19 @@ -611,4 +611,23 @@
  109.20    void check()    PRODUCT_RETURN;
  109.21  };
  109.22  
  109.23 +// Helper class to allocate arrays that may become large.
  109.24 +// Uses the OS malloc for allocations smaller than ArrayAllocatorMallocLimit
  109.25 +// and uses mapped memory for larger allocations.
  109.26 +// Most OS mallocs do something similar but Solaris malloc does not revert
  109.27 +// to mapped memory for large allocations. By default ArrayAllocatorMallocLimit
  109.28 +// is set so that we always use malloc except for Solaris where we set the
  109.29 +// limit to get mapped memory.
  109.30 +template <class E, MEMFLAGS F>
  109.31 +class ArrayAllocator : StackObj {
  109.32 +  char* _addr;
  109.33 +  bool _use_malloc;
  109.34 +  size_t _size;
  109.35 + public:
  109.36 +  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
  109.37 +  ~ArrayAllocator() { free(); }
  109.38 +  E* allocate(size_t length);
  109.39 +  void free();
  109.40 +};
  109.41 +
  109.42  #endif // SHARE_VM_MEMORY_ALLOCATION_HPP
   110.1 --- a/src/share/vm/memory/allocation.inline.hpp	Wed Apr 24 20:55:28 2013 -0400
   110.2 +++ b/src/share/vm/memory/allocation.inline.hpp	Wed Apr 24 21:11:02 2013 -0400
   110.3 @@ -108,5 +108,49 @@
   110.4     FreeHeap(p, F);
   110.5  }
   110.6  
   110.7 +template <class E, MEMFLAGS F>
   110.8 +E* ArrayAllocator<E, F>::allocate(size_t length) {
   110.9 +  assert(_addr == NULL, "Already in use");
  110.10 +
  110.11 +  _size = sizeof(E) * length;
  110.12 +  _use_malloc = _size < ArrayAllocatorMallocLimit;
  110.13 +
  110.14 +  if (_use_malloc) {
  110.15 +    _addr = AllocateHeap(_size, F);
  110.16 +    if (_addr == NULL && _size >=  (size_t)os::vm_allocation_granularity()) {
  110.17 +      // malloc failed let's try with mmap instead
  110.18 +      _use_malloc = false;
  110.19 +    } else {
  110.20 +      return (E*)_addr;
  110.21 +    }
  110.22 +  }
  110.23 +
  110.24 +  int alignment = os::vm_allocation_granularity();
  110.25 +  _size = align_size_up(_size, alignment);
  110.26 +
  110.27 +  _addr = os::reserve_memory(_size, NULL, alignment);
  110.28 +  if (_addr == NULL) {
  110.29 +    vm_exit_out_of_memory(_size, "Allocator (reserve)");
  110.30 +  }
  110.31 +
  110.32 +  bool success = os::commit_memory(_addr, _size, false /* executable */);
  110.33 +  if (!success) {
  110.34 +    vm_exit_out_of_memory(_size, "Allocator (commit)");
  110.35 +  }
  110.36 +
  110.37 +  return (E*)_addr;
  110.38 +}
  110.39 +
  110.40 +template<class E, MEMFLAGS F>
  110.41 +void ArrayAllocator<E, F>::free() {
  110.42 +  if (_addr != NULL) {
  110.43 +    if (_use_malloc) {
  110.44 +      FreeHeap(_addr, F);
  110.45 +    } else {
  110.46 +      os::release_memory(_addr, _size);
  110.47 +    }
  110.48 +    _addr = NULL;
  110.49 +  }
  110.50 +}
  110.51  
  110.52  #endif // SHARE_VM_MEMORY_ALLOCATION_INLINE_HPP
   111.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Wed Apr 24 20:55:28 2013 -0400
   111.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed Apr 24 21:11:02 2013 -0400
   111.3 @@ -819,12 +819,13 @@
   111.4  // Returns "TRUE" iff "p" points into the committed areas of the heap.
   111.5  bool GenCollectedHeap::is_in(const void* p) const {
   111.6    #ifndef ASSERT
   111.7 -  guarantee(VerifyBeforeGC   ||
   111.8 -            VerifyDuringGC   ||
   111.9 -            VerifyBeforeExit ||
  111.10 -            PrintAssembly    ||
  111.11 -            tty->count() != 0 ||   // already printing
  111.12 -            VerifyAfterGC    ||
  111.13 +  guarantee(VerifyBeforeGC      ||
  111.14 +            VerifyDuringGC      ||
  111.15 +            VerifyBeforeExit    ||
  111.16 +            VerifyDuringStartup ||
  111.17 +            PrintAssembly       ||
  111.18 +            tty->count() != 0   ||   // already printing
  111.19 +            VerifyAfterGC       ||
  111.20      VMError::fatal_error_in_progress(), "too expensive");
  111.21  
  111.22    #endif
  111.23 @@ -1132,6 +1133,17 @@
  111.24  #endif // INCLUDE_ALL_GCS
  111.25  }
  111.26  
  111.27 +void GenCollectedHeap::print_on_error(outputStream* st) const {
  111.28 +  this->CollectedHeap::print_on_error(st);
  111.29 +
  111.30 +#if INCLUDE_ALL_GCS
  111.31 +  if (UseConcMarkSweepGC) {
  111.32 +    st->cr();
  111.33 +    CMSCollector::print_on_error(st);
  111.34 +  }
  111.35 +#endif // INCLUDE_ALL_GCS
  111.36 +}
  111.37 +
  111.38  void GenCollectedHeap::print_tracing_info() const {
  111.39    if (TraceGen0Time) {
  111.40      get_gen(0)->print_summary_info();
   112.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Wed Apr 24 20:55:28 2013 -0400
   112.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed Apr 24 21:11:02 2013 -0400
   112.3 @@ -344,6 +344,7 @@
   112.4    virtual void print_gc_threads_on(outputStream* st) const;
   112.5    virtual void gc_threads_do(ThreadClosure* tc) const;
   112.6    virtual void print_tracing_info() const;
   112.7 +  virtual void print_on_error(outputStream* st) const;
   112.8  
   112.9    // PrintGC, PrintGCDetails support
  112.10    void print_heap_change(size_t prev_used) const;
   113.1 --- a/src/share/vm/memory/generation.cpp	Wed Apr 24 20:55:28 2013 -0400
   113.2 +++ b/src/share/vm/memory/generation.cpp	Wed Apr 24 21:11:02 2013 -0400
   113.3 @@ -382,7 +382,9 @@
   113.4  CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
   113.5                                 int level,
   113.6                                 GenRemSet* remset) :
   113.7 -  Generation(rs, initial_byte_size, level), _rs(remset)
   113.8 +  Generation(rs, initial_byte_size, level), _rs(remset),
   113.9 +  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  113.10 +  _used_at_prologue()
  113.11  {
  113.12    HeapWord* start = (HeapWord*)rs.base();
  113.13    size_t reserved_byte_size = rs.size();
  113.14 @@ -406,6 +408,9 @@
  113.15      // the end if we try.
  113.16      guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  113.17    }
  113.18 +  _min_heap_delta_bytes = MinHeapDeltaBytes;
  113.19 +  _capacity_at_prologue = initial_byte_size;
  113.20 +  _used_at_prologue = 0;
  113.21  }
  113.22  
  113.23  bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  113.24 @@ -457,6 +462,160 @@
  113.25  }
  113.26  
  113.27  
  113.28 +void CardGeneration::compute_new_size() {
  113.29 +  assert(_shrink_factor <= 100, "invalid shrink factor");
  113.30 +  size_t current_shrink_factor = _shrink_factor;
  113.31 +  _shrink_factor = 0;
  113.32 +
  113.33 +  // We don't have floating point command-line arguments
  113.34 +  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  113.35 +  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  113.36 +  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  113.37 +
  113.38 +  // Compute some numbers about the state of the heap.
  113.39 +  const size_t used_after_gc = used();
  113.40 +  const size_t capacity_after_gc = capacity();
  113.41 +
  113.42 +  const double min_tmp = used_after_gc / maximum_used_percentage;
  113.43 +  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  113.44 +  // Don't shrink less than the initial generation size
  113.45 +  minimum_desired_capacity = MAX2(minimum_desired_capacity,
  113.46 +                                  spec()->init_size());
  113.47 +  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  113.48 +
  113.49 +  if (PrintGC && Verbose) {
  113.50 +    const size_t free_after_gc = free();
  113.51 +    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  113.52 +    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
  113.53 +    gclog_or_tty->print_cr("  "
  113.54 +                  "  minimum_free_percentage: %6.2f"
  113.55 +                  "  maximum_used_percentage: %6.2f",
  113.56 +                  minimum_free_percentage,
  113.57 +                  maximum_used_percentage);
  113.58 +    gclog_or_tty->print_cr("  "
  113.59 +                  "   free_after_gc   : %6.1fK"
  113.60 +                  "   used_after_gc   : %6.1fK"
  113.61 +                  "   capacity_after_gc   : %6.1fK",
  113.62 +                  free_after_gc / (double) K,
  113.63 +                  used_after_gc / (double) K,
  113.64 +                  capacity_after_gc / (double) K);
  113.65 +    gclog_or_tty->print_cr("  "
  113.66 +                  "   free_percentage: %6.2f",
  113.67 +                  free_percentage);
  113.68 +  }
  113.69 +
  113.70 +  if (capacity_after_gc < minimum_desired_capacity) {
  113.71 +    // If we have less free space than we want then expand
  113.72 +    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  113.73 +    // Don't expand unless it's significant
  113.74 +    if (expand_bytes >= _min_heap_delta_bytes) {
  113.75 +      expand(expand_bytes, 0); // safe if expansion fails
  113.76 +    }
  113.77 +    if (PrintGC && Verbose) {
  113.78 +      gclog_or_tty->print_cr("    expanding:"
  113.79 +                    "  minimum_desired_capacity: %6.1fK"
  113.80 +                    "  expand_bytes: %6.1fK"
  113.81 +                    "  _min_heap_delta_bytes: %6.1fK",
  113.82 +                    minimum_desired_capacity / (double) K,
  113.83 +                    expand_bytes / (double) K,
  113.84 +                    _min_heap_delta_bytes / (double) K);
  113.85 +    }
  113.86 +    return;
  113.87 +  }
  113.88 +
  113.89 +  // No expansion, now see if we want to shrink
  113.90 +  size_t shrink_bytes = 0;
  113.91 +  // We would never want to shrink more than this
  113.92 +  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
  113.93 +
  113.94 +  if (MaxHeapFreeRatio < 100) {
  113.95 +    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
  113.96 +    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  113.97 +    const double max_tmp = used_after_gc / minimum_used_percentage;
  113.98 +    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  113.99 +    maximum_desired_capacity = MAX2(maximum_desired_capacity,
 113.100 +                                    spec()->init_size());
 113.101 +    if (PrintGC && Verbose) {
 113.102 +      gclog_or_tty->print_cr("  "
 113.103 +                             "  maximum_free_percentage: %6.2f"
 113.104 +                             "  minimum_used_percentage: %6.2f",
 113.105 +                             maximum_free_percentage,
 113.106 +                             minimum_used_percentage);
 113.107 +      gclog_or_tty->print_cr("  "
 113.108 +                             "  _capacity_at_prologue: %6.1fK"
 113.109 +                             "  minimum_desired_capacity: %6.1fK"
 113.110 +                             "  maximum_desired_capacity: %6.1fK",
 113.111 +                             _capacity_at_prologue / (double) K,
 113.112 +                             minimum_desired_capacity / (double) K,
 113.113 +                             maximum_desired_capacity / (double) K);
 113.114 +    }
 113.115 +    assert(minimum_desired_capacity <= maximum_desired_capacity,
 113.116 +           "sanity check");
 113.117 +
 113.118 +    if (capacity_after_gc > maximum_desired_capacity) {
 113.119 +      // Capacity too large, compute shrinking size
 113.120 +      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 113.121 +      // We don't want shrink all the way back to initSize if people call
 113.122 +      // System.gc(), because some programs do that between "phases" and then
 113.123 +      // we'd just have to grow the heap up again for the next phase.  So we
 113.124 +      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 113.125 +      // on the third call, and 100% by the fourth call.  But if we recompute
 113.126 +      // size without shrinking, it goes back to 0%.
 113.127 +      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 113.128 +      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 113.129 +      if (current_shrink_factor == 0) {
 113.130 +        _shrink_factor = 10;
 113.131 +      } else {
 113.132 +        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
 113.133 +      }
 113.134 +      if (PrintGC && Verbose) {
 113.135 +        gclog_or_tty->print_cr("  "
 113.136 +                      "  shrinking:"
 113.137 +                      "  initSize: %.1fK"
 113.138 +                      "  maximum_desired_capacity: %.1fK",
 113.139 +                      spec()->init_size() / (double) K,
 113.140 +                      maximum_desired_capacity / (double) K);
 113.141 +        gclog_or_tty->print_cr("  "
 113.142 +                      "  shrink_bytes: %.1fK"
 113.143 +                      "  current_shrink_factor: %d"
 113.144 +                      "  new shrink factor: %d"
 113.145 +                      "  _min_heap_delta_bytes: %.1fK",
 113.146 +                      shrink_bytes / (double) K,
 113.147 +                      current_shrink_factor,
 113.148 +                      _shrink_factor,
 113.149 +                      _min_heap_delta_bytes / (double) K);
 113.150 +      }
 113.151 +    }
 113.152 +  }
 113.153 +
 113.154 +  if (capacity_after_gc > _capacity_at_prologue) {
 113.155 +    // We might have expanded for promotions, in which case we might want to
 113.156 +    // take back that expansion if there's room after GC.  That keeps us from
 113.157 +    // stretching the heap with promotions when there's plenty of room.
 113.158 +    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
 113.159 +    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
 113.160 +    // We have two shrinking computations, take the largest
 113.161 +    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
 113.162 +    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 113.163 +    if (PrintGC && Verbose) {
 113.164 +      gclog_or_tty->print_cr("  "
 113.165 +                             "  aggressive shrinking:"
 113.166 +                             "  _capacity_at_prologue: %.1fK"
 113.167 +                             "  capacity_after_gc: %.1fK"
 113.168 +                             "  expansion_for_promotion: %.1fK"
 113.169 +                             "  shrink_bytes: %.1fK",
 113.170 +                             capacity_after_gc / (double) K,
 113.171 +                             _capacity_at_prologue / (double) K,
 113.172 +                             expansion_for_promotion / (double) K,
 113.173 +                             shrink_bytes / (double) K);
 113.174 +    }
 113.175 +  }
 113.176 +  // Don't shrink unless it's significant
 113.177 +  if (shrink_bytes >= _min_heap_delta_bytes) {
 113.178 +    shrink(shrink_bytes);
 113.179 +  }
 113.180 +}
 113.181 +
 113.182  // Currently nothing to do.
 113.183  void CardGeneration::prepare_for_verify() {}
 113.184  
   114.1 --- a/src/share/vm/memory/generation.hpp	Wed Apr 24 20:55:28 2013 -0400
   114.2 +++ b/src/share/vm/memory/generation.hpp	Wed Apr 24 21:11:02 2013 -0400
   114.3 @@ -634,6 +634,17 @@
   114.4    // This is local to this generation.
   114.5    BlockOffsetSharedArray* _bts;
   114.6  
   114.7 +  // current shrinking effect: this damps shrinking when the heap gets empty.
   114.8 +  size_t _shrink_factor;
   114.9 +
  114.10 +  size_t _min_heap_delta_bytes;   // Minimum amount to expand.
  114.11 +
  114.12 +  // Some statistics from before gc started.
  114.13 +  // These are gathered in the gc_prologue (and should_collect)
  114.14 +  // to control growing/shrinking policy in spite of promotions.
  114.15 +  size_t _capacity_at_prologue;
  114.16 +  size_t _used_at_prologue;
  114.17 +
  114.18    CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
  114.19                   GenRemSet* remset);
  114.20  
  114.21 @@ -644,6 +655,11 @@
  114.22    // necessarily the full "bytes") was done.
  114.23    virtual bool expand(size_t bytes, size_t expand_bytes);
  114.24  
  114.25 +  // Shrink generation with specified size (returns false if unable to shrink)
  114.26 +  virtual void shrink(size_t bytes) = 0;
  114.27 +
  114.28 +  virtual void compute_new_size();
  114.29 +
  114.30    virtual void clear_remembered_set();
  114.31  
  114.32    virtual void invalidate_remembered_set();
  114.33 @@ -667,7 +683,6 @@
  114.34    friend class VM_PopulateDumpSharedSpace;
  114.35  
  114.36   protected:
  114.37 -  size_t     _min_heap_delta_bytes;   // Minimum amount to expand.
  114.38    ContiguousSpace*  _the_space;       // actual space holding objects
  114.39    WaterMark  _last_gc;                // watermark between objects allocated before
  114.40                                        // and after last GC.
  114.41 @@ -688,11 +703,10 @@
  114.42  
  114.43   public:
  114.44    OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
  114.45 -                               size_t min_heap_delta_bytes,
  114.46                                 int level, GenRemSet* remset,
  114.47                                 ContiguousSpace* space) :
  114.48      CardGeneration(rs, initial_byte_size, level, remset),
  114.49 -    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  114.50 +    _the_space(space)
  114.51    {}
  114.52  
  114.53    inline bool is_in(const void* p) const;
   115.1 --- a/src/share/vm/memory/heap.cpp	Wed Apr 24 20:55:28 2013 -0400
   115.2 +++ b/src/share/vm/memory/heap.cpp	Wed Apr 24 21:11:02 2013 -0400
   115.3 @@ -42,7 +42,7 @@
   115.4    _log2_segment_size            = 0;
   115.5    _next_segment                 = 0;
   115.6    _freelist                     = NULL;
   115.7 -  _free_segments                = 0;
   115.8 +  _freelist_segments            = 0;
   115.9  }
  115.10  
  115.11  
  115.12 @@ -79,13 +79,6 @@
  115.13  }
  115.14  
  115.15  
  115.16 -static size_t align_to_allocation_size(size_t size) {
  115.17 -  const size_t alignment = (size_t)os::vm_allocation_granularity();
  115.18 -  assert(is_power_of_2(alignment), "no kidding ???");
  115.19 -  return (size + alignment - 1) & ~(alignment - 1);
  115.20 -}
  115.21 -
  115.22 -
  115.23  void CodeHeap::on_code_mapping(char* base, size_t size) {
  115.24  #ifdef LINUX
  115.25    extern void linux_wrap_code(char* base, size_t size);
  115.26 @@ -122,8 +115,8 @@
  115.27    }
  115.28  
  115.29    on_code_mapping(_memory.low(), _memory.committed_size());
  115.30 -  _number_of_committed_segments = number_of_segments(_memory.committed_size());
  115.31 -  _number_of_reserved_segments  = number_of_segments(_memory.reserved_size());
  115.32 +  _number_of_committed_segments = size_to_segments(_memory.committed_size());
  115.33 +  _number_of_reserved_segments  = size_to_segments(_memory.reserved_size());
  115.34    assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  115.35  
  115.36    // reserve space for _segmap
  115.37 @@ -156,8 +149,8 @@
  115.38      if (!_memory.expand_by(dm)) return false;
  115.39      on_code_mapping(base, dm);
  115.40      size_t i = _number_of_committed_segments;
  115.41 -    _number_of_committed_segments = number_of_segments(_memory.committed_size());
  115.42 -    assert(_number_of_reserved_segments == number_of_segments(_memory.reserved_size()), "number of reserved segments should not change");
  115.43 +    _number_of_committed_segments = size_to_segments(_memory.committed_size());
  115.44 +    assert(_number_of_reserved_segments == size_to_segments(_memory.reserved_size()), "number of reserved segments should not change");
  115.45      assert(_number_of_reserved_segments >= _number_of_committed_segments, "just checking");
  115.46      // expand _segmap space
  115.47      size_t ds = align_to_page_size(_number_of_committed_segments) - _segmap.committed_size();
  115.48 @@ -183,33 +176,44 @@
  115.49  }
  115.50  
  115.51  
  115.52 -void* CodeHeap::allocate(size_t size) {
  115.53 -  size_t length = number_of_segments(size + sizeof(HeapBlock));
  115.54 -  assert(length *_segment_size >= sizeof(FreeBlock), "not enough room for FreeList");
  115.55 +void* CodeHeap::allocate(size_t instance_size, bool is_critical) {
  115.56 +  size_t number_of_segments = size_to_segments(instance_size + sizeof(HeapBlock));
  115.57 +  assert(segments_to_size(number_of_segments) >= sizeof(FreeBlock), "not enough room for FreeList");
  115.58  
  115.59    // First check if we can satify request from freelist
  115.60    debug_only(verify());
  115.61 -  HeapBlock* block = search_freelist(length);
  115.62 +  HeapBlock* block = search_freelist(number_of_segments, is_critical);
  115.63    debug_only(if (VerifyCodeCacheOften) verify());
  115.64    if (block != NULL) {
  115.65 -    assert(block->length() >= length && block->length() < length + CodeCacheMinBlockLength, "sanity check");
  115.66 +    assert(block->length() >= number_of_segments && block->length() < number_of_segments + CodeCacheMinBlockLength, "sanity check");
  115.67      assert(!block->free(), "must be marked free");
  115.68  #ifdef ASSERT
  115.69 -    memset((void *)block->allocated_space(), badCodeHeapNewVal, size);
  115.70 +    memset((void *)block->allocated_space(), badCodeHeapNewVal, instance_size);
  115.71  #endif
  115.72      return block->allocated_space();
  115.73    }
  115.74  
  115.75 -  if (length < CodeCacheMinBlockLength) {
  115.76 -    length = CodeCacheMinBlockLength;
  115.77 +  // Ensure minimum size for allocation to the heap.
  115.78 +  if (number_of_segments < CodeCacheMinBlockLength) {
  115.79 +    number_of_segments = CodeCacheMinBlockLength;
  115.80    }
  115.81 -  if (_next_segment + length <= _number_of_committed_segments) {
  115.82 -    mark_segmap_as_used(_next_segment, _next_segment + length);
  115.83 +
  115.84 +  if (!is_critical) {
  115.85 +    // Make sure the allocation fits in the unallocated heap without using
  115.86 +    // the CodeCacheMimimumFreeSpace that is reserved for critical allocations.
  115.87 +    if (segments_to_size(number_of_segments) > (heap_unallocated_capacity() - CodeCacheMinimumFreeSpace)) {
  115.88 +      // Fail allocation
  115.89 +      return NULL;
  115.90 +    }
  115.91 +  }
  115.92 +
  115.93 +  if (_next_segment + number_of_segments <= _number_of_committed_segments) {
  115.94 +    mark_segmap_as_used(_next_segment, _next_segment + number_of_segments);
  115.95      HeapBlock* b =  block_at(_next_segment);
  115.96 -    b->initialize(length);
  115.97 -    _next_segment += length;
  115.98 +    b->initialize(number_of_segments);
  115.99 +    _next_segment += number_of_segments;
 115.100  #ifdef ASSERT
 115.101 -    memset((void *)b->allocated_space(), badCodeHeapNewVal, size);
 115.102 +    memset((void *)b->allocated_space(), badCodeHeapNewVal, instance_size);
 115.103  #endif
 115.104      return b->allocated_space();
 115.105    } else {
 115.106 @@ -226,7 +230,7 @@
 115.107  #ifdef ASSERT
 115.108    memset((void *)b->allocated_space(),
 115.109           badCodeHeapFreeVal,
 115.110 -         size(b->length()) - sizeof(HeapBlock));
 115.111 +         segments_to_size(b->length()) - sizeof(HeapBlock));
 115.112  #endif
 115.113    add_to_freelist(b);
 115.114  
 115.115 @@ -306,32 +310,14 @@
 115.116  }
 115.117  
 115.118  size_t CodeHeap::allocated_capacity() const {
 115.119 -  // Start with the committed size in _memory;
 115.120 -  size_t l = _memory.committed_size();
 115.121 -
 115.122 -  // Subtract the committed, but unused, segments
 115.123 -  l -= size(_number_of_committed_segments - _next_segment);
 115.124 -
 115.125 -  // Subtract the size of the freelist
 115.126 -  l -= size(_free_segments);
 115.127 -
 115.128 -  return l;
 115.129 +  // size of used heap - size on freelist
 115.130 +  return segments_to_size(_next_segment - _freelist_segments);
 115.131  }
 115.132  
 115.133 -size_t CodeHeap::largest_free_block() const {
 115.134 -  // First check unused space excluding free blocks.
 115.135 -  size_t free_sz = size(_free_segments);
 115.136 -  size_t unused  = max_capacity() - allocated_capacity() - free_sz;
 115.137 -  if (unused >= free_sz)
 115.138 -    return unused;
 115.139 -
 115.140 -  // Now check largest free block.
 115.141 -  size_t len = 0;
 115.142 -  for (FreeBlock* b = _freelist; b != NULL; b = b->link()) {
 115.143 -    if (b->length() > len)
 115.144 -      len = b->length();
 115.145 -  }
 115.146 -  return MAX2(unused, size(len));
 115.147 +// Returns size of the unallocated heap block
 115.148 +size_t CodeHeap::heap_unallocated_capacity() const {
 115.149 +  // Total number of segments - number currently used
 115.150 +  return segments_to_size(_number_of_reserved_segments - _next_segment);
 115.151  }
 115.152  
 115.153  // Free list management
 115.154 @@ -372,7 +358,7 @@
 115.155    assert(b != _freelist, "cannot be removed twice");
 115.156  
 115.157    // Mark as free and update free space count
 115.158 -  _free_segments += b->length();
 115.159 +  _freelist_segments += b->length();
 115.160    b->set_free();
 115.161  
 115.162    // First element in list?
 115.163 @@ -407,7 +393,7 @@
 115.164  
 115.165  // Search freelist for an entry on the list with the best fit
 115.166  // Return NULL if no one was found
 115.167 -FreeBlock* CodeHeap::search_freelist(size_t length) {
 115.168 +FreeBlock* CodeHeap::search_freelist(size_t length, bool is_critical) {
 115.169    FreeBlock *best_block = NULL;
 115.170    FreeBlock *best_prev  = NULL;
 115.171    size_t best_length = 0;
 115.172 @@ -418,6 +404,16 @@
 115.173    while(cur != NULL) {
 115.174      size_t l = cur->length();
 115.175      if (l >= length && (best_block == NULL || best_length > l)) {
 115.176 +
 115.177 +      // Non critical allocations are not allowed to use the last part of the code heap.
 115.178 +      if (!is_critical) {
 115.179 +        // Make sure the end of the allocation doesn't cross into the last part of the code heap
 115.180 +        if (((size_t)cur + length) > ((size_t)high_boundary() - CodeCacheMinimumFreeSpace)) {
 115.181 +          // the freelist is sorted by address - if one fails, all consecutive will also fail.
 115.182 +          break;
 115.183 +        }
 115.184 +      }
 115.185 +
 115.186        // Remember best block, its previous element, and its length
 115.187        best_block = cur;
 115.188        best_prev  = prev;
 115.189 @@ -459,7 +455,7 @@
 115.190    }
 115.191  
 115.192    best_block->set_used();
 115.193 -  _free_segments -= length;
 115.194 +  _freelist_segments -= length;
 115.195    return best_block;
 115.196  }
 115.197  
 115.198 @@ -485,7 +481,7 @@
 115.199    }
 115.200  
 115.201    // Verify that freelist contains the right amount of free space
 115.202 -  //  guarantee(len == _free_segments, "wrong freelist");
 115.203 +  //  guarantee(len == _freelist_segments, "wrong freelist");
 115.204  
 115.205    // Verify that the number of free blocks is not out of hand.
 115.206    static int free_block_threshold = 10000;
   116.1 --- a/src/share/vm/memory/heap.hpp	Wed Apr 24 20:55:28 2013 -0400
   116.2 +++ b/src/share/vm/memory/heap.hpp	Wed Apr 24 21:11:02 2013 -0400
   116.3 @@ -91,11 +91,11 @@
   116.4    size_t       _next_segment;
   116.5  
   116.6    FreeBlock*   _freelist;
   116.7 -  size_t       _free_segments;                   // No. of segments in freelist
   116.8 +  size_t       _freelist_segments;               // No. of segments in freelist
   116.9  
  116.10    // Helper functions
  116.11 -  size_t   number_of_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
  116.12 -  size_t   size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
  116.13 +  size_t   size_to_segments(size_t size) const { return (size + _segment_size - 1) >> _log2_segment_size; }
  116.14 +  size_t   segments_to_size(size_t number_of_segments) const { return number_of_segments << _log2_segment_size; }
  116.15  
  116.16    size_t   segment_for(void* p) const            { return ((char*)p - _memory.low()) >> _log2_segment_size; }
  116.17    HeapBlock* block_at(size_t i) const            { return (HeapBlock*)(_memory.low() + (i << _log2_segment_size)); }
  116.18 @@ -110,7 +110,7 @@
  116.19  
  116.20    // Toplevel freelist management
  116.21    void add_to_freelist(HeapBlock *b);
  116.22 -  FreeBlock* search_freelist(size_t length);
  116.23 +  FreeBlock* search_freelist(size_t length, bool is_critical);
  116.24  
  116.25    // Iteration helpers
  116.26    void*      next_free(HeapBlock* b) const;
  116.27 @@ -132,22 +132,19 @@
  116.28    void  clear();                                 // clears all heap contents
  116.29  
  116.30    // Memory allocation
  116.31 -  void* allocate  (size_t size);                 // allocates a block of size or returns NULL
  116.32 +  void* allocate  (size_t size, bool is_critical);  // allocates a block of size or returns NULL
  116.33    void  deallocate(void* p);                     // deallocates a block
  116.34  
  116.35    // Attributes
  116.36 -  void*  begin() const                           { return _memory.low (); }
  116.37 -  void*  end() const                             { return _memory.high(); }
  116.38 -  bool   contains(void* p) const                 { return begin() <= p && p < end(); }
  116.39 -  void*  find_start(void* p) const;              // returns the block containing p or NULL
  116.40 -  size_t alignment_unit() const;                 // alignment of any block
  116.41 -  size_t alignment_offset() const;               // offset of first byte of any block, within the enclosing alignment unit
  116.42 -  static size_t header_size();                   // returns the header size for each heap block
  116.43 +  char* low_boundary() const                     { return _memory.low_boundary (); }
  116.44 +  char* high() const                             { return _memory.high(); }
  116.45 +  char* high_boundary() const                    { return _memory.high_boundary(); }
  116.46  
  116.47 -  // Returns reserved area high and low addresses
  116.48 -  char *low_boundary() const                     { return _memory.low_boundary (); }
  116.49 -  char *high() const                             { return _memory.high(); }
  116.50 -  char *high_boundary() const                    { return _memory.high_boundary(); }
  116.51 +  bool  contains(const void* p) const            { return low_boundary() <= p && p < high(); }
  116.52 +  void* find_start(void* p) const;              // returns the block containing p or NULL
  116.53 +  size_t alignment_unit() const;                // alignment of any block
  116.54 +  size_t alignment_offset() const;              // offset of first byte of any block, within the enclosing alignment unit
  116.55 +  static size_t header_size();                  // returns the header size for each heap block
  116.56  
  116.57    // Iteration
  116.58  
  116.59 @@ -161,8 +158,11 @@
  116.60    size_t max_capacity() const;
  116.61    size_t allocated_capacity() const;
  116.62    size_t unallocated_capacity() const            { return max_capacity() - allocated_capacity(); }
  116.63 -  size_t largest_free_block() const;
  116.64  
  116.65 +private:
  116.66 +  size_t heap_unallocated_capacity() const;
  116.67 +
  116.68 +public:
  116.69    // Debugging
  116.70    void verify();
  116.71    void print()  PRODUCT_RETURN;
   117.1 --- a/src/share/vm/memory/metaspace.cpp	Wed Apr 24 20:55:28 2013 -0400
   117.2 +++ b/src/share/vm/memory/metaspace.cpp	Wed Apr 24 21:11:02 2013 -0400
   117.3 @@ -103,27 +103,7 @@
   117.4  // a chunk is placed on the free list of blocks (BlockFreelist) and
   117.5  // reused from there.
   117.6  
   117.7 -// Pointer to list of Metachunks.
   117.8 -class ChunkList VALUE_OBJ_CLASS_SPEC {
   117.9 -  // List of free chunks
  117.10 -  Metachunk* _head;
  117.11 -
  117.12 - public:
  117.13 -  // Constructor
  117.14 -  ChunkList() : _head(NULL) {}
  117.15 -
  117.16 -  // Accessors
  117.17 -  Metachunk* head() { return _head; }
  117.18 -  void set_head(Metachunk* v) { _head = v; }
  117.19 -
  117.20 -  // Link at head of the list
  117.21 -  void add_at_head(Metachunk* head, Metachunk* tail);
  117.22 -  void add_at_head(Metachunk* head);
  117.23 -
  117.24 -  size_t sum_list_size();
  117.25 -  size_t sum_list_count();
  117.26 -  size_t sum_list_capacity();
  117.27 -};
  117.28 +typedef class FreeList<Metachunk> ChunkList;
  117.29  
  117.30  // Manages the global free lists of chunks.
  117.31  // Has three lists of free chunks, and a total size and
  117.32 @@ -185,6 +165,10 @@
  117.33    // for special, small, medium, and humongous chunks.
  117.34    static ChunkIndex list_index(size_t size);
  117.35  
  117.36 +  // Add the simple linked list of chunks to the freelist of chunks
  117.37 +  // of type index.
  117.38 +  void return_chunks(ChunkIndex index, Metachunk* chunks);
  117.39 +
  117.40    // Total of the space in the free chunks list
  117.41    size_t free_chunks_total();
  117.42    size_t free_chunks_total_in_bytes();
  117.43 @@ -899,6 +883,9 @@
  117.44                     Mutex::_no_safepoint_check_flag);
  117.45    bool initialization_succeeded = grow_vs(word_size);
  117.46  
  117.47 +  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  117.48 +  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  117.49 +  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  117.50    assert(initialization_succeeded,
  117.51      " VirtualSpaceList initialization should not fail");
  117.52  }
  117.53 @@ -913,6 +900,9 @@
  117.54                     Mutex::_no_safepoint_check_flag);
  117.55    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  117.56    bool succeeded = class_entry->initialize();
  117.57 +  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  117.58 +  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  117.59 +  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  117.60    assert(succeeded, " VirtualSpaceList initialization should not fail");
  117.61    link_vs(class_entry, rs.size()/BytesPerWord);
  117.62  }
  117.63 @@ -1380,76 +1370,6 @@
  117.64  }
  117.65  #endif
  117.66  
  117.67 -// ChunkList methods
  117.68 -
  117.69 -size_t ChunkList::sum_list_size() {
  117.70 -  size_t result = 0;
  117.71 -  Metachunk* cur = head();
  117.72 -  while (cur != NULL) {
  117.73 -    result += cur->word_size();
  117.74 -    cur = cur->next();
  117.75 -  }
  117.76 -  return result;
  117.77 -}
  117.78 -
  117.79 -size_t ChunkList::sum_list_count() {
  117.80 -  size_t result = 0;
  117.81 -  Metachunk* cur = head();
  117.82 -  while (cur != NULL) {
  117.83 -    result++;
  117.84 -    cur = cur->next();
  117.85 -  }
  117.86 -  return result;
  117.87 -}
  117.88 -
  117.89 -size_t ChunkList::sum_list_capacity() {
  117.90 -  size_t result = 0;
  117.91 -  Metachunk* cur = head();
  117.92 -  while (cur != NULL) {
  117.93 -    result += cur->capacity_word_size();
  117.94 -    cur = cur->next();
  117.95 -  }
  117.96 -  return result;
  117.97 -}
  117.98 -
  117.99 -void ChunkList::add_at_head(Metachunk* head, Metachunk* tail) {
 117.100 -  assert_lock_strong(SpaceManager::expand_lock());
 117.101 -  assert(head == tail || tail->next() == NULL,
 117.102 -         "Not the tail or the head has already been added to a list");
 117.103 -
 117.104 -  if (TraceMetadataChunkAllocation && Verbose) {
 117.105 -    gclog_or_tty->print("ChunkList::add_at_head(head, tail): ");
 117.106 -    Metachunk* cur = head;
 117.107 -    while (cur != NULL) {
 117.108 -      gclog_or_tty->print(PTR_FORMAT " (" SIZE_FORMAT ") ", cur, cur->word_size());
 117.109 -      cur = cur->next();
 117.110 -    }
 117.111 -    gclog_or_tty->print_cr("");
 117.112 -  }
 117.113 -
 117.114 -  if (tail != NULL) {
 117.115 -    tail->set_next(_head);
 117.116 -  }
 117.117 -  set_head(head);
 117.118 -}
 117.119 -
 117.120 -void ChunkList::add_at_head(Metachunk* list) {
 117.121 -  if (list == NULL) {
 117.122 -    // Nothing to add
 117.123 -    return;
 117.124 -  }
 117.125 -  assert_lock_strong(SpaceManager::expand_lock());
 117.126 -  Metachunk* head = list;
 117.127 -  Metachunk* tail = list;
 117.128 -  Metachunk* cur = head->next();
 117.129 -  // Search for the tail since it is not passed.
 117.130 -  while (cur != NULL) {
 117.131 -    tail = cur;
 117.132 -    cur = cur->next();
 117.133 -  }
 117.134 -  add_at_head(head, tail);
 117.135 -}
 117.136 -
 117.137  // ChunkManager methods
 117.138  
 117.139  // Verification of _free_chunks_total and _free_chunks_count does not
 117.140 @@ -1553,7 +1473,7 @@
 117.141        continue;
 117.142      }
 117.143  
 117.144 -    result = result + list->sum_list_capacity();
 117.145 +    result = result + list->count() * list->size();
 117.146    }
 117.147    result = result + humongous_dictionary()->total_size();
 117.148    return result;
 117.149 @@ -1567,7 +1487,7 @@
 117.150      if (list == NULL) {
 117.151        continue;
 117.152      }
 117.153 -    count = count + list->sum_list_count();
 117.154 +    count = count + list->count();
 117.155    }
 117.156    count = count + humongous_dictionary()->total_free_blocks();
 117.157    return count;
 117.158 @@ -1622,7 +1542,7 @@
 117.159      }
 117.160  
 117.161      // Remove the chunk as the head of the list.
 117.162 -    free_list->set_head(chunk->next());
 117.163 +    free_list->remove_chunk(chunk);
 117.164  
 117.165      // Chunk is being removed from the chunks free list.
 117.166      dec_free_chunks_total(chunk->capacity_word_size());
 117.167 @@ -1679,7 +1599,7 @@
 117.168      size_t list_count;
 117.169      if (list_index(word_size) < HumongousIndex) {
 117.170        ChunkList* list = find_free_chunks_list(word_size);
 117.171 -      list_count = list->sum_list_count();
 117.172 +      list_count = list->count();
 117.173      } else {
 117.174        list_count = humongous_dictionary()->total_count();
 117.175      }
 117.176 @@ -1958,6 +1878,29 @@
 117.177    }
 117.178  }
 117.179  
 117.180 +void ChunkManager::return_chunks(ChunkIndex index, Metachunk* chunks) {
 117.181 +  if (chunks == NULL) {
 117.182 +    return;
 117.183 +  }
 117.184 +  ChunkList* list = free_chunks(index);
 117.185 +  assert(list->size() == chunks->word_size(), "Mismatch in chunk sizes");
 117.186 +  assert_lock_strong(SpaceManager::expand_lock());
 117.187 +  Metachunk* cur = chunks;
 117.188 +
 117.189 +  // This return chunks one at a time.  If a new
 117.190 +  // class List can be created that is a base class
 117.191 +  // of FreeList then something like FreeList::prepend()
 117.192 +  // can be used in place of this loop
 117.193 +  while (cur != NULL) {
 117.194 +    // Capture the next link before it is changed
 117.195 +    // by the call to return_chunk_at_head();
 117.196 +    Metachunk* next = cur->next();
 117.197 +    cur->set_is_free(true);
 117.198 +    list->return_chunk_at_head(cur);
 117.199 +    cur = next;
 117.200 +  }
 117.201 +}
 117.202 +
 117.203  SpaceManager::~SpaceManager() {
 117.204    // This call this->_lock which can't be done while holding expand_lock()
 117.205    const size_t in_use_before = sum_capacity_in_chunks_in_use();
 117.206 @@ -1995,11 +1938,11 @@
 117.207                               chunk_size_name(i));
 117.208      }
 117.209      Metachunk* chunks = chunks_in_use(i);
 117.210 -    chunk_manager->free_chunks(i)->add_at_head(chunks);
 117.211 +    chunk_manager->return_chunks(i, chunks);
 117.212      set_chunks_in_use(i, NULL);
 117.213      if (TraceMetadataChunkAllocation && Verbose) {
 117.214        gclog_or_tty->print_cr("updated freelist count %d %s",
 117.215 -                             chunk_manager->free_chunks(i)->sum_list_count(),
 117.216 +                             chunk_manager->free_chunks(i)->count(),
 117.217                               chunk_size_name(i));
 117.218      }
 117.219      assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
   118.1 --- a/src/share/vm/memory/tenuredGeneration.cpp	Wed Apr 24 20:55:28 2013 -0400
   118.2 +++ b/src/share/vm/memory/tenuredGeneration.cpp	Wed Apr 24 21:11:02 2013 -0400
   118.3 @@ -39,7 +39,7 @@
   118.4                                       size_t initial_byte_size, int level,
   118.5                                       GenRemSet* remset) :
   118.6    OneContigSpaceCardGeneration(rs, initial_byte_size,
   118.7 -                               MinHeapDeltaBytes, level, remset, NULL)
   118.8 +                               level, remset, NULL)
   118.9  {
  118.10    HeapWord* bottom = (HeapWord*) _virtual_space.low();
  118.11    HeapWord* end    = (HeapWord*) _virtual_space.high();
  118.12 @@ -86,162 +86,6 @@
  118.13    return "tenured generation";
  118.14  }
  118.15  
  118.16 -void TenuredGeneration::compute_new_size() {
  118.17 -  assert(_shrink_factor <= 100, "invalid shrink factor");
  118.18 -  size_t current_shrink_factor = _shrink_factor;
  118.19 -  _shrink_factor = 0;
  118.20 -
  118.21 -  // We don't have floating point command-line arguments
  118.22 -  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  118.23 -  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  118.24 -  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
  118.25 -
  118.26 -  // Compute some numbers about the state of the heap.
  118.27 -  const size_t used_after_gc = used();
  118.28 -  const size_t capacity_after_gc = capacity();
  118.29 -
  118.30 -  const double min_tmp = used_after_gc / maximum_used_percentage;
  118.31 -  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  118.32 -  // Don't shrink less than the initial generation size
  118.33 -  minimum_desired_capacity = MAX2(minimum_desired_capacity,
  118.34 -                                  spec()->init_size());
  118.35 -  assert(used_after_gc <= minimum_desired_capacity, "sanity check");
  118.36 -
  118.37 -  if (PrintGC && Verbose) {
  118.38 -    const size_t free_after_gc = free();
  118.39 -    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
  118.40 -    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
  118.41 -    gclog_or_tty->print_cr("  "
  118.42 -                  "  minimum_free_percentage: %6.2f"
  118.43 -                  "  maximum_used_percentage: %6.2f",
  118.44 -                  minimum_free_percentage,
  118.45 -                  maximum_used_percentage);
  118.46 -    gclog_or_tty->print_cr("  "
  118.47 -                  "   free_after_gc   : %6.1fK"
  118.48 -                  "   used_after_gc   : %6.1fK"
  118.49 -                  "   capacity_after_gc   : %6.1fK",
  118.50 -                  free_after_gc / (double) K,
  118.51 -                  used_after_gc / (double) K,
  118.52 -                  capacity_after_gc / (double) K);
  118.53 -    gclog_or_tty->print_cr("  "
  118.54 -                  "   free_percentage: %6.2f",
  118.55 -                  free_percentage);
  118.56 -  }
  118.57 -
  118.58 -  if (capacity_after_gc < minimum_desired_capacity) {
  118.59 -    // If we have less free space than we want then expand
  118.60 -    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
  118.61 -    // Don't expand unless it's significant
  118.62 -    if (expand_bytes >= _min_heap_delta_bytes) {
  118.63 -      expand(expand_bytes, 0); // safe if expansion fails
  118.64 -    }
  118.65 -    if (PrintGC && Verbose) {
  118.66 -      gclog_or_tty->print_cr("    expanding:"
  118.67 -                    "  minimum_desired_capacity: %6.1fK"
  118.68 -                    "  expand_bytes: %6.1fK"
  118.69 -                    "  _min_heap_delta_bytes: %6.1fK",
  118.70 -                    minimum_desired_capacity / (double) K,
  118.71 -                    expand_bytes / (double) K,
  118.72 -                    _min_heap_delta_bytes / (double) K);
  118.73 -    }
  118.74 -    return;
  118.75 -  }
  118.76 -
  118.77 -  // No expansion, now see if we want to shrink
  118.78 -  size_t shrink_bytes = 0;
  118.79 -  // We would never want to shrink more than this
  118.80 -  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;
  118.81 -
  118.82 -  if (MaxHeapFreeRatio < 100) {
  118.83 -    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
  118.84 -    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
  118.85 -    const double max_tmp = used_after_gc / minimum_used_percentage;
  118.86 -    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
  118.87 -    maximum_desired_capacity = MAX2(maximum_desired_capacity,
  118.88 -                                    spec()->init_size());
  118.89 -    if (PrintGC && Verbose) {
  118.90 -      gclog_or_tty->print_cr("  "
  118.91 -                             "  maximum_free_percentage: %6.2f"
  118.92 -                             "  minimum_used_percentage: %6.2f",
  118.93 -                             maximum_free_percentage,
  118.94 -                             minimum_used_percentage);
  118.95 -      gclog_or_tty->print_cr("  "
  118.96 -                             "  _capacity_at_prologue: %6.1fK"
  118.97 -                             "  minimum_desired_capacity: %6.1fK"
  118.98 -                             "  maximum_desired_capacity: %6.1fK",
  118.99 -                             _capacity_at_prologue / (double) K,
 118.100 -                             minimum_desired_capacity / (double) K,
 118.101 -                             maximum_desired_capacity / (double) K);
 118.102 -    }
 118.103 -    assert(minimum_desired_capacity <= maximum_desired_capacity,
 118.104 -           "sanity check");
 118.105 -
 118.106 -    if (capacity_after_gc > maximum_desired_capacity) {
 118.107 -      // Capacity too large, compute shrinking size
 118.108 -      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
 118.109 -      // We don't want shrink all the way back to initSize if people call
 118.110 -      // System.gc(), because some programs do that between "phases" and then
 118.111 -      // we'd just have to grow the heap up again for the next phase.  So we
 118.112 -      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
 118.113 -      // on the third call, and 100% by the fourth call.  But if we recompute
 118.114 -      // size without shrinking, it goes back to 0%.
 118.115 -      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
 118.116 -      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 118.117 -      if (current_shrink_factor == 0) {
 118.118 -        _shrink_factor = 10;
 118.119 -      } else {
 118.120 -        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
 118.121 -      }
 118.122 -      if (PrintGC && Verbose) {
 118.123 -        gclog_or_tty->print_cr("  "
 118.124 -                      "  shrinking:"
 118.125 -                      "  initSize: %.1fK"
 118.126 -                      "  maximum_desired_capacity: %.1fK",
 118.127 -                      spec()->init_size() / (double) K,
 118.128 -                      maximum_desired_capacity / (double) K);
 118.129 -        gclog_or_tty->print_cr("  "
 118.130 -                      "  shrink_bytes: %.1fK"
 118.131 -                      "  current_shrink_factor: %d"
 118.132 -                      "  new shrink factor: %d"
 118.133 -                      "  _min_heap_delta_bytes: %.1fK",
 118.134 -                      shrink_bytes / (double) K,
 118.135 -                      current_shrink_factor,
 118.136 -                      _shrink_factor,
 118.137 -                      _min_heap_delta_bytes / (double) K);
 118.138 -      }
 118.139 -    }
 118.140 -  }
 118.141 -
 118.142 -  if (capacity_after_gc > _capacity_at_prologue) {
 118.143 -    // We might have expanded for promotions, in which case we might want to
 118.144 -    // take back that expansion if there's room after GC.  That keeps us from
 118.145 -    // stretching the heap with promotions when there's plenty of room.
 118.146 -    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
 118.147 -    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
 118.148 -    // We have two shrinking computations, take the largest
 118.149 -    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
 118.150 -    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
 118.151 -    if (PrintGC && Verbose) {
 118.152 -      gclog_or_tty->print_cr("  "
 118.153 -                             "  aggressive shrinking:"
 118.154 -                             "  _capacity_at_prologue: %.1fK"
 118.155 -                             "  capacity_after_gc: %.1fK"
 118.156 -                             "  expansion_for_promotion: %.1fK"
 118.157 -                             "  shrink_bytes: %.1fK",
 118.158 -                             capacity_after_gc / (double) K,
 118.159 -                             _capacity_at_prologue / (double) K,
 118.160 -                             expansion_for_promotion / (double) K,
 118.161 -                             shrink_bytes / (double) K);
 118.162 -    }
 118.163 -  }
 118.164 -  // Don't shrink unless it's significant
 118.165 -  if (shrink_bytes >= _min_heap_delta_bytes) {
 118.166 -    shrink(shrink_bytes);
 118.167 -  }
 118.168 -  assert(used() == used_after_gc && used_after_gc <= capacity(),
 118.169 -         "sanity check");
 118.170 -}
 118.171 -
 118.172  void TenuredGeneration::gc_prologue(bool full) {
 118.173    _capacity_at_prologue = capacity();
 118.174    _used_at_prologue = used();
 118.175 @@ -312,6 +156,19 @@
 118.176                                          size, is_tlab);
 118.177  }
 118.178  
 118.179 +void TenuredGeneration::compute_new_size() {
 118.180 +  assert_locked_or_safepoint(Heap_lock);
 118.181 +
 118.182 +  // Compute some numbers about the state of the heap.
 118.183 +  const size_t used_after_gc = used();
 118.184 +  const size_t capacity_after_gc = capacity();
 118.185 +
 118.186 +  CardGeneration::compute_new_size();
 118.187 +
 118.188 +  assert(used() == used_after_gc && used_after_gc <= capacity(),
 118.189 +         err_msg("used: " SIZE_FORMAT " used_after_gc: " SIZE_FORMAT
 118.190 +         " capacity: " SIZE_FORMAT, used(), used_after_gc, capacity()));
 118.191 +}
 118.192  void TenuredGeneration::update_gc_stats(int current_level,
 118.193                                          bool full) {
 118.194    // If the next lower level(s) has been collected, gather any statistics
   119.1 --- a/src/share/vm/memory/tenuredGeneration.hpp	Wed Apr 24 20:55:28 2013 -0400
   119.2 +++ b/src/share/vm/memory/tenuredGeneration.hpp	Wed Apr 24 21:11:02 2013 -0400
   119.3 @@ -38,13 +38,6 @@
   119.4  class TenuredGeneration: public OneContigSpaceCardGeneration {
   119.5    friend class VMStructs;
   119.6   protected:
   119.7 -  // current shrinking effect: this damps shrinking when the heap gets empty.
   119.8 -  size_t _shrink_factor;
   119.9 -  // Some statistics from before gc started.
  119.10 -  // These are gathered in the gc_prologue (and should_collect)
  119.11 -  // to control growing/shrinking policy in spite of promotions.
  119.12 -  size_t _capacity_at_prologue;
  119.13 -  size_t _used_at_prologue;
  119.14  
  119.15  #if INCLUDE_ALL_GCS
  119.16    // To support parallel promotion: an array of parallel allocation
  119.17 @@ -80,9 +73,6 @@
  119.18      return !CollectGen0First;
  119.19    }
  119.20  
  119.21 -  // Mark sweep support
  119.22 -  void compute_new_size();
  119.23 -
  119.24    virtual void gc_prologue(bool full);
  119.25    virtual void gc_epilogue(bool full);
  119.26    bool should_collect(bool   full,
  119.27 @@ -93,6 +83,7 @@
  119.28                         bool clear_all_soft_refs,
  119.29                         size_t size,
  119.30                         bool is_tlab);
  119.31 +  virtual void compute_new_size();
  119.32  
  119.33  #if INCLUDE_ALL_GCS
  119.34    // Overrides.
   120.1 --- a/src/share/vm/memory/universe.cpp	Wed Apr 24 20:55:28 2013 -0400
   120.2 +++ b/src/share/vm/memory/universe.cpp	Wed Apr 24 21:11:02 2013 -0400
   120.3 @@ -1326,6 +1326,8 @@
   120.4  static uintptr_t _verify_klass_data[2] = {0, (uintptr_t)-1};
   120.5  
   120.6  
   120.7 +#ifndef PRODUCT
   120.8 +
   120.9  static void calculate_verify_data(uintptr_t verify_data[2],
  120.10                                    HeapWord* low_boundary,
  120.11                                    HeapWord* high_boundary) {
  120.12 @@ -1360,9 +1362,7 @@
  120.13    verify_data[1] = bits;
  120.14  }
  120.15  
  120.16 -
  120.17  // Oop verification (see MacroAssembler::verify_oop)
  120.18 -#ifndef PRODUCT
  120.19  
  120.20  uintptr_t Universe::verify_oop_mask() {
  120.21    MemRegion m = heap()->reserved_region();
   121.1 --- a/src/share/vm/oops/constantPool.cpp	Wed Apr 24 20:55:28 2013 -0400
   121.2 +++ b/src/share/vm/oops/constantPool.cpp	Wed Apr 24 21:11:02 2013 -0400
   121.3 @@ -1378,12 +1378,13 @@
   121.4  
   121.5  // JVMTI GetConstantPool support
   121.6  
   121.7 -// For temporary use until code is stable.
   121.8 -#define DBG(code)
   121.9 +// For debugging of constant pool
  121.10 +const bool debug_cpool = false;
  121.11  
  121.12 -static const char* WARN_MSG = "Must not be such entry!";
  121.13 +#define DBG(code) do { if (debug_cpool) { (code); } } while(0)
  121.14  
  121.15  static void print_cpool_bytes(jint cnt, u1 *bytes) {
  121.16 +  const char* WARN_MSG = "Must not be such entry!";
  121.17    jint size = 0;
  121.18    u2   idx1, idx2;
  121.19  
  121.20 @@ -1669,8 +1670,7 @@
  121.21          idx1 = tbl->symbol_to_value(sym);
  121.22          assert(idx1 != 0, "Have not found a hashtable entry");
  121.23          Bytes::put_Java_u2((address) (bytes+1), idx1);
  121.24 -        DBG(char *str = sym->as_utf8());
  121.25 -        DBG(printf("JVM_CONSTANT_String: idx=#%03hd, %s", idx1, str));
  121.26 +        DBG(printf("JVM_CONSTANT_String: idx=#%03hd, %s", idx1, sym->as_utf8()));
  121.27          break;
  121.28        }
  121.29        case JVM_CONSTANT_Fieldref:
  121.30 @@ -1745,6 +1745,8 @@
  121.31    return (int)(bytes - start_bytes);
  121.32  } /* end copy_cpool_bytes */
  121.33  
  121.34 +#undef DBG
  121.35 +
  121.36  
  121.37  void ConstantPool::set_on_stack(const bool value) {
  121.38    if (value) {
   122.1 --- a/src/share/vm/oops/instanceKlass.cpp	Wed Apr 24 20:55:28 2013 -0400
   122.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Wed Apr 24 21:11:02 2013 -0400
   122.3 @@ -3157,7 +3157,7 @@
   122.4      Array<int>* method_ordering = this->method_ordering();
   122.5      int length = method_ordering->length();
   122.6      if (JvmtiExport::can_maintain_original_method_order() ||
   122.7 -        (UseSharedSpaces && length != 0)) {
   122.8 +        ((UseSharedSpaces || DumpSharedSpaces) && length != 0)) {
   122.9        guarantee(length == methods()->length(), "invalid method ordering length");
  122.10        jlong sum = 0;
  122.11        for (int j = 0; j < length; j++) {
   123.1 --- a/src/share/vm/oops/method.hpp	Wed Apr 24 20:55:28 2013 -0400
   123.2 +++ b/src/share/vm/oops/method.hpp	Wed Apr 24 21:11:02 2013 -0400
   123.3 @@ -816,15 +816,19 @@
   123.4    }
   123.5  
   123.6   public:
   123.7 -  bool  is_not_c1_compilable() const          { return access_flags().is_not_c1_compilable(); }
   123.8 -  void set_not_c1_compilable()                {       _access_flags.set_not_c1_compilable();  }
   123.9 -  bool  is_not_c2_compilable() const          { return access_flags().is_not_c2_compilable(); }
  123.10 -  void set_not_c2_compilable()                {       _access_flags.set_not_c2_compilable();  }
  123.11 +  bool   is_not_c1_compilable() const         { return access_flags().is_not_c1_compilable();  }
  123.12 +  void  set_not_c1_compilable()               {       _access_flags.set_not_c1_compilable();   }
  123.13 +  void clear_not_c1_compilable()              {       _access_flags.clear_not_c1_compilable(); }
  123.14 +  bool   is_not_c2_compilable() const         { return access_flags().is_not_c2_compilable();  }
  123.15 +  void  set_not_c2_compilable()               {       _access_flags.set_not_c2_compilable();   }
  123.16 +  void clear_not_c2_compilable()              {       _access_flags.clear_not_c2_compilable(); }
  123.17  
  123.18 -  bool  is_not_c1_osr_compilable() const      { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  123.19 -  void set_not_c1_osr_compilable()            {       set_not_c1_compilable(); }  // don't waste an accessFlags bit
  123.20 -  bool  is_not_c2_osr_compilable() const      { return access_flags().is_not_c2_osr_compilable(); }
  123.21 -  void set_not_c2_osr_compilable()            {       _access_flags.set_not_c2_osr_compilable();  }
  123.22 +  bool    is_not_c1_osr_compilable() const    { return is_not_c1_compilable(); }  // don't waste an accessFlags bit
  123.23 +  void   set_not_c1_osr_compilable()          {       set_not_c1_compilable(); }  // don't waste an accessFlags bit
  123.24 +  void clear_not_c1_osr_compilable()          {     clear_not_c1_compilable(); }  // don't waste an accessFlags bit
  123.25 +  bool   is_not_c2_osr_compilable() const     { return access_flags().is_not_c2_osr_compilable();  }
  123.26 +  void  set_not_c2_osr_compilable()           {       _access_flags.set_not_c2_osr_compilable();   }
  123.27 +  void clear_not_c2_osr_compilable()          {       _access_flags.clear_not_c2_osr_compilable(); }
  123.28  
  123.29    // Background compilation support
  123.30    bool queued_for_compilation() const  { return access_flags().queued_for_compilation(); }
   124.1 --- a/src/share/vm/oops/methodData.cpp	Wed Apr 24 20:55:28 2013 -0400
   124.2 +++ b/src/share/vm/oops/methodData.cpp	Wed Apr 24 21:11:02 2013 -0400
   124.3 @@ -660,29 +660,9 @@
   124.4    // Set the method back-pointer.
   124.5    _method = method();
   124.6  
   124.7 -  _invocation_counter.init();
   124.8 -  _backedge_counter.init();
   124.9 -  _invocation_counter_start = 0;
  124.10 -  _backedge_counter_start = 0;
  124.11 -  _num_loops = 0;
  124.12 -  _num_blocks = 0;
  124.13 -  _highest_comp_level = 0;
  124.14 -  _highest_osr_comp_level = 0;
  124.15 -  _would_profile = true;
  124.16 +  init();
  124.17    set_creation_mileage(mileage_of(method()));
  124.18  
  124.19 -  // Initialize flags and trap history.
  124.20 -  _nof_decompiles = 0;
  124.21 -  _nof_overflow_recompiles = 0;
  124.22 -  _nof_overflow_traps = 0;
  124.23 -  _eflags = 0;
  124.24 -  _arg_local = 0;
  124.25 -  _arg_stack = 0;
  124.26 -  _arg_returned = 0;
  124.27 -  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  124.28 -  Copy::zero_to_words((HeapWord*) &_trap_hist,
  124.29 -                      sizeof(_trap_hist) / sizeof(HeapWord));
  124.30 -
  124.31    // Go through the bytecodes and allocate and initialize the
  124.32    // corresponding data cells.
  124.33    int data_size = 0;
  124.34 @@ -721,7 +701,27 @@
  124.35    post_initialize(&stream);
  124.36  
  124.37    set_size(object_size);
  124.38 +}
  124.39  
  124.40 +void MethodData::init() {
  124.41 +  _invocation_counter.init();
  124.42 +  _backedge_counter.init();
  124.43 +  _invocation_counter_start = 0;
  124.44 +  _backedge_counter_start = 0;
  124.45 +  _num_loops = 0;
  124.46 +  _num_blocks = 0;
  124.47 +  _highest_comp_level = 0;
  124.48 +  _highest_osr_comp_level = 0;
  124.49 +  _would_profile = true;
  124.50 +
  124.51 +  // Initialize flags and trap history.
  124.52 +  _nof_decompiles = 0;
  124.53 +  _nof_overflow_recompiles = 0;
  124.54 +  _nof_overflow_traps = 0;
  124.55 +  clear_escape_info();
  124.56 +  assert(sizeof(_trap_hist) % sizeof(HeapWord) == 0, "align");
  124.57 +  Copy::zero_to_words((HeapWord*) &_trap_hist,
  124.58 +                      sizeof(_trap_hist) / sizeof(HeapWord));
  124.59  }
  124.60  
  124.61  // Get a measure of how much mileage the method has on it.
   125.1 --- a/src/share/vm/oops/methodData.hpp	Wed Apr 24 20:55:28 2013 -0400
   125.2 +++ b/src/share/vm/oops/methodData.hpp	Wed Apr 24 21:11:02 2013 -0400
   125.3 @@ -1284,8 +1284,8 @@
   125.4      return bytecode_cell_count(code) != no_profile_data;
   125.5    }
   125.6  
   125.7 -  // Perform initialization of a new MethodData*
   125.8 -  void initialize(methodHandle method);
   125.9 +  // reset into original state
  125.10 +  void init();
  125.11  
  125.12    // My size
  125.13    int size_in_bytes() const { return _size; }
  125.14 @@ -1365,6 +1365,7 @@
  125.15    intx arg_stack()                               { return _arg_stack; }
  125.16    intx arg_returned()                            { return _arg_returned; }
  125.17    uint arg_modified(int a)                       { ArgInfoData *aid = arg_info();
  125.18 +                                                   assert(aid != NULL, "arg_info must be not null");
  125.19                                                     assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
  125.20                                                     return aid->arg_modified(a); }
  125.21  
  125.22 @@ -1373,8 +1374,8 @@
  125.23    void set_arg_stack(intx v)                     { _arg_stack = v; }
  125.24    void set_arg_returned(intx v)                  { _arg_returned = v; }
  125.25    void set_arg_modified(int a, uint v)           { ArgInfoData *aid = arg_info();
  125.26 +                                                   assert(aid != NULL, "arg_info must be not null");
  125.27                                                     assert(a >= 0 && a < aid->number_of_args(), "valid argument number");
  125.28 -
  125.29                                                     aid->set_arg_modified(a, v); }
  125.30  
  125.31    void clear_escape_info()                       { _eflags = _arg_local = _arg_stack = _arg_returned = 0; }
   126.1 --- a/src/share/vm/opto/block.cpp	Wed Apr 24 20:55:28 2013 -0400
   126.2 +++ b/src/share/vm/opto/block.cpp	Wed Apr 24 21:11:02 2013 -0400
   126.3 @@ -1028,26 +1028,6 @@
   126.4  }
   126.5  
   126.6  #ifndef PRODUCT
   126.7 -static void edge_dump(GrowableArray<CFGEdge *> *edges) {
   126.8 -  tty->print_cr("---- Edges ----");
   126.9 -  for (int i = 0; i < edges->length(); i++) {
  126.10 -    CFGEdge *e = edges->at(i);
  126.11 -    if (e != NULL) {
  126.12 -      edges->at(i)->dump();
  126.13 -    }
  126.14 -  }
  126.15 -}
  126.16 -
  126.17 -static void trace_dump(Trace *traces[], int count) {
  126.18 -  tty->print_cr("---- Traces ----");
  126.19 -  for (int i = 0; i < count; i++) {
  126.20 -    Trace *tr = traces[i];
  126.21 -    if (tr != NULL) {
  126.22 -      tr->dump();
  126.23 -    }
  126.24 -  }
  126.25 -}
  126.26 -
  126.27  void Trace::dump( ) const {
  126.28    tty->print_cr("Trace (freq %f)", first_block()->_freq);
  126.29    for (Block *b = first_block(); b != NULL; b = next(b)) {
   127.1 --- a/src/share/vm/opto/cfgnode.cpp	Wed Apr 24 20:55:28 2013 -0400
   127.2 +++ b/src/share/vm/opto/cfgnode.cpp	Wed Apr 24 21:11:02 2013 -0400
   127.3 @@ -1,5 +1,5 @@
   127.4  /*
   127.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   127.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   127.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   127.8   *
   127.9   * This code is free software; you can redistribute it and/or modify it
  127.10 @@ -1306,10 +1306,11 @@
  127.11      return NULL;
  127.12  
  127.13    Node *x = n2;
  127.14 -  Node *y = n1->in(1);
  127.15 -  if( n2 == n1->in(1) ) {
  127.16 +  Node *y = NULL;
  127.17 +  if( x == n1->in(1) ) {
  127.18      y = n1->in(2);
  127.19 -  } else if( n2 == n1->in(1) ) {
  127.20 +  } else if( x == n1->in(2) ) {
  127.21 +    y = n1->in(1);
  127.22    } else return NULL;
  127.23  
  127.24    // Not so profitable if compare and add are constants
   128.1 --- a/src/share/vm/opto/chaitin.cpp	Wed Apr 24 20:55:28 2013 -0400
   128.2 +++ b/src/share/vm/opto/chaitin.cpp	Wed Apr 24 21:11:02 2013 -0400
   128.3 @@ -145,6 +145,72 @@
   128.4  
   128.5  #define NUMBUCKS 3
   128.6  
   128.7 +// Straight out of Tarjan's union-find algorithm
   128.8 +uint LiveRangeMap::find_compress(uint lrg) {
   128.9 +  uint cur = lrg;
  128.10 +  uint next = _uf_map[cur];
  128.11 +  while (next != cur) { // Scan chain of equivalences
  128.12 +    assert( next < cur, "always union smaller");
  128.13 +    cur = next; // until find a fixed-point
  128.14 +    next = _uf_map[cur];
  128.15 +  }
  128.16 +
  128.17 +  // Core of union-find algorithm: update chain of
  128.18 +  // equivalences to be equal to the root.
  128.19 +  while (lrg != next) {
  128.20 +    uint tmp = _uf_map[lrg];
  128.21 +    _uf_map.map(lrg, next);
  128.22 +    lrg = tmp;
  128.23 +  }
  128.24 +  return lrg;
  128.25 +}
  128.26 +
  128.27 +// Reset the Union-Find map to identity
  128.28 +void LiveRangeMap::reset_uf_map(uint max_lrg_id) {
  128.29 +  _max_lrg_id= max_lrg_id;
  128.30 +  // Force the Union-Find mapping to be at least this large
  128.31 +  _uf_map.extend(_max_lrg_id, 0);
  128.32 +  // Initialize it to be the ID mapping.
  128.33 +  for (uint i = 0; i < _max_lrg_id; ++i) {
  128.34 +    _uf_map.map(i, i);
  128.35 +  }
  128.36 +}
  128.37 +
  128.38 +// Make all Nodes map directly to their final live range; no need for
  128.39 +// the Union-Find mapping after this call.
  128.40 +void LiveRangeMap::compress_uf_map_for_nodes() {
  128.41 +  // For all Nodes, compress mapping
  128.42 +  uint unique = _names.Size();
  128.43 +  for (uint i = 0; i < unique; ++i) {
  128.44 +    uint lrg = _names[i];
  128.45 +    uint compressed_lrg = find(lrg);
  128.46 +    if (lrg != compressed_lrg) {
  128.47 +      _names.map(i, compressed_lrg);
  128.48 +    }
  128.49 +  }
  128.50 +}
  128.51 +
  128.52 +// Like Find above, but no path compress, so bad asymptotic behavior
  128.53 +uint LiveRangeMap::find_const(uint lrg) const {
  128.54 +  if (!lrg) {
  128.55 +    return lrg; // Ignore the zero LRG
  128.56 +  }
  128.57 +
  128.58 +  // Off the end?  This happens during debugging dumps when you got
  128.59 +  // brand new live ranges but have not told the allocator yet.
  128.60 +  if (lrg >= _max_lrg_id) {
  128.61 +    return lrg;
  128.62 +  }
  128.63 +
  128.64 +  uint next = _uf_map[lrg];
  128.65 +  while (next != lrg) { // Scan chain of equivalences
  128.66 +    assert(next < lrg, "always union smaller");
  128.67 +    lrg = next; // until find a fixed-point
  128.68 +    next = _uf_map[lrg];
  128.69 +  }
  128.70 +  return next;
  128.71 +}
  128.72 +
  128.73  //------------------------------Chaitin----------------------------------------
  128.74  PhaseChaitin::PhaseChaitin(uint unique, PhaseCFG &cfg, Matcher &matcher)
  128.75    : PhaseRegAlloc(unique, cfg, matcher,
  128.76 @@ -153,13 +219,13 @@
  128.77  #else
  128.78         NULL
  128.79  #endif
  128.80 -       ),
  128.81 -    _names(unique), _uf_map(unique),
  128.82 -    _maxlrg(0), _live(0),
  128.83 -    _spilled_once(Thread::current()->resource_area()),
  128.84 -    _spilled_twice(Thread::current()->resource_area()),
  128.85 -    _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0),
  128.86 -    _oldphi(unique)
  128.87 +       )
  128.88 +  , _lrg_map(unique)
  128.89 +  , _live(0)
  128.90 +  , _spilled_once(Thread::current()->resource_area())
  128.91 +  , _spilled_twice(Thread::current()->resource_area())
  128.92 +  , _lo_degree(0), _lo_stk_degree(0), _hi_degree(0), _simplified(0)
  128.93 +  , _oldphi(unique)
  128.94  #ifndef PRODUCT
  128.95    , _trace_spilling(TraceSpilling || C->method_has_option("TraceSpilling"))
  128.96  #endif
  128.97 @@ -168,7 +234,6 @@
  128.98  
  128.99    _high_frequency_lrg = MIN2(float(OPTO_LRG_HIGH_FREQ), _cfg._outer_loop_freq);
 128.100  
 128.101 -  uint i,j;
 128.102    // Build a list of basic blocks, sorted by frequency
 128.103    _blks = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
 128.104    // Experiment with sorting strategies to speed compilation
 128.105 @@ -176,30 +241,30 @@
 128.106    Block **buckets[NUMBUCKS];             // Array of buckets
 128.107    uint    buckcnt[NUMBUCKS];             // Array of bucket counters
 128.108    double  buckval[NUMBUCKS];             // Array of bucket value cutoffs
 128.109 -  for( i = 0; i < NUMBUCKS; i++ ) {
 128.110 -    buckets[i] = NEW_RESOURCE_ARRAY( Block *, _cfg._num_blocks );
 128.111 +  for (uint i = 0; i < NUMBUCKS; i++) {
 128.112 +    buckets[i] = NEW_RESOURCE_ARRAY(Block *, _cfg._num_blocks);
 128.113      buckcnt[i] = 0;
 128.114      // Bump by three orders of magnitude each time
 128.115      cutoff *= 0.001;
 128.116      buckval[i] = cutoff;
 128.117 -    for( j = 0; j < _cfg._num_blocks; j++ ) {
 128.118 +    for (uint j = 0; j < _cfg._num_blocks; j++) {
 128.119        buckets[i][j] = NULL;
 128.120      }
 128.121    }
 128.122    // Sort blocks into buckets
 128.123 -  for( i = 0; i < _cfg._num_blocks; i++ ) {
 128.124 -    for( j = 0; j < NUMBUCKS; j++ ) {
 128.125 -      if( (j == NUMBUCKS-1) || (_cfg._blocks[i]->_freq > buckval[j]) ) {
 128.126 +  for (uint i = 0; i < _cfg._num_blocks; i++) {
 128.127 +    for (uint j = 0; j < NUMBUCKS; j++) {
 128.128 +      if ((j == NUMBUCKS - 1) || (_cfg._blocks[i]->_freq > buckval[j])) {
 128.129          // Assign block to end of list for appropriate bucket
 128.130          buckets[j][buckcnt[j]++] = _cfg._blocks[i];
 128.131 -        break;                      // kick out of inner loop
 128.132 +        break; // kick out of inner loop
 128.133        }
 128.134      }
 128.135    }
 128.136    // Dump buckets into final block array
 128.137    uint blkcnt = 0;
 128.138 -  for( i = 0; i < NUMBUCKS; i++ ) {
 128.139 -    for( j = 0; j < buckcnt[i]; j++ ) {
 128.140 +  for (uint i = 0; i < NUMBUCKS; i++) {
 128.141 +    for (uint j = 0; j < buckcnt[i]; j++) {
 128.142        _blks[blkcnt++] = buckets[i][j];
 128.143      }
 128.144    }
 128.145 @@ -207,6 +272,77 @@
 128.146    assert(blkcnt == _cfg._num_blocks, "Block array not totally filled");
 128.147  }
 128.148  
 128.149 +//------------------------------Union------------------------------------------
 128.150 +// union 2 sets together.
 128.151 +void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
 128.152 +  uint src = _lrg_map.find(src_n);
 128.153 +  uint dst = _lrg_map.find(dst_n);
 128.154 +  assert(src, "");
 128.155 +  assert(dst, "");
 128.156 +  assert(src < _lrg_map.max_lrg_id(), "oob");
 128.157 +  assert(dst < _lrg_map.max_lrg_id(), "oob");
 128.158 +  assert(src < dst, "always union smaller");
 128.159 +  _lrg_map.uf_map(dst, src);
 128.160 +}
 128.161 +
 128.162 +//------------------------------new_lrg----------------------------------------
 128.163 +void PhaseChaitin::new_lrg(const Node *x, uint lrg) {
 128.164 +  // Make the Node->LRG mapping
 128.165 +  _lrg_map.extend(x->_idx,lrg);
 128.166 +  // Make the Union-Find mapping an identity function
 128.167 +  _lrg_map.uf_extend(lrg, lrg);
 128.168 +}
 128.169 +
 128.170 +
 128.171 +bool PhaseChaitin::clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id) {
 128.172 +  Block *bcon = _cfg._bbs[con->_idx];
 128.173 +  uint cindex = bcon->find_node(con);
 128.174 +  Node *con_next = bcon->_nodes[cindex+1];
 128.175 +  if (con_next->in(0) != con || !con_next->is_MachProj()) {
 128.176 +    return false;               // No MachProj's follow
 128.177 +  }
 128.178 +
 128.179 +  // Copy kills after the cloned constant
 128.180 +  Node *kills = con_next->clone();
 128.181 +  kills->set_req(0, copy);
 128.182 +  b->_nodes.insert(idx, kills);
 128.183 +  _cfg._bbs.map(kills->_idx, b);
 128.184 +  new_lrg(kills, max_lrg_id);
 128.185 +  return true;
 128.186 +}
 128.187 +
 128.188 +//------------------------------compact----------------------------------------
 128.189 +// Renumber the live ranges to compact them.  Makes the IFG smaller.
 128.190 +void PhaseChaitin::compact() {
 128.191 +  // Current the _uf_map contains a series of short chains which are headed
 128.192 +  // by a self-cycle.  All the chains run from big numbers to little numbers.
 128.193 +  // The Find() call chases the chains & shortens them for the next Find call.
 128.194 +  // We are going to change this structure slightly.  Numbers above a moving
 128.195 +  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
 128.196 +  // compacted live range with no further chaining.  There are no chains or
 128.197 +  // cycles below 'i', so the Find call no longer works.
 128.198 +  uint j=1;
 128.199 +  uint i;
 128.200 +  for (i = 1; i < _lrg_map.max_lrg_id(); i++) {
 128.201 +    uint lr = _lrg_map.uf_live_range_id(i);
 128.202 +    // Ignore unallocated live ranges
 128.203 +    if (!lr) {
 128.204 +      continue;
 128.205 +    }
 128.206 +    assert(lr <= i, "");
 128.207 +    _lrg_map.uf_map(i, ( lr == i ) ? j++ : _lrg_map.uf_live_range_id(lr));
 128.208 +  }
 128.209 +  // Now change the Node->LR mapping to reflect the compacted names
 128.210 +  uint unique = _lrg_map.size();
 128.211 +  for (i = 0; i < unique; i++) {
 128.212 +    uint lrg_id = _lrg_map.live_range_id(i);
 128.213 +    _lrg_map.map(i, _lrg_map.uf_live_range_id(lrg_id));
 128.214 +  }
 128.215 +
 128.216 +  // Reset the Union-Find mapping
 128.217 +  _lrg_map.reset_uf_map(j);
 128.218 +}
 128.219 +
 128.220  void PhaseChaitin::Register_Allocate() {
 128.221  
 128.222    // Above the OLD FP (and in registers) are the incoming arguments.  Stack
 128.223 @@ -231,14 +367,12 @@
 128.224    // all copy-related live ranges low and then using the max copy-related
 128.225    // live range as a cut-off for LIVE and the IFG.  In other words, I can
 128.226    // build a subset of LIVE and IFG just for copies.
 128.227 -  PhaseLive live(_cfg,_names,&live_arena);
 128.228 +  PhaseLive live(_cfg, _lrg_map.names(), &live_arena);
 128.229  
 128.230    // Need IFG for coalescing and coloring
 128.231 -  PhaseIFG ifg( &live_arena );
 128.232 +  PhaseIFG ifg(&live_arena);
 128.233    _ifg = &ifg;
 128.234  
 128.235 -  if (C->unique() > _names.Size())  _names.extend(C->unique()-1, 0);
 128.236 -
 128.237    // Come out of SSA world to the Named world.  Assign (virtual) registers to
 128.238    // Nodes.  Use the same register for all inputs and the output of PhiNodes
 128.239    // - effectively ending SSA form.  This requires either coalescing live
 128.240 @@ -258,9 +392,9 @@
 128.241      _live = NULL;                 // Mark live as being not available
 128.242      rm.reset_to_mark();           // Reclaim working storage
 128.243      IndexSet::reset_memory(C, &live_arena);
 128.244 -    ifg.init(_maxlrg);            // Empty IFG
 128.245 +    ifg.init(_lrg_map.max_lrg_id()); // Empty IFG
 128.246      gather_lrg_masks( false );    // Collect LRG masks
 128.247 -    live.compute( _maxlrg );      // Compute liveness
 128.248 +    live.compute(_lrg_map.max_lrg_id()); // Compute liveness
 128.249      _live = &live;                // Mark LIVE as being available
 128.250    }
 128.251  
 128.252 @@ -270,19 +404,19 @@
 128.253    // across any GC point where the derived value is live.  So this code looks
 128.254    // at all the GC points, and "stretches" the live range of any base pointer
 128.255    // to the GC point.
 128.256 -  if( stretch_base_pointer_live_ranges(&live_arena) ) {
 128.257 -    NOT_PRODUCT( Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler); )
 128.258 +  if (stretch_base_pointer_live_ranges(&live_arena)) {
 128.259 +    NOT_PRODUCT(Compile::TracePhase t3("computeLive (sbplr)", &_t_computeLive, TimeCompiler);)
 128.260      // Since some live range stretched, I need to recompute live
 128.261      _live = NULL;
 128.262      rm.reset_to_mark();         // Reclaim working storage
 128.263      IndexSet::reset_memory(C, &live_arena);
 128.264 -    ifg.init(_maxlrg);
 128.265 -    gather_lrg_masks( false );
 128.266 -    live.compute( _maxlrg );
 128.267 +    ifg.init(_lrg_map.max_lrg_id());
 128.268 +    gather_lrg_masks(false);
 128.269 +    live.compute(_lrg_map.max_lrg_id());
 128.270      _live = &live;
 128.271    }
 128.272    // Create the interference graph using virtual copies
 128.273 -  build_ifg_virtual( );  // Include stack slots this time
 128.274 +  build_ifg_virtual();  // Include stack slots this time
 128.275  
 128.276    // Aggressive (but pessimistic) copy coalescing.
 128.277    // This pass works on virtual copies.  Any virtual copies which are not
 128.278 @@ -296,8 +430,8 @@
 128.279      // given Node and search them for an instance, i.e., time O(#MaxLRG)).
 128.280      _ifg->SquareUp();
 128.281  
 128.282 -    PhaseAggressiveCoalesce coalesce( *this );
 128.283 -    coalesce.coalesce_driver( );
 128.284 +    PhaseAggressiveCoalesce coalesce(*this);
 128.285 +    coalesce.coalesce_driver();
 128.286      // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
 128.287      // not match the Phi itself, insert a copy.
 128.288      coalesce.insert_copies(_matcher);
 128.289 @@ -310,28 +444,36 @@
 128.290      _live = NULL;
 128.291      rm.reset_to_mark();           // Reclaim working storage
 128.292      IndexSet::reset_memory(C, &live_arena);
 128.293 -    ifg.init(_maxlrg);
 128.294 +    ifg.init(_lrg_map.max_lrg_id());
 128.295      gather_lrg_masks( true );
 128.296 -    live.compute( _maxlrg );
 128.297 +    live.compute(_lrg_map.max_lrg_id());
 128.298      _live = &live;
 128.299    }
 128.300  
 128.301    // Build physical interference graph
 128.302    uint must_spill = 0;
 128.303 -  must_spill = build_ifg_physical( &live_arena );
 128.304 +  must_spill = build_ifg_physical(&live_arena);
 128.305    // If we have a guaranteed spill, might as well spill now
 128.306 -  if( must_spill ) {
 128.307 -    if( !_maxlrg ) return;
 128.308 +  if (must_spill) {
 128.309 +    if(!_lrg_map.max_lrg_id()) {
 128.310 +      return;
 128.311 +    }
 128.312      // Bail out if unique gets too large (ie - unique > MaxNodeLimit)
 128.313      C->check_node_count(10*must_spill, "out of nodes before split");
 128.314 -    if (C->failing())  return;
 128.315 -    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
 128.316 +    if (C->failing()) {
 128.317 +      return;
 128.318 +    }
 128.319 +
 128.320 +    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
 128.321 +    _lrg_map.set_max_lrg_id(new_max_lrg_id);
 128.322      // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
 128.323      // or we failed to split
 128.324      C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after physical split");
 128.325 -    if (C->failing())  return;
 128.326 +    if (C->failing()) {
 128.327 +      return;
 128.328 +    }
 128.329  
 128.330 -    NOT_PRODUCT( C->verify_graph_edges(); )
 128.331 +    NOT_PRODUCT(C->verify_graph_edges();)
 128.332  
 128.333      compact();                  // Compact LRGs; return new lower max lrg
 128.334  
 128.335 @@ -340,23 +482,23 @@
 128.336        _live = NULL;
 128.337        rm.reset_to_mark();         // Reclaim working storage
 128.338        IndexSet::reset_memory(C, &live_arena);
 128.339 -      ifg.init(_maxlrg);          // Build a new interference graph
 128.340 +      ifg.init(_lrg_map.max_lrg_id()); // Build a new interference graph
 128.341        gather_lrg_masks( true );   // Collect intersect mask
 128.342 -      live.compute( _maxlrg );    // Compute LIVE
 128.343 +      live.compute(_lrg_map.max_lrg_id()); // Compute LIVE
 128.344        _live = &live;
 128.345      }
 128.346 -    build_ifg_physical( &live_arena );
 128.347 +    build_ifg_physical(&live_arena);
 128.348      _ifg->SquareUp();
 128.349      _ifg->Compute_Effective_Degree();
 128.350      // Only do conservative coalescing if requested
 128.351 -    if( OptoCoalesce ) {
 128.352 +    if (OptoCoalesce) {
 128.353        // Conservative (and pessimistic) copy coalescing of those spills
 128.354 -      PhaseConservativeCoalesce coalesce( *this );
 128.355 +      PhaseConservativeCoalesce coalesce(*this);
 128.356        // If max live ranges greater than cutoff, don't color the stack.
 128.357        // This cutoff can be larger than below since it is only done once.
 128.358 -      coalesce.coalesce_driver( );
 128.359 +      coalesce.coalesce_driver();
 128.360      }
 128.361 -    compress_uf_map_for_nodes();
 128.362 +    _lrg_map.compress_uf_map_for_nodes();
 128.363  
 128.364  #ifdef ASSERT
 128.365      verify(&live_arena, true);
 128.366 @@ -390,13 +532,18 @@
 128.367        }
 128.368      }
 128.369  
 128.370 -    if( !_maxlrg ) return;
 128.371 -    _maxlrg = Split(_maxlrg, &split_arena);  // Split spilling LRG everywhere
 128.372 +    if (!_lrg_map.max_lrg_id()) {
 128.373 +      return;
 128.374 +    }
 128.375 +    uint new_max_lrg_id = Split(_lrg_map.max_lrg_id(), &split_arena);  // Split spilling LRG everywhere
 128.376 +    _lrg_map.set_max_lrg_id(new_max_lrg_id);
 128.377      // Bail out if unique gets too large (ie - unique > MaxNodeLimit - 2*NodeLimitFudgeFactor)
 128.378 -    C->check_node_count(2*NodeLimitFudgeFactor, "out of nodes after split");
 128.379 -    if (C->failing())  return;
 128.380 +    C->check_node_count(2 * NodeLimitFudgeFactor, "out of nodes after split");
 128.381 +    if (C->failing()) {
 128.382 +      return;
 128.383 +    }
 128.384  
 128.385 -    compact();                  // Compact LRGs; return new lower max lrg
 128.386 +    compact(); // Compact LRGs; return new lower max lrg
 128.387  
 128.388      // Nuke the live-ness and interference graph and LiveRanGe info
 128.389      {
 128.390 @@ -404,26 +551,26 @@
 128.391        _live = NULL;
 128.392        rm.reset_to_mark();         // Reclaim working storage
 128.393        IndexSet::reset_memory(C, &live_arena);
 128.394 -      ifg.init(_maxlrg);
 128.395 +      ifg.init(_lrg_map.max_lrg_id());
 128.396  
 128.397        // Create LiveRanGe array.
 128.398        // Intersect register masks for all USEs and DEFs
 128.399 -      gather_lrg_masks( true );
 128.400 -      live.compute( _maxlrg );
 128.401 +      gather_lrg_masks(true);
 128.402 +      live.compute(_lrg_map.max_lrg_id());
 128.403        _live = &live;
 128.404      }
 128.405 -    must_spill = build_ifg_physical( &live_arena );
 128.406 +    must_spill = build_ifg_physical(&live_arena);
 128.407      _ifg->SquareUp();
 128.408      _ifg->Compute_Effective_Degree();
 128.409  
 128.410      // Only do conservative coalescing if requested
 128.411 -    if( OptoCoalesce ) {
 128.412 +    if (OptoCoalesce) {
 128.413        // Conservative (and pessimistic) copy coalescing
 128.414 -      PhaseConservativeCoalesce coalesce( *this );
 128.415 +      PhaseConservativeCoalesce coalesce(*this);
 128.416        // Check for few live ranges determines how aggressive coalesce is.
 128.417 -      coalesce.coalesce_driver( );
 128.418 +      coalesce.coalesce_driver();
 128.419      }
 128.420 -    compress_uf_map_for_nodes();
 128.421 +    _lrg_map.compress_uf_map_for_nodes();
 128.422  #ifdef ASSERT
 128.423      verify(&live_arena, true);
 128.424  #endif
 128.425 @@ -435,7 +582,7 @@
 128.426  
 128.427      // Select colors by re-inserting LRGs back into the IFG in reverse order.
 128.428      // Return whether or not something spills.
 128.429 -    spills = Select( );
 128.430 +    spills = Select();
 128.431    }
 128.432  
 128.433    // Count number of Simplify-Select trips per coloring success.
 128.434 @@ -452,9 +599,12 @@
 128.435  
 128.436    // max_reg is past the largest *register* used.
 128.437    // Convert that to a frame_slot number.
 128.438 -  if( _max_reg <= _matcher._new_SP )
 128.439 +  if (_max_reg <= _matcher._new_SP) {
 128.440      _framesize = C->out_preserve_stack_slots();
 128.441 -  else _framesize = _max_reg -_matcher._new_SP;
 128.442 +  }
 128.443 +  else {
 128.444 +    _framesize = _max_reg -_matcher._new_SP;
 128.445 +  }
 128.446    assert((int)(_matcher._new_SP+_framesize) >= (int)_matcher._out_arg_limit, "framesize must be large enough");
 128.447  
 128.448    // This frame must preserve the required fp alignment
 128.449 @@ -462,8 +612,9 @@
 128.450    assert( _framesize >= 0 && _framesize <= 1000000, "sanity check" );
 128.451  #ifndef PRODUCT
 128.452    _total_framesize += _framesize;
 128.453 -  if( (int)_framesize > _max_framesize )
 128.454 +  if ((int)_framesize > _max_framesize) {
 128.455      _max_framesize = _framesize;
 128.456 +  }
 128.457  #endif
 128.458  
 128.459    // Convert CISC spills
 128.460 @@ -475,15 +626,17 @@
 128.461      log->elem("regalloc attempts='%d' success='%d'", _trip_cnt, !C->failing());
 128.462    }
 128.463  
 128.464 -  if (C->failing())  return;
 128.465 +  if (C->failing()) {
 128.466 +    return;
 128.467 +  }
 128.468  
 128.469 -  NOT_PRODUCT( C->verify_graph_edges(); )
 128.470 +  NOT_PRODUCT(C->verify_graph_edges();)
 128.471  
 128.472    // Move important info out of the live_arena to longer lasting storage.
 128.473 -  alloc_node_regs(_names.Size());
 128.474 -  for (uint i=0; i < _names.Size(); i++) {
 128.475 -    if (_names[i]) {           // Live range associated with Node?
 128.476 -      LRG &lrg = lrgs(_names[i]);
 128.477 +  alloc_node_regs(_lrg_map.size());
 128.478 +  for (uint i=0; i < _lrg_map.size(); i++) {
 128.479 +    if (_lrg_map.live_range_id(i)) { // Live range associated with Node?
 128.480 +      LRG &lrg = lrgs(_lrg_map.live_range_id(i));
 128.481        if (!lrg.alive()) {
 128.482          set_bad(i);
 128.483        } else if (lrg.num_regs() == 1) {
 128.484 @@ -537,11 +690,11 @@
 128.485        Node *n = b->_nodes[j];
 128.486        // Pre-color to the zero live range, or pick virtual register
 128.487        const RegMask &rm = n->out_RegMask();
 128.488 -      _names.map( n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0 );
 128.489 +      _lrg_map.map(n->_idx, rm.is_NotEmpty() ? lr_counter++ : 0);
 128.490      }
 128.491    }
 128.492    // Reset the Union-Find mapping to be identity
 128.493 -  reset_uf_map(lr_counter);
 128.494 +  _lrg_map.reset_uf_map(lr_counter);
 128.495  }
 128.496  
 128.497  
 128.498 @@ -551,7 +704,7 @@
 128.499  void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
 128.500  
 128.501    // Nail down the frame pointer live range
 128.502 -  uint fp_lrg = n2lidx(_cfg._root->in(1)->in(TypeFunc::FramePtr));
 128.503 +  uint fp_lrg = _lrg_map.live_range_id(_cfg._root->in(1)->in(TypeFunc::FramePtr));
 128.504    lrgs(fp_lrg)._cost += 1e12;   // Cost is infinite
 128.505  
 128.506    // For all blocks
 128.507 @@ -566,14 +719,14 @@
 128.508        uint idx = n->is_Copy();
 128.509  
 128.510        // Get virtual register number, same as LiveRanGe index
 128.511 -      uint vreg = n2lidx(n);
 128.512 +      uint vreg = _lrg_map.live_range_id(n);
 128.513        LRG &lrg = lrgs(vreg);
 128.514        if( vreg ) {              // No vreg means un-allocable (e.g. memory)
 128.515  
 128.516          // Collect has-copy bit
 128.517          if( idx ) {
 128.518            lrg._has_copy = 1;
 128.519 -          uint clidx = n2lidx(n->in(idx));
 128.520 +          uint clidx = _lrg_map.live_range_id(n->in(idx));
 128.521            LRG &copy_src = lrgs(clidx);
 128.522            copy_src._has_copy = 1;
 128.523          }
 128.524 @@ -773,8 +926,10 @@
 128.525        }
 128.526        // Prepare register mask for each input
 128.527        for( uint k = input_edge_start; k < cnt; k++ ) {
 128.528 -        uint vreg = n2lidx(n->in(k));
 128.529 -        if( !vreg ) continue;
 128.530 +        uint vreg = _lrg_map.live_range_id(n->in(k));
 128.531 +        if (!vreg) {
 128.532 +          continue;
 128.533 +        }
 128.534  
 128.535          // If this instruction is CISC Spillable, add the flags
 128.536          // bit to its appropriate input
 128.537 @@ -857,7 +1012,7 @@
 128.538    } // end for all blocks
 128.539  
 128.540    // Final per-liverange setup
 128.541 -  for (uint i2=0; i2<_maxlrg; i2++) {
 128.542 +  for (uint i2 = 0; i2 < _lrg_map.max_lrg_id(); i2++) {
 128.543      LRG &lrg = lrgs(i2);
 128.544      assert(!lrg._is_vector || !lrg._fat_proj, "sanity");
 128.545      if (lrg.num_regs() > 1 && !lrg._fat_proj) {
 128.546 @@ -879,7 +1034,7 @@
 128.547  // The bit is checked in Simplify.
 128.548  void PhaseChaitin::set_was_low() {
 128.549  #ifdef ASSERT
 128.550 -  for( uint i = 1; i < _maxlrg; i++ ) {
 128.551 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
 128.552      int size = lrgs(i).num_regs();
 128.553      uint old_was_lo = lrgs(i)._was_lo;
 128.554      lrgs(i)._was_lo = 0;
 128.555 @@ -913,7 +1068,7 @@
 128.556  // Compute cost/area ratio, in case we spill.  Build the lo-degree list.
 128.557  void PhaseChaitin::cache_lrg_info( ) {
 128.558  
 128.559 -  for( uint i = 1; i < _maxlrg; i++ ) {
 128.560 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
 128.561      LRG &lrg = lrgs(i);
 128.562  
 128.563      // Check for being of low degree: means we can be trivially colored.
 128.564 @@ -949,10 +1104,10 @@
 128.565  
 128.566    // Warm up the lo-degree no-copy list
 128.567    int lo_no_copy = 0;
 128.568 -  for( uint i = 1; i < _maxlrg; i++ ) {
 128.569 -    if( (lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
 128.570 +  for (uint i = 1; i < _lrg_map.max_lrg_id(); i++) {
 128.571 +    if ((lrgs(i).lo_degree() && !lrgs(i)._has_copy) ||
 128.572          !lrgs(i).alive() ||
 128.573 -        lrgs(i)._must_spill ) {
 128.574 +        lrgs(i)._must_spill) {
 128.575        lrgs(i)._next = lo_no_copy;
 128.576        lo_no_copy = i;
 128.577      }
 128.578 @@ -1163,7 +1318,7 @@
 128.579  OptoReg::Name PhaseChaitin::bias_color( LRG &lrg, int chunk ) {
 128.580  
 128.581    // Check for "at_risk" LRG's
 128.582 -  uint risk_lrg = Find(lrg._risk_bias);
 128.583 +  uint risk_lrg = _lrg_map.find(lrg._risk_bias);
 128.584    if( risk_lrg != 0 ) {
 128.585      // Walk the colored neighbors of the "at_risk" candidate
 128.586      // Choose a color which is both legal and already taken by a neighbor
 128.587 @@ -1179,7 +1334,7 @@
 128.588      }
 128.589    }
 128.590  
 128.591 -  uint copy_lrg = Find(lrg._copy_bias);
 128.592 +  uint copy_lrg = _lrg_map.find(lrg._copy_bias);
 128.593    if( copy_lrg != 0 ) {
 128.594      // If he has a color,
 128.595      if( !(*(_ifg->_yanked))[copy_lrg] ) {
 128.596 @@ -1423,10 +1578,10 @@
 128.597  void PhaseChaitin::copy_was_spilled( Node *src, Node *dst ) {
 128.598    if( _spilled_once.test(src->_idx) ) {
 128.599      _spilled_once.set(dst->_idx);
 128.600 -    lrgs(Find(dst))._was_spilled1 = 1;
 128.601 +    lrgs(_lrg_map.find(dst))._was_spilled1 = 1;
 128.602      if( _spilled_twice.test(src->_idx) ) {
 128.603        _spilled_twice.set(dst->_idx);
 128.604 -      lrgs(Find(dst))._was_spilled2 = 1;
 128.605 +      lrgs(_lrg_map.find(dst))._was_spilled2 = 1;
 128.606      }
 128.607    }
 128.608  }
 128.609 @@ -1471,7 +1626,7 @@
 128.610          MachNode *mach = n->as_Mach();
 128.611          inp = mach->operand_index(inp);
 128.612          Node *src = n->in(inp);   // Value to load or store
 128.613 -        LRG &lrg_cisc = lrgs( Find_const(src) );
 128.614 +        LRG &lrg_cisc = lrgs(_lrg_map.find_const(src));
 128.615          OptoReg::Name src_reg = lrg_cisc.reg();
 128.616          // Doubles record the HIGH register of an adjacent pair.
 128.617          src_reg = OptoReg::add(src_reg,1-lrg_cisc.num_regs());
 128.618 @@ -1554,9 +1709,9 @@
 128.619        Block *startb = _cfg._bbs[C->top()->_idx];
 128.620        startb->_nodes.insert(startb->find_node(C->top()), base );
 128.621        _cfg._bbs.map( base->_idx, startb );
 128.622 -      assert (n2lidx(base) == 0, "should not have LRG yet");
 128.623 +      assert(_lrg_map.live_range_id(base) == 0, "should not have LRG yet");
 128.624      }
 128.625 -    if (n2lidx(base) == 0) {
 128.626 +    if (_lrg_map.live_range_id(base) == 0) {
 128.627        new_lrg(base, maxlrg++);
 128.628      }
 128.629      assert(base->in(0) == _cfg._root &&
 128.630 @@ -1566,7 +1721,7 @@
 128.631    }
 128.632  
 128.633    // Check for AddP-related opcodes
 128.634 -  if( !derived->is_Phi() ) {
 128.635 +  if (!derived->is_Phi()) {
 128.636      assert(derived->as_Mach()->ideal_Opcode() == Op_AddP, err_msg_res("but is: %s", derived->Name()));
 128.637      Node *base = derived->in(AddPNode::Base);
 128.638      derived_base_map[derived->_idx] = base;
 128.639 @@ -1629,9 +1784,9 @@
 128.640  // base pointer that is live across the Safepoint for oopmap building.  The
 128.641  // edge pairs get added in after sfpt->jvmtail()->oopoff(), but are in the
 128.642  // required edge set.
 128.643 -bool PhaseChaitin::stretch_base_pointer_live_ranges( ResourceArea *a ) {
 128.644 +bool PhaseChaitin::stretch_base_pointer_live_ranges(ResourceArea *a) {
 128.645    int must_recompute_live = false;
 128.646 -  uint maxlrg = _maxlrg;
 128.647 +  uint maxlrg = _lrg_map.max_lrg_id();
 128.648    Node **derived_base_map = (Node**)a->Amalloc(sizeof(Node*)*C->unique());
 128.649    memset( derived_base_map, 0, sizeof(Node*)*C->unique() );
 128.650  
 128.651 @@ -1669,15 +1824,18 @@
 128.652        }
 128.653  
 128.654        // Get value being defined
 128.655 -      uint lidx = n2lidx(n);
 128.656 -      if( lidx && lidx < _maxlrg /* Ignore the occasional brand-new live range */) {
 128.657 +      uint lidx = _lrg_map.live_range_id(n);
 128.658 +      // Ignore the occasional brand-new live range
 128.659 +      if (lidx && lidx < _lrg_map.max_lrg_id()) {
 128.660          // Remove from live-out set
 128.661          liveout.remove(lidx);
 128.662  
 128.663          // Copies do not define a new value and so do not interfere.
 128.664          // Remove the copies source from the liveout set before interfering.
 128.665          uint idx = n->is_Copy();
 128.666 -        if( idx ) liveout.remove( n2lidx(n->in(idx)) );
 128.667 +        if (idx) {
 128.668 +          liveout.remove(_lrg_map.live_range_id(n->in(idx)));
 128.669 +        }
 128.670        }
 128.671  
 128.672        // Found a safepoint?
 128.673 @@ -1695,21 +1853,21 @@
 128.674                    derived->bottom_type()->make_ptr()->is_ptr()->_offset == 0, "sanity");
 128.675            // If its an OOP with a non-zero offset, then it is derived.
 128.676            if( tj && tj->_offset != 0 && tj->isa_oop_ptr() ) {
 128.677 -            Node *base = find_base_for_derived( derived_base_map, derived, maxlrg );
 128.678 -            assert( base->_idx < _names.Size(), "" );
 128.679 +            Node *base = find_base_for_derived(derived_base_map, derived, maxlrg);
 128.680 +            assert(base->_idx < _lrg_map.size(), "");
 128.681              // Add reaching DEFs of derived pointer and base pointer as a
 128.682              // pair of inputs
 128.683 -            n->add_req( derived );
 128.684 -            n->add_req( base );
 128.685 +            n->add_req(derived);
 128.686 +            n->add_req(base);
 128.687  
 128.688              // See if the base pointer is already live to this point.
 128.689              // Since I'm working on the SSA form, live-ness amounts to
 128.690              // reaching def's.  So if I find the base's live range then
 128.691              // I know the base's def reaches here.
 128.692 -            if( (n2lidx(base) >= _maxlrg ||// (Brand new base (hence not live) or
 128.693 -                 !liveout.member( n2lidx(base) ) ) && // not live) AND
 128.694 -                 (n2lidx(base) > 0)                && // not a constant
 128.695 -                 _cfg._bbs[base->_idx] != b ) {     //  base not def'd in blk)
 128.696 +            if ((_lrg_map.live_range_id(base) >= _lrg_map.max_lrg_id() || // (Brand new base (hence not live) or
 128.697 +                 !liveout.member(_lrg_map.live_range_id(base))) && // not live) AND
 128.698 +                 (_lrg_map.live_range_id(base) > 0) && // not a constant
 128.699 +                 _cfg._bbs[base->_idx] != b) { // base not def'd in blk)
 128.700                // Base pointer is not currently live.  Since I stretched
 128.701                // the base pointer to here and it crosses basic-block
 128.702                // boundaries, the global live info is now incorrect.
 128.703 @@ -1721,11 +1879,12 @@
 128.704        } // End of if found a GC point
 128.705  
 128.706        // Make all inputs live
 128.707 -      if( !n->is_Phi() ) {      // Phi function uses come from prior block
 128.708 -        for( uint k = 1; k < n->req(); k++ ) {
 128.709 -          uint lidx = n2lidx(n->in(k));
 128.710 -          if( lidx < _maxlrg )
 128.711 -            liveout.insert( lidx );
 128.712 +      if (!n->is_Phi()) {      // Phi function uses come from prior block
 128.713 +        for (uint k = 1; k < n->req(); k++) {
 128.714 +          uint lidx = _lrg_map.live_range_id(n->in(k));
 128.715 +          if (lidx < _lrg_map.max_lrg_id()) {
 128.716 +            liveout.insert(lidx);
 128.717 +          }
 128.718          }
 128.719        }
 128.720  
 128.721 @@ -1733,11 +1892,12 @@
 128.722      liveout.clear();  // Free the memory used by liveout.
 128.723  
 128.724    } // End of forall blocks
 128.725 -  _maxlrg = maxlrg;
 128.726 +  _lrg_map.set_max_lrg_id(maxlrg);
 128.727  
 128.728    // If I created a new live range I need to recompute live
 128.729 -  if( maxlrg != _ifg->_maxlrg )
 128.730 +  if (maxlrg != _ifg->_maxlrg) {
 128.731      must_recompute_live = true;
 128.732 +  }
 128.733  
 128.734    return must_recompute_live != 0;
 128.735  }
 128.736 @@ -1745,16 +1905,17 @@
 128.737  
 128.738  //------------------------------add_reference----------------------------------
 128.739  // Extend the node to LRG mapping
 128.740 -void PhaseChaitin::add_reference( const Node *node, const Node *old_node ) {
 128.741 -  _names.extend( node->_idx, n2lidx(old_node) );
 128.742 +
 128.743 +void PhaseChaitin::add_reference(const Node *node, const Node *old_node) {
 128.744 +  _lrg_map.extend(node->_idx, _lrg_map.live_range_id(old_node));
 128.745  }
 128.746  
 128.747  //------------------------------dump-------------------------------------------
 128.748  #ifndef PRODUCT
 128.749 -void PhaseChaitin::dump( const Node *n ) const {
 128.750 -  uint r = (n->_idx < _names.Size() ) ? Find_const(n) : 0;
 128.751 +void PhaseChaitin::dump(const Node *n) const {
 128.752 +  uint r = (n->_idx < _lrg_map.size()) ? _lrg_map.find_const(n) : 0;
 128.753    tty->print("L%d",r);
 128.754 -  if( r && n->Opcode() != Op_Phi ) {
 128.755 +  if (r && n->Opcode() != Op_Phi) {
 128.756      if( _node_regs ) {          // Got a post-allocation copy of allocation?
 128.757        tty->print("[");
 128.758        OptoReg::Name second = get_reg_second(n);
 128.759 @@ -1775,11 +1936,13 @@
 128.760    tty->print("/N%d\t",n->_idx);
 128.761    tty->print("%s === ", n->Name());
 128.762    uint k;
 128.763 -  for( k = 0; k < n->req(); k++) {
 128.764 +  for (k = 0; k < n->req(); k++) {
 128.765      Node *m = n->in(k);
 128.766 -    if( !m ) tty->print("_ ");
 128.767 +    if (!m) {
 128.768 +      tty->print("_ ");
 128.769 +    }
 128.770      else {
 128.771 -      uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
 128.772 +      uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
 128.773        tty->print("L%d",r);
 128.774        // Data MultiNode's can have projections with no real registers.
 128.775        // Don't die while dumping them.
 128.776 @@ -1810,8 +1973,10 @@
 128.777    if( k < n->len() && n->in(k) ) tty->print("| ");
 128.778    for( ; k < n->len(); k++ ) {
 128.779      Node *m = n->in(k);
 128.780 -    if( !m ) break;
 128.781 -    uint r = (m->_idx < _names.Size() ) ? Find_const(m) : 0;
 128.782 +    if(!m) {
 128.783 +      break;
 128.784 +    }
 128.785 +    uint r = (m->_idx < _lrg_map.size()) ? _lrg_map.find_const(m) : 0;
 128.786      tty->print("L%d",r);
 128.787      tty->print("/N%d ",m->_idx);
 128.788    }
 128.789 @@ -1839,7 +2004,7 @@
 128.790      tty->print("{");
 128.791      uint i;
 128.792      while ((i = elements.next()) != 0) {
 128.793 -      tty->print("L%d ", Find_const(i));
 128.794 +      tty->print("L%d ", _lrg_map.find_const(i));
 128.795      }
 128.796      tty->print_cr("}");
 128.797    }
 128.798 @@ -1863,10 +2028,14 @@
 128.799  
 128.800    // Dump LRG array
 128.801    tty->print("--- Live RanGe Array ---\n");
 128.802 -  for(uint i2 = 1; i2 < _maxlrg; i2++ ) {
 128.803 +  for (uint i2 = 1; i2 < _lrg_map.max_lrg_id(); i2++) {
 128.804      tty->print("L%d: ",i2);
 128.805 -    if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
 128.806 -    else tty->print_cr("new LRG");
 128.807 +    if (i2 < _ifg->_maxlrg) {
 128.808 +      lrgs(i2).dump();
 128.809 +    }
 128.810 +    else {
 128.811 +      tty->print_cr("new LRG");
 128.812 +    }
 128.813    }
 128.814    tty->print_cr("");
 128.815  
 128.816 @@ -1939,7 +2108,7 @@
 128.817      // Post allocation, use direct mappings, no LRG info available
 128.818      print_reg( get_reg_first(n), this, buf );
 128.819    } else {
 128.820 -    uint lidx = Find_const(n); // Grab LRG number
 128.821 +    uint lidx = _lrg_map.find_const(n); // Grab LRG number
 128.822      if( !_ifg ) {
 128.823        sprintf(buf,"L%d",lidx);  // No register binding yet
 128.824      } else if( !lidx ) {        // Special, not allocated value
 128.825 @@ -1968,7 +2137,7 @@
 128.826    if( WizardMode && (PrintCompilation || PrintOpto) ) {
 128.827      // Display which live ranges need to be split and the allocator's state
 128.828      tty->print_cr("Graph-Coloring Iteration %d will split the following live ranges", _trip_cnt);
 128.829 -    for( uint bidx = 1; bidx < _maxlrg; bidx++ ) {
 128.830 +    for (uint bidx = 1; bidx < _lrg_map.max_lrg_id(); bidx++) {
 128.831        if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
 128.832          tty->print("L%d: ", bidx);
 128.833          lrgs(bidx).dump();
 128.834 @@ -2099,14 +2268,17 @@
 128.835  void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
 128.836    tty->print_cr("---dump of L%d---",lidx);
 128.837  
 128.838 -  if( _ifg ) {
 128.839 -    if( lidx >= _maxlrg ) {
 128.840 +  if (_ifg) {
 128.841 +    if (lidx >= _lrg_map.max_lrg_id()) {
 128.842        tty->print("Attempt to print live range index beyond max live range.\n");
 128.843        return;
 128.844      }
 128.845      tty->print("L%d: ",lidx);
 128.846 -    if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( );
 128.847 -    else tty->print_cr("new LRG");
 128.848 +    if (lidx < _ifg->_maxlrg) {
 128.849 +      lrgs(lidx).dump();
 128.850 +    } else {
 128.851 +      tty->print_cr("new LRG");
 128.852 +    }
 128.853    }
 128.854    if( _ifg && lidx < _ifg->_maxlrg) {
 128.855      tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
 128.856 @@ -2121,8 +2293,8 @@
 128.857      // For all instructions
 128.858      for( uint j = 0; j < b->_nodes.size(); j++ ) {
 128.859        Node *n = b->_nodes[j];
 128.860 -      if( Find_const(n) == lidx ) {
 128.861 -        if( !dump_once++ ) {
 128.862 +      if (_lrg_map.find_const(n) == lidx) {
 128.863 +        if (!dump_once++) {
 128.864            tty->cr();
 128.865            b->dump_head( &_cfg._bbs );
 128.866          }
 128.867 @@ -2133,11 +2305,13 @@
 128.868          uint cnt = n->req();
 128.869          for( uint k = 1; k < cnt; k++ ) {
 128.870            Node *m = n->in(k);
 128.871 -          if (!m)  continue;  // be robust in the dumper
 128.872 -          if( Find_const(m) == lidx ) {
 128.873 -            if( !dump_once++ ) {
 128.874 +          if (!m)  {
 128.875 +            continue;  // be robust in the dumper
 128.876 +          }
 128.877 +          if (_lrg_map.find_const(m) == lidx) {
 128.878 +            if (!dump_once++) {
 128.879                tty->cr();
 128.880 -              b->dump_head( &_cfg._bbs );
 128.881 +              b->dump_head(&_cfg._bbs);
 128.882              }
 128.883              dump(n);
 128.884            }
   129.1 --- a/src/share/vm/opto/chaitin.hpp	Wed Apr 24 20:55:28 2013 -0400
   129.2 +++ b/src/share/vm/opto/chaitin.hpp	Wed Apr 24 21:11:02 2013 -0400
   129.3 @@ -265,18 +265,118 @@
   129.4    int effective_degree( uint lidx ) const;
   129.5  };
   129.6  
   129.7 -// TEMPORARILY REPLACED WITH COMMAND LINE FLAG
   129.8 +// The LiveRangeMap class is responsible for storing node to live range id mapping.
   129.9 +// Each node is mapped to a live range id (a virtual register). Nodes that are
  129.10 +// not considered for register allocation are given live range id 0.
  129.11 +class LiveRangeMap VALUE_OBJ_CLASS_SPEC {
  129.12  
  129.13 -//// !!!!! Magic Constants need to move into ad file
  129.14 -#ifdef SPARC
  129.15 -//#define FLOAT_PRESSURE 30  /*     SFLT_REG_mask.Size() - 1 */
  129.16 -//#define INT_PRESSURE   23  /* NOTEMP_I_REG_mask.Size() - 1 */
  129.17 -#define FLOAT_INCREMENT(regs) regs
  129.18 -#else
  129.19 -//#define FLOAT_PRESSURE 6
  129.20 -//#define INT_PRESSURE   6
  129.21 -#define FLOAT_INCREMENT(regs) 1
  129.22 -#endif
  129.23 +private:
  129.24 +
  129.25 +  uint _max_lrg_id;
  129.26 +
  129.27 +  // Union-find map.  Declared as a short for speed.
  129.28 +  // Indexed by live-range number, it returns the compacted live-range number
  129.29 +  LRG_List _uf_map;
  129.30 +
  129.31 +  // Map from Nodes to live ranges
  129.32 +  LRG_List _names;
  129.33 +
  129.34 +  // Straight out of Tarjan's union-find algorithm
  129.35 +  uint find_compress(const Node *node) {
  129.36 +    uint lrg_id = find_compress(_names[node->_idx]);
  129.37 +    _names.map(node->_idx, lrg_id);
  129.38 +    return lrg_id;
  129.39 +  }
  129.40 +
  129.41 +  uint find_compress(uint lrg);
  129.42 +
  129.43 +public:
  129.44 +
  129.45 +  const LRG_List& names() {
  129.46 +    return _names;
  129.47 +  }
  129.48 +
  129.49 +  uint max_lrg_id() const {
  129.50 +    return _max_lrg_id;
  129.51 +  }
  129.52 +
  129.53 +  void set_max_lrg_id(uint max_lrg_id) {
  129.54 +    _max_lrg_id = max_lrg_id;
  129.55 +  }
  129.56 +
  129.57 +  uint size() const {
  129.58 +    return _names.Size();
  129.59 +  }
  129.60 +
  129.61 +  uint live_range_id(uint idx) const {
  129.62 +    return _names[idx];
  129.63 +  }
  129.64 +
  129.65 +  uint live_range_id(const Node *node) const {
  129.66 +    return _names[node->_idx];
  129.67 +  }
  129.68 +
  129.69 +  uint uf_live_range_id(uint lrg_id) const {
  129.70 +    return _uf_map[lrg_id];
  129.71 +  }
  129.72 +
  129.73 +  void map(uint idx, uint lrg_id) {
  129.74 +    _names.map(idx, lrg_id);
  129.75 +  }
  129.76 +
  129.77 +  void uf_map(uint dst_lrg_id, uint src_lrg_id) {
  129.78 +    _uf_map.map(dst_lrg_id, src_lrg_id);
  129.79 +  }
  129.80 +
  129.81 +  void extend(uint idx, uint lrg_id) {
  129.82 +    _names.extend(idx, lrg_id);
  129.83 +  }
  129.84 +
  129.85 +  void uf_extend(uint dst_lrg_id, uint src_lrg_id) {
  129.86 +    _uf_map.extend(dst_lrg_id, src_lrg_id);
  129.87 +  }
  129.88 +
  129.89 +  LiveRangeMap(uint unique)
  129.90 +  : _names(unique)
  129.91 +  , _uf_map(unique)
  129.92 +  , _max_lrg_id(0) {}
  129.93 +
  129.94 +  uint find_id( const Node *n ) {
  129.95 +    uint retval = live_range_id(n);
  129.96 +    assert(retval == find(n),"Invalid node to lidx mapping");
  129.97 +    return retval;
  129.98 +  }
  129.99 +
 129.100 +  // Reset the Union-Find map to identity
 129.101 +  void reset_uf_map(uint max_lrg_id);
 129.102 +
 129.103 +  // Make all Nodes map directly to their final live range; no need for
 129.104 +  // the Union-Find mapping after this call.
 129.105 +  void compress_uf_map_for_nodes();
 129.106 +
 129.107 +  uint find(uint lidx) {
 129.108 +    uint uf_lidx = _uf_map[lidx];
 129.109 +    return (uf_lidx == lidx) ? uf_lidx : find_compress(lidx);
 129.110 +  }
 129.111 +
 129.112 +  // Convert a Node into a Live Range Index - a lidx
 129.113 +  uint find(const Node *node) {
 129.114 +    uint lidx = live_range_id(node);
 129.115 +    uint uf_lidx = _uf_map[lidx];
 129.116 +    return (uf_lidx == lidx) ? uf_lidx : find_compress(node);
 129.117 +  }
 129.118 +
 129.119 +  // Like Find above, but no path compress, so bad asymptotic behavior
 129.120 +  uint find_const(uint lrg) const;
 129.121 +
 129.122 +  // Like Find above, but no path compress, so bad asymptotic behavior
 129.123 +  uint find_const(const Node *node) const {
 129.124 +    if(node->_idx >= _names.Size()) {
 129.125 +      return 0; // not mapped, usual for debug dump
 129.126 +    }
 129.127 +    return find_const(_names[node->_idx]);
 129.128 +  }
 129.129 +};
 129.130  
 129.131  //------------------------------Chaitin----------------------------------------
 129.132  // Briggs-Chaitin style allocation, mostly.
 129.133 @@ -286,7 +386,6 @@
 129.134    int _trip_cnt;
 129.135    int _alternate;
 129.136  
 129.137 -  uint _maxlrg;                 // Max live range number
 129.138    LRG &lrgs(uint idx) const { return _ifg->lrgs(idx); }
 129.139    PhaseLive *_live;             // Liveness, used in the interference graph
 129.140    PhaseIFG *_ifg;               // Interference graph (for original chunk)
 129.141 @@ -294,16 +393,6 @@
 129.142    VectorSet _spilled_once;      // Nodes that have been spilled
 129.143    VectorSet _spilled_twice;     // Nodes that have been spilled twice
 129.144  
 129.145 -  LRG_List _names;              // Map from Nodes to Live RanGes
 129.146 -
 129.147 -  // Union-find map.  Declared as a short for speed.
 129.148 -  // Indexed by live-range number, it returns the compacted live-range number
 129.149 -  LRG_List _uf_map;
 129.150 -  // Reset the Union-Find map to identity
 129.151 -  void reset_uf_map( uint maxlrg );
 129.152 -  // Remove the need for the Union-Find mapping
 129.153 -  void compress_uf_map_for_nodes( );
 129.154 -
 129.155    // Combine the Live Range Indices for these 2 Nodes into a single live
 129.156    // range.  Future requests for any Node in either live range will
 129.157    // return the live range index for the combined live range.
 129.158 @@ -322,7 +411,34 @@
 129.159    // Helper functions for Split()
 129.160    uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
 129.161    uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
 129.162 -  int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
 129.163 +
 129.164 +  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, LiveRangeMap &lrg_map) {
 129.165 +    bool found_projs = clone_projs_shared(b, idx, con, copy, lrg_map.max_lrg_id());
 129.166 +
 129.167 +    if(found_projs) {
 129.168 +      uint max_lrg_id = lrg_map.max_lrg_id();
 129.169 +      lrg_map.set_max_lrg_id(max_lrg_id + 1);
 129.170 +    }
 129.171 +
 129.172 +    return found_projs;
 129.173 +  }
 129.174 +
 129.175 +  //------------------------------clone_projs------------------------------------
 129.176 +  // After cloning some rematerialized instruction, clone any MachProj's that
 129.177 +  // follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
 129.178 +  // use G3 as an address temp.
 129.179 +  bool clone_projs(Block *b, uint idx, Node *con, Node *copy, uint &max_lrg_id) {
 129.180 +    bool found_projs = clone_projs_shared(b, idx, con, copy, max_lrg_id);
 129.181 +
 129.182 +    if(found_projs) {
 129.183 +      max_lrg_id++;
 129.184 +    }
 129.185 +
 129.186 +    return found_projs;
 129.187 +  }
 129.188 +
 129.189 +  bool clone_projs_shared(Block *b, uint idx, Node *con, Node *copy, uint max_lrg_id);
 129.190 +
 129.191    Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
 129.192                              int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
 129.193    // True if lidx is used before any real register is def'd in the block
 129.194 @@ -349,20 +465,11 @@
 129.195    PhaseChaitin( uint unique, PhaseCFG &cfg, Matcher &matcher );
 129.196    ~PhaseChaitin() {}
 129.197  
 129.198 -  // Convert a Node into a Live Range Index - a lidx
 129.199 -  uint Find( const Node *n ) {
 129.200 -    uint lidx = n2lidx(n);
 129.201 -    uint uf_lidx = _uf_map[lidx];
 129.202 -    return (uf_lidx == lidx) ? uf_lidx : Find_compress(n);
 129.203 -  }
 129.204 -  uint Find_const( uint lrg ) const;
 129.205 -  uint Find_const( const Node *n ) const;
 129.206 +  LiveRangeMap _lrg_map;
 129.207  
 129.208    // Do all the real work of allocate
 129.209    void Register_Allocate();
 129.210  
 129.211 -  uint n2lidx( const Node *n ) const { return _names[n->_idx]; }
 129.212 -
 129.213    float high_frequency_lrg() const { return _high_frequency_lrg; }
 129.214  
 129.215  #ifndef PRODUCT
 129.216 @@ -374,18 +481,6 @@
 129.217    // all inputs to a PhiNode, effectively coalescing live ranges.  Insert
 129.218    // copies as needed.
 129.219    void de_ssa();
 129.220 -  uint Find_compress( const Node *n );
 129.221 -  uint Find( uint lidx ) {
 129.222 -    uint uf_lidx = _uf_map[lidx];
 129.223 -    return (uf_lidx == lidx) ? uf_lidx : Find_compress(lidx);
 129.224 -  }
 129.225 -  uint Find_compress( uint lidx );
 129.226 -
 129.227 -  uint Find_id( const Node *n ) {
 129.228 -    uint retval = n2lidx(n);
 129.229 -    assert(retval == Find(n),"Invalid node to lidx mapping");
 129.230 -    return retval;
 129.231 -  }
 129.232  
 129.233    // Add edge between reg and everything in the vector.
 129.234    // Same as _ifg->add_vector(reg,live) EXCEPT use the RegMask
   130.1 --- a/src/share/vm/opto/coalesce.cpp	Wed Apr 24 20:55:28 2013 -0400
   130.2 +++ b/src/share/vm/opto/coalesce.cpp	Wed Apr 24 21:11:02 2013 -0400
   130.3 @@ -35,159 +35,11 @@
   130.4  #include "opto/regmask.hpp"
   130.5  
   130.6  //=============================================================================
   130.7 -//------------------------------reset_uf_map-----------------------------------
   130.8 -void PhaseChaitin::reset_uf_map( uint maxlrg ) {
   130.9 -  _maxlrg = maxlrg;
  130.10 -  // Force the Union-Find mapping to be at least this large
  130.11 -  _uf_map.extend(_maxlrg,0);
  130.12 -  // Initialize it to be the ID mapping.
  130.13 -  for( uint i=0; i<_maxlrg; i++ )
  130.14 -    _uf_map.map(i,i);
  130.15 -}
  130.16 -
  130.17 -//------------------------------compress_uf_map--------------------------------
  130.18 -// Make all Nodes map directly to their final live range; no need for
  130.19 -// the Union-Find mapping after this call.
  130.20 -void PhaseChaitin::compress_uf_map_for_nodes( ) {
  130.21 -  // For all Nodes, compress mapping
  130.22 -  uint unique = _names.Size();
  130.23 -  for( uint i=0; i<unique; i++ ) {
  130.24 -    uint lrg = _names[i];
  130.25 -    uint compressed_lrg = Find(lrg);
  130.26 -    if( lrg != compressed_lrg )
  130.27 -      _names.map(i,compressed_lrg);
  130.28 -  }
  130.29 -}
  130.30 -
  130.31 -//------------------------------Find-------------------------------------------
  130.32 -// Straight out of Tarjan's union-find algorithm
  130.33 -uint PhaseChaitin::Find_compress( uint lrg ) {
  130.34 -  uint cur = lrg;
  130.35 -  uint next = _uf_map[cur];
  130.36 -  while( next != cur ) {        // Scan chain of equivalences
  130.37 -    assert( next < cur, "always union smaller" );
  130.38 -    cur = next;                 // until find a fixed-point
  130.39 -    next = _uf_map[cur];
  130.40 -  }
  130.41 -  // Core of union-find algorithm: update chain of
  130.42 -  // equivalences to be equal to the root.
  130.43 -  while( lrg != next ) {
  130.44 -    uint tmp = _uf_map[lrg];
  130.45 -    _uf_map.map(lrg, next);
  130.46 -    lrg = tmp;
  130.47 -  }
  130.48 -  return lrg;
  130.49 -}
  130.50 -
  130.51 -//------------------------------Find-------------------------------------------
  130.52 -// Straight out of Tarjan's union-find algorithm
  130.53 -uint PhaseChaitin::Find_compress( const Node *n ) {
  130.54 -  uint lrg = Find_compress(_names[n->_idx]);
  130.55 -  _names.map(n->_idx,lrg);
  130.56 -  return lrg;
  130.57 -}
  130.58 -
  130.59 -//------------------------------Find_const-------------------------------------
  130.60 -// Like Find above, but no path compress, so bad asymptotic behavior
  130.61 -uint PhaseChaitin::Find_const( uint lrg ) const {
  130.62 -  if( !lrg ) return lrg;        // Ignore the zero LRG
  130.63 -  // Off the end?  This happens during debugging dumps when you got
  130.64 -  // brand new live ranges but have not told the allocator yet.
  130.65 -  if( lrg >= _maxlrg ) return lrg;
  130.66 -  uint next = _uf_map[lrg];
  130.67 -  while( next != lrg ) {        // Scan chain of equivalences
  130.68 -    assert( next < lrg, "always union smaller" );
  130.69 -    lrg = next;                 // until find a fixed-point
  130.70 -    next = _uf_map[lrg];
  130.71 -  }
  130.72 -  return next;
  130.73 -}
  130.74 -
  130.75 -//------------------------------Find-------------------------------------------
  130.76 -// Like Find above, but no path compress, so bad asymptotic behavior
  130.77 -uint PhaseChaitin::Find_const( const Node *n ) const {
  130.78 -  if( n->_idx >= _names.Size() ) return 0; // not mapped, usual for debug dump
  130.79 -  return Find_const( _names[n->_idx] );
  130.80 -}
  130.81 -
  130.82 -//------------------------------Union------------------------------------------
  130.83 -// union 2 sets together.
  130.84 -void PhaseChaitin::Union( const Node *src_n, const Node *dst_n ) {
  130.85 -  uint src = Find(src_n);
  130.86 -  uint dst = Find(dst_n);
  130.87 -  assert( src, "" );
  130.88 -  assert( dst, "" );
  130.89 -  assert( src < _maxlrg, "oob" );
  130.90 -  assert( dst < _maxlrg, "oob" );
  130.91 -  assert( src < dst, "always union smaller" );
  130.92 -  _uf_map.map(dst,src);
  130.93 -}
  130.94 -
  130.95 -//------------------------------new_lrg----------------------------------------
  130.96 -void PhaseChaitin::new_lrg( const Node *x, uint lrg ) {
  130.97 -  // Make the Node->LRG mapping
  130.98 -  _names.extend(x->_idx,lrg);
  130.99 -  // Make the Union-Find mapping an identity function
 130.100 -  _uf_map.extend(lrg,lrg);
 130.101 -}
 130.102 -
 130.103 -//------------------------------clone_projs------------------------------------
 130.104 -// After cloning some rematerialized instruction, clone any MachProj's that
 130.105 -// follow it.  Example: Intel zero is XOR, kills flags.  Sparc FP constants
 130.106 -// use G3 as an address temp.
 130.107 -int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) {
 130.108 -  Block *bcon = _cfg._bbs[con->_idx];
 130.109 -  uint cindex = bcon->find_node(con);
 130.110 -  Node *con_next = bcon->_nodes[cindex+1];
 130.111 -  if( con_next->in(0) != con || !con_next->is_MachProj() )
 130.112 -    return false;               // No MachProj's follow
 130.113 -
 130.114 -  // Copy kills after the cloned constant
 130.115 -  Node *kills = con_next->clone();
 130.116 -  kills->set_req( 0, copy );
 130.117 -  b->_nodes.insert( idx, kills );
 130.118 -  _cfg._bbs.map( kills->_idx, b );
 130.119 -  new_lrg( kills, maxlrg++ );
 130.120 -  return true;
 130.121 -}
 130.122 -
 130.123 -//------------------------------compact----------------------------------------
 130.124 -// Renumber the live ranges to compact them.  Makes the IFG smaller.
 130.125 -void PhaseChaitin::compact() {
 130.126 -  // Current the _uf_map contains a series of short chains which are headed
 130.127 -  // by a self-cycle.  All the chains run from big numbers to little numbers.
 130.128 -  // The Find() call chases the chains & shortens them for the next Find call.
 130.129 -  // We are going to change this structure slightly.  Numbers above a moving
 130.130 -  // wave 'i' are unchanged.  Numbers below 'j' point directly to their
 130.131 -  // compacted live range with no further chaining.  There are no chains or
 130.132 -  // cycles below 'i', so the Find call no longer works.
 130.133 -  uint j=1;
 130.134 -  uint i;
 130.135 -  for( i=1; i < _maxlrg; i++ ) {
 130.136 -    uint lr = _uf_map[i];
 130.137 -    // Ignore unallocated live ranges
 130.138 -    if( !lr ) continue;
 130.139 -    assert( lr <= i, "" );
 130.140 -    _uf_map.map(i, ( lr == i ) ? j++ : _uf_map[lr]);
 130.141 -  }
 130.142 -  if( false )                  // PrintOptoCompactLiveRanges
 130.143 -    printf("Compacted %d LRs from %d\n",i-j,i);
 130.144 -  // Now change the Node->LR mapping to reflect the compacted names
 130.145 -  uint unique = _names.Size();
 130.146 -  for( i=0; i<unique; i++ )
 130.147 -    _names.map(i,_uf_map[_names[i]]);
 130.148 -
 130.149 -  // Reset the Union-Find mapping
 130.150 -  reset_uf_map(j);
 130.151 -
 130.152 -}
 130.153 -
 130.154 -//=============================================================================
 130.155  //------------------------------Dump-------------------------------------------
 130.156  #ifndef PRODUCT
 130.157 -void PhaseCoalesce::dump( Node *n ) const {
 130.158 +void PhaseCoalesce::dump(Node *n) const {
 130.159    // Being a const function means I cannot use 'Find'
 130.160 -  uint r = _phc.Find(n);
 130.161 +  uint r = _phc._lrg_map.find(n);
 130.162    tty->print("L%d/N%d ",r,n->_idx);
 130.163  }
 130.164  
 130.165 @@ -235,9 +87,9 @@
 130.166  
 130.167  //------------------------------combine_these_two------------------------------
 130.168  // Combine the live ranges def'd by these 2 Nodes.  N2 is an input to N1.
 130.169 -void PhaseCoalesce::combine_these_two( Node *n1, Node *n2 ) {
 130.170 -  uint lr1 = _phc.Find(n1);
 130.171 -  uint lr2 = _phc.Find(n2);
 130.172 +void PhaseCoalesce::combine_these_two(Node *n1, Node *n2) {
 130.173 +  uint lr1 = _phc._lrg_map.find(n1);
 130.174 +  uint lr2 = _phc._lrg_map.find(n2);
 130.175    if( lr1 != lr2 &&             // Different live ranges already AND
 130.176        !_phc._ifg->test_edge_sq( lr1, lr2 ) ) {  // Do not interfere
 130.177      LRG *lrg1 = &_phc.lrgs(lr1);
 130.178 @@ -306,14 +158,18 @@
 130.179    // I am about to clobber the dst_name, so the copy must be inserted
 130.180    // after the last use.  Last use is really first-use on a backwards scan.
 130.181    uint i = b->end_idx()-1;
 130.182 -  while( 1 ) {
 130.183 +  while(1) {
 130.184      Node *n = b->_nodes[i];
 130.185      // Check for end of virtual copies; this is also the end of the
 130.186      // parallel renaming effort.
 130.187 -    if( n->_idx < _unique ) break;
 130.188 +    if (n->_idx < _unique) {
 130.189 +      break;
 130.190 +    }
 130.191      uint idx = n->is_Copy();
 130.192      assert( idx || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
 130.193 -    if( idx && _phc.Find(n->in(idx)) == dst_name ) break;
 130.194 +    if (idx && _phc._lrg_map.find(n->in(idx)) == dst_name) {
 130.195 +      break;
 130.196 +    }
 130.197      i--;
 130.198    }
 130.199    uint last_use_idx = i;
 130.200 @@ -324,24 +180,29 @@
 130.201    // There can be only 1 kill that exits any block and that is
 130.202    // the last kill.  Thus it is the first kill on a backwards scan.
 130.203    i = b->end_idx()-1;
 130.204 -  while( 1 ) {
 130.205 +  while (1) {
 130.206      Node *n = b->_nodes[i];
 130.207      // Check for end of virtual copies; this is also the end of the
 130.208      // parallel renaming effort.
 130.209 -    if( n->_idx < _unique ) break;
 130.210 +    if (n->_idx < _unique) {
 130.211 +      break;
 130.212 +    }
 130.213      assert( n->is_Copy() || n->is_Con() || n->is_MachProj(), "Only copies during parallel renaming" );
 130.214 -    if( _phc.Find(n) == src_name ) {
 130.215 +    if (_phc._lrg_map.find(n) == src_name) {
 130.216        kill_src_idx = i;
 130.217        break;
 130.218      }
 130.219      i--;
 130.220    }
 130.221    // Need a temp?  Last use of dst comes after the kill of src?
 130.222 -  if( last_use_idx >= kill_src_idx ) {
 130.223 +  if (last_use_idx >= kill_src_idx) {
 130.224      // Need to break a cycle with a temp
 130.225      uint idx = copy->is_Copy();
 130.226      Node *tmp = copy->clone();
 130.227 -    _phc.new_lrg(tmp,_phc._maxlrg++);
 130.228 +    uint max_lrg_id = _phc._lrg_map.max_lrg_id();
 130.229 +    _phc.new_lrg(tmp, max_lrg_id);
 130.230 +    _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
 130.231 +
 130.232      // Insert new temp between copy and source
 130.233      tmp ->set_req(idx,copy->in(idx));
 130.234      copy->set_req(idx,tmp);
 130.235 @@ -359,14 +220,14 @@
 130.236  void PhaseAggressiveCoalesce::insert_copies( Matcher &matcher ) {
 130.237    // We do LRGs compressing and fix a liveout data only here since the other
 130.238    // place in Split() is guarded by the assert which we never hit.
 130.239 -  _phc.compress_uf_map_for_nodes();
 130.240 +  _phc._lrg_map.compress_uf_map_for_nodes();
 130.241    // Fix block's liveout data for compressed live ranges.
 130.242 -  for(uint lrg = 1; lrg < _phc._maxlrg; lrg++ ) {
 130.243 -    uint compressed_lrg = _phc.Find(lrg);
 130.244 -    if( lrg != compressed_lrg ) {
 130.245 -      for( uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++ ) {
 130.246 +  for (uint lrg = 1; lrg < _phc._lrg_map.max_lrg_id(); lrg++) {
 130.247 +    uint compressed_lrg = _phc._lrg_map.find(lrg);
 130.248 +    if (lrg != compressed_lrg) {
 130.249 +      for (uint bidx = 0; bidx < _phc._cfg._num_blocks; bidx++) {
 130.250          IndexSet *liveout = _phc._live->live(_phc._cfg._blocks[bidx]);
 130.251 -        if( liveout->member(lrg) ) {
 130.252 +        if (liveout->member(lrg)) {
 130.253            liveout->remove(lrg);
 130.254            liveout->insert(compressed_lrg);
 130.255          }
 130.256 @@ -392,8 +253,9 @@
 130.257          uint cidx = copy->is_Copy();
 130.258          if( cidx ) {
 130.259            Node *def = copy->in(cidx);
 130.260 -          if( _phc.Find(copy) == _phc.Find(def) )
 130.261 -            n->set_req(k,def);
 130.262 +          if (_phc._lrg_map.find(copy) == _phc._lrg_map.find(def)) {
 130.263 +            n->set_req(k, def);
 130.264 +          }
 130.265          }
 130.266        }
 130.267  
 130.268 @@ -401,7 +263,7 @@
 130.269        uint cidx = n->is_Copy();
 130.270        if( cidx ) {
 130.271          Node *def = n->in(cidx);
 130.272 -        if( _phc.Find(n) == _phc.Find(def) ) {
 130.273 +        if (_phc._lrg_map.find(n) == _phc._lrg_map.find(def)) {
 130.274            n->replace_by(def);
 130.275            n->set_req(cidx,NULL);
 130.276            b->_nodes.remove(l);
 130.277 @@ -410,16 +272,18 @@
 130.278          }
 130.279        }
 130.280  
 130.281 -      if( n->is_Phi() ) {
 130.282 +      if (n->is_Phi()) {
 130.283          // Get the chosen name for the Phi
 130.284 -        uint phi_name = _phc.Find( n );
 130.285 +        uint phi_name = _phc._lrg_map.find(n);
 130.286          // Ignore the pre-allocated specials
 130.287 -        if( !phi_name ) continue;
 130.288 +        if (!phi_name) {
 130.289 +          continue;
 130.290 +        }
 130.291          // Check for mismatch inputs to Phi
 130.292 -        for( uint j = 1; j<cnt; j++ ) {
 130.293 +        for (uint j = 1; j < cnt; j++) {
 130.294            Node *m = n->in(j);
 130.295 -          uint src_name = _phc.Find(m);
 130.296 -          if( src_name != phi_name ) {
 130.297 +          uint src_name = _phc._lrg_map.find(m);
 130.298 +          if (src_name != phi_name) {
 130.299              Block *pred = _phc._cfg._bbs[b->pred(j)->_idx];
 130.300              Node *copy;
 130.301              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
 130.302 @@ -430,18 +294,18 @@
 130.303                // Insert the copy in the predecessor basic block
 130.304                pred->add_inst(copy);
 130.305                // Copy any flags as well
 130.306 -              _phc.clone_projs( pred, pred->end_idx(), m, copy, _phc._maxlrg );
 130.307 +              _phc.clone_projs(pred, pred->end_idx(), m, copy, _phc._lrg_map);
 130.308              } else {
 130.309                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
 130.310 -              copy = new (C) MachSpillCopyNode(m,*rm,*rm);
 130.311 +              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
 130.312                // Find a good place to insert.  Kinda tricky, use a subroutine
 130.313                insert_copy_with_overlap(pred,copy,phi_name,src_name);
 130.314              }
 130.315              // Insert the copy in the use-def chain
 130.316 -            n->set_req( j, copy );
 130.317 +            n->set_req(j, copy);
 130.318              _phc._cfg._bbs.map( copy->_idx, pred );
 130.319              // Extend ("register allocate") the names array for the copy.
 130.320 -            _phc._names.extend( copy->_idx, phi_name );
 130.321 +            _phc._lrg_map.extend(copy->_idx, phi_name);
 130.322            } // End of if Phi names do not match
 130.323          } // End of for all inputs to Phi
 130.324        } else { // End of if Phi
 130.325 @@ -450,39 +314,40 @@
 130.326          uint idx;
 130.327          if( n->is_Mach() && (idx=n->as_Mach()->two_adr()) ) {
 130.328            // Get the chosen name for the Node
 130.329 -          uint name = _phc.Find( n );
 130.330 -          assert( name, "no 2-address specials" );
 130.331 +          uint name = _phc._lrg_map.find(n);
 130.332 +          assert (name, "no 2-address specials");
 130.333            // Check for name mis-match on the 2-address input
 130.334            Node *m = n->in(idx);
 130.335 -          if( _phc.Find(m) != name ) {
 130.336 +          if (_phc._lrg_map.find(m) != name) {
 130.337              Node *copy;
 130.338              assert(!m->is_Con() || m->is_Mach(), "all Con must be Mach");
 130.339              // At this point it is unsafe to extend live ranges (6550579).
 130.340              // Rematerialize only constants as we do for Phi above.
 130.341 -            if( m->is_Mach() && m->as_Mach()->is_Con() &&
 130.342 -                m->as_Mach()->rematerialize() ) {
 130.343 +            if(m->is_Mach() && m->as_Mach()->is_Con() &&
 130.344 +               m->as_Mach()->rematerialize()) {
 130.345                copy = m->clone();
 130.346                // Insert the copy in the basic block, just before us
 130.347 -              b->_nodes.insert( l++, copy );
 130.348 -              if( _phc.clone_projs( b, l, m, copy, _phc._maxlrg ) )
 130.349 +              b->_nodes.insert(l++, copy);
 130.350 +              if(_phc.clone_projs(b, l, m, copy, _phc._lrg_map)) {
 130.351                  l++;
 130.352 +              }
 130.353              } else {
 130.354                const RegMask *rm = C->matcher()->idealreg2spillmask[m->ideal_reg()];
 130.355 -              copy = new (C) MachSpillCopyNode( m, *rm, *rm );
 130.356 +              copy = new (C) MachSpillCopyNode(m, *rm, *rm);
 130.357                // Insert the copy in the basic block, just before us
 130.358 -              b->_nodes.insert( l++, copy );
 130.359 +              b->_nodes.insert(l++, copy);
 130.360              }
 130.361              // Insert the copy in the use-def chain
 130.362 -            n->set_req(idx, copy );
 130.363 +            n->set_req(idx, copy);
 130.364              // Extend ("register allocate") the names array for the copy.
 130.365 -            _phc._names.extend( copy->_idx, name );
 130.366 +            _phc._lrg_map.extend(copy->_idx, name);
 130.367              _phc._cfg._bbs.map( copy->_idx, b );
 130.368            }
 130.369  
 130.370          } // End of is two-adr
 130.371  
 130.372          // Insert a copy at a debug use for a lrg which has high frequency
 130.373 -        if( b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs) ) {
 130.374 +        if (b->_freq < OPTO_DEBUG_SPLIT_FREQ || b->is_uncommon(_phc._cfg._bbs)) {
 130.375            // Walk the debug inputs to the node and check for lrg freq
 130.376            JVMState* jvms = n->jvms();
 130.377            uint debug_start = jvms ? jvms->debug_start() : 999999;
 130.378 @@ -490,9 +355,11 @@
 130.379            for(uint inpidx = debug_start; inpidx < debug_end; inpidx++) {
 130.380              // Do not split monitors; they are only needed for debug table
 130.381              // entries and need no code.
 130.382 -            if( jvms->is_monitor_use(inpidx) ) continue;
 130.383 +            if (jvms->is_monitor_use(inpidx)) {
 130.384 +              continue;
 130.385 +            }
 130.386              Node *inp = n->in(inpidx);
 130.387 -            uint nidx = _phc.n2lidx(inp);
 130.388 +            uint nidx = _phc._lrg_map.live_range_id(inp);
 130.389              LRG &lrg = lrgs(nidx);
 130.390  
 130.391              // If this lrg has a high frequency use/def
 130.392 @@ -519,8 +386,10 @@
 130.393                // Insert the copy in the basic block, just before us
 130.394                b->_nodes.insert( l++, copy );
 130.395                // Extend ("register allocate") the names array for the copy.
 130.396 -              _phc.new_lrg( copy, _phc._maxlrg++ );
 130.397 -              _phc._cfg._bbs.map( copy->_idx, b );
 130.398 +              uint max_lrg_id = _phc._lrg_map.max_lrg_id();
 130.399 +              _phc.new_lrg(copy, max_lrg_id);
 130.400 +              _phc._lrg_map.set_max_lrg_id(max_lrg_id + 1);
 130.401 +              _phc._cfg._bbs.map(copy->_idx, b);
 130.402                //tty->print_cr("Split a debug use in Aggressive Coalesce");
 130.403              }  // End of if high frequency use/def
 130.404            }  // End of for all debug inputs
 130.405 @@ -583,17 +452,17 @@
 130.406      uint idx;
 130.407      // 2-address instructions have a virtual Copy matching their input
 130.408      // to their output
 130.409 -    if( n->is_Mach() && (idx = n->as_Mach()->two_adr()) ) {
 130.410 +    if (n->is_Mach() && (idx = n->as_Mach()->two_adr())) {
 130.411        MachNode *mach = n->as_Mach();
 130.412 -      combine_these_two( mach, mach->in(idx) );
 130.413 +      combine_these_two(mach, mach->in(idx));
 130.414      }
 130.415    } // End of for all instructions in block
 130.416  }
 130.417  
 130.418  //=============================================================================
 130.419  //------------------------------PhaseConservativeCoalesce----------------------
 130.420 -PhaseConservativeCoalesce::PhaseConservativeCoalesce( PhaseChaitin &chaitin ) : PhaseCoalesce(chaitin) {
 130.421 -  _ulr.initialize(_phc._maxlrg);
 130.422 +PhaseConservativeCoalesce::PhaseConservativeCoalesce(PhaseChaitin &chaitin) : PhaseCoalesce(chaitin) {
 130.423 +  _ulr.initialize(_phc._lrg_map.max_lrg_id());
 130.424  }
 130.425  
 130.426  //------------------------------verify-----------------------------------------
 130.427 @@ -673,10 +542,14 @@
 130.428        // Else work back one in copy chain
 130.429        prev_copy = prev_copy->in(prev_copy->is_Copy());
 130.430      } else {                    // Else collect interferences
 130.431 -      uint lidx = _phc.Find(x);
 130.432 +      uint lidx = _phc._lrg_map.find(x);
 130.433        // Found another def of live-range being stretched?
 130.434 -      if( lidx == lr1 ) return max_juint;
 130.435 -      if( lidx == lr2 ) return max_juint;
 130.436 +      if(lidx == lr1) {
 130.437 +        return max_juint;
 130.438 +      }
 130.439 +      if(lidx == lr2) {
 130.440 +        return max_juint;
 130.441 +      }
 130.442  
 130.443        // If we attempt to coalesce across a bound def
 130.444        if( lrgs(lidx).is_bound() ) {
 130.445 @@ -751,33 +624,43 @@
 130.446  // See if I can coalesce a series of multiple copies together.  I need the
 130.447  // final dest copy and the original src copy.  They can be the same Node.
 130.448  // Compute the compatible register masks.
 130.449 -bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block *b, uint bindex ) {
 130.450 +bool PhaseConservativeCoalesce::copy_copy(Node *dst_copy, Node *src_copy, Block *b, uint bindex) {
 130.451  
 130.452 -  if( !dst_copy->is_SpillCopy() ) return false;
 130.453 -  if( !src_copy->is_SpillCopy() ) return false;
 130.454 +  if (!dst_copy->is_SpillCopy()) {
 130.455 +    return false;
 130.456 +  }
 130.457 +  if (!src_copy->is_SpillCopy()) {
 130.458 +    return false;
 130.459 +  }
 130.460    Node *src_def = src_copy->in(src_copy->is_Copy());
 130.461 -  uint lr1 = _phc.Find(dst_copy);
 130.462 -  uint lr2 = _phc.Find(src_def );
 130.463 +  uint lr1 = _phc._lrg_map.find(dst_copy);
 130.464 +  uint lr2 = _phc._lrg_map.find(src_def);
 130.465  
 130.466    // Same live ranges already?
 130.467 -  if( lr1 == lr2 ) return false;
 130.468 +  if (lr1 == lr2) {
 130.469 +    return false;
 130.470 +  }
 130.471  
 130.472    // Interfere?
 130.473 -  if( _phc._ifg->test_edge_sq( lr1, lr2 ) ) return false;
 130.474 +  if (_phc._ifg->test_edge_sq(lr1, lr2)) {
 130.475 +    return false;
 130.476 +  }
 130.477  
 130.478    // Not an oop->int cast; oop->oop, int->int, AND int->oop are OK.
 130.479 -  if( !lrgs(lr1)._is_oop && lrgs(lr2)._is_oop ) // not an oop->int cast
 130.480 +  if (!lrgs(lr1)._is_oop && lrgs(lr2)._is_oop) { // not an oop->int cast
 130.481      return false;
 130.482 +  }
 130.483  
 130.484    // Coalescing between an aligned live range and a mis-aligned live range?
 130.485    // No, no!  Alignment changes how we count degree.
 130.486 -  if( lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj )
 130.487 +  if (lrgs(lr1)._fat_proj != lrgs(lr2)._fat_proj) {
 130.488      return false;
 130.489 +  }
 130.490  
 130.491    // Sort; use smaller live-range number
 130.492    Node *lr1_node = dst_copy;
 130.493    Node *lr2_node = src_def;
 130.494 -  if( lr1 > lr2 ) {
 130.495 +  if (lr1 > lr2) {
 130.496      uint tmp = lr1; lr1 = lr2; lr2 = tmp;
 130.497      lr1_node = src_def;  lr2_node = dst_copy;
 130.498    }
 130.499 @@ -916,17 +799,5 @@
 130.500        PhaseChaitin::_conserv_coalesce++;  // Collect stats on success
 130.501        continue;
 130.502      }
 130.503 -
 130.504 -    /* do not attempt pairs.  About 1/2 of all pairs can be removed by
 130.505 -       post-alloc.  The other set are too few to bother.
 130.506 -    Node *copy2 = copy1->in(idx1);
 130.507 -    uint idx2 = copy2->is_Copy();
 130.508 -    if( !idx2 ) continue;
 130.509 -    if( copy_copy(copy1,copy2,b,i) ) {
 130.510 -      i--;                      // Retry, same location in block
 130.511 -      PhaseChaitin::_conserv_coalesce_pair++; // Collect stats on success
 130.512 -      continue;
 130.513 -    }
 130.514 -    */
 130.515    }
 130.516  }
   131.1 --- a/src/share/vm/opto/coalesce.hpp	Wed Apr 24 20:55:28 2013 -0400
   131.2 +++ b/src/share/vm/opto/coalesce.hpp	Wed Apr 24 21:11:02 2013 -0400
   131.3 @@ -41,23 +41,25 @@
   131.4  
   131.5  public:
   131.6    // Coalesce copies
   131.7 -  PhaseCoalesce( PhaseChaitin &chaitin ) : Phase(Coalesce), _phc(chaitin) { }
   131.8 +  PhaseCoalesce(PhaseChaitin &phc)
   131.9 +  : Phase(Coalesce)
  131.10 +  , _phc(phc) {}
  131.11  
  131.12    virtual void verify() = 0;
  131.13  
  131.14    // Coalesce copies
  131.15 -  void coalesce_driver( );
  131.16 +  void coalesce_driver();
  131.17  
  131.18    // Coalesce copies in this block
  131.19 -  virtual void coalesce( Block *b ) = 0;
  131.20 +  virtual void coalesce(Block *b) = 0;
  131.21  
  131.22    // Attempt to coalesce live ranges defined by these 2
  131.23 -  void combine_these_two( Node *n1, Node *n2 );
  131.24 +  void combine_these_two(Node *n1, Node *n2);
  131.25  
  131.26 -  LRG &lrgs( uint lidx ) { return _phc.lrgs(lidx); }
  131.27 +  LRG &lrgs(uint lidx) { return _phc.lrgs(lidx); }
  131.28  #ifndef PRODUCT
  131.29    // Dump internally name
  131.30 -  void dump( Node *n ) const;
  131.31 +  void dump(Node *n) const;
  131.32    // Dump whole shebang
  131.33    void dump() const;
  131.34  #endif
   132.1 --- a/src/share/vm/opto/compile.cpp	Wed Apr 24 20:55:28 2013 -0400
   132.2 +++ b/src/share/vm/opto/compile.cpp	Wed Apr 24 21:11:02 2013 -0400
   132.3 @@ -2127,22 +2127,19 @@
   132.4    }
   132.5    NOT_PRODUCT( verify_graph_edges(); )
   132.6  
   132.7 -  PhaseChaitin regalloc(unique(),cfg,m);
   132.8 +  PhaseChaitin regalloc(unique(), cfg, m);
   132.9    _regalloc = &regalloc;
  132.10    {
  132.11      TracePhase t2("regalloc", &_t_registerAllocation, true);
  132.12 -    // Perform any platform dependent preallocation actions.  This is used,
  132.13 -    // for example, to avoid taking an implicit null pointer exception
  132.14 -    // using the frame pointer on win95.
  132.15 -    _regalloc->pd_preallocate_hook();
  132.16 -
  132.17      // Perform register allocation.  After Chaitin, use-def chains are
  132.18      // no longer accurate (at spill code) and so must be ignored.
  132.19      // Node->LRG->reg mappings are still accurate.
  132.20      _regalloc->Register_Allocate();
  132.21  
  132.22      // Bail out if the allocator builds too many nodes
  132.23 -    if (failing())  return;
  132.24 +    if (failing()) {
  132.25 +      return;
  132.26 +    }
  132.27    }
  132.28  
  132.29    // Prior to register allocation we kept empty basic blocks in case the
  132.30 @@ -2160,9 +2157,6 @@
  132.31      cfg.fixup_flow();
  132.32    }
  132.33  
  132.34 -  // Perform any platform dependent postallocation verifications.
  132.35 -  debug_only( _regalloc->pd_postallocate_verify_hook(); )
  132.36 -
  132.37    // Apply peephole optimizations
  132.38    if( OptoPeephole ) {
  132.39      NOT_PRODUCT( TracePhase t2("peephole", &_t_peephole, TimeCompiler); )
  132.40 @@ -2326,12 +2320,14 @@
  132.41    int  get_inner_loop_count() const { return _inner_loop_count; }
  132.42  };
  132.43  
  132.44 +#ifdef ASSERT
  132.45  static bool oop_offset_is_sane(const TypeInstPtr* tp) {
  132.46    ciInstanceKlass *k = tp->klass()->as_instance_klass();
  132.47    // Make sure the offset goes inside the instance layout.
  132.48    return k->contains_field_offset(tp->offset());
  132.49    // Note that OffsetBot and OffsetTop are very negative.
  132.50  }
  132.51 +#endif
  132.52  
  132.53  // Eliminate trivially redundant StoreCMs and accumulate their
  132.54  // precedence edges.
   133.1 --- a/src/share/vm/opto/connode.cpp	Wed Apr 24 20:55:28 2013 -0400
   133.2 +++ b/src/share/vm/opto/connode.cpp	Wed Apr 24 21:11:02 2013 -0400
   133.3 @@ -465,29 +465,6 @@
   133.4    return (phase->type(in(1)) == phase->type(this)) ? in(1) : this;
   133.5  }
   133.6  
   133.7 -// Determine whether "n" is a node which can cause an alias of one of its inputs.  Node types
   133.8 -// which can create aliases are: CheckCastPP, Phi, and any store (if there is also a load from
   133.9 -// the location.)
  133.10 -// Note:  this checks for aliases created in this compilation, not ones which may
  133.11 -//        be potentially created at call sites.
  133.12 -static bool can_cause_alias(Node *n, PhaseTransform *phase) {
  133.13 -  bool possible_alias = false;
  133.14 -
  133.15 -  if (n->is_Store()) {
  133.16 -    possible_alias = !n->as_Store()->value_never_loaded(phase);
  133.17 -  } else {
  133.18 -    int opc = n->Opcode();
  133.19 -    possible_alias = n->is_Phi() ||
  133.20 -        opc == Op_CheckCastPP ||
  133.21 -        opc == Op_StorePConditional ||
  133.22 -        opc == Op_CompareAndSwapP ||
  133.23 -        opc == Op_CompareAndSwapN ||
  133.24 -        opc == Op_GetAndSetP ||
  133.25 -        opc == Op_GetAndSetN;
  133.26 -  }
  133.27 -  return possible_alias;
  133.28 -}
  133.29 -
  133.30  //------------------------------Value------------------------------------------
  133.31  // Take 'join' of input and cast-up type, unless working with an Interface
  133.32  const Type *CheckCastPPNode::Value( PhaseTransform *phase ) const {
   134.1 --- a/src/share/vm/opto/idealGraphPrinter.cpp	Wed Apr 24 20:55:28 2013 -0400
   134.2 +++ b/src/share/vm/opto/idealGraphPrinter.cpp	Wed Apr 24 21:11:02 2013 -0400
   134.3 @@ -616,7 +616,7 @@
   134.4        buffer[0] = 0;
   134.5        _chaitin->dump_register(node, buffer);
   134.6        print_prop("reg", buffer);
   134.7 -      print_prop("lrg", _chaitin->n2lidx(node));
   134.8 +      print_prop("lrg", _chaitin->_lrg_map.live_range_id(node));
   134.9      }
  134.10  
  134.11      node->_in_dump_cnt--;
   135.1 --- a/src/share/vm/opto/ifg.cpp	Wed Apr 24 20:55:28 2013 -0400
   135.2 +++ b/src/share/vm/opto/ifg.cpp	Wed Apr 24 21:11:02 2013 -0400
   135.3 @@ -286,15 +286,14 @@
   135.4      uint idx;
   135.5      uint last = 0;
   135.6      while ((idx = elements.next()) != 0) {
   135.7 -      assert( idx != i, "Must have empty diagonal");
   135.8 -      assert( pc->Find_const(idx) == idx, "Must not need Find" );
   135.9 -      assert( _adjs[idx].member(i), "IFG not square" );
  135.10 -      assert( !(*_yanked)[idx], "No yanked neighbors" );
  135.11 -      assert( last < idx, "not sorted increasing");
  135.12 +      assert(idx != i, "Must have empty diagonal");
  135.13 +      assert(pc->_lrg_map.find_const(idx) == idx, "Must not need Find");
  135.14 +      assert(_adjs[idx].member(i), "IFG not square");
  135.15 +      assert(!(*_yanked)[idx], "No yanked neighbors");
  135.16 +      assert(last < idx, "not sorted increasing");
  135.17        last = idx;
  135.18      }
  135.19 -    assert( !lrgs(i)._degree_valid ||
  135.20 -            effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong" );
  135.21 +    assert(!lrgs(i)._degree_valid || effective_degree(i) == lrgs(i).degree(), "degree is valid but wrong");
  135.22    }
  135.23  }
  135.24  #endif
  135.25 @@ -342,10 +341,10 @@
  135.26        Node *n = b->_nodes[j-1];
  135.27  
  135.28        // Get value being defined
  135.29 -      uint r = n2lidx(n);
  135.30 +      uint r = _lrg_map.live_range_id(n);
  135.31  
  135.32        // Some special values do not allocate
  135.33 -      if( r ) {
  135.34 +      if (r) {
  135.35  
  135.36          // Remove from live-out set
  135.37          liveout->remove(r);
  135.38 @@ -353,16 +352,19 @@
  135.39          // Copies do not define a new value and so do not interfere.
  135.40          // Remove the copies source from the liveout set before interfering.
  135.41          uint idx = n->is_Copy();
  135.42 -        if( idx ) liveout->remove( n2lidx(n->in(idx)) );
  135.43 +        if (idx) {
  135.44 +          liveout->remove(_lrg_map.live_range_id(n->in(idx)));
  135.45 +        }
  135.46  
  135.47          // Interfere with everything live
  135.48 -        interfere_with_live( r, liveout );
  135.49 +        interfere_with_live(r, liveout);
  135.50        }
  135.51  
  135.52        // Make all inputs live
  135.53 -      if( !n->is_Phi() ) {      // Phi function uses come from prior block
  135.54 -        for( uint k = 1; k < n->req(); k++ )
  135.55 -          liveout->insert( n2lidx(n->in(k)) );
  135.56 +      if (!n->is_Phi()) {      // Phi function uses come from prior block
  135.57 +        for(uint k = 1; k < n->req(); k++) {
  135.58 +          liveout->insert(_lrg_map.live_range_id(n->in(k)));
  135.59 +        }
  135.60        }
  135.61  
  135.62        // 2-address instructions always have the defined value live
  135.63 @@ -394,11 +396,12 @@
  135.64            n->set_req( 2, tmp );
  135.65          }
  135.66          // Defined value interferes with all inputs
  135.67 -        uint lidx = n2lidx(n->in(idx));
  135.68 -        for( uint k = 1; k < n->req(); k++ ) {
  135.69 -          uint kidx = n2lidx(n->in(k));
  135.70 -          if( kidx != lidx )
  135.71 -            _ifg->add_edge( r, kidx );
  135.72 +        uint lidx = _lrg_map.live_range_id(n->in(idx));
  135.73 +        for (uint k = 1; k < n->req(); k++) {
  135.74 +          uint kidx = _lrg_map.live_range_id(n->in(k));
  135.75 +          if (kidx != lidx) {
  135.76 +            _ifg->add_edge(r, kidx);
  135.77 +          }
  135.78          }
  135.79        }
  135.80      } // End of forall instructions in block
  135.81 @@ -542,10 +545,10 @@
  135.82        Node *n = b->_nodes[j - 1];
  135.83  
  135.84        // Get value being defined
  135.85 -      uint r = n2lidx(n);
  135.86 +      uint r = _lrg_map.live_range_id(n);
  135.87  
  135.88        // Some special values do not allocate
  135.89 -      if( r ) {
  135.90 +      if(r) {
  135.91          // A DEF normally costs block frequency; rematerialized values are
  135.92          // removed from the DEF sight, so LOWER costs here.
  135.93          lrgs(r)._cost += n->rematerialize() ? 0 : b->_freq;
  135.94 @@ -556,9 +559,11 @@
  135.95            Node *def = n->in(0);
  135.96            if( !n->is_Proj() ||
  135.97                // Could also be a flags-projection of a dead ADD or such.
  135.98 -              (n2lidx(def) && !liveout.member(n2lidx(def)) ) ) {
  135.99 +              (_lrg_map.live_range_id(def) && !liveout.member(_lrg_map.live_range_id(def)))) {
 135.100              b->_nodes.remove(j - 1);
 135.101 -            if( lrgs(r)._def == n ) lrgs(r)._def = 0;
 135.102 +            if (lrgs(r)._def == n) {
 135.103 +              lrgs(r)._def = 0;
 135.104 +            }
 135.105              n->disconnect_inputs(NULL, C);
 135.106              _cfg._bbs.map(n->_idx,NULL);
 135.107              n->replace_by(C->top());
 135.108 @@ -570,7 +575,7 @@
 135.109  
 135.110            // Fat-projections kill many registers which cannot be used to
 135.111            // hold live ranges.
 135.112 -          if( lrgs(r)._fat_proj ) {
 135.113 +          if (lrgs(r)._fat_proj) {
 135.114              // Count the int-only registers
 135.115              RegMask itmp = lrgs(r).mask();
 135.116              itmp.AND(*Matcher::idealreg2regmask[Op_RegI]);
 135.117 @@ -636,12 +641,12 @@
 135.118            // Copies do not define a new value and so do not interfere.
 135.119            // Remove the copies source from the liveout set before interfering.
 135.120            uint idx = n->is_Copy();
 135.121 -          if( idx ) {
 135.122 -            uint x = n2lidx(n->in(idx));
 135.123 -            if( liveout.remove( x ) ) {
 135.124 +          if (idx) {
 135.125 +            uint x = _lrg_map.live_range_id(n->in(idx));
 135.126 +            if (liveout.remove(x)) {
 135.127                lrgs(x)._area -= cost;
 135.128                // Adjust register pressure.
 135.129 -              lower_pressure( &lrgs(x), j-1, b, pressure, hrp_index );
 135.130 +              lower_pressure(&lrgs(x), j-1, b, pressure, hrp_index);
 135.131                assert( pressure[0] == count_int_pressure  (&liveout), "" );
 135.132                assert( pressure[1] == count_float_pressure(&liveout), "" );
 135.133              }
 135.134 @@ -727,18 +732,21 @@
 135.135          // the flags and assumes it's dead.  This keeps the (useless)
 135.136          // flag-setting behavior alive while also keeping the (useful)
 135.137          // memory update effect.
 135.138 -        for( uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++ ) {
 135.139 +        for (uint k = ((n->Opcode() == Op_SCMemProj) ? 0:1); k < n->req(); k++) {
 135.140            Node *def = n->in(k);
 135.141 -          uint x = n2lidx(def);
 135.142 -          if( !x ) continue;
 135.143 +          uint x = _lrg_map.live_range_id(def);
 135.144 +          if (!x) {
 135.145 +            continue;
 135.146 +          }
 135.147            LRG &lrg = lrgs(x);
 135.148            // No use-side cost for spilling debug info
 135.149 -          if( k < debug_start )
 135.150 +          if (k < debug_start) {
 135.151              // A USE costs twice block frequency (once for the Load, once
 135.152              // for a Load-delay).  Rematerialized uses only cost once.
 135.153              lrg._cost += (def->rematerialize() ? b->_freq : (b->_freq + b->_freq));
 135.154 +          }
 135.155            // It is live now
 135.156 -          if( liveout.insert( x ) ) {
 135.157 +          if (liveout.insert(x)) {
 135.158              // Newly live things assumed live from here to top of block
 135.159              lrg._area += cost;
 135.160              // Adjust register pressure
   136.1 --- a/src/share/vm/opto/live.cpp	Wed Apr 24 20:55:28 2013 -0400
   136.2 +++ b/src/share/vm/opto/live.cpp	Wed Apr 24 21:11:02 2013 -0400
   136.3 @@ -44,7 +44,7 @@
   136.4  // block is put on the worklist.
   136.5  //   The locally live-in stuff is computed once and added to predecessor
   136.6  // live-out sets.  This separate compilation is done in the outer loop below.
   136.7 -PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
   136.8 +PhaseLive::PhaseLive( const PhaseCFG &cfg, const LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) {
   136.9  }
  136.10  
  136.11  void PhaseLive::compute(uint maxlrg) {
   137.1 --- a/src/share/vm/opto/live.hpp	Wed Apr 24 20:55:28 2013 -0400
   137.2 +++ b/src/share/vm/opto/live.hpp	Wed Apr 24 21:11:02 2013 -0400
   137.3 @@ -80,7 +80,7 @@
   137.4    Block_List *_worklist;        // Worklist for iterative solution
   137.5  
   137.6    const PhaseCFG &_cfg;         // Basic blocks
   137.7 -  LRG_List &_names;             // Mapping from Nodes to live ranges
   137.8 +  const LRG_List &_names;       // Mapping from Nodes to live ranges
   137.9    uint _maxlrg;                 // Largest live-range number
  137.10    Arena *_arena;
  137.11  
  137.12 @@ -91,7 +91,7 @@
  137.13    void add_liveout( Block *p, IndexSet *lo, VectorSet &first_pass );
  137.14  
  137.15  public:
  137.16 -  PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena );
  137.17 +  PhaseLive(const PhaseCFG &cfg, const LRG_List &names, Arena *arena);
  137.18    ~PhaseLive() {}
  137.19    // Compute liveness info
  137.20    void compute(uint maxlrg);
   138.1 --- a/src/share/vm/opto/output.cpp	Wed Apr 24 20:55:28 2013 -0400
   138.2 +++ b/src/share/vm/opto/output.cpp	Wed Apr 24 21:11:02 2013 -0400
   138.3 @@ -1043,21 +1043,6 @@
   138.4    debug_info->end_non_safepoint(pc_offset);
   138.5  }
   138.6  
   138.7 -
   138.8 -
   138.9 -// helper for fill_buffer bailout logic
  138.10 -static void turn_off_compiler(Compile* C) {
  138.11 -  if (CodeCache::largest_free_block() >= CodeCacheMinimumFreeSpace*10) {
  138.12 -    // Do not turn off compilation if a single giant method has
  138.13 -    // blown the code cache size.
  138.14 -    C->record_failure("excessive request to CodeCache");
  138.15 -  } else {
  138.16 -    // Let CompilerBroker disable further compilations.
  138.17 -    C->record_failure("CodeCache is full");
  138.18 -  }
  138.19 -}
  138.20 -
  138.21 -
  138.22  //------------------------------init_buffer------------------------------------
  138.23  CodeBuffer* Compile::init_buffer(uint* blk_starts) {
  138.24  
  138.25 @@ -1157,7 +1142,7 @@
  138.26  
  138.27    // Have we run out of code space?
  138.28    if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  138.29 -    turn_off_compiler(this);
  138.30 +    C->record_failure("CodeCache is full");
  138.31      return NULL;
  138.32    }
  138.33    // Configure the code buffer.
  138.34 @@ -1475,7 +1460,7 @@
  138.35        // Verify that there is sufficient space remaining
  138.36        cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
  138.37        if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  138.38 -        turn_off_compiler(this);
  138.39 +        C->record_failure("CodeCache is full");
  138.40          return;
  138.41        }
  138.42  
  138.43 @@ -1632,7 +1617,7 @@
  138.44  
  138.45    // One last check for failed CodeBuffer::expand:
  138.46    if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
  138.47 -    turn_off_compiler(this);
  138.48 +    C->record_failure("CodeCache is full");
  138.49      return;
  138.50    }
  138.51  
   139.1 --- a/src/share/vm/opto/postaloc.cpp	Wed Apr 24 20:55:28 2013 -0400
   139.2 +++ b/src/share/vm/opto/postaloc.cpp	Wed Apr 24 21:11:02 2013 -0400
   139.3 @@ -56,7 +56,7 @@
   139.4    int i;
   139.5    for( i=0; i < limit; i++ ) {
   139.6      if( def->is_Proj() && def->in(0)->is_Start() &&
   139.7 -        _matcher.is_save_on_entry(lrgs(n2lidx(def)).reg()) )
   139.8 +        _matcher.is_save_on_entry(lrgs(_lrg_map.live_range_id(def)).reg()))
   139.9        return true;              // Direct use of callee-save proj
  139.10      if( def->is_Copy() )        // Copies carry value through
  139.11        def = def->in(def->is_Copy());
  139.12 @@ -83,7 +83,7 @@
  139.13    // Count 1 if deleting an instruction from the current block
  139.14    if( oldb == current_block ) blk_adjust++;
  139.15    _cfg._bbs.map(old->_idx,NULL);
  139.16 -  OptoReg::Name old_reg = lrgs(n2lidx(old)).reg();
  139.17 +  OptoReg::Name old_reg = lrgs(_lrg_map.live_range_id(old)).reg();
  139.18    if( regnd && (*regnd)[old_reg]==old ) { // Instruction is currently available?
  139.19      value->map(old_reg,NULL);  // Yank from value/regnd maps
  139.20      regnd->map(old_reg,NULL);  // This register's value is now unknown
  139.21 @@ -164,7 +164,7 @@
  139.22    // Not every pair of physical registers are assignment compatible,
  139.23    // e.g. on sparc floating point registers are not assignable to integer
  139.24    // registers.
  139.25 -  const LRG &def_lrg = lrgs(n2lidx(def));
  139.26 +  const LRG &def_lrg = lrgs(_lrg_map.live_range_id(def));
  139.27    OptoReg::Name def_reg = def_lrg.reg();
  139.28    const RegMask &use_mask = n->in_RegMask(idx);
  139.29    bool can_use = ( RegMask::can_represent(def_reg) ? (use_mask.Member(def_reg) != 0)
  139.30 @@ -209,11 +209,12 @@
  139.31  // Skip through any number of copies (that don't mod oop-i-ness)
  139.32  Node *PhaseChaitin::skip_copies( Node *c ) {
  139.33    int idx = c->is_Copy();
  139.34 -  uint is_oop = lrgs(n2lidx(c))._is_oop;
  139.35 +  uint is_oop = lrgs(_lrg_map.live_range_id(c))._is_oop;
  139.36    while (idx != 0) {
  139.37      guarantee(c->in(idx) != NULL, "must not resurrect dead copy");
  139.38 -    if (lrgs(n2lidx(c->in(idx)))._is_oop != is_oop)
  139.39 +    if (lrgs(_lrg_map.live_range_id(c->in(idx)))._is_oop != is_oop) {
  139.40        break;  // casting copy, not the same value
  139.41 +    }
  139.42      c = c->in(idx);
  139.43      idx = c->is_Copy();
  139.44    }
  139.45 @@ -225,8 +226,8 @@
  139.46  int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ) {
  139.47    int blk_adjust = 0;
  139.48  
  139.49 -  uint nk_idx = n2lidx(n->in(k));
  139.50 -  OptoReg::Name nk_reg = lrgs(nk_idx ).reg();
  139.51 +  uint nk_idx = _lrg_map.live_range_id(n->in(k));
  139.52 +  OptoReg::Name nk_reg = lrgs(nk_idx).reg();
  139.53  
  139.54    // Remove obvious same-register copies
  139.55    Node *x = n->in(k);
  139.56 @@ -234,9 +235,13 @@
  139.57    while( (idx=x->is_Copy()) != 0 ) {
  139.58      Node *copy = x->in(idx);
  139.59      guarantee(copy != NULL, "must not resurrect dead copy");
  139.60 -    if( lrgs(n2lidx(copy)).reg() != nk_reg ) break;
  139.61 +    if(lrgs(_lrg_map.live_range_id(copy)).reg() != nk_reg) {
  139.62 +      break;
  139.63 +    }
  139.64      blk_adjust += use_prior_register(n,k,copy,current_block,value,regnd);
  139.65 -    if( n->in(k) != copy ) break; // Failed for some cutout?
  139.66 +    if (n->in(k) != copy) {
  139.67 +      break; // Failed for some cutout?
  139.68 +    }
  139.69      x = copy;                   // Progress, try again
  139.70    }
  139.71  
  139.72 @@ -256,7 +261,7 @@
  139.73  
  139.74    if (val == x && nk_idx != 0 &&
  139.75        regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
  139.76 -      n2lidx(x) == n2lidx(regnd[nk_reg])) {
  139.77 +      _lrg_map.live_range_id(x) == _lrg_map.live_range_id(regnd[nk_reg])) {
  139.78      // When rematerialzing nodes and stretching lifetimes, the
  139.79      // allocator will reuse the original def for multidef LRG instead
  139.80      // of the current reaching def because it can't know it's safe to
  139.81 @@ -270,7 +275,7 @@
  139.82    if (val == x) return blk_adjust; // No progress?
  139.83  
  139.84    int n_regs = RegMask::num_registers(val->ideal_reg());
  139.85 -  uint val_idx = n2lidx(val);
  139.86 +  uint val_idx = _lrg_map.live_range_id(val);
  139.87    OptoReg::Name val_reg = lrgs(val_idx).reg();
  139.88  
  139.89    // See if it happens to already be in the correct register!
  139.90 @@ -499,12 +504,12 @@
  139.91      for( j = 1; j < phi_dex; j++ ) {
  139.92        uint k;
  139.93        Node *phi = b->_nodes[j];
  139.94 -      uint pidx = n2lidx(phi);
  139.95 -      OptoReg::Name preg = lrgs(n2lidx(phi)).reg();
  139.96 +      uint pidx = _lrg_map.live_range_id(phi);
  139.97 +      OptoReg::Name preg = lrgs(_lrg_map.live_range_id(phi)).reg();
  139.98  
  139.99        // Remove copies remaining on edges.  Check for junk phi.
 139.100        Node *u = NULL;
 139.101 -      for( k=1; k<phi->req(); k++ ) {
 139.102 +      for (k = 1; k < phi->req(); k++) {
 139.103          Node *x = phi->in(k);
 139.104          if( phi != x && u != x ) // Found a different input
 139.105            u = u ? NodeSentinel : x; // Capture unique input, or NodeSentinel for 2nd input
 139.106 @@ -555,10 +560,10 @@
 139.107        // alive and well at the use (or else the allocator fubar'd).  Take
 139.108        // advantage of this info to set a reaching def for the use-reg.
 139.109        uint k;
 139.110 -      for( k = 1; k < n->req(); k++ ) {
 139.111 +      for (k = 1; k < n->req(); k++) {
 139.112          Node *def = n->in(k);   // n->in(k) is a USE; def is the DEF for this USE
 139.113          guarantee(def != NULL, "no disconnected nodes at this point");
 139.114 -        uint useidx = n2lidx(def); // useidx is the live range index for this USE
 139.115 +        uint useidx = _lrg_map.live_range_id(def); // useidx is the live range index for this USE
 139.116  
 139.117          if( useidx ) {
 139.118            OptoReg::Name ureg = lrgs(useidx).reg();
 139.119 @@ -566,7 +571,7 @@
 139.120              int idx;            // Skip occasional useless copy
 139.121              while( (idx=def->is_Copy()) != 0 &&
 139.122                     def->in(idx) != NULL &&  // NULL should not happen
 139.123 -                   ureg == lrgs(n2lidx(def->in(idx))).reg() )
 139.124 +                   ureg == lrgs(_lrg_map.live_range_id(def->in(idx))).reg())
 139.125                def = def->in(idx);
 139.126              Node *valdef = skip_copies(def); // tighten up val through non-useless copies
 139.127              value.map(ureg,valdef); // record improved reaching-def info
 139.128 @@ -594,8 +599,10 @@
 139.129          j -= elide_copy( n, k, b, value, regnd, two_adr!=k );
 139.130  
 139.131        // Unallocated Nodes define no registers
 139.132 -      uint lidx = n2lidx(n);
 139.133 -      if( !lidx ) continue;
 139.134 +      uint lidx = _lrg_map.live_range_id(n);
 139.135 +      if (!lidx) {
 139.136 +        continue;
 139.137 +      }
 139.138  
 139.139        // Update the register defined by this instruction
 139.140        OptoReg::Name nreg = lrgs(lidx).reg();
   140.1 --- a/src/share/vm/opto/reg_split.cpp	Wed Apr 24 20:55:28 2013 -0400
   140.2 +++ b/src/share/vm/opto/reg_split.cpp	Wed Apr 24 21:11:02 2013 -0400
   140.3 @@ -318,9 +318,13 @@
   140.4      for( uint i = 1; i < def->req(); i++ ) {
   140.5        Node *in = def->in(i);
   140.6        // Check for single-def (LRG cannot redefined)
   140.7 -      uint lidx = n2lidx(in);
   140.8 -      if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
   140.9 -      if (lrgs(lidx).is_singledef()) continue;
  140.10 +      uint lidx = _lrg_map.live_range_id(in);
  140.11 +      if (lidx >= _lrg_map.max_lrg_id()) {
  140.12 +        continue; // Value is a recent spill-copy
  140.13 +      }
  140.14 +      if (lrgs(lidx).is_singledef()) {
  140.15 +        continue;
  140.16 +      }
  140.17  
  140.18        Block *b_def = _cfg._bbs[def->_idx];
  140.19        int idx_def = b_def->find_node(def);
  140.20 @@ -344,26 +348,28 @@
  140.21    if( spill->req() > 1 ) {
  140.22      for( uint i = 1; i < spill->req(); i++ ) {
  140.23        Node *in = spill->in(i);
  140.24 -      uint lidx = Find_id(in);
  140.25 +      uint lidx = _lrg_map.find_id(in);
  140.26  
  140.27        // Walk backwards thru spill copy node intermediates
  140.28        if (walkThru) {
  140.29 -        while ( in->is_SpillCopy() && lidx >= _maxlrg ) {
  140.30 +        while (in->is_SpillCopy() && lidx >= _lrg_map.max_lrg_id()) {
  140.31            in = in->in(1);
  140.32 -          lidx = Find_id(in);
  140.33 +          lidx = _lrg_map.find_id(in);
  140.34          }
  140.35  
  140.36 -        if (lidx < _maxlrg && lrgs(lidx).is_multidef()) {
  140.37 +        if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).is_multidef()) {
  140.38            // walkThru found a multidef LRG, which is unsafe to use, so
  140.39            // just keep the original def used in the clone.
  140.40            in = spill->in(i);
  140.41 -          lidx = Find_id(in);
  140.42 +          lidx = _lrg_map.find_id(in);
  140.43          }
  140.44        }
  140.45  
  140.46 -      if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
  140.47 +      if (lidx < _lrg_map.max_lrg_id() && lrgs(lidx).reg() >= LRG::SPILL_REG) {
  140.48          Node *rdef = Reachblock[lrg2reach[lidx]];
  140.49 -        if( rdef ) spill->set_req(i,rdef);
  140.50 +        if (rdef) {
  140.51 +          spill->set_req(i, rdef);
  140.52 +        }
  140.53        }
  140.54      }
  140.55    }
  140.56 @@ -382,7 +388,7 @@
  140.57  #endif
  140.58    // See if the cloned def kills any flags, and copy those kills as well
  140.59    uint i = insidx+1;
  140.60 -  if( clone_projs( b, i, def, spill, maxlrg ) ) {
  140.61 +  if( clone_projs( b, i, def, spill, maxlrg) ) {
  140.62      // Adjust the point where we go hi-pressure
  140.63      if( i <= b->_ihrp_index ) b->_ihrp_index++;
  140.64      if( i <= b->_fhrp_index ) b->_fhrp_index++;
  140.65 @@ -424,17 +430,25 @@
  140.66  //------------------------------prompt_use---------------------------------
  140.67  // True if lidx is used before any real register is def'd in the block
  140.68  bool PhaseChaitin::prompt_use( Block *b, uint lidx ) {
  140.69 -  if( lrgs(lidx)._was_spilled2 ) return false;
  140.70 +  if (lrgs(lidx)._was_spilled2) {
  140.71 +    return false;
  140.72 +  }
  140.73  
  140.74    // Scan block for 1st use.
  140.75    for( uint i = 1; i <= b->end_idx(); i++ ) {
  140.76      Node *n = b->_nodes[i];
  140.77      // Ignore PHI use, these can be up or down
  140.78 -    if( n->is_Phi() ) continue;
  140.79 -    for( uint j = 1; j < n->req(); j++ )
  140.80 -      if( Find_id(n->in(j)) == lidx )
  140.81 +    if (n->is_Phi()) {
  140.82 +      continue;
  140.83 +    }
  140.84 +    for (uint j = 1; j < n->req(); j++) {
  140.85 +      if (_lrg_map.find_id(n->in(j)) == lidx) {
  140.86          return true;          // Found 1st use!
  140.87 -    if( n->out_RegMask().is_NotEmpty() ) return false;
  140.88 +      }
  140.89 +    }
  140.90 +    if (n->out_RegMask().is_NotEmpty()) {
  140.91 +      return false;
  140.92 +    }
  140.93    }
  140.94    return false;
  140.95  }
  140.96 @@ -464,23 +478,23 @@
  140.97    bool                 u1, u2, u3;
  140.98    Block               *b, *pred;
  140.99    PhiNode             *phi;
 140.100 -  GrowableArray<uint>  lidxs(split_arena, _maxlrg, 0, 0);
 140.101 +  GrowableArray<uint>  lidxs(split_arena, maxlrg, 0, 0);
 140.102  
 140.103    // Array of counters to count splits per live range
 140.104 -  GrowableArray<uint>  splits(split_arena, _maxlrg, 0, 0);
 140.105 +  GrowableArray<uint>  splits(split_arena, maxlrg, 0, 0);
 140.106  
 140.107  #define NEW_SPLIT_ARRAY(type, size)\
 140.108    (type*) split_arena->allocate_bytes((size) * sizeof(type))
 140.109  
 140.110    //----------Setup Code----------
 140.111    // Create a convenient mapping from lrg numbers to reaches/leaves indices
 140.112 -  uint *lrg2reach = NEW_SPLIT_ARRAY( uint, _maxlrg );
 140.113 +  uint *lrg2reach = NEW_SPLIT_ARRAY(uint, maxlrg);
 140.114    // Keep track of DEFS & Phis for later passes
 140.115    defs = new Node_List();
 140.116    phis = new Node_List();
 140.117    // Gather info on which LRG's are spilling, and build maps
 140.118 -  for( bidx = 1; bidx < _maxlrg; bidx++ ) {
 140.119 -    if( lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG ) {
 140.120 +  for (bidx = 1; bidx < maxlrg; bidx++) {
 140.121 +    if (lrgs(bidx).alive() && lrgs(bidx).reg() >= LRG::SPILL_REG) {
 140.122        assert(!lrgs(bidx).mask().is_AllStack(),"AllStack should color");
 140.123        lrg2reach[bidx] = spill_cnt;
 140.124        spill_cnt++;
 140.125 @@ -629,7 +643,7 @@
 140.126            break;
 140.127          }
 140.128          // must be looking at a phi
 140.129 -        if( Find_id(n1) == lidxs.at(slidx) ) {
 140.130 +        if (_lrg_map.find_id(n1) == lidxs.at(slidx)) {
 140.131            // found the necessary phi
 140.132            needs_phi = false;
 140.133            has_phi = true;
 140.134 @@ -651,11 +665,11 @@
 140.135            Reachblock[slidx] = phi;
 140.136  
 140.137            // add node to block & node_to_block mapping
 140.138 -          insert_proj( b, insidx++, phi, maxlrg++ );
 140.139 +          insert_proj(b, insidx++, phi, maxlrg++);
 140.140            non_phi++;
 140.141            // Reset new phi's mapping to be the spilling live range
 140.142 -          _names.map(phi->_idx, lidx);
 140.143 -          assert(Find_id(phi) == lidx,"Bad update on Union-Find mapping");
 140.144 +          _lrg_map.map(phi->_idx, lidx);
 140.145 +          assert(_lrg_map.find_id(phi) == lidx, "Bad update on Union-Find mapping");
 140.146          }  // end if not found correct phi
 140.147          // Here you have either found or created the Phi, so record it
 140.148          assert(phi != NULL,"Must have a Phi Node here");
 140.149 @@ -721,12 +735,12 @@
 140.150      for( insidx = 1; insidx <= b->end_idx(); insidx++ ) {
 140.151        Node *n = b->_nodes[insidx];
 140.152        // Find the defining Node's live range index
 140.153 -      uint defidx = Find_id(n);
 140.154 +      uint defidx = _lrg_map.find_id(n);
 140.155        uint cnt = n->req();
 140.156  
 140.157 -      if( n->is_Phi() ) {
 140.158 +      if (n->is_Phi()) {
 140.159          // Skip phi nodes after removing dead copies.
 140.160 -        if( defidx < _maxlrg ) {
 140.161 +        if (defidx < _lrg_map.max_lrg_id()) {
 140.162            // Check for useless Phis.  These appear if we spill, then
 140.163            // coalesce away copies.  Dont touch Phis in spilling live
 140.164            // ranges; they are busy getting modifed in this pass.
 140.165 @@ -744,8 +758,8 @@
 140.166                }
 140.167              }
 140.168              assert( u, "at least 1 valid input expected" );
 140.169 -            if( i >= cnt ) {    // Found one unique input
 140.170 -              assert(Find_id(n) == Find_id(u), "should be the same lrg");
 140.171 +            if (i >= cnt) {    // Found one unique input
 140.172 +              assert(_lrg_map.find_id(n) == _lrg_map.find_id(u), "should be the same lrg");
 140.173                n->replace_by(u); // Then replace with unique input
 140.174                n->disconnect_inputs(NULL, C);
 140.175                b->_nodes.remove(insidx);
 140.176 @@ -793,16 +807,24 @@
 140.177                  while( insert_point > 0 ) {
 140.178                    Node *n = b->_nodes[insert_point];
 140.179                    // Hit top of block?  Quit going backwards
 140.180 -                  if( n->is_Phi() ) break;
 140.181 +                  if (n->is_Phi()) {
 140.182 +                    break;
 140.183 +                  }
 140.184                    // Found a def?  Better split after it.
 140.185 -                  if( n2lidx(n) == lidx ) break;
 140.186 +                  if (_lrg_map.live_range_id(n) == lidx) {
 140.187 +                    break;
 140.188 +                  }
 140.189                    // Look for a use
 140.190                    uint i;
 140.191 -                  for( i = 1; i < n->req(); i++ )
 140.192 -                    if( n2lidx(n->in(i)) == lidx )
 140.193 +                  for( i = 1; i < n->req(); i++ ) {
 140.194 +                    if (_lrg_map.live_range_id(n->in(i)) == lidx) {
 140.195                        break;
 140.196 +                    }
 140.197 +                  }
 140.198                    // Found a use?  Better split after it.
 140.199 -                  if( i < n->req() ) break;
 140.200 +                  if (i < n->req()) {
 140.201 +                    break;
 140.202 +                  }
 140.203                    insert_point--;
 140.204                  }
 140.205                  uint orig_eidx = b->end_idx();
 140.206 @@ -812,8 +834,9 @@
 140.207                    return 0;
 140.208                  }
 140.209                  // Spill of NULL check mem op goes into the following block.
 140.210 -                if (b->end_idx() > orig_eidx)
 140.211 +                if (b->end_idx() > orig_eidx) {
 140.212                    insidx++;
 140.213 +                }
 140.214                }
 140.215                // This is a new DEF, so update UP
 140.216                UPblock[slidx] = false;
 140.217 @@ -832,13 +855,13 @@
 140.218        }  // end if crossing HRP Boundry
 140.219  
 140.220        // If the LRG index is oob, then this is a new spillcopy, skip it.
 140.221 -      if( defidx >= _maxlrg ) {
 140.222 +      if (defidx >= _lrg_map.max_lrg_id()) {
 140.223          continue;
 140.224        }
 140.225        LRG &deflrg = lrgs(defidx);
 140.226        uint copyidx = n->is_Copy();
 140.227        // Remove coalesced copy from CFG
 140.228 -      if( copyidx && defidx == n2lidx(n->in(copyidx)) ) {
 140.229 +      if (copyidx && defidx == _lrg_map.live_range_id(n->in(copyidx))) {
 140.230          n->replace_by( n->in(copyidx) );
 140.231          n->set_req( copyidx, NULL );
 140.232          b->_nodes.remove(insidx--);
 140.233 @@ -864,13 +887,13 @@
 140.234            // If inpidx > old_last, then one of these new inputs is being
 140.235            // handled. Skip the derived part of the pair, but process
 140.236            // the base like any other input.
 140.237 -          if( inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED ) {
 140.238 +          if (inpidx > old_last && ((inpidx - oopoff) & 1) == DERIVED) {
 140.239              continue;  // skip derived_debug added below
 140.240            }
 140.241            // Get lidx of input
 140.242 -          uint useidx = Find_id(n->in(inpidx));
 140.243 +          uint useidx = _lrg_map.find_id(n->in(inpidx));
 140.244            // Not a brand-new split, and it is a spill use
 140.245 -          if( useidx < _maxlrg && lrgs(useidx).reg() >= LRG::SPILL_REG ) {
 140.246 +          if (useidx < _lrg_map.max_lrg_id() && lrgs(useidx).reg() >= LRG::SPILL_REG) {
 140.247              // Check for valid reaching DEF
 140.248              slidx = lrg2reach[useidx];
 140.249              Node *def = Reachblock[slidx];
 140.250 @@ -886,7 +909,7 @@
 140.251                if (def == NULL || C->check_node_count(NodeLimitFudgeFactor, out_of_nodes)) {
 140.252                  return 0;
 140.253                }
 140.254 -              _names.extend(def->_idx,0);
 140.255 +              _lrg_map.extend(def->_idx, 0);
 140.256                _cfg._bbs.map(def->_idx,b);
 140.257                n->set_req(inpidx, def);
 140.258                continue;
 140.259 @@ -1186,10 +1209,10 @@
 140.260        // ********** Split Left Over Mem-Mem Moves **********
 140.261        // Check for mem-mem copies and split them now.  Do not do this
 140.262        // to copies about to be spilled; they will be Split shortly.
 140.263 -      if( copyidx ) {
 140.264 +      if (copyidx) {
 140.265          Node *use = n->in(copyidx);
 140.266 -        uint useidx = Find_id(use);
 140.267 -        if( useidx < _maxlrg &&       // This is not a new split
 140.268 +        uint useidx = _lrg_map.find_id(use);
 140.269 +        if (useidx < _lrg_map.max_lrg_id() &&       // This is not a new split
 140.270              OptoReg::is_stack(deflrg.reg()) &&
 140.271              deflrg.reg() < LRG::SPILL_REG ) { // And DEF is from stack
 140.272            LRG &uselrg = lrgs(useidx);
 140.273 @@ -1228,7 +1251,7 @@
 140.274          uint member;
 140.275          IndexSetIterator isi(liveout);
 140.276          while ((member = isi.next()) != 0) {
 140.277 -          assert(defidx != Find_const(member), "Live out member has not been compressed");
 140.278 +          assert(defidx != _lrg_map.find_const(member), "Live out member has not been compressed");
 140.279          }
 140.280  #endif
 140.281          Reachblock[slidx] = NULL;
 140.282 @@ -1261,7 +1284,7 @@
 140.283      assert(phi->is_Phi(),"This list must only contain Phi Nodes");
 140.284      Block *b = _cfg._bbs[phi->_idx];
 140.285      // Grab the live range number
 140.286 -    uint lidx = Find_id(phi);
 140.287 +    uint lidx = _lrg_map.find_id(phi);
 140.288      uint slidx = lrg2reach[lidx];
 140.289      // Update node to lidx map
 140.290      new_lrg(phi, maxlrg++);
 140.291 @@ -1296,11 +1319,13 @@
 140.292          int insert = pred->end_idx();
 140.293          while (insert >= 1 &&
 140.294                 pred->_nodes[insert - 1]->is_SpillCopy() &&
 140.295 -               Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
 140.296 +               _lrg_map.find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
 140.297            insert--;
 140.298          }
 140.299 -        def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false );
 140.300 -        if( !def ) return 0;    // Bail out
 140.301 +        def = split_Rematerialize(def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false);
 140.302 +        if (!def) {
 140.303 +          return 0;    // Bail out
 140.304 +        }
 140.305        }
 140.306        // Update the Phi's input edge array
 140.307        phi->set_req(i,def);
 140.308 @@ -1316,7 +1341,7 @@
 140.309      }  // End for all inputs to the Phi
 140.310    }  // End for all Phi Nodes
 140.311    // Update _maxlrg to save Union asserts
 140.312 -  _maxlrg = maxlrg;
 140.313 +  _lrg_map.set_max_lrg_id(maxlrg);
 140.314  
 140.315  
 140.316    //----------PASS 3----------
 140.317 @@ -1328,47 +1353,51 @@
 140.318      for( uint i = 1; i < phi->req(); i++ ) {
 140.319        // Grab the input node
 140.320        Node *n = phi->in(i);
 140.321 -      assert( n, "" );
 140.322 -      uint lidx = Find(n);
 140.323 -      uint pidx = Find(phi);
 140.324 -      if( lidx < pidx )
 140.325 +      assert(n, "node should exist");
 140.326 +      uint lidx = _lrg_map.find(n);
 140.327 +      uint pidx = _lrg_map.find(phi);
 140.328 +      if (lidx < pidx) {
 140.329          Union(n, phi);
 140.330 -      else if( lidx > pidx )
 140.331 +      }
 140.332 +      else if(lidx > pidx) {
 140.333          Union(phi, n);
 140.334 +      }
 140.335      }  // End for all inputs to the Phi Node
 140.336    }  // End for all Phi Nodes
 140.337    // Now union all two address instructions
 140.338 -  for( insidx = 0; insidx < defs->size(); insidx++ ) {
 140.339 +  for (insidx = 0; insidx < defs->size(); insidx++) {
 140.340      // Grab the def
 140.341      n1 = defs->at(insidx);
 140.342      // Set new lidx for DEF & handle 2-addr instructions
 140.343 -    if( n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0) ) {
 140.344 -      assert( Find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
 140.345 +    if (n1->is_Mach() && ((twoidx = n1->as_Mach()->two_adr()) != 0)) {
 140.346 +      assert(_lrg_map.find(n1->in(twoidx)) < maxlrg,"Assigning bad live range index");
 140.347        // Union the input and output live ranges
 140.348 -      uint lr1 = Find(n1);
 140.349 -      uint lr2 = Find(n1->in(twoidx));
 140.350 -      if( lr1 < lr2 )
 140.351 +      uint lr1 = _lrg_map.find(n1);
 140.352 +      uint lr2 = _lrg_map.find(n1->in(twoidx));
 140.353 +      if (lr1 < lr2) {
 140.354          Union(n1, n1->in(twoidx));
 140.355 -      else if( lr1 > lr2 )
 140.356 +      }
 140.357 +      else if (lr1 > lr2) {
 140.358          Union(n1->in(twoidx), n1);
 140.359 +      }
 140.360      }  // End if two address
 140.361    }  // End for all defs
 140.362    // DEBUG
 140.363  #ifdef ASSERT
 140.364    // Validate all live range index assignments
 140.365 -  for( bidx = 0; bidx < _cfg._num_blocks; bidx++ ) {
 140.366 +  for (bidx = 0; bidx < _cfg._num_blocks; bidx++) {
 140.367      b  = _cfg._blocks[bidx];
 140.368 -    for( insidx = 0; insidx <= b->end_idx(); insidx++ ) {
 140.369 +    for (insidx = 0; insidx <= b->end_idx(); insidx++) {
 140.370        Node *n = b->_nodes[insidx];
 140.371 -      uint defidx = Find(n);
 140.372 -      assert(defidx < _maxlrg,"Bad live range index in Split");
 140.373 +      uint defidx = _lrg_map.find(n);
 140.374 +      assert(defidx < _lrg_map.max_lrg_id(), "Bad live range index in Split");
 140.375        assert(defidx < maxlrg,"Bad live range index in Split");
 140.376      }
 140.377    }
 140.378    // Issue a warning if splitting made no progress
 140.379    int noprogress = 0;
 140.380 -  for( slidx = 0; slidx < spill_cnt; slidx++ ) {
 140.381 -    if( PrintOpto && WizardMode && splits.at(slidx) == 0 ) {
 140.382 +  for (slidx = 0; slidx < spill_cnt; slidx++) {
 140.383 +    if (PrintOpto && WizardMode && splits.at(slidx) == 0) {
 140.384        tty->print_cr("Failed to split live range %d", lidxs.at(slidx));
 140.385        //BREAKPOINT;
 140.386      }
   141.1 --- a/src/share/vm/opto/regalloc.hpp	Wed Apr 24 20:55:28 2013 -0400
   141.2 +++ b/src/share/vm/opto/regalloc.hpp	Wed Apr 24 21:11:02 2013 -0400
   141.3 @@ -113,7 +113,7 @@
   141.4    OptoReg::Name offset2reg( int stk_offset ) const;
   141.5  
   141.6    // Get the register encoding associated with the Node
   141.7 -  int get_encode( const Node *n ) const {
   141.8 +  int get_encode(const Node *n) const {
   141.9      assert( n->_idx < _node_regs_max_index, "Exceeded _node_regs array");
  141.10      OptoReg::Name first = _node_regs[n->_idx].first();
  141.11      OptoReg::Name second = _node_regs[n->_idx].second();
  141.12 @@ -122,15 +122,6 @@
  141.13      return Matcher::_regEncode[first];
  141.14    }
  141.15  
  141.16 -  // Platform dependent hook for actions prior to allocation
  141.17 -  void  pd_preallocate_hook();
  141.18 -
  141.19 -#ifdef ASSERT
  141.20 -  // Platform dependent hook for verification after allocation.  Will
  141.21 -  // only get called when compiling with asserts.
  141.22 -  void  pd_postallocate_verify_hook();
  141.23 -#endif
  141.24 -
  141.25  #ifndef PRODUCT
  141.26    static int _total_framesize;
  141.27    static int _max_framesize;
   142.1 --- a/src/share/vm/opto/subnode.cpp	Wed Apr 24 20:55:28 2013 -0400
   142.2 +++ b/src/share/vm/opto/subnode.cpp	Wed Apr 24 21:11:02 2013 -0400
   142.3 @@ -1078,16 +1078,6 @@
   142.4    return (_test._test == b->_test._test);
   142.5  }
   142.6  
   142.7 -//------------------------------clone_cmp--------------------------------------
   142.8 -// Clone a compare/bool tree
   142.9 -static Node *clone_cmp( Node *cmp, Node *cmp1, Node *cmp2, PhaseGVN *gvn, BoolTest::mask test ) {
  142.10 -  Node *ncmp = cmp->clone();
  142.11 -  ncmp->set_req(1,cmp1);
  142.12 -  ncmp->set_req(2,cmp2);
  142.13 -  ncmp = gvn->transform( ncmp );
  142.14 -  return new (gvn->C) BoolNode( ncmp, test );
  142.15 -}
  142.16 -
  142.17  //-------------------------------make_predicate--------------------------------
  142.18  Node* BoolNode::make_predicate(Node* test_value, PhaseGVN* phase) {
  142.19    if (test_value->is_Con())   return test_value;
   143.1 --- a/src/share/vm/prims/jni.cpp	Wed Apr 24 20:55:28 2013 -0400
   143.2 +++ b/src/share/vm/prims/jni.cpp	Wed Apr 24 21:11:02 2013 -0400
   143.3 @@ -1289,32 +1289,6 @@
   143.4    JNI_NONVIRTUAL
   143.5  };
   143.6  
   143.7 -static methodHandle jni_resolve_interface_call(Handle recv, methodHandle method, TRAPS) {
   143.8 -  assert(!method.is_null() , "method should not be null");
   143.9 -
  143.10 -  KlassHandle recv_klass; // Default to NULL (use of ?: can confuse gcc)
  143.11 -  if (recv.not_null()) recv_klass = KlassHandle(THREAD, recv->klass());
  143.12 -  KlassHandle spec_klass (THREAD, method->method_holder());
  143.13 -  Symbol*  name  = method->name();
  143.14 -  Symbol*  signature  = method->signature();
  143.15 -  CallInfo info;
  143.16 -  LinkResolver::resolve_interface_call(info, recv, recv_klass,  spec_klass, name, signature, KlassHandle(), false, true, CHECK_(methodHandle()));
  143.17 -  return info.selected_method();
  143.18 -}
  143.19 -
  143.20 -static methodHandle jni_resolve_virtual_call(Handle recv, methodHandle method, TRAPS) {
  143.21 -  assert(!method.is_null() , "method should not be null");
  143.22 -
  143.23 -  KlassHandle recv_klass; // Default to NULL (use of ?: can confuse gcc)
  143.24 -  if (recv.not_null()) recv_klass = KlassHandle(THREAD, recv->klass());
  143.25 -  KlassHandle spec_klass (THREAD, method->method_holder());
  143.26 -  Symbol*  name  = method->name();
  143.27 -  Symbol*  signature  = method->signature();
  143.28 -  CallInfo info;
  143.29 -  LinkResolver::resolve_virtual_call(info, recv, recv_klass,  spec_klass, name, signature, KlassHandle(), false, true, CHECK_(methodHandle()));
  143.30 -  return info.selected_method();
  143.31 -}
  143.32 -
  143.33  
  143.34  
  143.35  static void jni_invoke_static(JNIEnv *env, JavaValue* result, jobject receiver, JNICallType call_type, jmethodID method_id, JNI_ArgumentPusher *args, TRAPS) {
  143.36 @@ -5053,6 +5027,7 @@
  143.37  void execute_internal_vm_tests() {
  143.38    if (ExecuteInternalVMTests) {
  143.39      tty->print_cr("Running internal VM tests");
  143.40 +    run_unit_test(GlobalDefinitions::test_globals());
  143.41      run_unit_test(arrayOopDesc::test_max_array_length());
  143.42      run_unit_test(CollectedHeap::test_is_in());
  143.43      run_unit_test(QuickSort::test_quick_sort());
   144.1 --- a/src/share/vm/prims/jniCheck.hpp	Wed Apr 24 20:55:28 2013 -0400
   144.2 +++ b/src/share/vm/prims/jniCheck.hpp	Wed Apr 24 21:11:02 2013 -0400
   144.3 @@ -33,7 +33,7 @@
   144.4    // within IN_VM macro), one to be called when in NATIVE state.
   144.5  
   144.6    // When in VM state:
   144.7 -  static void ReportJNIFatalError(JavaThread* thr, const char *msg) {
   144.8 +  static inline void ReportJNIFatalError(JavaThread* thr, const char *msg) {
   144.9      tty->print_cr("FATAL ERROR in native method: %s", msg);
  144.10      thr->print_stack();
  144.11      os::abort(true);
   145.1 --- a/src/share/vm/prims/whitebox.cpp	Wed Apr 24 20:55:28 2013 -0400
   145.2 +++ b/src/share/vm/prims/whitebox.cpp	Wed Apr 24 21:11:02 2013 -0400
   145.3 @@ -49,6 +49,7 @@
   145.4  #endif // INCLUDE_NMT
   145.5  
   145.6  #include "compiler/compileBroker.hpp"
   145.7 +#include "runtime/compilationPolicy.hpp"
   145.8  
   145.9  bool WhiteBox::_used = false;
  145.10  
  145.11 @@ -118,45 +119,46 @@
  145.12  #endif // INCLUDE_ALL_GCS
  145.13  
  145.14  #ifdef INCLUDE_NMT
  145.15 -// Keep track of the 3 allocations in NMTAllocTest so we can free them later
  145.16 -// on and verify that they're not visible anymore
  145.17 -static void* nmtMtTest1 = NULL, *nmtMtTest2 = NULL, *nmtMtTest3 = NULL;
  145.18 -
  145.19  // Alloc memory using the test memory type so that we can use that to see if
  145.20  // NMT picks it up correctly
  145.21 -WB_ENTRY(jboolean, WB_NMTAllocTest(JNIEnv* env))
  145.22 -  void *mem;
  145.23 +WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size))
  145.24 +  jlong addr = 0;
  145.25  
  145.26 -  if (!MemTracker::is_on() || MemTracker::shutdown_in_progress()) {
  145.27 -    return false;
  145.28 +  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
  145.29 +    addr = (jlong)(uintptr_t)os::malloc(size, mtTest);
  145.30    }
  145.31  
  145.32 -  // Allocate 2 * 128k + 256k + 1024k and free the 1024k one to make sure we track
  145.33 -  // everything correctly. Total should be 512k held alive.
  145.34 -  nmtMtTest1 = os::malloc(128 * 1024, mtTest);
  145.35 -  mem = os::malloc(1024 * 1024, mtTest);
  145.36 -  nmtMtTest2 = os::malloc(256 * 1024, mtTest);
  145.37 -  os::free(mem, mtTest);
  145.38 -  nmtMtTest3 = os::malloc(128 * 1024, mtTest);
  145.39 -
  145.40 -  return true;
  145.41 +  return addr;
  145.42  WB_END
  145.43  
  145.44  // Free the memory allocated by NMTAllocTest
  145.45 -WB_ENTRY(jboolean, WB_NMTFreeTestMemory(JNIEnv* env))
  145.46 +WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
  145.47 +  os::free((void*)(uintptr_t)mem, mtTest);
  145.48 +WB_END
  145.49  
  145.50 -  if (nmtMtTest1 == NULL || nmtMtTest2 == NULL || nmtMtTest3 == NULL) {
  145.51 -    return false;
  145.52 +WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
  145.53 +  jlong addr = 0;
  145.54 +
  145.55 +  if (MemTracker::is_on() && !MemTracker::shutdown_in_progress()) {
  145.56 +    addr = (jlong)(uintptr_t)os::reserve_memory(size);
  145.57 +    MemTracker::record_virtual_memory_type((address)addr, mtTest);
  145.58    }
  145.59  
  145.60 -  os::free(nmtMtTest1, mtTest);
  145.61 -  nmtMtTest1 = NULL;
  145.62 -  os::free(nmtMtTest2, mtTest);
  145.63 -  nmtMtTest2 = NULL;
  145.64 -  os::free(nmtMtTest3, mtTest);
  145.65 -  nmtMtTest3 = NULL;
  145.66 +  return addr;
  145.67 +WB_END
  145.68  
  145.69 -  return true;
  145.70 +
  145.71 +WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
  145.72 +  os::commit_memory((char *)(uintptr_t)addr, size);
  145.73 +  MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
  145.74 +WB_END
  145.75 +
  145.76 +WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
  145.77 +  os::uncommit_memory((char *)(uintptr_t)addr, size);
  145.78 +WB_END
  145.79 +
  145.80 +WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
  145.81 +  os::release_memory((char *)(uintptr_t)addr, size);
  145.82  WB_END
  145.83  
  145.84  // Block until the current generation of NMT data to be merged, used to reliably test the NMT feature
  145.85 @@ -213,11 +215,11 @@
  145.86    return (code->is_alive() && !code->is_marked_for_deoptimization());
  145.87  WB_END
  145.88  
  145.89 -WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method))
  145.90 +WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
  145.91    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
  145.92    MutexLockerEx mu(Compile_lock);
  145.93    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
  145.94 -  return !mh->is_not_compilable();
  145.95 +  return CompilationPolicy::can_be_compiled(mh, comp_level);
  145.96  WB_END
  145.97  
  145.98  WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobject method))
  145.99 @@ -235,13 +237,13 @@
 145.100  WB_END
 145.101  
 145.102  
 145.103 -WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method))
 145.104 +WB_ENTRY(void, WB_MakeMethodNotCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level))
 145.105    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 145.106    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 145.107 -  mh->set_not_compilable();
 145.108 +  mh->set_not_compilable(comp_level, true /* report */, "WhiteBox");
 145.109  WB_END
 145.110  
 145.111 -WB_ENTRY(jboolean, WB_SetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
 145.112 +WB_ENTRY(jboolean, WB_TestSetDontInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
 145.113    jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 145.114    methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 145.115    bool result = mh->dont_inline();
 145.116 @@ -254,6 +256,57 @@
 145.117           CompileBroker::queue_size(CompLevel_full_profile) /* C1 */;
 145.118  WB_END
 145.119  
 145.120 +
 145.121 +WB_ENTRY(jboolean, WB_TestSetForceInlineMethod(JNIEnv* env, jobject o, jobject method, jboolean value))
 145.122 +  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 145.123 +  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 145.124 +  bool result = mh->force_inline();
 145.125 +  mh->set_force_inline(value == JNI_TRUE);
 145.126 +  return result;
 145.127 +WB_END
 145.128 +
 145.129 +WB_ENTRY(jboolean, WB_EnqueueMethodForCompilation(JNIEnv* env, jobject o, jobject method, jint comp_level))
 145.130 +  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 145.131 +  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 145.132 +  nmethod* nm = CompileBroker::compile_method(mh, InvocationEntryBci, comp_level, mh, mh->invocation_count(), "WhiteBox", THREAD);
 145.133 +  MutexLockerEx mu(Compile_lock);
 145.134 +  return (mh->queued_for_compilation() || nm != NULL);
 145.135 +WB_END
 145.136 +
 145.137 +WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
 145.138 +  jmethodID jmid = reflected_method_to_jmid(thread, env, method);
 145.139 +  methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
 145.140 +  MutexLockerEx mu(Compile_lock);
 145.141 +  MethodData* mdo = mh->method_data();
 145.142 +  MethodCounters* mcs = mh->method_counters();
 145.143 +
 145.144 +  if (mdo != NULL) {
 145.145 +    mdo->init();
 145.146 +    ResourceMark rm;
 145.147 +    int arg_count = mdo->method()->size_of_parameters();
 145.148 +    for (int i = 0; i < arg_count; i++) {
 145.149 +      mdo->set_arg_modified(i, 0);
 145.150 +    }
 145.151 +  }
 145.152 +
 145.153 +  mh->clear_not_c1_compilable();
 145.154 +  mh->clear_not_c2_compilable();
 145.155 +  mh->clear_not_c2_osr_compilable();
 145.156 +  NOT_PRODUCT(mh->set_compiled_invocation_count(0));
 145.157 +  if (mcs != NULL) {
 145.158 +    mcs->backedge_counter()->init();
 145.159 +    mcs->invocation_counter()->init();
 145.160 +    mcs->set_interpreter_invocation_count(0);
 145.161 +    mcs->set_interpreter_throwout_count(0);
 145.162 +
 145.163 +#ifdef TIERED
 145.164 +    mcs->set_rate(0.0F);
 145.165 +    mh->set_prev_event_count(0, THREAD);
 145.166 +    mh->set_prev_time(0, THREAD);
 145.167 +#endif
 145.168 +  }
 145.169 +WB_END
 145.170 +
 145.171  WB_ENTRY(jboolean, WB_IsInStringTable(JNIEnv* env, jobject o, jstring javaString))
 145.172    ResourceMark rm(THREAD);
 145.173    int len;
 145.174 @@ -271,7 +324,6 @@
 145.175    Universe::heap()->collect(GCCause::_last_ditch_collection);
 145.176  WB_END
 145.177  
 145.178 -
 145.179  //Some convenience methods to deal with objects from java
 145.180  int WhiteBox::offset_for_field(const char* field_name, oop object,
 145.181      Symbol* signature_symbol) {
 145.182 @@ -340,27 +392,37 @@
 145.183    {CC"g1RegionSize",       CC"()I",                   (void*)&WB_G1RegionSize      },
 145.184  #endif // INCLUDE_ALL_GCS
 145.185  #ifdef INCLUDE_NMT
 145.186 -  {CC"NMTAllocTest",       CC"()Z",                   (void*)&WB_NMTAllocTest      },
 145.187 -  {CC"NMTFreeTestMemory",  CC"()Z",                   (void*)&WB_NMTFreeTestMemory },
 145.188 -  {CC"NMTWaitForDataMerge",CC"()Z",                   (void*)&WB_NMTWaitForDataMerge},
 145.189 +  {CC"NMTMalloc",           CC"(J)J",                 (void*)&WB_NMTMalloc          },
 145.190 +  {CC"NMTFree",             CC"(J)V",                 (void*)&WB_NMTFree            },
 145.191 +  {CC"NMTReserveMemory",    CC"(J)J",                 (void*)&WB_NMTReserveMemory   },
 145.192 +  {CC"NMTCommitMemory",     CC"(JJ)V",                (void*)&WB_NMTCommitMemory    },
 145.193 +  {CC"NMTUncommitMemory",   CC"(JJ)V",                (void*)&WB_NMTUncommitMemory  },
 145.194 +  {CC"NMTReleaseMemory",    CC"(JJ)V",                (void*)&WB_NMTReleaseMemory   },
 145.195 +  {CC"NMTWaitForDataMerge", CC"()Z",                  (void*)&WB_NMTWaitForDataMerge},
 145.196  #endif // INCLUDE_NMT
 145.197    {CC"deoptimizeAll",      CC"()V",                   (void*)&WB_DeoptimizeAll     },
 145.198 -  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Method;)I",
 145.199 +  {CC"deoptimizeMethod",   CC"(Ljava/lang/reflect/Executable;)I",
 145.200                                                        (void*)&WB_DeoptimizeMethod  },
 145.201 -  {CC"isMethodCompiled",   CC"(Ljava/lang/reflect/Method;)Z",
 145.202 +  {CC"isMethodCompiled",   CC"(Ljava/lang/reflect/Executable;)Z",
 145.203                                                        (void*)&WB_IsMethodCompiled  },
 145.204 -  {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Method;)Z",
 145.205 +  {CC"isMethodCompilable", CC"(Ljava/lang/reflect/Executable;I)Z",
 145.206                                                        (void*)&WB_IsMethodCompilable},
 145.207    {CC"isMethodQueuedForCompilation",
 145.208 -      CC"(Ljava/lang/reflect/Method;)Z",              (void*)&WB_IsMethodQueuedForCompilation},
 145.209 +      CC"(Ljava/lang/reflect/Executable;)Z",          (void*)&WB_IsMethodQueuedForCompilation},
 145.210    {CC"makeMethodNotCompilable",
 145.211 -      CC"(Ljava/lang/reflect/Method;)V",              (void*)&WB_MakeMethodNotCompilable},
 145.212 -  {CC"setDontInlineMethod",
 145.213 -      CC"(Ljava/lang/reflect/Method;Z)Z",             (void*)&WB_SetDontInlineMethod},
 145.214 +      CC"(Ljava/lang/reflect/Executable;I)V",         (void*)&WB_MakeMethodNotCompilable},
 145.215 +  {CC"testSetDontInlineMethod",
 145.216 +      CC"(Ljava/lang/reflect/Executable;Z)Z",         (void*)&WB_TestSetDontInlineMethod},
 145.217    {CC"getMethodCompilationLevel",
 145.218 -      CC"(Ljava/lang/reflect/Method;)I",              (void*)&WB_GetMethodCompilationLevel},
 145.219 +      CC"(Ljava/lang/reflect/Executable;)I",          (void*)&WB_GetMethodCompilationLevel},
 145.220    {CC"getCompileQueuesSize",
 145.221        CC"()I",                                        (void*)&WB_GetCompileQueuesSize},
 145.222 +  {CC"testSetForceInlineMethod",
 145.223 +      CC"(Ljava/lang/reflect/Executable;Z)Z",         (void*)&WB_TestSetForceInlineMethod},
 145.224 +  {CC"enqueueMethodForCompilation",
 145.225 +      CC"(Ljava/lang/reflect/Executable;I)Z",         (void*)&WB_EnqueueMethodForCompilation},
 145.226 +  {CC"clearMethodState",
 145.227 +      CC"(Ljava/lang/reflect/Executable;)V",          (void*)&WB_ClearMethodState},
 145.228    {CC"isInStringTable",   CC"(Ljava/lang/String;)Z",  (void*)&WB_IsInStringTable  },
 145.229    {CC"fullGC",   CC"()V",                             (void*)&WB_FullGC },
 145.230  };
   146.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Apr 24 20:55:28 2013 -0400
   146.2 +++ b/src/share/vm/runtime/arguments.cpp	Wed Apr 24 21:11:02 2013 -0400
   146.3 @@ -1754,11 +1754,15 @@
   146.4    return false;
   146.5  }
   146.6  
   146.7 +#if !INCLUDE_ALL_GCS
   146.8 +#ifdef ASSERT
   146.9  static bool verify_serial_gc_flags() {
  146.10    return (UseSerialGC &&
  146.11          !(UseParNewGC || (UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC ||
  146.12            UseParallelGC || UseParallelOldGC));
  146.13  }
  146.14 +#endif // ASSERT
  146.15 +#endif // INCLUDE_ALL_GCS
  146.16  
  146.17  // check if do gclog rotation
  146.18  // +UseGCLogFileRotation is a must,
  146.19 @@ -2006,11 +2010,12 @@
  146.20    // than just disable the lock verification. This will be fixed under
  146.21    // bug 4788986.
  146.22    if (UseConcMarkSweepGC && FLSVerifyAllHeapReferences) {
  146.23 -    if (VerifyGCStartAt == 0) {
  146.24 +    if (VerifyDuringStartup) {
  146.25        warning("Heap verification at start-up disabled "
  146.26                "(due to current incompatibility with FLSVerifyAllHeapReferences)");
  146.27 -      VerifyGCStartAt = 1;      // Disable verification at start-up
  146.28 +      VerifyDuringStartup = false; // Disable verification at start-up
  146.29      }
  146.30 +
  146.31      if (VerifyBeforeExit) {
  146.32        warning("Heap verification at shutdown disabled "
  146.33                "(due to current incompatibility with FLSVerifyAllHeapReferences)");
  146.34 @@ -3092,6 +3097,7 @@
  146.35    }                                                                   \
  146.36  } while(0)
  146.37  
  146.38 +#if !INCLUDE_ALL_GCS
  146.39  static void force_serial_gc() {
  146.40    FLAG_SET_DEFAULT(UseSerialGC, true);
  146.41    FLAG_SET_DEFAULT(CMSIncrementalMode, false);  // special CMS suboption
  146.42 @@ -3101,6 +3107,7 @@
  146.43    UNSUPPORTED_GC_OPTION(UseConcMarkSweepGC);
  146.44    UNSUPPORTED_GC_OPTION(UseParNewGC);
  146.45  }
  146.46 +#endif // INCLUDE_ALL_GCS
  146.47  
  146.48  // Parse entry point called from JNI_CreateJavaVM
  146.49  
   147.1 --- a/src/share/vm/runtime/compilationPolicy.cpp	Wed Apr 24 20:55:28 2013 -0400
   147.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Apr 24 21:11:02 2013 -0400
   147.3 @@ -123,9 +123,10 @@
   147.4    }
   147.5    if (comp_level == CompLevel_all) {
   147.6      return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
   147.7 -  } else {
   147.8 +  } else if (is_compile(comp_level)) {
   147.9      return !m->is_not_compilable(comp_level);
  147.10    }
  147.11 +  return false;
  147.12  }
  147.13  
  147.14  bool CompilationPolicy::is_compilation_enabled() {
   148.1 --- a/src/share/vm/runtime/compilationPolicy.hpp	Wed Apr 24 20:55:28 2013 -0400
   148.2 +++ b/src/share/vm/runtime/compilationPolicy.hpp	Wed Apr 24 21:11:02 2013 -0400
   148.3 @@ -96,7 +96,7 @@
   148.4    void reset_counter_for_back_branch_event(methodHandle method);
   148.5  public:
   148.6    NonTieredCompPolicy() : _compiler_count(0) { }
   148.7 -  virtual CompLevel initial_compile_level() { return CompLevel_initial_compile; }
   148.8 +  virtual CompLevel initial_compile_level() { return CompLevel_highest_tier; }
   148.9    virtual int compiler_count(CompLevel comp_level);
  148.10    virtual void do_safepoint_work();
  148.11    virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
   149.1 --- a/src/share/vm/runtime/globals.hpp	Wed Apr 24 20:55:28 2013 -0400
   149.2 +++ b/src/share/vm/runtime/globals.hpp	Wed Apr 24 21:11:02 2013 -0400
   149.3 @@ -2123,6 +2123,10 @@
   149.4    product(intx, PrefetchFieldsAhead, -1,                                    \
   149.5            "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
   149.6                                                                              \
   149.7 +  diagnostic(bool, VerifyDuringStartup, false,                              \
   149.8 +          "Verify memory system before executing any Java code "            \
   149.9 +          "during VM initialization")                                       \
  149.10 +                                                                            \
  149.11    diagnostic(bool, VerifyBeforeExit, trueInDebug,                           \
  149.12            "Verify system before exiting")                                   \
  149.13                                                                              \
  149.14 @@ -3664,8 +3668,13 @@
  149.15    product(bool, PrintGCCause, true,                                         \
  149.16            "Include GC cause in GC logging")                                 \
  149.17                                                                              \
  149.18 -  product(bool, AllowNonVirtualCalls, false,                                \
  149.19 -          "Obey the ACC_SUPER flag and allow invokenonvirtual calls")
  149.20 +  product(bool , AllowNonVirtualCalls, false,                               \
  149.21 +          "Obey the ACC_SUPER flag and allow invokenonvirtual calls")       \
  149.22 +                                                                            \
  149.23 +  experimental(uintx, ArrayAllocatorMallocLimit,                            \
  149.24 +          SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx),                        \
  149.25 +          "Allocation less than this value will be allocated "              \
  149.26 +          "using malloc. Larger allocations will use mmap.")
  149.27  
  149.28  /*
  149.29   *  Macros for factoring of globals
   150.1 --- a/src/share/vm/runtime/safepoint.cpp	Wed Apr 24 20:55:28 2013 -0400
   150.2 +++ b/src/share/vm/runtime/safepoint.cpp	Wed Apr 24 21:11:02 2013 -0400
   150.3 @@ -735,6 +735,9 @@
   150.4  // Exception handlers
   150.5  
   150.6  #ifndef PRODUCT
   150.7 +
   150.8 +#ifdef SPARC
   150.9 +
  150.10  #ifdef _LP64
  150.11  #define PTR_PAD ""
  150.12  #else
  150.13 @@ -755,7 +758,6 @@
  150.14                  newptr, is_oop?"oop":"   ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":"     "));
  150.15  }
  150.16  
  150.17 -#ifdef SPARC
  150.18  static void print_me(intptr_t *new_sp, intptr_t *old_sp, bool *was_oops) {
  150.19  #ifdef _LP64
  150.20    tty->print_cr("--------+------address-----+------before-----------+-------after----------+");
   151.1 --- a/src/share/vm/runtime/synchronizer.cpp	Wed Apr 24 20:55:28 2013 -0400
   151.2 +++ b/src/share/vm/runtime/synchronizer.cpp	Wed Apr 24 21:11:02 2013 -0400
   151.3 @@ -449,8 +449,6 @@
   151.4  // and explicit fences (barriers) to control for architectural reordering performed
   151.5  // by the CPU(s) or platform.
   151.6  
   151.7 -static int  MBFence (int x) { OrderAccess::fence(); return x; }
   151.8 -
   151.9  struct SharedGlobals {
  151.10      // These are highly shared mostly-read variables.
  151.11      // To avoid false-sharing they need to be the sole occupants of a $ line.
  151.12 @@ -1639,11 +1637,6 @@
  151.13  
  151.14  #ifndef PRODUCT
  151.15  
  151.16 -void ObjectSynchronizer::trace_locking(Handle locking_obj, bool is_compiled,
  151.17 -                                       bool is_method, bool is_locking) {
  151.18 -  // Don't know what to do here
  151.19 -}
  151.20 -
  151.21  // Verify all monitors in the monitor cache, the verification is weak.
  151.22  void ObjectSynchronizer::verify() {
  151.23    ObjectMonitor* block = gBlockList;
   152.1 --- a/src/share/vm/runtime/synchronizer.hpp	Wed Apr 24 20:55:28 2013 -0400
   152.2 +++ b/src/share/vm/runtime/synchronizer.hpp	Wed Apr 24 21:11:02 2013 -0400
   152.3 @@ -121,7 +121,6 @@
   152.4    static void oops_do(OopClosure* f);
   152.5  
   152.6    // debugging
   152.7 -  static void trace_locking(Handle obj, bool is_compiled, bool is_method, bool is_locking) PRODUCT_RETURN;
   152.8    static void verify() PRODUCT_RETURN;
   152.9    static int  verify_objmon_isinpool(ObjectMonitor *addr) PRODUCT_RETURN0;
  152.10  
   153.1 --- a/src/share/vm/runtime/thread.cpp	Wed Apr 24 20:55:28 2013 -0400
   153.2 +++ b/src/share/vm/runtime/thread.cpp	Wed Apr 24 21:11:02 2013 -0400
   153.3 @@ -3446,9 +3446,9 @@
   153.4    }
   153.5  
   153.6    assert (Universe::is_fully_initialized(), "not initialized");
   153.7 -  if (VerifyBeforeGC && VerifyGCStartAt == 0) {
   153.8 -    Universe::heap()->prepare_for_verify();
   153.9 -    Universe::verify();   // make sure we're starting with a clean slate
  153.10 +  if (VerifyDuringStartup) {
  153.11 +    VM_Verify verify_op(false /* silent */);   // make sure we're starting with a clean slate
  153.12 +    VMThread::execute(&verify_op);
  153.13    }
  153.14  
  153.15    EXCEPTION_MARK;
   154.1 --- a/src/share/vm/runtime/thread.hpp	Wed Apr 24 20:55:28 2013 -0400
   154.2 +++ b/src/share/vm/runtime/thread.hpp	Wed Apr 24 21:11:02 2013 -0400
   154.3 @@ -1056,11 +1056,11 @@
   154.4  #if INCLUDE_NMT
   154.5    // native memory tracking
   154.6    inline MemRecorder* get_recorder() const          { return (MemRecorder*)_recorder; }
   154.7 -  inline void         set_recorder(MemRecorder* rc) { _recorder = (volatile MemRecorder*)rc; }
   154.8 +  inline void         set_recorder(MemRecorder* rc) { _recorder = rc; }
   154.9  
  154.10   private:
  154.11    // per-thread memory recorder
  154.12 -  volatile MemRecorder* _recorder;
  154.13 +  MemRecorder* volatile _recorder;
  154.14  #endif // INCLUDE_NMT
  154.15  
  154.16    // Suspend/resume support for JavaThread
   155.1 --- a/src/share/vm/runtime/vmStructs.cpp	Wed Apr 24 20:55:28 2013 -0400
   155.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Wed Apr 24 21:11:02 2013 -0400
   155.3 @@ -480,6 +480,9 @@
   155.4                                                                                                                                       \
   155.5    nonstatic_field(CardGeneration,              _rs,                                           GenRemSet*)                            \
   155.6    nonstatic_field(CardGeneration,              _bts,                                          BlockOffsetSharedArray*)               \
   155.7 +  nonstatic_field(CardGeneration,              _shrink_factor,                                size_t)                                \
   155.8 +  nonstatic_field(CardGeneration,              _capacity_at_prologue,                         size_t)                                \
   155.9 +  nonstatic_field(CardGeneration,              _used_at_prologue,                             size_t)                                \
  155.10                                                                                                                                       \
  155.11    nonstatic_field(CardTableModRefBS,           _whole_heap,                                   const MemRegion)                       \
  155.12    nonstatic_field(CardTableModRefBS,           _guard_index,                                  const size_t)                          \
  155.13 @@ -550,8 +553,6 @@
  155.14    nonstatic_field(Space,                       _bottom,                                       HeapWord*)                             \
  155.15    nonstatic_field(Space,                       _end,                                          HeapWord*)                             \
  155.16                                                                                                                                       \
  155.17 -  nonstatic_field(TenuredGeneration,           _shrink_factor,                                size_t)                                \
  155.18 -  nonstatic_field(TenuredGeneration,           _capacity_at_prologue,                         size_t)                                \
  155.19    nonstatic_field(ThreadLocalAllocBuffer,      _start,                                        HeapWord*)                             \
  155.20    nonstatic_field(ThreadLocalAllocBuffer,      _top,                                          HeapWord*)                             \
  155.21    nonstatic_field(ThreadLocalAllocBuffer,      _end,                                          HeapWord*)                             \
  155.22 @@ -1116,7 +1117,6 @@
  155.23    c2_nonstatic_field(PhaseChaitin,       _lo_stk_degree,           uint)                                                             \
  155.24    c2_nonstatic_field(PhaseChaitin,       _hi_degree,               uint)                                                             \
  155.25    c2_nonstatic_field(PhaseChaitin,       _simplified,              uint)                                                             \
  155.26 -  c2_nonstatic_field(PhaseChaitin,       _maxlrg,                  uint)                                                             \
  155.27                                                                                                                                       \
  155.28    c2_nonstatic_field(Block,              _nodes,                   Node_List)                                                        \
  155.29    c2_nonstatic_field(Block,              _succs,                   Block_Array)                                                      \
   156.1 --- a/src/share/vm/runtime/vmThread.cpp	Wed Apr 24 20:55:28 2013 -0400
   156.2 +++ b/src/share/vm/runtime/vmThread.cpp	Wed Apr 24 21:11:02 2013 -0400
   156.3 @@ -123,7 +123,7 @@
   156.4    _queue[prio]->set_next(_queue[prio]);
   156.5    _queue[prio]->set_prev(_queue[prio]);
   156.6    assert(queue_empty(prio), "drain corrupted queue");
   156.7 -#ifdef DEBUG
   156.8 +#ifdef ASSERT
   156.9    int len = 0;
  156.10    VM_Operation* cur;
  156.11    for(cur = r; cur != NULL; cur=cur->next()) len++;
   157.1 --- a/src/share/vm/runtime/vm_operations.cpp	Wed Apr 24 20:55:28 2013 -0400
   157.2 +++ b/src/share/vm/runtime/vm_operations.cpp	Wed Apr 24 21:11:02 2013 -0400
   157.3 @@ -175,7 +175,8 @@
   157.4  }
   157.5  
   157.6  void VM_Verify::doit() {
   157.7 -  Universe::verify();
   157.8 +  Universe::heap()->prepare_for_verify();
   157.9 +  Universe::verify(_silent);
  157.10  }
  157.11  
  157.12  bool VM_PrintThreads::doit_prologue() {
   158.1 --- a/src/share/vm/runtime/vm_operations.hpp	Wed Apr 24 20:55:28 2013 -0400
   158.2 +++ b/src/share/vm/runtime/vm_operations.hpp	Wed Apr 24 21:11:02 2013 -0400
   158.3 @@ -300,9 +300,9 @@
   158.4  
   158.5  class VM_Verify: public VM_Operation {
   158.6   private:
   158.7 -  KlassHandle _dependee;
   158.8 +  bool _silent;
   158.9   public:
  158.10 -  VM_Verify() {}
  158.11 +  VM_Verify(bool silent) : _silent(silent) {}
  158.12    VMOp_Type type() const { return VMOp_Verify; }
  158.13    void doit();
  158.14  };
   159.1 --- a/src/share/vm/services/diagnosticArgument.cpp	Wed Apr 24 20:55:28 2013 -0400
   159.2 +++ b/src/share/vm/services/diagnosticArgument.cpp	Wed Apr 24 21:11:02 2013 -0400
   159.3 @@ -1,5 +1,5 @@
   159.4  /*
   159.5 - * Copyright (c) 2011, 2013 Oracle and/or its affiliates. All rights reserved.
   159.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
   159.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   159.8   *
   159.9   * This code is free software; you can redistribute it and/or modify it
   160.1 --- a/src/share/vm/services/memTrackWorker.cpp	Wed Apr 24 20:55:28 2013 -0400
   160.2 +++ b/src/share/vm/services/memTrackWorker.cpp	Wed Apr 24 21:11:02 2013 -0400
   160.3 @@ -39,7 +39,7 @@
   160.4    }
   160.5  }
   160.6  
   160.7 -MemTrackWorker::MemTrackWorker() {
   160.8 +MemTrackWorker::MemTrackWorker(MemSnapshot* snapshot): _snapshot(snapshot) {
   160.9    // create thread uses cgc thread type for now. We should revisit
  160.10    // the option, or create new thread type.
  160.11    _has_error = !os::create_thread(this, os::cgc_thread);
  160.12 @@ -88,8 +88,7 @@
  160.13    assert(MemTracker::is_on(), "native memory tracking is off");
  160.14    this->initialize_thread_local_storage();
  160.15    this->record_stack_base_and_size();
  160.16 -  MemSnapshot* snapshot = MemTracker::get_snapshot();
  160.17 -  assert(snapshot != NULL, "Worker should not be started");
  160.18 +  assert(_snapshot != NULL, "Worker should not be started");
  160.19    MemRecorder* rec;
  160.20    unsigned long processing_generation = 0;
  160.21    bool          worker_idle = false;
  160.22 @@ -109,7 +108,7 @@
  160.23        }
  160.24  
  160.25        // merge the recorder into staging area
  160.26 -      if (!snapshot->merge(rec)) {
  160.27 +      if (!_snapshot->merge(rec)) {
  160.28          MemTracker::shutdown(MemTracker::NMT_out_of_memory);
  160.29        } else {
  160.30          NOT_PRODUCT(_merge_count ++;)
  160.31 @@ -132,7 +131,7 @@
  160.32            _head = (_head + 1) % MAX_GENERATIONS;
  160.33          }
  160.34          // promote this generation data to snapshot
  160.35 -        if (!snapshot->promote(number_of_classes)) {
  160.36 +        if (!_snapshot->promote(number_of_classes)) {
  160.37            // failed to promote, means out of memory
  160.38            MemTracker::shutdown(MemTracker::NMT_out_of_memory);
  160.39          }
  160.40 @@ -140,7 +139,7 @@
  160.41          // worker thread is idle
  160.42          worker_idle = true;
  160.43          MemTracker::report_worker_idle();
  160.44 -        snapshot->wait(1000);
  160.45 +        _snapshot->wait(1000);
  160.46          ThreadCritical tc;
  160.47          // check if more data arrived
  160.48          if (!_gen[_head].has_more_recorder()) {
   161.1 --- a/src/share/vm/services/memTrackWorker.hpp	Wed Apr 24 20:55:28 2013 -0400
   161.2 +++ b/src/share/vm/services/memTrackWorker.hpp	Wed Apr 24 21:11:02 2013 -0400
   161.3 @@ -85,8 +85,10 @@
   161.4  
   161.5    bool            _has_error;
   161.6  
   161.7 +  MemSnapshot*    _snapshot;
   161.8 +
   161.9   public:
  161.10 -  MemTrackWorker();
  161.11 +  MemTrackWorker(MemSnapshot* snapshot);
  161.12    ~MemTrackWorker();
  161.13    _NOINLINE_ void* operator new(size_t size);
  161.14    _NOINLINE_ void* operator new(size_t size, const std::nothrow_t& nothrow_constant);
   162.1 --- a/src/share/vm/services/memTracker.cpp	Wed Apr 24 20:55:28 2013 -0400
   162.2 +++ b/src/share/vm/services/memTracker.cpp	Wed Apr 24 21:11:02 2013 -0400
   162.3 @@ -1,5 +1,5 @@
   162.4  /*
   162.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   162.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   162.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   162.8   *
   162.9   * This code is free software; you can redistribute it and/or modify it
  162.10 @@ -53,12 +53,12 @@
  162.11  }
  162.12  
  162.13  
  162.14 -MemRecorder*                    MemTracker::_global_recorder = NULL;
  162.15 +MemRecorder* volatile           MemTracker::_global_recorder = NULL;
  162.16  MemSnapshot*                    MemTracker::_snapshot = NULL;
  162.17  MemBaseline                     MemTracker::_baseline;
  162.18  Mutex*                          MemTracker::_query_lock = NULL;
  162.19 -volatile MemRecorder*           MemTracker::_merge_pending_queue = NULL;
  162.20 -volatile MemRecorder*           MemTracker::_pooled_recorders = NULL;
  162.21 +MemRecorder* volatile           MemTracker::_merge_pending_queue = NULL;
  162.22 +MemRecorder* volatile           MemTracker::_pooled_recorders = NULL;
  162.23  MemTrackWorker*                 MemTracker::_worker_thread = NULL;
  162.24  int                             MemTracker::_sync_point_skip_count = 0;
  162.25  MemTracker::NMTLevel            MemTracker::_tracking_level = MemTracker::NMT_off;
  162.26 @@ -127,12 +127,15 @@
  162.27    assert(_state == NMT_bootstrapping_multi_thread, "wrong state");
  162.28  
  162.29    _snapshot = new (std::nothrow)MemSnapshot();
  162.30 -  if (_snapshot != NULL && !_snapshot->out_of_memory()) {
  162.31 -    if (start_worker()) {
  162.32 +  if (_snapshot != NULL) {
  162.33 +    if (!_snapshot->out_of_memory() && start_worker(_snapshot)) {
  162.34        _state = NMT_started;
  162.35        NMT_track_callsite = (_tracking_level == NMT_detail && can_walk_stack());
  162.36        return;
  162.37      }
  162.38 +
  162.39 +    delete _snapshot;
  162.40 +    _snapshot = NULL;
  162.41    }
  162.42  
  162.43    // fail to start native memory tracking, shut it down
  162.44 @@ -206,7 +209,7 @@
  162.45  // delete all pooled recorders
  162.46  void MemTracker::delete_all_pooled_recorders() {
  162.47    // free all pooled recorders
  162.48 -  volatile MemRecorder* cur_head = _pooled_recorders;
  162.49 +  MemRecorder* volatile cur_head = _pooled_recorders;
  162.50    if (cur_head != NULL) {
  162.51      MemRecorder* null_ptr = NULL;
  162.52      while (cur_head != NULL && (void*)cur_head != Atomic::cmpxchg_ptr((void*)null_ptr,
  162.53 @@ -540,11 +543,14 @@
  162.54  /*
  162.55   * Start worker thread.
  162.56   */
  162.57 -bool MemTracker::start_worker() {
  162.58 -  assert(_worker_thread == NULL, "Just Check");
  162.59 -  _worker_thread = new (std::nothrow) MemTrackWorker();
  162.60 -  if (_worker_thread == NULL || _worker_thread->has_error()) {
  162.61 -    shutdown(NMT_initialization);
  162.62 +bool MemTracker::start_worker(MemSnapshot* snapshot) {
  162.63 +  assert(_worker_thread == NULL && _snapshot != NULL, "Just Check");
  162.64 +  _worker_thread = new (std::nothrow) MemTrackWorker(snapshot);
  162.65 +  if (_worker_thread == NULL) {
  162.66 +    return false;
  162.67 +  } else if (_worker_thread->has_error()) {
  162.68 +    delete _worker_thread;
  162.69 +    _worker_thread = NULL;
  162.70      return false;
  162.71    }
  162.72    _worker_thread->start();
   163.1 --- a/src/share/vm/services/memTracker.hpp	Wed Apr 24 20:55:28 2013 -0400
   163.2 +++ b/src/share/vm/services/memTracker.hpp	Wed Apr 24 21:11:02 2013 -0400
   163.3 @@ -421,7 +421,7 @@
   163.4  
   163.5   private:
   163.6    // start native memory tracking worker thread
   163.7 -  static bool start_worker();
   163.8 +  static bool start_worker(MemSnapshot* snapshot);
   163.9  
  163.10    // called by worker thread to complete shutdown process
  163.11    static void final_shutdown();
  163.12 @@ -475,18 +475,18 @@
  163.13    // a thread can start to allocate memory before it is attached
  163.14    // to VM 'Thread', those memory activities are recorded here.
  163.15    // ThreadCritical is required to guard this global recorder.
  163.16 -  static MemRecorder*     _global_recorder;
  163.17 +  static MemRecorder* volatile _global_recorder;
  163.18  
  163.19    // main thread id
  163.20    debug_only(static intx   _main_thread_tid;)
  163.21  
  163.22    // pending recorders to be merged
  163.23 -  static volatile MemRecorder*      _merge_pending_queue;
  163.24 +  static MemRecorder* volatile     _merge_pending_queue;
  163.25  
  163.26    NOT_PRODUCT(static volatile jint   _pending_recorder_count;)
  163.27  
  163.28    // pooled memory recorders
  163.29 -  static volatile MemRecorder*      _pooled_recorders;
  163.30 +  static MemRecorder* volatile     _pooled_recorders;
  163.31  
  163.32    // memory recorder pool management, uses following
  163.33    // counter to determine if a released memory recorder
   164.1 --- a/src/share/vm/services/runtimeService.cpp	Wed Apr 24 20:55:28 2013 -0400
   164.2 +++ b/src/share/vm/services/runtimeService.cpp	Wed Apr 24 21:11:02 2013 -0400
   164.3 @@ -120,6 +120,8 @@
   164.4  
   164.5    // Print the time interval in which the app was executing
   164.6    if (PrintGCApplicationConcurrentTime) {
   164.7 +    gclog_or_tty->date_stamp(PrintGCDateStamps);
   164.8 +    gclog_or_tty->stamp(PrintGCTimeStamps);
   164.9      gclog_or_tty->print_cr("Application time: %3.7f seconds",
  164.10                                  last_application_time_sec());
  164.11    }
  164.12 @@ -150,6 +152,8 @@
  164.13    // Print the time interval for which the app was stopped
  164.14    // during the current safepoint operation.
  164.15    if (PrintGCApplicationStoppedTime) {
  164.16 +    gclog_or_tty->date_stamp(PrintGCDateStamps);
  164.17 +    gclog_or_tty->stamp(PrintGCTimeStamps);
  164.18      gclog_or_tty->print_cr("Total time for which application threads "
  164.19                             "were stopped: %3.7f seconds",
  164.20                             last_safepoint_time_sec());
   165.1 --- a/src/share/vm/utilities/accessFlags.hpp	Wed Apr 24 20:55:28 2013 -0400
   165.2 +++ b/src/share/vm/utilities/accessFlags.hpp	Wed Apr 24 21:11:02 2013 -0400
   165.3 @@ -194,6 +194,9 @@
   165.4    void set_is_obsolete()               { atomic_set_bits(JVM_ACC_IS_OBSOLETE);             }
   165.5    void set_is_prefixed_native()        { atomic_set_bits(JVM_ACC_IS_PREFIXED_NATIVE);      }
   165.6  
   165.7 +  void clear_not_c1_compilable()       { atomic_clear_bits(JVM_ACC_NOT_C1_COMPILABLE);       }
   165.8 +  void clear_not_c2_compilable()       { atomic_clear_bits(JVM_ACC_NOT_C2_COMPILABLE);       }
   165.9 +  void clear_not_c2_osr_compilable()   { atomic_clear_bits(JVM_ACC_NOT_C2_OSR_COMPILABLE);   }
  165.10    // Klass* flags
  165.11    void set_has_vanilla_constructor()   { atomic_set_bits(JVM_ACC_HAS_VANILLA_CONSTRUCTOR); }
  165.12    void set_has_finalizer()             { atomic_set_bits(JVM_ACC_HAS_FINALIZER);           }
   166.1 --- a/src/share/vm/utilities/bitMap.cpp	Wed Apr 24 20:55:28 2013 -0400
   166.2 +++ b/src/share/vm/utilities/bitMap.cpp	Wed Apr 24 21:11:02 2013 -0400
   166.3 @@ -516,6 +516,10 @@
   166.4    return sum;
   166.5  }
   166.6  
   166.7 +void BitMap::print_on_error(outputStream* st, const char* prefix) const {
   166.8 +  st->print_cr("%s[" PTR_FORMAT ", " PTR_FORMAT ")",
   166.9 +      prefix, map(), (char*)map() + (size() >> LogBitsPerByte));
  166.10 +}
  166.11  
  166.12  #ifndef PRODUCT
  166.13  
   167.1 --- a/src/share/vm/utilities/bitMap.hpp	Wed Apr 24 20:55:28 2013 -0400
   167.2 +++ b/src/share/vm/utilities/bitMap.hpp	Wed Apr 24 21:11:02 2013 -0400
   167.3 @@ -262,6 +262,7 @@
   167.4    bool is_full() const;
   167.5    bool is_empty() const;
   167.6  
   167.7 +  void print_on_error(outputStream* st, const char* prefix) const;
   167.8  
   167.9  #ifndef PRODUCT
  167.10   public:
   168.1 --- a/src/share/vm/utilities/debug.cpp	Wed Apr 24 20:55:28 2013 -0400
   168.2 +++ b/src/share/vm/utilities/debug.cpp	Wed Apr 24 21:11:02 2013 -0400
   168.3 @@ -608,18 +608,6 @@
   168.4    return  CodeCache::find_nmethod((address)addr);
   168.5  }
   168.6  
   168.7 -static address same_page(address x, address y) {
   168.8 -  intptr_t page_bits = -os::vm_page_size();
   168.9 -  if ((intptr_t(x) & page_bits) == (intptr_t(y) & page_bits)) {
  168.10 -    return x;
  168.11 -  } else if (x > y) {
  168.12 -    return (address)(intptr_t(y) | ~page_bits) + 1;
  168.13 -  } else {
  168.14 -    return (address)(intptr_t(y) & page_bits);
  168.15 -  }
  168.16 -}
  168.17 -
  168.18 -
  168.19  // Another interface that isn't ambiguous in dbx.
  168.20  // Can we someday rename the other find to hsfind?
  168.21  extern "C" void hsfind(intptr_t x) {
   169.1 --- a/src/share/vm/utilities/globalDefinitions.cpp	Wed Apr 24 20:55:28 2013 -0400
   169.2 +++ b/src/share/vm/utilities/globalDefinitions.cpp	Wed Apr 24 21:11:02 2013 -0400
   169.3 @@ -355,3 +355,33 @@
   169.4  
   169.5      return size_t(result);
   169.6  }
   169.7 +
   169.8 +#ifndef PRODUCT
   169.9 +
  169.10 +void GlobalDefinitions::test_globals() {
  169.11 +  intptr_t page_sizes[] = { os::vm_page_size(), 4096, 8192, 65536, 2*1024*1024 };
  169.12 +  const int num_page_sizes = sizeof(page_sizes) / sizeof(page_sizes[0]);
  169.13 +
  169.14 +  for (int i = 0; i < num_page_sizes; i++) {
  169.15 +    intptr_t page_size = page_sizes[i];
  169.16 +
  169.17 +    address a_page = (address)(10*page_size);
  169.18 +
  169.19 +    // Check that address within page is returned as is
  169.20 +    assert(clamp_address_in_page(a_page, a_page, page_size) == a_page, "incorrect");
  169.21 +    assert(clamp_address_in_page(a_page + 128, a_page, page_size) == a_page + 128, "incorrect");
  169.22 +    assert(clamp_address_in_page(a_page + page_size - 1, a_page, page_size) == a_page + page_size - 1, "incorrect");
  169.23 +
  169.24 +    // Check that address above page returns start of next page
  169.25 +    assert(clamp_address_in_page(a_page + page_size, a_page, page_size) == a_page + page_size, "incorrect");
  169.26 +    assert(clamp_address_in_page(a_page + page_size + 1, a_page, page_size) == a_page + page_size, "incorrect");
  169.27 +    assert(clamp_address_in_page(a_page + page_size*5 + 1, a_page, page_size) == a_page + page_size, "incorrect");
  169.28 +
  169.29 +    // Check that address below page returns start of page
  169.30 +    assert(clamp_address_in_page(a_page - 1, a_page, page_size) == a_page, "incorrect");
  169.31 +    assert(clamp_address_in_page(a_page - 2*page_size - 1, a_page, page_size) == a_page, "incorrect");
  169.32 +    assert(clamp_address_in_page(a_page - 5*page_size - 1, a_page, page_size) == a_page, "incorrect");
  169.33 +  }
  169.34 +}
  169.35 +
  169.36 +#endif // PRODUCT
   170.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Wed Apr 24 20:55:28 2013 -0400
   170.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Apr 24 21:11:02 2013 -0400
   170.3 @@ -419,6 +419,24 @@
   170.4    return align_size_up(offset, HeapWordsPerLong);
   170.5  }
   170.6  
   170.7 +// Clamp an address to be within a specific page
   170.8 +// 1. If addr is on the page it is returned as is
   170.9 +// 2. If addr is above the page_address the start of the *next* page will be returned
  170.10 +// 3. Otherwise, if addr is below the page_address the start of the page will be returned
  170.11 +inline address clamp_address_in_page(address addr, address page_address, intptr_t page_size) {
  170.12 +  if (align_size_down(intptr_t(addr), page_size) == align_size_down(intptr_t(page_address), page_size)) {
  170.13 +    // address is in the specified page, just return it as is
  170.14 +    return addr;
  170.15 +  } else if (addr > page_address) {
  170.16 +    // address is above specified page, return start of next page
  170.17 +    return (address)align_size_down(intptr_t(page_address), page_size) + page_size;
  170.18 +  } else {
  170.19 +    // address is below specified page, return start of page
  170.20 +    return (address)align_size_down(intptr_t(page_address), page_size);
  170.21 +  }
  170.22 +}
  170.23 +
  170.24 +
  170.25  // The expected size in bytes of a cache line, used to pad data structures.
  170.26  #define DEFAULT_CACHE_LINE_SIZE 64
  170.27  
  170.28 @@ -827,6 +845,10 @@
  170.29    return comp_level == CompLevel_highest_tier;
  170.30  }
  170.31  
  170.32 +inline bool is_compile(int comp_level) {
  170.33 +  return is_c1_compile(comp_level) || is_c2_compile(comp_level);
  170.34 +}
  170.35 +
  170.36  //----------------------------------------------------------------------------------------------------
  170.37  // 'Forward' declarations of frequently used classes
  170.38  // (in order to reduce interface dependencies & reduce
  170.39 @@ -1296,4 +1318,15 @@
  170.40    return *(void**)addr;
  170.41  }
  170.42  
  170.43 +
  170.44 +#ifndef PRODUCT
  170.45 +
  170.46 +// For unit testing only
  170.47 +class GlobalDefinitions {
  170.48 +public:
  170.49 +  static void test_globals();
  170.50 +};
  170.51 +
  170.52 +#endif // PRODUCT
  170.53 +
  170.54  #endif // SHARE_VM_UTILITIES_GLOBALDEFINITIONS_HPP
   171.1 --- a/src/share/vm/utilities/taskqueue.hpp	Wed Apr 24 20:55:28 2013 -0400
   171.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Wed Apr 24 21:11:02 2013 -0400
   171.3 @@ -253,6 +253,7 @@
   171.4  
   171.5  template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
   171.6  class GenericTaskQueue: public TaskQueueSuper<N, F> {
   171.7 +  ArrayAllocator<E, F> _array_allocator;
   171.8  protected:
   171.9    typedef typename TaskQueueSuper<N, F>::Age Age;
  171.10    typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
  171.11 @@ -314,7 +315,7 @@
  171.12  
  171.13  template<class E, MEMFLAGS F, unsigned int N>
  171.14  void GenericTaskQueue<E, F, N>::initialize() {
  171.15 -  _elems = NEW_C_HEAP_ARRAY(E, N, F);
  171.16 +  _elems = _array_allocator.allocate(N);
  171.17  }
  171.18  
  171.19  template<class E, MEMFLAGS F, unsigned int N>
   172.1 --- a/src/share/vm/utilities/vmError.cpp	Wed Apr 24 20:55:28 2013 -0400
   172.2 +++ b/src/share/vm/utilities/vmError.cpp	Wed Apr 24 21:11:02 2013 -0400
   172.3 @@ -685,13 +685,7 @@
   172.4    STEP(190, "(printing heap information)" )
   172.5  
   172.6       if (_verbose && Universe::is_fully_initialized()) {
   172.7 -       // Print heap information before vm abort. As we'd like as much
   172.8 -       // information as possible in the report we ask for the
   172.9 -       // extended (i.e., more detailed) version.
  172.10 -       Universe::print_on(st, true /* extended */);
  172.11 -       st->cr();
  172.12 -
  172.13 -       Universe::heap()->barrier_set()->print_on(st);
  172.14 +       Universe::heap()->print_on_error(st);
  172.15         st->cr();
  172.16  
  172.17         st->print_cr("Polling page: " INTPTR_FORMAT, os::get_polling_page());
   173.1 --- a/test/Makefile	Wed Apr 24 20:55:28 2013 -0400
   173.2 +++ b/test/Makefile	Wed Apr 24 21:11:02 2013 -0400
   173.3 @@ -162,7 +162,9 @@
   173.4  # jtreg tests
   173.5  
   173.6  # Expect JT_HOME to be set for jtreg tests. (home for jtreg)
   173.7 -JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg
   173.8 +ifndef JT_HOME
   173.9 +  JT_HOME = $(SLASH_JAVA)/re/jtreg/4.0/promoted/latest/binaries/jtreg
  173.10 +endif
  173.11  ifdef JPRT_JTREG_HOME
  173.12    JT_HOME = $(JPRT_JTREG_HOME)
  173.13  endif
   174.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   174.2 +++ b/test/compiler/6443505/Test6443505.java	Wed Apr 24 21:11:02 2013 -0400
   174.3 @@ -0,0 +1,107 @@
   174.4 +/*
   174.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   174.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   174.7 + *
   174.8 + * This code is free software; you can redistribute it and/or modify it
   174.9 + * under the terms of the GNU General Public License version 2 only, as
  174.10 + * published by the Free Software Foundation.
  174.11 + *
  174.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  174.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  174.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  174.15 + * version 2 for more details (a copy is included in the LICENSE file that
  174.16 + * accompanied this code).
  174.17 + *
  174.18 + * You should have received a copy of the GNU General Public License version
  174.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  174.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  174.21 + *
  174.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  174.23 + * or visit www.oracle.com if you need additional information or have any
  174.24 + * questions.
  174.25 + *
  174.26 + */
  174.27 +
  174.28 +/**
  174.29 + * @test
  174.30 + * @bug 6443505
  174.31 + * @summary Some cases for CmpLTMask missed; also wrong code.
  174.32 + *
  174.33 + * @run main/othervm -Xcomp -XX:CompileOnly="Test6443505.compiled" Test6443505
  174.34 + */
  174.35 +
  174.36 +public class Test6443505 {
  174.37 +
  174.38 +    public static void main(String[] args) throws InterruptedException {
  174.39 +        test(Integer.MIN_VALUE, 0);
  174.40 +        test(0, Integer.MIN_VALUE);
  174.41 +        test(Integer.MIN_VALUE, -1);
  174.42 +        test(-1, Integer.MIN_VALUE);
  174.43 +        test(Integer.MIN_VALUE, 1);
  174.44 +        test(1, Integer.MIN_VALUE);
  174.45 +
  174.46 +        test(Integer.MAX_VALUE, 0);
  174.47 +        test(0, Integer.MAX_VALUE);
  174.48 +        test(Integer.MAX_VALUE, -1);
  174.49 +        test(-1, Integer.MAX_VALUE);
  174.50 +        test(Integer.MAX_VALUE, 1);
  174.51 +        test(1, Integer.MAX_VALUE);
  174.52 +
  174.53 +        test(Integer.MIN_VALUE, Integer.MAX_VALUE);
  174.54 +        test(Integer.MAX_VALUE, Integer.MIN_VALUE);
  174.55 +
  174.56 +        test(1, -1);
  174.57 +        test(1, 0);
  174.58 +        test(1, 1);
  174.59 +        test(-1, -1);
  174.60 +        test(-1, 0);
  174.61 +        test(-1, 1);
  174.62 +        test(0, -1);
  174.63 +        test(0, 0);
  174.64 +        test(0, 1);
  174.65 +    }
  174.66 +
  174.67 +    public static void test(int a, int b) throws InterruptedException {
  174.68 +        int C = compiled(4, a, b);
  174.69 +        int I = interpreted(4, a, b);
  174.70 +        if (C != I) {
  174.71 +            System.err.println("#1 C = " + C + ", I = " + I);
  174.72 +            System.err.println("#1 C != I, FAIL");
  174.73 +            System.exit(97);
  174.74 +        }
  174.75 +
  174.76 +        C = compiled(a, b, q, 4);
  174.77 +        I = interpreted(a, b, q, 4);
  174.78 +        if (C != I) {
  174.79 +            System.err.println("#2 C = " + C + ", I = " + I);
  174.80 +            System.err.println("#2 C != I, FAIL");
  174.81 +            System.exit(97);
  174.82 +        }
  174.83 +
  174.84 +    }
  174.85 +
  174.86 +    static int q = 4;
  174.87 +
  174.88 +    // If improperly compiled, uses carry/borrow bit, which is wrong.
  174.89 +    // with -XX:+PrintOptoAssembly, look for cadd_cmpLTMask
  174.90 +    static int compiled(int p, int x, int y) {
  174.91 +        return (x < y) ? q + (x - y) : (x - y);
  174.92 +    }
  174.93 +
  174.94 +    // interpreted reference
  174.95 +    static int interpreted(int p, int x, int y) {
  174.96 +        return (x < y) ? q + (x - y) : (x - y);
  174.97 +    }
  174.98 +
  174.99 +    // Test new code with a range of cases
 174.100 +    // with -XX:+PrintOptoAssembly, look for and_cmpLTMask
 174.101 +    static int compiled(int x, int y, int q, int p) {
 174.102 +        return (x < y) ? p + q : q;
 174.103 +    }
 174.104 +
 174.105 +    // interpreted reference
 174.106 +    static int interpreted(int x, int y, int q, int p) {
 174.107 +        return (x < y) ? p + q : q;
 174.108 +    }
 174.109 +
 174.110 +}
   175.1 --- a/test/compiler/6863420/Test.java	Wed Apr 24 20:55:28 2013 -0400
   175.2 +++ b/test/compiler/6863420/Test.java	Wed Apr 24 21:11:02 2013 -0400
   175.3 @@ -27,17 +27,35 @@
   175.4   * @bug 6863420
   175.5   * @summary os::javaTimeNanos() go backward on Solaris x86
   175.6   *
   175.7 - * @run main/othervm Test
   175.8 + * Notice the internal timeout in timeout thread Test.TOT.
   175.9 + * @run main/othervm/timeout=300 Test
  175.10   */
  175.11  
  175.12  public class Test {
  175.13 +
  175.14 +    static final int INTERNAL_TIMEOUT=240;
  175.15 +    static class TOT extends Thread {
  175.16 +       public void run() {
  175.17 +           try {
  175.18 +               Thread.sleep(INTERNAL_TIMEOUT*1000);
  175.19 +           } catch (InterruptedException ex) {
  175.20 +           }
  175.21 +           done = true;
  175.22 +       }
  175.23 +    }
  175.24 +
  175.25      static long value = 0;
  175.26      static boolean got_backward_time = false;
  175.27 +    static volatile boolean done = false;
  175.28  
  175.29      public static void main(String args[]) {
  175.30          final int count = 100000;
  175.31  
  175.32 -        for (int numThreads = 1; numThreads <= 32; numThreads++) {
  175.33 +        TOT tot = new TOT();
  175.34 +        tot.setDaemon(true);
  175.35 +        tot.start();
  175.36 +
  175.37 +        for (int numThreads = 1; !done && numThreads <= 32; numThreads++) {
  175.38              final int numRuns = 1;
  175.39              for (int t=1; t <= numRuns; t++) {
  175.40                  final int curRun = t;
  175.41 @@ -48,7 +66,7 @@
  175.42                      Runnable thread =
  175.43                          new Runnable() {
  175.44                              public void run() {
  175.45 -                                for (long l = 0; l < 100000; l++) {
  175.46 +                                for (long l = 0; !done && l < 100000; l++) {
  175.47                                      final long start = System.nanoTime();
  175.48                                      if (value == 12345678) {
  175.49                                          System.out.println("Wow!");
   176.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   176.2 +++ b/test/compiler/8011706/Test8011706.java	Wed Apr 24 21:11:02 2013 -0400
   176.3 @@ -0,0 +1,65 @@
   176.4 +/*
   176.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   176.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   176.7 + *
   176.8 + * This code is free software; you can redistribute it and/or modify it
   176.9 + * under the terms of the GNU General Public License version 2 only, as
  176.10 + * published by the Free Software Foundation.
  176.11 + *
  176.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  176.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  176.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  176.15 + * version 2 for more details (a copy is included in the LICENSE file that
  176.16 + * accompanied this code).
  176.17 + *
  176.18 + * You should have received a copy of the GNU General Public License version
  176.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  176.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  176.21 + *
  176.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  176.23 + * or visit www.oracle.com if you need additional information or have any
  176.24 + * questions.
  176.25 + */
  176.26 +
  176.27 +/*
  176.28 + * @test
  176.29 + * @bug 8011706
  176.30 + * @summary loop invariant code motion may move load before store to the same field
  176.31 + * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8011706
  176.32 + *
  176.33 + */
  176.34 +
  176.35 +public class Test8011706 {
  176.36 +    int[] array;
  176.37 +
  176.38 +    void m(boolean test, int[] array1, int[] array2) {
  176.39 +        int i = 0;
  176.40 +        if (test) {
  176.41 +            array = array1;
  176.42 +        } else {
  176.43 +            array = array2;
  176.44 +        }
  176.45 +
  176.46 +        while(true) {
  176.47 +            int v = array[i];
  176.48 +            i++;
  176.49 +            if (i >= 10) return;
  176.50 +        }
  176.51 +    }
  176.52 +
  176.53 +    static public void main(String[] args) {
  176.54 +        int[] new_array = new int[10];
  176.55 +        Test8011706 ti = new Test8011706();
  176.56 +        boolean failed = false;
  176.57 +        try {
  176.58 +            for (int i = 0; i < 10000; i++) {
  176.59 +                ti.array = null;
  176.60 +                ti.m(true, new_array, new_array);
  176.61 +            }
  176.62 +        } catch(NullPointerException ex) {
  176.63 +            throw new RuntimeException("TEST FAILED", ex);
  176.64 +        }
  176.65 +        System.out.println("TEST PASSED");
  176.66 +    }
  176.67 +
  176.68 +}
   177.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   177.2 +++ b/test/compiler/whitebox/ClearMethodStateTest.java	Wed Apr 24 21:11:02 2013 -0400
   177.3 @@ -0,0 +1,90 @@
   177.4 +/*
   177.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   177.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   177.7 + *
   177.8 + * This code is free software; you can redistribute it and/or modify it
   177.9 + * under the terms of the GNU General Public License version 2 only, as
  177.10 + * published by the Free Software Foundation.
  177.11 + *
  177.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  177.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  177.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  177.15 + * version 2 for more details (a copy is included in the LICENSE file that
  177.16 + * accompanied this code).
  177.17 + *
  177.18 + * You should have received a copy of the GNU General Public License version
  177.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  177.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  177.21 + *
  177.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  177.23 + * or visit www.oracle.com if you need additional information or have any
  177.24 + * questions.
  177.25 + */
  177.26 +
  177.27 +/*
  177.28 + * @test ClearMethodStateTest
  177.29 + * @library /testlibrary /testlibrary/whitebox
  177.30 + * @build ClearMethodStateTest
  177.31 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  177.32 + * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI ClearMethodStateTest
  177.33 + * @summary testing of WB::clearMethodState()
  177.34 + * @author igor.ignatyev@oracle.com
  177.35 + */
  177.36 +public class ClearMethodStateTest extends CompilerWhiteBoxTest {
  177.37 +
  177.38 +    public static void main(String[] args) throws Exception {
  177.39 +        for (TestCase test : TestCase.values()) {
  177.40 +            new ClearMethodStateTest(test).runTest();
  177.41 +        }
  177.42 +    }
  177.43 +
  177.44 +    public ClearMethodStateTest(TestCase testCase) {
  177.45 +        super(testCase);
  177.46 +        // to prevent inlining of #method
  177.47 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  177.48 +    }
  177.49 +
  177.50 +
  177.51 +    /**
  177.52 +     * Tests {@code WB::clearMethodState()} by calling it before/after
  177.53 +     * compilation. For non-tiered, checks that counters will be rested after
  177.54 +     * clearing of method state.
  177.55 +     *
  177.56 +     * @throws Exception if one of the checks fails.
  177.57 +     */
  177.58 +    @Override
  177.59 +    protected void test() throws Exception {
  177.60 +        checkNotCompiled();
  177.61 +        compile();
  177.62 +        WHITE_BOX.clearMethodState(method);
  177.63 +        checkCompiled();
  177.64 +        WHITE_BOX.clearMethodState(method);
  177.65 +        WHITE_BOX.deoptimizeMethod(method);
  177.66 +        checkNotCompiled();
  177.67 +
  177.68 +
  177.69 +        if (!TIERED_COMPILATION) {
  177.70 +            WHITE_BOX.clearMethodState(method);
  177.71 +            compile(COMPILE_THRESHOLD);
  177.72 +            checkCompiled();
  177.73 +
  177.74 +            WHITE_BOX.deoptimizeMethod(method);
  177.75 +            checkNotCompiled();
  177.76 +            WHITE_BOX.clearMethodState(method);
  177.77 +
  177.78 +            // invoke method one less time than needed to compile
  177.79 +            if (COMPILE_THRESHOLD > 1) {
  177.80 +                compile(COMPILE_THRESHOLD - 1);
  177.81 +                checkNotCompiled();
  177.82 +            } else {
  177.83 +                System.err.println("Warning: 'CompileThreshold' <= 1");
  177.84 +            }
  177.85 +
  177.86 +            compile(1);
  177.87 +            checkCompiled();
  177.88 +        } else {
  177.89 +            System.err.println(
  177.90 +                    "Warning: part of test is not applicable in Tiered");
  177.91 +        }
  177.92 +    }
  177.93 +}
   178.1 --- a/test/compiler/whitebox/CompilerWhiteBoxTest.java	Wed Apr 24 20:55:28 2013 -0400
   178.2 +++ b/test/compiler/whitebox/CompilerWhiteBoxTest.java	Wed Apr 24 21:11:02 2013 -0400
   178.3 @@ -21,66 +21,135 @@
   178.4   * questions.
   178.5   */
   178.6  
   178.7 +import com.sun.management.HotSpotDiagnosticMXBean;
   178.8 +import com.sun.management.VMOption;
   178.9  import sun.hotspot.WhiteBox;
  178.10  import sun.management.ManagementFactoryHelper;
  178.11 -import com.sun.management.HotSpotDiagnosticMXBean;
  178.12  
  178.13 +import java.lang.reflect.Constructor;
  178.14 +import java.lang.reflect.Executable;
  178.15  import java.lang.reflect.Method;
  178.16 +import java.util.Objects;
  178.17 +import java.util.concurrent.Callable;
  178.18  
  178.19 -/*
  178.20 +/**
  178.21 + * Abstract class for WhiteBox testing of JIT.
  178.22 + *
  178.23   * @author igor.ignatyev@oracle.com
  178.24   */
  178.25  public abstract class CompilerWhiteBoxTest {
  178.26 +    /** {@code CompLevel::CompLevel_none} -- Interpreter */
  178.27 +    protected static int COMP_LEVEL_NONE = 0;
  178.28 +    /** {@code CompLevel::CompLevel_any}, {@code CompLevel::CompLevel_all} */
  178.29 +    protected static int COMP_LEVEL_ANY = -1;
  178.30 +    /** Instance of WhiteBox */
  178.31      protected static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
  178.32 -    protected static final Method METHOD = getMethod("method");
  178.33 +    /** Value of {@code -XX:CompileThreshold} */
  178.34      protected static final int COMPILE_THRESHOLD
  178.35              = Integer.parseInt(getVMOption("CompileThreshold", "10000"));
  178.36 +    /** Value of {@code -XX:BackgroundCompilation} */
  178.37      protected static final boolean BACKGROUND_COMPILATION
  178.38              = Boolean.valueOf(getVMOption("BackgroundCompilation", "true"));
  178.39 +    /** Value of {@code -XX:TieredCompilation} */
  178.40 +    protected static final boolean TIERED_COMPILATION
  178.41 +            = Boolean.valueOf(getVMOption("TieredCompilation", "false"));
  178.42 +    /** Value of {@code -XX:TieredStopAtLevel} */
  178.43 +    protected static final int TIERED_STOP_AT_LEVEL
  178.44 +            = Integer.parseInt(getVMOption("TieredStopAtLevel", "0"));
  178.45  
  178.46 -    protected static Method getMethod(String name) {
  178.47 +    /**
  178.48 +     * Returns value of VM option.
  178.49 +     *
  178.50 +     * @param name option's name
  178.51 +     * @return value of option or {@code null}, if option doesn't exist
  178.52 +     * @throws NullPointerException if name is null
  178.53 +     */
  178.54 +    protected static String getVMOption(String name) {
  178.55 +        Objects.requireNonNull(name);
  178.56 +        HotSpotDiagnosticMXBean diagnostic
  178.57 +                = ManagementFactoryHelper.getDiagnosticMXBean();
  178.58 +        VMOption tmp;
  178.59          try {
  178.60 -            return CompilerWhiteBoxTest.class.getDeclaredMethod(name);
  178.61 -        } catch (NoSuchMethodException | SecurityException e) {
  178.62 -            throw new RuntimeException(
  178.63 -                    "exception on getting method " + name, e);
  178.64 +            tmp = diagnostic.getVMOption(name);
  178.65 +        } catch (IllegalArgumentException e) {
  178.66 +            tmp = null;
  178.67          }
  178.68 +        return (tmp == null ? null : tmp.getValue());
  178.69      }
  178.70  
  178.71 -    protected static String getVMOption(String name) {
  178.72 -        String result;
  178.73 -        HotSpotDiagnosticMXBean diagnostic
  178.74 -                = ManagementFactoryHelper.getDiagnosticMXBean();
  178.75 -        result = diagnostic.getVMOption(name).getValue();
  178.76 -        return result;
  178.77 -    }
  178.78 -
  178.79 +    /**
  178.80 +     * Returns value of VM option or default value.
  178.81 +     *
  178.82 +     * @param name         option's name
  178.83 +     * @param defaultValue default value
  178.84 +     * @return value of option or {@code defaultValue}, if option doesn't exist
  178.85 +     * @throws NullPointerException if name is null
  178.86 +     * @see #getVMOption(String)
  178.87 +     */
  178.88      protected static String getVMOption(String name, String defaultValue) {
  178.89          String result = getVMOption(name);
  178.90          return result == null ? defaultValue : result;
  178.91      }
  178.92  
  178.93 -    protected final void runTest() throws RuntimeException {
  178.94 +    /** tested method */
  178.95 +    protected final Executable method;
  178.96 +    private final Callable<Integer> callable;
  178.97 +
  178.98 +    /**
  178.99 +     * Constructor.
 178.100 +     *
 178.101 +     * @param testCase object, that contains tested method and way to invoke it.
 178.102 +     */
 178.103 +    protected CompilerWhiteBoxTest(TestCase testCase) {
 178.104 +        Objects.requireNonNull(testCase);
 178.105 +        System.out.println("TEST CASE:" + testCase.name());
 178.106 +        method = testCase.executable;
 178.107 +        callable = testCase.callable;
 178.108 +    }
 178.109 +
 178.110 +    /**
 178.111 +     * Template method for testing. Prints tested method's info before
 178.112 +     * {@linkplain #test()} and after {@linkplain #test()} or on thrown
 178.113 +     * exception.
 178.114 +     *
 178.115 +     * @throws RuntimeException if method {@linkplain #test()} throws any
 178.116 +     *                          exception
 178.117 +     * @see #test()
 178.118 +     */
 178.119 +    protected final void runTest() {
 178.120          if (ManagementFactoryHelper.getCompilationMXBean() == null) {
 178.121              System.err.println(
 178.122                      "Warning: test is not applicable in interpreted mode");
 178.123              return;
 178.124          }
 178.125          System.out.println("at test's start:");
 178.126 -        printInfo(METHOD);
 178.127 +        printInfo();
 178.128          try {
 178.129              test();
 178.130          } catch (Exception e) {
 178.131              System.out.printf("on exception '%s':", e.getMessage());
 178.132 -            printInfo(METHOD);
 178.133 +            printInfo();
 178.134              e.printStackTrace();
 178.135 +            if (e instanceof RuntimeException) {
 178.136 +                throw (RuntimeException) e;
 178.137 +            }
 178.138              throw new RuntimeException(e);
 178.139          }
 178.140          System.out.println("at test's end:");
 178.141 -        printInfo(METHOD);
 178.142 +        printInfo();
 178.143      }
 178.144  
 178.145 -    protected static void checkNotCompiled(Method method) {
 178.146 +    /**
 178.147 +     * Checks, that {@linkplain #method} is not compiled.
 178.148 +     *
 178.149 +     * @throws RuntimeException if {@linkplain #method} is in compiler queue or
  178.150 +     *                          is compiled, or if {@linkplain #method} has nonzero
 178.151 +     *                          compilation level.
 178.152 +     */
 178.153 +    protected final void checkNotCompiled() {
 178.154 +        if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
 178.155 +            throw new RuntimeException(method + " must not be in queue");
 178.156 +        }
 178.157          if (WHITE_BOX.isMethodCompiled(method)) {
 178.158              throw new RuntimeException(method + " must be not compiled");
 178.159          }
 178.160 @@ -89,10 +158,16 @@
 178.161          }
 178.162      }
 178.163  
 178.164 -    protected static void checkCompiled(Method method)
 178.165 -            throws InterruptedException {
 178.166 +    /**
 178.167 +     * Checks, that {@linkplain #method} is compiled.
 178.168 +     *
 178.169 +     * @throws RuntimeException if {@linkplain #method} isn't in compiler queue
 178.170 +     *                          and isn't compiled, or if {@linkplain #method}
  178.171 +     *                          has zero compilation level
 178.172 +     */
 178.173 +    protected final void checkCompiled() {
 178.174          final long start = System.currentTimeMillis();
 178.175 -        waitBackgroundCompilation(method);
 178.176 +        waitBackgroundCompilation();
 178.177          if (WHITE_BOX.isMethodQueuedForCompilation(method)) {
 178.178              System.err.printf("Warning: %s is still in queue after %dms%n",
 178.179                      method, System.currentTimeMillis() - start);
 178.180 @@ -106,23 +181,30 @@
 178.181          }
 178.182      }
 178.183  
 178.184 -    protected static void waitBackgroundCompilation(Method method)
 178.185 -            throws InterruptedException {
 178.186 +    /**
 178.187 +     * Waits for completion of background compilation of {@linkplain #method}.
 178.188 +     */
 178.189 +    protected final void waitBackgroundCompilation() {
 178.190          if (!BACKGROUND_COMPILATION) {
 178.191              return;
 178.192          }
 178.193          final Object obj = new Object();
 178.194 -        synchronized (obj) {
 178.195 -            for (int i = 0; i < 10; ++i) {
 178.196 -                if (!WHITE_BOX.isMethodQueuedForCompilation(method)) {
 178.197 -                    break;
 178.198 +        for (int i = 0; i < 10
 178.199 +                && WHITE_BOX.isMethodQueuedForCompilation(method); ++i) {
 178.200 +            synchronized (obj) {
 178.201 +                try {
 178.202 +                    obj.wait(1000);
 178.203 +                } catch (InterruptedException e) {
 178.204 +                    Thread.currentThread().interrupt();
 178.205                  }
 178.206 -                obj.wait(1000);
 178.207              }
 178.208          }
 178.209      }
 178.210  
 178.211 -    protected static void printInfo(Method method) {
 178.212 +    /**
 178.213 +     * Prints information about {@linkplain #method}.
 178.214 +     */
 178.215 +    protected final void printInfo() {
 178.216          System.out.printf("%n%s:%n", method);
 178.217          System.out.printf("\tcompilable:\t%b%n",
 178.218                  WHITE_BOX.isMethodCompilable(method));
 178.219 @@ -136,19 +218,139 @@
 178.220                  WHITE_BOX.getCompileQueuesSize());
 178.221      }
 178.222  
 178.223 +    /**
 178.224 +     * Executes testing.
 178.225 +     */
 178.226      protected abstract void test() throws Exception;
 178.227  
 178.228 +    /**
 178.229 +     * Tries to trigger compilation of {@linkplain #method} by call
 178.230 +     * {@linkplain #callable} enough times.
 178.231 +     *
 178.232 +     * @return accumulated result
 178.233 +     * @see #compile(int)
 178.234 +     */
 178.235      protected final int compile() {
 178.236 +        return compile(Math.max(COMPILE_THRESHOLD, 150000));
 178.237 +    }
 178.238 +
 178.239 +    /**
 178.240 +     * Tries to trigger compilation of {@linkplain #method} by call
 178.241 +     * {@linkplain #callable} specified times.
 178.242 +     *
 178.243 +     * @param count invocation count
 178.244 +     * @return accumulated result
 178.245 +     */
 178.246 +    protected final int compile(int count) {
 178.247          int result = 0;
 178.248 -        int count = Math.max(COMPILE_THRESHOLD, 150000);
 178.249 +        Integer tmp;
 178.250          for (int i = 0; i < count; ++i) {
 178.251 -            result += method();
 178.252 +            try {
 178.253 +                tmp = callable.call();
 178.254 +            } catch (Exception e) {
 178.255 +                tmp = null;
 178.256 +            }
 178.257 +            result += tmp == null ? 0 : tmp;
 178.258          }
 178.259          System.out.println("method was invoked " + count + " times");
 178.260          return result;
 178.261      }
 178.262 +}
 178.263  
 178.264 -    protected int method() {
 178.265 -        return 42;
 178.266 +/**
 178.267 + * Utility structure containing tested method and object to invoke it.
 178.268 + */
 178.269 +enum TestCase {
 178.270 +    /** constructor test case */
 178.271 +    CONSTRUCTOR_TEST(Helper.CONSTRUCTOR, Helper.CONSTRUCTOR_CALLABLE),
 178.272 +    /** method test case */
  178.273 +    METHOD_TEST(Helper.METHOD, Helper.METHOD_CALLABLE),
 178.274 +    /** static method test case */
 178.275 +    STATIC_TEST(Helper.STATIC, Helper.STATIC_CALLABLE);
 178.276 +
 178.277 +    /** tested method */
 178.278 +    final Executable executable;
 178.279 +    /** object to invoke {@linkplain #executable} */
 178.280 +    final Callable<Integer> callable;
 178.281 +
 178.282 +    private TestCase(Executable executable, Callable<Integer> callable) {
 178.283 +        this.executable = executable;
 178.284 +        this.callable = callable;
 178.285 +    }
 178.286 +
 178.287 +    private static class Helper {
 178.288 +        private static final Callable<Integer> CONSTRUCTOR_CALLABLE
 178.289 +                = new Callable<Integer>() {
 178.290 +            @Override
 178.291 +            public Integer call() throws Exception {
 178.292 +                return new Helper(1337).hashCode();
 178.293 +            }
 178.294 +        };
 178.295 +
 178.296 +        private static final Callable<Integer> METHOD_CALLABLE
 178.297 +                = new Callable<Integer>() {
 178.298 +            private final Helper helper = new Helper();
 178.299 +
 178.300 +            @Override
 178.301 +            public Integer call() throws Exception {
 178.302 +                return helper.method();
 178.303 +            }
 178.304 +        };
 178.305 +
 178.306 +        private static final Callable<Integer> STATIC_CALLABLE
 178.307 +                = new Callable<Integer>() {
 178.308 +            @Override
 178.309 +            public Integer call() throws Exception {
 178.310 +                return staticMethod();
 178.311 +            }
 178.312 +        };
 178.313 +
 178.314 +        private static final Constructor CONSTRUCTOR;
 178.315 +        private static final Method METHOD;
 178.316 +        private static final Method STATIC;
 178.317 +
 178.318 +        static {
 178.319 +            try {
 178.320 +                CONSTRUCTOR = Helper.class.getDeclaredConstructor(int.class);
 178.321 +            } catch (NoSuchMethodException | SecurityException e) {
 178.322 +                throw new RuntimeException(
 178.323 +                        "exception on getting method Helper.<init>(int)", e);
 178.324 +            }
 178.325 +            try {
 178.326 +                METHOD = Helper.class.getDeclaredMethod("method");
 178.327 +            } catch (NoSuchMethodException | SecurityException e) {
 178.328 +                throw new RuntimeException(
 178.329 +                        "exception on getting method Helper.method()", e);
 178.330 +            }
 178.331 +            try {
 178.332 +                STATIC = Helper.class.getDeclaredMethod("staticMethod");
 178.333 +            } catch (NoSuchMethodException | SecurityException e) {
 178.334 +                throw new RuntimeException(
 178.335 +                        "exception on getting method Helper.staticMethod()", e);
 178.336 +            }
 178.337 +        }
 178.338 +
 178.339 +        private static int staticMethod() {
 178.340 +            return 1138;
 178.341 +        }
 178.342 +
 178.343 +        private int method() {
 178.344 +            return 42;
 178.345 +        }
 178.346 +
 178.347 +        private final int x;
 178.348 +
 178.349 +        public Helper() {
 178.350 +            x = 0;
 178.351 +        }
 178.352 +
 178.353 +        private Helper(int x) {
 178.354 +            this.x = x;
 178.355 +        }
 178.356 +
 178.357 +        @Override
 178.358 +        public int hashCode() {
 178.359 +            return x;
 178.360 +        }
 178.361      }
 178.362  }
   179.1 --- a/test/compiler/whitebox/DeoptimizeAllTest.java	Wed Apr 24 20:55:28 2013 -0400
   179.2 +++ b/test/compiler/whitebox/DeoptimizeAllTest.java	Wed Apr 24 21:11:02 2013 -0400
   179.3 @@ -27,20 +27,34 @@
   179.4   * @build DeoptimizeAllTest
   179.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   179.6   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeAllTest
   179.7 + * @summary testing of WB::deoptimizeAll()
   179.8   * @author igor.ignatyev@oracle.com
   179.9   */
  179.10  public class DeoptimizeAllTest extends CompilerWhiteBoxTest {
  179.11  
  179.12      public static void main(String[] args) throws Exception {
  179.13 -        // to prevent inlining #method into #compile()
  179.14 -        WHITE_BOX.setDontInlineMethod(METHOD, true);
  179.15 -        new DeoptimizeAllTest().runTest();
  179.16 +        for (TestCase test : TestCase.values()) {
  179.17 +            new DeoptimizeAllTest(test).runTest();
  179.18 +        }
  179.19      }
  179.20  
  179.21 +    public DeoptimizeAllTest(TestCase testCase) {
  179.22 +        super(testCase);
  179.23 +        // to prevent inlining of #method
  179.24 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  179.25 +    }
  179.26 +
  179.27 +    /**
  179.28 +     * Tests {@code WB::deoptimizeAll()} by calling it after
  179.29 +     * compilation and checking that method isn't compiled.
  179.30 +     *
  179.31 +     * @throws Exception if one of the checks fails.
  179.32 +     */
  179.33 +    @Override
  179.34      protected void test() throws Exception {
  179.35          compile();
  179.36 -        checkCompiled(METHOD);
  179.37 +        checkCompiled();
  179.38          WHITE_BOX.deoptimizeAll();
  179.39 -        checkNotCompiled(METHOD);
  179.40 +        checkNotCompiled();
  179.41      }
  179.42  }
   180.1 --- a/test/compiler/whitebox/DeoptimizeMethodTest.java	Wed Apr 24 20:55:28 2013 -0400
   180.2 +++ b/test/compiler/whitebox/DeoptimizeMethodTest.java	Wed Apr 24 21:11:02 2013 -0400
   180.3 @@ -27,20 +27,34 @@
   180.4   * @build DeoptimizeMethodTest
   180.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   180.6   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI DeoptimizeMethodTest
   180.7 + * @summary testing of WB::deoptimizeMethod()
   180.8   * @author igor.ignatyev@oracle.com
   180.9   */
  180.10  public class DeoptimizeMethodTest extends CompilerWhiteBoxTest {
  180.11  
  180.12      public static void main(String[] args) throws Exception {
  180.13 -        // to prevent inlining #method into #compile()
  180.14 -        WHITE_BOX.setDontInlineMethod(METHOD, true);
  180.15 -        new DeoptimizeMethodTest().runTest();
  180.16 +        for (TestCase test : TestCase.values()) {
  180.17 +            new DeoptimizeMethodTest(test).runTest();
  180.18 +        }
  180.19      }
  180.20  
  180.21 +    public DeoptimizeMethodTest(TestCase testCase) {
  180.22 +        super(testCase);
  180.23 +        // to prevent inlining of #method
  180.24 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  180.25 +    }
  180.26 +
  180.27 +    /**
  180.28 +     * Tests {@code WB::deoptimizeMethod()} by calling it after
  180.29 +     * compilation and checking that method isn't compiled.
  180.30 +     *
  180.31 +     * @throws Exception if one of the checks fails.
  180.32 +     */
  180.33 +    @Override
  180.34      protected void test() throws Exception {
  180.35          compile();
  180.36 -        checkCompiled(METHOD);
  180.37 -        WHITE_BOX.deoptimizeMethod(METHOD);
  180.38 -        checkNotCompiled(METHOD);
  180.39 +        checkCompiled();
  180.40 +        WHITE_BOX.deoptimizeMethod(method);
  180.41 +        checkNotCompiled();
  180.42      }
  180.43  }
   181.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   181.2 +++ b/test/compiler/whitebox/EnqueueMethodForCompilationTest.java	Wed Apr 24 21:11:02 2013 -0400
   181.3 @@ -0,0 +1,86 @@
   181.4 +/*
   181.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   181.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   181.7 + *
   181.8 + * This code is free software; you can redistribute it and/or modify it
   181.9 + * under the terms of the GNU General Public License version 2 only, as
  181.10 + * published by the Free Software Foundation.
  181.11 + *
  181.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  181.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  181.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  181.15 + * version 2 for more details (a copy is included in the LICENSE file that
  181.16 + * accompanied this code).
  181.17 + *
  181.18 + * You should have received a copy of the GNU General Public License version
  181.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  181.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  181.21 + *
  181.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  181.23 + * or visit www.oracle.com if you need additional information or have any
  181.24 + * questions.
  181.25 + */
  181.26 +
  181.27 +/*
  181.28 + * @test EnqueueMethodForCompilationTest
  181.29 + * @library /testlibrary /testlibrary/whitebox
  181.30 + * @build EnqueueMethodForCompilationTest
  181.31 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  181.32 + * @run main/othervm -Xbootclasspath/a:. -Xmixed -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI EnqueueMethodForCompilationTest
  181.33 + * @summary testing of WB::enqueueMethodForCompilation()
  181.34 + * @author igor.ignatyev@oracle.com
  181.35 + */
  181.36 +public class EnqueueMethodForCompilationTest extends CompilerWhiteBoxTest {
  181.37 +
  181.38 +    public static void main(String[] args) throws Exception {
  181.39 +        for (TestCase test : TestCase.values()) {
  181.40 +            new EnqueueMethodForCompilationTest(test).runTest();
  181.41 +        }
  181.42 +    }
  181.43 +
  181.44 +    public EnqueueMethodForCompilationTest(TestCase testCase) {
  181.45 +        super(testCase);
  181.46 +        // to prevent inlining of #method
  181.47 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  181.48 +    }
  181.49 +
  181.50 +    @Override
  181.51 +    protected void test() throws Exception {
  181.52 +        checkNotCompiled();
  181.53 +
  181.54 +        // method can not be compiled on level 'none'
  181.55 +        WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_NONE);
  181.56 +        if (WHITE_BOX.isMethodCompilable(method, COMP_LEVEL_NONE)) {
  181.57 +            throw new RuntimeException(method
  181.58 +                    + " is compilable at level COMP_LEVEL_NONE");
  181.59 +        }
  181.60 +        checkNotCompiled();
  181.61 +
  181.62 +        // COMP_LEVEL_ANY is inapplicable as level for compilation
  181.63 +        WHITE_BOX.enqueueMethodForCompilation(method, COMP_LEVEL_ANY);
  181.64 +        checkNotCompiled();
  181.65 +
  181.66 +        WHITE_BOX.enqueueMethodForCompilation(method, 5);
  181.67 +        if (!WHITE_BOX.isMethodCompilable(method, 5)) {
  181.68 +            checkNotCompiled();
  181.69 +            compile();
  181.70 +            checkCompiled();
  181.71 +        } else {
  181.72 +            checkCompiled();
  181.73 +        }
  181.74 +
  181.75 +        int compLevel = WHITE_BOX.getMethodCompilationLevel(method);
  181.76 +        WHITE_BOX.deoptimizeMethod(method);
  181.77 +        checkNotCompiled();
  181.78 +
  181.79 +        WHITE_BOX.enqueueMethodForCompilation(method, compLevel);
  181.80 +        checkCompiled();
  181.81 +        WHITE_BOX.deoptimizeMethod(method);
  181.82 +        checkNotCompiled();
  181.83 +
  181.84 +        compile();
  181.85 +        checkCompiled();
  181.86 +        WHITE_BOX.deoptimizeMethod(method);
  181.87 +        checkNotCompiled();
  181.88 +    }
  181.89 +}
   182.1 --- a/test/compiler/whitebox/IsMethodCompilableTest.java	Wed Apr 24 20:55:28 2013 -0400
   182.2 +++ b/test/compiler/whitebox/IsMethodCompilableTest.java	Wed Apr 24 21:11:02 2013 -0400
   182.3 @@ -28,9 +28,13 @@
   182.4   * @build IsMethodCompilableTest
   182.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   182.6   * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI IsMethodCompilableTest
   182.7 + * @summary testing of WB::isMethodCompilable()
   182.8   * @author igor.ignatyev@oracle.com
   182.9   */
  182.10  public class IsMethodCompilableTest extends CompilerWhiteBoxTest {
  182.11 +    /**
  182.12 +     * Value of {@code -XX:PerMethodRecompilationCutoff}
  182.13 +     */
  182.14      protected static final long PER_METHOD_RECOMPILATION_CUTOFF;
  182.15  
  182.16      static {
  182.17 @@ -44,14 +48,28 @@
  182.18      }
  182.19  
  182.20      public static void main(String[] args) throws Exception {
  182.21 -        // to prevent inlining #method into #compile()
  182.22 -        WHITE_BOX.setDontInlineMethod(METHOD, true);
  182.23 -        new IsMethodCompilableTest().runTest();
  182.24 +        for (TestCase test : TestCase.values()) {
  182.25 +            new IsMethodCompilableTest(test).runTest();
  182.26 +        }
  182.27      }
  182.28  
  182.29 +    public IsMethodCompilableTest(TestCase testCase) {
  182.30 +        super(testCase);
  182.31 +        // to prevent inlining of #method
  182.32 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  182.33 +    }
  182.34 +
  182.35 +    /**
  182.36 +     * Tests {@code WB::isMethodCompilable()} by recompilation of tested method
  182.37 +     * 'PerMethodRecompilationCutoff' times and checks compilation status. Also
  182.38 +     * checks that WB::clearMethodState() clears no-compilable flags.
  182.39 +     *
  182.40 +     * @throws Exception if one of the checks fails.
  182.41 +     */
  182.42 +    @Override
  182.43      protected void test() throws Exception {
  182.44 -        if (!WHITE_BOX.isMethodCompilable(METHOD)) {
  182.45 -            throw new RuntimeException(METHOD + " must be compilable");
  182.46 +        if (!WHITE_BOX.isMethodCompilable(method)) {
  182.47 +            throw new RuntimeException(method + " must be compilable");
  182.48          }
  182.49          System.out.println("PerMethodRecompilationCutoff = "
  182.50                  + PER_METHOD_RECOMPILATION_CUTOFF);
  182.51 @@ -60,26 +78,48 @@
  182.52                      "Warning: test is not applicable if PerMethodRecompilationCutoff == Inf");
  182.53              return;
  182.54          }
  182.55 -        boolean madeNotCompilable = false;
  182.56  
  182.57 -        for (long i = 0; i < PER_METHOD_RECOMPILATION_CUTOFF; ++i) {
  182.58 -            compile();
  182.59 -            waitBackgroundCompilation(METHOD);
  182.60 -            WHITE_BOX.deoptimizeMethod(METHOD);
  182.61 -            if (!WHITE_BOX.isMethodCompilable(METHOD)) {
  182.62 -                madeNotCompilable = true;
  182.63 -                break;
  182.64 -            }
  182.65 +        // deoptimize 'PerMethodRecompilationCutoff' times and clear state
  182.66 +        for (long i = 0L, n = PER_METHOD_RECOMPILATION_CUTOFF - 1; i < n; ++i) {
  182.67 +            compileAndDeoptimize();
  182.68          }
  182.69 -        if (!madeNotCompilable) {
  182.70 -            throw new RuntimeException(METHOD + " is still compilable after "
  182.71 +        if (!WHITE_BOX.isMethodCompilable(method)) {
  182.72 +            throw new RuntimeException(method + " is not compilable after "
  182.73 +                    + (PER_METHOD_RECOMPILATION_CUTOFF - 1) + " iterations");
  182.74 +        }
  182.75 +        WHITE_BOX.clearMethodState(method);
  182.76 +
  182.77 +        // deoptimize 'PerMethodRecompilationCutoff' + 1 times
  182.78 +        long i;
  182.79 +        for (i = 0L; i < PER_METHOD_RECOMPILATION_CUTOFF
  182.80 +                && WHITE_BOX.isMethodCompilable(method); ++i) {
  182.81 +            compileAndDeoptimize();
  182.82 +        }
  182.83 +        if (i != PER_METHOD_RECOMPILATION_CUTOFF) {
  182.84 +            throw new RuntimeException(method + " is not compilable after "
  182.85 +                    + i + " iterations, but must only after "
  182.86 +                    + PER_METHOD_RECOMPILATION_CUTOFF);
  182.87 +        }
  182.88 +        if (WHITE_BOX.isMethodCompilable(method)) {
  182.89 +            throw new RuntimeException(method + " is still compilable after "
  182.90                      + PER_METHOD_RECOMPILATION_CUTOFF + " iterations");
  182.91          }
  182.92          compile();
  182.93 -        if (WHITE_BOX.isMethodCompiled(METHOD)) {
  182.94 -            printInfo(METHOD);
  182.95 -            throw new RuntimeException(
  182.96 -                    METHOD + " is not compilable but compiled");
  182.97 +        checkNotCompiled();
  182.98 +
  182.99 +        // WB.clearMethodState() must reset no-compilable flags
 182.100 +        WHITE_BOX.clearMethodState(method);
 182.101 +        if (!WHITE_BOX.isMethodCompilable(method)) {
 182.102 +            throw new RuntimeException(method
 182.103 +                    + " is not compilable after clearMethodState()");
 182.104          }
 182.105 +        compile();
 182.106 +        checkCompiled();
 182.107 +    }
 182.108 +
 182.109 +    private void compileAndDeoptimize() throws Exception {
 182.110 +        compile();
 182.111 +        waitBackgroundCompilation();
 182.112 +        WHITE_BOX.deoptimizeMethod(method);
 182.113      }
 182.114  }
   183.1 --- a/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Wed Apr 24 20:55:28 2013 -0400
   183.2 +++ b/test/compiler/whitebox/MakeMethodNotCompilableTest.java	Wed Apr 24 21:11:02 2013 -0400
   183.3 @@ -27,31 +27,85 @@
   183.4   * @build MakeMethodNotCompilableTest
   183.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   183.6   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI MakeMethodNotCompilableTest
   183.7 + * @summary testing of WB::makeMethodNotCompilable()
   183.8   * @author igor.ignatyev@oracle.com
   183.9   */
  183.10  public class MakeMethodNotCompilableTest extends CompilerWhiteBoxTest {
  183.11  
  183.12      public static void main(String[] args) throws Exception {
  183.13 -        // to prevent inlining #method into #compile()
  183.14 -        WHITE_BOX.setDontInlineMethod(METHOD, true);
  183.15 -        new MakeMethodNotCompilableTest().runTest();
  183.16 +        if (args.length == 0) {
  183.17 +            for (TestCase test : TestCase.values()) {
  183.18 +                new MakeMethodNotCompilableTest(test).runTest();
  183.19 +            }
  183.20 +        } else {
  183.21 +            for (String name : args) {
  183.22 +                new MakeMethodNotCompilableTest(
  183.23 +                        TestCase.valueOf(name)).runTest();
  183.24 +            }
  183.25 +        }
  183.26      }
  183.27  
  183.28 -    protected void test() throws Exception  {
  183.29 -        if (!WHITE_BOX.isMethodCompilable(METHOD)) {
  183.30 -            throw new RuntimeException(METHOD + " must be compilable");
  183.31 +    public MakeMethodNotCompilableTest(TestCase testCase) {
  183.32 +        super(testCase);
  183.33 +        // to prevent inlining of #method
  183.34 +        WHITE_BOX.testSetDontInlineMethod(method, true);
  183.35 +    }
  183.36 +
  183.37 +    /**
  183.38 +     * Tests {@code WB::makeMethodNotCompilable()} by calling it before
  183.39 +     * compilation and checking that method isn't compiled. Also
  183.40 +     * checks that WB::clearMethodState() clears no-compilable flags. For
  183.41 +     * tiered, additional checks for all available levels are conducted.
  183.42 +     *
  183.43 +     * @throws Exception if one of the checks fails.
  183.44 +     */
  183.45 +    @Override
  183.46 +    protected void test() throws Exception {
  183.47 +        checkNotCompiled();
  183.48 +        if (!WHITE_BOX.isMethodCompilable(method)) {
  183.49 +            throw new RuntimeException(method + " must be compilable");
  183.50          }
  183.51 -        WHITE_BOX.makeMethodNotCompilable(METHOD);
  183.52 -        if (WHITE_BOX.isMethodCompilable(METHOD)) {
  183.53 -            throw new RuntimeException(METHOD + " must be not compilable");
  183.54 +
  183.55 +        if (TIERED_COMPILATION) {
  183.56 +            for (int i = 1, n = TIERED_STOP_AT_LEVEL + 1; i < n; ++i) {
  183.57 +                WHITE_BOX.makeMethodNotCompilable(method, i);
  183.58 +                if (WHITE_BOX.isMethodCompilable(method, i)) {
  183.59 +                    throw new RuntimeException(method
  183.60 +                            + " must be not compilable at level" + i);
  183.61 +                }
  183.62 +                WHITE_BOX.enqueueMethodForCompilation(method, i);
  183.63 +                checkNotCompiled();
  183.64 +
  183.65 +                if (!WHITE_BOX.isMethodCompilable(method)) {
  183.66 +                    System.out.println(method
  183.67 +                            + " is not compilable after level " + i);
  183.68 +                }
  183.69 +            }
  183.70 +
  183.71 +            // WB.clearMethodState() must reset no-compilable flags
  183.72 +            WHITE_BOX.clearMethodState(method);
  183.73 +            if (!WHITE_BOX.isMethodCompilable(method)) {
  183.74 +                throw new RuntimeException(method
  183.75 +                        + " is not compilable after clearMethodState()");
  183.76 +            }
  183.77 +        }
  183.78 +        WHITE_BOX.makeMethodNotCompilable(method);
  183.79 +        if (WHITE_BOX.isMethodCompilable(method)) {
  183.80 +            throw new RuntimeException(method + " must be not compilable");
  183.81 +        }
  183.82 +
  183.83 +        compile();
  183.84 +        checkNotCompiled();
  183.85 +        if (WHITE_BOX.isMethodCompilable(method)) {
  183.86 +            throw new RuntimeException(method + " must be not compilable");
  183.87 +        }
  183.88 +        // WB.clearMethodState() must reset no-compilable flags
  183.89 +        WHITE_BOX.clearMethodState(method);
  183.90 +        if (!WHITE_BOX.isMethodCompilable(method)) {
  183.91 +            throw new RuntimeException(method
  183.92 +                    + " is not compilable after clearMethodState()");
  183.93          }
  183.94          compile();
  183.95 -        if (WHITE_BOX.isMethodQueuedForCompilation(METHOD)) {
  183.96 -            throw new RuntimeException(METHOD + " must not be in queue");
  183.97 -        }
  183.98 -        checkNotCompiled(METHOD);
  183.99 -        if (WHITE_BOX.isMethodCompilable(METHOD)) {
 183.100 -            throw new RuntimeException(METHOD + " must be not compilable");
 183.101 -        }
 183.102 +        checkCompiled();
 183.103      }
 183.104  }
   184.1 --- a/test/compiler/whitebox/SetDontInlineMethodTest.java	Wed Apr 24 20:55:28 2013 -0400
   184.2 +++ b/test/compiler/whitebox/SetDontInlineMethodTest.java	Wed Apr 24 21:11:02 2013 -0400
   184.3 @@ -27,33 +27,47 @@
   184.4   * @build SetDontInlineMethodTest
   184.5   * @run main ClassFileInstaller sun.hotspot.WhiteBox
   184.6   * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetDontInlineMethodTest
   184.7 + * @summary testing of WB::testSetDontInlineMethod()
   184.8   * @author igor.ignatyev@oracle.com
   184.9   */
  184.10  public class SetDontInlineMethodTest extends CompilerWhiteBoxTest {
  184.11  
  184.12      public static void main(String[] args) throws Exception {
  184.13 -        new SetDontInlineMethodTest().runTest();
  184.14 +        for (TestCase test : TestCase.values()) {
  184.15 +            new SetDontInlineMethodTest(test).runTest();
  184.16 +        }
  184.17      }
  184.18  
  184.19 +    public SetDontInlineMethodTest(TestCase testCase) {
  184.20 +        super(testCase);
  184.21 +    }
  184.22 +
  184.23 +    /**
  184.24 +     * Tests {@code WB::testSetDontInlineMethod()} by sequential calling it and
  184.25 +     * checking of return value.
  184.26 +     *
  184.27 +     * @throws Exception if one of the checks fails.
  184.28 +     */
  184.29 +    @Override
  184.30      protected void test() throws Exception {
  184.31 -        if (WHITE_BOX.setDontInlineMethod(METHOD, true)) {
  184.32 -            throw new RuntimeException("on start " + METHOD
  184.33 +        if (WHITE_BOX.testSetDontInlineMethod(method, true)) {
  184.34 +            throw new RuntimeException("on start " + method
  184.35                      + " must be inlineable");
  184.36          }
  184.37 -        if (!WHITE_BOX.setDontInlineMethod(METHOD, true)) {
  184.38 -            throw new RuntimeException("after first change to true " + METHOD
  184.39 +        if (!WHITE_BOX.testSetDontInlineMethod(method, true)) {
  184.40 +            throw new RuntimeException("after first change to true " + method
  184.41                      + " must be not inlineable");
  184.42          }
  184.43 -        if (!WHITE_BOX.setDontInlineMethod(METHOD, false)) {
  184.44 -            throw new RuntimeException("after second change to true " + METHOD
  184.45 +        if (!WHITE_BOX.testSetDontInlineMethod(method, false)) {
  184.46 +            throw new RuntimeException("after second change to true " + method
  184.47                      + " must be still not inlineable");
  184.48          }
  184.49 -        if (WHITE_BOX.setDontInlineMethod(METHOD, false)) {
  184.50 -            throw new RuntimeException("after first change to false" + METHOD
  184.51 +        if (WHITE_BOX.testSetDontInlineMethod(method, false)) {
  184.52 +            throw new RuntimeException("after first change to false" + method
  184.53                      + " must be inlineable");
  184.54          }
  184.55 -        if (WHITE_BOX.setDontInlineMethod(METHOD, false)) {
  184.56 -            throw new RuntimeException("after second change to false " + METHOD
  184.57 +        if (WHITE_BOX.testSetDontInlineMethod(method, false)) {
  184.58 +            throw new RuntimeException("after second change to false " + method
  184.59                      + " must be inlineable");
  184.60          }
  184.61      }
   185.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   185.2 +++ b/test/compiler/whitebox/SetForceInlineMethodTest.java	Wed Apr 24 21:11:02 2013 -0400
   185.3 @@ -0,0 +1,74 @@
   185.4 +/*
   185.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   185.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   185.7 + *
   185.8 + * This code is free software; you can redistribute it and/or modify it
   185.9 + * under the terms of the GNU General Public License version 2 only, as
  185.10 + * published by the Free Software Foundation.
  185.11 + *
  185.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  185.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  185.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  185.15 + * version 2 for more details (a copy is included in the LICENSE file that
  185.16 + * accompanied this code).
  185.17 + *
  185.18 + * You should have received a copy of the GNU General Public License version
  185.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  185.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  185.21 + *
  185.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  185.23 + * or visit www.oracle.com if you need additional information or have any
  185.24 + * questions.
  185.25 + */
  185.26 +
  185.27 +/*
  185.28 + * @test SetForceInlineMethodTest
  185.29 + * @library /testlibrary /testlibrary/whitebox
  185.30 + * @build SetForceInlineMethodTest
  185.31 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  185.32 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI SetForceInlineMethodTest
  185.33 + * @summary testing of WB::testSetForceInlineMethod()
  185.34 + * @author igor.ignatyev@oracle.com
  185.35 + */
  185.36 +public class SetForceInlineMethodTest extends CompilerWhiteBoxTest {
  185.37 +
  185.38 +    public static void main(String[] args) throws Exception {
  185.39 +        for (TestCase test : TestCase.values()) {
  185.40 +            new SetForceInlineMethodTest(test).runTest();
  185.41 +        }
  185.42 +    }
  185.43 +
  185.44 +    public SetForceInlineMethodTest(TestCase testCase) {
  185.45 +        super(testCase);
  185.46 +    }
  185.47 +
  185.48 +    /**
  185.49 +     * Tests {@code WB::testSetForceInlineMethod()} by sequential calling it and
  185.50 +     * checking of return value.
  185.51 +     *
  185.52 +     * @throws Exception if one of the checks fails.
  185.53 +     */
  185.54 +    @Override
  185.55 +    protected void test() throws Exception {
  185.56 +        if (WHITE_BOX.testSetForceInlineMethod(method, true)) {
  185.57 +            throw new RuntimeException("on start " + method
  185.58 +                    + " must be not force inlineable");
  185.59 +        }
  185.60 +        if (!WHITE_BOX.testSetForceInlineMethod(method, true)) {
  185.61 +            throw new RuntimeException("after first change to true " + method
  185.62 +                    + " must be force inlineable");
  185.63 +        }
  185.64 +        if (!WHITE_BOX.testSetForceInlineMethod(method, false)) {
  185.65 +            throw new RuntimeException("after second change to true " + method
  185.66 +                    + " must be still force inlineable");
  185.67 +        }
  185.68 +        if (WHITE_BOX.testSetForceInlineMethod(method, false)) {
  185.69 +            throw new RuntimeException("after first change to false" + method
  185.70 +                    + " must be not force inlineable");
  185.71 +        }
  185.72 +        if (WHITE_BOX.testSetForceInlineMethod(method, false)) {
  185.73 +            throw new RuntimeException("after second change to false " + method
  185.74 +                    + " must be not force inlineable");
  185.75 +        }
  185.76 +    }
  185.77 +}
   186.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   186.2 +++ b/test/gc/6941923/Test6941923.java	Wed Apr 24 21:11:02 2013 -0400
   186.3 @@ -0,0 +1,121 @@
   186.4 +/*
   186.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   186.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   186.7 + *
   186.8 + * This code is free software; you can redistribute it and/or modify it
   186.9 + * under the terms of the GNU General Public License version 2 only, as
  186.10 + * published by the Free Software Foundation.
  186.11 + *
  186.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  186.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  186.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  186.15 + * version 2 for more details (a copy is included in the LICENSE file that
  186.16 + * accompanied this code).
  186.17 + *
  186.18 + * You should have received a copy of the GNU General Public License version
  186.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  186.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  186.21 + *
  186.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  186.23 + * or visit www.oracle.com if you need additional information or have any
  186.24 + * questions.
  186.25 + */
  186.26 +
  186.27 +/*
  186.28 + * @test Test6941923.java
  186.29 + * @bug 6941923
  186.30 + * @summary test flags for gc log rotation
  186.31 + * @library /testlibrary
  186.32 + * @run main/othervm/timeout=600 Test6941923
  186.33 + *
  186.34 + */
  186.35 +import com.oracle.java.testlibrary.*;
  186.36 +import java.io.File;
  186.37 +import java.io.FilenameFilter;
  186.38 +import java.util.ArrayList;
  186.39 +import java.util.Arrays;
  186.40 +
  186.41 +class GCLoggingGenerator {
  186.42 +
  186.43 +    public static void main(String[] args) throws Exception {
  186.44 +
  186.45 +        long sizeOfLog = Long.parseLong(args[0]);
  186.46 +        long lines = sizeOfLog / 80;
  186.47 +        // full.GC generates ad least 1-line which is not shorter then 80 chars
  186.48 +        // for some GC 2 shorter lines are generated
  186.49 +        for (long i = 0; i < lines; i++) {
  186.50 +            System.gc();
  186.51 +        }
  186.52 +    }
  186.53 +}
  186.54 +
  186.55 +public class Test6941923 {
  186.56 +
  186.57 +    static final File currentDirectory = new File(".");
  186.58 +    static final String logFileName = "test.log";
  186.59 +    static final int logFileSizeK = 16;
  186.60 +    static FilenameFilter logFilter = new FilenameFilter() {
  186.61 +        @Override
  186.62 +        public boolean accept(File dir, String name) {
  186.63 +            return name.startsWith(logFileName);
  186.64 +        }
  186.65 +    };
  186.66 +
  186.67 +    public static void cleanLogs() {
  186.68 +        for (File log : currentDirectory.listFiles(logFilter)) {
  186.69 +            if (!log.delete()) {
  186.70 +                throw new Error("Unable to delete " + log.getAbsolutePath());
  186.71 +            }
  186.72 +        }
  186.73 +    }
  186.74 +
  186.75 +    public static void runTest(int numberOfFiles) throws Exception {
  186.76 +
  186.77 +        ArrayList<String> args = new ArrayList();
  186.78 +        String[] logOpts = new String[]{
  186.79 +            "-cp", System.getProperty("java.class.path"),
  186.80 +            "-Xloggc:" + logFileName,
  186.81 +            "-XX:-DisableExplicitGC", // to sure that System.gc() works
  186.82 +            "-XX:+PrintGC", "-XX:+PrintGCDetails", "-XX:+UseGCLogFileRotation",
  186.83 +            "-XX:NumberOfGCLogFiles=" + numberOfFiles,
  186.84 +            "-XX:GCLogFileSize=" + logFileSizeK + "K", "-Xmx128M"};
  186.85 +        // System.getProperty("test.java.opts") is '' if no options is set
  186.86 +        // need to skip such empty
  186.87 +        String[] externalVMopts = System.getProperty("test.java.opts").length() == 0
  186.88 +                ? new String[0]
  186.89 +                : System.getProperty("test.java.opts").split(" ");
  186.90 +        args.addAll(Arrays.asList(externalVMopts));
  186.91 +        args.addAll(Arrays.asList(logOpts));
  186.92 +        args.add(GCLoggingGenerator.class.getName());
  186.93 +        args.add(String.valueOf(numberOfFiles * logFileSizeK * 1024));
  186.94 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[0]));
  186.95 +        pb.redirectErrorStream(true);
  186.96 +        pb.redirectOutput(new File(GCLoggingGenerator.class.getName() + ".log"));
  186.97 +        Process process = pb.start();
  186.98 +        int result = process.waitFor();
  186.99 +        if (result != 0) {
 186.100 +            throw new Error("Unexpected exit code = " + result);
 186.101 +        }
 186.102 +        File[] logs = currentDirectory.listFiles(logFilter);
 186.103 +        int smallFilesNumber = 0;
 186.104 +        for (File log : logs) {
 186.105 +            if (log.length() < logFileSizeK * 1024) {
 186.106 +                smallFilesNumber++;
 186.107 +            }
 186.108 +        }
 186.109 +        if (logs.length != numberOfFiles) {
 186.110 +            throw new Error("There are only " + logs.length + " logs instead " + numberOfFiles);
 186.111 +        }
 186.112 +        if (smallFilesNumber > 1) {
 186.113 +            throw new Error("There should maximum one log with size < " + logFileSizeK + "K");
 186.114 +        }
 186.115 +    }
 186.116 +
 186.117 +    public static void main(String[] args) throws Exception {
 186.118 +        cleanLogs();
 186.119 +        runTest(1);
 186.120 +        cleanLogs();
 186.121 +        runTest(3);
 186.122 +        cleanLogs();
 186.123 +    }
 186.124 +}
   187.1 --- a/test/gc/6941923/test6941923.sh	Wed Apr 24 20:55:28 2013 -0400
   187.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   187.3 @@ -1,166 +0,0 @@
   187.4 -##
   187.5 -## @test @(#)test6941923.sh
   187.6 -## @bug 6941923 
   187.7 -## @summary test new added flags for gc log rotation 
   187.8 -## @author yqi 
   187.9 -## @run shell test6941923.sh
  187.10 -##
  187.11 -## some tests require path to find test source dir
  187.12 -if [ "${TESTSRC}" = "" ]
  187.13 -then
  187.14 -  TESTSRC=${PWD}
  187.15 -  echo "TESTSRC not set.  Using "${TESTSRC}" as default"
  187.16 -fi
  187.17 -echo "TESTSRC=${TESTSRC}"
  187.18 -## Adding common setup Variables for running shell tests.
  187.19 -. ${TESTSRC}/../../test_env.sh
  187.20 -
  187.21 -## skip on windows
  187.22 -OS=`uname -s`
  187.23 -case "$OS" in
  187.24 -  Windows_* | CYGWIN_* )
  187.25 -    echo "Test skipped for Windows"
  187.26 -    exit 0 
  187.27 -    ;;
  187.28 -esac
  187.29 -
  187.30 -# create a small test case
  187.31 -testname="Test"
  187.32 -if [ -e ${testname}.java ]; then
  187.33 -  rm -rf ${testname}.*
  187.34 -fi
  187.35 -
  187.36 -cat >> ${testname}.java << __EOF__
  187.37 -import java.util.Vector;
  187.38 -
  187.39 -public class Test implements Runnable
  187.40 -{
  187.41 -  private boolean _should_stop = false;
  187.42 -
  187.43 -  public static void main(String[] args) throws Exception {
  187.44 -
  187.45 -    long limit = Long.parseLong(args[0]) * 60L * 1000L;   // minutes
  187.46 -    Test t = new Test();
  187.47 -    t.set_stop(false);
  187.48 -    Thread thr = new Thread(t);
  187.49 -    thr.start();
  187.50 -
  187.51 -    long time1 = System.currentTimeMillis();
  187.52 -    long time2 = System.currentTimeMillis();
  187.53 -    while (time2 - time1 < limit) {
  187.54 -      try {
  187.55 -        Thread.sleep(2000); // 2 seconds
  187.56 -      }
  187.57 -      catch(Exception e) {}
  187.58 -      time2 = System.currentTimeMillis();
  187.59 -      System.out.print("\r... " + (time2 - time1)/1000 + " seconds");
  187.60 -    }
  187.61 -    System.out.println();
  187.62 -    t.set_stop(true);
  187.63 -  }
  187.64 -  public void set_stop(boolean value) { _should_stop = value; }
  187.65 -  public void run() {
  187.66 -    int cap = 20000;
  187.67 -    int fix_size = 2048;
  187.68 -    int loop = 0;
  187.69 -    Vector< byte[] > v = new Vector< byte[] >(cap);
  187.70 -    while(!_should_stop) {
  187.71 -      byte[] g = new byte[fix_size];
  187.72 -      v.add(g);
  187.73 -      loop++;
  187.74 -      if (loop > cap) {
  187.75 -         v = null;
  187.76 -         cap *= 2;
  187.77 -         if (cap > 80000) cap = 80000;
  187.78 -         v = new Vector< byte[] >(cap);
  187.79 -      }
  187.80 -    }
  187.81 -  }
  187.82 -}
  187.83 -__EOF__
  187.84 -
  187.85 -msgsuccess="succeeded"
  187.86 -msgfail="failed"
  187.87 -gclogsize="16K"
  187.88 -filesize=$((16*1024))
  187.89 -${COMPILEJAVA}/bin/javac ${TESTJAVACOPTS} ${testname}.java > $NULL 2>&1
  187.90 -
  187.91 -if [ $? != 0 ]; then
  187.92 -  echo "${COMPILEJAVA}/bin/javac ${testname}.java $fail"
  187.93 -  exit -1
  187.94 -fi
  187.95 -
  187.96 -# test for 2 minutes, it will complete circulation of gc log rotation
  187.97 -tts=2
  187.98 -logfile="test.log"
  187.99 -hotspotlog="hotspot.log"
 187.100 -
 187.101 -if [ -e $logfile  ]; then
 187.102 -  rm -rf $logfile
 187.103 -fi
 187.104 -
 187.105 -#also delete $hotspotlog if it exists
 187.106 -if [ -f $hotspotlog ]; then 
 187.107 -  rm -rf $hotspotlog
 187.108 -fi
 187.109 -
 187.110 -options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=1 -XX:GCLogFileSize=$gclogsize"
 187.111 -echo "Test gc log rotation in same file, wait for $tts minutes ...."
 187.112 -${TESTJAVA}/bin/java $options $testname $tts
 187.113 -if [ $? != 0 ]; then
 187.114 -  echo "$msgfail"
 187.115 -  exit -1
 187.116 -fi
 187.117 -
 187.118 -# rotation file will be $logfile.0 
 187.119 -if [ -f $logfile.0 ]; then
 187.120 -  outfilesize=`ls -l $logfile.0 | awk '{print $5 }'`
 187.121 -  if [ $((outfilesize)) -ge $((filesize)) ]; then
 187.122 -    echo $msgsuccess
 187.123 -  else
 187.124 -    echo $msgfail
 187.125 -  fi
 187.126 -else 
 187.127 -  echo $msgfail
 187.128 -  exit -1
 187.129 -fi
 187.130 -
 187.131 -# delete log file 
 187.132 -rm -rf $logfile.0
 187.133 -if [ -f $hotspotlog ]; then
 187.134 -  rm -rf $hotspotlog
 187.135 -fi
 187.136 -
 187.137 -#multiple log files
 187.138 -numoffiles=3
 187.139 -options="-Xloggc:$logfile -XX:+UseConcMarkSweepGC -XX:+PrintGC -XX:+PrintGCDetails -XX:+UseGCLogFileRotation  -XX:NumberOfGCLogFiles=$numoffiles -XX:GCLogFileSize=$gclogsize"
 187.140 -echo "Test gc log rotation in $numoffiles files, wait for $tts minutes ...."
 187.141 -${TESTJAVA}/bin/java $options $testname $tts
 187.142 -if [ $? != 0 ]; then
 187.143 -  echo "$msgfail"
 187.144 -  exit -1
 187.145 -fi
 187.146 -
 187.147 -atleast=0    # at least size of numoffile-1 files >= $gclogsize
 187.148 -tk=0
 187.149 -while [ $(($tk)) -lt $(($numoffiles)) ]
 187.150 -do
 187.151 -  if [ -f $logfile.$tk ]; then
 187.152 -    outfilesize=`ls -l $logfile.$tk | awk '{ print $5 }'`
 187.153 -    if [ $(($outfilesize)) -ge $(($filesize)) ]; then
 187.154 -      atleast=$((atleast+1))
 187.155 -    fi
 187.156 -  fi
 187.157 -  tk=$((tk+1))
 187.158 -done
 187.159 -
 187.160 -rm -rf $logfile.*
 187.161 -rm -rf $testname.*
 187.162 -rm -rf $hotspotlog
 187.163 -
 187.164 -if [ $(($atleast)) -ge $(($numoffiles-1)) ]; then
 187.165 -  echo $msgsuccess
 187.166 -else
 187.167 -  echo $msgfail
 187.168 -  exit -1
 187.169 -fi
   188.1 --- a/test/gc/TestVerifyBeforeGCDuringStartup.java	Wed Apr 24 20:55:28 2013 -0400
   188.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   188.3 @@ -1,45 +0,0 @@
   188.4 -/*
   188.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   188.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   188.7 - *
   188.8 - * This code is free software; you can redistribute it and/or modify it
   188.9 - * under the terms of the GNU General Public License version 2 only, as
  188.10 - * published by the Free Software Foundation.
  188.11 - *
  188.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
  188.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  188.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  188.15 - * version 2 for more details (a copy is included in the LICENSE file that
  188.16 - * accompanied this code).
  188.17 - *
  188.18 - * You should have received a copy of the GNU General Public License version
  188.19 - * 2 along with this work; if not, write to the Free Software Foundation,
  188.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  188.21 - *
  188.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  188.23 - * or visit www.oracle.com if you need additional information or have any
  188.24 - * questions.
  188.25 - */
  188.26 -
  188.27 -/* @test TestVerifyBeforeGCDuringStartup.java
  188.28 - * @key gc
  188.29 - * @bug 8010463
  188.30 - * @summary Simple test run with -XX:+VerifyBeforeGC -XX:-UseTLAB to verify 8010463
  188.31 - * @library /testlibrary
  188.32 - */
  188.33 -
  188.34 -import com.oracle.java.testlibrary.OutputAnalyzer;
  188.35 -import com.oracle.java.testlibrary.ProcessTools;
  188.36 -
  188.37 -public class TestVerifyBeforeGCDuringStartup {
  188.38 -  public static void main(String args[]) throws Exception {
  188.39 -    ProcessBuilder pb =
  188.40 -      ProcessTools.createJavaProcessBuilder(System.getProperty("test.vm.opts"),
  188.41 -                                            "-XX:-UseTLAB",
  188.42 -                                            "-XX:+UnlockDiagnosticVMOptions",
  188.43 -                                            "-XX:+VerifyBeforeGC", "-version");
  188.44 -    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  188.45 -    output.shouldContain("[Verifying");
  188.46 -    output.shouldHaveExitValue(0);
  188.47 -  }
  188.48 -}
   189.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   189.2 +++ b/test/gc/TestVerifyDuringStartup.java	Wed Apr 24 21:11:02 2013 -0400
   189.3 @@ -0,0 +1,45 @@
   189.4 +/*
   189.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   189.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   189.7 + *
   189.8 + * This code is free software; you can redistribute it and/or modify it
   189.9 + * under the terms of the GNU General Public License version 2 only, as
  189.10 + * published by the Free Software Foundation.
  189.11 + *
  189.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  189.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  189.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  189.15 + * version 2 for more details (a copy is included in the LICENSE file that
  189.16 + * accompanied this code).
  189.17 + *
  189.18 + * You should have received a copy of the GNU General Public License version
  189.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  189.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  189.21 + *
  189.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  189.23 + * or visit www.oracle.com if you need additional information or have any
  189.24 + * questions.
  189.25 + */
  189.26 +
  189.27 +/* @test TestVerifyDuringStartup.java
  189.28 + * @key gc
  189.29 + * @bug 8010463
  189.30 + * @summary Simple test run with -XX:+VerifyDuringStartup -XX:-UseTLAB to verify 8010463
  189.31 + * @library /testlibrary
  189.32 + */
  189.33 +
  189.34 +import com.oracle.java.testlibrary.OutputAnalyzer;
  189.35 +import com.oracle.java.testlibrary.ProcessTools;
  189.36 +
  189.37 +public class TestVerifyDuringStartup {
  189.38 +  public static void main(String args[]) throws Exception {
  189.39 +    ProcessBuilder pb =
  189.40 +      ProcessTools.createJavaProcessBuilder(System.getProperty("test.vm.opts"),
  189.41 +                                            "-XX:-UseTLAB",
  189.42 +                                            "-XX:+UnlockDiagnosticVMOptions",
  189.43 +                                            "-XX:+VerifyDuringStartup", "-version");
  189.44 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  189.45 +    output.shouldContain("[Verifying");
  189.46 +    output.shouldHaveExitValue(0);
  189.47 +  }
  189.48 +}
   190.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   190.2 +++ b/test/gc/metaspace/G1AddMetaspaceDependency.java	Wed Apr 24 21:11:02 2013 -0400
   190.3 @@ -0,0 +1,123 @@
   190.4 +/*
   190.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   190.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   190.7 + *
   190.8 + * This code is free software; you can redistribute it and/or modify it
   190.9 + * under the terms of the GNU General Public License version 2 only, as
  190.10 + * published by the Free Software Foundation.
  190.11 + *
  190.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  190.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  190.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  190.15 + * version 2 for more details (a copy is included in the LICENSE file that
  190.16 + * accompanied this code).
  190.17 + *
  190.18 + * You should have received a copy of the GNU General Public License version
  190.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  190.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  190.21 + *
  190.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  190.23 + * or visit www.oracle.com if you need additional information or have any
  190.24 + * questions.
  190.25 + */
  190.26 +
  190.27 +/*
  190.28 + * @test G1AddMetaspaceDependency
  190.29 + * @bug 8010196
  190.30 + * @summary Checks that we don't get locking problems when adding metaspace dependencies with the G1 update buffer monitor
  190.31 + * @run main/othervm -XX:+UseG1GC -XX:G1UpdateBufferSize=1 G1AddMetaspaceDependency
  190.32 + */
  190.33 +
  190.34 +import java.io.InputStream;
  190.35 +
  190.36 +public class G1AddMetaspaceDependency {
  190.37 +
  190.38 +  static byte[] getClassBytes(String name) {
  190.39 +    byte[] b = null;
  190.40 +    try (InputStream is = ClassLoader.getSystemResourceAsStream(name)) {
  190.41 +      byte[] tmp = new byte[is.available()];
  190.42 +      is.read(tmp);
  190.43 +      b = tmp;
  190.44 +    } finally {
  190.45 +      if (b == null) {
  190.46 +        throw new RuntimeException("Unable to load class file");
  190.47 +      }
  190.48 +      return b;
  190.49 +    }
  190.50 +  }
  190.51 +
  190.52 +  static final String a_name = G1AddMetaspaceDependency.class.getName() + "$A";
  190.53 +  static final String b_name = G1AddMetaspaceDependency.class.getName() + "$B";
  190.54 +
  190.55 +  public static void main(String... args) throws Exception {
  190.56 +    final byte[] a_bytes = getClassBytes(a_name + ".class");
  190.57 +    final byte[] b_bytes = getClassBytes(b_name + ".class");
  190.58 +
  190.59 +    for (int i = 0; i < 1000; i += 1) {
  190.60 +      runTest(a_bytes, b_bytes);
  190.61 +    }
  190.62 +  }
  190.63 +
  190.64 +  static class Loader extends ClassLoader {
  190.65 +    private final String myClass;
  190.66 +    private final byte[] myBytes;
  190.67 +    private final String friendClass;
  190.68 +    private final ClassLoader friendLoader;
  190.69 +
  190.70 +    Loader(String myClass, byte[] myBytes,
  190.71 +           String friendClass, ClassLoader friendLoader) {
  190.72 +      this.myClass = myClass;
  190.73 +      this.myBytes = myBytes;
  190.74 +      this.friendClass = friendClass;
  190.75 +      this.friendLoader = friendLoader;
  190.76 +    }
  190.77 +
  190.78 +    Loader(String myClass, byte[] myBytes) {
  190.79 +      this(myClass, myBytes, null, null);
  190.80 +    }
  190.81 +
  190.82 +    @Override
  190.83 +    public Class<?> loadClass(String name) throws ClassNotFoundException {
  190.84 +      Class<?> c = findLoadedClass(name);
  190.85 +      if (c != null) {
  190.86 +        return c;
  190.87 +      }
  190.88 +
  190.89 +      if (name.equals(friendClass)) {
  190.90 +        return friendLoader.loadClass(name);
  190.91 +      }
  190.92 +
  190.93 +      if (name.equals(myClass)) {
  190.94 +        c = defineClass(name, myBytes, 0, myBytes.length);
  190.95 +        resolveClass(c);
  190.96 +        return c;
  190.97 +      }
  190.98 +
  190.99 +      return findSystemClass(name);
 190.100 +    }
 190.101 +
 190.102 +  }
 190.103 +
 190.104 +  private static void runTest(final byte[] a_bytes, final byte[] b_bytes) throws Exception {
 190.105 +    Loader a_loader = new Loader(a_name, a_bytes);
 190.106 +    Loader b_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.107 +    Loader c_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.108 +    Loader d_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.109 +    Loader e_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.110 +    Loader f_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.111 +    Loader g_loader = new Loader(b_name, b_bytes, a_name, a_loader);
 190.112 +
 190.113 +    byte[] b = new byte[20 * 2 << 20];
 190.114 +    Class<?> c;
 190.115 +    c = b_loader.loadClass(b_name);
 190.116 +    c = c_loader.loadClass(b_name);
 190.117 +    c = d_loader.loadClass(b_name);
 190.118 +    c = e_loader.loadClass(b_name);
 190.119 +    c = f_loader.loadClass(b_name);
 190.120 +    c = g_loader.loadClass(b_name);
 190.121 +  }
 190.122 +  public class A {
 190.123 +  }
 190.124 +  class B extends A {
 190.125 +  }
 190.126 +}
   191.1 --- a/test/runtime/NMT/AllocTestType.java	Wed Apr 24 20:55:28 2013 -0400
   191.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   191.3 @@ -1,73 +0,0 @@
   191.4 -/*
   191.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   191.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   191.7 - *
   191.8 - * This code is free software; you can redistribute it and/or modify it
   191.9 - * under the terms of the GNU General Public License version 2 only, as
  191.10 - * published by the Free Software Foundation.
  191.11 - *
  191.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
  191.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  191.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  191.15 - * version 2 for more details (a copy is included in the LICENSE file that
  191.16 - * accompanied this code).
  191.17 - *
  191.18 - * You should have received a copy of the GNU General Public License version
  191.19 - * 2 along with this work; if not, write to the Free Software Foundation,
  191.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  191.21 - *
  191.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  191.23 - * or visit www.oracle.com if you need additional information or have any
  191.24 - * questions.
  191.25 - */
  191.26 -
  191.27 -/*
  191.28 - * @test
  191.29 - * @summary Test consistency of NMT by leaking a few select allocations of the Test type and then verify visibility with jcmd
  191.30 - * @key nmt jcmd
  191.31 - * @library /testlibrary /testlibrary/whitebox
  191.32 - * @build AllocTestType
  191.33 - * @run main ClassFileInstaller sun.hotspot.WhiteBox
  191.34 - * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail AllocTestType
  191.35 - */
  191.36 -
  191.37 -import com.oracle.java.testlibrary.*;
  191.38 -import sun.hotspot.WhiteBox;
  191.39 -
  191.40 -public class AllocTestType {
  191.41 -
  191.42 -  public static void main(String args[]) throws Exception {
  191.43 -    OutputAnalyzer output;
  191.44 -
  191.45 -    // Grab my own PID
  191.46 -    String pid = Integer.toString(ProcessTools.getProcessId());
  191.47 -    ProcessBuilder pb = new ProcessBuilder();
  191.48 -
  191.49 -    // Use WB API to alloc with the mtTest type
  191.50 -    if (!WhiteBox.getWhiteBox().NMTAllocTest()) {
  191.51 -      throw new Exception("Call to WB API NMTAllocTest() failed");
  191.52 -    }
  191.53 -
  191.54 -    // Use WB API to ensure that all data has been merged before we continue
  191.55 -    if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
  191.56 -      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  191.57 -    }
  191.58 -
  191.59 -    // Run 'jcmd <pid> VM.native_memory summary'
  191.60 -    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
  191.61 -    output = new OutputAnalyzer(pb.start());
  191.62 -    output.shouldContain("Test (reserved=512KB, committed=512KB)");
  191.63 -
  191.64 -    // Free the memory allocated by NMTAllocTest
  191.65 -    if (!WhiteBox.getWhiteBox().NMTFreeTestMemory()) {
  191.66 -      throw new Exception("Call to WB API NMTFreeTestMemory() failed");
  191.67 -    }
  191.68 -
  191.69 -    // Use WB API to ensure that all data has been merged before we continue
  191.70 -    if (!WhiteBox.getWhiteBox().NMTWaitForDataMerge()) {
  191.71 -      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  191.72 -    }
  191.73 -    output = new OutputAnalyzer(pb.start());
  191.74 -    output.shouldNotContain("Test (reserved=");
  191.75 -  }
  191.76 -}
   192.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   192.2 +++ b/test/runtime/NMT/MallocTestType.java	Wed Apr 24 21:11:02 2013 -0400
   192.3 @@ -0,0 +1,74 @@
   192.4 +/*
   192.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   192.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   192.7 + *
   192.8 + * This code is free software; you can redistribute it and/or modify it
   192.9 + * under the terms of the GNU General Public License version 2 only, as
  192.10 + * published by the Free Software Foundation.
  192.11 + *
  192.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  192.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  192.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  192.15 + * version 2 for more details (a copy is included in the LICENSE file that
  192.16 + * accompanied this code).
  192.17 + *
  192.18 + * You should have received a copy of the GNU General Public License version
  192.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  192.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  192.21 + *
  192.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  192.23 + * or visit www.oracle.com if you need additional information or have any
  192.24 + * questions.
  192.25 + */
  192.26 +
  192.27 +/*
  192.28 + * @test
  192.29 + * @summary Test consistency of NMT by leaking a few select allocations of the Test type and then verify visibility with jcmd
  192.30 + * @key nmt jcmd
  192.31 + * @library /testlibrary /testlibrary/whitebox
  192.32 + * @build MallocTestType
  192.33 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  192.34 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail MallocTestType
  192.35 + */
  192.36 +
  192.37 +import com.oracle.java.testlibrary.*;
  192.38 +import sun.hotspot.WhiteBox;
  192.39 +
  192.40 +public class MallocTestType {
  192.41 +
  192.42 +  public static void main(String args[]) throws Exception {
  192.43 +    OutputAnalyzer output;
  192.44 +    WhiteBox wb = WhiteBox.getWhiteBox();
  192.45 +
  192.46 +    // Grab my own PID
  192.47 +    String pid = Integer.toString(ProcessTools.getProcessId());
  192.48 +    ProcessBuilder pb = new ProcessBuilder();
  192.49 +
  192.50 +    // Use WB API to alloc and free with the mtTest type
  192.51 +    long memAlloc3 = wb.NMTMalloc(128 * 1024);
  192.52 +    long memAlloc2 = wb.NMTMalloc(256 * 1024);
  192.53 +    wb.NMTFree(memAlloc3);
  192.54 +    long memAlloc1 = wb.NMTMalloc(512 * 1024);
  192.55 +    wb.NMTFree(memAlloc2);
  192.56 +
  192.57 +    // Use WB API to ensure that all data has been merged before we continue
  192.58 +    if (!wb.NMTWaitForDataMerge()) {
  192.59 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  192.60 +    }
  192.61 +
  192.62 +    // Run 'jcmd <pid> VM.native_memory summary'
  192.63 +    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
  192.64 +    output = new OutputAnalyzer(pb.start());
  192.65 +    output.shouldContain("Test (reserved=512KB, committed=512KB)");
  192.66 +
  192.67 +    // Free the memory allocated by NMTAllocTest
  192.68 +    wb.NMTFree(memAlloc1);
  192.69 +
  192.70 +    // Use WB API to ensure that all data has been merged before we continue
  192.71 +    if (!wb.NMTWaitForDataMerge()) {
  192.72 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  192.73 +    }
  192.74 +    output = new OutputAnalyzer(pb.start());
  192.75 +    output.shouldNotContain("Test (reserved=");
  192.76 +  }
  192.77 +}
   193.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   193.2 +++ b/test/runtime/NMT/ThreadedMallocTestType.java	Wed Apr 24 21:11:02 2013 -0400
   193.3 @@ -0,0 +1,91 @@
   193.4 +/*
   193.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   193.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   193.7 + *
   193.8 + * This code is free software; you can redistribute it and/or modify it
   193.9 + * under the terms of the GNU General Public License version 2 only, as
  193.10 + * published by the Free Software Foundation.
  193.11 + *
  193.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  193.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  193.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  193.15 + * version 2 for more details (a copy is included in the LICENSE file that
  193.16 + * accompanied this code).
  193.17 + *
  193.18 + * You should have received a copy of the GNU General Public License version
  193.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  193.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  193.21 + *
  193.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  193.23 + * or visit www.oracle.com if you need additional information or have any
  193.24 + * questions.
  193.25 + */
  193.26 +
  193.27 +/*
  193.28 + * @test
  193.29 + * @key nmt jcmd
  193.30 + * @library /testlibrary /testlibrary/whitebox
  193.31 + * @build ThreadedMallocTestType
  193.32 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  193.33 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedMallocTestType
  193.34 + */
  193.35 +
  193.36 +import com.oracle.java.testlibrary.*;
  193.37 +import sun.hotspot.WhiteBox;
  193.38 +
  193.39 +public class ThreadedMallocTestType {
  193.40 +  public static long memAlloc1;
  193.41 +  public static long memAlloc2;
  193.42 +  public static long memAlloc3;
  193.43 +
  193.44 +  public static void main(String args[]) throws Exception {
  193.45 +    OutputAnalyzer output;
  193.46 +    final WhiteBox wb = WhiteBox.getWhiteBox();
  193.47 +
  193.48 +    // Grab my own PID
  193.49 +    String pid = Integer.toString(ProcessTools.getProcessId());
  193.50 +    ProcessBuilder pb = new ProcessBuilder();
  193.51 +
  193.52 +    Thread allocThread = new Thread() {
  193.53 +      public void run() {
  193.54 +        // Alloc memory using the WB api
  193.55 +        memAlloc1 = wb.NMTMalloc(128 * 1024);
  193.56 +        memAlloc2 = wb.NMTMalloc(256 * 1024);
  193.57 +        memAlloc3 = wb.NMTMalloc(512 * 1024);
  193.58 +      }
  193.59 +    };
  193.60 +
  193.61 +    allocThread.start();
  193.62 +    allocThread.join();
  193.63 +
  193.64 +    // Use WB API to ensure that all data has been merged before we continue
  193.65 +    if (!wb.NMTWaitForDataMerge()) {
  193.66 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  193.67 +    }
  193.68 +
  193.69 +    // Run 'jcmd <pid> VM.native_memory summary'
  193.70 +    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "summary"});
  193.71 +    output = new OutputAnalyzer(pb.start());
  193.72 +    output.shouldContain("Test (reserved=896KB, committed=896KB)");
  193.73 +
  193.74 +    Thread freeThread = new Thread() {
  193.75 +      public void run() {
  193.76 +        // Free the memory allocated by NMTMalloc
  193.77 +        wb.NMTFree(memAlloc1);
  193.78 +        wb.NMTFree(memAlloc2);
  193.79 +        wb.NMTFree(memAlloc3);
  193.80 +      }
  193.81 +    };
  193.82 +
  193.83 +    freeThread.start();
  193.84 +    freeThread.join();
  193.85 +
  193.86 +    // Use WB API to ensure that all data has been merged before we continue
  193.87 +    if (!wb.NMTWaitForDataMerge()) {
  193.88 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  193.89 +    }
  193.90 +
  193.91 +    output = new OutputAnalyzer(pb.start());
  193.92 +    output.shouldNotContain("Test (reserved=");
  193.93 +  }
  193.94 +}
   194.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   194.2 +++ b/test/runtime/NMT/ThreadedVirtualAllocTestType.java	Wed Apr 24 21:11:02 2013 -0400
   194.3 @@ -0,0 +1,112 @@
   194.4 +/*
   194.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   194.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   194.7 + *
   194.8 + * This code is free software; you can redistribute it and/or modify it
   194.9 + * under the terms of the GNU General Public License version 2 only, as
  194.10 + * published by the Free Software Foundation.
  194.11 + *
  194.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  194.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  194.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  194.15 + * version 2 for more details (a copy is included in the LICENSE file that
  194.16 + * accompanied this code).
  194.17 + *
  194.18 + * You should have received a copy of the GNU General Public License version
  194.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  194.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  194.21 + *
  194.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  194.23 + * or visit www.oracle.com if you need additional information or have any
  194.24 + * questions.
  194.25 + */
  194.26 +
  194.27 +/*
  194.28 + * @test
  194.29 + * @key nmt jcmd
  194.30 + * @library /testlibrary /testlibrary/whitebox
  194.31 + * @build ThreadedVirtualAllocTestType
  194.32 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  194.33 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ThreadedVirtualAllocTestType
  194.34 + */
  194.35 +
  194.36 +import com.oracle.java.testlibrary.*;
  194.37 +import sun.hotspot.WhiteBox;
  194.38 +
  194.39 +public class ThreadedVirtualAllocTestType {
  194.40 +  public static long addr;
  194.41 +  public static final WhiteBox wb = WhiteBox.getWhiteBox();
  194.42 +  public static final long commitSize = 128 * 1024;
  194.43 +  public static final long reserveSize = 512 * 1024;
  194.44 +
  194.45 +  public static void main(String args[]) throws Exception {
  194.46 +    OutputAnalyzer output;
  194.47 +
  194.48 +    String pid = Integer.toString(ProcessTools.getProcessId());
  194.49 +    ProcessBuilder pb = new ProcessBuilder();
  194.50 +
  194.51 +    Thread reserveThread = new Thread() {
  194.52 +      public void run() {
  194.53 +        addr = wb.NMTReserveMemory(reserveSize);
  194.54 +      }
  194.55 +    };
  194.56 +    reserveThread.start();
  194.57 +    reserveThread.join();
  194.58 +
  194.59 +    mergeData();
  194.60 +
  194.61 +    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
  194.62 +    output = new OutputAnalyzer(pb.start());
  194.63 +    output.shouldContain("Test (reserved=512KB, committed=0KB)");
  194.64 +    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 512KB for Test");
  194.65 +
  194.66 +    Thread commitThread = new Thread() {
  194.67 +      public void run() {
  194.68 +        wb.NMTCommitMemory(addr, commitSize);
  194.69 +      }
  194.70 +    };
  194.71 +    commitThread.start();
  194.72 +    commitThread.join();
  194.73 +
  194.74 +    mergeData();
  194.75 +
  194.76 +    output = new OutputAnalyzer(pb.start());
  194.77 +    output.shouldContain("Test (reserved=512KB, committed=128KB)");
  194.78 +    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
  194.79 +
  194.80 +    Thread uncommitThread = new Thread() {
  194.81 +      public void run() {
  194.82 +        wb.NMTUncommitMemory(addr, commitSize);
  194.83 +      }
  194.84 +    };
  194.85 +    uncommitThread.start();
  194.86 +    uncommitThread.join();
  194.87 +
  194.88 +    mergeData();
  194.89 +
  194.90 +    output = new OutputAnalyzer(pb.start());
  194.91 +    output.shouldContain("Test (reserved=512KB, committed=0KB)");
  194.92 +    output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
  194.93 +
  194.94 +    Thread releaseThread = new Thread() {
  194.95 +      public void run() {
  194.96 +        wb.NMTReleaseMemory(addr, reserveSize);
  194.97 +      }
  194.98 +    };
  194.99 +    releaseThread.start();
 194.100 +    releaseThread.join();
 194.101 +
 194.102 +    mergeData();
 194.103 +
 194.104 +    output = new OutputAnalyzer(pb.start());
 194.105 +    output.shouldNotContain("Test (reserved=");
 194.106 +    output.shouldNotContain("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
 194.107 +  }
 194.108 +
 194.109 +  public static void mergeData() throws Exception {
 194.110 +    // Use WB API to ensure that all data has been merged before we continue
 194.111 +    if (!wb.NMTWaitForDataMerge()) {
 194.112 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
 194.113 +    }
 194.114 +  }
 194.115 +}
   195.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   195.2 +++ b/test/runtime/NMT/VirtualAllocTestType.java	Wed Apr 24 21:11:02 2013 -0400
   195.3 @@ -0,0 +1,88 @@
   195.4 +/*
   195.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   195.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   195.7 + *
   195.8 + * This code is free software; you can redistribute it and/or modify it
   195.9 + * under the terms of the GNU General Public License version 2 only, as
  195.10 + * published by the Free Software Foundation.
  195.11 + *
  195.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  195.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  195.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  195.15 + * version 2 for more details (a copy is included in the LICENSE file that
  195.16 + * accompanied this code).
  195.17 + *
  195.18 + * You should have received a copy of the GNU General Public License version
  195.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  195.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  195.21 + *
  195.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  195.23 + * or visit www.oracle.com if you need additional information or have any
  195.24 + * questions.
  195.25 + */
  195.26 +
  195.27 +/*
  195.28 + * @test
  195.29 + * @summary Test Reserve/Commit/Uncommit/Release of virtual memory and that we track it correctly
  195.30 + * @key nmt jcmd
  195.31 + * @library /testlibrary /testlibrary/whitebox
  195.32 + * @build VirtualAllocTestType
  195.33 + * @run main ClassFileInstaller sun.hotspot.WhiteBox
  195.34 + * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail VirtualAllocTestType
  195.35 + */
  195.36 +
  195.37 +import com.oracle.java.testlibrary.*;
  195.38 +import sun.hotspot.WhiteBox;
  195.39 +
  195.40 +public class VirtualAllocTestType {
  195.41 +
  195.42 +  public static WhiteBox wb = WhiteBox.getWhiteBox();
  195.43 +  public static void main(String args[]) throws Exception {
  195.44 +    OutputAnalyzer output;
  195.45 +    long commitSize = 128 * 1024;
  195.46 +    long reserveSize = 256 * 1024;
  195.47 +    long addr;
  195.48 +
  195.49 +    String pid = Integer.toString(ProcessTools.getProcessId());
  195.50 +    ProcessBuilder pb = new ProcessBuilder();
  195.51 +
  195.52 +    addr = wb.NMTReserveMemory(reserveSize);
  195.53 +    mergeData();
  195.54 +
  195.55 +    pb.command(new String[] { JDKToolFinder.getJDKTool("jcmd"), pid, "VM.native_memory", "detail"});
  195.56 +    output = new OutputAnalyzer(pb.start());
  195.57 +    output.shouldContain("Test (reserved=256KB, committed=0KB)");
  195.58 +    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved 256KB for Test");
  195.59 +
  195.60 +    wb.NMTCommitMemory(addr, commitSize);
  195.61 +
  195.62 +    mergeData();
  195.63 +
  195.64 +    output = new OutputAnalyzer(pb.start());
  195.65 +    output.shouldContain("Test (reserved=256KB, committed=128KB)");
  195.66 +    output.shouldMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed 128KB");
  195.67 +
  195.68 +    wb.NMTUncommitMemory(addr, commitSize);
  195.69 +
  195.70 +    mergeData();
  195.71 +
  195.72 +    output = new OutputAnalyzer(pb.start());
  195.73 +    output.shouldContain("Test (reserved=256KB, committed=0KB)");
  195.74 +    output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + commitSize) + "\\] committed");
  195.75 +
  195.76 +    wb.NMTReleaseMemory(addr, reserveSize);
  195.77 +
  195.78 +    mergeData();
  195.79 +
  195.80 +    output = new OutputAnalyzer(pb.start());
  195.81 +    output.shouldNotContain("Test (reserved=");
  195.82 +    output.shouldNotMatch("\\[0x[0]*" + Long.toHexString(addr) + " - 0x[0]*" + Long.toHexString(addr + reserveSize) + "\\] reserved");
  195.83 +  }
  195.84 +
  195.85 +  public static void mergeData() throws Exception {
  195.86 +    // Use WB API to ensure that all data has been merged before we continue
  195.87 +    if (!wb.NMTWaitForDataMerge()) {
  195.88 +      throw new Exception("Call to WB API NMTWaitForDataMerge() failed");
  195.89 +    }
  195.90 +  }
  195.91 +}
   196.1 --- a/test/sanity/WBApi.java	Wed Apr 24 20:55:28 2013 -0400
   196.2 +++ b/test/sanity/WBApi.java	Wed Apr 24 21:11:02 2013 -0400
   196.3 @@ -1,5 +1,5 @@
   196.4  /*
   196.5 - * Copyright (c) 2012, 2013 Oracle and/or its affiliates. All rights reserved.
   196.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   196.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   196.8   *
   196.9   * This code is free software; you can redistribute it and/or modify it
   197.1 --- a/test/serviceability/ParserTest.java	Wed Apr 24 20:55:28 2013 -0400
   197.2 +++ b/test/serviceability/ParserTest.java	Wed Apr 24 21:11:02 2013 -0400
   197.3 @@ -1,5 +1,5 @@
   197.4  /*
   197.5 - * Copyright (c) 2012, 2013 Oracle and/or its affiliates. All rights reserved.
   197.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   197.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   197.8   *
   197.9   * This code is free software; you can redistribute it and/or modify it
   198.1 --- a/test/testlibrary/OutputAnalyzerTest.java	Wed Apr 24 20:55:28 2013 -0400
   198.2 +++ b/test/testlibrary/OutputAnalyzerTest.java	Wed Apr 24 21:11:02 2013 -0400
   198.3 @@ -36,6 +36,11 @@
   198.4      String stdout = "aaaaaa";
   198.5      String stderr = "bbbbbb";
   198.6  
   198.7 +    // Regexps used for testing pattern matching of the test input
   198.8 +    String stdoutPattern = "[a]";
   198.9 +    String stderrPattern = "[b]";
  198.10 +    String nonExistingPattern = "[c]";
  198.11 +
  198.12      OutputAnalyzer output = new OutputAnalyzer(stdout, stderr);
  198.13  
  198.14      if (!stdout.equals(output.getStdout())) {
  198.15 @@ -99,10 +104,73 @@
  198.16      }
  198.17  
  198.18      try {
  198.19 -      output.stderrShouldNotContain(stderr);
  198.20 -      throw new Exception("shouldContain() failed to throw exception");
  198.21 +        output.stderrShouldNotContain(stderr);
  198.22 +        throw new Exception("shouldContain() failed to throw exception");
  198.23      } catch (RuntimeException e) {
  198.24 -      // expected
  198.25 +        // expected
  198.26 +    }
  198.27 +
  198.28 +    // Should match
  198.29 +    try {
  198.30 +        output.shouldMatch(stdoutPattern);
  198.31 +        output.stdoutShouldMatch(stdoutPattern);
  198.32 +        output.shouldMatch(stderrPattern);
  198.33 +        output.stderrShouldMatch(stderrPattern);
  198.34 +    } catch (RuntimeException e) {
  198.35 +        throw new Exception("shouldMatch() failed", e);
  198.36 +    }
  198.37 +
  198.38 +    try {
  198.39 +        output.shouldMatch(nonExistingPattern);
  198.40 +        throw new Exception("shouldMatch() failed to throw exception");
  198.41 +    } catch (RuntimeException e) {
  198.42 +        // expected
  198.43 +    }
  198.44 +
  198.45 +    try {
  198.46 +        output.stdoutShouldMatch(stderrPattern);
  198.47 +        throw new Exception(
  198.48 +                "stdoutShouldMatch() failed to throw exception");
  198.49 +    } catch (RuntimeException e) {
  198.50 +        // expected
  198.51 +    }
  198.52 +
  198.53 +    try {
  198.54 +        output.stderrShouldMatch(stdoutPattern);
  198.55 +        throw new Exception(
  198.56 +                "stderrShouldMatch() failed to throw exception");
  198.57 +    } catch (RuntimeException e) {
  198.58 +        // expected
  198.59 +    }
  198.60 +
  198.61 +    // Should not match
  198.62 +    try {
  198.63 +        output.shouldNotMatch(nonExistingPattern);
  198.64 +        output.stdoutShouldNotMatch(nonExistingPattern);
  198.65 +        output.stderrShouldNotMatch(nonExistingPattern);
  198.66 +    } catch (RuntimeException e) {
  198.67 +        throw new Exception("shouldNotMatch() failed", e);
  198.68 +    }
  198.69 +
  198.70 +    try {
  198.71 +        output.shouldNotMatch(stdoutPattern);
  198.72 +        throw new Exception("shouldNotMatch() failed to throw exception");
  198.73 +    } catch (RuntimeException e) {
  198.74 +        // expected
  198.75 +    }
  198.76 +
  198.77 +    try {
  198.78 +        output.stdoutShouldNotMatch(stdoutPattern);
  198.79 +        throw new Exception("shouldNotMatch() failed to throw exception");
  198.80 +    } catch (RuntimeException e) {
  198.81 +        // expected
  198.82 +    }
  198.83 +
  198.84 +    try {
  198.85 +        output.stderrShouldNotMatch(stderrPattern);
  198.86 +        throw new Exception("shouldNotMatch() failed to throw exception");
  198.87 +    } catch (RuntimeException e) {
  198.88 +        // expected
  198.89      }
  198.90    }
  198.91  }
   199.1 --- a/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Apr 24 20:55:28 2013 -0400
   199.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/OutputAnalyzer.java	Wed Apr 24 21:11:02 2013 -0400
   199.3 @@ -24,6 +24,8 @@
   199.4  package com.oracle.java.testlibrary;
   199.5  
   199.6  import java.io.IOException;
   199.7 +import java.util.regex.Matcher;
   199.8 +import java.util.regex.Pattern;
   199.9  
  199.10  public final class OutputAnalyzer {
  199.11  
  199.12 @@ -142,15 +144,112 @@
  199.13    }
  199.14  
  199.15    /**
  199.16 +   * Verify that the stdout and stderr contents of output buffer matches
  199.17 +   * the pattern
  199.18 +   *
  199.19 +   * @param pattern
  199.20 +   * @throws RuntimeException If the pattern was not found
  199.21 +   */
  199.22 +  public void shouldMatch(String pattern) {
  199.23 +      Matcher stdoutMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
  199.24 +      Matcher stderrMatcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
  199.25 +      if (!stdoutMatcher.find() && !stderrMatcher.find()) {
  199.26 +          throw new RuntimeException("'" + pattern
  199.27 +                  + "' missing from stdout/stderr: [" + stdout + stderr
  199.28 +                  + "]\n");
  199.29 +      }
  199.30 +  }
  199.31 +
  199.32 +  /**
  199.33 +   * Verify that the stdout contents of output buffer matches the
  199.34 +   * pattern
  199.35 +   *
  199.36 +   * @param pattern
  199.37 +   * @throws RuntimeException If the pattern was not found
  199.38 +   */
  199.39 +  public void stdoutShouldMatch(String pattern) {
  199.40 +      Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
  199.41 +      if (!matcher.find()) {
  199.42 +          throw new RuntimeException("'" + pattern
  199.43 +                  + "' missing from stdout: [" + stdout + "]\n");
  199.44 +      }
  199.45 +  }
  199.46 +
  199.47 +  /**
  199.48 +   * Verify that the stderr contents of output buffer matches the
  199.49 +   * pattern
  199.50 +   *
  199.51 +   * @param pattern
  199.52 +   * @throws RuntimeException If the pattern was not found
  199.53 +   */
  199.54 +  public void stderrShouldMatch(String pattern) {
  199.55 +      Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
  199.56 +      if (!matcher.find()) {
  199.57 +          throw new RuntimeException("'" + pattern
  199.58 +                  + "' missing from stderr: [" + stderr + "]\n");
  199.59 +      }
  199.60 +  }
  199.61 +
  199.62 +  /**
  199.63 +   * Verify that the stdout and stderr contents of output buffer does not
  199.64 +   * match the pattern
  199.65 +   *
  199.66 +   * @param pattern
  199.67 +   * @throws RuntimeException If the pattern was found
  199.68 +   */
  199.69 +  public void shouldNotMatch(String pattern) {
  199.70 +      Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
  199.71 +      if (matcher.find()) {
  199.72 +          throw new RuntimeException("'" + pattern
  199.73 +                  + "' found in stdout: [" + stdout + "]\n");
  199.74 +      }
  199.75 +      matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
  199.76 +      if (matcher.find()) {
  199.77 +          throw new RuntimeException("'" + pattern
  199.78 +                  + "' found in stderr: [" + stderr + "]\n");
  199.79 +      }
  199.80 +  }
  199.81 +
  199.82 +  /**
  199.83 +   * Verify that the stdout contents of output buffer does not match the
  199.84 +   * pattern
  199.85 +   *
  199.86 +   * @param pattern
  199.87 +   * @throws RuntimeException If the pattern was found
  199.88 +   */
  199.89 +  public void stdoutShouldNotMatch(String pattern) {
  199.90 +      Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stdout);
  199.91 +      if (matcher.find()) {
  199.92 +          throw new RuntimeException("'" + pattern
  199.93 +                  + "' found in stdout: [" + stdout + "]\n");
  199.94 +      }
  199.95 +  }
  199.96 +
  199.97 +  /**
  199.98 +   * Verify that the stderr contents of output buffer does not match the
  199.99 +   * pattern
 199.100 +   *
 199.101 +   * @param pattern
 199.102 +   * @throws RuntimeException If the pattern was found
 199.103 +   */
 199.104 +  public void stderrShouldNotMatch(String pattern) {
 199.105 +      Matcher matcher = Pattern.compile(pattern, Pattern.MULTILINE).matcher(stderr);
 199.106 +      if (matcher.find()) {
 199.107 +          throw new RuntimeException("'" + pattern
 199.108 +                  + "' found in stderr: [" + stderr + "]\n");
 199.109 +      }
 199.110 +  }
 199.111 +
 199.112 +  /**
 199.113     * Verifiy the exit value of the process
 199.114     *
 199.115     * @param expectedExitValue Expected exit value from process
 199.116     * @throws RuntimeException If the exit value from the process did not match the expected value
 199.117     */
 199.118    public void shouldHaveExitValue(int expectedExitValue) {
 199.119 -    if (getExitValue() != expectedExitValue) {
 199.120 -      throw new RuntimeException("Exit value " + getExitValue() + " , expected to get " + expectedExitValue);
 199.121 -    }
 199.122 +      if (getExitValue() != expectedExitValue) {
 199.123 +          throw new RuntimeException("Exit value " + getExitValue() + " , expected to get " + expectedExitValue);
 199.124 +      }
 199.125    }
 199.126  
 199.127    /**
   200.1 --- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Apr 24 20:55:28 2013 -0400
   200.2 +++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java	Wed Apr 24 21:11:02 2013 -0400
   200.3 @@ -1,5 +1,5 @@
   200.4  /*
   200.5 - * Copyright (c) 2012, 2013 Oracle and/or its affiliates. All rights reserved.
   200.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   200.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   200.8   *
   200.9   * This code is free software; you can redistribute it and/or modify it
  200.10 @@ -24,7 +24,7 @@
  200.11  
  200.12  package sun.hotspot;
  200.13  
  200.14 -import java.lang.reflect.Method;
  200.15 +import java.lang.reflect.Executable;
  200.16  import java.security.BasicPermission;
  200.17  import sun.hotspot.parser.DiagnosticCommand;
  200.18  
  200.19 @@ -80,22 +80,35 @@
  200.20    public native Object[]    parseCommandLine(String commandline, DiagnosticCommand[] args);
  200.21  
  200.22    // NMT
  200.23 -  public native boolean NMTAllocTest();
  200.24 -  public native boolean NMTFreeTestMemory();
  200.25 +  public native long NMTMalloc(long size);
  200.26 +  public native void NMTFree(long mem);
  200.27 +  public native long NMTReserveMemory(long size);
  200.28 +  public native void NMTCommitMemory(long addr, long size);
  200.29 +  public native void NMTUncommitMemory(long addr, long size);
  200.30 +  public native void NMTReleaseMemory(long addr, long size);
  200.31    public native boolean NMTWaitForDataMerge();
  200.32  
  200.33    // Compiler
  200.34    public native void    deoptimizeAll();
  200.35 -  public native boolean isMethodCompiled(Method method);
  200.36 -  public native boolean isMethodCompilable(Method method);
  200.37 -  public native boolean isMethodQueuedForCompilation(Method method);
  200.38 -  public native int     deoptimizeMethod(Method method);
  200.39 -  public native void    makeMethodNotCompilable(Method method);
  200.40 -  public native int     getMethodCompilationLevel(Method method);
  200.41 -  public native boolean setDontInlineMethod(Method method, boolean value);
  200.42 +  public native boolean isMethodCompiled(Executable method);
  200.43 +  public boolean isMethodCompilable(Executable method) {
  200.44 +      return isMethodCompilable(method, -1 /*any*/);
  200.45 +  }
  200.46 +  public native boolean isMethodCompilable(Executable method, int compLevel);
  200.47 +  public native boolean isMethodQueuedForCompilation(Executable method);
  200.48 +  public native int     deoptimizeMethod(Executable method);
  200.49 +  public void makeMethodNotCompilable(Executable method) {
  200.50 +      makeMethodNotCompilable(method, -1 /*any*/);
  200.51 +  }
  200.52 +  public native void    makeMethodNotCompilable(Executable method, int compLevel);
  200.53 +  public native int     getMethodCompilationLevel(Executable method);
  200.54 +  public native boolean testSetDontInlineMethod(Executable method, boolean value);
  200.55    public native int     getCompileQueuesSize();
  200.56 +  public native boolean testSetForceInlineMethod(Executable method, boolean value);
  200.57 +  public native boolean enqueueMethodForCompilation(Executable method, int compLevel);
  200.58 +  public native void    clearMethodState(Executable method);
  200.59  
  200.60 -  //Intered strings
  200.61 +  // Intered strings
  200.62    public native boolean isInStringTable(String str);
  200.63  
  200.64    // force Full GC
   201.1 --- a/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java	Wed Apr 24 20:55:28 2013 -0400
   201.2 +++ b/test/testlibrary/whitebox/sun/hotspot/parser/DiagnosticCommand.java	Wed Apr 24 21:11:02 2013 -0400
   201.3 @@ -1,5 +1,5 @@
   201.4  /*
   201.5 - * Copyright (c) 2012, 2013 Oracle and/or its affiliates. All rights reserved.
   201.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   201.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   201.8   *
   201.9   * This code is free software; you can redistribute it and/or modify it

mercurial