changeset:   2859:f49c31acee88
parent:      2858:6c978a541c03
parent:      2846:6dce0126f44e
child:       2861:52df0980eb50
author:      dholmes
date:        Wed, 04 May 2011 23:10:58 -0400
summary:     Merge
files:
  make/linux/makefiles/cscope.make
  make/solaris/makefiles/cscope.make

     1.1 --- a/.hgignore	Wed May 04 19:16:49 2011 -0400
     1.2 +++ b/.hgignore	Wed May 04 23:10:58 2011 -0400
     1.3 @@ -5,3 +5,4 @@
     1.4  ^src/share/tools/IdealGraphVisualizer/[a-zA-Z0-9]*/build/
     1.5  ^src/share/tools/IdealGraphVisualizer/build/
     1.6  ^src/share/tools/IdealGraphVisualizer/dist/
     1.7 +^.hgtip
     2.1 --- a/.hgtags	Wed May 04 19:16:49 2011 -0400
     2.2 +++ b/.hgtags	Wed May 04 23:10:58 2011 -0400
     2.3 @@ -162,3 +162,7 @@
     2.4  bd586e392d93b7ed7a1636dcc8da2b6a4203a102 hs21-b06
     2.5  2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f jdk7-b137
     2.6  2dbcb4a4d8dace5fe78ceb563b134f1fb296cd8f hs21-b07
     2.7 +0930dc920c185afbf40fed9a655290b8e5b16783 jdk7-b138
     2.8 +0930dc920c185afbf40fed9a655290b8e5b16783 hs21-b08
     2.9 +611e19a16519d6fb5deea9ab565336e6e6ee475d jdk7-b139
    2.10 +611e19a16519d6fb5deea9ab565336e6e6ee475d hs21-b09
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/make/cscope.make	Wed May 04 23:10:58 2011 -0400
     3.3 @@ -0,0 +1,141 @@
     3.4 +#
     3.5 +# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
     3.6 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     3.7 +#
     3.8 +# This code is free software; you can redistribute it and/or modify it
     3.9 +# under the terms of the GNU General Public License version 2 only, as
    3.10 +# published by the Free Software Foundation.
    3.11 +#
    3.12 +# This code is distributed in the hope that it will be useful, but WITHOUT
    3.13 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    3.14 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    3.15 +# version 2 for more details (a copy is included in the LICENSE file that
    3.16 +# accompanied this code).
    3.17 +#
    3.18 +# You should have received a copy of the GNU General Public License version
    3.19 +# 2 along with this work; if not, write to the Free Software Foundation,
    3.20 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    3.21 +#
    3.22 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    3.23 +# or visit www.oracle.com if you need additional information or have any
    3.24 +# questions.
    3.25 +#  
    3.26 +#
    3.27 +
    3.28 +# The cscope.out file is generated in the current directory.  The old cscope.out
    3.29 +# file is *not* removed because cscope is smart enough to only build what has
    3.30 +# changed.  cscope can be confused if files are renamed or removed, so it may be
    3.31 +# necessary to remove cscope.out (gmake cscope.clean) if a lot of reorganization
    3.32 +# has occurred.
    3.33 +
    3.34 +include $(GAMMADIR)/make/scm.make
    3.35 +
    3.36 +RM	= rm -f
    3.37 +HG	= hg
    3.38 +CS_TOP	= $(GAMMADIR)
    3.39 +
    3.40 +CSDIRS	= $(CS_TOP)/src $(CS_TOP)/make
    3.41 +CSINCS	= $(CSDIRS:%=-I%)
    3.42 +
    3.43 +CSCOPE		= cscope
    3.44 +CSCOPE_OUT	= cscope.out
    3.45 +CSCOPE_FLAGS	= -b
    3.46 +
    3.47 +# Allow .java files to be added from the environment (CSCLASSES=yes).
    3.48 +ifdef	CSCLASSES
    3.49 +ADDCLASSES=	-o -name '*.java'
    3.50 +endif
    3.51 +
     3.52 +# Adding CClassHeaders also pushes the file count of a full workspace up by about
    3.53 +# 200 files (these files also don't exist in a new workspace, and thus will
    3.54 +# cause the recreation of the database as they get created, which might seem
    3.55 +# a little confusing).  Thus allow these files to be added from the environment
    3.56 +# (CSHEADERS=yes).
    3.57 +ifndef	CSHEADERS
    3.58 +RMCCHEADERS=	-o -name CClassHeaders
    3.59 +endif
    3.60 +
    3.61 +# Ignore build products.
    3.62 +CS_PRUNE_GENERATED	= -o -name '${OSNAME}_*_core' -o \
    3.63 +			     -name '${OSNAME}_*_compiler?'
    3.64 +
    3.65 +# O/S-specific files for all systems are included by default.  Set CS_OS to a
    3.66 +# space-separated list of identifiers to include only those systems.
    3.67 +ifdef	CS_OS
    3.68 +CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',\
    3.69 +		    $(filter-out ${CS_OS},linux macos solaris windows))
    3.70 +endif
    3.71 +
     3.72 +# CPU-specific files for all processors are included by default.  Set CS_CPU
     3.73 +# to a space-separated list of identifiers to include only those CPUs.
    3.74 +ifdef	CS_CPU
    3.75 +CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',\
    3.76 +		    $(filter-out ${CS_CPU},arm ppc sparc x86 zero))
    3.77 +endif
    3.78 +
     3.79 +# What files should we include?  A simple rule might be just those files under
     3.80 +# SCCS control; however, this would miss files we create, like the opcodes and
     3.81 +# CClassHeaders.  The following attempts to find everything that is *useful*.
    3.82 +# (.del files are created by sccsrm, demo directories contain many .java files
    3.83 +# that probably aren't useful for development, and the pkgarchive may contain
    3.84 +# duplicates of files within the source hierarchy).
    3.85 +
    3.86 +# Directories to exclude.
    3.87 +CS_PRUNE_STD	= $(SCM_DIRS) \
    3.88 +		  -o -name '.del-*' \
    3.89 +		  -o -name '*demo' \
    3.90 +		  -o -name pkgarchive
    3.91 +
    3.92 +# Placeholder for user-defined excludes.
    3.93 +CS_PRUNE_EX	=
    3.94 +
    3.95 +CS_PRUNE	= $(CS_PRUNE_STD) \
    3.96 +		  $(CS_PRUNE_OS) \
    3.97 +		  $(CS_PRUNE_CPU) \
    3.98 +		  $(CS_PRUNE_GENERATED) \
    3.99 +		  $(CS_PRUNE_EX) \
   3.100 +		  $(RMCCHEADERS)
   3.101 +
   3.102 +# File names to include.
   3.103 +CSFILENAMES	= -name '*.[ch]pp' \
   3.104 +		  -o -name '*.[Ccshlxy]' \
   3.105 +		  $(CS_ADD_GENERATED) \
   3.106 +		  -o -name '*.d' \
   3.107 +		  -o -name '*.il' \
   3.108 +		  -o -name '*.cc' \
   3.109 +		  -o -name '*[Mm]akefile*' \
   3.110 +		  -o -name '*.gmk' \
   3.111 +		  -o -name '*.make' \
   3.112 +		  -o -name '*.ad' \
   3.113 +		  $(ADDCLASSES)
   3.114 +
   3.115 +.PHONY:		cscope cscope.clean cscope.scratch TAGS.clean FORCE
   3.116 +.PRECIOUS:	cscope.out
   3.117 +
   3.118 +cscope $(CSCOPE_OUT): cscope.files FORCE
   3.119 +	$(CSCOPE) -f $(CSCOPE_OUT) $(CSCOPE_FLAGS)
   3.120 +
   3.121 +cscope.clean:
   3.122 +	$(QUIETLY) $(RM) $(CSCOPE_OUT) cscope.files
   3.123 +
   3.124 +cscope.scratch:  cscope.clean cscope
   3.125 +
   3.126 +# The raw list is reordered so cscope displays the most relevant files first.
   3.127 +cscope.files:
   3.128 +	$(QUIETLY)						\
   3.129 +	raw=cscope.$$$$;					\
   3.130 +	find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o	\
   3.131 +	    -type f \( $(CSFILENAMES) \) -print > $$raw;	\
   3.132 +	{							\
   3.133 +	echo "$(CSINCS)";					\
   3.134 +	egrep -v "\.java|/make/" $$raw;				\
   3.135 +	fgrep ".java" $$raw;					\
   3.136 +	fgrep "/make/" $$raw;					\
   3.137 +	} > $@;							\
   3.138 +	rm -f $$raw
   3.139 +
   3.140 +TAGS:  cscope.files FORCE
   3.141 +	egrep -v '^-|^$$' $< | etags --members -
   3.142 +
   3.143 +TAGS.clean:
   3.144 +	$(RM) TAGS
     4.1 --- a/make/hotspot_version	Wed May 04 19:16:49 2011 -0400
     4.2 +++ b/make/hotspot_version	Wed May 04 23:10:58 2011 -0400
     4.3 @@ -35,7 +35,7 @@
     4.4  
     4.5  HS_MAJOR_VER=21
     4.6  HS_MINOR_VER=0
     4.7 -HS_BUILD_NUMBER=08
     4.8 +HS_BUILD_NUMBER=11
     4.9  
    4.10  JDK_MAJOR_VER=1
    4.11  JDK_MINOR_VER=7
     5.1 --- a/make/linux/Makefile	Wed May 04 19:16:49 2011 -0400
     5.2 +++ b/make/linux/Makefile	Wed May 04 23:10:58 2011 -0400
     5.3 @@ -359,7 +359,7 @@
     5.4  
     5.5  clean:  clean_compiler2 clean_compiler1 clean_core clean_zero clean_shark clean_docs
     5.6  
     5.7 -include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
     5.8 +include $(GAMMADIR)/make/cscope.make
     5.9  
    5.10  #-------------------------------------------------------------------------------
    5.11  
     6.1 --- a/make/linux/makefiles/cscope.make	Wed May 04 19:16:49 2011 -0400
     6.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     6.3 @@ -1,160 +0,0 @@
     6.4 -#
     6.5 -# Copyright (c) 2005, 2008, Oracle and/or its affiliates. All rights reserved.
     6.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.7 -#
     6.8 -# This code is free software; you can redistribute it and/or modify it
     6.9 -# under the terms of the GNU General Public License version 2 only, as
    6.10 -# published by the Free Software Foundation.
    6.11 -#
    6.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
    6.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    6.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    6.15 -# version 2 for more details (a copy is included in the LICENSE file that
    6.16 -# accompanied this code).
    6.17 -#
    6.18 -# You should have received a copy of the GNU General Public License version
    6.19 -# 2 along with this work; if not, write to the Free Software Foundation,
    6.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    6.21 -#
    6.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.23 -# or visit www.oracle.com if you need additional information or have any
    6.24 -# questions.
    6.25 -#  
    6.26 -#
    6.27 -
    6.28 -#
    6.29 -# The cscope.out file is made in the current directory and spans the entire
    6.30 -# source tree.
    6.31 -#
    6.32 -# Things to note:
    6.33 -#	1. We use relative names for cscope.
    6.34 -#	2. We *don't* remove the old cscope.out file, because cscope is smart
    6.35 -#	   enough to only build what has changed.  It can be confused, however,
    6.36 -#	   if files are renamed or removed, so it may be necessary to manually
    6.37 -#	   remove cscope.out if a lot of reorganization has occurred.
    6.38 -#
    6.39 -
    6.40 -include $(GAMMADIR)/make/scm.make
    6.41 -
    6.42 -NAWK	= awk
    6.43 -RM	= rm -f
    6.44 -HG	= hg
    6.45 -CS_TOP	= ../..
    6.46 -
    6.47 -CSDIRS	= $(CS_TOP)/src $(CS_TOP)/build
    6.48 -CSINCS	= $(CSDIRS:%=-I%)
    6.49 -
    6.50 -CSCOPE		= cscope
    6.51 -CSCOPE_FLAGS	= -b
    6.52 -
    6.53 -# Allow .java files to be added from the environment (CSCLASSES=yes).
    6.54 -ifdef	CSCLASSES
    6.55 -ADDCLASSES=	-o -name '*.java'
    6.56 -endif
    6.57 -
    6.58 -# Adding CClassHeaders also pushes the file count of a full workspace up about
    6.59 -# 200 files (these files also don't exist in a new workspace, and thus will
    6.60 -# cause the recreation of the database as they get created, which might seem
    6.61 -# a little confusing).  Thus allow these files to be added from the environment
    6.62 -# (CSHEADERS=yes).
    6.63 -ifndef	CSHEADERS
    6.64 -RMCCHEADERS=	-o -name CClassHeaders
    6.65 -endif
    6.66 -
    6.67 -# Use CS_GENERATED=x to include auto-generated files in the build directories.
    6.68 -ifdef	CS_GENERATED
    6.69 -CS_ADD_GENERATED	= -o -name '*.incl'
    6.70 -else
    6.71 -CS_PRUNE_GENERATED	= -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
    6.72 -endif
    6.73 -
    6.74 -# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
    6.75 -# to include platform-specific files for other platforms.
    6.76 -ifndef	CS_OS
    6.77 -CS_OS		= linux macos solaris win32
    6.78 -CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
    6.79 -endif
    6.80 -
    6.81 -# Processor-specific files for other processors are excluded by default.  Use
    6.82 -# CS_CPU=x to include platform-specific files for other platforms.
    6.83 -ifndef	CS_CPU
    6.84 -CS_CPU		= i486 sparc amd64 ia64
    6.85 -CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
    6.86 -endif
    6.87 -
    6.88 -# What files should we include?  A simple rule might be just those files under
    6.89 -# SCCS control, however this would miss files we create like the opcodes and
    6.90 -# CClassHeaders.  The following attempts to find everything that is *useful*.
    6.91 -# (.del files are created by sccsrm, demo directories contain many .java files
    6.92 -# that probably aren't useful for development, and the pkgarchive may contain
    6.93 -# duplicates of files within the source hierarchy).
    6.94 -
    6.95 -# Directories to exclude.
    6.96 -CS_PRUNE_STD	= $(SCM_DIRS) \
    6.97 -		  -o -name '.del-*' \
    6.98 -		  -o -name '*demo' \
    6.99 -		  -o -name pkgarchive
   6.100 -
   6.101 -CS_PRUNE	= $(CS_PRUNE_STD) \
   6.102 -		  $(CS_PRUNE_OS) \
   6.103 -		  $(CS_PRUNE_CPU) \
   6.104 -		  $(CS_PRUNE_GENERATED) \
   6.105 -		  $(RMCCHEADERS)
   6.106 -
   6.107 -# File names to include.
   6.108 -CSFILENAMES	= -name '*.[ch]pp' \
   6.109 -		  -o -name '*.[Ccshlxy]' \
   6.110 -		  $(CS_ADD_GENERATED) \
   6.111 -		  -o -name '*.il' \
   6.112 -		  -o -name '*.cc' \
   6.113 -		  -o -name '*[Mm]akefile*' \
   6.114 -		  -o -name '*.gmk' \
   6.115 -		  -o -name '*.make' \
   6.116 -		  -o -name '*.ad' \
   6.117 -		  $(ADDCLASSES)
   6.118 -
   6.119 -.PRECIOUS:	cscope.out
   6.120 -
   6.121 -cscope cscope.out: cscope.files FORCE
   6.122 -	$(CSCOPE) $(CSCOPE_FLAGS)
   6.123 -
   6.124 -# The .raw file is reordered here in an attempt to make cscope display the most
   6.125 -# relevant files first.
   6.126 -cscope.files: .cscope.files.raw
   6.127 -	echo "$(CSINCS)" > $@
   6.128 -	-egrep -v "\.java|\/make\/"	$< >> $@
   6.129 -	-fgrep ".java"			$< >> $@
   6.130 -	-fgrep "/make/"		$< >> $@
   6.131 -
   6.132 -.cscope.files.raw:  .nametable.files
   6.133 -	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
   6.134 -	    -type f \( $(CSFILENAMES) \) -print > $@
   6.135 -
   6.136 -cscope.clean:  nametable.clean
   6.137 -	-$(RM) cscope.out cscope.files .cscope.files.raw
   6.138 -
   6.139 -TAGS:  cscope.files FORCE
   6.140 -	egrep -v '^-|^$$' $< | etags --members -
   6.141 -
   6.142 -TAGS.clean:  nametable.clean
   6.143 -	-$(RM) TAGS
   6.144 -
   6.145 -# .nametable.files and .nametable.files.tmp are used to determine if any files
   6.146 -# were added to/deleted from/renamed in the workspace.  If not, then there's
   6.147 -# normally no need to rebuild the cscope database. To force a rebuild of
   6.148 -# the cscope database: gmake nametable.clean.
   6.149 -.nametable.files:  .nametable.files.tmp
   6.150 -	( cmp -s $@ $< ) || ( cp $< $@ )
   6.151 -	-$(RM) $<
   6.152 -
   6.153 -# `hg status' is slightly faster than `hg fstatus'. Both are
   6.154 -# quite a bit slower on an NFS mounted file system, so this is
   6.155 -# really geared towards repos on local file systems.
   6.156 -.nametable.files.tmp:
   6.157 -	-$(HG) fstatus -acmn > $@
   6.158 -nametable.clean:
   6.159 -	-$(RM) .nametable.files .nametable.files.tmp
   6.160 -
   6.161 -FORCE:
   6.162 -
   6.163 -.PHONY:		cscope cscope.clean TAGS.clean nametable.clean FORCE
     7.1 --- a/make/solaris/Makefile	Wed May 04 19:16:49 2011 -0400
     7.2 +++ b/make/solaris/Makefile	Wed May 04 23:10:58 2011 -0400
     7.3 @@ -296,7 +296,7 @@
     7.4  
     7.5  clean:  clean_compiler2 clean_compiler1 clean_core clean_docs clean_kernel
     7.6  
     7.7 -include $(GAMMADIR)/make/$(OSNAME)/makefiles/cscope.make
     7.8 +include $(GAMMADIR)/make/cscope.make
     7.9  
    7.10  #-------------------------------------------------------------------------------
    7.11  
     8.1 --- a/make/solaris/makefiles/cscope.make	Wed May 04 19:16:49 2011 -0400
     8.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
     8.3 @@ -1,162 +0,0 @@
     8.4 -#
     8.5 -# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
     8.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.7 -#
     8.8 -# This code is free software; you can redistribute it and/or modify it
     8.9 -# under the terms of the GNU General Public License version 2 only, as
    8.10 -# published by the Free Software Foundation.
    8.11 -#
    8.12 -# This code is distributed in the hope that it will be useful, but WITHOUT
    8.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    8.14 -# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    8.15 -# version 2 for more details (a copy is included in the LICENSE file that
    8.16 -# accompanied this code).
    8.17 -#
    8.18 -# You should have received a copy of the GNU General Public License version
    8.19 -# 2 along with this work; if not, write to the Free Software Foundation,
    8.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    8.21 -#
    8.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    8.23 -# or visit www.oracle.com if you need additional information or have any
    8.24 -# questions.
    8.25 -#  
    8.26 -#
    8.27 -
    8.28 -#
    8.29 -# The cscope.out file is made in the current directory and spans the entire
    8.30 -# source tree.
    8.31 -#
    8.32 -# Things to note:
    8.33 -#	1. We use relative names for cscope.
    8.34 -#	2. We *don't* remove the old cscope.out file, because cscope is smart
    8.35 -#	   enough to only build what has changed.  It can be confused, however,
    8.36 -#	   if files are renamed or removed, so it may be necessary to manually
    8.37 -#	   remove cscope.out if a lot of reorganization has occurred.
    8.38 -#
    8.39 -
    8.40 -include $(GAMMADIR)/make/scm.make
    8.41 -
    8.42 -NAWK	= /usr/xpg4/bin/awk
    8.43 -RM	= rm -f
    8.44 -HG	= hg
    8.45 -CS_TOP	= ../..
    8.46 -
    8.47 -CSDIRS	= $(CS_TOP)/src $(CS_TOP)/make
    8.48 -CSINCS	= $(CSDIRS:%=-I%)
    8.49 -
    8.50 -CSCOPE		= cscope
    8.51 -CSCOPE_FLAGS	= -b
    8.52 -
    8.53 -# Allow .java files to be added from the environment (CSCLASSES=yes).
    8.54 -ifdef	CSCLASSES
    8.55 -ADDCLASSES=	-o -name '*.java'
    8.56 -endif
    8.57 -
    8.58 -# Adding CClassHeaders also pushes the file count of a full workspace up about
    8.59 -# 200 files (these files also don't exist in a new workspace, and thus will
    8.60 -# cause the recreation of the database as they get created, which might seem
    8.61 -# a little confusing).  Thus allow these files to be added from the environment
    8.62 -# (CSHEADERS=yes).
    8.63 -ifndef	CSHEADERS
    8.64 -RMCCHEADERS=	-o -name CClassHeaders
    8.65 -endif
    8.66 -
    8.67 -# Use CS_GENERATED=x to include auto-generated files in the make directories.
    8.68 -ifdef	CS_GENERATED
    8.69 -CS_ADD_GENERATED	= -o -name '*.incl'
    8.70 -else
    8.71 -CS_PRUNE_GENERATED	= -o -name '${OS}_*_core' -o -name '${OS}_*_compiler?'
    8.72 -endif
    8.73 -
    8.74 -# OS-specific files for other systems are excluded by default.  Use CS_OS=yes
    8.75 -# to include platform-specific files for other platforms.
    8.76 -ifndef	CS_OS
    8.77 -CS_OS		= linux macos solaris win32
    8.78 -CS_PRUNE_OS	= $(patsubst %,-o -name '*%*',$(filter-out ${OS},${CS_OS}))
    8.79 -endif
    8.80 -
    8.81 -# Processor-specific files for other processors are excluded by default.  Use
    8.82 -# CS_CPU=x to include platform-specific files for other platforms.
    8.83 -ifndef	CS_CPU
    8.84 -CS_CPU		= i486 sparc amd64 ia64
    8.85 -CS_PRUNE_CPU	= $(patsubst %,-o -name '*%*',$(filter-out ${SRCARCH},${CS_CPU}))
    8.86 -endif
    8.87 -
    8.88 -# What files should we include?  A simple rule might be just those files under
    8.89 -# SCCS control, however this would miss files we create like the opcodes and
    8.90 -# CClassHeaders.  The following attempts to find everything that is *useful*.
    8.91 -# (.del files are created by sccsrm, demo directories contain many .java files
    8.92 -# that probably aren't useful for development, and the pkgarchive may contain
    8.93 -# duplicates of files within the source hierarchy).
    8.94 -
    8.95 -# Directories to exclude.
    8.96 -CS_PRUNE_STD	= $(SCM_DIRS) \
    8.97 -		  -o -name '.del-*' \
    8.98 -		  -o -name '*demo' \
    8.99 -		  -o -name pkgarchive
   8.100 -
   8.101 -CS_PRUNE	= $(CS_PRUNE_STD) \
   8.102 -		  $(CS_PRUNE_OS) \
   8.103 -		  $(CS_PRUNE_CPU) \
   8.104 -		  $(CS_PRUNE_GENERATED) \
   8.105 -		  $(RMCCHEADERS)
   8.106 -
   8.107 -# File names to include.
   8.108 -CSFILENAMES	= -name '*.[ch]pp' \
   8.109 -		  -o -name '*.[Ccshlxy]' \
   8.110 -		  $(CS_ADD_GENERATED) \
   8.111 -		  -o -name '*.d' \
   8.112 -		  -o -name '*.il' \
   8.113 -		  -o -name '*.cc' \
   8.114 -		  -o -name '*[Mm]akefile*' \
   8.115 -		  -o -name '*.gmk' \
   8.116 -		  -o -name '*.make' \
   8.117 -		  -o -name '*.ad' \
   8.118 -		  $(ADDCLASSES)
   8.119 -
   8.120 -.PRECIOUS:	cscope.out
   8.121 -
   8.122 -cscope cscope.out: cscope.files FORCE
   8.123 -	$(CSCOPE) $(CSCOPE_FLAGS)
   8.124 -
   8.125 -# The .raw file is reordered here in an attempt to make cscope display the most
   8.126 -# relevant files first.
   8.127 -cscope.files: .cscope.files.raw
   8.128 -	echo "$(CSINCS)" > $@
   8.129 -	-egrep -v "\.java|\/make\/"	$< >> $@
   8.130 -	-fgrep ".java"			$< >> $@
   8.131 -	-fgrep "/make/"		$< >> $@
   8.132 -
   8.133 -.cscope.files.raw:  .nametable.files
   8.134 -	-find $(CSDIRS) -type d \( $(CS_PRUNE) \) -prune -o \
   8.135 -	    -type f \( $(CSFILENAMES) \) -print > $@
   8.136 -
   8.137 -cscope.clean:  nametable.clean
   8.138 -	-$(RM) cscope.out cscope.files .cscope.files.raw
   8.139 -
   8.140 -TAGS:  cscope.files FORCE
   8.141 -	egrep -v '^-|^$$' $< | etags --members -
   8.142 -
   8.143 -TAGS.clean:  nametable.clean
   8.144 -	-$(RM) TAGS
   8.145 -
   8.146 -# .nametable.files and .nametable.files.tmp are used to determine if any files
   8.147 -# were added to/deleted from/renamed in the workspace.  If not, then there's
   8.148 -# normally no need to rebuild the cscope database. To force a rebuild of
   8.149 -# the cscope database: gmake nametable.clean.
   8.150 -.nametable.files:  .nametable.files.tmp
   8.151 -	( cmp -s $@ $< ) || ( cp $< $@ )
   8.152 -	-$(RM) $<
   8.153 -
   8.154 -# `hg status' is slightly faster than `hg fstatus'. Both are
   8.155 -# quite a bit slower on an NFS mounted file system, so this is
   8.156 -# really geared towards repos on local file systems.
   8.157 -.nametable.files.tmp:
   8.158 -	-$(HG) fstatus -acmn > $@
   8.159 -
   8.160 -nametable.clean:
   8.161 -	-$(RM) .nametable.files .nametable.files.tmp
   8.162 -
   8.163 -FORCE:
   8.164 -
   8.165 -.PHONY:		cscope cscope.clean TAGS.clean nametable.clean FORCE
     9.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed May 04 19:16:49 2011 -0400
     9.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Wed May 04 23:10:58 2011 -0400
     9.3 @@ -486,7 +486,7 @@
     9.4        if (ek == _invokespecial_mh) {
     9.5          // Must load & check the first argument before entering the target method.
     9.6          __ load_method_handle_vmslots(O0_argslot, G3_method_handle, O1_scratch);
     9.7 -        __ ld_ptr(__ argument_address(O0_argslot), G3_method_handle);
     9.8 +        __ ld_ptr(__ argument_address(O0_argslot, -1), G3_method_handle);
     9.9          __ null_check(G3_method_handle);
    9.10          __ verify_oop(G3_method_handle);
    9.11        }
    10.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed May 04 19:16:49 2011 -0400
    10.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed May 04 23:10:58 2011 -0400
    10.3 @@ -3293,8 +3293,6 @@
    10.4                               /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
    10.5    __ mov(SP, O5_savedSP);  // record SP that we wanted the callee to restore
    10.6  
    10.7 -  __ verify_oop(G5_callsite);
    10.8 -
    10.9    // profile this call
   10.10    __ profile_call(O4);
   10.11  
   10.12 @@ -3307,8 +3305,10 @@
   10.13    __ sll(Rret, LogBytesPerWord, Rret);
   10.14    __ ld_ptr(Rtemp, Rret, Rret);  // get return address
   10.15  
   10.16 +  __ verify_oop(G5_callsite);
   10.17    __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
   10.18    __ null_check(G3_method_handle);
   10.19 +  __ verify_oop(G3_method_handle);
   10.20  
   10.21    // Adjust Rret first so Llast_SP can be same as Rret
   10.22    __ add(Rret, -frame::pc_return_offset, O7);
    11.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed May 04 19:16:49 2011 -0400
    11.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed May 04 23:10:58 2011 -0400
    11.3 @@ -422,7 +422,7 @@
    11.4  
    11.5    Label L_done, L_throw_exception;
    11.6    const Register con_klass_temp = rcx;  // same as Rcache
    11.7 -  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
    11.8 +  __ load_klass(con_klass_temp, rax);
    11.9    __ cmpptr(con_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   11.10    __ jcc(Assembler::notEqual, L_done);
   11.11    __ cmpl(Address(rax, arrayOopDesc::length_offset_in_bytes()), 0);
   11.12 @@ -432,7 +432,7 @@
   11.13  
   11.14    // Load the exception from the system-array which wraps it:
   11.15    __ bind(L_throw_exception);
   11.16 -  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   11.17 +  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   11.18    __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
   11.19  
   11.20    __ bind(L_done);
   11.21 @@ -946,9 +946,9 @@
   11.22    __ jcc(Assembler::zero, is_null);
   11.23  
   11.24    // Move subklass into EBX
   11.25 -  __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
   11.26 +  __ load_klass(rbx, rax);
   11.27    // Move superklass into EAX
   11.28 -  __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
   11.29 +  __ load_klass(rax, rdx);
   11.30    __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
   11.31    // Compress array+index*wordSize+12 into a single register.  Frees ECX.
   11.32    __ lea(rdx, element_address);
   11.33 @@ -2001,7 +2001,7 @@
   11.34    if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
   11.35      assert(state == vtos, "only valid state");
   11.36      __ movptr(rax, aaddress(0));
   11.37 -    __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
   11.38 +    __ load_klass(rdi, rax);
   11.39      __ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
   11.40      __ testl(rdi, JVM_ACC_HAS_FINALIZER);
   11.41      Label skip_register_finalizer;
   11.42 @@ -2948,7 +2948,7 @@
   11.43    // get receiver klass
   11.44    __ null_check(recv, oopDesc::klass_offset_in_bytes());
   11.45    // Keep recv in rcx for callee expects it there
   11.46 -  __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
   11.47 +  __ load_klass(rax, recv);
   11.48    __ verify_oop(rax);
   11.49  
   11.50    // profile this call
   11.51 @@ -3028,7 +3028,7 @@
   11.52  
   11.53    // Get receiver klass into rdx - also a null check
   11.54    __ restore_locals();  // restore rdi
   11.55 -  __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
   11.56 +  __ load_klass(rdx, rcx);
   11.57    __ verify_oop(rdx);
   11.58  
   11.59    // profile this call
   11.60 @@ -3083,6 +3083,7 @@
   11.61  
   11.62  void TemplateTable::invokedynamic(int byte_no) {
   11.63    transition(vtos, vtos);
   11.64 +  assert(byte_no == f1_oop, "use this argument");
   11.65  
   11.66    if (!EnableInvokeDynamic) {
   11.67      // We should not encounter this bytecode if !EnableInvokeDynamic.
   11.68 @@ -3095,7 +3096,6 @@
   11.69      return;
   11.70    }
   11.71  
   11.72 -  assert(byte_no == f1_oop, "use this argument");
   11.73    prepare_invoke(rax, rbx, byte_no);
   11.74  
   11.75    // rax: CallSite object (f1)
   11.76 @@ -3106,14 +3106,14 @@
   11.77    Register rax_callsite      = rax;
   11.78    Register rcx_method_handle = rcx;
   11.79  
   11.80 -  if (ProfileInterpreter) {
   11.81 -    // %%% should make a type profile for any invokedynamic that takes a ref argument
   11.82 -    // profile this call
   11.83 -    __ profile_call(rsi);
   11.84 -  }
   11.85 -
   11.86 -  __ movptr(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
   11.87 +  // %%% should make a type profile for any invokedynamic that takes a ref argument
   11.88 +  // profile this call
   11.89 +  __ profile_call(rsi);
   11.90 +
   11.91 +  __ verify_oop(rax_callsite);
   11.92 +  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   11.93    __ null_check(rcx_method_handle);
   11.94 +  __ verify_oop(rcx_method_handle);
   11.95    __ prepare_to_jump_from_interpreted();
   11.96    __ jump_to_method_handle_entry(rcx_method_handle, rdx);
   11.97  }
   11.98 @@ -3258,7 +3258,7 @@
   11.99                  (int32_t)markOopDesc::prototype()); // header
  11.100        __ pop(rcx);   // get saved klass back in the register.
  11.101      }
  11.102 -    __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx);  // klass
  11.103 +    __ store_klass(rax, rcx);  // klass
  11.104  
  11.105      {
  11.106        SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
  11.107 @@ -3333,7 +3333,7 @@
  11.108    __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
  11.109  
  11.110    __ bind(resolved);
  11.111 -  __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
  11.112 +  __ load_klass(rbx, rdx);
  11.113  
  11.114    // Generate subtype check.  Blows ECX.  Resets EDI.  Object in EDX.
  11.115    // Superklass in EAX.  Subklass in EBX.
  11.116 @@ -3376,12 +3376,12 @@
  11.117    __ push(atos);
  11.118    call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
  11.119    __ pop_ptr(rdx);
  11.120 -  __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
  11.121 +  __ load_klass(rdx, rdx);
  11.122    __ jmp(resolved);
  11.123  
  11.124    // Get superklass in EAX and subklass in EDX
  11.125    __ bind(quicked);
  11.126 -  __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
  11.127 +  __ load_klass(rdx, rax);
  11.128    __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
  11.129  
  11.130    __ bind(resolved);
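
The x86_32 changes above consistently replace raw `movptr` loads of the klass field and of heap elements with the `load_klass`/`store_klass`/`load_heap_oop` wrappers, which keeps these interpreter paths correct when the VM stores class pointers and heap references in compressed (narrow) form. Below is a minimal standalone sketch of the decode/encode idea behind such wrappers; the base and shift values are illustrative assumptions, not HotSpot's actual configuration.

```cpp
#include <cstdint>

// Standalone sketch of compressed-reference decoding, the idea behind
// wrappers like load_klass()/load_heap_oop(): a 32-bit "narrow" value is
// widened to a full pointer as base + (narrow << shift). The base and
// shift here are illustrative; HotSpot derives them from the heap layout.
struct NarrowDecoder {
  uintptr_t base;   // heap base address (0 in "unscaled" mode)
  unsigned  shift;  // log2 of object alignment, typically 3

  uintptr_t decode(uint32_t narrow) const {
    return base + (uintptr_t(narrow) << shift);
  }
  uint32_t encode(uintptr_t wide) const {
    return uint32_t((wide - base) >> shift);
  }
};

int main() {
  NarrowDecoder heap{0x100000000ULL, 3};
  uintptr_t p = heap.decode(0x10);        // 0x100000080
  return heap.encode(p) == 0x10 ? 0 : 1;  // round-trips
}
```

A raw `movptr` of the klass field would treat the narrow value as a full pointer, which is exactly the kind of mismatch these substitutions avoid.
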
    12.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed May 04 19:16:49 2011 -0400
    12.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed May 04 23:10:58 2011 -0400
    12.3 @@ -436,7 +436,7 @@
    12.4    Label L_done, L_throw_exception;
    12.5    const Register con_klass_temp = rcx;  // same as cache
    12.6    const Register array_klass_temp = rdx;  // same as index
    12.7 -  __ movptr(con_klass_temp, Address(rax, oopDesc::klass_offset_in_bytes()));
    12.8 +  __ load_klass(con_klass_temp, rax);
    12.9    __ lea(array_klass_temp, ExternalAddress((address)Universe::systemObjArrayKlassObj_addr()));
   12.10    __ cmpptr(con_klass_temp, Address(array_klass_temp, 0));
   12.11    __ jcc(Assembler::notEqual, L_done);
   12.12 @@ -447,7 +447,7 @@
   12.13  
   12.14    // Load the exception from the system-array which wraps it:
   12.15    __ bind(L_throw_exception);
   12.16 -  __ movptr(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   12.17 +  __ load_heap_oop(rax, Address(rax, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
   12.18    __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
   12.19  
   12.20    __ bind(L_done);
   12.21 @@ -3137,7 +3137,6 @@
   12.22      return;
   12.23    }
   12.24  
   12.25 -  assert(byte_no == f1_oop, "use this argument");
   12.26    prepare_invoke(rax, rbx, byte_no);
   12.27  
   12.28    // rax: CallSite object (f1)
   12.29 @@ -3148,14 +3147,14 @@
   12.30    Register rax_callsite      = rax;
   12.31    Register rcx_method_handle = rcx;
   12.32  
   12.33 -  if (ProfileInterpreter) {
   12.34 -    // %%% should make a type profile for any invokedynamic that takes a ref argument
   12.35 -    // profile this call
   12.36 -    __ profile_call(r13);
   12.37 -  }
   12.38 -
   12.39 -  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rcx)));
   12.40 +  // %%% should make a type profile for any invokedynamic that takes a ref argument
   12.41 +  // profile this call
   12.42 +  __ profile_call(r13);
   12.43 +
   12.44 +  __ verify_oop(rax_callsite);
   12.45 +  __ load_heap_oop(rcx_method_handle, Address(rax_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, rdx)));
   12.46    __ null_check(rcx_method_handle);
   12.47 +  __ verify_oop(rcx_method_handle);
   12.48    __ prepare_to_jump_from_interpreted();
   12.49    __ jump_to_method_handle_entry(rcx_method_handle, rdx);
   12.50  }
    13.1 --- a/src/cpu/x86/vm/vm_version_x86.cpp	Wed May 04 19:16:49 2011 -0400
    13.2 +++ b/src/cpu/x86/vm/vm_version_x86.cpp	Wed May 04 23:10:58 2011 -0400
    13.3 @@ -441,12 +441,25 @@
    13.4        }
    13.5      }
    13.6  
    13.7 -    // On family 21 processors default is no sw prefetch
    13.8 -    if ( cpu_family() == 21 ) {
    13.9 +    // some defaults for AMD family 15h
   13.10 +    if ( cpu_family() == 0x15 ) {
   13.11 +      // On family 15h processors default is no sw prefetch
   13.12        if (FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
   13.13          AllocatePrefetchStyle = 0;
   13.14        }
   13.15 +      // Also, if some other prefetch style is specified, default instruction type is PREFETCHW
   13.16 +      if (FLAG_IS_DEFAULT(AllocatePrefetchInstr)) {
   13.17 +        AllocatePrefetchInstr = 3;
   13.18 +      }
   13.19 +      // On family 15h processors use XMM and UnalignedLoadStores for Array Copy
   13.20 +      if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
   13.21 +        UseXMMForArrayCopy = true;
   13.22 +      }
   13.23 +      if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
   13.24 +        UseUnalignedLoadStores = true;
   13.25 +      }
   13.26      }
   13.27 +
   13.28    }
   13.29  
   13.30    if( is_intel() ) { // Intel cpus specific settings
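
The family-15h block above installs its tuned values only when the user left the corresponding flag at its default, via `FLAG_IS_DEFAULT`. Here is a hedged sketch of that "respect explicit user settings" pattern; `IntFlag` and `apply_family_15h_defaults` are hypothetical stand-ins, not VM APIs.

```cpp
#include <cstdio>

// Hypothetical sketch of the FLAG_IS_DEFAULT pattern used above: a flag
// remembers whether it was set explicitly, and platform code installs its
// preferred default only when the user has not chosen a value.
struct IntFlag {
  int  value;
  bool is_default = true;          // true until set on the "command line"
  void set(int v) { value = v; is_default = false; }
};

static IntFlag AllocatePrefetchStyle{1};
static IntFlag AllocatePrefetchInstr{0};

static void apply_family_15h_defaults() {
  if (AllocatePrefetchStyle.is_default) AllocatePrefetchStyle.value = 0;
  if (AllocatePrefetchInstr.is_default) AllocatePrefetchInstr.value = 3; // PREFETCHW
}

int main(int argc, char**) {
  if (argc > 1) AllocatePrefetchStyle.set(2);   // simulate an explicit option
  apply_family_15h_defaults();
  std::printf("style=%d instr=%d\n",
              AllocatePrefetchStyle.value, AllocatePrefetchInstr.value);
  return 0;
}
```
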
    14.1 --- a/src/os/linux/vm/globals_linux.hpp	Wed May 04 19:16:49 2011 -0400
    14.2 +++ b/src/os/linux/vm/globals_linux.hpp	Wed May 04 23:10:58 2011 -0400
    14.3 @@ -29,13 +29,19 @@
    14.4  // Defines Linux specific flags. They are not available on other platforms.
    14.5  //
    14.6  #define RUNTIME_OS_FLAGS(develop, develop_pd, product, product_pd, diagnostic, notproduct) \
    14.7 -  product(bool, UseOprofile, false,                                 \
    14.8 -        "enable support for Oprofile profiler")                     \
    14.9 -                                                                    \
   14.10 -  product(bool, UseLinuxPosixThreadCPUClocks, true,                 \
   14.11 -          "enable fast Linux Posix clocks where available")
   14.12 -// NB: The default value of UseLinuxPosixThreadCPUClocks may be
   14.13 -// overridden in Arguments::parse_each_vm_init_arg.
   14.14 +  product(bool, UseOprofile, false,                                     \
   14.15 +        "enable support for Oprofile profiler")                         \
   14.16 +                                                                        \
   14.17 +  product(bool, UseLinuxPosixThreadCPUClocks, true,                     \
   14.18 +          "enable fast Linux Posix clocks where available")             \
   14.19 +/*  NB: The default value of UseLinuxPosixThreadCPUClocks may be        \
   14.20 +    overridden in Arguments::parse_each_vm_init_arg.  */                \
   14.21 +                                                                        \
   14.22 +  product(bool, UseHugeTLBFS, false,                                    \
   14.23 +          "Use MAP_HUGETLB for large pages")                            \
   14.24 +                                                                        \
   14.25 +  product(bool, UseSHM, false,                                          \
   14.26 +          "Use SYSV shared memory for large pages")
   14.27  
   14.28  //
   14.29  // Defines Linux-specific default values. The flags are available on all
    15.1 --- a/src/os/linux/vm/os_linux.cpp	Wed May 04 19:16:49 2011 -0400
    15.2 +++ b/src/os/linux/vm/os_linux.cpp	Wed May 04 23:10:58 2011 -0400
    15.3 @@ -2465,16 +2465,40 @@
    15.4    return res != (uintptr_t) MAP_FAILED;
    15.5  }
    15.6  
    15.7 +// Define MAP_HUGETLB here so we can build HotSpot on old systems.
    15.8 +#ifndef MAP_HUGETLB
    15.9 +#define MAP_HUGETLB 0x40000
   15.10 +#endif
   15.11 +
   15.12 +// Define MADV_HUGEPAGE here so we can build HotSpot on old systems.
   15.13 +#ifndef MADV_HUGEPAGE
   15.14 +#define MADV_HUGEPAGE 14
   15.15 +#endif
   15.16 +
   15.17  bool os::commit_memory(char* addr, size_t size, size_t alignment_hint,
   15.18                         bool exec) {
   15.19 +  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
   15.20 +    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   15.21 +    uintptr_t res =
   15.22 +      (uintptr_t) ::mmap(addr, size, prot,
   15.23 +                         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS|MAP_HUGETLB,
   15.24 +                         -1, 0);
   15.25 +    return res != (uintptr_t) MAP_FAILED;
   15.26 +  }
   15.27 +
   15.28    return commit_memory(addr, size, exec);
   15.29  }
   15.30  
   15.31 -void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) { }
   15.32 +void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
   15.33 +  if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
   15.34 +    // We don't check the return value: madvise(MADV_HUGEPAGE) may not
   15.35 +    // be supported or the memory may already be backed by huge pages.
   15.36 +    ::madvise(addr, bytes, MADV_HUGEPAGE);
   15.37 +  }
   15.38 +}
   15.39  
   15.40  void os::free_memory(char *addr, size_t bytes) {
   15.41 -  ::mmap(addr, bytes, PROT_READ | PROT_WRITE,
   15.42 -         MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   15.43 +  ::madvise(addr, bytes, MADV_DONTNEED);
   15.44  }
   15.45  
   15.46  void os::numa_make_global(char *addr, size_t bytes) {
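
The new `commit_memory` path above requests explicit huge pages with `MAP_HUGETLB` and falls back to the regular small-page path when that mmap fails, while `free_memory` now drops backing store with `madvise(MADV_DONTNEED)` instead of remapping. A minimal Linux-only sketch of the same system calls follows; it is a standalone illustration, not the HotSpot functions, and it maps at a kernel-chosen address rather than with `MAP_FIXED`.

```cpp
// Linux-only sketch of the commit/uncommit calls used above.
#include <sys/mman.h>
#include <cstdio>

#ifndef MAP_HUGETLB             // same trick as the diff: define for old headers
#define MAP_HUGETLB 0x40000
#endif

static void* commit(void* addr, size_t size, bool try_huge) {
  int prot = PROT_READ | PROT_WRITE;
  if (try_huge) {
    void* p = ::mmap(addr, size, prot,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    if (p != MAP_FAILED) return p;          // got huge pages
  }
  void* p = ::mmap(addr, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return p == MAP_FAILED ? nullptr : p;     // small-page fallback
}

int main() {
  const size_t sz = 2 * 1024 * 1024;        // one x86-64 huge page
  void* p = commit(nullptr, sz, true);
  if (!p) { std::perror("mmap"); return 1; }
  ::madvise(p, sz, MADV_DONTNEED);          // "free" the backing store
  ::munmap(p, sz);
  return 0;
}
```
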
   15.47 @@ -2812,6 +2836,43 @@
   15.48    return linux_mprotect(addr, size, PROT_READ|PROT_WRITE);
   15.49  }
   15.50  
   15.51 +bool os::Linux::hugetlbfs_sanity_check(bool warn, size_t page_size) {
   15.52 +  bool result = false;
   15.53 +  void *p = mmap (NULL, page_size, PROT_READ|PROT_WRITE,
   15.54 +                  MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
   15.55 +                  -1, 0);
   15.56 +
   15.57 +  if (p != (void *) -1) {
   15.58 +    // We don't know if this really is a huge page or not.
   15.59 +    FILE *fp = fopen("/proc/self/maps", "r");
   15.60 +    if (fp) {
   15.61 +      while (!feof(fp)) {
   15.62 +        char chars[257];
   15.63 +        long x = 0;
   15.64 +        if (fgets(chars, sizeof(chars), fp)) {
   15.65 +          if (sscanf(chars, "%lx-%*lx", &x) == 1
   15.66 +              && x == (long)p) {
   15.67 +            if (strstr (chars, "hugepage")) {
   15.68 +              result = true;
   15.69 +              break;
   15.70 +            }
   15.71 +          }
   15.72 +        }
   15.73 +      }
   15.74 +      fclose(fp);
   15.75 +    }
   15.76 +    munmap (p, page_size);
   15.77 +    if (result)
   15.78 +      return true;
   15.79 +  }
   15.80 +
   15.81 +  if (warn) {
   15.82 +    warning("HugeTLBFS is not supported by the operating system.");
   15.83 +  }
   15.84 +
   15.85 +  return result;
   15.86 +}
   15.87 +
   15.88  /*
   15.89  * Set the coredump_filter bits to include largepages in core dump (bit 6)
   15.90  *
   15.91 @@ -2854,7 +2915,16 @@
   15.92  static size_t _large_page_size = 0;
   15.93  
   15.94  bool os::large_page_init() {
   15.95 -  if (!UseLargePages) return false;
   15.96 +  if (!UseLargePages) {
   15.97 +    UseHugeTLBFS = false;
   15.98 +    UseSHM = false;
   15.99 +    return false;
  15.100 +  }
  15.101 +
  15.102 +  if (FLAG_IS_DEFAULT(UseHugeTLBFS) && FLAG_IS_DEFAULT(UseSHM)) {
  15.103 +    // Our user has not expressed a preference, so we'll try both.
  15.104 +    UseHugeTLBFS = UseSHM = true;
  15.105 +  }
  15.106  
  15.107    if (LargePageSizeInBytes) {
  15.108      _large_page_size = LargePageSizeInBytes;
  15.109 @@ -2899,6 +2969,9 @@
  15.110      }
  15.111    }
  15.112  
   15.113 +  // print a warning if any large-page-related flag is specified on the command line
  15.114 +  bool warn_on_failure = !FLAG_IS_DEFAULT(UseHugeTLBFS);
  15.115 +
  15.116    const size_t default_page_size = (size_t)Linux::page_size();
  15.117    if (_large_page_size > default_page_size) {
  15.118      _page_sizes[0] = _large_page_size;
  15.119 @@ -2906,6 +2979,14 @@
  15.120      _page_sizes[2] = 0;
  15.121    }
  15.122  
  15.123 +  UseHugeTLBFS = UseHugeTLBFS &&
  15.124 +                 Linux::hugetlbfs_sanity_check(warn_on_failure, _large_page_size);
  15.125 +
  15.126 +  if (UseHugeTLBFS)
  15.127 +    UseSHM = false;
  15.128 +
  15.129 +  UseLargePages = UseHugeTLBFS || UseSHM;
  15.130 +
  15.131    set_coredump_filter();
  15.132  
  15.133    // Large page support is available on 2.6 or newer kernel, some vendors
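
`large_page_init` now arbitrates between the two Linux large-page mechanisms: with no explicit user preference both `UseHugeTLBFS` and `UseSHM` are tried, a failed sanity check vetoes HugeTLBFS, HugeTLBFS wins when both remain, and `UseLargePages` ends up reflecting whatever survived. Below is a condensed sketch of that decision logic with plain booleans standing in for the VM flags; `probe_hugetlbfs` is a hypothetical stand-in for `hugetlbfs_sanity_check`.

```cpp
// Condensed sketch of the flag arbitration in large_page_init() above.
struct LargePageFlags {
  bool use_large_pages, use_hugetlbfs, use_shm;
  bool hugetlbfs_default, shm_default;    // "user didn't set it" markers
};

static bool probe_hugetlbfs() { return true; }  // stand-in for the mmap probe

static void arbitrate(LargePageFlags& f) {
  if (!f.use_large_pages) {               // -XX:-UseLargePages turns both off
    f.use_hugetlbfs = f.use_shm = false;
    return;
  }
  if (f.hugetlbfs_default && f.shm_default)
    f.use_hugetlbfs = f.use_shm = true;   // no preference expressed: try both
  f.use_hugetlbfs = f.use_hugetlbfs && probe_hugetlbfs();
  if (f.use_hugetlbfs)
    f.use_shm = false;                    // HugeTLBFS wins when available
  f.use_large_pages = f.use_hugetlbfs || f.use_shm;
}

int main() {
  LargePageFlags f{true, false, false, true, true};
  arbitrate(f);
  return (f.use_hugetlbfs && !f.use_shm && f.use_large_pages) ? 0 : 1;
}
```
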
  15.134 @@ -2922,7 +3003,7 @@
  15.135  char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  15.136    // "exec" is passed in but not used.  Creating the shared image for
  15.137    // the code cache doesn't have an SHM_X executable permission to check.
  15.138 -  assert(UseLargePages, "only for large pages");
  15.139 +  assert(UseLargePages && UseSHM, "only for SHM large pages");
  15.140  
  15.141    key_t key = IPC_PRIVATE;
  15.142    char *addr;
  15.143 @@ -2989,16 +3070,15 @@
  15.144    return _large_page_size;
  15.145  }
  15.146  
  15.147 -// Linux does not support anonymous mmap with large page memory. The only way
  15.148 -// to reserve large page memory without file backing is through SysV shared
  15.149 -// memory API. The entire memory region is committed and pinned upfront.
  15.150 -// Hopefully this will change in the future...
   15.151 +// HugeTLBFS allows the application to commit large page memory on demand;
  15.152 +// with SysV SHM the entire memory region must be allocated as shared
  15.153 +// memory.
  15.154  bool os::can_commit_large_page_memory() {
  15.155 -  return false;
  15.156 +  return UseHugeTLBFS;
  15.157  }
  15.158  
  15.159  bool os::can_execute_large_page_memory() {
  15.160 -  return false;
  15.161 +  return UseHugeTLBFS;
  15.162  }
  15.163  
  15.164  // Reserve memory at an arbitrary address, only if that area is
  15.165 @@ -4090,6 +4170,23 @@
  15.166          UseNUMA = false;
  15.167        }
  15.168      }
   15.169 +    // With SHM large pages we cannot uncommit a page, so there's no way
   15.170 +    // to make the adaptive lgrp chunk resizing work. If the user specified
   15.171 +    // both UseNUMA and UseLargePages (or UseSHM) on the command line, warn
   15.172 +    // and disable adaptive resizing.
  15.173 +    if (UseNUMA && UseLargePages && UseSHM) {
  15.174 +      if (!FLAG_IS_DEFAULT(UseNUMA)) {
  15.175 +        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseSHM)) {
  15.176 +          UseLargePages = false;
  15.177 +        } else {
  15.178 +          warning("UseNUMA is not fully compatible with SHM large pages, disabling adaptive resizing");
  15.179 +          UseAdaptiveSizePolicy = false;
  15.180 +          UseAdaptiveNUMAChunkSizing = false;
  15.181 +        }
  15.182 +      } else {
  15.183 +        UseNUMA = false;
  15.184 +      }
  15.185 +    }
  15.186      if (!UseNUMA && ForceNUMA) {
  15.187        UseNUMA = true;
  15.188      }
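
The trailing hunk encodes a three-way compromise between `UseNUMA` and SHM-backed large pages: an implicit `UseNUMA` silently yields, implicitly enabled large pages yield to an explicit `UseNUMA`, and when both were requested explicitly the VM keeps them but gives up adaptive lgrp chunk resizing. A condensed sketch of just that decision tree (plain structs standing in for VM flags, not HotSpot code):

```cpp
#include <cassert>

// Condensed sketch of the UseNUMA / SHM-large-pages reconciliation above.
struct Flag { bool value; bool is_default; };

static void reconcile(Flag& numa, Flag& large_pages, Flag& shm,
                      bool& adaptive_resizing) {
  if (numa.value && large_pages.value && shm.value) {
    if (numa.is_default) {
      numa.value = false;            // NUMA was implicit: drop it
    } else if (large_pages.is_default && shm.is_default) {
      large_pages.value = false;     // large pages were implicit: drop them
    } else {
      adaptive_resizing = false;     // both explicit: keep both, but give up
    }                                // adaptive chunk resizing (with a warning)
  }
}

int main() {
  Flag numa{true, false}, lp{true, false}, shm{true, false};
  bool adaptive = true;
  reconcile(numa, lp, shm, adaptive);
  assert(numa.value && lp.value && !adaptive);  // explicit flags both survive
  return 0;
}
```
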
    16.1 --- a/src/os/linux/vm/os_linux.hpp	Wed May 04 19:16:49 2011 -0400
    16.2 +++ b/src/os/linux/vm/os_linux.hpp	Wed May 04 23:10:58 2011 -0400
    16.3 @@ -86,6 +86,9 @@
    16.4  
    16.5    static void rebuild_cpu_to_node_map();
    16.6    static GrowableArray<int>* cpu_to_node()    { return _cpu_to_node; }
    16.7 +
    16.8 +  static bool hugetlbfs_sanity_check(bool warn, size_t page_size);
    16.9 +
   16.10   public:
   16.11    static void init_thread_fpu_state();
   16.12    static int  get_fpu_control_word();
    17.1 --- a/src/os/solaris/vm/os_solaris.cpp	Wed May 04 19:16:49 2011 -0400
    17.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Wed May 04 23:10:58 2011 -0400
    17.3 @@ -2826,7 +2826,9 @@
    17.4  void os::realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
    17.5    assert((intptr_t)addr % alignment_hint == 0, "Address should be aligned.");
    17.6    assert((intptr_t)(addr + bytes) % alignment_hint == 0, "End should be aligned.");
    17.7 -  Solaris::set_mpss_range(addr, bytes, alignment_hint);
    17.8 +  if (UseLargePages && UseMPSS) {
    17.9 +    Solaris::set_mpss_range(addr, bytes, alignment_hint);
   17.10 +  }
   17.11  }
   17.12  
   17.13  // Tell the OS to make the range local to the first-touching LWP
   17.14 @@ -5044,6 +5046,20 @@
   17.15          UseNUMA = false;
   17.16        }
   17.17      }
   17.18 +    // ISM is not compatible with the NUMA allocator - it always allocates
   17.19 +    // pages round-robin across the lgroups.
   17.20 +    if (UseNUMA && UseLargePages && UseISM) {
   17.21 +      if (!FLAG_IS_DEFAULT(UseNUMA)) {
   17.22 +        if (FLAG_IS_DEFAULT(UseLargePages) && FLAG_IS_DEFAULT(UseISM)) {
   17.23 +          UseLargePages = false;
   17.24 +        } else {
   17.25 +          warning("UseNUMA is not compatible with ISM large pages, disabling NUMA allocator");
   17.26 +          UseNUMA = false;
   17.27 +        }
   17.28 +      } else {
   17.29 +        UseNUMA = false;
   17.30 +      }
   17.31 +    }
   17.32      if (!UseNUMA && ForceNUMA) {
   17.33        UseNUMA = true;
   17.34      }
    18.1 --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed May 04 19:16:49 2011 -0400
    18.2 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp	Wed May 04 23:10:58 2011 -0400
    18.3 @@ -232,14 +232,7 @@
    18.4    }
    18.5  
    18.6    // compute size of arguments
    18.7 -  int arg_size = target->arg_size();
    18.8 -  if (code == Bytecodes::_invokedynamic) {
    18.9 -    assert(!target->is_static(), "receiver explicit in method");
   18.10 -    arg_size--;  // implicit, not really on stack
   18.11 -  }
   18.12 -  if (!target->is_loaded() && code == Bytecodes::_invokestatic) {
   18.13 -    arg_size--;
   18.14 -  }
   18.15 +  int arg_size = target->invoke_arg_size(code);
   18.16    int arg_base = MAX2(state._stack_height - arg_size, 0);
   18.17  
   18.18    // direct recursive calls are skipped if they can be bound statically without introducing
    19.1 --- a/src/share/vm/ci/ciEnv.cpp	Wed May 04 19:16:49 2011 -0400
    19.2 +++ b/src/share/vm/ci/ciEnv.cpp	Wed May 04 23:10:58 2011 -0400
    19.3 @@ -756,7 +756,7 @@
    19.4    assert(bc == Bytecodes::_invokedynamic, "must be invokedynamic");
    19.5  
    19.6    bool is_resolved = cpool->cache()->main_entry_at(index)->is_resolved(bc);
    19.7 -  if (is_resolved && (oop) cpool->cache()->secondary_entry_at(index)->f1() == NULL)
    19.8 +  if (is_resolved && cpool->cache()->secondary_entry_at(index)->is_f1_null())
    19.9      // FIXME: code generation could allow for null (unlinked) call site
   19.10      is_resolved = false;
   19.11  
   19.12 @@ -770,7 +770,7 @@
   19.13  
   19.14    // Get the invoker methodOop from the constant pool.
   19.15    oop f1_value = cpool->cache()->main_entry_at(index)->f1();
   19.16 -  methodOop signature_invoker = methodOop(f1_value);
   19.17 +  methodOop signature_invoker = (methodOop) f1_value;
   19.18    assert(signature_invoker != NULL && signature_invoker->is_method() && signature_invoker->is_method_handle_invoke(),
   19.19           "correct result from LinkResolver::resolve_invokedynamic");
   19.20  
    20.1 --- a/src/share/vm/ci/ciMethod.hpp	Wed May 04 19:16:49 2011 -0400
    20.2 +++ b/src/share/vm/ci/ciMethod.hpp	Wed May 04 23:10:58 2011 -0400
    20.3 @@ -127,7 +127,24 @@
    20.4    ciSignature* signature() const                 { return _signature; }
    20.5    ciType*      return_type() const               { return _signature->return_type(); }
    20.6    int          arg_size_no_receiver() const      { return _signature->size(); }
    20.7 -  int          arg_size() const                  { return _signature->size() + (_flags.is_static() ? 0 : 1); }
    20.8 +  // Can only be used on loaded ciMethods
    20.9 +  int          arg_size() const                  {
   20.10 +    check_is_loaded();
   20.11 +    return _signature->size() + (_flags.is_static() ? 0 : 1);
   20.12 +  }
    20.13 +  // Report the number of elements on the stack when invoking this method.
    20.14 +  // This is different from the regular arg_size because invokedynamic
   20.15 +  // has an implicit receiver.
   20.16 +  int invoke_arg_size(Bytecodes::Code code) const {
   20.17 +    int arg_size = _signature->size();
   20.18 +    // Add a receiver argument, maybe:
   20.19 +    if (code != Bytecodes::_invokestatic &&
   20.20 +        code != Bytecodes::_invokedynamic) {
   20.21 +      arg_size++;
   20.22 +    }
   20.23 +    return arg_size;
   20.24 +  }
   20.25 +
   20.26  
   20.27    // Method code and related information.
   20.28    address code()                                 { if (_code == NULL) load_code(); return _code; }
   20.29 @@ -276,9 +293,9 @@
   20.30    void print_short_name(outputStream* st = tty);
   20.31  
   20.32    methodOop get_method_handle_target() {
   20.33 -    klassOop receiver_limit_oop = NULL;
   20.34 -    int flags = 0;
   20.35 -    return MethodHandles::decode_method(get_oop(), receiver_limit_oop, flags);
   20.36 +    KlassHandle receiver_limit; int flags = 0;
   20.37 +    methodHandle m = MethodHandles::decode_method(get_oop(), receiver_limit, flags);
   20.38 +    return m();
   20.39    }
   20.40  };
   20.41  
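
The new `invoke_arg_size` counts operand-stack slots at the call site rather than in the callee: invokestatic has no receiver at all, and invokedynamic's receiver is implicit rather than on the stack. Here is a hypothetical standalone sketch of the same counting rule (not the ci API; slot counts follow the JVM convention that long/double occupy two slots):

```cpp
#include <cassert>

// Hypothetical sketch of the stack-slot counting rule behind
// ciMethod::invoke_arg_size(): the receiver occupies a slot for every
// invoke bytecode except invokestatic and invokedynamic.
enum class Invoke { kStatic, kSpecial, kVirtual, kInterface, kDynamic };

static int invoke_arg_size(int signature_slots, Invoke code) {
  int size = signature_slots;            // declared parameters only
  if (code != Invoke::kStatic && code != Invoke::kDynamic)
    size++;                              // implicit receiver on the stack
  return size;
}

int main() {
  // (ILjava/lang/String;)V has 2 argument slots.
  assert(invoke_arg_size(2, Invoke::kVirtual) == 3);  // receiver + 2 args
  assert(invoke_arg_size(2, Invoke::kStatic)  == 2);
  assert(invoke_arg_size(2, Invoke::kDynamic) == 2);  // no receiver slot
  return 0;
}
```
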
    21.1 --- a/src/share/vm/ci/ciObject.cpp	Wed May 04 19:16:49 2011 -0400
    21.2 +++ b/src/share/vm/ci/ciObject.cpp	Wed May 04 23:10:58 2011 -0400
    21.3 @@ -194,6 +194,16 @@
    21.4  // ciObject::should_be_constant()
    21.5  bool ciObject::should_be_constant() {
    21.6    if (ScavengeRootsInCode >= 2)  return true;  // force everybody to be a constant
    21.7 +  if (!JavaObjectsInPerm && !is_null_object()) {
    21.8 +    // We want Strings and Classes to be embeddable by default since
    21.9 +    // they used to be in the perm world.  Not all Strings used to be
   21.10 +    // embeddable but there's no easy way to distinguish the interned
    21.11 +    // from the regular ones, so just treat them all that way.
   21.12 +    ciEnv* env = CURRENT_ENV;
   21.13 +    if (klass() == env->String_klass() || klass() == env->Class_klass()) {
   21.14 +      return true;
   21.15 +    }
   21.16 +  }
   21.17    return handle() == NULL || !is_scavengable();
   21.18  }
   21.19  
    22.1 --- a/src/share/vm/compiler/compileBroker.cpp	Wed May 04 19:16:49 2011 -0400
    22.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Wed May 04 23:10:58 2011 -0400
    22.3 @@ -976,6 +976,15 @@
    22.4      return;
    22.5    }
    22.6  
    22.7 +  // If the requesting thread is holding the pending list lock
    22.8 +  // then we just return. We can't risk blocking while holding
    22.9 +  // the pending list lock or a 3-way deadlock may occur
   22.10 +  // between the reference handler thread, a GC (instigated
   22.11 +  // by a compiler thread), and compiled method registration.
   22.12 +  if (instanceRefKlass::owns_pending_list_lock(JavaThread::current())) {
   22.13 +    return;
   22.14 +  }
   22.15 +
   22.16    // Outputs from the following MutexLocker block:
   22.17    CompileTask* task     = NULL;
   22.18    bool         blocking = false;
   22.19 @@ -1304,17 +1313,8 @@
   22.20  // Should the current thread be blocked until this compilation request
   22.21  // has been fulfilled?
   22.22  bool CompileBroker::is_compile_blocking(methodHandle method, int osr_bci) {
   22.23 -  if (!BackgroundCompilation) {
   22.24 -    Symbol* class_name = method->method_holder()->klass_part()->name();
   22.25 -    if (class_name->starts_with("java/lang/ref/Reference", 23)) {
   22.26 -      // The reference handler thread can dead lock with the GC if compilation is blocking,
   22.27 -      // so we avoid blocking compiles for anything in the java.lang.ref.Reference class,
   22.28 -      // including inner classes such as ReferenceHandler.
   22.29 -      return false;
   22.30 -    }
   22.31 -    return true;
   22.32 -  }
   22.33 -  return false;
   22.34 +  assert(!instanceRefKlass::owns_pending_list_lock(JavaThread::current()), "possible deadlock");
   22.35 +  return !BackgroundCompilation;
   22.36  }
   22.37  
   22.38  
    23.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed May 04 19:16:49 2011 -0400
    23.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp	Wed May 04 23:10:58 2011 -0400
    23.3 @@ -1963,10 +1963,21 @@
    23.4  // Iteration support, mostly delegated from a CMS generation
    23.5  
    23.6  void CompactibleFreeListSpace::save_marks() {
    23.7 -  // mark the "end" of the used space at the time of this call;
    23.8 +  assert(Thread::current()->is_VM_thread(),
    23.9 +         "Global variable should only be set when single-threaded");
   23.10 +  // Mark the "end" of the used space at the time of this call;
   23.11    // note, however, that promoted objects from this point
   23.12    // on are tracked in the _promoInfo below.
   23.13    set_saved_mark_word(unallocated_block());
   23.14 +#ifdef ASSERT
   23.15 +  // Check the sanity of save_marks() etc.
   23.16 +  MemRegion ur    = used_region();
   23.17 +  MemRegion urasm = used_region_at_save_marks();
   23.18 +  assert(ur.contains(urasm),
   23.19 +         err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
   23.20 +                 " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
   23.21 +                 ur.start(), ur.end(), urasm.start(), urasm.end()));
   23.22 +#endif
   23.23    // inform allocator that promotions should be tracked.
   23.24    assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
   23.25    _promoInfo.startTrackingPromotions();
    24.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 04 19:16:49 2011 -0400
    24.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed May 04 23:10:58 2011 -0400
    24.3 @@ -3189,10 +3189,9 @@
    24.4  }
    24.5  
    24.6  void CMSCollector::setup_cms_unloading_and_verification_state() {
    24.7 -  const  bool should_verify =    VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
    24.8 +  const  bool should_verify =   VerifyBeforeGC || VerifyAfterGC || VerifyDuringGC
    24.9                               || VerifyBeforeExit;
   24.10 -  const  int  rso           =    SharedHeap::SO_Symbols | SharedHeap::SO_Strings
   24.11 -                             |   SharedHeap::SO_CodeCache;
   24.12 +  const  int  rso           =   SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
   24.13  
   24.14    if (should_unload_classes()) {   // Should unload classes this cycle
   24.15      remove_root_scanning_option(rso);  // Shrink the root set appropriately
    25.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 04 19:16:49 2011 -0400
    25.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 04 23:10:58 2011 -0400
    25.3 @@ -1161,6 +1161,7 @@
    25.4      TraceTime t(system_gc ? "Full GC (System.gc())" : "Full GC",
    25.5                  PrintGC, true, gclog_or_tty);
    25.6  
    25.7 +    TraceCollectorStats tcs(g1mm()->full_collection_counters());
    25.8      TraceMemoryManagerStats tms(true /* fullGC */);
    25.9  
   25.10      double start = os::elapsedTime();
   25.11 @@ -1339,6 +1340,7 @@
   25.12    if (PrintHeapAtGC) {
   25.13      Universe::print_heap_after_gc();
   25.14    }
   25.15 +  g1mm()->update_counters();
   25.16  
   25.17    return true;
   25.18  }
   25.19 @@ -1971,6 +1973,10 @@
   25.20  
   25.21    init_mutator_alloc_region();
   25.22  
    25.23 +  // Create the monitoring and management support now, so that the
    25.24 +  // values it reads from the heap have been properly initialized.
   25.25 +  _g1mm = new G1MonitoringSupport(this, &_g1_storage);
   25.26 +
   25.27    return JNI_OK;
   25.28  }
   25.29  
   25.30 @@ -2113,6 +2119,28 @@
   25.31       (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent));
   25.32  }
   25.33  
   25.34 +#ifndef PRODUCT
   25.35 +void G1CollectedHeap::allocate_dummy_regions() {
   25.36 +  // Let's fill up most of the region
   25.37 +  size_t word_size = HeapRegion::GrainWords - 1024;
   25.38 +  // And as a result the region we'll allocate will be humongous.
   25.39 +  guarantee(isHumongous(word_size), "sanity");
   25.40 +
   25.41 +  for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
   25.42 +    // Let's use the existing mechanism for the allocation
   25.43 +    HeapWord* dummy_obj = humongous_obj_allocate(word_size);
   25.44 +    if (dummy_obj != NULL) {
   25.45 +      MemRegion mr(dummy_obj, word_size);
   25.46 +      CollectedHeap::fill_with_object(mr);
   25.47 +    } else {
   25.48 +      // If we can't allocate once, we probably cannot allocate
   25.49 +      // again. Let's get out of the loop.
   25.50 +      break;
   25.51 +    }
   25.52 +  }
   25.53 +}
   25.54 +#endif // !PRODUCT
   25.55 +
   25.56  void G1CollectedHeap::increment_full_collections_completed(bool concurrent) {
   25.57    MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
   25.58  
   25.59 @@ -2777,17 +2805,26 @@
   25.60                               bool silent,
   25.61                               bool use_prev_marking) {
   25.62    if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
   25.63 -    if (!silent) { gclog_or_tty->print("roots "); }
   25.64 +    if (!silent) { gclog_or_tty->print("Roots (excluding permgen) "); }
   25.65      VerifyRootsClosure rootsCl(use_prev_marking);
   25.66      CodeBlobToOopClosure blobsCl(&rootsCl, /*do_marking=*/ false);
   25.67 -    process_strong_roots(true,  // activate StrongRootsScope
   25.68 -                         false,
   25.69 -                         SharedHeap::SO_AllClasses,
   25.70 +    // We apply the relevant closures to all the oops in the
   25.71 +    // system dictionary, the string table and the code cache.
   25.72 +    const int so = SharedHeap::SO_AllClasses | SharedHeap::SO_Strings | SharedHeap::SO_CodeCache;
   25.73 +    process_strong_roots(true,      // activate StrongRootsScope
   25.74 +                         true,      // we set "collecting perm gen" to true,
   25.75 +                                    // so we don't reset the dirty cards in the perm gen.
   25.76 +                         SharedHeap::ScanningOption(so),  // roots scanning options
   25.77                           &rootsCl,
   25.78                           &blobsCl,
   25.79                           &rootsCl);
   25.80 +    // Since we used "collecting_perm_gen" == true above, we will not have
   25.81 +    // checked the refs from perm into the G1-collected heap. We check those
   25.82 +    // references explicitly below. Whether the relevant cards are dirty
   25.83 +    // is checked further below in the rem set verification.
   25.84 +    if (!silent) { gclog_or_tty->print("Permgen roots "); }
   25.85 +    perm_gen()->oop_iterate(&rootsCl);
   25.86      bool failures = rootsCl.failures();
   25.87 -    rem_set()->invalidate(perm_gen()->used_region(), false);
   25.88      if (!silent) { gclog_or_tty->print("HeapRegionSets "); }
   25.89      verify_region_sets();
   25.90      if (!silent) { gclog_or_tty->print("HeapRegions "); }
   25.91 @@ -3164,6 +3201,7 @@
   25.92      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   25.93      TraceTime t(verbose_str, PrintGC && !PrintGCDetails, true, gclog_or_tty);
   25.94  
   25.95 +    TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
   25.96      TraceMemoryManagerStats tms(false /* fullGC */);
   25.97  
   25.98      // If the secondary_free_list is not empty, append it to the
   25.99 @@ -3338,6 +3376,8 @@
  25.100          doConcurrentMark();
  25.101        }
  25.102  
  25.103 +      allocate_dummy_regions();
  25.104 +
  25.105  #if YOUNG_LIST_VERBOSE
  25.106        gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  25.107        _young_list->print();
  25.108 @@ -3401,6 +3441,8 @@
  25.109    if (PrintHeapAtGC) {
  25.110      Universe::print_heap_after_gc();
  25.111    }
  25.112 +  g1mm()->update_counters();
  25.113 +
  25.114    if (G1SummarizeRSetStats &&
  25.115        (G1SummarizeRSetStatsPeriod > 0) &&
  25.116        (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
  25.117 @@ -5314,6 +5356,7 @@
  25.118      if (new_alloc_region != NULL) {
  25.119        g1_policy()->update_region_num(true /* next_is_young */);
  25.120        set_region_short_lived_locked(new_alloc_region);
  25.121 +      g1mm()->update_eden_counters();
  25.122        return new_alloc_region;
  25.123      }
  25.124    }
    26.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 04 19:16:49 2011 -0400
    26.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 04 23:10:58 2011 -0400
    26.3 @@ -28,7 +28,9 @@
    26.4  #include "gc_implementation/g1/concurrentMark.hpp"
    26.5  #include "gc_implementation/g1/g1AllocRegion.hpp"
    26.6  #include "gc_implementation/g1/g1RemSet.hpp"
    26.7 +#include "gc_implementation/g1/g1MonitoringSupport.hpp"
    26.8  #include "gc_implementation/g1/heapRegionSets.hpp"
    26.9 +#include "gc_implementation/shared/hSpaceCounters.hpp"
   26.10  #include "gc_implementation/parNew/parGCAllocBuffer.hpp"
   26.11  #include "memory/barrierSet.hpp"
   26.12  #include "memory/memRegion.hpp"
   26.13 @@ -57,6 +59,7 @@
   26.14  class ConcurrentMark;
   26.15  class ConcurrentMarkThread;
   26.16  class ConcurrentG1Refine;
   26.17 +class GenerationCounters;
   26.18  
   26.19  typedef OverflowTaskQueue<StarTask>         RefToScanQueue;
   26.20  typedef GenericTaskQueueSet<RefToScanQueue> RefToScanQueueSet;
   26.21 @@ -236,6 +239,9 @@
   26.22    // current collection.
   26.23    HeapRegion* _gc_alloc_region_list;
   26.24  
   26.25 +  // Helper for monitoring and management support.
   26.26 +  G1MonitoringSupport* _g1mm;
   26.27 +
   26.28    // Determines PLAB size for a particular allocation purpose.
   26.29    static size_t desired_plab_sz(GCAllocPurpose purpose);
   26.30  
   26.31 @@ -298,6 +304,14 @@
   26.32    // started is maintained in _total_full_collections in CollectedHeap.
   26.33    volatile unsigned int _full_collections_completed;
   26.34  
   26.35 +  // This is a non-product method that is helpful for testing. It is
   26.36 +  // called at the end of a GC and artificially expands the heap by
   26.37 +  // allocating a number of dead regions. This way we can induce very
   26.38 +  // frequent marking cycles and stress the cleanup / concurrent
   26.39 +  // cleanup code more (as all the regions that will be allocated by
   26.40 +  // this method will be found dead by the marking cycle).
   26.41 +  void allocate_dummy_regions() PRODUCT_RETURN;
   26.42 +
   26.43    // These are macros so that, if the assert fires, we get the correct
   26.44    // line number, file, etc.
   26.45  
   26.46 @@ -542,6 +556,9 @@
   26.47    HeapWord* expand_and_allocate(size_t word_size);
   26.48  
   26.49  public:
   26.50 +
   26.51 +  G1MonitoringSupport* g1mm() { return _g1mm; }
   26.52 +
   26.53    // Expand the garbage-first heap by at least the given size (in bytes!).
   26.54    // Returns true if the heap was expanded by the requested amount;
   26.55    // false otherwise.
    27.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    27.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.cpp	Wed May 04 23:10:58 2011 -0400
    27.3 @@ -0,0 +1,178 @@
    27.4 +/*
    27.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    27.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.7 + *
    27.8 + * This code is free software; you can redistribute it and/or modify it
    27.9 + * under the terms of the GNU General Public License version 2 only, as
   27.10 + * published by the Free Software Foundation.
   27.11 + *
   27.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   27.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   27.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   27.15 + * version 2 for more details (a copy is included in the LICENSE file that
   27.16 + * accompanied this code).
   27.17 + *
   27.18 + * You should have received a copy of the GNU General Public License version
   27.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   27.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   27.21 + *
   27.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   27.23 + * or visit www.oracle.com if you need additional information or have any
   27.24 + * questions.
   27.25 + *
   27.26 + */
   27.27 +
   27.28 +#include "precompiled.hpp"
   27.29 +#include "gc_implementation/g1/g1MonitoringSupport.hpp"
   27.30 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
   27.31 +#include "gc_implementation/g1/g1CollectorPolicy.hpp"
   27.32 +
   27.33 +G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
   27.34 +                                         VirtualSpace* g1_storage_addr) :
   27.35 +  _g1h(g1h),
   27.36 +  _incremental_collection_counters(NULL),
   27.37 +  _full_collection_counters(NULL),
   27.38 +  _non_young_collection_counters(NULL),
   27.39 +  _old_space_counters(NULL),
   27.40 +  _young_collection_counters(NULL),
   27.41 +  _eden_counters(NULL),
   27.42 +  _from_counters(NULL),
   27.43 +  _to_counters(NULL),
   27.44 +  _g1_storage_addr(g1_storage_addr)
   27.45 +{
   27.46 +  // Counters for GC collections
   27.47 +  //
   27.48 +  //  name "collector.0".  In a generational collector this would be the
   27.49 +  // young generation collection.
   27.50 +  _incremental_collection_counters =
   27.51 +    new CollectorCounters("G1 incremental collections", 0);
   27.52 +  //   name "collector.1".  In a generational collector this would be the
   27.53 +  // old generation collection.
   27.54 +  _full_collection_counters =
   27.55 +    new CollectorCounters("G1 stop-the-world full collections", 1);
   27.56 +
    27.57 +  // Timer sampling for all counters that support sampling updates
    27.58 +  // only the used value.  See the take_sample() method.  G1 requires
    27.59 +  // both used and capacity to be updated, so sampling is not currently
    27.60 +  // used.  It might be sufficient to update all counters in
    27.61 +  // take_sample() even though take_sample() only returns "used".  When
    27.62 +  // sampling was used, some anomalous values were emitted, which may
    27.63 +  // have been the consequence of not updating all values simultaneously
    27.64 +  // (e.g., see the calculation done in eden_space_used(): is it
    27.65 +  // possible that the values used to calculate eden_used or
    27.66 +  // survivor_used are being updated by the collector mid-sample?).
   27.67 +  const bool sampled = false;
   27.68 +
   27.69 +  // "Generation" and "Space" counters.
   27.70 +  //
    27.71 +  //  name "generation.1".  This is logically the old generation in
    27.72 +  // generational GC terms.  The "1, 1" parameters are for
    27.73 +  // the n-th generation (=1) with 1 space.
    27.74 +  // Counters are created from minCapacity, maxCapacity, and capacity.
   27.75 +  _non_young_collection_counters =
   27.76 +    new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
   27.77 +
   27.78 +  //  name  "generation.1.space.0"
   27.79 +  // Counters are created from maxCapacity, capacity, initCapacity,
   27.80 +  // and used.
   27.81 +  _old_space_counters = new HSpaceCounters("space", 0,
   27.82 +    _g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
   27.83 +
   27.84 +  //   Young collection set
   27.85 +  //  name "generation.0".  This is logically the young generation.
    27.86 +  //  The "0, 3" are parameters for the n-th generation (=0) with 3 spaces.
   27.87 +  // See  _non_young_collection_counters for additional counters
   27.88 +  _young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
   27.89 +
    27.90 +  // TODO: replace max_heap_byte_size() with the maximum young gen
    27.91 +  // size for G1CollectedHeap.
   27.92 +  //  name "generation.0.space.0"
   27.93 +  // See _old_space_counters for additional counters
   27.94 +  _eden_counters = new HSpaceCounters("eden", 0,
   27.95 +    _g1h->max_capacity(), eden_space_committed(),
   27.96 +    _young_collection_counters);
   27.97 +
   27.98 +  //  name "generation.0.space.1"
   27.99 +  // See _old_space_counters for additional counters
  27.100 +  // Set the arguments to indicate that this survivor space is not used.
  27.101 +  _from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
  27.102 +    _young_collection_counters);
  27.103 +
  27.104 +  //  name "generation.0.space.2"
  27.105 +  // See _old_space_counters for additional counters
  27.106 +  _to_counters = new HSpaceCounters("s1", 2,
  27.107 +    _g1h->max_capacity(),
  27.108 +    survivor_space_committed(),
  27.109 +    _young_collection_counters);
  27.110 +}
  27.111 +
  27.112 +size_t G1MonitoringSupport::overall_committed() {
  27.113 +  return g1h()->capacity();
  27.114 +}
  27.115 +
  27.116 +size_t G1MonitoringSupport::overall_used() {
  27.117 +  return g1h()->used_unlocked();
  27.118 +}
  27.119 +
  27.120 +size_t G1MonitoringSupport::eden_space_committed() {
  27.121 +  return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
  27.122 +}
  27.123 +
  27.124 +size_t G1MonitoringSupport::eden_space_used() {
  27.125 +  size_t young_list_length = g1h()->young_list()->length();
  27.126 +  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
  27.127 +  size_t survivor_used = survivor_space_used();
  27.128 +  eden_used = subtract_up_to_zero(eden_used, survivor_used);
  27.129 +  return eden_used;
  27.130 +}
  27.131 +
  27.132 +size_t G1MonitoringSupport::survivor_space_committed() {
  27.133 +  return MAX2(survivor_space_used(),
  27.134 +              (size_t) HeapRegion::GrainBytes);
  27.135 +}
  27.136 +
  27.137 +size_t G1MonitoringSupport::survivor_space_used() {
  27.138 +  size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
  27.139 +  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
  27.140 +  return survivor_used;
  27.141 +}
  27.142 +
  27.143 +size_t G1MonitoringSupport::old_space_committed() {
  27.144 +  size_t committed = overall_committed();
  27.145 +  size_t eden_committed = eden_space_committed();
  27.146 +  size_t survivor_committed = survivor_space_committed();
  27.147 +  committed = subtract_up_to_zero(committed, eden_committed);
  27.148 +  committed = subtract_up_to_zero(committed, survivor_committed);
  27.149 +  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
  27.150 +  return committed;
  27.151 +}
  27.152 +
  27.153 +// See the comment near the top of g1MonitoringSupport.hpp for
  27.154 +// an explanation of these calculations for "used" and "capacity".
  27.155 +size_t G1MonitoringSupport::old_space_used() {
  27.156 +  size_t used = overall_used();
  27.157 +  size_t eden_used = eden_space_used();
  27.158 +  size_t survivor_used = survivor_space_used();
  27.159 +  used = subtract_up_to_zero(used, eden_used);
  27.160 +  used = subtract_up_to_zero(used, survivor_used);
  27.161 +  return used;
  27.162 +}
  27.163 +
  27.164 +void G1MonitoringSupport::update_counters() {
  27.165 +  if (UsePerfData) {
  27.166 +    eden_counters()->update_capacity(eden_space_committed());
  27.167 +    eden_counters()->update_used(eden_space_used());
  27.168 +    to_counters()->update_capacity(survivor_space_committed());
  27.169 +    to_counters()->update_used(survivor_space_used());
  27.170 +    old_space_counters()->update_capacity(old_space_committed());
  27.171 +    old_space_counters()->update_used(old_space_used());
  27.172 +    non_young_collection_counters()->update_all();
  27.173 +  }
  27.174 +}
  27.175 +
  27.176 +void G1MonitoringSupport::update_eden_counters() {
  27.177 +  if (UsePerfData) {
  27.178 +    eden_counters()->update_capacity(eden_space_committed());
  27.179 +    eden_counters()->update_used(eden_space_used());
  27.180 +  }
  27.181 +}
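To make the pool arithmetic implemented by eden_space_used(), survivor_space_used() and old_space_used() above concrete, here is a minimal standalone sketch with made-up region counts; region_size stands in for HeapRegion::GrainBytes and the clamped subtraction mirrors subtract_up_to_zero() (not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    static size_t subtract_up_to_zero(size_t x, size_t y) {
      return x > y ? x - y : 0;   // clamp instead of letting size_t wrap
    }

    int main() {
      const size_t region_size      = 1024 * 1024;  // assumed 1 MB regions
      const size_t young_regions    = 10;           // eden + survivors
      const size_t survivor_regions = 2;
      const size_t overall_used     = 50 * region_size;

      size_t survivor_used = survivor_regions * region_size;            // 2 MB
      size_t eden_used     = subtract_up_to_zero(young_regions * region_size,
                                                 survivor_used);        // 8 MB
      size_t old_used      = subtract_up_to_zero(
                               subtract_up_to_zero(overall_used, eden_used),
                               survivor_used);                          // 40 MB
      printf("eden=%zu survivor=%zu old=%zu\n",
             eden_used, survivor_used, old_used);
      return 0;
    }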
    28.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    28.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Wed May 04 23:10:58 2011 -0400
    28.3 @@ -0,0 +1,203 @@
    28.4 +/*
    28.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    28.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.7 + *
    28.8 + * This code is free software; you can redistribute it and/or modify it
    28.9 + * under the terms of the GNU General Public License version 2 only, as
   28.10 + * published by the Free Software Foundation.
   28.11 + *
   28.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   28.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   28.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   28.15 + * version 2 for more details (a copy is included in the LICENSE file that
   28.16 + * accompanied this code).
   28.17 + *
   28.18 + * You should have received a copy of the GNU General Public License version
   28.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   28.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   28.21 + *
   28.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   28.23 + * or visit www.oracle.com if you need additional information or have any
   28.24 + * questions.
   28.25 + *
   28.26 + */
   28.27 +
   28.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
   28.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
   28.30 +
   28.31 +#include "gc_implementation/shared/hSpaceCounters.hpp"
   28.32 +
   28.33 +class G1CollectedHeap;
   28.34 +class G1SpaceMonitoringSupport;
   28.35 +
   28.36 +// Class for monitoring logical spaces in G1.
   28.37 +// G1 defines a set of regions as a young
   28.38 +// collection (analogous to a young generation).
   28.39 +// The young collection is a logical generation
   28.40 +// with no fixed chunk (see space.hpp) reflecting
   28.41 +// the address space for the generation.  In addition
   28.42 +// to the young collection there is its complement
   28.43 +// the non-young collection that is simply the regions
   28.44 +// not in the young collection.  The non-young collection
   28.45 +// is treated here as a logical old generation only
   28.46 +// because the monitoring tools expect a generational
    28.47 +// heap.  The monitoring tools expect that a Space
    28.48 +// (see space.hpp) exists that describes the
    28.49 +// address space of the young collection and non-young
    28.50 +// collection, and such a view is provided here.
   28.51 +//
   28.52 +// This class provides interfaces to access
   28.53 +// the value of variables for the young collection
   28.54 +// that include the "capacity" and "used" of the
   28.55 +// young collection along with constant values
   28.56 +// for the minimum and maximum capacities for
   28.57 +// the logical spaces.  Similarly for the non-young
   28.58 +// collection.
   28.59 +//
    28.60 +// Also provided are counters for G1 concurrent collections
    28.61 +// and stop-the-world full heap collections.
    28.62 +//
    28.63 +// Below is a description of how "used" and "capacity"
    28.64 +// (or committed) are calculated for the logical spaces.
   28.65 +//
   28.66 +// 1) The used space calculation for a pool is not necessarily
   28.67 +// independent of the others. We can easily get from G1 the overall
   28.68 +// used space in the entire heap, the number of regions in the young
   28.69 +// generation (includes both eden and survivors), and the number of
   28.70 +// survivor regions. So, from that we calculate:
   28.71 +//
   28.72 +//  survivor_used = survivor_num * region_size
   28.73 +//  eden_used     = young_region_num * region_size - survivor_used
   28.74 +//  old_gen_used  = overall_used - eden_used - survivor_used
   28.75 +//
   28.76 +// Note that survivor_used and eden_used are upper bounds. To get the
   28.77 +// actual value we would have to iterate over the regions and add up
   28.78 +// ->used(). But that'd be expensive. So, we'll accept some lack of
   28.79 +// accuracy for those two. But, we have to be careful when calculating
   28.80 +// old_gen_used, in case we subtract from overall_used more than the
   28.81 +// actual number and our result goes negative.
   28.82 +//
   28.83 +// 2) Calculating the used space is straightforward, as described
   28.84 +// above. However, how do we calculate the committed space, given that
   28.85 +// we allocate space for the eden, survivor, and old gen out of the
   28.86 +// same pool of regions? One way to do this is to use the used value
   28.87 +// as also the committed value for the eden and survivor spaces and
   28.88 +// then calculate the old gen committed space as follows:
   28.89 +//
   28.90 +//  old_gen_committed = overall_committed - eden_committed - survivor_committed
   28.91 +//
   28.92 +// Maybe a better way to do that would be to calculate used for eden
   28.93 +// and survivor as a sum of ->used() over their regions and then
   28.94 +// calculate committed as region_num * region_size (i.e., what we use
   28.95 +// to calculate the used space now). This is something to consider
   28.96 +// in the future.
   28.97 +//
   28.98 +// 3) Another decision that is again not straightforward is what is
   28.99 +// the max size that each memory pool can grow to. One way to do this
  28.100 +// would be to use the committed size for the max for the eden and
  28.101 +// survivors and calculate the old gen max as follows (basically, it's
  28.102 +// a similar pattern to what we use for the committed space, as
  28.103 +// described above):
  28.104 +//
  28.105 +//  old_gen_max = overall_max - eden_max - survivor_max
  28.106 +//
  28.107 +// Unfortunately, the above makes the max of each pool fluctuate over
  28.108 +// time and, even though this is allowed according to the spec, it
  28.109 +// broke several assumptions in the M&M framework (there were cases
  28.110 +// where used would reach a value greater than max). So, for max we
  28.111 +// use -1, which means "undefined" according to the spec.
  28.112 +//
  28.113 +// 4) Now, there is a very subtle issue with all the above. The
  28.114 +// framework will call get_memory_usage() on the three pools
  28.115 +// asynchronously. As a result, each call might get a different value
  28.116 +// for, say, survivor_num which will yield inconsistent values for
  28.117 +// eden_used, survivor_used, and old_gen_used (as survivor_num is used
  28.118 +// in the calculation of all three). This would normally be
  28.119 +// ok. However, it's possible that this might cause the sum of
  28.120 +// eden_used, survivor_used, and old_gen_used to go over the max heap
   28.121 +// size, and this seems to sometimes cause JConsole (and maybe other
   28.122 +// clients) to get confused. There is not really an easy / clean
   28.123 +// solution to this problem, due to the asynchronous nature of the
   28.124 +// framework.
  28.125 +
  28.126 +class G1MonitoringSupport : public CHeapObj {
  28.127 +  G1CollectedHeap* _g1h;
  28.128 +  VirtualSpace* _g1_storage_addr;
  28.129 +
  28.130 +  // jstat performance counters
  28.131 +  //  incremental collections both fully and partially young
  28.132 +  CollectorCounters*   _incremental_collection_counters;
  28.133 +  //  full stop-the-world collections
  28.134 +  CollectorCounters*   _full_collection_counters;
  28.135 +  //  young collection set counters.  The _eden_counters,
  28.136 +  // _from_counters, and _to_counters are associated with
  28.137 +  // this "generational" counter.
  28.138 +  GenerationCounters*  _young_collection_counters;
  28.139 +  //  non-young collection set counters. The _old_space_counters
  28.140 +  // below are associated with this "generational" counter.
  28.141 +  GenerationCounters*  _non_young_collection_counters;
  28.142 +  // Counters for the capacity and used for
  28.143 +  //   the whole heap
  28.144 +  HSpaceCounters*      _old_space_counters;
  28.145 +  //   the young collection
  28.146 +  HSpaceCounters*      _eden_counters;
  28.147 +  //   the survivor collection (only one, _to_counters, is actively used)
  28.148 +  HSpaceCounters*      _from_counters;
  28.149 +  HSpaceCounters*      _to_counters;
  28.150 +
  28.151 +  // It returns x - y if x > y, 0 otherwise.
  28.152 +  // As described in the comment above, some of the inputs to the
  28.153 +  // calculations we have to do are obtained concurrently and hence
  28.154 +  // may be inconsistent with each other. So, this provides a
  28.155 +  // defensive way of performing the subtraction and avoids the value
  28.156 +  // going negative (which would mean a very large result, given that
  28.157 +  // the parameter are size_t).
  28.158 +  static size_t subtract_up_to_zero(size_t x, size_t y) {
  28.159 +    if (x > y) {
  28.160 +      return x - y;
  28.161 +    } else {
  28.162 +      return 0;
  28.163 +    }
  28.164 +  }
  28.165 +
  28.166 + public:
  28.167 +  G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
  28.168 +
  28.169 +  G1CollectedHeap* g1h() { return _g1h; }
  28.170 +  VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
  28.171 +
  28.172 +  // Performance Counter accessors
  28.173 +  void update_counters();
  28.174 +  void update_eden_counters();
  28.175 +
  28.176 +  CollectorCounters* incremental_collection_counters() {
  28.177 +    return _incremental_collection_counters;
  28.178 +  }
  28.179 +  CollectorCounters* full_collection_counters() {
  28.180 +    return _full_collection_counters;
  28.181 +  }
  28.182 +  GenerationCounters* non_young_collection_counters() {
  28.183 +    return _non_young_collection_counters;
  28.184 +  }
  28.185 +  HSpaceCounters*      old_space_counters() { return _old_space_counters; }
  28.186 +  HSpaceCounters*      eden_counters() { return _eden_counters; }
  28.187 +  HSpaceCounters*      from_counters() { return _from_counters; }
  28.188 +  HSpaceCounters*      to_counters() { return _to_counters; }
  28.189 +
  28.190 +  // Monitoring support used by
  28.191 +  //   MemoryService
  28.192 +  //   jstat counters
  28.193 +  size_t overall_committed();
  28.194 +  size_t overall_used();
  28.195 +
  28.196 +  size_t eden_space_committed();
  28.197 +  size_t eden_space_used();
  28.198 +
  28.199 +  size_t survivor_space_committed();
  28.200 +  size_t survivor_space_used();
  28.201 +
  28.202 +  size_t old_space_committed();
  28.203 +  size_t old_space_used();
  28.204 +};
  28.205 +
  28.206 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
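Point 4 of the comment block above is why subtract_up_to_zero() clamps at zero: the inputs are sampled asynchronously, and if a fresh subtrahend overtakes a stale minuend, a plain size_t subtraction wraps around to an enormous value. A standalone sketch of that failure mode, with made-up numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      size_t overall_used = 5;  // stale sample (in regions)
      size_t eden_used    = 7;  // fresh sample that outgrew the stale total
      size_t wrapped = overall_used - eden_used;  // wraps to 2^64 - 2 on LP64
      size_t clamped = (overall_used > eden_used) ? overall_used - eden_used
                                                  : 0;  // degrades safely
      printf("wrapped=%zu clamped=%zu\n", wrapped, clamped);
      return 0;
    }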
    29.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed May 04 19:16:49 2011 -0400
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp	Wed May 04 23:10:58 2011 -0400
    29.3 @@ -300,6 +300,11 @@
    29.4    develop(uintx, G1StressConcRegionFreeingDelayMillis, 0,                   \
    29.5            "Artificial delay during concurrent region freeing")              \
    29.6                                                                              \
    29.7 +  develop(uintx, G1DummyRegionsPerGC, 0,                                    \
    29.8 +          "The number of dummy regions G1 will allocate at the end of "     \
    29.9 +          "each evacuation pause in order to artificially fill up the "     \
   29.10 +          "heap and stress the marking implementation.")                    \
   29.11 +                                                                            \
   29.12    develop(bool, ReduceInitialCardMarksForG1, false,                         \
   29.13            "When ReduceInitialCardMarks is true, this flag setting "         \
   29.14            " controls whether G1 allows the RICM optimization")              \
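Note that develop() flags such as G1DummyRegionsPerGC are compile-time constants in product builds; they can only be set on the command line of a debug/fastdebug build. A hypothetical invocation to stress the marking code could look like:

    $ fastdebug/bin/java -XX:+UseG1GC -XX:G1DummyRegionsPerGC=4 MyApp

(MyApp and the build path are placeholders.)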
    30.1 --- a/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed May 04 19:16:49 2011 -0400
    30.2 +++ b/src/share/vm/gc_implementation/parNew/parCardTableModRefBS.cpp	Wed May 04 23:10:58 2011 -0400
    30.3 @@ -33,44 +33,43 @@
    30.4  #include "runtime/mutexLocker.hpp"
    30.5  #include "runtime/virtualspace.hpp"
    30.6  
    30.7 -void CardTableModRefBS::par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
    30.8 -                                                        DirtyCardToOopClosure* dcto_cl,
    30.9 -                                                        MemRegionClosure* cl,
   30.10 -                                                        int n_threads) {
   30.11 -  if (n_threads > 0) {
   30.12 -    assert((n_threads == 1 && ParallelGCThreads == 0) ||
   30.13 -           n_threads <= (int)ParallelGCThreads,
   30.14 -           "# worker threads != # requested!");
   30.15 -    // Make sure the LNC array is valid for the space.
   30.16 -    jbyte**   lowest_non_clean;
   30.17 -    uintptr_t lowest_non_clean_base_chunk_index;
   30.18 -    size_t    lowest_non_clean_chunk_size;
   30.19 -    get_LNC_array_for_space(sp, lowest_non_clean,
   30.20 -                            lowest_non_clean_base_chunk_index,
   30.21 -                            lowest_non_clean_chunk_size);
   30.22 +void CardTableModRefBS::non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
   30.23 +                                                             DirtyCardToOopClosure* dcto_cl,
   30.24 +                                                             ClearNoncleanCardWrapper* cl,
   30.25 +                                                             int n_threads) {
   30.26 +  assert(n_threads > 0, "Error: expected n_threads > 0");
   30.27 +  assert((n_threads == 1 && ParallelGCThreads == 0) ||
   30.28 +         n_threads <= (int)ParallelGCThreads,
   30.29 +         "# worker threads != # requested!");
   30.30 +  // Make sure the LNC array is valid for the space.
   30.31 +  jbyte**   lowest_non_clean;
   30.32 +  uintptr_t lowest_non_clean_base_chunk_index;
   30.33 +  size_t    lowest_non_clean_chunk_size;
   30.34 +  get_LNC_array_for_space(sp, lowest_non_clean,
   30.35 +                          lowest_non_clean_base_chunk_index,
   30.36 +                          lowest_non_clean_chunk_size);
   30.37  
   30.38 -    int n_strides = n_threads * StridesPerThread;
   30.39 -    SequentialSubTasksDone* pst = sp->par_seq_tasks();
   30.40 -    pst->set_n_threads(n_threads);
   30.41 -    pst->set_n_tasks(n_strides);
   30.42 +  int n_strides = n_threads * StridesPerThread;
   30.43 +  SequentialSubTasksDone* pst = sp->par_seq_tasks();
   30.44 +  pst->set_n_threads(n_threads);
   30.45 +  pst->set_n_tasks(n_strides);
   30.46  
   30.47 -    int stride = 0;
   30.48 -    while (!pst->is_task_claimed(/* reference */ stride)) {
   30.49 -      process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
   30.50 -                     lowest_non_clean,
   30.51 -                     lowest_non_clean_base_chunk_index,
   30.52 -                     lowest_non_clean_chunk_size);
   30.53 -    }
   30.54 -    if (pst->all_tasks_completed()) {
   30.55 -      // Clear lowest_non_clean array for next time.
   30.56 -      intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
   30.57 -      uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
   30.58 -      for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
   30.59 -        intptr_t ind = ch - lowest_non_clean_base_chunk_index;
   30.60 -        assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
   30.61 -               "Bounds error");
   30.62 -        lowest_non_clean[ind] = NULL;
   30.63 -      }
   30.64 +  int stride = 0;
   30.65 +  while (!pst->is_task_claimed(/* reference */ stride)) {
   30.66 +    process_stride(sp, mr, stride, n_strides, dcto_cl, cl,
   30.67 +                   lowest_non_clean,
   30.68 +                   lowest_non_clean_base_chunk_index,
   30.69 +                   lowest_non_clean_chunk_size);
   30.70 +  }
   30.71 +  if (pst->all_tasks_completed()) {
   30.72 +    // Clear lowest_non_clean array for next time.
   30.73 +    intptr_t first_chunk_index = addr_to_chunk_index(mr.start());
   30.74 +    uintptr_t last_chunk_index  = addr_to_chunk_index(mr.last());
   30.75 +    for (uintptr_t ch = first_chunk_index; ch <= last_chunk_index; ch++) {
   30.76 +      intptr_t ind = ch - lowest_non_clean_base_chunk_index;
   30.77 +      assert(0 <= ind && ind < (intptr_t)lowest_non_clean_chunk_size,
   30.78 +             "Bounds error");
   30.79 +      lowest_non_clean[ind] = NULL;
   30.80      }
   30.81    }
   30.82  }
   30.83 @@ -81,7 +80,7 @@
   30.84                 MemRegion used,
   30.85                 jint stride, int n_strides,
   30.86                 DirtyCardToOopClosure* dcto_cl,
   30.87 -               MemRegionClosure* cl,
   30.88 +               ClearNoncleanCardWrapper* cl,
   30.89                 jbyte** lowest_non_clean,
   30.90                 uintptr_t lowest_non_clean_base_chunk_index,
   30.91                 size_t    lowest_non_clean_chunk_size) {
   30.92 @@ -127,7 +126,11 @@
   30.93                               lowest_non_clean_base_chunk_index,
   30.94                               lowest_non_clean_chunk_size);
   30.95  
   30.96 -    non_clean_card_iterate_work(chunk_mr, cl);
   30.97 +    // We do not call the non_clean_card_iterate_serial() version because
   30.98 +    // we want to clear the cards, and the ClearNoncleanCardWrapper closure
   30.99 +    // itself does the work of finding contiguous dirty ranges of cards to
  30.100 +    // process (and clear).
  30.101 +    cl->do_MemRegion(chunk_mr);
  30.102  
  30.103      // Find the next chunk of the stride.
  30.104      chunk_card_start += CardsPerStrideChunk * n_strides;
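The striding above creates n_threads * StridesPerThread claimable tasks, and a thread that claims stride s processes chunk s, then s + n_strides, and so on (the chunk_card_start += CardsPerStrideChunk * n_strides step). A standalone sketch of the resulting interleaving, with made-up parameters:

    #include <cstdio>

    int main() {
      const int n_threads = 4, strides_per_thread = 2;
      const int n_strides = n_threads * strides_per_thread; // 8 claimable tasks
      for (int stride = 0; stride < n_strides; ++stride) {
        printf("stride %d -> chunks", stride);
        for (int k = 0; k < 3; ++k) {        // first three chunks per stride
          printf(" %d", stride + k * n_strides);
        }
        printf(" ...\n");
      }
      return 0;
    }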
    31.1 --- a/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed May 04 19:16:49 2011 -0400
    31.2 +++ b/src/share/vm/gc_implementation/shared/generationCounters.cpp	Wed May 04 23:10:58 2011 -0400
    31.3 @@ -1,5 +1,5 @@
    31.4  /*
    31.5 - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    31.6 + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
    31.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.8   *
    31.9   * This code is free software; you can redistribute it and/or modify it
   31.10 @@ -51,15 +51,18 @@
   31.11  
   31.12      cname = PerfDataManager::counter_name(_name_space, "minCapacity");
   31.13      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   31.14 +                                     _virtual_space == NULL ? 0 :
   31.15                                       _virtual_space->committed_size(), CHECK);
   31.16  
   31.17      cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
   31.18      PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   31.19 +                                     _virtual_space == NULL ? 0 :
   31.20                                       _virtual_space->reserved_size(), CHECK);
   31.21  
   31.22      cname = PerfDataManager::counter_name(_name_space, "capacity");
   31.23      _current_size = PerfDataManager::create_variable(SUN_GC, cname,
   31.24 -                                      PerfData::U_Bytes,
   31.25 +                                     PerfData::U_Bytes,
   31.26 +                                     _virtual_space == NULL ? 0 :
   31.27                                       _virtual_space->committed_size(), CHECK);
   31.28    }
   31.29  }
    32.1 --- a/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed May 04 19:16:49 2011 -0400
    32.2 +++ b/src/share/vm/gc_implementation/shared/generationCounters.hpp	Wed May 04 23:10:58 2011 -0400
    32.3 @@ -1,5 +1,5 @@
    32.4  /*
    32.5 - * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
    32.6 + * Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
    32.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.8   *
    32.9   * This code is free software; you can redistribute it and/or modify it
   32.10 @@ -61,10 +61,11 @@
   32.11    }
   32.12  
   32.13    virtual void update_all() {
   32.14 -    _current_size->set_value(_virtual_space->committed_size());
   32.15 +    _current_size->set_value(_virtual_space == NULL ? 0 :
   32.16 +                             _virtual_space->committed_size());
   32.17    }
   32.18  
   32.19    const char* name_space() const        { return _name_space; }
   32.20 +
   32.21  };
   32.22 -
   32.23  #endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GENERATIONCOUNTERS_HPP
    33.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    33.2 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.cpp	Wed May 04 23:10:58 2011 -0400
    33.3 @@ -0,0 +1,66 @@
    33.4 +/*
    33.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    33.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.7 + *
    33.8 + * This code is free software; you can redistribute it and/or modify it
    33.9 + * under the terms of the GNU General Public License version 2 only, as
   33.10 + * published by the Free Software Foundation.
   33.11 + *
   33.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   33.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   33.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   33.15 + * version 2 for more details (a copy is included in the LICENSE file that
   33.16 + * accompanied this code).
   33.17 + *
   33.18 + * You should have received a copy of the GNU General Public License version
   33.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   33.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   33.21 + *
   33.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   33.23 + * or visit www.oracle.com if you need additional information or have any
   33.24 + * questions.
   33.25 + *
   33.26 + */
   33.27 +
   33.28 +#include "precompiled.hpp"
   33.29 +#include "gc_implementation/shared/hSpaceCounters.hpp"
   33.30 +#include "memory/generation.hpp"
   33.31 +#include "memory/resourceArea.hpp"
   33.32 +
   33.33 +HSpaceCounters::HSpaceCounters(const char* name,
   33.34 +                               int ordinal,
   33.35 +                               size_t max_size,
   33.36 +                               size_t initial_capacity,
   33.37 +                               GenerationCounters* gc) {
   33.38 +
   33.39 +  if (UsePerfData) {
   33.40 +    EXCEPTION_MARK;
   33.41 +    ResourceMark rm;
   33.42 +
   33.43 +    const char* cns =
   33.44 +      PerfDataManager::name_space(gc->name_space(), "space", ordinal);
   33.45 +
   33.46 +    _name_space = NEW_C_HEAP_ARRAY(char, strlen(cns)+1);
   33.47 +    strcpy(_name_space, cns);
   33.48 +
   33.49 +    const char* cname = PerfDataManager::counter_name(_name_space, "name");
   33.50 +    PerfDataManager::create_string_constant(SUN_GC, cname, name, CHECK);
   33.51 +
   33.52 +    cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
   33.53 +    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   33.54 +                                     (jlong)max_size, CHECK);
   33.55 +
   33.56 +    cname = PerfDataManager::counter_name(_name_space, "capacity");
   33.57 +    _capacity = PerfDataManager::create_variable(SUN_GC, cname,
   33.58 +                                                 PerfData::U_Bytes,
   33.59 +                                                 initial_capacity, CHECK);
   33.60 +
   33.61 +    cname = PerfDataManager::counter_name(_name_space, "used");
   33.62 +    _used = PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
   33.63 +                                             (jlong) 0, CHECK);
   33.64 +
   33.65 +    cname = PerfDataManager::counter_name(_name_space, "initCapacity");
   33.66 +    PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
   33.67 +                                     initial_capacity, CHECK);
   33.68 +  }
   33.69 +}
    34.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    34.2 +++ b/src/share/vm/gc_implementation/shared/hSpaceCounters.hpp	Wed May 04 23:10:58 2011 -0400
    34.3 @@ -0,0 +1,87 @@
    34.4 +/*
    34.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
    34.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.7 + *
    34.8 + * This code is free software; you can redistribute it and/or modify it
    34.9 + * under the terms of the GNU General Public License version 2 only, as
   34.10 + * published by the Free Software Foundation.
   34.11 + *
   34.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   34.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   34.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   34.15 + * version 2 for more details (a copy is included in the LICENSE file that
   34.16 + * accompanied this code).
   34.17 + *
   34.18 + * You should have received a copy of the GNU General Public License version
   34.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   34.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   34.21 + *
   34.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   34.23 + * or visit www.oracle.com if you need additional information or have any
   34.24 + * questions.
   34.25 + *
   34.26 + */
   34.27 +
   34.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
   34.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
   34.30 +
   34.31 +#ifndef SERIALGC
   34.32 +#include "gc_implementation/shared/generationCounters.hpp"
   34.33 +#include "memory/generation.hpp"
   34.34 +#include "runtime/perfData.hpp"
   34.35 +#endif
   34.36 +
    34.37 +// HSpaceCounters is a holder class for performance counters
    34.38 +// that track collections (logical spaces) in a heap.
   34.39 +
   34.40 +class HeapSpaceUsedHelper;
   34.41 +class G1SpaceMonitoringSupport;
   34.42 +
   34.43 +class HSpaceCounters: public CHeapObj {
   34.44 +  friend class VMStructs;
   34.45 +
   34.46 + private:
   34.47 +  PerfVariable*        _capacity;
   34.48 +  PerfVariable*        _used;
   34.49 +
   34.50 +  // Constant PerfData types don't need to retain a reference.
   34.51 +  // However, it's a good idea to document them here.
   34.52 +
   34.53 +  char*             _name_space;
   34.54 +
   34.55 + public:
   34.56 +
   34.57 +  HSpaceCounters(const char* name, int ordinal, size_t max_size,
   34.58 +                 size_t initial_capacity, GenerationCounters* gc);
   34.59 +
   34.60 +  ~HSpaceCounters() {
   34.61 +    if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
   34.62 +  }
   34.63 +
   34.64 +  inline void update_capacity(size_t v) {
   34.65 +    _capacity->set_value(v);
   34.66 +  }
   34.67 +
   34.68 +  inline void update_used(size_t v) {
   34.69 +    _used->set_value(v);
   34.70 +  }
   34.71 +
   34.72 +  debug_only(
   34.73 +    // for security reasons, we do not allow arbitrary reads from
   34.74 +    // the counters as they may live in shared memory.
   34.75 +    jlong used() {
   34.76 +      return _used->get_value();
   34.77 +    }
   34.78 +    jlong capacity() {
    34.79 +      return _capacity->get_value();
   34.80 +    }
   34.81 +  )
   34.82 +
   34.83 +  inline void update_all(size_t capacity, size_t used) {
   34.84 +    update_capacity(capacity);
   34.85 +    update_used(used);
   34.86 +  }
   34.87 +
   34.88 +  const char* name_space() const        { return _name_space; }
   34.89 +};
   34.90 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_HSPACECOUNTERS_HPP
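For orientation, here is a standalone mock of the update pattern HSpaceCounters exposes; the real class wraps jvmstat PerfVariable objects, so this sketch only mirrors the shape of update_capacity()/update_used()/update_all() with plain fields (not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    struct MockSpaceCounters {
      size_t capacity;
      size_t used;
      MockSpaceCounters() : capacity(0), used(0) {}
      void update_capacity(size_t v) { capacity = v; }
      void update_used(size_t v)     { used = v; }
      void update_all(size_t c, size_t u) { update_capacity(c); update_used(u); }
    };

    int main() {
      MockSpaceCounters eden;
      eden.update_all(/* capacity */ 8u << 20, /* used */ 3u << 20);
      printf("eden capacity=%zu used=%zu\n", eden.capacity, eden.used);
      return 0;
    }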
    35.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp	Wed May 04 19:16:49 2011 -0400
    35.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp	Wed May 04 23:10:58 2011 -0400
    35.3 @@ -456,31 +456,35 @@
    35.4  }
    35.5  
    35.6  
    35.7 -void CardTableModRefBS::non_clean_card_iterate(Space* sp,
    35.8 -                                               MemRegion mr,
    35.9 -                                               DirtyCardToOopClosure* dcto_cl,
   35.10 -                                               MemRegionClosure* cl) {
   35.11 +void CardTableModRefBS::non_clean_card_iterate_possibly_parallel(Space* sp,
   35.12 +                                                                 MemRegion mr,
   35.13 +                                                                 DirtyCardToOopClosure* dcto_cl,
   35.14 +                                                                 ClearNoncleanCardWrapper* cl) {
   35.15    if (!mr.is_empty()) {
   35.16      int n_threads = SharedHeap::heap()->n_par_threads();
   35.17      if (n_threads > 0) {
   35.18  #ifndef SERIALGC
   35.19 -      par_non_clean_card_iterate_work(sp, mr, dcto_cl, cl, n_threads);
   35.20 +      non_clean_card_iterate_parallel_work(sp, mr, dcto_cl, cl, n_threads);
   35.21  #else  // SERIALGC
   35.22        fatal("Parallel gc not supported here.");
   35.23  #endif // SERIALGC
   35.24      } else {
   35.25 -      non_clean_card_iterate_work(mr, cl);
   35.26 +      // We do not call the non_clean_card_iterate_serial() version below because
   35.27 +      // we want to clear the cards (which non_clean_card_iterate_serial() does not
   35.28 +      // do for us), and the ClearNoncleanCardWrapper closure itself does the work
   35.29 +      // of finding contiguous dirty ranges of cards to process (and clear).
   35.30 +      cl->do_MemRegion(mr);
   35.31      }
   35.32    }
   35.33  }
   35.34  
   35.35 -// NOTE: For this to work correctly, it is important that
   35.36 -// we look for non-clean cards below (so as to catch those
   35.37 -// marked precleaned), rather than look explicitly for dirty
   35.38 -// cards (and miss those marked precleaned). In that sense,
   35.39 -// the name precleaned is currently somewhat of a misnomer.
   35.40 -void CardTableModRefBS::non_clean_card_iterate_work(MemRegion mr,
   35.41 -                                                    MemRegionClosure* cl) {
   35.42 +// The iterator itself is not MT-aware, but
   35.43 +// MT-aware callers and closures can use this to
   35.44 +// accomplish dirty card iteration in parallel. The
   35.45 +// iterator itself does not clear the dirty cards, or
   35.46 +// change their values in any manner.
   35.47 +void CardTableModRefBS::non_clean_card_iterate_serial(MemRegion mr,
   35.48 +                                                      MemRegionClosure* cl) {
   35.49    for (int i = 0; i < _cur_covered_regions; i++) {
   35.50      MemRegion mri = mr.intersection(_covered[i]);
   35.51      if (mri.word_size() > 0) {
   35.52 @@ -661,7 +665,7 @@
   35.53  
   35.54  void CardTableModRefBS::verify_clean_region(MemRegion mr) {
   35.55    GuaranteeNotModClosure blk(this);
   35.56 -  non_clean_card_iterate_work(mr, &blk);
   35.57 +  non_clean_card_iterate_serial(mr, &blk);
   35.58  }
   35.59  
   35.60  // To verify a MemRegion is entirely dirty this closure is passed to
    36.1 --- a/src/share/vm/memory/cardTableModRefBS.hpp	Wed May 04 19:16:49 2011 -0400
    36.2 +++ b/src/share/vm/memory/cardTableModRefBS.hpp	Wed May 04 23:10:58 2011 -0400
    36.3 @@ -44,6 +44,7 @@
    36.4  class Generation;
    36.5  class OopsInGenClosure;
    36.6  class DirtyCardToOopClosure;
    36.7 +class ClearNoncleanCardWrapper;
    36.8  
    36.9  class CardTableModRefBS: public ModRefBarrierSet {
   36.10    // Some classes get to look at some private stuff.
   36.11 @@ -165,22 +166,28 @@
   36.12  
   36.13    // Iterate over the portion of the card-table which covers the given
   36.14    // region mr in the given space and apply cl to any dirty sub-regions
   36.15 -  // of mr. cl and dcto_cl must either be the same closure or cl must
   36.16 -  // wrap dcto_cl. Both are required - neither may be NULL. Also, dcto_cl
   36.17 -  // may be modified. Note that this function will operate in a parallel
   36.18 -  // mode if worker threads are available.
   36.19 -  void non_clean_card_iterate(Space* sp, MemRegion mr,
   36.20 -                              DirtyCardToOopClosure* dcto_cl,
   36.21 -                              MemRegionClosure* cl);
   36.22 +  // of mr. Dirty cards are _not_ cleared by the iterator method itself,
   36.23 +  // but closures may arrange to do so on their own should they so wish.
   36.24 +  void non_clean_card_iterate_serial(MemRegion mr, MemRegionClosure* cl);
   36.25  
   36.26 -  // Utility function used to implement the other versions below.
   36.27 -  void non_clean_card_iterate_work(MemRegion mr, MemRegionClosure* cl);
   36.28 +  // A variant of the above that will operate in a parallel mode if
   36.29 +  // worker threads are available, and clear the dirty cards as it
   36.30 +  // processes them.
   36.31 +  // ClearNoncleanCardWrapper cl must wrap the DirtyCardToOopClosure dcto_cl,
   36.32 +  // which may itself be modified by the method.
   36.33 +  void non_clean_card_iterate_possibly_parallel(Space* sp, MemRegion mr,
   36.34 +                                                DirtyCardToOopClosure* dcto_cl,
   36.35 +                                                ClearNoncleanCardWrapper* cl);
   36.36  
   36.37 -  void par_non_clean_card_iterate_work(Space* sp, MemRegion mr,
   36.38 -                                       DirtyCardToOopClosure* dcto_cl,
   36.39 -                                       MemRegionClosure* cl,
   36.40 -                                       int n_threads);
   36.41 + private:
   36.42 +  // Work method used to implement non_clean_card_iterate_possibly_parallel()
   36.43 +  // above in the parallel case.
   36.44 +  void non_clean_card_iterate_parallel_work(Space* sp, MemRegion mr,
   36.45 +                                            DirtyCardToOopClosure* dcto_cl,
   36.46 +                                            ClearNoncleanCardWrapper* cl,
   36.47 +                                            int n_threads);
   36.48  
   36.49 + protected:
   36.50    // Dirty the bytes corresponding to "mr" (not all of which must be
   36.51    // covered.)
   36.52    void dirty_MemRegion(MemRegion mr);
   36.53 @@ -237,7 +244,7 @@
   36.54                        MemRegion used,
   36.55                        jint stride, int n_strides,
   36.56                        DirtyCardToOopClosure* dcto_cl,
   36.57 -                      MemRegionClosure* cl,
   36.58 +                      ClearNoncleanCardWrapper* cl,
   36.59                        jbyte** lowest_non_clean,
   36.60                        uintptr_t lowest_non_clean_base_chunk_index,
   36.61                        size_t lowest_non_clean_chunk_size);
   36.62 @@ -409,14 +416,14 @@
   36.63    // marking, where a dirty card may cause scanning, and summarization
   36.64    // marking, of objects that extend onto subsequent cards.)
   36.65    void mod_card_iterate(MemRegionClosure* cl) {
   36.66 -    non_clean_card_iterate_work(_whole_heap, cl);
   36.67 +    non_clean_card_iterate_serial(_whole_heap, cl);
   36.68    }
   36.69  
   36.70    // Like the "mod_cards_iterate" above, except only invokes the closure
   36.71    // for cards within the MemRegion "mr" (which is required to be
   36.72    // card-aligned and sized.)
   36.73    void mod_card_iterate(MemRegion mr, MemRegionClosure* cl) {
   36.74 -    non_clean_card_iterate_work(mr, cl);
   36.75 +    non_clean_card_iterate_serial(mr, cl);
   36.76    }
   36.77  
   36.78    static uintx ct_max_alignment_constraint();
   36.79 @@ -493,4 +500,5 @@
   36.80    void set_CTRS(CardTableRS* rs) { _rs = rs; }
   36.81  };
   36.82  
   36.83 +
   36.84  #endif // SHARE_VM_MEMORY_CARDTABLEMODREFBS_HPP
    37.1 --- a/src/share/vm/memory/cardTableRS.cpp	Wed May 04 19:16:49 2011 -0400
    37.2 +++ b/src/share/vm/memory/cardTableRS.cpp	Wed May 04 23:10:58 2011 -0400
    37.3 @@ -105,107 +105,111 @@
    37.4    g->younger_refs_iterate(blk);
    37.5  }
    37.6  
    37.7 -class ClearNoncleanCardWrapper: public MemRegionClosure {
    37.8 -  MemRegionClosure* _dirty_card_closure;
    37.9 -  CardTableRS* _ct;
   37.10 -  bool _is_par;
   37.11 -private:
   37.12 -  // Clears the given card, return true if the corresponding card should be
   37.13 -  // processed.
   37.14 -  bool clear_card(jbyte* entry) {
   37.15 -    if (_is_par) {
   37.16 -      while (true) {
   37.17 -        // In the parallel case, we may have to do this several times.
   37.18 -        jbyte entry_val = *entry;
   37.19 -        assert(entry_val != CardTableRS::clean_card_val(),
   37.20 -               "We shouldn't be looking at clean cards, and this should "
   37.21 -               "be the only place they get cleaned.");
   37.22 -        if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
   37.23 -            || _ct->is_prev_youngergen_card_val(entry_val)) {
   37.24 -          jbyte res =
   37.25 -            Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
   37.26 -          if (res == entry_val) {
   37.27 -            break;
   37.28 -          } else {
   37.29 -            assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
   37.30 -                   "The CAS above should only fail if another thread did "
   37.31 -                   "a GC write barrier.");
   37.32 -          }
   37.33 -        } else if (entry_val ==
   37.34 -                   CardTableRS::cur_youngergen_and_prev_nonclean_card) {
   37.35 -          // Parallelism shouldn't matter in this case.  Only the thread
   37.36 -          // assigned to scan the card should change this value.
   37.37 -          *entry = _ct->cur_youngergen_card_val();
   37.38 -          break;
   37.39 -        } else {
   37.40 -          assert(entry_val == _ct->cur_youngergen_card_val(),
   37.41 -                 "Should be the only possibility.");
   37.42 -          // In this case, the card was clean before, and become
   37.43 -          // cur_youngergen only because of processing of a promoted object.
   37.44 -          // We don't have to look at the card.
   37.45 -          return false;
   37.46 -        }
   37.47 +inline bool ClearNoncleanCardWrapper::clear_card(jbyte* entry) {
   37.48 +  if (_is_par) {
   37.49 +    return clear_card_parallel(entry);
   37.50 +  } else {
   37.51 +    return clear_card_serial(entry);
   37.52 +  }
   37.53 +}
   37.54 +
   37.55 +inline bool ClearNoncleanCardWrapper::clear_card_parallel(jbyte* entry) {
   37.56 +  while (true) {
   37.57 +    // In the parallel case, we may have to do this several times.
   37.58 +    jbyte entry_val = *entry;
   37.59 +    assert(entry_val != CardTableRS::clean_card_val(),
   37.60 +           "We shouldn't be looking at clean cards, and this should "
   37.61 +           "be the only place they get cleaned.");
   37.62 +    if (CardTableRS::card_is_dirty_wrt_gen_iter(entry_val)
   37.63 +        || _ct->is_prev_youngergen_card_val(entry_val)) {
   37.64 +      jbyte res =
   37.65 +        Atomic::cmpxchg(CardTableRS::clean_card_val(), entry, entry_val);
   37.66 +      if (res == entry_val) {
   37.67 +        break;
   37.68 +      } else {
   37.69 +        assert(res == CardTableRS::cur_youngergen_and_prev_nonclean_card,
   37.70 +               "The CAS above should only fail if another thread did "
   37.71 +               "a GC write barrier.");
   37.72        }
   37.73 -      return true;
   37.74 +    } else if (entry_val ==
   37.75 +               CardTableRS::cur_youngergen_and_prev_nonclean_card) {
   37.76 +      // Parallelism shouldn't matter in this case.  Only the thread
   37.77 +      // assigned to scan the card should change this value.
   37.78 +      *entry = _ct->cur_youngergen_card_val();
   37.79 +      break;
   37.80      } else {
   37.81 -      jbyte entry_val = *entry;
   37.82 -      assert(entry_val != CardTableRS::clean_card_val(),
   37.83 -             "We shouldn't be looking at clean cards, and this should "
   37.84 -             "be the only place they get cleaned.");
   37.85 -      assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
   37.86 -             "This should be possible in the sequential case.");
   37.87 -      *entry = CardTableRS::clean_card_val();
   37.88 -      return true;
   37.89 +      assert(entry_val == _ct->cur_youngergen_card_val(),
   37.90 +             "Should be the only possibility.");
   37.91 +      // In this case, the card was clean before, and became
   37.92 +      // cur_youngergen only because of processing of a promoted object.
   37.93 +      // We don't have to look at the card.
   37.94 +      return false;
   37.95      }
   37.96    }
   37.97 +  return true;
   37.98 +}
   37.99  
  37.100 -public:
  37.101 -  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure,
  37.102 -                           CardTableRS* ct) :
  37.103 +
  37.104 +inline bool ClearNoncleanCardWrapper::clear_card_serial(jbyte* entry) {
  37.105 +  jbyte entry_val = *entry;
  37.106 +  assert(entry_val != CardTableRS::clean_card_val(),
  37.107 +         "We shouldn't be looking at clean cards, and this should "
  37.108 +         "be the only place they get cleaned.");
  37.109 +  assert(entry_val != CardTableRS::cur_youngergen_and_prev_nonclean_card,
  37.110 +         "This should not be possible in the sequential case.");
  37.111 +  *entry = CardTableRS::clean_card_val();
  37.112 +  return true;
  37.113 +}
  37.114 +
  37.115 +ClearNoncleanCardWrapper::ClearNoncleanCardWrapper(
  37.116 +  MemRegionClosure* dirty_card_closure, CardTableRS* ct) :
  37.117      _dirty_card_closure(dirty_card_closure), _ct(ct) {
  37.118      _is_par = (SharedHeap::heap()->n_par_threads() > 0);
  37.119 +}
  37.120 +
  37.121 +void ClearNoncleanCardWrapper::do_MemRegion(MemRegion mr) {
  37.122 +  assert(mr.word_size() > 0, "Error");
  37.123 +  assert(_ct->is_aligned(mr.start()), "mr.start() should be card aligned");
  37.124 +  // mr.end() may not necessarily be card aligned.
  37.125 +  jbyte* cur_entry = _ct->byte_for(mr.last());
  37.126 +  const jbyte* limit = _ct->byte_for(mr.start());
  37.127 +  HeapWord* end_of_non_clean = mr.end();
  37.128 +  HeapWord* start_of_non_clean = end_of_non_clean;
  37.129 +  while (cur_entry >= limit) {
  37.130 +    HeapWord* cur_hw = _ct->addr_for(cur_entry);
  37.131 +    if ((*cur_entry != CardTableRS::clean_card_val()) && clear_card(cur_entry)) {
  37.132 +      // Continue the dirty range by opening the
  37.133 +      // dirty window one card to the left.
  37.134 +      start_of_non_clean = cur_hw;
  37.135 +    } else {
  37.136 +      // We hit a "clean" card; process any non-empty
  37.137 +      // "dirty" range accumulated so far.
  37.138 +      if (start_of_non_clean < end_of_non_clean) {
  37.139 +        const MemRegion mrd(start_of_non_clean, end_of_non_clean);
  37.140 +        _dirty_card_closure->do_MemRegion(mrd);
  37.141 +      }
  37.142 +      // Reset the dirty window, while continuing to look
  37.143 +      // for the next dirty card that will start a
  37.144 +      // new dirty window.
  37.145 +      end_of_non_clean = cur_hw;
  37.146 +      start_of_non_clean = cur_hw;
  37.147 +    }
  37.148 +    // Note that "cur_entry" leads "start_of_non_clean" in
  37.149 +    // its leftward excursion after this point
  37.150 +    // in the loop and, when we hit the left end of "mr",
  37.151 +    // will point off of the left end of the card-table
  37.152 +    // for "mr".
  37.153 +    cur_entry--;
  37.154    }
  37.155 -  void do_MemRegion(MemRegion mr) {
  37.156 -    // We start at the high end of "mr", walking backwards
  37.157 -    // while accumulating a contiguous dirty range of cards in
  37.158 -    // [start_of_non_clean, end_of_non_clean) which we then
  37.159 -    // process en masse.
  37.160 -    HeapWord* end_of_non_clean = mr.end();
  37.161 -    HeapWord* start_of_non_clean = end_of_non_clean;
  37.162 -    jbyte*       entry = _ct->byte_for(mr.last());
  37.163 -    const jbyte* first_entry = _ct->byte_for(mr.start());
  37.164 -    while (entry >= first_entry) {
  37.165 -      HeapWord* cur = _ct->addr_for(entry);
  37.166 -      if (!clear_card(entry)) {
  37.167 -        // We hit a clean card; process any non-empty
  37.168 -        // dirty range accumulated so far.
  37.169 -        if (start_of_non_clean < end_of_non_clean) {
  37.170 -          MemRegion mr2(start_of_non_clean, end_of_non_clean);
  37.171 -          _dirty_card_closure->do_MemRegion(mr2);
  37.172 -        }
  37.173 -        // Reset the dirty window while continuing to
  37.174 -        // look for the next dirty window to process.
  37.175 -        end_of_non_clean = cur;
  37.176 -        start_of_non_clean = end_of_non_clean;
  37.177 -      }
  37.178 -      // Open the left end of the window one card to the left.
  37.179 -      start_of_non_clean = cur;
  37.180 -      // Note that "entry" leads "start_of_non_clean" in
  37.181 -      // its leftward excursion after this point
  37.182 -      // in the loop and, when we hit the left end of "mr",
  37.183 -      // will point off of the left end of the card-table
  37.184 -      // for "mr".
  37.185 -      entry--;
  37.186 -    }
  37.187 -    // If the first card of "mr" was dirty, we will have
  37.188 -    // been left with a dirty window, co-initial with "mr",
  37.189 -    // which we now process.
  37.190 -    if (start_of_non_clean < end_of_non_clean) {
  37.191 -      MemRegion mr2(start_of_non_clean, end_of_non_clean);
  37.192 -      _dirty_card_closure->do_MemRegion(mr2);
  37.193 -    }
  37.194 +  // If the first card of "mr" was dirty, we will have
  37.195 +  // been left with a dirty window, co-initial with "mr",
  37.196 +  // which we now process.
  37.197 +  if (start_of_non_clean < end_of_non_clean) {
  37.198 +    const MemRegion mrd(start_of_non_clean, end_of_non_clean);
  37.199 +    _dirty_card_closure->do_MemRegion(mrd);
  37.200    }
  37.201 -};
  37.202 +}
  37.203 +
  37.204  // clean (by dirty->clean before) ==> cur_younger_gen
  37.205  // dirty                          ==> cur_youngergen_and_prev_nonclean_card
  37.206  // precleaned                     ==> cur_youngergen_and_prev_nonclean_card
  37.207 @@ -246,8 +250,35 @@
  37.208                                                     cl->gen_boundary());
  37.209    ClearNoncleanCardWrapper clear_cl(dcto_cl, this);
  37.210  
  37.211 -  _ct_bs->non_clean_card_iterate(sp, sp->used_region_at_save_marks(),
  37.212 -                                dcto_cl, &clear_cl);
  37.213 +  const MemRegion urasm = sp->used_region_at_save_marks();
  37.214 +#ifdef ASSERT
  37.215 +  // Convert the assertion check to a warning if we are running
  37.216 +  // CMS+ParNew, until the related bug is fixed.
  37.217 +  MemRegion ur    = sp->used_region();
  37.218 +  assert(ur.contains(urasm) || (UseConcMarkSweepGC && UseParNewGC),
  37.219 +         err_msg("Did you forget to call save_marks()? "
  37.220 +                 "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
  37.221 +                 "[" PTR_FORMAT ", " PTR_FORMAT ")",
  37.222 +                 urasm.start(), urasm.end(), ur.start(), ur.end()));
  37.223 +  // In the case of CMS+ParNew, issue a warning
  37.224 +  if (!ur.contains(urasm)) {
  37.225 +    assert(UseConcMarkSweepGC && UseParNewGC, "Tautology: see assert above");
  37.226 +    warning("CMS+ParNew: Did you forget to call save_marks()? "
  37.227 +            "[" PTR_FORMAT ", " PTR_FORMAT ") is not contained in "
  37.228 +            "[" PTR_FORMAT ", " PTR_FORMAT ")",
  37.229 +             urasm.start(), urasm.end(), ur.start(), ur.end());
  37.230 +    MemRegion ur2 = sp->used_region();
  37.231 +    MemRegion urasm2 = sp->used_region_at_save_marks();
  37.232 +    if (!ur.equals(ur2)) {
  37.233 +      warning("CMS+ParNew: Flickering used_region()!!");
  37.234 +    }
  37.235 +    if (!urasm.equals(urasm2)) {
  37.236 +      warning("CMS+ParNew: Flickering used_region_at_save_marks()!!");
  37.237 +    }
  37.238 +  }
  37.239 +#endif
  37.240 +  _ct_bs->non_clean_card_iterate_possibly_parallel(sp, urasm,
  37.241 +                                                   dcto_cl, &clear_cl);
  37.242  }
  37.243  
  37.244  void CardTableRS::clear_into_younger(Generation* gen, bool clear_perm) {
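
The rewritten ClearNoncleanCardWrapper::do_MemRegion above walks the card table from the high end of "mr" toward the low end, growing a [start_of_non_clean, end_of_non_clean) window across consecutive non-clean cards and handing each completed window to the dirty-card closure. A minimal standalone sketch of the same windowing idea, using a plain byte array in place of the VM's CardTableRS types (CLEAN, process_range and scan_backward are illustrative names, not HotSpot APIs):

    #include <cstdio>

    const unsigned char CLEAN = 0xff;   // illustrative clean-card value

    // Stand-in for _dirty_card_closure->do_MemRegion(): reports one
    // maximal dirty range [start_card, end_card).
    static void process_range(int start_card, int end_card) {
      std::printf("dirty cards [%d, %d)\n", start_card, end_card);
    }

    // Walk cards [0, n) right to left, batching adjacent dirty cards so
    // each maximal dirty window is processed exactly once.
    static void scan_backward(unsigned char* cards, int n) {
      int end_of_non_clean   = n;   // exclusive right end of current window
      int start_of_non_clean = n;   // inclusive left end of current window
      for (int cur = n - 1; cur >= 0; --cur) {
        if (cards[cur] != CLEAN) {
          cards[cur] = CLEAN;       // "clear_card" succeeded: grow window left
          start_of_non_clean = cur;
        } else {
          if (start_of_non_clean < end_of_non_clean)
            process_range(start_of_non_clean, end_of_non_clean);
          end_of_non_clean   = cur; // reset the window and keep scanning
          start_of_non_clean = cur;
        }
      }
      // As in do_MemRegion: a dirty run touching the left edge is still open.
      if (start_of_non_clean < end_of_non_clean)
        process_range(start_of_non_clean, end_of_non_clean);
    }

    int main() {
      unsigned char cards[] = { 0, CLEAN, 0, 0, CLEAN, 0 };
      scan_backward(cards, 6);      // prints [5,6), [2,4), [0,1)
      return 0;
    }
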
    38.1 --- a/src/share/vm/memory/cardTableRS.hpp	Wed May 04 19:16:49 2011 -0400
    38.2 +++ b/src/share/vm/memory/cardTableRS.hpp	Wed May 04 23:10:58 2011 -0400
    38.3 @@ -1,5 +1,5 @@
    38.4  /*
    38.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    38.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    38.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    38.8   *
    38.9   * This code is free software; you can redistribute it and/or modify it
   38.10 @@ -166,4 +166,21 @@
   38.11  
   38.12  };
   38.13  
   38.14 +class ClearNoncleanCardWrapper: public MemRegionClosure {
   38.15 +  MemRegionClosure* _dirty_card_closure;
   38.16 +  CardTableRS* _ct;
   38.17 +  bool _is_par;
   38.18 +private:
   38.19 +  // Clears the given card; returns true if the corresponding card should be
   38.20 +  // processed.
   38.21 +  inline bool clear_card(jbyte* entry);
   38.22 +  // Work methods called by clear_card()
   38.23 +  inline bool clear_card_serial(jbyte* entry);
   38.24 +  inline bool clear_card_parallel(jbyte* entry);
   38.25 +
   38.26 +public:
   38.27 +  ClearNoncleanCardWrapper(MemRegionClosure* dirty_card_closure, CardTableRS* ct);
   38.28 +  void do_MemRegion(MemRegion mr);
   38.29 +};
   38.30 +
   38.31  #endif // SHARE_VM_MEMORY_CARDTABLERS_HPP
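
The parallel flavor declared here claims each card with a compare-and-swap and retries only when a concurrent GC write barrier changes the card underneath it, as in clear_card_parallel above. A hedged sketch of that claim loop using std::atomic in place of the VM's Atomic::cmpxchg; the three state values and all names are illustrative, not the real card encodings, and the real code layers the cur_youngergen semantics on top of this claim/retry shape:

    #include <atomic>

    enum : unsigned char {
      CLEAN = 0xff,                          // illustrative values only
      DIRTY = 0x00,
      YOUNGERGEN_AND_PREV_NONCLEAN = 0x01    // written by a racing barrier
    };

    // Returns true if the calling thread should go on to scan the card.
    bool clear_card_parallel(std::atomic<unsigned char>& entry) {
      while (true) {
        unsigned char v = entry.load();
        if (v == DIRTY) {
          // Claim the card; on CAS failure a write barrier intervened,
          // so loop around and look at the new value.
          if (entry.compare_exchange_strong(v, CLEAN))
            return true;
        } else if (v == YOUNGERGEN_AND_PREV_NONCLEAN) {
          // Only the thread assigned to this card downgrades it, so a
          // plain store suffices (mirroring the comment in the hunk above).
          entry.store(CLEAN);
          return true;
        } else {
          return false;   // nothing left for this thread to do
        }
      }
    }

    int main() {
      std::atomic<unsigned char> card(DIRTY);
      return clear_card_parallel(card) ? 0 : 1;   // claims the card
    }
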
    39.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Wed May 04 19:16:49 2011 -0400
    39.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed May 04 23:10:58 2011 -0400
    39.3 @@ -427,13 +427,13 @@
    39.4    // explicitly mark reachable objects in younger generations, to avoid
    39.5    // excess storage retention.)  If "collecting_perm_gen" is false, then
    39.6    // roots that may only contain references to permGen objects are not
    39.7 -  // scanned. The "so" argument determines which of the roots
    39.8 +  // scanned; instead, the older_gens closure is applied to all outgoing
    39.9 +  // references in the perm gen.  The "so" argument determines which of the roots
   39.10    // the closure is applied to:
   39.11    // "SO_None" does none;
   39.12    // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   39.13    // "SO_SystemClasses" to all the "system" classes and loaders;
   39.14 -  // "SO_Symbols_and_Strings" applies the closure to all entries in
   39.15 -  // SymbolsTable and StringTable.
   39.16 +  // "SO_Strings" applies the closure to all entries in the StringTable.
   39.17    void gen_process_strong_roots(int level,
   39.18                                  bool younger_gens_as_roots,
   39.19                                  // The remaining arguments are in an order
    40.1 --- a/src/share/vm/memory/genOopClosures.hpp	Wed May 04 19:16:49 2011 -0400
    40.2 +++ b/src/share/vm/memory/genOopClosures.hpp	Wed May 04 23:10:58 2011 -0400
    40.3 @@ -1,5 +1,5 @@
    40.4  /*
    40.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    40.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    40.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    40.8   *
    40.9   * This code is free software; you can redistribute it and/or modify it
   40.10 @@ -175,7 +175,7 @@
   40.11   protected:
   40.12    template <class T> inline void do_oop_work(T* p) {
   40.13      oop obj = oopDesc::load_decode_heap_oop(p);
   40.14 -    guarantee(obj->is_oop_or_null(), "invalid oop");
   40.15 +    guarantee(obj->is_oop_or_null(), err_msg("invalid oop: " INTPTR_FORMAT, obj));
   40.16    }
   40.17   public:
   40.18    virtual void do_oop(oop* p);
    41.1 --- a/src/share/vm/memory/sharedHeap.cpp	Wed May 04 19:16:49 2011 -0400
    41.2 +++ b/src/share/vm/memory/sharedHeap.cpp	Wed May 04 23:10:58 2011 -0400
    41.3 @@ -46,7 +46,6 @@
    41.4    SH_PS_Management_oops_do,
    41.5    SH_PS_SystemDictionary_oops_do,
    41.6    SH_PS_jvmti_oops_do,
    41.7 -  SH_PS_SymbolTable_oops_do,
    41.8    SH_PS_StringTable_oops_do,
    41.9    SH_PS_CodeCache_oops_do,
   41.10    // Leave this one last.
   41.11 @@ -161,13 +160,9 @@
   41.12    if (!_process_strong_tasks->is_task_claimed(SH_PS_SystemDictionary_oops_do)) {
   41.13      if (so & SO_AllClasses) {
   41.14        SystemDictionary::oops_do(roots);
   41.15 -    } else
   41.16 -      if (so & SO_SystemClasses) {
   41.17 -        SystemDictionary::always_strong_oops_do(roots);
   41.18 -      }
   41.19 -  }
   41.20 -
   41.21 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_SymbolTable_oops_do)) {
   41.22 +    } else if (so & SO_SystemClasses) {
   41.23 +      SystemDictionary::always_strong_oops_do(roots);
   41.24 +    }
   41.25    }
   41.26  
   41.27    if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
    42.1 --- a/src/share/vm/memory/sharedHeap.hpp	Wed May 04 19:16:49 2011 -0400
    42.2 +++ b/src/share/vm/memory/sharedHeap.hpp	Wed May 04 23:10:58 2011 -0400
    42.3 @@ -192,9 +192,8 @@
    42.4      SO_None                = 0x0,
    42.5      SO_AllClasses          = 0x1,
    42.6      SO_SystemClasses       = 0x2,
    42.7 -    SO_Symbols             = 0x4,
    42.8 -    SO_Strings             = 0x8,
    42.9 -    SO_CodeCache           = 0x10
   42.10 +    SO_Strings             = 0x4,
   42.11 +    SO_CodeCache           = 0x8
   42.12    };
   42.13  
   42.14    FlexibleWorkGang* workers() const { return _workers; }
   42.15 @@ -208,14 +207,13 @@
   42.16  
   42.17    // Invoke the "do_oop" method the closure "roots" on all root locations.
   42.18    // If "collecting_perm_gen" is false, then roots that may only contain
   42.19 -  // references to permGen objects are not scanned.  If true, the
   42.20 -  // "perm_gen" closure is applied to all older-to-younger refs in the
   42.21 +  // references to permGen objects are not scanned; instead, in that case,
   42.22 +  // the "perm_blk" closure is applied to all outgoing refs in the
   42.23    // permanent generation.  The "so" argument determines which of the roots
   42.24    // the closure is applied to:
   42.25    // "SO_None" does none;
   42.26    // "SO_AllClasses" applies the closure to all entries in the SystemDictionary;
   42.27    // "SO_SystemClasses" to all the "system" classes and loaders;
   42.28 -  // "SO_Symbols" applies the closure to all entries in SymbolsTable;
   42.29    // "SO_Strings" applies the closure to all entries in StringTable;
   42.30    // "SO_CodeCache" applies the closure to all elements of the CodeCache.
   42.31    void process_strong_roots(bool activate_scope,
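
With SO_Symbols gone, the remaining ScanningOption values stay single-bit flags that callers OR together and process_strong_roots() tests with bitwise AND, as in the sharedHeap.cpp hunk above. A small sketch of that dispatch; the handler bodies are placeholders for the real oops_do calls:

    #include <cstdio>

    enum ScanningOption {          // mirrors the renumbered enum above
      SO_None          = 0x0,
      SO_AllClasses    = 0x1,
      SO_SystemClasses = 0x2,
      SO_Strings       = 0x4,
      SO_CodeCache     = 0x8
    };

    void process_strong_roots(int so) {
      if (so & SO_AllClasses) {            // all-classes wins over system-only
        std::puts("SystemDictionary::oops_do(roots)");
      } else if (so & SO_SystemClasses) {
        std::puts("SystemDictionary::always_strong_oops_do(roots)");
      }
      if (so & SO_Strings)
        std::puts("StringTable::oops_do(roots)");
      if (so & SO_CodeCache)
        std::puts("CodeCache::blobs_do(roots)");
    }

    int main() {
      process_strong_roots(SO_AllClasses | SO_Strings);
      return 0;
    }
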
    43.1 --- a/src/share/vm/oops/cpCacheOop.cpp	Wed May 04 19:16:49 2011 -0400
    43.2 +++ b/src/share/vm/oops/cpCacheOop.cpp	Wed May 04 23:10:58 2011 -0400
    43.3 @@ -104,7 +104,7 @@
    43.4    void* result = Atomic::cmpxchg_ptr(f1, f1_addr, NULL);
    43.5    bool success = (result == NULL);
    43.6    if (success) {
    43.7 -    update_barrier_set(f1_addr, f1);
    43.8 +    update_barrier_set((void*) f1_addr, f1);
    43.9    }
   43.10  }
   43.11  
   43.12 @@ -275,21 +275,23 @@
   43.13    return (int) bsm_cache_index;
   43.14  }
   43.15  
   43.16 -void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site,
   43.17 -                                              methodHandle signature_invoker) {
   43.18 +void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, methodHandle signature_invoker) {
   43.19    assert(is_secondary_entry(), "");
   43.20 +  // NOTE: it's important that all other values are set before f1 is
   43.21 +  // set, since some users short-circuit on f1 being set
   43.22 +  // (i.e. non-null), which could let racing threads observe
   43.23 +  // uninitialized values for the other fields (e.g. flags).
   43.24    int param_size = signature_invoker->size_of_parameters();
   43.25    assert(param_size >= 1, "method argument size must include MH.this");
   43.26 -  param_size -= 1;              // do not count MH.this; it is not stacked for invokedynamic
   43.27 -  if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
   43.28 -    // racing threads might be trying to install their own favorites
   43.29 -    set_f1(call_site());
   43.30 -  }
   43.31 +  param_size -= 1;  // do not count MH.this; it is not stacked for invokedynamic
   43.32    bool is_final = true;
   43.33    assert(signature_invoker->is_final_method(), "is_final");
   43.34 -  set_flags(as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size);
   43.35 +  int flags = as_flags(as_TosState(signature_invoker->result_type()), is_final, false, false, false, true) | param_size;
   43.36 +  assert(_flags == 0 || _flags == flags, "flags should be the same");
   43.37 +  set_flags(flags);
   43.38    // do not do set_bytecode on a secondary CP cache entry
   43.39    //set_bytecode_1(Bytecodes::_invokedynamic);
   43.40 +  set_f1_if_null_atomic(call_site());  // This must be the last one to set (see NOTE above)!
   43.41  }
   43.42  
   43.43  
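
The NOTE in set_dynamic_call describes a publish-last protocol: racing readers test f1 for non-null and then trust the other fields, so every other field must be written before f1 becomes visible, and f1 itself is installed once with a compare-and-swap (set_f1_if_null_atomic). A minimal sketch of the pattern with std::atomic, outside the VM; Entry, install and read_flags_if_ready are illustrative names:

    #include <atomic>
    #include <cassert>

    struct Entry {
      int flags = 0;                    // must be valid before f1 publishes
      std::atomic<void*> f1{nullptr};   // readers short-circuit on f1 != null
    };

    // Writer: set auxiliary state first, then CAS-install f1 exactly once.
    void install(Entry& e, int flags, void* target) {
      e.flags = flags;                  // (1) ordinary fields first
      void* expected = nullptr;
      // (2) release ordering publishes (1) before f1 is seen non-null
      e.f1.compare_exchange_strong(expected, target,
                                   std::memory_order_release);
    }

    // Reader: acquire-load f1; if non-null, flags is fully initialized.
    int read_flags_if_ready(Entry& e) {
      if (e.f1.load(std::memory_order_acquire) != nullptr)
        return e.flags;
      return -1;                        // not yet published
    }

    int main() {
      Entry e;
      int dummy;
      install(e, 42, &dummy);
      assert(read_flags_if_ready(e) == 42);
      return 0;
    }
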
    44.1 --- a/src/share/vm/opto/escape.cpp	Wed May 04 19:16:49 2011 -0400
    44.2 +++ b/src/share/vm/opto/escape.cpp	Wed May 04 23:10:58 2011 -0400
    44.3 @@ -1437,7 +1437,10 @@
    44.4  
    44.5    // Update the memory inputs of MemNodes with the value we computed
    44.6    // in Phase 2 and move stores memory users to corresponding memory slices.
    44.7 -#ifdef ASSERT
    44.8 +
     44.9 +  // Disable the memory split verification code until 6984348 is fixed.
   44.10 +  // Currently it produces false negative results since it does not cover all cases.
   44.11 +#if 0 // ifdef ASSERT
   44.12    visited.Reset();
   44.13    Node_Stack old_mems(arena, _compile->unique() >> 2);
   44.14  #endif
   44.15 @@ -1447,7 +1450,7 @@
   44.16        Node *n = ptnode_adr(i)->_node;
   44.17        assert(n != NULL, "sanity");
   44.18        if (n->is_Mem()) {
   44.19 -#ifdef ASSERT
   44.20 +#if 0 // ifdef ASSERT
   44.21          Node* old_mem = n->in(MemNode::Memory);
   44.22          if (!visited.test_set(old_mem->_idx)) {
   44.23            old_mems.push(old_mem, old_mem->outcnt());
   44.24 @@ -1469,13 +1472,13 @@
   44.25        }
   44.26      }
   44.27    }
   44.28 -#ifdef ASSERT
   44.29 +#if 0 // ifdef ASSERT
   44.30    // Verify that memory was split correctly
   44.31    while (old_mems.is_nonempty()) {
   44.32      Node* old_mem = old_mems.node();
   44.33      uint  old_cnt = old_mems.index();
   44.34      old_mems.pop();
   44.35 -    assert(old_cnt = old_mem->outcnt(), "old mem could be lost");
   44.36 +    assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
   44.37    }
   44.38  #endif
   44.39  }
    45.1 --- a/src/share/vm/opto/graphKit.cpp	Wed May 04 19:16:49 2011 -0400
    45.2 +++ b/src/share/vm/opto/graphKit.cpp	Wed May 04 23:10:58 2011 -0400
    45.3 @@ -1033,14 +1033,10 @@
    45.4        iter.reset_to_bci(bci());
    45.5        iter.next();
    45.6        ciMethod* method = iter.get_method(ignore);
    45.7 -      inputs = method->arg_size_no_receiver();
    45.8 -      // Add a receiver argument, maybe:
    45.9 -      if (code != Bytecodes::_invokestatic &&
   45.10 -          code != Bytecodes::_invokedynamic)
   45.11 -        inputs += 1;
   45.12        // (Do not use ciMethod::arg_size(), because
   45.13        // it might be an unloaded method, which doesn't
   45.14        // know whether it is static or not.)
   45.15 +      inputs = method->invoke_arg_size(code);
   45.16        int size = method->return_type()->size();
   45.17        depth = size - inputs;
   45.18      }
   45.19 @@ -2957,8 +2953,7 @@
   45.20  
   45.21  //---------------------------set_output_for_allocation-------------------------
   45.22  Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
   45.23 -                                          const TypeOopPtr* oop_type,
   45.24 -                                          bool raw_mem_only) {
   45.25 +                                          const TypeOopPtr* oop_type) {
   45.26    int rawidx = Compile::AliasIdxRaw;
   45.27    alloc->set_req( TypeFunc::FramePtr, frameptr() );
   45.28    add_safepoint_edges(alloc);
   45.29 @@ -2982,7 +2977,7 @@
   45.30                                                   rawoop)->as_Initialize();
   45.31    assert(alloc->initialization() == init,  "2-way macro link must work");
   45.32    assert(init ->allocation()     == alloc, "2-way macro link must work");
   45.33 -  if (ReduceFieldZeroing && !raw_mem_only) {
   45.34 +  {
   45.35      // Extract memory strands which may participate in the new object's
   45.36      // initialization, and source them from the new InitializeNode.
   45.37      // This will allow us to observe initializations when they occur,
   45.38 @@ -3043,11 +3038,9 @@
   45.39  // the type to a constant.
   45.40  // The optional arguments are for specialized use by intrinsics:
   45.41 //  - If 'extra_slow_test', if not null, is an extra condition for the slow-path.
   45.42 -//  - If 'raw_mem_only', do not cast the result to an oop.
   45.43 //  - If 'return_size_val', report the total object size to the caller.
   45.44  Node* GraphKit::new_instance(Node* klass_node,
   45.45                               Node* extra_slow_test,
   45.46 -                             bool raw_mem_only, // affect only raw memory
   45.47                               Node* *return_size_val) {
   45.48    // Compute size in doublewords
   45.49    // The size is always an integral number of doublewords, represented
   45.50 @@ -3118,7 +3111,7 @@
   45.51                       size, klass_node,
   45.52                       initial_slow_test);
   45.53  
   45.54 -  return set_output_for_allocation(alloc, oop_type, raw_mem_only);
   45.55 +  return set_output_for_allocation(alloc, oop_type);
   45.56  }
   45.57  
   45.58  //-------------------------------new_array-------------------------------------
   45.59 @@ -3128,7 +3121,6 @@
   45.60  Node* GraphKit::new_array(Node* klass_node,     // array klass (maybe variable)
   45.61                            Node* length,         // number of array elements
   45.62                            int   nargs,          // number of arguments to push back for uncommon trap
   45.63 -                          bool raw_mem_only,    // affect only raw memory
   45.64                            Node* *return_size_val) {
   45.65    jint  layout_con = Klass::_lh_neutral_value;
   45.66    Node* layout_val = get_layout_helper(klass_node, layout_con);
   45.67 @@ -3273,7 +3265,7 @@
   45.68      ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
   45.69    }
   45.70  
   45.71 -  Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only);
   45.72 +  Node* javaoop = set_output_for_allocation(alloc, ary_type);
   45.73  
   45.74    // Cast length on remaining path to be as narrow as possible
   45.75    if (map()->find_edge(length) >= 0) {
   45.76 @@ -3462,9 +3454,22 @@
   45.77  
   45.78    // Get the alias_index for raw card-mark memory
   45.79    int adr_type = Compile::AliasIdxRaw;
   45.80 +  Node*   zero = __ ConI(0); // Dirty card value
   45.81 +  BasicType bt = T_BYTE;
   45.82 +
   45.83 +  if (UseCondCardMark) {
   45.84 +    // The classic GC reference write barrier is typically implemented
   45.85 +    // as a store into the global card mark table.  Unfortunately
   45.86 +    // unconditional stores can result in false sharing and excessive
   45.87 +    // coherence traffic as well as false transactional aborts.
   45.88 +    // UseCondCardMark enables MP "polite" conditional card mark
   45.89 +    // stores.  In theory we could relax the load from ctrl() to
   45.90 +    // no_ctrl, but that doesn't buy much latitude.
   45.91 +    Node* card_val = __ load( __ ctrl(), card_adr, TypeInt::BYTE, bt, adr_type);
   45.92 +    __ if_then(card_val, BoolTest::ne, zero);
   45.93 +  }
   45.94 +
   45.95    // Smash zero into card
   45.96 -  Node*   zero = __ ConI(0);
   45.97 -  BasicType bt = T_BYTE;
   45.98    if( !UseConcMarkSweepGC ) {
   45.99      __ store(__ ctrl(), card_adr, zero, bt, adr_type);
  45.100    } else {
  45.101 @@ -3472,6 +3477,10 @@
  45.102      __ storeCM(__ ctrl(), card_adr, zero, oop_store, adr_idx, bt, adr_type);
  45.103    }
  45.104  
  45.105 +  if (UseCondCardMark) {
  45.106 +    __ end_if();
  45.107 +  }
  45.108 +
  45.109    // Final sync IdealKit and GraphKit.
  45.110    final_sync(ideal);
  45.111  }
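
The UseCondCardMark path added above loads the card and skips the store when the card is already dirty, trading an extra load for less write traffic on hot card-table cache lines. A hedged sketch of the two barrier shapes over a flat byte table; the shift, table size and values are illustrative, not the VM's layout:

    #include <cstdint>
    #include <cstring>

    const int CARD_SHIFT = 9;        // 512-byte cards (illustrative)
    const unsigned char DIRTY = 0;   // matches the zero stored above
    const unsigned char CLEAN = 0xff;

    static unsigned char card_table[1024];

    // Unconditional post-barrier: always stores; many threads writing the
    // same card-table line can cause false sharing and coherence traffic.
    void post_barrier(uintptr_t field_addr) {
      card_table[field_addr >> CARD_SHIFT] = DIRTY;
    }

    // Conditional ("MP polite") post-barrier: store only when needed, so a
    // card line that is already dirty stays read-shared across CPUs.
    void post_barrier_cond(uintptr_t field_addr) {
      unsigned char* card = &card_table[field_addr >> CARD_SHIFT];
      if (*card != DIRTY)
        *card = DIRTY;
    }

    int main() {
      std::memset(card_table, CLEAN, sizeof(card_table));
      post_barrier_cond(0x1234);     // first call dirties the card
      post_barrier_cond(0x1234);     // second call is read-only
      return card_table[0x1234 >> CARD_SHIFT] == DIRTY ? 0 : 1;
    }
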
    46.1 --- a/src/share/vm/opto/graphKit.hpp	Wed May 04 19:16:49 2011 -0400
    46.2 +++ b/src/share/vm/opto/graphKit.hpp	Wed May 04 23:10:58 2011 -0400
    46.3 @@ -773,15 +773,13 @@
    46.4  
    46.5    // implementation of object creation
    46.6    Node* set_output_for_allocation(AllocateNode* alloc,
    46.7 -                                  const TypeOopPtr* oop_type,
    46.8 -                                  bool raw_mem_only);
    46.9 +                                  const TypeOopPtr* oop_type);
   46.10    Node* get_layout_helper(Node* klass_node, jint& constant_value);
   46.11    Node* new_instance(Node* klass_node,
   46.12                       Node* slow_test = NULL,
   46.13 -                     bool raw_mem_only = false,
   46.14                       Node* *return_size_val = NULL);
   46.15    Node* new_array(Node* klass_node, Node* count_val, int nargs,
   46.16 -                  bool raw_mem_only = false, Node* *return_size_val = NULL);
   46.17 +                  Node* *return_size_val = NULL);
   46.18  
   46.19    // Handy for making control flow
   46.20    IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    47.1 --- a/src/share/vm/opto/library_call.cpp	Wed May 04 19:16:49 2011 -0400
    47.2 +++ b/src/share/vm/opto/library_call.cpp	Wed May 04 23:10:58 2011 -0400
    47.3 @@ -3527,8 +3527,7 @@
    47.4        Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
    47.5        Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
    47.6  
    47.7 -      const bool raw_mem_only = true;
    47.8 -      newcopy = new_array(klass_node, length, 0, raw_mem_only);
    47.9 +      newcopy = new_array(klass_node, length, 0);
   47.10  
   47.11        // Generate a direct call to the right arraycopy function(s).
   47.12        // We know the copy is disjoint but we might not know if the
   47.13 @@ -4325,8 +4324,6 @@
   47.14  
   47.15      const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
   47.16      int raw_adr_idx = Compile::AliasIdxRaw;
   47.17 -    const bool raw_mem_only = true;
   47.18 -
   47.19  
   47.20      Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
   47.21      if (array_ctl != NULL) {
   47.22 @@ -4335,8 +4332,7 @@
   47.23        set_control(array_ctl);
   47.24        Node* obj_length = load_array_length(obj);
   47.25        Node* obj_size  = NULL;
   47.26 -      Node* alloc_obj = new_array(obj_klass, obj_length, 0,
   47.27 -                                  raw_mem_only, &obj_size);
   47.28 +      Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
   47.29  
   47.30        if (!use_ReduceInitialCardMarks()) {
   47.31          // If it is an oop array, it requires very special treatment,
   47.32 @@ -4408,7 +4404,7 @@
   47.33        // It's an instance, and it passed the slow-path tests.
   47.34        PreserveJVMState pjvms(this);
   47.35        Node* obj_size  = NULL;
   47.36 -      Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size);
   47.37 +      Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
   47.38  
   47.39        copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
   47.40  
    48.1 --- a/src/share/vm/opto/loopopts.cpp	Wed May 04 19:16:49 2011 -0400
    48.2 +++ b/src/share/vm/opto/loopopts.cpp	Wed May 04 23:10:58 2011 -0400
    48.3 @@ -2262,6 +2262,9 @@
    48.4  //                  stmt1
    48.5  //                    |
    48.6  //                    v
    48.7 +//               loop predicate
    48.8 +//                    |
    48.9 +//                    v
   48.10  //                  stmt2 clone
   48.11  //                    |
   48.12  //                    v
   48.13 @@ -2272,9 +2275,6 @@
   48.14  //         :  false   true
   48.15  //         :  |       |
   48.16  //         :  |       v
   48.17 -//         :  | loop predicate
   48.18 -//         :  |       |
   48.19 -//         :  |       v
   48.20  //         :  |    newloop<-----+
   48.21  //         :  |        |        |
   48.22  //         :  |     stmt3 clone |
   48.23 @@ -2330,7 +2330,6 @@
   48.24      }
   48.25    }
   48.26  
   48.27 -  Node* entry = head->in(LoopNode::EntryControl);
   48.28    int dd = dom_depth(head);
   48.29  
   48.30    // Step 1: find cut point
   48.31 @@ -2627,8 +2626,6 @@
   48.32  
   48.33    // Backedge of the surviving new_head (the clone) is original last_peel
   48.34    _igvn.hash_delete(new_head_clone);
   48.35 -  Node* new_entry = move_loop_predicates(entry, new_head_clone->in(LoopNode::EntryControl));
   48.36 -  new_head_clone->set_req(LoopNode::EntryControl, new_entry);
   48.37    new_head_clone->set_req(LoopNode::LoopBackControl, last_peel);
   48.38    _igvn._worklist.push(new_head_clone);
   48.39  
    49.1 --- a/src/share/vm/opto/macro.cpp	Wed May 04 19:16:49 2011 -0400
    49.2 +++ b/src/share/vm/opto/macro.cpp	Wed May 04 23:10:58 2011 -0400
    49.3 @@ -221,9 +221,16 @@
    49.4      Node *shift = p2x->unique_out();
    49.5      Node *addp = shift->unique_out();
    49.6      for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) {
    49.7 -      Node *st = addp->last_out(j);
    49.8 -      assert(st->is_Store(), "store required");
    49.9 -      _igvn.replace_node(st, st->in(MemNode::Memory));
   49.10 +      Node *mem = addp->last_out(j);
   49.11 +      if (UseCondCardMark && mem->is_Load()) {
   49.12 +        assert(mem->Opcode() == Op_LoadB, "unexpected code shape");
    49.13 +        // The load is checking whether the card has been written, so
   49.14 +        // replace it with zero to fold the test.
   49.15 +        _igvn.replace_node(mem, intcon(0));
   49.16 +        continue;
   49.17 +      }
   49.18 +      assert(mem->is_Store(), "store required");
   49.19 +      _igvn.replace_node(mem, mem->in(MemNode::Memory));
   49.20      }
   49.21    } else {
   49.22      // G1 pre/post barriers
    50.1 --- a/src/share/vm/opto/memnode.cpp	Wed May 04 19:16:49 2011 -0400
    50.2 +++ b/src/share/vm/opto/memnode.cpp	Wed May 04 23:10:58 2011 -0400
    50.3 @@ -1259,15 +1259,18 @@
    50.4      return NULL; // Wait stable graph
    50.5    }
    50.6    uint cnt = mem->req();
    50.7 -  for( uint i = 1; i < cnt; i++ ) {
    50.8 +  for (uint i = 1; i < cnt; i++) {
    50.9 +    Node* rc = region->in(i);
   50.10 +    if (rc == NULL || phase->type(rc) == Type::TOP)
   50.11 +      return NULL; // Wait stable graph
   50.12      Node *in = mem->in(i);
   50.13 -    if( in == NULL ) {
   50.14 +    if (in == NULL) {
   50.15        return NULL; // Wait stable graph
   50.16      }
   50.17    }
   50.18    // Check for loop invariant.
   50.19    if (cnt == 3) {
   50.20 -    for( uint i = 1; i < cnt; i++ ) {
   50.21 +    for (uint i = 1; i < cnt; i++) {
   50.22        Node *in = mem->in(i);
   50.23        Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
   50.24        if (m == mem) {
   50.25 @@ -1281,38 +1284,37 @@
   50.26  
   50.27    // Do nothing here if Identity will find a value
   50.28    // (to avoid infinite chain of value phis generation).
   50.29 -  if ( !phase->eqv(this, this->Identity(phase)) )
   50.30 +  if (!phase->eqv(this, this->Identity(phase)))
   50.31      return NULL;
   50.32  
   50.33    // Skip the split if the region dominates some control edge of the address.
   50.34 -  if (cnt == 3 && !MemNode::all_controls_dominate(address, region))
   50.35 +  if (!MemNode::all_controls_dominate(address, region))
   50.36      return NULL;
   50.37  
   50.38    const Type* this_type = this->bottom_type();
   50.39    int this_index  = phase->C->get_alias_index(addr_t);
   50.40    int this_offset = addr_t->offset();
   50.41    int this_iid    = addr_t->is_oopptr()->instance_id();
   50.42 -  int wins = 0;
   50.43    PhaseIterGVN *igvn = phase->is_IterGVN();
   50.44    Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
   50.45 -  for( uint i = 1; i < region->req(); i++ ) {
   50.46 +  for (uint i = 1; i < region->req(); i++) {
   50.47      Node *x;
   50.48      Node* the_clone = NULL;
   50.49 -    if( region->in(i) == phase->C->top() ) {
   50.50 +    if (region->in(i) == phase->C->top()) {
   50.51        x = phase->C->top();      // Dead path?  Use a dead data op
   50.52      } else {
   50.53        x = this->clone();        // Else clone up the data op
   50.54        the_clone = x;            // Remember for possible deletion.
   50.55        // Alter data node to use pre-phi inputs
   50.56 -      if( this->in(0) == region ) {
   50.57 -        x->set_req( 0, region->in(i) );
   50.58 +      if (this->in(0) == region) {
   50.59 +        x->set_req(0, region->in(i));
   50.60        } else {
   50.61 -        x->set_req( 0, NULL );
   50.62 +        x->set_req(0, NULL);
   50.63        }
   50.64 -      for( uint j = 1; j < this->req(); j++ ) {
   50.65 +      for (uint j = 1; j < this->req(); j++) {
   50.66          Node *in = this->in(j);
   50.67 -        if( in->is_Phi() && in->in(0) == region )
   50.68 -          x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone
   50.69 +        if (in->is_Phi() && in->in(0) == region)
   50.70 +          x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
   50.71        }
   50.72      }
   50.73      // Check for a 'win' on some paths
   50.74 @@ -1321,12 +1323,11 @@
   50.75      bool singleton = t->singleton();
   50.76  
   50.77      // See comments in PhaseIdealLoop::split_thru_phi().
   50.78 -    if( singleton && t == Type::TOP ) {
   50.79 +    if (singleton && t == Type::TOP) {
   50.80        singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
   50.81      }
   50.82  
   50.83 -    if( singleton ) {
   50.84 -      wins++;
   50.85 +    if (singleton) {
   50.86        x = igvn->makecon(t);
   50.87      } else {
   50.88        // We now call Identity to try to simplify the cloned node.
   50.89 @@ -1340,13 +1341,11 @@
   50.90        // igvn->type(x) is set to x->Value() already.
   50.91        x->raise_bottom_type(t);
   50.92        Node *y = x->Identity(igvn);
   50.93 -      if( y != x ) {
   50.94 -        wins++;
   50.95 +      if (y != x) {
   50.96          x = y;
   50.97        } else {
   50.98          y = igvn->hash_find(x);
   50.99 -        if( y ) {
  50.100 -          wins++;
  50.101 +        if (y) {
  50.102            x = y;
  50.103          } else {
  50.104            // Else x is a new node we are keeping
  50.105 @@ -1360,13 +1359,9 @@
  50.106        igvn->remove_dead_node(the_clone);
  50.107      phi->set_req(i, x);
  50.108    }
  50.109 -  if( wins > 0 ) {
  50.110 -    // Record Phi
  50.111 -    igvn->register_new_node_with_optimizer(phi);
  50.112 -    return phi;
  50.113 -  }
  50.114 -  igvn->remove_dead_node(phi);
  50.115 -  return NULL;
  50.116 +  // Record Phi
  50.117 +  igvn->register_new_node_with_optimizer(phi);
  50.118 +  return phi;
  50.119  }
  50.120  
  50.121  //------------------------------Ideal------------------------------------------
  50.122 @@ -1677,14 +1672,15 @@
  50.123    // If we are loading from a freshly-allocated object, produce a zero,
  50.124    // if the load is provably beyond the header of the object.
  50.125    // (Also allow a variable load from a fresh array to produce zero.)
  50.126 -  if (ReduceFieldZeroing) {
  50.127 +  const TypeOopPtr *tinst = tp->isa_oopptr();
  50.128 +  bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
  50.129 +  if (ReduceFieldZeroing || is_instance) {
  50.130      Node* value = can_see_stored_value(mem,phase);
  50.131      if (value != NULL && value->is_Con())
  50.132        return value->bottom_type();
  50.133    }
  50.134  
  50.135 -  const TypeOopPtr *tinst = tp->isa_oopptr();
  50.136 -  if (tinst != NULL && tinst->is_known_instance_field()) {
  50.137 +  if (is_instance) {
  50.138      // If we have an instance type and our memory input is the
  50.139      // programs's initial memory state, there is no matching store,
  50.140      // so just return a zero of the appropriate type
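
The change above makes LoadNode::split_through_phi commit unconditionally instead of counting "wins" first: when a load's memory or address is a Phi, the load is cloned into each predecessor and the loaded values are merged, so each path can fold on its own. Roughly, at the source level (an analogy only; the real transform runs on the Ideal graph):

    #include <cstdio>

    // Before the transform: one load placed after the merge of two paths.
    int load_after_merge(bool cond, int* a, int* b) {
      int* p = cond ? a : b;   // "Phi" of two addresses
      return *p;               // a single opaque load
    }

    // After split_through_phi: one load per path, then a "Phi" of values;
    // a path whose load is provably constant (e.g. from a freshly zeroed
    // object) can now constant-fold independently.
    int load_split_through_phi(bool cond, int* a, int* b) {
      return cond ? *a : *b;
    }

    int main() {
      int x = 1, y = 2;
      std::printf("%d %d\n", load_after_merge(true, &x, &y),
                             load_split_through_phi(false, &x, &y));
      return 0;
    }
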
    51.1 --- a/src/share/vm/opto/stringopts.cpp	Wed May 04 19:16:49 2011 -0400
    51.2 +++ b/src/share/vm/opto/stringopts.cpp	Wed May 04 23:10:58 2011 -0400
    51.3 @@ -1172,16 +1172,16 @@
    51.4  
    51.5  Node* PhaseStringOpts::copy_string(GraphKit& kit, Node* str, Node* char_array, Node* start) {
    51.6    Node* string = str;
    51.7 -  Node* offset = kit.make_load(NULL,
    51.8 +  Node* offset = kit.make_load(kit.control(),
    51.9                                 kit.basic_plus_adr(string, string, java_lang_String::offset_offset_in_bytes()),
   51.10                                 TypeInt::INT, T_INT, offset_field_idx);
   51.11 -  Node* count = kit.make_load(NULL,
   51.12 +  Node* count = kit.make_load(kit.control(),
   51.13                                kit.basic_plus_adr(string, string, java_lang_String::count_offset_in_bytes()),
   51.14                                TypeInt::INT, T_INT, count_field_idx);
   51.15    const TypeAryPtr*  value_type = TypeAryPtr::make(TypePtr::NotNull,
   51.16                                                     TypeAry::make(TypeInt::CHAR,TypeInt::POS),
   51.17                                                     ciTypeArrayKlass::make(T_CHAR), true, 0);
   51.18 -  Node* value = kit.make_load(NULL,
   51.19 +  Node* value = kit.make_load(kit.control(),
   51.20                                kit.basic_plus_adr(string, string, java_lang_String::value_offset_in_bytes()),
   51.21                                value_type, T_OBJECT, value_field_idx);
   51.22  
   51.23 @@ -1342,7 +1342,7 @@
   51.24          }
   51.25          //         Node* offset = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, offset_offset),
   51.26          //                                      TypeInt::INT, T_INT, offset_field_idx);
   51.27 -        Node* count = kit.make_load(NULL, kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
   51.28 +        Node* count = kit.make_load(kit.control(), kit.basic_plus_adr(arg, arg, java_lang_String::count_offset_in_bytes()),
   51.29                                      TypeInt::INT, T_INT, count_field_idx);
   51.30          length = __ AddI(length, count);
   51.31          string_sizes->init_req(argi, NULL);
    52.1 --- a/src/share/vm/prims/methodHandleWalk.cpp	Wed May 04 19:16:49 2011 -0400
    52.2 +++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed May 04 23:10:58 2011 -0400
    52.3 @@ -82,10 +82,8 @@
    52.4  
    52.5  void MethodHandleChain::set_last_method(oop target, TRAPS) {
    52.6    _is_last = true;
    52.7 -  klassOop receiver_limit_oop = NULL;
    52.8 -  int flags = 0;
    52.9 -  methodOop m = MethodHandles::decode_method(target, receiver_limit_oop, flags);
   52.10 -  _last_method = methodHandle(THREAD, m);
   52.11 +  KlassHandle receiver_limit; int flags = 0;
   52.12 +  _last_method = MethodHandles::decode_method(target, receiver_limit, flags);
   52.13    if ((flags & MethodHandles::_dmf_has_receiver) == 0)
   52.14      _last_invoke = Bytecodes::_invokestatic;
   52.15    else if ((flags & MethodHandles::_dmf_does_dispatch) == 0)
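
The decode_method family now returns methodHandle/KlassHandle instead of raw methodOop/klassOop (continued in methodHandles.cpp below). The point is GC safety: a raw oop held in a C++ local is invisible to the collector, so if the code reaches a safepoint (the "possible safepoint" annotations in the hunks below) the object may move and the local dangles, while a Handle registers its slot so the collector can update it. A toy sketch of that distinction; Heap, Handle and relocate are illustrative stand-ins, not the VM's handle machinery:

    #include <cassert>
    #include <vector>

    struct Obj { int payload; };

    // Toy moving collector: registered root slots are fixed up on a move.
    struct Heap {
      std::vector<Obj**> roots;
      void relocate(Obj* from, Obj* to) {   // stand-in for GC at a safepoint
        *to = *from;
        for (Obj** slot : roots)
          if (*slot == from) *slot = to;    // handles are updated...
        // ...raw pointers held elsewhere now dangle.
      }
    };

    // A handle is a registered slot the collector can find and fix.
    struct Handle {
      Obj* ptr;
      Handle(Heap& h, Obj* p) : ptr(p) { h.roots.push_back(&ptr); }
      Obj* operator()() const { return ptr; }
    };

    int main() {
      Heap heap;
      Obj a{7}, b{0};
      Obj* raw = &a;                // like holding a bare methodOop
      Handle h(heap, &a);           // like holding a methodHandle
      heap.relocate(&a, &b);        // "safepoint": the object moves
      assert(h()->payload == 7);    // the handle followed the move
      assert(raw == &a);            // the raw pointer still aims at the old slot
      return 0;
    }
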
    53.1 --- a/src/share/vm/prims/methodHandles.cpp	Wed May 04 19:16:49 2011 -0400
    53.2 +++ b/src/share/vm/prims/methodHandles.cpp	Wed May 04 23:10:58 2011 -0400
    53.3 @@ -153,9 +153,9 @@
    53.4  // and local, like parse a data structure.  For speed, such methods work on plain
    53.5  // oops, not handles.  Trapping methods uniformly operate on handles.
    53.6  
    53.7 -methodOop MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
    53.8 -                                         klassOop& receiver_limit_result, int& decode_flags_result) {
    53.9 -  if (vmtarget == NULL)  return NULL;
   53.10 +methodHandle MethodHandles::decode_vmtarget(oop vmtarget, int vmindex, oop mtype,
   53.11 +                                            KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.12 +  if (vmtarget == NULL)  return methodHandle();
   53.13    assert(methodOopDesc::nonvirtual_vtable_index < 0, "encoding");
   53.14    if (vmindex < 0) {
   53.15      // this DMH performs no dispatch; it is directly bound to a methodOop
   53.16 @@ -198,20 +198,20 @@
   53.17  // MemberName and DirectMethodHandle have the same linkage to the JVM internals.
   53.18  // (MemberName is the non-operational name used for queries and setup.)
   53.19  
   53.20 -methodOop MethodHandles::decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
   53.21 +methodHandle MethodHandles::decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.22    oop vmtarget = java_lang_invoke_DirectMethodHandle::vmtarget(mh);
   53.23    int vmindex  = java_lang_invoke_DirectMethodHandle::vmindex(mh);
   53.24    oop mtype    = java_lang_invoke_DirectMethodHandle::type(mh);
   53.25    return decode_vmtarget(vmtarget, vmindex, mtype, receiver_limit_result, decode_flags_result);
   53.26  }
   53.27  
   53.28 -methodOop MethodHandles::decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
   53.29 +methodHandle MethodHandles::decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.30    assert(java_lang_invoke_BoundMethodHandle::is_instance(mh), "");
   53.31    assert(mh->klass() != SystemDictionary::AdapterMethodHandle_klass(), "");
   53.32    for (oop bmh = mh;;) {
   53.33      // Bound MHs can be stacked to bind several arguments.
   53.34      oop target = java_lang_invoke_MethodHandle::vmtarget(bmh);
   53.35 -    if (target == NULL)  return NULL;
   53.36 +    if (target == NULL)  return methodHandle();
   53.37      decode_flags_result |= MethodHandles::_dmf_binds_argument;
   53.38      klassOop tk = target->klass();
   53.39      if (tk == SystemDictionary::BoundMethodHandle_klass()) {
   53.40 @@ -236,14 +236,14 @@
   53.41    }
   53.42  }
   53.43  
   53.44 -methodOop MethodHandles::decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
   53.45 +methodHandle MethodHandles::decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.46    assert(mh->klass() == SystemDictionary::AdapterMethodHandle_klass(), "");
   53.47    for (oop amh = mh;;) {
   53.48      // Adapter MHs can be stacked to convert several arguments.
   53.49      int conv_op = adapter_conversion_op(java_lang_invoke_AdapterMethodHandle::conversion(amh));
   53.50      decode_flags_result |= (_dmf_adapter_lsb << conv_op) & _DMF_ADAPTER_MASK;
   53.51      oop target = java_lang_invoke_MethodHandle::vmtarget(amh);
   53.52 -    if (target == NULL)  return NULL;
   53.53 +    if (target == NULL)  return methodHandle();
   53.54      klassOop tk = target->klass();
   53.55      if (tk == SystemDictionary::AdapterMethodHandle_klass()) {
   53.56        amh = target;
   53.57 @@ -255,8 +255,8 @@
   53.58    }
   53.59  }
   53.60  
   53.61 -methodOop MethodHandles::decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result) {
   53.62 -  if (mh == NULL)  return NULL;
   53.63 +methodHandle MethodHandles::decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.64 +  if (mh == NULL)  return methodHandle();
   53.65    klassOop mhk = mh->klass();
   53.66    assert(java_lang_invoke_MethodHandle::is_subclass(mhk), "must be a MethodHandle");
   53.67    if (mhk == SystemDictionary::DirectMethodHandle_klass()) {
   53.68 @@ -270,7 +270,7 @@
   53.69      return decode_BoundMethodHandle(mh, receiver_limit_result, decode_flags_result);
   53.70    } else {
   53.71      assert(false, "cannot parse this MH");
   53.72 -    return NULL;              // random MH?
   53.73 +    return methodHandle();  // random MH?
   53.74    }
   53.75  }
   53.76  
   53.77 @@ -299,9 +299,9 @@
   53.78  
   53.79  // A trusted party is handing us a cookie to determine a method.
   53.80  // Let's boil it down to the method oop they really want.
   53.81 -methodOop MethodHandles::decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result) {
   53.82 +methodHandle MethodHandles::decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result) {
   53.83    decode_flags_result = 0;
   53.84 -  receiver_limit_result = NULL;
   53.85 +  receiver_limit_result = KlassHandle();
   53.86    klassOop xk = x->klass();
   53.87    if (xk == Universe::methodKlassObj()) {
   53.88      return decode_methodOop((methodOop) x, decode_flags_result);
   53.89 @@ -329,7 +329,7 @@
   53.90      assert(!x->is_method(), "already checked");
   53.91      assert(!java_lang_invoke_MemberName::is_instance(x), "already checked");
   53.92    }
   53.93 -  return NULL;
   53.94 +  return methodHandle();
   53.95  }
   53.96  
   53.97  
   53.98 @@ -389,11 +389,10 @@
   53.99      int offset = instanceKlass::cast(k)->offset_from_fields(slot);
  53.100      init_MemberName(mname_oop, k, accessFlags_from(mods), offset);
  53.101    } else {
  53.102 -    int decode_flags = 0; klassOop receiver_limit = NULL;
  53.103 -    methodOop m = MethodHandles::decode_method(target_oop,
  53.104 -                                               receiver_limit, decode_flags);
  53.105 +    KlassHandle receiver_limit; int decode_flags = 0;
  53.106 +    methodHandle m = MethodHandles::decode_method(target_oop, receiver_limit, decode_flags);
  53.107      bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
  53.108 -    init_MemberName(mname_oop, m, do_dispatch);
  53.109 +    init_MemberName(mname_oop, m(), do_dispatch);
  53.110    }
  53.111  }
  53.112  
  53.113 @@ -423,13 +422,14 @@
  53.114  }
  53.115  
  53.116  
  53.117 -methodOop MethodHandles::decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result) {
  53.118 +methodHandle MethodHandles::decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result) {
  53.119 +  methodHandle empty;
  53.120    int flags  = java_lang_invoke_MemberName::flags(mname);
  53.121 -  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return NULL;  // not invocable
  53.122 +  if ((flags & (IS_METHOD | IS_CONSTRUCTOR)) == 0)  return empty;  // not invocable
  53.123    oop vmtarget = java_lang_invoke_MemberName::vmtarget(mname);
  53.124    int vmindex  = java_lang_invoke_MemberName::vmindex(mname);
  53.125 -  if (vmindex == VM_INDEX_UNINITIALIZED)  return NULL; // not resolved
  53.126 -  methodOop m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
  53.127 +  if (vmindex == VM_INDEX_UNINITIALIZED)  return empty;  // not resolved
  53.128 +  methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit_result, decode_flags_result);
  53.129    oop clazz = java_lang_invoke_MemberName::clazz(mname);
  53.130    if (clazz != NULL && java_lang_Class::is_instance(clazz)) {
  53.131      klassOop klass = java_lang_Class::as_klassOop(clazz);
  53.132 @@ -439,9 +439,7 @@
  53.133  }
  53.134  
  53.135  // convert the external string or reflective type to an internal signature
  53.136 -Symbol* MethodHandles::convert_to_signature(oop type_str,
  53.137 -                                            bool polymorphic,
  53.138 -                                            TRAPS) {
  53.139 +Symbol* MethodHandles::convert_to_signature(oop type_str, bool polymorphic, TRAPS) {
  53.140    if (java_lang_invoke_MethodType::is_instance(type_str)) {
  53.141      return java_lang_invoke_MethodType::as_signature(type_str, polymorphic, CHECK_NULL);
  53.142    } else if (java_lang_Class::is_instance(type_str)) {
  53.143 @@ -474,48 +472,48 @@
  53.144  #endif
  53.145    if (java_lang_invoke_MemberName::vmindex(mname()) != VM_INDEX_UNINITIALIZED)
  53.146      return;  // already resolved
  53.147 -  oop defc_oop = java_lang_invoke_MemberName::clazz(mname());
  53.148 -  oop name_str = java_lang_invoke_MemberName::name(mname());
  53.149 -  oop type_str = java_lang_invoke_MemberName::type(mname());
  53.150 -  int flags    = java_lang_invoke_MemberName::flags(mname());
  53.151 +  Handle defc_oop(THREAD, java_lang_invoke_MemberName::clazz(mname()));
  53.152 +  Handle name_str(THREAD, java_lang_invoke_MemberName::name( mname()));
  53.153 +  Handle type_str(THREAD, java_lang_invoke_MemberName::type( mname()));
  53.154 +  int    flags    =       java_lang_invoke_MemberName::flags(mname());
  53.155  
  53.156 -  if (defc_oop == NULL || name_str == NULL || type_str == NULL) {
  53.157 +  if (defc_oop.is_null() || name_str.is_null() || type_str.is_null()) {
  53.158      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "nothing to resolve");
  53.159    }
  53.160 -  klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop);
  53.161 -  defc_oop = NULL;  // safety
  53.162 -  if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
  53.163 -  if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
  53.164 -    if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
  53.165 -    defc_klassOop = SystemDictionary::Object_klass();
  53.166 +
  53.167 +  instanceKlassHandle defc;
  53.168 +  {
  53.169 +    klassOop defc_klassOop = java_lang_Class::as_klassOop(defc_oop());
  53.170 +    if (defc_klassOop == NULL)  return;  // a primitive; no resolution possible
  53.171 +    if (!Klass::cast(defc_klassOop)->oop_is_instance()) {
  53.172 +      if (!Klass::cast(defc_klassOop)->oop_is_array())  return;
  53.173 +      defc_klassOop = SystemDictionary::Object_klass();
  53.174 +    }
  53.175 +    defc = instanceKlassHandle(THREAD, defc_klassOop);
  53.176    }
  53.177 -  instanceKlassHandle defc(THREAD, defc_klassOop);
  53.178 -  defc_klassOop = NULL;  // safety
  53.179    if (defc.is_null()) {
  53.180      THROW_MSG(vmSymbols::java_lang_InternalError(), "primitive class");
  53.181    }
  53.182 -  defc->link_class(CHECK);
  53.183 +  defc->link_class(CHECK);  // possible safepoint
  53.184  
  53.185    // convert the external string name to an internal symbol
  53.186 -  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str);
  53.187 +  TempNewSymbol name = java_lang_String::as_symbol_or_null(name_str());
  53.188    if (name == NULL)  return;  // no such name
  53.189 -  name_str = NULL;  // safety
  53.190  
  53.191    Handle polymorphic_method_type;
  53.192    bool polymorphic_signature = false;
  53.193    if ((flags & ALL_KINDS) == IS_METHOD &&
  53.194        (defc() == SystemDictionary::MethodHandle_klass() &&
  53.195 -       methodOopDesc::is_method_handle_invoke_name(name)))
  53.196 +       methodOopDesc::is_method_handle_invoke_name(name))) {
  53.197      polymorphic_signature = true;
  53.198 +  }
  53.199  
  53.200    // convert the external string or reflective type to an internal signature
  53.201 -  TempNewSymbol type = convert_to_signature(type_str, polymorphic_signature, CHECK);
  53.202 -  if (java_lang_invoke_MethodType::is_instance(type_str) && polymorphic_signature) {
  53.203 -    polymorphic_method_type = Handle(THREAD, type_str);  //preserve exactly
  53.204 +  TempNewSymbol type = convert_to_signature(type_str(), polymorphic_signature, CHECK);
  53.205 +  if (java_lang_invoke_MethodType::is_instance(type_str()) && polymorphic_signature) {
  53.206 +    polymorphic_method_type = type_str;  // preserve exactly
  53.207    }
  53.208 -
  53.209    if (type == NULL)  return;  // no such signature exists in the VM
  53.210 -  type_str = NULL; // safety
  53.211  
  53.212    // Time to do the lookup.
  53.213    switch (flags & ALL_KINDS) {
  53.214 @@ -560,8 +558,8 @@
  53.215        java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
  53.216        java_lang_invoke_MemberName::set_vmindex(mname(),  vmindex);
  53.217        java_lang_invoke_MemberName::set_modifiers(mname(), mods);
  53.218 -      DEBUG_ONLY(int junk; klassOop junk2);
  53.219 -      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
  53.220 +      DEBUG_ONLY(KlassHandle junk1; int junk2);
  53.221 +      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
  53.222               "properly stored for later decoding");
  53.223        return;
  53.224      }
  53.225 @@ -589,8 +587,8 @@
  53.226        java_lang_invoke_MemberName::set_vmtarget(mname(), vmtarget);
  53.227        java_lang_invoke_MemberName::set_vmindex(mname(),  vmindex);
  53.228        java_lang_invoke_MemberName::set_modifiers(mname(), mods);
  53.229 -      DEBUG_ONLY(int junk; klassOop junk2);
  53.230 -      assert(decode_MemberName(mname(), junk2, junk) == result.resolved_method()(),
  53.231 +      DEBUG_ONLY(KlassHandle junk1; int junk2);
  53.232 +      assert(decode_MemberName(mname(), junk1, junk2) == result.resolved_method(),
  53.233               "properly stored for later decoding");
  53.234        return;
  53.235      }
  53.236 @@ -677,16 +675,14 @@
  53.237    case IS_METHOD:
  53.238    case IS_CONSTRUCTOR:
  53.239      {
  53.240 -      klassOop receiver_limit = NULL;
  53.241 -      int      decode_flags   = 0;
  53.242 -      methodHandle m(THREAD, decode_vmtarget(vmtarget, vmindex, NULL,
  53.243 -                                             receiver_limit, decode_flags));
  53.244 +      KlassHandle receiver_limit; int decode_flags = 0;
  53.245 +      methodHandle m = decode_vmtarget(vmtarget, vmindex, NULL, receiver_limit, decode_flags);
  53.246        if (m.is_null())  break;
  53.247        if (!have_defc) {
  53.248          klassOop defc = m->method_holder();
  53.249 -        if (receiver_limit != NULL && receiver_limit != defc
  53.250 -            && Klass::cast(receiver_limit)->is_subtype_of(defc))
  53.251 -          defc = receiver_limit;
  53.252 +        if (receiver_limit.not_null() && receiver_limit() != defc
  53.253 +            && Klass::cast(receiver_limit())->is_subtype_of(defc))
  53.254 +          defc = receiver_limit();
  53.255          java_lang_invoke_MemberName::set_clazz(mname(), Klass::cast(defc)->java_mirror());
  53.256        }
  53.257        if (!have_name) {
  53.258 @@ -884,10 +880,9 @@
  53.259    // - AMH can have methodOop for static invoke with bound receiver
  53.260    // - DMH can have methodOop for static invoke (on variable receiver)
  53.261    // - DMH can have klassOop for dispatched (non-static) invoke
  53.262 -  klassOop receiver_limit = NULL;
  53.263 -  int decode_flags = 0;
  53.264 -  methodOop m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
  53.265 -  if (m == NULL)  return NULL;
  53.266 +  KlassHandle receiver_limit; int decode_flags = 0;
  53.267 +  methodHandle m = decode_MethodHandle(mh(), receiver_limit, decode_flags);
  53.268 +  if (m.is_null())  return NULL;
  53.269    switch (format) {
  53.270    case ETF_REFLECT_METHOD:
  53.271      // same as jni_ToReflectedMethod:
  53.272 @@ -903,10 +898,10 @@
  53.273        if (SystemDictionary::MemberName_klass() == NULL)  break;
  53.274        instanceKlassHandle mname_klass(THREAD, SystemDictionary::MemberName_klass());
  53.275        mname_klass->initialize(CHECK_NULL);
  53.276 -      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);
  53.277 +      Handle mname = mname_klass->allocate_instance_handle(CHECK_NULL);  // possible safepoint
  53.278        java_lang_invoke_MemberName::set_vmindex(mname(), VM_INDEX_UNINITIALIZED);
  53.279        bool do_dispatch = ((decode_flags & MethodHandles::_dmf_does_dispatch) != 0);
  53.280 -      init_MemberName(mname(), m, do_dispatch);
  53.281 +      init_MemberName(mname(), m(), do_dispatch);
  53.282        expand_MemberName(mname, 0, CHECK_NULL);
  53.283        return mname();
  53.284      }
  53.285 @@ -1459,8 +1454,8 @@
  53.286    // that links the interpreter calls to the method.  We need the same
  53.287    // bits, and will use the same calling sequence code.
  53.288  
  53.289 -  int vmindex = methodOopDesc::garbage_vtable_index;
  53.290 -  oop vmtarget = NULL;
  53.291 +  int    vmindex = methodOopDesc::garbage_vtable_index;
  53.292 +  Handle vmtarget;
  53.293  
  53.294    instanceKlass::cast(m->method_holder())->link_class(CHECK);
  53.295  
  53.296 @@ -1478,7 +1473,7 @@
  53.297    } else if (!do_dispatch || m->can_be_statically_bound()) {
  53.298      // We are simulating an invokestatic or invokespecial instruction.
  53.299      // Set up the method pointer, just like ConstantPoolCacheEntry::set_method().
  53.300 -    vmtarget = m();
  53.301 +    vmtarget = m;
  53.302      // this does not help dispatch, but it will make it possible to parse this MH:
  53.303      vmindex  = methodOopDesc::nonvirtual_vtable_index;
  53.304      assert(vmindex < 0, "(>=0) == do_dispatch");
  53.305 @@ -1490,7 +1485,7 @@
  53.306        // For a DMH, it is done now, when the handle is created.
  53.307        Klass* k = Klass::cast(m->method_holder());
  53.308        if (k->should_be_initialized()) {
  53.309 -        k->initialize(CHECK);
  53.310 +        k->initialize(CHECK);  // possible safepoint
  53.311        }
  53.312      }
  53.313    } else {
  53.314 @@ -1504,10 +1499,10 @@
  53.315  
  53.316    if (me == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
  53.317  
  53.318 -  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget);
  53.319 -  java_lang_invoke_DirectMethodHandle::set_vmindex(mh(),  vmindex);
  53.320 -  DEBUG_ONLY(int flags; klassOop rlimit);
  53.321 -  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m(),
  53.322 +  java_lang_invoke_DirectMethodHandle::set_vmtarget(mh(), vmtarget());
  53.323 +  java_lang_invoke_DirectMethodHandle::set_vmindex( mh(), vmindex);
  53.324 +  DEBUG_ONLY(KlassHandle rlimit; int flags);
  53.325 +  assert(MethodHandles::decode_method(mh(), rlimit, flags) == m,
  53.326           "properly stored for later decoding");
  53.327    DEBUG_ONLY(bool actual_do_dispatch = ((flags & _dmf_does_dispatch) != 0));
  53.328    assert(!(actual_do_dispatch && !do_dispatch),
  53.329 @@ -1523,10 +1518,13 @@
  53.330                                                             methodHandle m,
  53.331                                                             TRAPS) {
  53.332    // Verify type.
  53.333 -  oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
  53.334 +  KlassHandle bound_recv_type;
  53.335 +  {
  53.336 +    oop receiver = java_lang_invoke_BoundMethodHandle::argument(mh());
  53.337 +    if (receiver != NULL)
  53.338 +      bound_recv_type = KlassHandle(THREAD, receiver->klass());
  53.339 +  }
  53.340    Handle mtype(THREAD, java_lang_invoke_MethodHandle::type(mh()));
  53.341 -  KlassHandle bound_recv_type;
  53.342 -  if (receiver != NULL)  bound_recv_type = KlassHandle(THREAD, receiver->klass());
  53.343    verify_method_type(m, mtype, true, bound_recv_type, CHECK);
  53.344  
  53.345    int receiver_pos = m->size_of_parameters() - 1;
  53.346 @@ -1573,8 +1571,8 @@
  53.347  
  53.348    java_lang_invoke_BoundMethodHandle::set_vmtarget(mh(), m());
  53.349  
  53.350 -  DEBUG_ONLY(int junk; klassOop junk2);
  53.351 -  assert(MethodHandles::decode_method(mh(), junk2, junk) == m(), "properly stored for later decoding");
  53.352 +  DEBUG_ONLY(KlassHandle junk1; int junk2);
  53.353 +  assert(MethodHandles::decode_method(mh(), junk1, junk2) == m, "properly stored for later decoding");
  53.354    assert(decode_MethodHandle_stack_pushes(mh()) == 1, "BMH pushes one stack slot");
  53.355  
  53.356    // Done!
  53.357 @@ -1682,8 +1680,11 @@
  53.358    }
  53.359  
  53.360    // Get bound type and required slots.
  53.361 -  oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
  53.362 -  BasicType ptype = java_lang_Class::as_BasicType(ptype_oop);
  53.363 +  BasicType ptype;
  53.364 +  {
  53.365 +    oop ptype_oop = java_lang_invoke_MethodType::ptype(java_lang_invoke_MethodHandle::type(target()), argnum);
  53.366 +    ptype = java_lang_Class::as_BasicType(ptype_oop);
  53.367 +  }
  53.368    int slots_pushed = type2size[ptype];
  53.369  
  53.370    // If (a) the target is a direct non-dispatched method handle,
  53.371 @@ -1694,13 +1695,12 @@
  53.372    if (OptimizeMethodHandles &&
  53.373        target->klass() == SystemDictionary::DirectMethodHandle_klass() &&
  53.374        (argnum == 0 || java_lang_invoke_DirectMethodHandle::vmindex(target()) < 0)) {
  53.375 -    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
  53.376 -    methodHandle m(THREAD, decode_method(target(), receiver_limit_oop, decode_flags));
  53.377 +    KlassHandle receiver_limit; int decode_flags = 0;
  53.378 +    methodHandle m = decode_method(target(), receiver_limit, decode_flags);
  53.379      if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "DMH failed to decode"); }
  53.380      DEBUG_ONLY(int m_vmslots = m->size_of_parameters() - slots_pushed); // pos. of 1st arg.
  53.381      assert(java_lang_invoke_BoundMethodHandle::vmslots(mh()) == m_vmslots, "type w/ m sig");
  53.382      if (argnum == 0 && (decode_flags & _dmf_has_receiver) != 0) {
  53.383 -      KlassHandle receiver_limit(THREAD, receiver_limit_oop);
  53.384        init_BoundMethodHandle_with_receiver(mh, m,
  53.385                                             receiver_limit, decode_flags,
  53.386                                             CHECK);
  53.387 @@ -2019,7 +2019,6 @@
  53.388  }
  53.389  
  53.390  void MethodHandles::init_AdapterMethodHandle(Handle mh, Handle target, int argnum, TRAPS) {
  53.391 -  oop  argument   = java_lang_invoke_AdapterMethodHandle::argument(mh());
  53.392    int  argslot    = java_lang_invoke_AdapterMethodHandle::vmargslot(mh());
  53.393    jint conversion = java_lang_invoke_AdapterMethodHandle::conversion(mh());
  53.394    jint conv_op    = adapter_conversion_op(conversion);
  53.395 @@ -2215,18 +2214,14 @@
  53.396  
  53.397    // which method are we really talking about?
  53.398    if (target_jh == NULL) { THROW(vmSymbols::java_lang_InternalError()); }
  53.399 -  oop target_oop = JNIHandles::resolve_non_null(target_jh);
  53.400 -  if (java_lang_invoke_MemberName::is_instance(target_oop) &&
  53.401 -      java_lang_invoke_MemberName::vmindex(target_oop) == VM_INDEX_UNINITIALIZED) {
  53.402 -    Handle mname(THREAD, target_oop);
  53.403 -    MethodHandles::resolve_MemberName(mname, CHECK);
  53.404 -    target_oop = mname(); // in case of GC
  53.405 +  Handle target(THREAD, JNIHandles::resolve_non_null(target_jh));
  53.406 +  if (java_lang_invoke_MemberName::is_instance(target()) &&
  53.407 +      java_lang_invoke_MemberName::vmindex(target()) == VM_INDEX_UNINITIALIZED) {
  53.408 +    MethodHandles::resolve_MemberName(target, CHECK);
  53.409    }
  53.410  
  53.411 -  int decode_flags = 0; klassOop receiver_limit = NULL;
  53.412 -  methodHandle m(THREAD,
  53.413 -                 MethodHandles::decode_method(target_oop,
  53.414 -                                              receiver_limit, decode_flags));
  53.415 +  KlassHandle receiver_limit; int decode_flags = 0;
  53.416 +  methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
  53.417    if (m.is_null()) { THROW_MSG(vmSymbols::java_lang_InternalError(), "no such method"); }
  53.418  
  53.419    // The trusted Java code that calls this method should already have performed
  53.420 @@ -2284,12 +2279,8 @@
  53.421      // Target object is a reflective method.  (%%% Do we need this alternate path?)
  53.422      Untested("init_BMH of non-MH");
  53.423      if (argnum != 0) { THROW(vmSymbols::java_lang_InternalError()); }
  53.424 -    int decode_flags = 0; klassOop receiver_limit_oop = NULL;
  53.425 -    methodHandle m(THREAD,
  53.426 -                   MethodHandles::decode_method(target(),
  53.427 -                                                receiver_limit_oop,
  53.428 -                                                decode_flags));
  53.429 -    KlassHandle receiver_limit(THREAD, receiver_limit_oop);
  53.430 +    KlassHandle receiver_limit; int decode_flags = 0;
  53.431 +    methodHandle m = MethodHandles::decode_method(target(), receiver_limit, decode_flags);
  53.432      MethodHandles::init_BoundMethodHandle_with_receiver(mh, m,
  53.433                                                         receiver_limit,
  53.434                                                         decode_flags,
  53.435 @@ -2424,12 +2415,12 @@
  53.436  #ifndef PRODUCT
  53.437    if (which >= 0 && which < con_value_count) {
  53.438      int con = con_values[which];
  53.439 -    objArrayOop box = (objArrayOop) JNIHandles::resolve(box_jh);
  53.440 -    if (box != NULL && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
  53.441 +    objArrayHandle box(THREAD, (objArrayOop) JNIHandles::resolve(box_jh));
  53.442 +    if (box.not_null() && box->klass() == Universe::objectArrayKlassObj() && box->length() > 0) {
  53.443        const char* str = &con_names[0];
  53.444        for (int i = 0; i < which; i++)
  53.445          str += strlen(str) + 1;   // skip name and null
  53.446 -      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);
  53.447 +      oop name = java_lang_String::create_oop_from_str(str, CHECK_0);  // possible safepoint
  53.448        box->obj_at_put(0, name);
  53.449      }
  53.450      return con;
  53.451 @@ -2486,10 +2477,10 @@
  53.452                                 jclass clazz_jh, jstring name_jh, jstring sig_jh,
  53.453                                 int mflags, jclass caller_jh, jint skip, jobjectArray results_jh)) {
  53.454    if (clazz_jh == NULL || results_jh == NULL)  return -1;
  53.455 -  klassOop k_oop = java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh));
  53.456 +  KlassHandle k(THREAD, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(clazz_jh)));
  53.457  
  53.458 -  objArrayOop results = (objArrayOop) JNIHandles::resolve(results_jh);
  53.459 -  if (results == NULL || !results->is_objArray())       return -1;
  53.460 +  objArrayHandle results(THREAD, (objArrayOop) JNIHandles::resolve(results_jh));
  53.461 +  if (results.is_null() || !results->is_objArray())  return -1;
  53.462  
  53.463    TempNewSymbol name = NULL;
  53.464    TempNewSymbol sig = NULL;
  53.465 @@ -2502,20 +2493,20 @@
  53.466      if (sig == NULL)  return 0; // a match is not possible
  53.467    }
  53.468  
  53.469 -  klassOop caller = NULL;
  53.470 +  KlassHandle caller;
  53.471    if (caller_jh != NULL) {
  53.472      oop caller_oop = JNIHandles::resolve_non_null(caller_jh);
  53.473      if (!java_lang_Class::is_instance(caller_oop))  return -1;
  53.474 -    caller = java_lang_Class::as_klassOop(caller_oop);
  53.475 +    caller = KlassHandle(THREAD, java_lang_Class::as_klassOop(caller_oop));
  53.476    }
  53.477  
  53.478 -  if (name != NULL && sig != NULL && results != NULL) {
  53.479 +  if (name != NULL && sig != NULL && results.not_null()) {
  53.480      // try a direct resolve
  53.481      // %%% TO DO
  53.482    }
  53.483  
  53.484 -  int res = MethodHandles::find_MemberNames(k_oop, name, sig, mflags,
  53.485 -                                            caller, skip, results);
  53.486 +  int res = MethodHandles::find_MemberNames(k(), name, sig, mflags,
  53.487 +                                            caller(), skip, results());
  53.488    // TO DO: expand at least some of the MemberNames, to avoid massive callbacks
  53.489    return res;
  53.490  }
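
The hunks above share one theme: raw klassOop/methodOop locals become KlassHandle/methodHandle, the remaining raw oops are confined to tight scopes, and allocation and initialization calls gain "possible safepoint" comments. A raw oop held across a safepoint can dangle once a moving collector relocates the object; a handle is an indirection registered with the runtime, which the collector updates when it moves things. The toy program below illustrates that failure mode and the handle fix. It is a self-contained sketch: Obj, g_roots, and possible_safepoint() are invented stand-ins, not HotSpot's HandleArea machinery.

    // Illustrative sketch only, not HotSpot code: why a raw oop held across
    // a "possible safepoint" is unsafe, and why a handle survives it.
    #include <cassert>

    struct Obj { int payload; };

    // Toy root table standing in for the thread's handle area: the collector
    // rewrites entries here when it moves objects.
    static Obj* g_roots[16];
    static int  g_nroots = 0;

    struct Handle {                        // analogous to methodHandle/KlassHandle
      int slot;
      explicit Handle(Obj* o) : slot(g_nroots++) { g_roots[slot] = o; }
      Obj* operator()() const { return g_roots[slot]; }  // HotSpot-style h()
    };

    // Toy "safepoint": a copying collector moves the object and fixes up roots.
    static Obj* possible_safepoint(Obj* old_loc) {
      static Obj to_space[16];
      static int next = 0;
      Obj* new_loc = &to_space[next++];
      *new_loc = *old_loc;
      old_loc->payload = -1;               // scribble over the from-space copy
      for (int i = 0; i < g_nroots; i++)
        if (g_roots[i] == old_loc) g_roots[i] = new_loc;
      return new_loc;
    }

    int main() {
      Obj from_space = { 42 };
      Obj* raw = &from_space;              // like a bare methodOop local
      Handle h(raw);                       // like methodHandle m(THREAD, raw)
      possible_safepoint(raw);             // e.g. allocation or class init
      assert(h()->payload == 42);          // the handle still sees the live copy
      assert(raw->payload != 42);          // the raw pointer is now stale
      return 0;
    }

This is the bug class the rewrite removes: decode_method() and friends now hand back methodHandle results and fill in KlassHandle out-parameters, so callers never hold bare oops over code that can allocate or initialize a class.
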
    54.1 --- a/src/share/vm/prims/methodHandles.hpp	Wed May 04 19:16:49 2011 -0400
    54.2 +++ b/src/share/vm/prims/methodHandles.hpp	Wed May 04 23:10:58 2011 -0400
    54.3 @@ -265,13 +265,13 @@
    54.4    static inline address from_interpreted_entry(EntryKind ek);
    54.5  
    54.6    // helpers for decode_method.
    54.7 -  static methodOop decode_methodOop(methodOop m, int& decode_flags_result);
    54.8 -  static methodOop decode_vmtarget(oop vmtarget, int vmindex, oop mtype, klassOop& receiver_limit_result, int& decode_flags_result);
    54.9 -  static methodOop decode_MemberName(oop mname, klassOop& receiver_limit_result, int& decode_flags_result);
   54.10 -  static methodOop decode_MethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
   54.11 -  static methodOop decode_DirectMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
   54.12 -  static methodOop decode_BoundMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
   54.13 -  static methodOop decode_AdapterMethodHandle(oop mh, klassOop& receiver_limit_result, int& decode_flags_result);
   54.14 +  static methodOop    decode_methodOop(methodOop m, int& decode_flags_result);
   54.15 +  static methodHandle decode_vmtarget(oop vmtarget, int vmindex, oop mtype, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.16 +  static methodHandle decode_MemberName(oop mname, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.17 +  static methodHandle decode_MethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.18 +  static methodHandle decode_DirectMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.19 +  static methodHandle decode_BoundMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.20 +  static methodHandle decode_AdapterMethodHandle(oop mh, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.21  
   54.22    // Find out how many stack slots an mh pushes or pops.
   54.23    // The result is *not* reported as a multiple of stack_move_unit();
   54.24 @@ -317,7 +317,7 @@
   54.25      _dmf_adapter_lsb    = 0x20,
   54.26      _DMF_ADAPTER_MASK   = (_dmf_adapter_lsb << CONV_OP_LIMIT) - _dmf_adapter_lsb
   54.27    };
   54.28 -  static methodOop decode_method(oop x, klassOop& receiver_limit_result, int& decode_flags_result);
   54.29 +  static methodHandle decode_method(oop x, KlassHandle& receiver_limit_result, int& decode_flags_result);
   54.30    enum {
   54.31      // format of query to getConstant:
   54.32      GC_JVM_PUSH_LIMIT = 0,
    55.1 --- a/src/share/vm/runtime/globals.hpp	Wed May 04 19:16:49 2011 -0400
    55.2 +++ b/src/share/vm/runtime/globals.hpp	Wed May 04 23:10:58 2011 -0400
    55.3 @@ -620,6 +620,9 @@
    55.4    product(bool, UseSSE42Intrinsics, false,                                  \
    55.5            "SSE4.2 versions of intrinsics")                                  \
    55.6                                                                              \
    55.7 +  product(bool, UseCondCardMark, false,                                     \
    55.8 +          "Check for already marked card before updating card table")       \
    55.9 +                                                                            \
   55.10    develop(bool, TraceCallFixup, false,                                      \
   55.11            "traces all call fixups")                                         \
   55.12                                                                              \
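
The new UseCondCardMark flag makes the post-write card-table barrier load the card and skip the store when the card is already dirty. Unconditional dirtying means every reference store writes to the card table, so threads whose objects map to nearby cards keep invalidating each other's cache lines; the read-check trades a load and a branch for far fewer writes to shared lines. Below is a minimal standalone sketch of the idea, not the barrier the JIT actually emits; the table size and address mapping are illustrative (HotSpot uses 512-byte cards, with 0 meaning dirty).

    // Minimal standalone sketch of conditional card marking, the idea behind
    // UseCondCardMark -- not the barrier HotSpot's compilers actually emit.
    #include <cstdint>
    #include <cstring>

    const int     kCardShift = 9;          // 512-byte cards, as in HotSpot
    const uint8_t kDirty     = 0;
    const uint8_t kClean     = 0xff;

    static uint8_t card_table[1 << 16];

    inline void post_write_barrier(void* field_addr, bool cond_card_mark) {
      size_t card = reinterpret_cast<uintptr_t>(field_addr) >> kCardShift;
      card &= (sizeof(card_table) - 1);    // toy address-to-card mapping
      if (cond_card_mark) {
        // Read before write: skip the store when the card is already dirty,
        // avoiding repeated writes to a hot, shared card-table cache line.
        if (card_table[card] != kDirty)
          card_table[card] = kDirty;
      } else {
        card_table[card] = kDirty;         // classic unconditional barrier
      }
    }

    int main() {
      memset(card_table, kClean, sizeof(card_table));
      int heap_slot = 0;
      post_write_barrier(&heap_slot, true);
      post_write_barrier(&heap_slot, true);  // second store elided by the check
      return 0;
    }
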
    56.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Wed May 04 19:16:49 2011 -0400
    56.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed May 04 23:10:58 2011 -0400
    56.3 @@ -1721,14 +1721,14 @@
    56.4          targetArity = ArgumentCount(target->signature()).size();
    56.5        }
    56.6      }
    56.7 -    klassOop kignore; int dmf_flags = 0;
    56.8 -    methodOop actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
    56.9 +    KlassHandle kignore; int dmf_flags = 0;
   56.10 +    methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
   56.11      if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
   56.12                         MethodHandles::_dmf_does_dispatch |
   56.13                         MethodHandles::_dmf_from_interface)) != 0)
   56.14 -      actual_method = NULL;  // MH does extra binds, drops, etc.
   56.15 +      actual_method = methodHandle();  // MH does extra binds, drops, etc.
   56.16      bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
   56.17 -    if (actual_method != NULL) {
   56.18 +    if (actual_method.not_null()) {
   56.19        mhName = actual_method->signature()->as_C_string();
   56.20        mhArity = ArgumentCount(actual_method->signature()).size();
   56.21        if (!actual_method->is_static())  mhArity += 1;
    57.1 --- a/src/share/vm/runtime/vmThread.cpp	Wed May 04 19:16:49 2011 -0400
    57.2 +++ b/src/share/vm/runtime/vmThread.cpp	Wed May 04 23:10:58 2011 -0400
    57.3 @@ -291,7 +291,9 @@
    57.4      // Among other things, this ensures that Eden top is correct.
    57.5      Universe::heap()->prepare_for_verify();
    57.6      os::check_heap();
    57.7 -    Universe::verify(true, true); // Silent verification to not polute normal output
    57.8 +    // Silent verification so as not to pollute normal output,
    57.9 +    // unless we really asked for it.
   57.10 +    Universe::verify(true, !(PrintGCDetails || Verbose));
   57.11    }
   57.12  
   57.13    CompileBroker::set_should_block();
    58.1 --- a/src/share/vm/services/g1MemoryPool.cpp	Wed May 04 19:16:49 2011 -0400
    58.2 +++ b/src/share/vm/services/g1MemoryPool.cpp	Wed May 04 23:10:58 2011 -0400
    58.3 @@ -1,5 +1,5 @@
    58.4  /*
    58.5 - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
    58.6 + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
    58.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    58.8   *
    58.9   * This code is free software; you can redistribute it and/or modify it
   58.10 @@ -34,10 +34,10 @@
   58.11                                       size_t init_size,
   58.12                                       bool support_usage_threshold) :
   58.13    _g1h(g1h), CollectedMemoryPool(name,
   58.14 -                                 MemoryPool::Heap,
   58.15 -                                 init_size,
   58.16 -                                 undefined_max(),
   58.17 -                                 support_usage_threshold) {
   58.18 +                                   MemoryPool::Heap,
   58.19 +                                   init_size,
   58.20 +                                   undefined_max(),
   58.21 +                                   support_usage_threshold) {
   58.22    assert(UseG1GC, "sanity");
   58.23  }
   58.24  
   58.25 @@ -48,44 +48,27 @@
   58.26  
   58.27  // See the comment at the top of g1MemoryPool.hpp
   58.28  size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
   58.29 -  size_t young_list_length = g1h->young_list()->length();
   58.30 -  size_t eden_used = young_list_length * HeapRegion::GrainBytes;
   58.31 -  size_t survivor_used = survivor_space_used(g1h);
   58.32 -  eden_used = subtract_up_to_zero(eden_used, survivor_used);
   58.33 -  return eden_used;
   58.34 +  return g1h->g1mm()->eden_space_used();
   58.35  }
   58.36  
   58.37  // See the comment at the top of g1MemoryPool.hpp
   58.38  size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
   58.39 -  return MAX2(survivor_space_used(g1h), (size_t) HeapRegion::GrainBytes);
   58.40 +  return g1h->g1mm()->survivor_space_committed();
   58.41  }
   58.42  
   58.43  // See the comment at the top of g1MemoryPool.hpp
   58.44  size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
   58.45 -  size_t survivor_num = g1h->g1_policy()->recorded_survivor_regions();
   58.46 -  size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
   58.47 -  return survivor_used;
   58.48 +  return g1h->g1mm()->survivor_space_used();
   58.49  }
   58.50  
   58.51  // See the comment at the top of g1MemoryPool.hpp
   58.52  size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
   58.53 -  size_t committed = overall_committed(g1h);
   58.54 -  size_t eden_committed = eden_space_committed(g1h);
   58.55 -  size_t survivor_committed = survivor_space_committed(g1h);
   58.56 -  committed = subtract_up_to_zero(committed, eden_committed);
   58.57 -  committed = subtract_up_to_zero(committed, survivor_committed);
   58.58 -  committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
   58.59 -  return committed;
   58.60 +  return g1h->g1mm()->old_space_committed();
   58.61  }
   58.62  
   58.63  // See the comment at the top of g1MemoryPool.hpp
   58.64  size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
   58.65 -  size_t used = overall_used(g1h);
   58.66 -  size_t eden_used = eden_space_used(g1h);
   58.67 -  size_t survivor_used = survivor_space_used(g1h);
   58.68 -  used = subtract_up_to_zero(used, eden_used);
   58.69 -  used = subtract_up_to_zero(used, survivor_used);
   58.70 -  return used;
   58.71 +  return g1h->g1mm()->old_space_used();
   58.72  }
   58.73  
   58.74  G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
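
The rewritten getters no longer derive the eden/survivor/old sizes from raw G1 inputs at call time; they read values that the monitoring support object reached via g1h->g1mm() computes and caches in one place, which is why the local arithmetic and its underflow guards disappear from this file. The sketch below uses invented names (MonitoringSupport, update()) rather than the real G1MonitoringSupport interface; it shows the design point: the GC recomputes all three sizes together at well-defined moments, so asynchronous management readers can no longer combine samples taken at different times.

    // Illustrative sketch, not the real G1MonitoringSupport API: derive all
    // pool sizes from one consistent snapshot instead of recomputing each
    // independently from inputs that may be sampled at different moments.
    #include <mutex>
    #include <cstddef>

    class MonitoringSupport {
      mutable std::mutex lock_;
      size_t eden_used_ = 0, survivor_used_ = 0, old_used_ = 0;
    public:
      // Called by the GC at well-defined points (e.g. end of a collection).
      void update(size_t overall_used, size_t young_regions,
                  size_t survivor_regions, size_t region_size) {
        std::lock_guard<std::mutex> g(lock_);
        survivor_used_ = survivor_regions * region_size;
        size_t young   = young_regions * region_size;
        eden_used_     = young > survivor_used_ ? young - survivor_used_ : 0;
        size_t taken   = eden_used_ + survivor_used_;
        old_used_      = overall_used > taken ? overall_used - taken : 0;
      }
      // Pool getters just read the snapshot; no arithmetic of their own.
      size_t eden_space_used()     const { std::lock_guard<std::mutex> g(lock_); return eden_used_; }
      size_t survivor_space_used() const { std::lock_guard<std::mutex> g(lock_); return survivor_used_; }
      size_t old_space_used()      const { std::lock_guard<std::mutex> g(lock_); return old_used_; }
    };
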
    59.1 --- a/src/share/vm/services/g1MemoryPool.hpp	Wed May 04 19:16:49 2011 -0400
    59.2 +++ b/src/share/vm/services/g1MemoryPool.hpp	Wed May 04 23:10:58 2011 -0400
    59.3 @@ -1,5 +1,5 @@
    59.4  /*
    59.5 - * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
    59.6 + * Copyright (c) 2007, 2011, Oracle and/or its affiliates. All rights reserved.
    59.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.8   *
    59.9   * This code is free software; you can redistribute it and/or modify it
   59.10 @@ -46,68 +46,9 @@
   59.11  // get, as this does affect the performance and behavior of G1. Which
   59.12  // is why we introduce the three memory pools implemented here.
   59.13  //
   59.14 -// The above approach inroduces a couple of challenging issues in the
   59.15 -// implementation of the three memory pools:
   59.16 +// See comments in g1MonitoringSupport.hpp for additional details
   59.17 +// on this model.
   59.18  //
   59.19 -// 1) The used space calculation for a pool is not necessarily
   59.20 -// independent of the others. We can easily get from G1 the overall
   59.21 -// used space in the entire heap, the number of regions in the young
   59.22 -// generation (includes both eden and survivors), and the number of
   59.23 -// survivor regions. So, from that we calculate:
   59.24 -//
   59.25 -//  survivor_used = survivor_num * region_size
   59.26 -//  eden_used     = young_region_num * region_size - survivor_used
   59.27 -//  old_gen_used  = overall_used - eden_used - survivor_used
   59.28 -//
   59.29 -// Note that survivor_used and eden_used are upper bounds. To get the
   59.30 -// actual value we would have to iterate over the regions and add up
   59.31 -// ->used(). But that'd be expensive. So, we'll accept some lack of
   59.32 -// accuracy for those two. But, we have to be careful when calculating
   59.33 -// old_gen_used, in case we subtract from overall_used more then the
   59.34 -// actual number and our result goes negative.
   59.35 -//
   59.36 -// 2) Calculating the used space is straightforward, as described
   59.37 -// above. However, how do we calculate the committed space, given that
   59.38 -// we allocate space for the eden, survivor, and old gen out of the
   59.39 -// same pool of regions? One way to do this is to use the used value
   59.40 -// as also the committed value for the eden and survivor spaces and
   59.41 -// then calculate the old gen committed space as follows:
   59.42 -//
   59.43 -//  old_gen_committed = overall_committed - eden_committed - survivor_committed
   59.44 -//
   59.45 -// Maybe a better way to do that would be to calculate used for eden
   59.46 -// and survivor as a sum of ->used() over their regions and then
   59.47 -// calculate committed as region_num * region_size (i.e., what we use
   59.48 -// to calculate the used space now). This is something to consider
   59.49 -// in the future.
   59.50 -//
   59.51 -// 3) Another decision that is again not straightforward is what is
   59.52 -// the max size that each memory pool can grow to. One way to do this
   59.53 -// would be to use the committed size for the max for the eden and
   59.54 -// survivors and calculate the old gen max as follows (basically, it's
   59.55 -// a similar pattern to what we use for the committed space, as
   59.56 -// described above):
   59.57 -//
   59.58 -//  old_gen_max = overall_max - eden_max - survivor_max
   59.59 -//
   59.60 -// Unfortunately, the above makes the max of each pool fluctuate over
   59.61 -// time and, even though this is allowed according to the spec, it
   59.62 -// broke several assumptions in the M&M framework (there were cases
   59.63 -// where used would reach a value greater than max). So, for max we
   59.64 -// use -1, which means "undefined" according to the spec.
   59.65 -//
   59.66 -// 4) Now, there is a very subtle issue with all the above. The
   59.67 -// framework will call get_memory_usage() on the three pools
   59.68 -// asynchronously. As a result, each call might get a different value
   59.69 -// for, say, survivor_num which will yield inconsistent values for
   59.70 -// eden_used, survivor_used, and old_gen_used (as survivor_num is used
   59.71 -// in the calculation of all three). This would normally be
   59.72 -// ok. However, it's possible that this might cause the sum of
   59.73 -// eden_used, survivor_used, and old_gen_used to go over the max heap
   59.74 -// size and this seems to sometimes cause JConsole (and maybe other
   59.75 -// clients) to get confused. There's not a really an easy / clean
   59.76 -// solution to this problem, due to the asynchrounous nature of the
   59.77 -// framework.
   59.78  
   59.79  
   59.80  // This class is shared by the three G1 memory pool classes
   59.81 @@ -116,22 +57,6 @@
   59.82  // (see comment above), we put the calculations in this class so that
   59.83  // we can easily share them among the subclasses.
   59.84  class G1MemoryPoolSuper : public CollectedMemoryPool {
   59.85 -private:
   59.86 -  // It returns x - y if x > y, 0 otherwise.
   59.87 -  // As described in the comment above, some of the inputs to the
   59.88 -  // calculations we have to do are obtained concurrently and hence
   59.89 -  // may be inconsistent with each other. So, this provides a
   59.90 -  // defensive way of performing the subtraction and avoids the value
   59.91 -  // going negative (which would mean a very large result, given that
   59.92 -  // the parameter are size_t).
   59.93 -  static size_t subtract_up_to_zero(size_t x, size_t y) {
   59.94 -    if (x > y) {
   59.95 -      return x - y;
   59.96 -    } else {
   59.97 -      return 0;
   59.98 -    }
   59.99 -  }
  59.100 -
  59.101  protected:
  59.102    G1CollectedHeap* _g1h;
  59.103  
  59.104 @@ -148,13 +73,6 @@
  59.105      return (size_t) -1;
  59.106    }
  59.107  
  59.108 -  static size_t overall_committed(G1CollectedHeap* g1h) {
  59.109 -    return g1h->capacity();
  59.110 -  }
  59.111 -  static size_t overall_used(G1CollectedHeap* g1h) {
  59.112 -    return g1h->used_unlocked();
  59.113 -  }
  59.114 -
  59.115    static size_t eden_space_committed(G1CollectedHeap* g1h);
  59.116    static size_t eden_space_used(G1CollectedHeap* g1h);
  59.117  
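
The deleted subtract_up_to_zero() helper existed because these calculations mixed values sampled at different times, and plain size_t subtraction wraps on underflow: a slightly stale operand would report a pool size near 2^64 rather than a small negative. A standalone demonstration of the wraparound the helper guarded against:

    // Why the removed subtract_up_to_zero() guard mattered: size_t
    // subtraction wraps on underflow. Self-contained demonstration.
    #include <cstdio>
    #include <cstddef>

    static size_t subtract_up_to_zero(size_t x, size_t y) {  // the removed helper
      return x > y ? x - y : 0;
    }

    int main() {
      size_t overall_used = 100, eden_used = 120;  // stale, inconsistent sample
      printf("raw:     %zu\n", overall_used - eden_used);  // huge wrapped value
      printf("guarded: %zu\n", subtract_up_to_zero(overall_used, eden_used));  // 0
      return 0;
    }

With the snapshot model above, that defensive arithmetic lives next to the computation itself instead of in every consumer.
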
