8005849: JEP 167: Event-Based JVM Tracing

Mon, 10 Jun 2013 11:30:51 +0200

author
sla
date
Mon, 10 Jun 2013 11:30:51 +0200
changeset 5237
f2110083203d
parent 5236
d0add7016434
child 5238
69689078dff8
child 5255
a837fa3d3f86
child 5256
2bffd20a0fcc

8005849: JEP 167: Event-Based JVM Tracing
Reviewed-by: acorn, coleenp, sla
Contributed-by: Karen Kinnear <karen.kinnear@oracle.com>, Bengt Rutisson <bengt.rutisson@oracle.com>, Calvin Cheung <calvin.cheung@oracle.com>, Erik Gahlin <erik.gahlin@oracle.com>, Erik Helin <erik.helin@oracle.com>, Jesper Wilhelmsson <jesper.wilhelmsson@oracle.com>, Keith McGuigan <keith.mcguigan@oracle.com>, Mattias Tobiasson <mattias.tobiasson@oracle.com>, Markus Gronlund <markus.gronlund@oracle.com>, Mikael Auno <mikael.auno@oracle.com>, Nils Eliasson <nils.eliasson@oracle.com>, Nils Loodin <nils.loodin@oracle.com>, Rickard Backman <rickard.backman@oracle.com>, Staffan Larsen <staffan.larsen@oracle.com>, Stefan Karlsson <stefan.karlsson@oracle.com>, Yekaterina Kantserova <yekaterina.kantserova@oracle.com>

make/Makefile file | annotate | diff | comparison | revisions
make/bsd/makefiles/buildtree.make file | annotate | diff | comparison | revisions
make/bsd/makefiles/minimal1.make file | annotate | diff | comparison | revisions
make/bsd/makefiles/top.make file | annotate | diff | comparison | revisions
make/bsd/makefiles/trace.make file | annotate | diff | comparison | revisions
make/bsd/makefiles/vm.make file | annotate | diff | comparison | revisions
make/defs.make file | annotate | diff | comparison | revisions
make/excludeSrc.make file | annotate | diff | comparison | revisions
make/linux/makefiles/buildtree.make file | annotate | diff | comparison | revisions
make/linux/makefiles/minimal1.make file | annotate | diff | comparison | revisions
make/linux/makefiles/top.make file | annotate | diff | comparison | revisions
make/linux/makefiles/trace.make file | annotate | diff | comparison | revisions
make/linux/makefiles/vm.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/buildtree.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/top.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/trace.make file | annotate | diff | comparison | revisions
make/solaris/makefiles/vm.make file | annotate | diff | comparison | revisions
make/windows/build.make file | annotate | diff | comparison | revisions
make/windows/create_obj_files.sh file | annotate | diff | comparison | revisions
make/windows/makefiles/generated.make file | annotate | diff | comparison | revisions
make/windows/makefiles/projectcreator.make file | annotate | diff | comparison | revisions
make/windows/makefiles/trace.make file | annotate | diff | comparison | revisions
make/windows/makefiles/vm.make file | annotate | diff | comparison | revisions
make/windows/projectfiles/common/Makefile file | annotate | diff | comparison | revisions
src/cpu/sparc/vm/frame_sparc.cpp file | annotate | diff | comparison | revisions
src/cpu/x86/vm/frame_x86.cpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/osThread_bsd.hpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/os_bsd.cpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/os_bsd.hpp file | annotate | diff | comparison | revisions
src/os/bsd/vm/os_bsd.inline.hpp file | annotate | diff | comparison | revisions
src/os/linux/vm/osThread_linux.hpp file | annotate | diff | comparison | revisions
src/os/linux/vm/os_linux.cpp file | annotate | diff | comparison | revisions
src/os/linux/vm/os_linux.hpp file | annotate | diff | comparison | revisions
src/os/linux/vm/os_linux.inline.hpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/osThread_solaris.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/osThread_solaris.hpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/os_share_solaris.hpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/os_solaris.cpp file | annotate | diff | comparison | revisions
src/os/solaris/vm/os_solaris.hpp file | annotate | diff | comparison | revisions
src/os/windows/vm/os_windows.cpp file | annotate | diff | comparison | revisions
src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp file | annotate | diff | comparison | revisions
src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp file | annotate | diff | comparison | revisions
src/os_cpu/linux_x86/vm/thread_linux_x86.cpp file | annotate | diff | comparison | revisions
src/os_cpu/linux_x86/vm/thread_linux_x86.hpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp file | annotate | diff | comparison | revisions
src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp file | annotate | diff | comparison | revisions
src/os_cpu/windows_x86/vm/thread_windows_x86.cpp file | annotate | diff | comparison | revisions
src/os_cpu/windows_x86/vm/thread_windows_x86.hpp file | annotate | diff | comparison | revisions
src/share/tools/ProjectCreator/BuildConfig.java file | annotate | diff | comparison | revisions
src/share/vm/classfile/classFileParser.cpp file | annotate | diff | comparison | revisions
src/share/vm/classfile/classLoaderData.cpp file | annotate | diff | comparison | revisions
src/share/vm/classfile/classLoaderData.hpp file | annotate | diff | comparison | revisions
src/share/vm/classfile/javaClasses.cpp file | annotate | diff | comparison | revisions
src/share/vm/classfile/systemDictionary.cpp file | annotate | diff | comparison | revisions
src/share/vm/classfile/systemDictionary.hpp file | annotate | diff | comparison | revisions
src/share/vm/code/codeCache.cpp file | annotate | diff | comparison | revisions
src/share/vm/code/codeCache.hpp file | annotate | diff | comparison | revisions
src/share/vm/compiler/compileBroker.cpp file | annotate | diff | comparison | revisions
src/share/vm/compiler/compileBroker.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/concurrentMark.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/concurrentMark.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/evacuationInfo.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1MarkSweep.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1MarkSweep.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/g1YCTypes.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parNew/parNewGeneration.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parNew/parNewGeneration.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/copyFailedInfo.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcHeapSummary.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTimer.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTimer.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTrace.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTrace.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTraceSend.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTraceTime.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcTraceTime.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/gcWhen.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/markSweep.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/markSweep.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/vmGCOperations.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_implementation/shared/vmGCOperations.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/allocTracer.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/allocTracer.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/collectedHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/collectedHeap.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/collectedHeap.inline.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/gcCause.cpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/gcCause.hpp file | annotate | diff | comparison | revisions
src/share/vm/gc_interface/gcName.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/allocation.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/defNewGeneration.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/defNewGeneration.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/genCollectedHeap.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/genMarkSweep.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/generation.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/heapInspection.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/heapInspection.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/klassInfoClosure.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/metaspace.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/oopFactory.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/referenceProcessor.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/referenceProcessor.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/referenceProcessorStats.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/referenceType.hpp file | annotate | diff | comparison | revisions
src/share/vm/memory/universe.cpp file | annotate | diff | comparison | revisions
src/share/vm/memory/universe.hpp file | annotate | diff | comparison | revisions
src/share/vm/oops/instanceKlass.hpp file | annotate | diff | comparison | revisions
src/share/vm/oops/klass.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/compile.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/compile.hpp file | annotate | diff | comparison | revisions
src/share/vm/opto/escape.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/library_call.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/loopnode.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/matcher.cpp file | annotate | diff | comparison | revisions
src/share/vm/opto/phasetype.hpp file | annotate | diff | comparison | revisions
src/share/vm/precompiled/precompiled.hpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jni.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvm.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiGen.java file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiImpl.cpp file | annotate | diff | comparison | revisions
src/share/vm/prims/jvmtiImpl.hpp file | annotate | diff | comparison | revisions
src/share/vm/prims/unsafe.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/frame.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/frame.inline.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/globals.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/java.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/mutexLocker.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/objectMonitor.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/objectMonitor.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/os.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/os.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/perfData.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/perfData.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/stubRoutines.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/sweeper.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/sweeper.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/synchronizer.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/task.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/thread.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/thread.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/timer.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/timer.hpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vmStructs.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vmThread.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vm_operations.cpp file | annotate | diff | comparison | revisions
src/share/vm/runtime/vm_operations.hpp file | annotate | diff | comparison | revisions
src/share/vm/services/attachListener.cpp file | annotate | diff | comparison | revisions
src/share/vm/services/diagnosticArgument.cpp file | annotate | diff | comparison | revisions
src/share/vm/services/diagnosticCommand.cpp file | annotate | diff | comparison | revisions
src/share/vm/services/memBaseline.cpp file | annotate | diff | comparison | revisions
src/share/vm/trace/noTraceBackend.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/trace.dtd file | annotate | diff | comparison | revisions
src/share/vm/trace/trace.xml file | annotate | diff | comparison | revisions
src/share/vm/trace/traceBackend.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceDataTypes.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceEvent.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceEventClasses.xsl file | annotate | diff | comparison | revisions
src/share/vm/trace/traceEventIds.xsl file | annotate | diff | comparison | revisions
src/share/vm/trace/traceEventTypes.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceMacros.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceStream.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceTime.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/traceTypes.xsl file | annotate | diff | comparison | revisions
src/share/vm/trace/tracetypes.xml file | annotate | diff | comparison | revisions
src/share/vm/trace/tracing.hpp file | annotate | diff | comparison | revisions
src/share/vm/trace/xinclude.mod file | annotate | diff | comparison | revisions
src/share/vm/trace/xsl_util.xsl file | annotate | diff | comparison | revisions
src/share/vm/utilities/globalDefinitions.hpp file | annotate | diff | comparison | revisions
src/share/vm/utilities/macros.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/make/Makefile	Fri Jun 07 09:33:01 2013 -0700
     1.2 +++ b/make/Makefile	Mon Jun 10 11:30:51 2013 +0200
     1.3 @@ -486,7 +486,7 @@
     1.4  JFR_EXISTS=$(shell if [ -d $(HS_ALT_SRC) ]; then echo 1; else echo 0; fi)
     1.5  # export jfr.h
     1.6  ifeq ($JFR_EXISTS,1)
     1.7 -$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/agent/%
     1.8 +$(EXPORT_INCLUDE_DIR)/%: $(HS_ALT_SRC)/share/vm/jfr/%
     1.9  	$(install-file)
    1.10  else
    1.11  $(EXPORT_INCLUDE_DIR)/jfr.h:
     2.1 --- a/make/bsd/makefiles/buildtree.make	Fri Jun 07 09:33:01 2013 -0700
     2.2 +++ b/make/bsd/makefiles/buildtree.make	Mon Jun 10 11:30:51 2013 +0200
     2.3 @@ -47,6 +47,7 @@
     2.4  # flags.make	- with macro settings
     2.5  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
     2.6  # adlc.make	-
     2.7 +# trace.make	- generate tracing event and type definitions
     2.8  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
     2.9  # sa.make	- generate SA jar file and natives
    2.10  #
    2.11 @@ -119,6 +120,7 @@
    2.12  	$(PLATFORM_DIR)/generated/dependencies \
    2.13  	$(PLATFORM_DIR)/generated/adfiles \
    2.14  	$(PLATFORM_DIR)/generated/jvmtifiles \
    2.15 +	$(PLATFORM_DIR)/generated/tracefiles \
    2.16  	$(PLATFORM_DIR)/generated/dtracefiles
    2.17  
    2.18  TARGETS      = debug fastdebug optimized product
    2.19 @@ -128,7 +130,7 @@
    2.20  BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
    2.21  
    2.22  # dtrace.make is used on BSD versions that implement Dtrace (like MacOS X)
    2.23 -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make dtrace.make
    2.24 +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make dtrace.make
    2.25  
    2.26  BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
    2.27  	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
    2.28 @@ -331,6 +333,16 @@
    2.29  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
    2.30  	) > $@
    2.31  
    2.32 +trace.make: $(BUILDTREE_MAKE)
    2.33 +	@echo Creating $@ ...
    2.34 +	$(QUIETLY) ( \
    2.35 +	$(BUILDTREE_COMMENT); \
    2.36 +	echo; \
    2.37 +	echo include flags.make; \
    2.38 +	echo; \
    2.39 +	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
    2.40 +	) > $@
    2.41 +
    2.42  sa.make: $(BUILDTREE_MAKE)
    2.43  	@echo Creating $@ ...
    2.44  	$(QUIETLY) ( \
     3.1 --- a/make/bsd/makefiles/minimal1.make	Fri Jun 07 09:33:01 2013 -0700
     3.2 +++ b/make/bsd/makefiles/minimal1.make	Mon Jun 10 11:30:51 2013 +0200
     3.3 @@ -19,7 +19,7 @@
     3.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
     3.5  # or visit www.oracle.com if you need additional information or have any
     3.6  # questions.
     3.7 -#  
     3.8 +#
     3.9  #
    3.10  
    3.11  TYPE=MINIMAL1
    3.12 @@ -32,6 +32,7 @@
    3.13  INCLUDE_MANAGEMENT ?= false
    3.14  INCLUDE_ALL_GCS ?= false
    3.15  INCLUDE_NMT ?= false
    3.16 +INCLUDE_TRACE ?= false
    3.17  INCLUDE_CDS ?= false
    3.18  
    3.19  CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
     4.1 --- a/make/bsd/makefiles/top.make	Fri Jun 07 09:33:01 2013 -0700
     4.2 +++ b/make/bsd/makefiles/top.make	Mon Jun 10 11:30:51 2013 +0200
     4.3 @@ -1,5 +1,5 @@
     4.4  #
     4.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     4.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     4.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8  #
     4.9  # This code is free software; you can redistribute it and/or modify it
    4.10 @@ -80,7 +80,7 @@
    4.11  	@echo All done.
    4.12  
    4.13  # This is an explicit dependency for the sake of parallel makes.
    4.14 -vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff dtrace_stuff
    4.15 +vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff dtrace_stuff
    4.16  	@# We need a null action here, so implicit rules don't get consulted.
    4.17  
    4.18  $(Cached_plat): $(Plat_File)
    4.19 @@ -94,6 +94,10 @@
    4.20  jvmti_stuff: $(Cached_plat) $(adjust-mflags)
    4.21  	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
    4.22  
    4.23 +# generate trace files
    4.24 +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
    4.25 +	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
    4.26 +
    4.27  ifeq ($(OS_VENDOR), Darwin)
    4.28  # generate dtrace header files
    4.29  dtrace_stuff: $(Cached_plat) $(adjust-mflags)
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/make/bsd/makefiles/trace.make	Mon Jun 10 11:30:51 2013 +0200
     5.3 @@ -0,0 +1,121 @@
     5.4 +#
     5.5 +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
     5.6 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 +#
     5.8 +# This code is free software; you can redistribute it and/or modify it
     5.9 +# under the terms of the GNU General Public License version 2 only, as
    5.10 +# published by the Free Software Foundation.
    5.11 +#
    5.12 +# This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 +# version 2 for more details (a copy is included in the LICENSE file that
    5.16 +# accompanied this code).
    5.17 +#
    5.18 +# You should have received a copy of the GNU General Public License version
    5.19 +# 2 along with this work; if not, write to the Free Software Foundation,
    5.20 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 +#
    5.22 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 +# or visit www.oracle.com if you need additional information or have any
    5.24 +# questions.
    5.25 +#
    5.26 +#
    5.27 +
    5.28 +# This makefile (trace.make) is included from the trace.make in the
    5.29 +# build directories.
    5.30 +#
    5.31 +# It knows how to build and run the tools to generate trace files.
    5.32 +
    5.33 +include $(GAMMADIR)/make/bsd/makefiles/rules.make
    5.34 +include $(GAMMADIR)/make/altsrc.make
    5.35 +
    5.36 +# #########################################################################
    5.37 +
    5.38 +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
    5.39 +  echo "true"; else echo "false";\
    5.40 +  fi)
    5.41 +
    5.42 +TOPDIR      = $(shell echo `pwd`)
    5.43 +GENERATED   = $(TOPDIR)/../generated
    5.44 +JvmtiOutDir = $(GENERATED)/jvmtifiles
    5.45 +TraceOutDir   = $(GENERATED)/tracefiles
    5.46 +
    5.47 +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
    5.48 +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
    5.49 +
    5.50 +# set VPATH so make knows where to look for source files
    5.51 +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
    5.52 +VPATH += $(Src_Dirs_V:%=%:)
    5.53 +
    5.54 +TraceGeneratedNames =     \
    5.55 +    traceEventClasses.hpp \
    5.56 +	traceEventIds.hpp     \
    5.57 +	traceTypes.hpp
    5.58 +
    5.59 +ifeq ($(HAS_ALT_SRC), true)
    5.60 +TraceGeneratedNames +=  \
    5.61 +	traceRequestables.hpp \
    5.62 +    traceEventControl.hpp
    5.63 +
    5.64 +ifneq ($(INCLUDE_TRACE), false)
    5.65 +TraceGeneratedNames += traceProducer.cpp
    5.66 +endif
    5.67 +
    5.68 +endif
    5.69 +
    5.70 +
    5.71 +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
    5.72 +
    5.73 +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
    5.74 +
    5.75 +XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
    5.76 +	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
    5.77 +ifeq ($(HAS_ALT_SRC), true)
    5.78 +	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
    5.79 +endif
    5.80 +
    5.81 +.PHONY: all clean cleanall
    5.82 +
    5.83 +# #########################################################################
    5.84 +
    5.85 +all: $(TraceGeneratedFiles)
    5.86 +
    5.87 +GENERATE_CODE= \
    5.88 +  $(QUIETLY) echo Generating $@; \
    5.89 +  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
    5.90 +  test -f $@
    5.91 +
    5.92 +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
    5.93 +	$(GENERATE_CODE)
    5.94 +
    5.95 +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
    5.96 +	$(GENERATE_CODE)
    5.97 +
    5.98 +ifeq ($(HAS_ALT_SRC), false)
    5.99 +
   5.100 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
   5.101 +	$(GENERATE_CODE)
   5.102 +
   5.103 +else
   5.104 +
   5.105 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
   5.106 +	$(GENERATE_CODE)
   5.107 +
   5.108 +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
   5.109 +	$(GENERATE_CODE)
   5.110 +
   5.111 +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
   5.112 +	$(GENERATE_CODE)
   5.113 +
   5.114 +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
   5.115 +	$(GENERATE_CODE)
   5.116 +
   5.117 +endif
   5.118 +
   5.119 +# #########################################################################
   5.120 +
   5.121 +
   5.122 +clean cleanall:
   5.123 +	rm $(TraceGeneratedFiles)
   5.124 +
     6.1 --- a/make/bsd/makefiles/vm.make	Fri Jun 07 09:33:01 2013 -0700
     6.2 +++ b/make/bsd/makefiles/vm.make	Mon Jun 10 11:30:51 2013 +0200
     6.3 @@ -1,5 +1,5 @@
     6.4  #
     6.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     6.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     6.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8  #
     6.9  # This code is free software; you can redistribute it and/or modify it
    6.10 @@ -19,7 +19,7 @@
    6.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    6.12  # or visit www.oracle.com if you need additional information or have any
    6.13  # questions.
    6.14 -#  
    6.15 +#
    6.16  #
    6.17  
    6.18  # Rules to build JVM and related libraries, included from vm.make in the build
    6.19 @@ -52,7 +52,7 @@
    6.20  # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
    6.21  # The adfiles directory contains ad_<arch>.[ch]pp.
    6.22  # The jvmtifiles directory contains jvmti*.[ch]pp
    6.23 -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
    6.24 +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
    6.25  VPATH += $(Src_Dirs_V:%=%:)
    6.26  
    6.27  # set INCLUDES for C preprocessor.
    6.28 @@ -66,7 +66,7 @@
    6.29    SYMFLAG =
    6.30  endif
    6.31  
    6.32 -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined 
    6.33 +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
    6.34  # in $(GAMMADIR)/make/defs.make
    6.35  ifeq ($(HOTSPOT_BUILD_VERSION),)
    6.36    BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
    6.37 @@ -93,7 +93,7 @@
    6.38  
    6.39  # This is VERY important! The version define must only be supplied to vm_version.o
    6.40  # If not, ccache will not re-use the cache at all, since the version string might contain
    6.41 -# a time and date. 
    6.42 +# a time and date.
    6.43  CXXFLAGS/vm_version.o += ${JRE_VERSION}
    6.44  
    6.45  CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
    6.46 @@ -105,10 +105,6 @@
    6.47  CXXFLAGS += -DDEFAULT_LIBPATH="\"$(DEFAULT_LIBPATH)\""
    6.48  endif
    6.49  
    6.50 -ifndef JAVASE_EMBEDDED
    6.51 -CFLAGS += -DINCLUDE_TRACE
    6.52 -endif
    6.53 -
    6.54  # CFLAGS_WARN holds compiler options to suppress/enable warnings.
    6.55  CFLAGS += $(CFLAGS_WARN/BYFILE)
    6.56  
    6.57 @@ -165,15 +161,15 @@
    6.58  SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
    6.59  SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
    6.60  
    6.61 -ifndef JAVASE_EMBEDDED
    6.62 -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
    6.63 +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
    6.64 +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
    6.65 +
    6.66 +ifneq ($(INCLUDE_TRACE), false)
    6.67 +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
    6.68    find $(HS_ALT_SRC)/share/vm/jfr -type d; \
    6.69    fi)
    6.70  endif
    6.71  
    6.72 -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
    6.73 -CORE_PATHS+=$(GENERATED)/jvmtifiles
    6.74 -
    6.75  COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
    6.76  COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
    6.77  
     7.1 --- a/make/defs.make	Fri Jun 07 09:33:01 2013 -0700
     7.2 +++ b/make/defs.make	Mon Jun 10 11:30:51 2013 +0200
     7.3 @@ -1,5 +1,5 @@
     7.4  #
     7.5 -# Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
     7.6 +# Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.
     7.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8  #
     7.9  # This code is free software; you can redistribute it and/or modify it
    7.10 @@ -19,7 +19,7 @@
    7.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    7.12  # or visit www.oracle.com if you need additional information or have any
    7.13  # questions.
    7.14 -#  
    7.15 +#
    7.16  #
    7.17  
    7.18  # The common definitions for hotspot builds.
    7.19 @@ -236,7 +236,7 @@
    7.20    JDK_IMAGE_DIR=$(ALT_JDK_IMAGE_DIR)
    7.21  endif
    7.22  
    7.23 -# The platform dependent defs.make defines platform specific variable such 
    7.24 +# The platform dependent defs.make defines platform specific variable such
    7.25  # as ARCH, EXPORT_LIST etc. We must place the include here after BOOTDIR is defined.
    7.26  include $(GAMMADIR)/make/$(OSNAME)/makefiles/defs.make
    7.27  
    7.28 @@ -258,7 +258,7 @@
    7.29    #   LIBARCH   - directory name in JDK/JRE
    7.30  
    7.31    # Use uname output for SRCARCH, but deal with platform differences. If ARCH
    7.32 -  # is not explicitly listed below, it is treated as x86. 
    7.33 +  # is not explicitly listed below, it is treated as x86.
    7.34    SRCARCH     = $(ARCH/$(filter sparc sparc64 ia64 amd64 x86_64 arm ppc zero,$(ARCH)))
    7.35    ARCH/       = x86
    7.36    ARCH/sparc  = sparc
    7.37 @@ -337,8 +337,5 @@
    7.38  EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/$(JDK_INCLUDE_SUBDIR)/jni_md.h
    7.39  EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jmm.h
    7.40  
    7.41 -ifndef JAVASE_EMBEDDED
    7.42 -EXPORT_LIST += $(EXPORT_INCLUDE_DIR)/jfr.h
    7.43 -endif
    7.44 +.PHONY: $(HS_ALT_MAKE)/defs.make
    7.45  
    7.46 -.PHONY: $(HS_ALT_MAKE)/defs.make
     8.1 --- a/make/excludeSrc.make	Fri Jun 07 09:33:01 2013 -0700
     8.2 +++ b/make/excludeSrc.make	Mon Jun 10 11:30:51 2013 +0200
     8.3 @@ -19,7 +19,7 @@
     8.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
     8.5  # or visit www.oracle.com if you need additional information or have any
     8.6  # questions.
     8.7 -#  
     8.8 +#
     8.9  #
    8.10  ifeq ($(INCLUDE_JVMTI), false)
    8.11        CXXFLAGS += -DINCLUDE_JVMTI=0
    8.12 @@ -100,7 +100,7 @@
    8.13  	parCardTableModRefBS.cpp parGCAllocBuffer.cpp parNewGeneration.cpp mutableSpace.cpp \
    8.14  	gSpaceCounters.cpp allocationStats.cpp spaceCounters.cpp gcAdaptivePolicyCounters.cpp \
    8.15  	mutableNUMASpace.cpp immutableSpace.cpp yieldingWorkGroup.cpp
    8.16 -endif 
    8.17 +endif
    8.18  
    8.19  ifeq ($(INCLUDE_NMT), false)
    8.20        CXXFLAGS += -DINCLUDE_NMT=0
    8.21 @@ -110,3 +110,5 @@
    8.22  	 memBaseline.cpp memPtr.cpp memRecorder.cpp memReporter.cpp memSnapshot.cpp memTrackWorker.cpp \
    8.23  	 memTracker.cpp nmtDCmd.cpp
    8.24  endif
    8.25 +
    8.26 +-include $(HS_ALT_MAKE)/excludeSrc.make
     9.1 --- a/make/linux/makefiles/buildtree.make	Fri Jun 07 09:33:01 2013 -0700
     9.2 +++ b/make/linux/makefiles/buildtree.make	Mon Jun 10 11:30:51 2013 +0200
     9.3 @@ -47,6 +47,7 @@
     9.4  # flags.make	- with macro settings
     9.5  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
     9.6  # adlc.make	-
     9.7 +# trace.make	- generate tracing event and type definitions
     9.8  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
     9.9  # sa.make	- generate SA jar file and natives
    9.10  #
    9.11 @@ -114,7 +115,8 @@
    9.12  SIMPLE_DIRS	= \
    9.13  	$(PLATFORM_DIR)/generated/dependencies \
    9.14  	$(PLATFORM_DIR)/generated/adfiles \
    9.15 -	$(PLATFORM_DIR)/generated/jvmtifiles
    9.16 +	$(PLATFORM_DIR)/generated/jvmtifiles \
    9.17 +	$(PLATFORM_DIR)/generated/tracefiles
    9.18  
    9.19  TARGETS      = debug fastdebug optimized product
    9.20  SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
    9.21 @@ -122,7 +124,7 @@
    9.22  # For dependencies and recursive makes.
    9.23  BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
    9.24  
    9.25 -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make
    9.26 +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
    9.27  
    9.28  BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
    9.29  	SRCARCH=$(SRCARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
    9.30 @@ -269,6 +271,8 @@
    9.31  	    echo && \
    9.32  	    echo "HOTSPOT_EXTRA_SYSDEFS\$$(HOTSPOT_EXTRA_SYSDEFS) = $(HOTSPOT_EXTRA_SYSDEFS)" && \
    9.33  	    echo "SYSDEFS += \$$(HOTSPOT_EXTRA_SYSDEFS)"; \
    9.34 +	[ -n "$(INCLUDE_TRACE)" ] && \
    9.35 +	    echo && echo "INCLUDE_TRACE = $(INCLUDE_TRACE)"; \
    9.36  	echo; \
    9.37  	[ -n "$(SPEC)" ] && \
    9.38  	    echo "include $(SPEC)"; \
    9.39 @@ -337,6 +341,16 @@
    9.40  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
    9.41  	) > $@
    9.42  
    9.43 +trace.make: $(BUILDTREE_MAKE)
    9.44 +	@echo Creating $@ ...
    9.45 +	$(QUIETLY) ( \
    9.46 +	$(BUILDTREE_COMMENT); \
    9.47 +	echo; \
    9.48 +	echo include flags.make; \
    9.49 +	echo; \
    9.50 +	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
    9.51 +	) > $@
    9.52 +
    9.53  sa.make: $(BUILDTREE_MAKE)
    9.54  	@echo Creating $@ ...
    9.55  	$(QUIETLY) ( \
    10.1 --- a/make/linux/makefiles/minimal1.make	Fri Jun 07 09:33:01 2013 -0700
    10.2 +++ b/make/linux/makefiles/minimal1.make	Mon Jun 10 11:30:51 2013 +0200
    10.3 @@ -19,7 +19,7 @@
    10.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    10.5  # or visit www.oracle.com if you need additional information or have any
    10.6  # questions.
    10.7 -#  
    10.8 +#
    10.9  #
   10.10  
   10.11  TYPE=MINIMAL1
   10.12 @@ -32,6 +32,7 @@
   10.13  INCLUDE_MANAGEMENT ?= false
   10.14  INCLUDE_ALL_GCS ?= false
   10.15  INCLUDE_NMT ?= false
   10.16 +INCLUDE_TRACE ?= false
   10.17  INCLUDE_CDS ?= false
   10.18  
   10.19  CXXFLAGS += -DMINIMAL_JVM -DCOMPILER1 -DVMTYPE=\"Minimal\"
    11.1 --- a/make/linux/makefiles/top.make	Fri Jun 07 09:33:01 2013 -0700
    11.2 +++ b/make/linux/makefiles/top.make	Mon Jun 10 11:30:51 2013 +0200
    11.3 @@ -1,5 +1,5 @@
    11.4  #
    11.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    11.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    11.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    11.8  #
    11.9  # This code is free software; you can redistribute it and/or modify it
   11.10 @@ -80,7 +80,7 @@
   11.11  	@echo All done.
   11.12  
   11.13  # This is an explicit dependency for the sake of parallel makes.
   11.14 -vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
   11.15 +vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) trace_stuff jvmti_stuff sa_stuff
   11.16  	@# We need a null action here, so implicit rules don't get consulted.
   11.17  
   11.18  $(Cached_plat): $(Plat_File)
   11.19 @@ -94,6 +94,10 @@
   11.20  jvmti_stuff: $(Cached_plat) $(adjust-mflags)
   11.21  	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
   11.22  
   11.23 +# generate trace files
   11.24 +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
   11.25 +	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
   11.26 +
   11.27  # generate SA jar files and native header
   11.28  sa_stuff:
   11.29  	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
    12.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    12.2 +++ b/make/linux/makefiles/trace.make	Mon Jun 10 11:30:51 2013 +0200
    12.3 @@ -0,0 +1,120 @@
    12.4 +#
    12.5 +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    12.6 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.7 +#
    12.8 +# This code is free software; you can redistribute it and/or modify it
    12.9 +# under the terms of the GNU General Public License version 2 only, as
   12.10 +# published by the Free Software Foundation.
   12.11 +#
   12.12 +# This code is distributed in the hope that it will be useful, but WITHOUT
   12.13 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   12.14 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   12.15 +# version 2 for more details (a copy is included in the LICENSE file that
   12.16 +# accompanied this code).
   12.17 +#
   12.18 +# You should have received a copy of the GNU General Public License version
   12.19 +# 2 along with this work; if not, write to the Free Software Foundation,
   12.20 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   12.21 +#
   12.22 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   12.23 +# or visit www.oracle.com if you need additional information or have any
   12.24 +# questions.
   12.25 +#
   12.26 +#
   12.27 +
   12.28 +# This makefile (trace.make) is included from the trace.make in the
   12.29 +# build directories.
   12.30 +#
   12.31 +# It knows how to build and run the tools to generate trace files.
   12.32 +
   12.33 +include $(GAMMADIR)/make/linux/makefiles/rules.make
   12.34 +include $(GAMMADIR)/make/altsrc.make
   12.35 +
   12.36 +# #########################################################################
   12.37 +
   12.38 +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
   12.39 +  echo "true"; else echo "false";\
   12.40 +  fi)
   12.41 +
   12.42 +TOPDIR      = $(shell echo `pwd`)
   12.43 +GENERATED   = $(TOPDIR)/../generated
   12.44 +JvmtiOutDir = $(GENERATED)/jvmtifiles
   12.45 +TraceOutDir   = $(GENERATED)/tracefiles
   12.46 +
   12.47 +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
   12.48 +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
   12.49 +
   12.50 +# set VPATH so make knows where to look for source files
   12.51 +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
   12.52 +VPATH += $(Src_Dirs_V:%=%:)
   12.53 +
   12.54 +TraceGeneratedNames =     \
   12.55 +	traceEventClasses.hpp \
   12.56 +	traceEventIds.hpp     \
   12.57 +	traceTypes.hpp
   12.58 +
   12.59 +ifeq ($(HAS_ALT_SRC), true)
   12.60 +TraceGeneratedNames +=  \
   12.61 +	traceRequestables.hpp \
   12.62 +	traceEventControl.hpp
   12.63 +
   12.64 +ifneq ($(INCLUDE_TRACE), false)
   12.65 +TraceGeneratedNames += traceProducer.cpp
   12.66 +endif
   12.67 +
   12.68 +endif
   12.69 +
   12.70 +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
   12.71 +
   12.72 +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
   12.73 +
   12.74 +XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
   12.75 +	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
   12.76 +ifeq ($(HAS_ALT_SRC), true)
   12.77 +	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
   12.78 +endif
   12.79 +
   12.80 +.PHONY: all clean cleanall
   12.81 +
   12.82 +# #########################################################################
   12.83 +
   12.84 +all: $(TraceGeneratedFiles)
   12.85 +
   12.86 +GENERATE_CODE= \
   12.87 +  $(QUIETLY) echo Generating $@; \
   12.88 +  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
   12.89 +  test -f $@
   12.90 +
   12.91 +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
   12.92 +	$(GENERATE_CODE)
   12.93 +
   12.94 +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
   12.95 +	$(GENERATE_CODE)
   12.96 +
   12.97 +ifeq ($(HAS_ALT_SRC), false)
   12.98 +
   12.99 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
  12.100 +	$(GENERATE_CODE)
  12.101 +
  12.102 +else
  12.103 +
  12.104 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
  12.105 +	$(GENERATE_CODE)
  12.106 +
  12.107 +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
  12.108 +	$(GENERATE_CODE)
  12.109 +
  12.110 +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
  12.111 +	$(GENERATE_CODE)
  12.112 +
  12.113 +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
  12.114 +	$(GENERATE_CODE)
  12.115 +
  12.116 +endif
  12.117 +
  12.118 +# #########################################################################
  12.119 +
  12.120 +clean cleanall:
  12.121 +	rm $(TraceGeneratedFiles)
  12.122 +
  12.123 +
    13.1 --- a/make/linux/makefiles/vm.make	Fri Jun 07 09:33:01 2013 -0700
    13.2 +++ b/make/linux/makefiles/vm.make	Mon Jun 10 11:30:51 2013 +0200
    13.3 @@ -1,5 +1,5 @@
    13.4  #
    13.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    13.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8  #
    13.9  # This code is free software; you can redistribute it and/or modify it
   13.10 @@ -19,7 +19,7 @@
   13.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   13.12  # or visit www.oracle.com if you need additional information or have any
   13.13  # questions.
   13.14 -#  
   13.15 +#
   13.16  #
   13.17  
   13.18  # Rules to build JVM and related libraries, included from vm.make in the build
   13.19 @@ -52,7 +52,7 @@
   13.20  # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
   13.21  # The adfiles directory contains ad_<arch>.[ch]pp.
   13.22  # The jvmtifiles directory contains jvmti*.[ch]pp
   13.23 -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
   13.24 +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
   13.25  VPATH += $(Src_Dirs_V:%=%:)
   13.26  
   13.27  # set INCLUDES for C preprocessor.
   13.28 @@ -72,7 +72,7 @@
   13.29    endif
   13.30  endif
   13.31  
   13.32 -# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined 
   13.33 +# HOTSPOT_RELEASE_VERSION and HOTSPOT_BUILD_VERSION are defined
   13.34  # in $(GAMMADIR)/make/defs.make
   13.35  ifeq ($(HOTSPOT_BUILD_VERSION),)
   13.36    BUILD_VERSION = -DHOTSPOT_RELEASE_VERSION="\"$(HOTSPOT_RELEASE_VERSION)\""
   13.37 @@ -99,7 +99,7 @@
   13.38  
   13.39  # This is VERY important! The version define must only be supplied to vm_version.o
   13.40  # If not, ccache will not re-use the cache at all, since the version string might contain
   13.41 -# a time and date. 
   13.42 +# a time and date.
   13.43  CXXFLAGS/vm_version.o += ${JRE_VERSION}
   13.44  
   13.45  CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
   13.46 @@ -108,12 +108,6 @@
   13.47  CXXFLAGS += $(CXXFLAGS/BYFILE)
   13.48  
   13.49  
   13.50 -ifndef JAVASE_EMBEDDED 
   13.51 -ifneq (${ARCH},arm)
   13.52 -CFLAGS += -DINCLUDE_TRACE
   13.53 -endif
   13.54 -endif
   13.55 -
   13.56  # CFLAGS_WARN holds compiler options to suppress/enable warnings.
   13.57  CFLAGS += $(CFLAGS_WARN/BYFILE)
   13.58  
   13.59 @@ -158,16 +152,14 @@
   13.60  SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
   13.61  SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
   13.62  
   13.63 -ifndef JAVASE_EMBEDDED 
   13.64 -ifneq (${ARCH},arm)
   13.65 -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   13.66 +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
   13.67 +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
   13.68 +
   13.69 +ifneq ($(INCLUDE_TRACE), false)
   13.70 +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   13.71    find $(HS_ALT_SRC)/share/vm/jfr -type d; \
   13.72    fi)
   13.73  endif
   13.74 -endif
   13.75 -
   13.76 -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
   13.77 -CORE_PATHS+=$(GENERATED)/jvmtifiles
   13.78  
   13.79  COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
   13.80  COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
   13.81 @@ -316,7 +308,7 @@
   13.82  # With more recent Redhat releases (or the cutting edge version Fedora), if
   13.83  # SELinux is configured to be enabled, the runtime linker will fail to apply
   13.84  # the text relocation to libjvm.so considering that it is built as a non-PIC
   13.85 -# DSO. To workaround that, we run chcon to libjvm.so after it is built. See 
   13.86 +# DSO. To workaround that, we run chcon to libjvm.so after it is built. See
   13.87  # details in bug 6538311.
   13.88  $(LIBJVM): $(LIBJVM.o) $(LIBJVM_MAPFILE) $(LD_SCRIPT)
   13.89  	$(QUIETLY) {                                                    \
    14.1 --- a/make/solaris/makefiles/buildtree.make	Fri Jun 07 09:33:01 2013 -0700
    14.2 +++ b/make/solaris/makefiles/buildtree.make	Mon Jun 10 11:30:51 2013 +0200
    14.3 @@ -47,6 +47,7 @@
    14.4  # flags.make	- with macro settings
    14.5  # vm.make	- to support making "$(MAKE) -v vm.make" in makefiles
    14.6  # adlc.make	-
    14.7 +# trace.make	- generate tracing event and type definitions
    14.8  # jvmti.make	- generate JVMTI bindings from the spec (JSR-163)
    14.9  # sa.make	- generate SA jar file and natives
   14.10  #
   14.11 @@ -107,7 +108,8 @@
   14.12  SIMPLE_DIRS	= \
   14.13  	$(PLATFORM_DIR)/generated/dependencies \
   14.14  	$(PLATFORM_DIR)/generated/adfiles \
   14.15 -	$(PLATFORM_DIR)/generated/jvmtifiles
   14.16 +	$(PLATFORM_DIR)/generated/jvmtifiles \
   14.17 +	$(PLATFORM_DIR)/generated/tracefiles
   14.18  
   14.19  TARGETS      = debug fastdebug optimized product
   14.20  SUBMAKE_DIRS = $(addprefix $(PLATFORM_DIR)/,$(TARGETS))
   14.21 @@ -115,7 +117,7 @@
   14.22  # For dependencies and recursive makes.
   14.23  BUILDTREE_MAKE	= $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make
   14.24  
   14.25 -BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make
   14.26 +BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make trace.make sa.make
   14.27  
   14.28  BUILDTREE_VARS	= GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \
   14.29  	ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT)
   14.30 @@ -327,6 +329,16 @@
   14.31  	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
   14.32  	) > $@
   14.33  
   14.34 +trace.make: $(BUILDTREE_MAKE)
   14.35 +	@echo Creating $@ ...
   14.36 +	$(QUIETLY) ( \
   14.37 +	$(BUILDTREE_COMMENT); \
   14.38 +	echo; \
   14.39 +	echo include flags.make; \
   14.40 +	echo; \
   14.41 +	echo "include \$$(GAMMADIR)/make/$(OS_FAMILY)/makefiles/$(@F)"; \
   14.42 +	) > $@
   14.43 +
   14.44  sa.make: $(BUILDTREE_MAKE)
   14.45  	@echo Creating $@ ...
   14.46  	$(QUIETLY) ( \
    15.1 --- a/make/solaris/makefiles/top.make	Fri Jun 07 09:33:01 2013 -0700
    15.2 +++ b/make/solaris/makefiles/top.make	Mon Jun 10 11:30:51 2013 +0200
    15.3 @@ -1,5 +1,5 @@
    15.4  #
    15.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    15.6 +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    15.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.8  #
    15.9  # This code is free software; you can redistribute it and/or modify it
   15.10 @@ -73,7 +73,7 @@
   15.11  	@echo All done.
   15.12  
   15.13  # This is an explicit dependency for the sake of parallel makes.
   15.14 -vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff sa_stuff
   15.15 +vm_build_preliminaries:  checks $(Cached_plat) $(AD_Files_If_Required) jvmti_stuff trace_stuff sa_stuff
   15.16  	@# We need a null action here, so implicit rules don't get consulted.
   15.17  
   15.18  $(Cached_plat): $(Plat_File)
   15.19 @@ -87,6 +87,10 @@
   15.20  jvmti_stuff: $(Cached_plat) $(adjust-mflags)
   15.21  	@$(MAKE) -f jvmti.make $(MFLAGS-adjusted)
   15.22  
   15.23 +# generate trace files
   15.24 +trace_stuff: jvmti_stuff $(Cached_plat) $(adjust-mflags)
   15.25 +	@$(MAKE) -f trace.make $(MFLAGS-adjusted)
   15.26 +
   15.27  # generate SA jar files and native header
   15.28  sa_stuff:
   15.29  	@$(MAKE) -f sa.make $(MFLAGS-adjusted)
   15.30 @@ -127,5 +131,5 @@
   15.31  	rm -fr $(GENERATED)
   15.32  
   15.33  .PHONY: default vm_build_preliminaries
   15.34 -.PHONY: lists ad_stuff jvmti_stuff sa_stuff the_vm clean realclean
   15.35 +.PHONY: lists ad_stuff jvmti_stuff trace_stuff sa_stuff the_vm clean realclean
   15.36  .PHONY: checks check_os_version install
    16.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    16.2 +++ b/make/solaris/makefiles/trace.make	Mon Jun 10 11:30:51 2013 +0200
    16.3 @@ -0,0 +1,116 @@
    16.4 +#
    16.5 +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    16.6 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.7 +#
    16.8 +# This code is free software; you can redistribute it and/or modify it
    16.9 +# under the terms of the GNU General Public License version 2 only, as
   16.10 +# published by the Free Software Foundation.
   16.11 +#
   16.12 +# This code is distributed in the hope that it will be useful, but WITHOUT
   16.13 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   16.14 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   16.15 +# version 2 for more details (a copy is included in the LICENSE file that
   16.16 +# accompanied this code).
   16.17 +#
   16.18 +# You should have received a copy of the GNU General Public License version
   16.19 +# 2 along with this work; if not, write to the Free Software Foundation,
   16.20 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   16.21 +#
   16.22 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   16.23 +# or visit www.oracle.com if you need additional information or have any
   16.24 +# questions.
   16.25 +#
   16.26 +#
   16.27 +
   16.28 +# This makefile (trace.make) is included from the trace.make in the
   16.29 +# build directories.
   16.30 +#
   16.31 +# It knows how to build and run the tools to generate trace files.
   16.32 +
   16.33 +include $(GAMMADIR)/make/solaris/makefiles/rules.make
   16.34 +include $(GAMMADIR)/make/altsrc.make
   16.35 +
   16.36 +# #########################################################################
   16.37 +
   16.38 +HAS_ALT_SRC:=$(shell if [ -d $(HS_ALT_SRC)/share/vm/trace ]; then \
   16.39 +  echo "true"; else echo "false";\
   16.40 +  fi)
   16.41 +
   16.42 +TOPDIR      = $(shell echo `pwd`)
   16.43 +GENERATED   = $(TOPDIR)/../generated
   16.44 +JvmtiOutDir = $(GENERATED)/jvmtifiles
   16.45 +TraceOutDir   = $(GENERATED)/tracefiles
   16.46 +
   16.47 +TraceAltSrcDir = $(HS_ALT_SRC)/share/vm/trace
   16.48 +TraceSrcDir = $(HS_COMMON_SRC)/share/vm/trace
   16.49 +
   16.50 +# set VPATH so make knows where to look for source files
   16.51 +Src_Dirs_V += $(TraceSrcDir) $(TraceAltSrcDir)
   16.52 +VPATH += $(Src_Dirs_V:%=%:)
   16.53 +
   16.54 +TraceGeneratedNames =     \
   16.55 +	traceEventClasses.hpp \
   16.56 +	traceEventIds.hpp     \
   16.57 +	traceTypes.hpp
   16.58 +
   16.59 +ifeq ($(HAS_ALT_SRC), true)
   16.60 +TraceGeneratedNames +=  \
   16.61 +	traceRequestables.hpp \
   16.62 +	traceEventControl.hpp \
   16.63 +	traceProducer.cpp
   16.64 +endif
   16.65 +
   16.66 +TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)
   16.67 +
   16.68 +XSLT = $(REMOTE) $(RUN.JAVA) -classpath $(JvmtiOutDir) jvmtiGen
   16.69 +
   16.70 +XML_DEPS =  $(TraceSrcDir)/trace.xml  $(TraceSrcDir)/tracetypes.xml \
   16.71 +	$(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
   16.72 +ifeq ($(HAS_ALT_SRC), true)
   16.73 +	XML_DEPS += $(TraceAltSrcDir)/traceevents.xml
   16.74 +endif
   16.75 +
   16.76 +.PHONY: all clean cleanall
   16.77 +
   16.78 +# #########################################################################
   16.79 +
   16.80 +all: $(TraceGeneratedFiles)
   16.81 +
   16.82 +GENERATE_CODE= \
   16.83 +  $(QUIETLY) echo Generating $@; \
   16.84 +  $(XSLT) -IN $(word 1,$^) -XSL $(word 2,$^) -OUT $@; \
   16.85 +  test -f $@
   16.86 +
   16.87 +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
   16.88 +	$(GENERATE_CODE)
   16.89 +
   16.90 +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
   16.91 +	$(GENERATE_CODE)
   16.92 +
   16.93 +ifeq ($(HAS_ALT_SRC), false)
   16.94 +
   16.95 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
   16.96 +	$(GENERATE_CODE)
   16.97 +
   16.98 +else
   16.99 +
  16.100 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
  16.101 +	$(GENERATE_CODE)
  16.102 +
  16.103 +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
  16.104 +	$(GENERATE_CODE)
  16.105 +
  16.106 +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
  16.107 +	$(GENERATE_CODE)
  16.108 +
  16.109 +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
  16.110 +	$(GENERATE_CODE)
  16.111 +
  16.112 +endif
  16.113 +
  16.114 +# #########################################################################
  16.115 +
  16.116 +clean cleanall:
  16.117 +	rm $(TraceGeneratedFiles)
  16.118 +
  16.119 +
    17.1 --- a/make/solaris/makefiles/vm.make	Fri Jun 07 09:33:01 2013 -0700
    17.2 +++ b/make/solaris/makefiles/vm.make	Mon Jun 10 11:30:51 2013 +0200
    17.3 @@ -1,5 +1,5 @@
    17.4  #
    17.5 -# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    17.6 +# Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    17.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    17.8  #
    17.9  # This code is free software; you can redistribute it and/or modify it
   17.10 @@ -19,7 +19,7 @@
   17.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   17.12  # or visit www.oracle.com if you need additional information or have any
   17.13  # questions.
   17.14 -#  
   17.15 +#
   17.16  #
   17.17  
   17.18  # Rules to build JVM and related libraries, included from vm.make in the build
   17.19 @@ -48,7 +48,7 @@
   17.20  # Src_Dirs_V is everything in src/share/vm/*, plus the right os/*/vm and cpu/*/vm
   17.21  # The adfiles directory contains ad_<arch>.[ch]pp.
   17.22  # The jvmtifiles directory contains jvmti*.[ch]pp
   17.23 -Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles
   17.24 +Src_Dirs_V += $(GENERATED)/adfiles $(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
   17.25  VPATH += $(Src_Dirs_V:%=%:)
   17.26  
   17.27  # set INCLUDES for C preprocessor
   17.28 @@ -87,7 +87,7 @@
   17.29  
   17.30  # This is VERY important! The version define must only be supplied to vm_version.o
   17.31  # If not, ccache will not re-use the cache at all, since the version string might contain
   17.32 -# a time and date. 
   17.33 +# a time and date.
   17.34  CXXFLAGS/vm_version.o += ${JRE_VERSION}
   17.35  
   17.36  CXXFLAGS/BYFILE = $(CXXFLAGS/$@)
   17.37 @@ -103,7 +103,7 @@
   17.38  CFLAGS += $(CFLAGS/NOEX)
   17.39  
   17.40  # Extra flags from gnumake's invocation or environment
   17.41 -CFLAGS += $(EXTRA_CFLAGS) -DINCLUDE_TRACE
   17.42 +CFLAGS += $(EXTRA_CFLAGS)
   17.43  
   17.44  # Math Library (libm.so), do not use -lm.
   17.45  #    There might be two versions of libm.so on the build system:
   17.46 @@ -137,9 +137,7 @@
   17.47  LIBS += -lsocket -lsched -ldl $(LIBM) -lthread -lc -ldemangle
   17.48  endif # sparcWorks
   17.49  
   17.50 -ifeq ("${Platform_arch}", "sparc")
   17.51  LIBS += -lkstat
   17.52 -endif
   17.53  
   17.54  # By default, link the *.o into the library, not the executable.
   17.55  LINK_INTO$(LINK_INTO) = LIBJVM
   17.56 @@ -177,12 +175,14 @@
   17.57  SOURCE_PATHS+=$(HS_COMMON_SRC)/cpu/$(Platform_arch)/vm
   17.58  SOURCE_PATHS+=$(HS_COMMON_SRC)/os_cpu/$(Platform_os_arch)/vm
   17.59  
   17.60 -SOURCE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   17.61 +CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
   17.62 +CORE_PATHS+=$(GENERATED)/jvmtifiles $(GENERATED)/tracefiles
   17.63 +
   17.64 +ifneq ($(INCLUDE_TRACE), false)
   17.65 +CORE_PATHS+=$(shell if [ -d $(HS_ALT_SRC)/share/vm/jfr ]; then \
   17.66    find $(HS_ALT_SRC)/share/vm/jfr -type d; \
   17.67    fi)
   17.68 -
   17.69 -CORE_PATHS=$(foreach path,$(SOURCE_PATHS),$(call altsrc,$(path)) $(path))
   17.70 -CORE_PATHS+=$(GENERATED)/jvmtifiles
   17.71 +endif
   17.72  
   17.73  COMPILER1_PATHS := $(call altsrc,$(HS_COMMON_SRC)/share/vm/c1)
   17.74  COMPILER1_PATHS += $(HS_COMMON_SRC)/share/vm/c1
   17.75 @@ -287,7 +287,7 @@
   17.76  LINK_VM = $(LINK_LIB.CXX)
   17.77  endif
   17.78  # making the library:
   17.79 -$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE) 
   17.80 +$(LIBJVM): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(LIBJVM.o) $(LIBJVM_MAPFILE)
   17.81  ifeq ($(filter -sbfast -xsbfast, $(CFLAGS_BROWSE)),)
   17.82  	@echo Linking vm...
   17.83  	$(QUIETLY) $(LINK_LIB.CXX/PRE_HOOK)
    18.1 --- a/make/windows/build.make	Fri Jun 07 09:33:01 2013 -0700
    18.2 +++ b/make/windows/build.make	Mon Jun 10 11:30:51 2013 +0200
    18.3 @@ -196,6 +196,12 @@
    18.4  
    18.5  # End VERSIONINFO parameters
    18.6  
    18.7 +# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
    18.8 +!ifndef OPENJDK
    18.9 +!if !exists($(WorkSpace)\src\closed)
   18.10 +OPENJDK=true
   18.11 +!endif
   18.12 +!endif
   18.13  
   18.14  # We don't support SA on ia64, and we can't
   18.15  # build it if we are using a version of Vis Studio
   18.16 @@ -273,6 +279,7 @@
   18.17  	@ echo HS_COMPANY=$(COMPANY_NAME)			>> $@
   18.18  	@ echo HS_FILEDESC=$(HS_FILEDESC)			>> $@
   18.19  	@ echo HOTSPOT_VM_DISTRO=$(HOTSPOT_VM_DISTRO)		>> $@
   18.20 +	@ if "$(OPENJDK)" NEQ "" echo OPENJDK=$(OPENJDK)	>> $@
   18.21  	@ echo HS_COPYRIGHT=$(HOTSPOT_VM_COPYRIGHT)		>> $@
   18.22  	@ echo HS_NAME=$(PRODUCT_NAME) $(JDK_MKTG_VERSION)	>> $@
   18.23  	@ echo HS_BUILD_VER=$(HS_BUILD_VER)			>> $@
    19.1 --- a/make/windows/create_obj_files.sh	Fri Jun 07 09:33:01 2013 -0700
    19.2 +++ b/make/windows/create_obj_files.sh	Mon Jun 10 11:30:51 2013 +0200
    19.3 @@ -71,13 +71,11 @@
    19.4    BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/${sd}"
    19.5  done
    19.6  
    19.7 -BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles"
    19.8 +BASE_PATHS="${BASE_PATHS} ${GENERATED}/jvmtifiles ${GENERATED}/tracefiles"
    19.9  
   19.10  if [ -d "${ALTSRC}/share/vm/jfr" ]; then
   19.11 -  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent"
   19.12 -  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/agent/isolated_deps/util"
   19.13 -  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/jvm"
   19.14 -  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
   19.15 +  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr"
   19.16 +  BASE_PATHS="${BASE_PATHS} ${ALTSRC}/share/vm/jfr/buffers"
   19.17  fi
   19.18  
   19.19  BASE_PATHS="${BASE_PATHS} ${COMMONSRC}/share/vm/prims/wbtestmethods"
    20.1 --- a/make/windows/makefiles/generated.make	Fri Jun 07 09:33:01 2013 -0700
    20.2 +++ b/make/windows/makefiles/generated.make	Mon Jun 10 11:30:51 2013 +0200
    20.3 @@ -1,5 +1,5 @@
    20.4  #
    20.5 -# Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    20.6 +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    20.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.8  #
    20.9  # This code is free software; you can redistribute it and/or modify it
   20.10 @@ -30,15 +30,19 @@
   20.11  JvmtiOutDir=jvmtifiles
   20.12  !include $(WorkSpace)/make/windows/makefiles/jvmti.make
   20.13  
   20.14 +# Pick up rules for building trace
   20.15 +TraceOutDir=tracefiles
   20.16 +!include $(WorkSpace)/make/windows/makefiles/trace.make
   20.17 +
   20.18  # Pick up rules for building SA
   20.19  !include $(WorkSpace)/make/windows/makefiles/sa.make
   20.20  
   20.21  AdlcOutDir=adfiles
   20.22  
   20.23  !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered")
   20.24 -default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) buildobjfiles
   20.25 +default:: $(AdlcOutDir)/ad_$(Platform_arch_model).cpp $(AdlcOutDir)/dfa_$(Platform_arch_model).cpp $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
   20.26  !else
   20.27 -default:: $(JvmtiGeneratedFiles) buildobjfiles
   20.28 +default:: $(JvmtiGeneratedFiles) $(TraceGeneratedFiles) buildobjfiles
   20.29  !endif
   20.30  
   20.31  buildobjfiles:
    21.1 --- a/make/windows/makefiles/projectcreator.make	Fri Jun 07 09:33:01 2013 -0700
    21.2 +++ b/make/windows/makefiles/projectcreator.make	Mon Jun 10 11:30:51 2013 +0200
    21.3 @@ -19,7 +19,7 @@
    21.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    21.5  # or visit www.oracle.com if you need additional information or have any
    21.6  # questions.
    21.7 -#  
    21.8 +#
    21.9  #
   21.10  
   21.11  !include $(WorkSpace)/make/windows/makefiles/rules.make
   21.12 @@ -72,7 +72,7 @@
   21.13          -ignorePath ppc \
   21.14          -ignorePath zero \
   21.15          -hidePath .hg
   21.16 -	
   21.17 +
   21.18  
   21.19  # This is referenced externally by both the IDE and batch builds
   21.20  ProjectCreatorOptions=
   21.21 @@ -89,7 +89,7 @@
   21.22          -disablePch        bytecodeInterpreter.cpp \
   21.23          -disablePch        bytecodeInterpreterWithChecks.cpp \
   21.24          -disablePch        getThread_windows_$(Platform_arch).cpp \
   21.25 -        -disablePch_compiler2     opcodes.cpp    
   21.26 +        -disablePch_compiler2     opcodes.cpp
   21.27  
   21.28  # Common options for the IDE builds for core, c1, and c2
   21.29  ProjectCreatorIDEOptions=\
   21.30 @@ -115,7 +115,7 @@
   21.31          -define TARGET_OS_ARCH_windows_x86 \
   21.32          -define TARGET_OS_FAMILY_windows \
   21.33          -define TARGET_COMPILER_visCPP \
   21.34 -        -define INCLUDE_TRACE \
   21.35 +        -define INCLUDE_TRACE=1 \
   21.36         $(ProjectCreatorIncludesPRIVATE)
   21.37  
   21.38  # Add in build-specific options
   21.39 @@ -203,4 +203,12 @@
   21.40   -additionalFile jvmtiEnter.cpp \
   21.41   -additionalFile jvmtiEnterTrace.cpp \
   21.42   -additionalFile jvmti.h \
   21.43 - -additionalFile bytecodeInterpreterWithChecks.cpp
   21.44 + -additionalFile bytecodeInterpreterWithChecks.cpp \
   21.45 + -additionalFile traceEventClasses.hpp \
   21.46 + -additionalFile traceEventIds.hpp \
   21.47 +!if "$(OPENJDK)" != "true"
   21.48 + -additionalFile traceRequestables.hpp \
   21.49 + -additionalFile traceEventControl.hpp \
   21.50 + -additionalFile traceProducer.cpp \
   21.51 +!endif
   21.52 + -additionalFile traceTypes.hpp
    22.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    22.2 +++ b/make/windows/makefiles/trace.make	Mon Jun 10 11:30:51 2013 +0200
    22.3 @@ -0,0 +1,121 @@
    22.4 +#
    22.5 +# Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    22.6 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.7 +#
    22.8 +# This code is free software; you can redistribute it and/or modify it
    22.9 +# under the terms of the GNU General Public License version 2 only, as
   22.10 +# published by the Free Software Foundation.
   22.11 +#
   22.12 +# This code is distributed in the hope that it will be useful, but WITHOUT
   22.13 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   22.14 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   22.15 +# version 2 for more details (a copy is included in the LICENSE file that
   22.16 +# accompanied this code).
   22.17 +#
   22.18 +# You should have received a copy of the GNU General Public License version
   22.19 +# 2 along with this work; if not, write to the Free Software Foundation,
   22.20 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   22.21 +#
   22.22 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   22.23 +# or visit www.oracle.com if you need additional information or have any
   22.24 +# questions.
   22.25 +#
   22.26 +#
   22.27 +
   22.28 +# This makefile (trace.make) is included from the trace.make in the
   22.29 +# build directories.
   22.30 +#
   22.31 +# It knows how to build and run the tools to generate trace files.
   22.32 +
   22.33 +!include $(WorkSpace)/make/windows/makefiles/rules.make
   22.34 +
   22.35 +# #########################################################################
   22.36 +
   22.37 +
   22.38 +TraceAltSrcDir = $(WorkSpace)/src/closed/share/vm/trace
   22.39 +TraceSrcDir = $(WorkSpace)/src/share/vm/trace
   22.40 +
   22.41 +TraceGeneratedNames =     \
   22.42 +    traceEventClasses.hpp \
   22.43 +    traceEventIds.hpp     \
   22.44 +    traceTypes.hpp
   22.45 +
   22.46 +
   22.47 +!if "$(OPENJDK)" != "true"
   22.48 +TraceGeneratedNames = $(TraceGeneratedNames) \
   22.49 +    traceRequestables.hpp \
   22.50 +    traceEventControl.hpp \
   22.51 +    traceProducer.cpp
   22.52 +!endif
   22.53 +
   22.54 +
   22.55 +#Note: TraceGeneratedFiles must be kept in sync with TraceGeneratedNames by hand.
   22.56 +#Should be equivalent to "TraceGeneratedFiles = $(TraceGeneratedNames:%=$(TraceOutDir)/%)"
   22.57 +TraceGeneratedFiles = \
   22.58 +    $(TraceOutDir)/traceEventClasses.hpp \
   22.59 +    $(TraceOutDir)/traceEventIds.hpp     \
   22.60 +    $(TraceOutDir)/traceTypes.hpp
   22.61 +
   22.62 +!if "$(OPENJDK)" != "true"
   22.63 +TraceGeneratedFiles = $(TraceGeneratedFiles) \
   22.64 +    $(TraceOutDir)/traceRequestables.hpp \
   22.65 +    $(TraceOutDir)/traceEventControl.hpp \
   22.66 +    $(TraceOutDir)/traceProducer.cpp
   22.67 +!endif
   22.68 +
   22.69 +XSLT = $(QUIETLY) $(REMOTE) $(RUN_JAVA) -classpath $(JvmtiOutDir) jvmtiGen
   22.70 +
   22.71 +XML_DEPS = $(TraceSrcDir)/trace.xml $(TraceSrcDir)/tracetypes.xml \
   22.72 +    $(TraceSrcDir)/trace.dtd $(TraceSrcDir)/xinclude.mod
   22.73 +
   22.74 +!if "$(OPENJDK)" != "true"
   22.75 +XML_DEPS = $(XML_DEPS) $(TraceAltSrcDir)/traceevents.xml
   22.76 +!endif
   22.77 +
   22.78 +.PHONY: all clean cleanall
   22.79 +
   22.80 +# #########################################################################
   22.81 +
   22.82 +default::
   22.83 +	@if not exist $(TraceOutDir) mkdir $(TraceOutDir)
   22.84 +
   22.85 +$(TraceOutDir)/traceEventIds.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventIds.xsl $(XML_DEPS)
   22.86 +	@echo Generating $@
   22.87 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventIds.xsl -OUT $(TraceOutDir)/traceEventIds.hpp
   22.88 +
   22.89 +$(TraceOutDir)/traceTypes.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceTypes.xsl $(XML_DEPS)
   22.90 +	@echo Generating $@
   22.91 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceTypes.xsl -OUT $(TraceOutDir)/traceTypes.hpp
   22.92 +
   22.93 +!if "$(OPENJDK)" == "true"
   22.94 +
   22.95 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceSrcDir)/traceEventClasses.xsl $(XML_DEPS)
   22.96 +	@echo Generating $@
   22.97 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
   22.98 +
   22.99 +!else
  22.100 +
  22.101 +$(TraceOutDir)/traceEventClasses.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventClasses.xsl $(XML_DEPS)
  22.102 +	@echo Generating $@
  22.103 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventClasses.xsl -OUT $(TraceOutDir)/traceEventClasses.hpp
  22.104 +
  22.105 +$(TraceOutDir)/traceProducer.cpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceProducer.xsl $(XML_DEPS)
  22.106 +	@echo Generating $@
  22.107 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceProducer.xsl -OUT $(TraceOutDir)/traceProducer.cpp
  22.108 +
  22.109 +$(TraceOutDir)/traceRequestables.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceRequestables.xsl $(XML_DEPS)
  22.110 +	@echo Generating $@
  22.111 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceRequestables.xsl -OUT $(TraceOutDir)/traceRequestables.hpp
  22.112 +
  22.113 +$(TraceOutDir)/traceEventControl.hpp: $(TraceSrcDir)/trace.xml $(TraceAltSrcDir)/traceEventControl.xsl $(XML_DEPS)
  22.114 +	@echo Generating $@
  22.115 +	@$(XSLT) -IN $(TraceSrcDir)/trace.xml -XSL $(TraceAltSrcDir)/traceEventControl.xsl -OUT $(TraceOutDir)/traceEventControl.hpp
  22.116 +
  22.117 +!endif
  22.118 +
  22.119 +# #########################################################################
  22.120 +
  22.121 +cleanall :
  22.122 +	rm $(TraceGeneratedFiles)
  22.123 +
  22.124 +
    23.1 --- a/make/windows/makefiles/vm.make	Fri Jun 07 09:33:01 2013 -0700
    23.2 +++ b/make/windows/makefiles/vm.make	Mon Jun 10 11:30:51 2013 +0200
    23.3 @@ -66,10 +66,6 @@
    23.4  CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\""
    23.5  CXX_FLAGS=$(CXX_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\""
    23.6  
    23.7 -!ifndef JAVASE_EMBEDDED
    23.8 -CXX_FLAGS=$(CXX_FLAGS) /D "INCLUDE_TRACE"
    23.9 -!endif
   23.10 -
   23.11  CXX_FLAGS=$(CXX_FLAGS) $(CXX_INCLUDE_DIRS)
   23.12  
   23.13  # Define that so jni.h is on correct side
   23.14 @@ -144,6 +140,7 @@
   23.15  VM_PATH=../generated
   23.16  VM_PATH=$(VM_PATH);../generated/adfiles
   23.17  VM_PATH=$(VM_PATH);../generated/jvmtifiles
   23.18 +VM_PATH=$(VM_PATH);../generated/tracefiles
   23.19  VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/c1
   23.20  VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/compiler
   23.21  VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/code
   23.22 @@ -172,10 +169,8 @@
   23.23  VM_PATH=$(VM_PATH);$(WorkSpace)/src/share/vm/opto
   23.24  
   23.25  !if exists($(ALTSRC)\share\vm\jfr)
   23.26 -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent
   23.27 -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/agent/isolated_deps/util
   23.28 -VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/jvm
   23.29  VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr
   23.30 +VM_PATH=$(VM_PATH);$(ALTSRC)/share/vm/jfr/buffers
   23.31  !endif
   23.32  
   23.33  VM_PATH={$(VM_PATH)}
   23.34 @@ -384,16 +379,13 @@
   23.35  {..\generated\jvmtifiles}.cpp.obj::
   23.36          $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.37  
   23.38 +{..\generated\tracefiles}.cpp.obj::
   23.39 +        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.40 +
   23.41  {$(ALTSRC)\share\vm\jfr}.cpp.obj::
   23.42          $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.43  
   23.44 -{$(ALTSRC)\share\vm\jfr\agent}.cpp.obj::
   23.45 -        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.46 -
   23.47 -{$(ALTSRC)\share\vm\jfr\agent\isolated_deps\util}.cpp.obj::
   23.48 -        $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.49 -
   23.50 -{$(ALTSRC)\share\vm\jfr\jvm}.cpp.obj::
   23.51 +{$(ALTSRC)\share\vm\jfr\buffers}.cpp.obj::
   23.52          $(CXX) $(CXX_FLAGS) $(CXX_USE_PCH) /c $<
   23.53  
   23.54  default::
    24.1 --- a/make/windows/projectfiles/common/Makefile	Fri Jun 07 09:33:01 2013 -0700
    24.2 +++ b/make/windows/projectfiles/common/Makefile	Mon Jun 10 11:30:51 2013 +0200
    24.3 @@ -1,5 +1,5 @@
    24.4  #
    24.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    24.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    24.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    24.8  #
    24.9  # This code is free software; you can redistribute it and/or modify it
   24.10 @@ -45,6 +45,12 @@
   24.11  !endif
   24.12  !endif
   24.13  
   24.14 +# if hotspot-only build and/or OPENJDK isn't passed down, need to set OPENJDK
   24.15 +!ifndef OPENJDK
   24.16 +!if !exists($(WorkSpace)\src\closed)
   24.17 +OPENJDK=true
   24.18 +!endif
   24.19 +!endif
   24.20  
   24.21  
   24.22  !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make
   24.23 @@ -54,6 +60,10 @@
   24.24  JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles
   24.25  !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/jvmti.make
   24.26  
   24.27 +# Pick up rules for building trace
   24.28 +TraceOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\tracefiles
   24.29 +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/trace.make
   24.30 +
   24.31  !if "$(Variant)" == "compiler2"
   24.32  # Pick up rules for building adlc
   24.33  !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make
   24.34 @@ -66,7 +76,7 @@
   24.35  
   24.36  HS_INTERNAL_NAME=jvm
   24.37  
   24.38 -default:: $(AdditionalTargets) $(JvmtiGeneratedFiles)
   24.39 +default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) $(TraceGeneratedFiles)
   24.40  
   24.41  !include $(HOTSPOTWORKSPACE)/make/hotspot_version
   24.42  
    25.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp	Fri Jun 07 09:33:01 2013 -0700
    25.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp	Mon Jun 10 11:30:51 2013 +0200
    25.3 @@ -252,6 +252,11 @@
    25.4        return false;
    25.5      }
    25.6  
    25.7 +    // Could be a zombie method
    25.8 +    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
    25.9 +      return false;
   25.10 +    }
   25.11 +
   25.17      // It should be safe to construct the sender though it might not be valid
   25.18  
   25.19      frame sender(_SENDER_SP, younger_sp, adjusted_stack);
   25.20 @@ -294,10 +299,10 @@
   25.21        return jcw_safe;
   25.22      }
   25.23  
   25.24 -    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
   25.25 +    // If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
   25.26      // because you must allocate window space
   25.27  
   25.28 -    if (sender_blob->frame_size() == 0) {
   25.29 +    if (sender_blob->frame_size() <= 0) {
   25.30        assert(!sender_blob->is_nmethod(), "should count return address at least");
   25.31        return false;
   25.32      }
    26.1 --- a/src/cpu/x86/vm/frame_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    26.2 +++ b/src/cpu/x86/vm/frame_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    26.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -33,6 +33,7 @@
   26.11  #include "runtime/handles.inline.hpp"
   26.12  #include "runtime/javaCalls.hpp"
   26.13  #include "runtime/monitorChunk.hpp"
   26.14 +#include "runtime/os.hpp"
   26.15  #include "runtime/signature.hpp"
   26.16  #include "runtime/stubCodeGenerator.hpp"
   26.17  #include "runtime/stubRoutines.hpp"
   26.18 @@ -54,16 +55,22 @@
   26.19    address   sp = (address)_sp;
   26.20    address   fp = (address)_fp;
   26.21    address   unextended_sp = (address)_unextended_sp;
   26.22 -  // sp must be within the stack
   26.23 -  bool sp_safe = (sp <= thread->stack_base()) &&
   26.24 -                 (sp >= thread->stack_base() - thread->stack_size());
   26.25 +
   26.26 +  // consider stack guards when trying to determine "safe" stack pointers
   26.27 +  static size_t stack_guard_size = os::uses_stack_guard_pages() ? (StackYellowPages + StackRedPages) * os::vm_page_size() : 0;
   26.28 +  size_t usable_stack_size = thread->stack_size() - stack_guard_size;
   26.29 +
   26.30 +  // sp must be within the usable part of the stack (not in guards)
   26.31 +  bool sp_safe = (sp < thread->stack_base()) &&
   26.32 +                 (sp >= thread->stack_base() - usable_stack_size);
   26.33 +
   26.34  
   26.35    if (!sp_safe) {
   26.36      return false;
   26.37    }
   26.38  
   26.39    // unextended sp must be within the stack and above or equal sp
   26.40 -  bool unextended_sp_safe = (unextended_sp <= thread->stack_base()) &&
   26.41 +  bool unextended_sp_safe = (unextended_sp < thread->stack_base()) &&
   26.42                              (unextended_sp >= sp);
   26.43  
   26.44    if (!unextended_sp_safe) {
   26.45 @@ -71,7 +78,8 @@
   26.46    }
   26.47  
   26.48    // an fp must be within the stack and above (but not equal) sp
   26.49 -  bool fp_safe = (fp <= thread->stack_base()) && (fp > sp);
   26.50 +  // second evaluation on fp+ is added to handle situation where fp is -1
   26.51 +  bool fp_safe = (fp < thread->stack_base() && (fp > sp) && (((fp + (return_addr_offset * sizeof(void*))) < thread->stack_base())));
   26.52  
   26.53    // We know sp/unextended_sp are safe only fp is questionable here
   26.54  
   26.55 @@ -86,6 +94,13 @@
   26.56      // other generic buffer blobs are more problematic so we just assume they are
   26.57      // ok. adapter blobs never have a frame complete and are never ok.
   26.58  
   26.59 +    // check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
   26.60 +
   26.61 +    if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
   26.62 +      //assert(0, "Invalid frame_size");
   26.63 +      return false;
   26.64 +    }
   26.65 +
   26.66      if (!_cb->is_frame_complete_at(_pc)) {
   26.67        if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
   26.68          return false;
   26.69 @@ -107,7 +122,7 @@
   26.70  
   26.71        address jcw = (address)entry_frame_call_wrapper();
   26.72  
   26.73 -      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > fp);
   26.74 +      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > fp);
   26.75  
   26.76        return jcw_safe;
   26.77  
   26.78 @@ -134,12 +149,6 @@
   26.79        sender_pc = (address) *(sender_sp-1);
   26.80      }
   26.81  
   26.82 -    // We must always be able to find a recognizable pc
   26.83 -    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
   26.84 -    if (sender_pc == NULL ||  sender_blob == NULL) {
   26.85 -      return false;
   26.86 -    }
   26.87 -
   26.88  
   26.89      // If the potential sender is the interpreter then we can do some more checking
   26.90      if (Interpreter::contains(sender_pc)) {
   26.91 @@ -149,7 +158,7 @@
   26.92        // is really a frame pointer.
   26.93  
   26.94        intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
   26.95 -      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
   26.96 +      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
   26.97  
   26.98        if (!saved_fp_safe) {
   26.99          return false;
  26.100 @@ -163,6 +172,17 @@
  26.101  
  26.102      }
  26.103  
  26.104 +    // We must always be able to find a recognizable pc
  26.105 +    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
  26.106 +    if (sender_pc == NULL ||  sender_blob == NULL) {
  26.107 +      return false;
  26.108 +    }
  26.109 +
  26.110 +    // Could be a zombie method
  26.111 +    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
  26.112 +      return false;
  26.113 +    }
  26.114 +
  26.115      // Could just be some random pointer within the codeBlob
  26.116      if (!sender_blob->code_contains(sender_pc)) {
  26.117        return false;
  26.118 @@ -174,10 +194,9 @@
  26.119      }
  26.120  
  26.121      // Could be the call_stub
  26.122 -
  26.123      if (StubRoutines::returns_to_call_stub(sender_pc)) {
  26.124        intptr_t *saved_fp = (intptr_t*)*(sender_sp - frame::sender_sp_offset);
  26.125 -      bool saved_fp_safe = ((address)saved_fp <= thread->stack_base()) && (saved_fp > sender_sp);
  26.126 +      bool saved_fp_safe = ((address)saved_fp < thread->stack_base()) && (saved_fp > sender_sp);
  26.127  
  26.128        if (!saved_fp_safe) {
  26.129          return false;
  26.130 @@ -190,15 +209,24 @@
  26.131        // Validate the JavaCallWrapper an entry frame must have
  26.132        address jcw = (address)sender.entry_frame_call_wrapper();
  26.133  
  26.134 -      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > (address)sender.fp());
  26.135 +      bool jcw_safe = (jcw < thread->stack_base()) && ( jcw > (address)sender.fp());
  26.136  
  26.137        return jcw_safe;
  26.138      }
  26.139  
  26.140 -    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
  26.141 +    if (sender_blob->is_nmethod()) {
  26.142 +        nmethod* nm = sender_blob->as_nmethod_or_null();
  26.143 +        if (nm != NULL) {
  26.144 +            if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc)) {
  26.145 +                return false;
  26.146 +            }
  26.147 +        }
  26.148 +    }
  26.149 +
   26.150 +    // If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
  26.151      // because the return address counts against the callee's frame.
  26.152  
  26.153 -    if (sender_blob->frame_size() == 0) {
  26.154 +    if (sender_blob->frame_size() <= 0) {
  26.155        assert(!sender_blob->is_nmethod(), "should count return address at least");
  26.156        return false;
  26.157      }
  26.158 @@ -208,7 +236,9 @@
  26.159      // should not be anything but the call stub (already covered), the interpreter (already covered)
  26.160      // or an nmethod.
  26.161  
  26.162 -    assert(sender_blob->is_nmethod(), "Impossible call chain");
  26.163 +    if (!sender_blob->is_nmethod()) {
  26.164 +        return false;
  26.165 +    }
  26.166  
  26.167      // Could put some more validation for the potential non-interpreted sender
  26.168      // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
    27.1 --- a/src/os/bsd/vm/osThread_bsd.hpp	Fri Jun 07 09:33:01 2013 -0700
    27.2 +++ b/src/os/bsd/vm/osThread_bsd.hpp	Mon Jun 10 11:30:51 2013 +0200
    27.3 @@ -1,5 +1,5 @@
    27.4  /*
    27.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    27.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    27.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.8   *
    27.9   * This code is free software; you can redistribute it and/or modify it
   27.10 @@ -94,7 +94,7 @@
   27.11    // flags that support signal based suspend/resume on Bsd are in a
   27.12    // separate class to avoid confusion with many flags in OSThread that
   27.13    // are used by VM level suspend/resume.
   27.14 -  os::Bsd::SuspendResume sr;
   27.15 +  os::SuspendResume sr;
   27.16  
   27.17    // _ucontext and _siginfo are used by SR_handler() to save thread context,
   27.18    // and they will later be used to walk the stack or reposition thread PC.
    28.1 --- a/src/os/bsd/vm/os_bsd.cpp	Fri Jun 07 09:33:01 2013 -0700
    28.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Mon Jun 10 11:30:51 2013 +0200
    28.3 @@ -1852,17 +1852,118 @@
    28.4  
    28.5  // Bsd(POSIX) specific hand shaking semaphore.
    28.6  #ifdef __APPLE__
    28.7 -static semaphore_t sig_sem;
    28.8 +typedef semaphore_t os_semaphore_t;
    28.9  #define SEM_INIT(sem, value)    semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, value)
   28.10 -#define SEM_WAIT(sem)           semaphore_wait(sem);
   28.11 -#define SEM_POST(sem)           semaphore_signal(sem);
   28.12 +#define SEM_WAIT(sem)           semaphore_wait(sem)
   28.13 +#define SEM_POST(sem)           semaphore_signal(sem)
   28.14 +#define SEM_DESTROY(sem)        semaphore_destroy(mach_task_self(), sem)
   28.15  #else
   28.16 -static sem_t sig_sem;
   28.17 +typedef sem_t os_semaphore_t;
   28.18  #define SEM_INIT(sem, value)    sem_init(&sem, 0, value)
   28.19 -#define SEM_WAIT(sem)           sem_wait(&sem);
   28.20 -#define SEM_POST(sem)           sem_post(&sem);
   28.21 +#define SEM_WAIT(sem)           sem_wait(&sem)
   28.22 +#define SEM_POST(sem)           sem_post(&sem)
   28.23 +#define SEM_DESTROY(sem)        sem_destroy(&sem)
   28.24  #endif
   28.25  
   28.26 +class Semaphore : public StackObj {
   28.27 +  public:
   28.28 +    Semaphore();
   28.29 +    ~Semaphore();
   28.30 +    void signal();
   28.31 +    void wait();
   28.32 +    bool trywait();
   28.33 +    bool timedwait(unsigned int sec, int nsec);
   28.34 +  private:
   28.35 +    jlong currenttime() const;
   28.36 +    os_semaphore_t _semaphore;
   28.37 +};
   28.38 +
   28.39 +Semaphore::Semaphore() : _semaphore(0) {
   28.40 +  SEM_INIT(_semaphore, 0);
   28.41 +}
   28.42 +
   28.43 +Semaphore::~Semaphore() {
   28.44 +  SEM_DESTROY(_semaphore);
   28.45 +}
   28.46 +
   28.47 +void Semaphore::signal() {
   28.48 +  SEM_POST(_semaphore);
   28.49 +}
   28.50 +
   28.51 +void Semaphore::wait() {
   28.52 +  SEM_WAIT(_semaphore);
   28.53 +}
   28.54 +
   28.55 +jlong Semaphore::currenttime() const {
   28.56 +    struct timeval tv;
   28.57 +    gettimeofday(&tv, NULL);
   28.58 +    return (tv.tv_sec * NANOSECS_PER_SEC) + (tv.tv_usec * 1000);
   28.59 +}
   28.60 +
   28.61 +#ifdef __APPLE__
   28.62 +bool Semaphore::trywait() {
   28.63 +  return timedwait(0, 0);
   28.64 +}
   28.65 +
   28.66 +bool Semaphore::timedwait(unsigned int sec, int nsec) {
   28.67 +  kern_return_t kr = KERN_ABORTED;
   28.68 +  mach_timespec_t waitspec;
   28.69 +  waitspec.tv_sec = sec;
   28.70 +  waitspec.tv_nsec = nsec;
   28.71 +
   28.72 +  jlong starttime = currenttime();
   28.73 +
   28.74 +  kr = semaphore_timedwait(_semaphore, waitspec);
   28.75 +  while (kr == KERN_ABORTED) {
   28.76 +    jlong totalwait = (sec * NANOSECS_PER_SEC) + nsec;
   28.77 +
   28.78 +    jlong current = currenttime();
   28.79 +    jlong passedtime = current - starttime;
   28.80 +
   28.81 +    if (passedtime >= totalwait) {
   28.82 +      waitspec.tv_sec = 0;
   28.83 +      waitspec.tv_nsec = 0;
   28.84 +    } else {
   28.85 +      jlong waittime = totalwait - (current - starttime);
   28.86 +      waitspec.tv_sec = waittime / NANOSECS_PER_SEC;
   28.87 +      waitspec.tv_nsec = waittime % NANOSECS_PER_SEC;
   28.88 +    }
   28.89 +
   28.90 +    kr = semaphore_timedwait(_semaphore, waitspec);
   28.91 +  }
   28.92 +
   28.93 +  return kr == KERN_SUCCESS;
   28.94 +}
   28.95 +
   28.96 +#else
   28.97 +
   28.98 +bool Semaphore::trywait() {
   28.99 +  return sem_trywait(&_semaphore) == 0;
  28.100 +}
  28.101 +
  28.102 +bool Semaphore::timedwait(unsigned int sec, int nsec) {
  28.103 +  struct timespec ts;
  28.104 +  jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
  28.105 +
  28.106 +  while (1) {
  28.107 +    int result = sem_timedwait(&_semaphore, &ts);
  28.108 +    if (result == 0) {
  28.109 +      return true;
  28.110 +    } else if (errno == EINTR) {
  28.111 +      continue;
  28.112 +    } else if (errno == ETIMEDOUT) {
  28.113 +      return false;
  28.114 +    } else {
  28.115 +      return false;
  28.116 +    }
  28.117 +  }
  28.118 +}
  28.119 +
  28.120 +#endif // __APPLE__
  28.121 +
  28.122 +static os_semaphore_t sig_sem;
  28.123 +static Semaphore sr_semaphore;
  28.124 +
  28.125  void os::signal_init_pd() {
  28.126    // Initialize signal structures
  28.127    ::memset((void*)pending_signals, 0, sizeof(pending_signals));
  28.128 @@ -2616,9 +2717,6 @@
  28.129  static void resume_clear_context(OSThread *osthread) {
  28.130    osthread->set_ucontext(NULL);
  28.131    osthread->set_siginfo(NULL);
  28.132 -
  28.133 -  // notify the suspend action is completed, we have now resumed
  28.134 -  osthread->sr.clear_suspended();
  28.135  }
  28.136  
  28.137  static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
  28.138 @@ -2638,7 +2736,7 @@
  28.139  // its signal handlers run and prevents sigwait()'s use with the
  28.140  // mutex granting granting signal.
  28.141  //
  28.142 -// Currently only ever called on the VMThread
  28.143 +// Currently only ever called on the VMThread or JavaThread
  28.144  //
  28.145  static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
  28.146    // Save and restore errno to avoid confusing native code with EINTR
  28.147 @@ -2647,38 +2745,48 @@
  28.148  
  28.149    Thread* thread = Thread::current();
  28.150    OSThread* osthread = thread->osthread();
  28.151 -  assert(thread->is_VM_thread(), "Must be VMThread");
  28.152 -  // read current suspend action
  28.153 -  int action = osthread->sr.suspend_action();
  28.154 -  if (action == os::Bsd::SuspendResume::SR_SUSPEND) {
  28.155 +  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
  28.156 +
  28.157 +  os::SuspendResume::State current = osthread->sr.state();
  28.158 +  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
  28.159      suspend_save_context(osthread, siginfo, context);
  28.160  
  28.161 -    // Notify the suspend action is about to be completed. do_suspend()
  28.162 -    // waits until SR_SUSPENDED is set and then returns. We will wait
  28.163 -    // here for a resume signal and that completes the suspend-other
  28.164 -    // action. do_suspend/do_resume is always called as a pair from
  28.165 -    // the same thread - so there are no races
  28.166 -
  28.167 -    // notify the caller
  28.168 -    osthread->sr.set_suspended();
  28.169 -
  28.170 -    sigset_t suspend_set;  // signals for sigsuspend()
  28.171 -
  28.172 -    // get current set of blocked signals and unblock resume signal
  28.173 -    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
  28.174 -    sigdelset(&suspend_set, SR_signum);
  28.175 -
  28.176 -    // wait here until we are resumed
  28.177 -    do {
  28.178 -      sigsuspend(&suspend_set);
  28.179 -      // ignore all returns until we get a resume signal
  28.180 -    } while (osthread->sr.suspend_action() != os::Bsd::SuspendResume::SR_CONTINUE);
  28.181 +    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
  28.182 +    os::SuspendResume::State state = osthread->sr.suspended();
  28.183 +    if (state == os::SuspendResume::SR_SUSPENDED) {
  28.184 +      sigset_t suspend_set;  // signals for sigsuspend()
  28.185 +
  28.186 +      // get current set of blocked signals and unblock resume signal
  28.187 +      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
  28.188 +      sigdelset(&suspend_set, SR_signum);
  28.189 +
  28.190 +      sr_semaphore.signal();
  28.191 +      // wait here until we are resumed
  28.192 +      while (1) {
  28.193 +        sigsuspend(&suspend_set);
  28.194 +
  28.195 +        os::SuspendResume::State result = osthread->sr.running();
  28.196 +        if (result == os::SuspendResume::SR_RUNNING) {
  28.197 +          sr_semaphore.signal();
  28.198 +          break;
  28.199 +        } else if (result != os::SuspendResume::SR_SUSPENDED) {
  28.200 +          ShouldNotReachHere();
  28.201 +        }
  28.202 +      }
  28.203 +
  28.204 +    } else if (state == os::SuspendResume::SR_RUNNING) {
  28.205 +      // request was cancelled, continue
  28.206 +    } else {
  28.207 +      ShouldNotReachHere();
  28.208 +    }
  28.209  
  28.210      resume_clear_context(osthread);
  28.211 -
  28.212 +  } else if (current == os::SuspendResume::SR_RUNNING) {
  28.213 +    // request was cancelled, continue
  28.214 +  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
  28.215 +    // ignore
  28.216    } else {
  28.217 -    assert(action == os::Bsd::SuspendResume::SR_CONTINUE, "unexpected sr action");
  28.218 -    // nothing special to do - just leave the handler
  28.219 +    // ignore
  28.220    }
  28.221  
  28.222    errno = old_errno;
  28.223 @@ -2722,42 +2830,82 @@
  28.224    return 0;
  28.225  }
  28.226  
  28.227 +static int sr_notify(OSThread* osthread) {
  28.228 +  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  28.229 +  assert_status(status == 0, status, "pthread_kill");
  28.230 +  return status;
  28.231 +}
  28.232 +
  28.233 +// "Randomly" selected value for how long we want to spin
  28.234 +// before bailing out on suspending a thread, also how often
  28.235 +// we send a signal to a thread we want to resume
  28.236 +static const int RANDOMLY_LARGE_INTEGER = 1000000;
  28.237 +static const int RANDOMLY_LARGE_INTEGER2 = 100;
  28.238  
  28.239  // returns true on success and false on error - really an error is fatal
  28.240  // but this seems the normal response to library errors
  28.241  static bool do_suspend(OSThread* osthread) {
  28.242 +  assert(osthread->sr.is_running(), "thread should be running");
  28.243 +  assert(!sr_semaphore.trywait(), "semaphore has invalid state");
  28.244 +
  28.245    // mark as suspended and send signal
  28.246 -  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_SUSPEND);
  28.247 -  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  28.248 -  assert_status(status == 0, status, "pthread_kill");
  28.249 -
  28.250 -  // check status and wait until notified of suspension
  28.251 -  if (status == 0) {
  28.252 -    for (int i = 0; !osthread->sr.is_suspended(); i++) {
  28.253 -      os::yield_all(i);
  28.254 -    }
  28.255 -    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
  28.256 -    return true;
  28.257 -  }
  28.258 -  else {
  28.259 -    osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
  28.260 +  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
  28.261 +    // failed to switch, state wasn't running?
  28.262 +    ShouldNotReachHere();
  28.263      return false;
  28.264    }
  28.265 +
  28.266 +  if (sr_notify(osthread) != 0) {
  28.267 +    ShouldNotReachHere();
  28.268 +  }
  28.269 +
  28.270 +  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  28.271 +  while (true) {
  28.272 +    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  28.273 +      break;
  28.274 +    } else {
  28.275 +      // timeout
  28.276 +      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
  28.277 +      if (cancelled == os::SuspendResume::SR_RUNNING) {
  28.278 +        return false;
  28.279 +      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
  28.280 +        // make sure that we consume the signal on the semaphore as well
  28.281 +        sr_semaphore.wait();
  28.282 +        break;
  28.283 +      } else {
  28.284 +        ShouldNotReachHere();
  28.285 +        return false;
  28.286 +      }
  28.287 +    }
  28.288 +  }
  28.289 +
  28.290 +  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  28.291 +  return true;
  28.292  }
  28.293  
  28.294  static void do_resume(OSThread* osthread) {
  28.295    assert(osthread->sr.is_suspended(), "thread should be suspended");
  28.296 -  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_CONTINUE);
  28.297 -
  28.298 -  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  28.299 -  assert_status(status == 0, status, "pthread_kill");
  28.300 -  // check status and wait unit notified of resumption
  28.301 -  if (status == 0) {
  28.302 -    for (int i = 0; osthread->sr.is_suspended(); i++) {
  28.303 -      os::yield_all(i);
  28.304 +  assert(!sr_semaphore.trywait(), "invalid semaphore state");
  28.305 +
  28.306 +  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
  28.307 +    // failed to switch to WAKEUP_REQUEST
  28.308 +    ShouldNotReachHere();
  28.309 +    return;
  28.310 +  }
  28.311 +
  28.312 +  while (true) {
  28.313 +    if (sr_notify(osthread) == 0) {
  28.314 +      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  28.315 +        if (osthread->sr.is_running()) {
  28.316 +          return;
  28.317 +        }
  28.318 +      }
  28.319 +    } else {
  28.320 +      ShouldNotReachHere();
  28.321      }
  28.322    }
  28.323 -  osthread->sr.set_suspend_action(os::Bsd::SuspendResume::SR_NONE);
  28.324 +
  28.325 +  guarantee(osthread->sr.is_running(), "Must be running!");
  28.326  }
  28.327  
  28.328  ////////////////////////////////////////////////////////////////////////////////
  28.329 @@ -3508,7 +3656,40 @@
  28.330    return false;
  28.331  }
  28.332  
  28.333 +void os::SuspendedThreadTask::internal_do_task() {
  28.334 +  if (do_suspend(_thread->osthread())) {
  28.335 +    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
  28.336 +    do_task(context);
  28.337 +    do_resume(_thread->osthread());
  28.338 +  }
  28.339 +}
  28.340 +
  28.341  ///
  28.342 +class PcFetcher : public os::SuspendedThreadTask {
  28.343 +public:
  28.344 +  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  28.345 +  ExtendedPC result();
  28.346 +protected:
  28.347 +  void do_task(const os::SuspendedThreadTaskContext& context);
  28.348 +private:
  28.349 +  ExtendedPC _epc;
  28.350 +};
  28.351 +
  28.352 +ExtendedPC PcFetcher::result() {
  28.353 +  guarantee(is_done(), "task is not done yet.");
  28.354 +  return _epc;
  28.355 +}
  28.356 +
  28.357 +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  28.358 +  Thread* thread = context.thread();
  28.359 +  OSThread* osthread = thread->osthread();
  28.360 +  if (osthread->ucontext() != NULL) {
  28.361 +    _epc = os::Bsd::ucontext_get_pc((ucontext_t *) context.ucontext());
  28.362 +  } else {
  28.363 +    // NULL context is unexpected, double-check this is the VMThread
  28.364 +    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  28.365 +  }
  28.366 +}
  28.367  
  28.368  // Suspends the target using the signal mechanism and then grabs the PC before
  28.369  // resuming the target. Used by the flat-profiler only
  28.370 @@ -3517,22 +3698,9 @@
  28.371    assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  28.372    assert(thread->is_VM_thread(), "Can only be called for VMThread");
  28.373  
  28.374 -  ExtendedPC epc;
  28.375 -
  28.376 -  OSThread* osthread = thread->osthread();
  28.377 -  if (do_suspend(osthread)) {
  28.378 -    if (osthread->ucontext() != NULL) {
  28.379 -      epc = os::Bsd::ucontext_get_pc(osthread->ucontext());
  28.380 -    } else {
  28.381 -      // NULL context is unexpected, double-check this is the VMThread
  28.382 -      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  28.383 -    }
  28.384 -    do_resume(osthread);
  28.385 -  }
  28.386 -  // failure means pthread_kill failed for some reason - arguably this is
  28.387 -  // a fatal problem, but such problems are ignored elsewhere
  28.388 -
  28.389 -  return epc;
  28.390 +  PcFetcher fetcher(thread);
  28.391 +  fetcher.run();
  28.392 +  return fetcher.result();
  28.393  }
  28.394  
  28.395  int os::Bsd::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
  28.396 @@ -4517,3 +4685,4 @@
  28.397  
  28.398    return n;
  28.399  }
  28.400 +
    29.1 --- a/src/os/bsd/vm/os_bsd.hpp	Fri Jun 07 09:33:01 2013 -0700
    29.2 +++ b/src/os/bsd/vm/os_bsd.hpp	Mon Jun 10 11:30:51 2013 +0200
    29.3 @@ -145,36 +145,6 @@
    29.4    // BsdThreads work-around for 6292965
    29.5    static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
    29.6  
    29.7 -
    29.8 -  // Bsd suspend/resume support - this helper is a shadow of its former
    29.9 -  // self now that low-level suspension is barely used, and old workarounds
   29.10 -  // for BsdThreads are no longer needed.
   29.11 -  class SuspendResume {
   29.12 -  private:
   29.13 -    volatile int  _suspend_action;
   29.14 -    volatile jint _state;
   29.15 -  public:
   29.16 -    // values for suspend_action:
   29.17 -    enum {
   29.18 -      SR_NONE              = 0x00,
   29.19 -      SR_SUSPEND           = 0x01,  // suspend request
   29.20 -      SR_CONTINUE          = 0x02,  // resume request
   29.21 -      SR_SUSPENDED         = 0x20   // values for _state: + SR_NONE
   29.22 -    };
   29.23 -
   29.24 -    SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
   29.25 -
   29.26 -    int suspend_action() const     { return _suspend_action; }
   29.27 -    void set_suspend_action(int x) { _suspend_action = x;    }
   29.28 -
   29.29 -    // atomic updates for _state
   29.30 -    inline void set_suspended();
   29.31 -    inline void clear_suspended();
   29.32 -    bool is_suspended()            { return _state & SR_SUSPENDED;       }
   29.33 -
   29.34 -    #undef SR_SUSPENDED
   29.35 -  };
   29.36 -
   29.37  private:
   29.38    typedef int (*sched_getcpu_func_t)(void);
   29.39    typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   29.40 @@ -250,7 +220,7 @@
   29.41      int  TryPark () ;
   29.42      int  park (jlong millis) ;
   29.43      void SetAssociation (Thread * a) { _Assoc = a ; }
   29.44 -} ;
   29.45 +};
   29.46  
   29.47  class PlatformParker : public CHeapObj<mtInternal> {
   29.48    protected:
   29.49 @@ -268,6 +238,6 @@
   29.50        status = pthread_mutex_init (_mutex, NULL);
   29.51        assert_status(status == 0, status, "mutex_init");
   29.52      }
   29.53 -} ;
   29.54 +};
   29.55  
   29.56  #endif // OS_BSD_VM_OS_BSD_HPP
    30.1 --- a/src/os/bsd/vm/os_bsd.inline.hpp	Fri Jun 07 09:33:01 2013 -0700
    30.2 +++ b/src/os/bsd/vm/os_bsd.inline.hpp	Mon Jun 10 11:30:51 2013 +0200
    30.3 @@ -286,20 +286,4 @@
    30.4    return ::setsockopt(fd, level, optname, optval, optlen);
    30.5  }
    30.6  
    30.7 -inline void os::Bsd::SuspendResume::set_suspended()           {
    30.8 -  jint temp, temp2;
    30.9 -  do {
   30.10 -    temp = _state;
   30.11 -    temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
   30.12 -  } while (temp2 != temp);
   30.13 -}
   30.14 -
   30.15 -inline void os::Bsd::SuspendResume::clear_suspended()        {
   30.16 -  jint temp, temp2;
   30.17 -  do {
   30.18 -    temp = _state;
   30.19 -    temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
   30.20 -  } while (temp2 != temp);
   30.21 -}
   30.22 -
   30.23  #endif // OS_BSD_VM_OS_BSD_INLINE_HPP
    31.1 --- a/src/os/linux/vm/osThread_linux.hpp	Fri Jun 07 09:33:01 2013 -0700
    31.2 +++ b/src/os/linux/vm/osThread_linux.hpp	Mon Jun 10 11:30:51 2013 +0200
    31.3 @@ -1,5 +1,5 @@
    31.4  /*
    31.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    31.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    31.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.8   *
    31.9   * This code is free software; you can redistribute it and/or modify it
   31.10 @@ -77,7 +77,7 @@
   31.11    // flags that support signal based suspend/resume on Linux are in a
   31.12    // separate class to avoid confusion with many flags in OSThread that
   31.13    // are used by VM level suspend/resume.
   31.14 -  os::Linux::SuspendResume sr;
   31.15 +  os::SuspendResume sr;
   31.16  
   31.17    // _ucontext and _siginfo are used by SR_handler() to save thread context,
   31.18    // and they will later be used to walk the stack or reposition thread PC.
    32.1 --- a/src/os/linux/vm/os_linux.cpp	Fri Jun 07 09:33:01 2013 -0700
    32.2 +++ b/src/os/linux/vm/os_linux.cpp	Mon Jun 10 11:30:51 2013 +0200
    32.3 @@ -151,6 +151,9 @@
    32.4  /* Used to protect dlsym() calls */
    32.5  static pthread_mutex_t dl_mutex;
    32.6  
    32.7 +// Declarations
    32.8 +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
    32.9 +
   32.10  #ifdef JAVASE_EMBEDDED
   32.11  class MemNotifyThread: public Thread {
   32.12    friend class VMStructs;
   32.13 @@ -2407,6 +2410,57 @@
   32.14    return CAST_FROM_FN_PTR(void*, UserHandler);
   32.15  }
   32.16  
   32.17 +class Semaphore : public StackObj {
   32.18 +  public:
   32.19 +    Semaphore();
   32.20 +    ~Semaphore();
   32.21 +    void signal();
   32.22 +    void wait();
   32.23 +    bool trywait();
   32.24 +    bool timedwait(unsigned int sec, int nsec);
   32.25 +  private:
   32.26 +    sem_t _semaphore;
   32.27 +};
   32.28 +
   32.29 +
   32.30 +Semaphore::Semaphore() {
   32.31 +  sem_init(&_semaphore, 0, 0);
   32.32 +}
   32.33 +
   32.34 +Semaphore::~Semaphore() {
   32.35 +  sem_destroy(&_semaphore);
   32.36 +}
   32.37 +
   32.38 +void Semaphore::signal() {
   32.39 +  sem_post(&_semaphore);
   32.40 +}
   32.41 +
   32.42 +void Semaphore::wait() {
   32.43 +  sem_wait(&_semaphore);
   32.44 +}
   32.45 +
   32.46 +bool Semaphore::trywait() {
   32.47 +  return sem_trywait(&_semaphore) == 0;
   32.48 +}
   32.49 +
   32.50 +bool Semaphore::timedwait(unsigned int sec, int nsec) {
   32.51 +  struct timespec ts;
   32.52 +  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
   32.53 +
   32.54 +  while (1) {
   32.55 +    int result = sem_timedwait(&_semaphore, &ts);
   32.56 +    if (result == 0) {
   32.57 +      return true;
   32.58 +    } else if (errno == EINTR) {
   32.59 +      continue;
   32.60 +    } else if (errno == ETIMEDOUT) {
   32.61 +      return false;
   32.62 +    } else {
   32.63 +      return false;
   32.64 +    }
   32.65 +  }
   32.66 +}
   32.67 +
   32.68  extern "C" {
   32.69    typedef void (*sa_handler_t)(int);
   32.70    typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
   32.71 @@ -2446,6 +2500,7 @@
   32.72  
   32.73  // Linux(POSIX) specific hand shaking semaphore.
   32.74  static sem_t sig_sem;
   32.75 +static Semaphore sr_semaphore;
   32.76  
   32.77  void os::signal_init_pd() {
   32.78    // Initialize signal structures
   32.79 @@ -3559,9 +3614,6 @@
   32.80  static void resume_clear_context(OSThread *osthread) {
   32.81    osthread->set_ucontext(NULL);
   32.82    osthread->set_siginfo(NULL);
   32.83 -
   32.84 -  // notify the suspend action is completed, we have now resumed
   32.85 -  osthread->sr.clear_suspended();
   32.86  }
   32.87  
   32.88  static void suspend_save_context(OSThread *osthread, siginfo_t* siginfo, ucontext_t* context) {
   32.89 @@ -3581,7 +3633,7 @@
   32.90  // its signal handlers run and prevents sigwait()'s use with the
   32.91  // mutex granting granting signal.
   32.92  //
   32.93 -// Currently only ever called on the VMThread
   32.94 +// Currently only ever called on the VMThread and JavaThreads (PC sampling)
   32.95  //
   32.96  static void SR_handler(int sig, siginfo_t* siginfo, ucontext_t* context) {
   32.97    // Save and restore errno to avoid confusing native code with EINTR
   32.98 @@ -3590,38 +3642,46 @@
   32.99  
  32.100    Thread* thread = Thread::current();
  32.101    OSThread* osthread = thread->osthread();
  32.102 -  assert(thread->is_VM_thread(), "Must be VMThread");
  32.103 -  // read current suspend action
  32.104 -  int action = osthread->sr.suspend_action();
  32.105 -  if (action == os::Linux::SuspendResume::SR_SUSPEND) {
  32.106 +  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
  32.107 +
  32.108 +  os::SuspendResume::State current = osthread->sr.state();
  32.109 +  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
  32.110      suspend_save_context(osthread, siginfo, context);
  32.111  
  32.112 -    // Notify the suspend action is about to be completed. do_suspend()
  32.113 -    // waits until SR_SUSPENDED is set and then returns. We will wait
  32.114 -    // here for a resume signal and that completes the suspend-other
  32.115 -    // action. do_suspend/do_resume is always called as a pair from
  32.116 -    // the same thread - so there are no races
  32.117 -
  32.118 -    // notify the caller
  32.119 -    osthread->sr.set_suspended();
  32.120 -
  32.121 -    sigset_t suspend_set;  // signals for sigsuspend()
  32.122 -
  32.123 -    // get current set of blocked signals and unblock resume signal
  32.124 -    pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
  32.125 -    sigdelset(&suspend_set, SR_signum);
  32.126 -
  32.127 -    // wait here until we are resumed
  32.128 -    do {
  32.129 -      sigsuspend(&suspend_set);
  32.130 -      // ignore all returns until we get a resume signal
  32.131 -    } while (osthread->sr.suspend_action() != os::Linux::SuspendResume::SR_CONTINUE);
 32.132 +    // attempt to switch the state; we assume we had a SUSPEND_REQUEST
  32.133 +    os::SuspendResume::State state = osthread->sr.suspended();
  32.134 +    if (state == os::SuspendResume::SR_SUSPENDED) {
  32.135 +      sigset_t suspend_set;  // signals for sigsuspend()
  32.136 +
  32.137 +      // get current set of blocked signals and unblock resume signal
  32.138 +      pthread_sigmask(SIG_BLOCK, NULL, &suspend_set);
  32.139 +      sigdelset(&suspend_set, SR_signum);
  32.140 +
  32.141 +      sr_semaphore.signal();
  32.142 +      // wait here until we are resumed
  32.143 +      while (1) {
  32.144 +        sigsuspend(&suspend_set);
  32.145 +
  32.146 +        os::SuspendResume::State result = osthread->sr.running();
  32.147 +        if (result == os::SuspendResume::SR_RUNNING) {
  32.148 +          sr_semaphore.signal();
  32.149 +          break;
  32.150 +        }
  32.151 +      }
  32.152 +
  32.153 +    } else if (state == os::SuspendResume::SR_RUNNING) {
  32.154 +      // request was cancelled, continue
  32.155 +    } else {
  32.156 +      ShouldNotReachHere();
  32.157 +    }
  32.158  
  32.159      resume_clear_context(osthread);
  32.160 -
  32.161 +  } else if (current == os::SuspendResume::SR_RUNNING) {
  32.162 +    // request was cancelled, continue
  32.163 +  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
  32.164 +    // ignore
  32.165    } else {
  32.166 -    assert(action == os::Linux::SuspendResume::SR_CONTINUE, "unexpected sr action");
  32.167 -    // nothing special to do - just leave the handler
  32.168 +    // ignore
  32.169    }
  32.170  
  32.171    errno = old_errno;
  32.172 @@ -3665,42 +3725,82 @@
  32.173    return 0;
  32.174  }
  32.175  
  32.176 +static int sr_notify(OSThread* osthread) {
  32.177 +  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  32.178 +  assert_status(status == 0, status, "pthread_kill");
  32.179 +  return status;
  32.180 +}
  32.181 +
  32.182 +// "Randomly" selected value for how long we want to spin
  32.183 +// before bailing out on suspending a thread, also how often
  32.184 +// we send a signal to a thread we want to resume
  32.185 +static const int RANDOMLY_LARGE_INTEGER = 1000000;
  32.186 +static const int RANDOMLY_LARGE_INTEGER2 = 100;
  32.187  
  32.188  // returns true on success and false on error - really an error is fatal
  32.189  // but this seems the normal response to library errors
  32.190  static bool do_suspend(OSThread* osthread) {
  32.191 +  assert(osthread->sr.is_running(), "thread should be running");
  32.192 +  assert(!sr_semaphore.trywait(), "semaphore has invalid state");
  32.193 +
  32.194    // mark as suspended and send signal
  32.195 -  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_SUSPEND);
  32.196 -  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  32.197 -  assert_status(status == 0, status, "pthread_kill");
  32.198 -
  32.199 -  // check status and wait until notified of suspension
  32.200 -  if (status == 0) {
  32.201 -    for (int i = 0; !osthread->sr.is_suspended(); i++) {
  32.202 -      os::yield_all(i);
  32.203 +  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
  32.204 +    // failed to switch, state wasn't running?
  32.205 +    ShouldNotReachHere();
  32.206 +    return false;
  32.207 +  }
  32.208 +
  32.209 +  if (sr_notify(osthread) != 0) {
  32.210 +    ShouldNotReachHere();
  32.211 +  }
  32.212 +
  32.213 +  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  32.214 +  while (true) {
  32.215 +    if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  32.216 +      break;
  32.217 +    } else {
  32.218 +      // timeout
  32.219 +      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
  32.220 +      if (cancelled == os::SuspendResume::SR_RUNNING) {
  32.221 +        return false;
  32.222 +      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
  32.223 +        // make sure that we consume the signal on the semaphore as well
  32.224 +        sr_semaphore.wait();
  32.225 +        break;
  32.226 +      } else {
  32.227 +        ShouldNotReachHere();
  32.228 +        return false;
  32.229 +      }
  32.230      }
  32.231 -    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
  32.232 -    return true;
  32.233 -  }
  32.234 -  else {
  32.235 -    osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
  32.236 -    return false;
  32.237 -  }
  32.238 +  }
  32.239 +
  32.240 +  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  32.241 +  return true;
  32.242  }
  32.243  
  32.244  static void do_resume(OSThread* osthread) {
  32.245    assert(osthread->sr.is_suspended(), "thread should be suspended");
  32.246 -  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_CONTINUE);
  32.247 -
  32.248 -  int status = pthread_kill(osthread->pthread_id(), SR_signum);
  32.249 -  assert_status(status == 0, status, "pthread_kill");
  32.250 -  // check status and wait unit notified of resumption
  32.251 -  if (status == 0) {
  32.252 -    for (int i = 0; osthread->sr.is_suspended(); i++) {
  32.253 -      os::yield_all(i);
  32.254 +  assert(!sr_semaphore.trywait(), "invalid semaphore state");
  32.255 +
  32.256 +  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
  32.257 +    // failed to switch to WAKEUP_REQUEST
  32.258 +    ShouldNotReachHere();
  32.259 +    return;
  32.260 +  }
  32.261 +
  32.262 +  while (true) {
  32.263 +    if (sr_notify(osthread) == 0) {
  32.264 +      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  32.265 +        if (osthread->sr.is_running()) {
  32.266 +          return;
  32.267 +        }
  32.268 +      }
  32.269 +    } else {
  32.270 +      ShouldNotReachHere();
  32.271      }
  32.272    }
  32.273 -  osthread->sr.set_suspend_action(os::Linux::SuspendResume::SR_NONE);
  32.274 +
  32.275 +  guarantee(osthread->sr.is_running(), "Must be running!");
  32.276  }
  32.277  
  32.278  ////////////////////////////////////////////////////////////////////////////////
  32.279 @@ -4472,6 +4572,40 @@
  32.280  
  32.281  ///
  32.282  
  32.283 +void os::SuspendedThreadTask::internal_do_task() {
  32.284 +  if (do_suspend(_thread->osthread())) {
  32.285 +    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
  32.286 +    do_task(context);
  32.287 +    do_resume(_thread->osthread());
  32.288 +  }
  32.289 +}
  32.290 +
  32.291 +class PcFetcher : public os::SuspendedThreadTask {
  32.292 +public:
  32.293 +  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  32.294 +  ExtendedPC result();
  32.295 +protected:
  32.296 +  void do_task(const os::SuspendedThreadTaskContext& context);
  32.297 +private:
  32.298 +  ExtendedPC _epc;
  32.299 +};
  32.300 +
  32.301 +ExtendedPC PcFetcher::result() {
  32.302 +  guarantee(is_done(), "task is not done yet.");
  32.303 +  return _epc;
  32.304 +}
  32.305 +
  32.306 +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  32.307 +  Thread* thread = context.thread();
  32.308 +  OSThread* osthread = thread->osthread();
  32.309 +  if (osthread->ucontext() != NULL) {
  32.310 +    _epc = os::Linux::ucontext_get_pc((ucontext_t *) context.ucontext());
  32.311 +  } else {
  32.312 +    // NULL context is unexpected, double-check this is the VMThread
  32.313 +    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  32.314 +  }
  32.315 +}
  32.316 +
  32.317  // Suspends the target using the signal mechanism and then grabs the PC before
  32.318  // resuming the target. Used by the flat-profiler only
  32.319  ExtendedPC os::get_thread_pc(Thread* thread) {
  32.320 @@ -4479,22 +4613,9 @@
  32.321    assert(Thread::current()->is_Watcher_thread(), "Must be watcher");
  32.322    assert(thread->is_VM_thread(), "Can only be called for VMThread");
  32.323  
  32.324 -  ExtendedPC epc;
  32.325 -
  32.326 -  OSThread* osthread = thread->osthread();
  32.327 -  if (do_suspend(osthread)) {
  32.328 -    if (osthread->ucontext() != NULL) {
  32.329 -      epc = os::Linux::ucontext_get_pc(osthread->ucontext());
  32.330 -    } else {
  32.331 -      // NULL context is unexpected, double-check this is the VMThread
  32.332 -      guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  32.333 -    }
  32.334 -    do_resume(osthread);
  32.335 -  }
  32.336 -  // failure means pthread_kill failed for some reason - arguably this is
  32.337 -  // a fatal problem, but such problems are ignored elsewhere
  32.338 -
  32.339 -  return epc;
  32.340 +  PcFetcher fetcher(thread);
  32.341 +  fetcher.run();
  32.342 +  return fetcher.result();
  32.343  }
  32.344  
  32.345  int os::Linux::safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime)
  32.346 @@ -5616,4 +5737,5 @@
  32.347      new MemNotifyThread(fd);
  32.348    }
  32.349  }
  32.350 +
  32.351  #endif // JAVASE_EMBEDDED
    33.1 --- a/src/os/linux/vm/os_linux.hpp	Fri Jun 07 09:33:01 2013 -0700
    33.2 +++ b/src/os/linux/vm/os_linux.hpp	Mon Jun 10 11:30:51 2013 +0200
    33.3 @@ -210,35 +210,6 @@
    33.4    // LinuxThreads work-around for 6292965
    33.5    static int safe_cond_timedwait(pthread_cond_t *_cond, pthread_mutex_t *_mutex, const struct timespec *_abstime);
    33.6  
    33.7 -
    33.8 -  // Linux suspend/resume support - this helper is a shadow of its former
    33.9 -  // self now that low-level suspension is barely used, and old workarounds
   33.10 -  // for LinuxThreads are no longer needed.
   33.11 -  class SuspendResume {
   33.12 -  private:
   33.13 -    volatile int  _suspend_action;
   33.14 -    volatile jint _state;
   33.15 -  public:
   33.16 -    // values for suspend_action:
   33.17 -    enum {
   33.18 -      SR_NONE              = 0x00,
   33.19 -      SR_SUSPEND           = 0x01,  // suspend request
   33.20 -      SR_CONTINUE          = 0x02,  // resume request
   33.21 -      SR_SUSPENDED         = 0x20   // values for _state: + SR_NONE
   33.22 -    };
   33.23 -
   33.24 -    SuspendResume() { _suspend_action = SR_NONE; _state = SR_NONE; }
   33.25 -
   33.26 -    int suspend_action() const     { return _suspend_action; }
   33.27 -    void set_suspend_action(int x) { _suspend_action = x;    }
   33.28 -
   33.29 -    // atomic updates for _state
   33.30 -    inline void set_suspended();
   33.31 -    inline void clear_suspended();
   33.32 -    bool is_suspended()            { return _state & SR_SUSPENDED;       }
   33.33 -
   33.34 -  };
   33.35 -
   33.36  private:
   33.37    typedef int (*sched_getcpu_func_t)(void);
   33.38    typedef int (*numa_node_to_cpus_func_t)(int node, unsigned long *buffer, int bufferlen);
   33.39 @@ -333,6 +304,6 @@
   33.40        status = pthread_mutex_init (_mutex, NULL);
   33.41        assert_status(status == 0, status, "mutex_init");
   33.42      }
   33.43 -} ;
   33.44 +};
   33.45  
   33.46  #endif // OS_LINUX_VM_OS_LINUX_HPP
    34.1 --- a/src/os/linux/vm/os_linux.inline.hpp	Fri Jun 07 09:33:01 2013 -0700
    34.2 +++ b/src/os/linux/vm/os_linux.inline.hpp	Mon Jun 10 11:30:51 2013 +0200
    34.3 @@ -288,20 +288,4 @@
    34.4    return ::setsockopt(fd, level, optname, optval, optlen);
    34.5  }
    34.6  
    34.7 -inline void os::Linux::SuspendResume::set_suspended() {
    34.8 -  jint temp, temp2;
    34.9 -  do {
   34.10 -    temp = _state;
   34.11 -    temp2 = Atomic::cmpxchg(temp | SR_SUSPENDED, &_state, temp);
   34.12 -  } while (temp2 != temp);
   34.13 -}
   34.14 -
   34.15 -inline void os::Linux::SuspendResume::clear_suspended()        {
   34.16 -  jint temp, temp2;
   34.17 -  do {
   34.18 -    temp = _state;
   34.19 -    temp2 = Atomic::cmpxchg(temp & ~SR_SUSPENDED, &_state, temp);
   34.20 -  } while (temp2 != temp);
   34.21 -}
   34.22 -
   34.23  #endif // OS_LINUX_VM_OS_LINUX_INLINE_HPP
    35.1 --- a/src/os/solaris/vm/osThread_solaris.cpp	Fri Jun 07 09:33:01 2013 -0700
    35.2 +++ b/src/os/solaris/vm/osThread_solaris.cpp	Mon Jun 10 11:30:51 2013 +0200
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    35.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -41,10 +41,6 @@
   35.11    _thread_id                         = 0;
   35.12    sigemptyset(&_caller_sigmask);
   35.13  
   35.14 -  _current_callback                  = NULL;
   35.15 -  _current_callback_lock = VM_Version::supports_compare_and_exchange() ? NULL
   35.16 -                    : new Mutex(Mutex::suspend_resume, "Callback_lock", true);
   35.17 -
   35.18    _saved_interrupt_thread_state      = _thread_new;
   35.19    _vm_created_thread                 = false;
   35.20  }
   35.21 @@ -52,172 +48,6 @@
   35.22  void OSThread::pd_destroy() {
   35.23  }
   35.24  
   35.25 -// Synchronous interrupt support
   35.26 -//
   35.27 -// _current_callback == NULL          no pending callback
   35.28 -//                   == 1             callback_in_progress
   35.29 -//                   == other value   pointer to the pending callback
   35.30 -//
   35.31 -
   35.32 -// CAS on v8 is implemented by using a global atomic_memory_operation_lock,
   35.33 -// which is shared by other atomic functions. It is OK for normal uses, but
   35.34 -// dangerous if used after some thread is suspended or if used in signal
   35.35 -// handlers. Instead here we use a special per-thread lock to synchronize
   35.36 -// updating _current_callback if we are running on v8. Note in general trying
   35.37 -// to grab locks after a thread is suspended is not safe, but it is safe for
   35.38 -// updating _current_callback, because synchronous interrupt callbacks are
   35.39 -// currently only used in:
   35.40 -// 1. GetThreadPC_Callback - used by WatcherThread to profile VM thread
   35.41 -// There is no overlap between the callbacks, which means we won't try to
   35.42 -// grab a thread's sync lock after the thread has been suspended while holding
   35.43 -// the same lock.
   35.44 -
   35.45 -// used after a thread is suspended
   35.46 -static intptr_t compare_and_exchange_current_callback (
   35.47 -       intptr_t callback, intptr_t *addr, intptr_t compare_value, Mutex *sync) {
   35.48 -  if (VM_Version::supports_compare_and_exchange()) {
   35.49 -    return Atomic::cmpxchg_ptr(callback, addr, compare_value);
   35.50 -  } else {
   35.51 -    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
   35.52 -    if (*addr == compare_value) {
   35.53 -      *addr = callback;
   35.54 -      return compare_value;
   35.55 -    } else {
   35.56 -      return callback;
   35.57 -    }
   35.58 -  }
   35.59 -}
   35.60 -
   35.61 -// used in signal handler
   35.62 -static intptr_t exchange_current_callback(intptr_t callback, intptr_t *addr, Mutex *sync) {
   35.63 -  if (VM_Version::supports_compare_and_exchange()) {
   35.64 -    return Atomic::xchg_ptr(callback, addr);
   35.65 -  } else {
   35.66 -    MutexLockerEx ml(sync, Mutex::_no_safepoint_check_flag);
   35.67 -    intptr_t cb = *addr;
   35.68 -    *addr = callback;
   35.69 -    return cb;
   35.70 -  }
   35.71 -}
   35.72 -
   35.73 -// one interrupt at a time. spin if _current_callback != NULL
   35.74 -int OSThread::set_interrupt_callback(Sync_Interrupt_Callback * cb) {
   35.75 -  int count = 0;
   35.76 -  while (compare_and_exchange_current_callback(
   35.77 -         (intptr_t)cb, (intptr_t *)&_current_callback, (intptr_t)NULL, _current_callback_lock) != NULL) {
   35.78 -    while (_current_callback != NULL) {
   35.79 -      count++;
   35.80 -#ifdef ASSERT
   35.81 -      if ((WarnOnStalledSpinLock > 0) &&
   35.82 -          (count % WarnOnStalledSpinLock == 0)) {
   35.83 -          warning("_current_callback seems to be stalled: %p", _current_callback);
   35.84 -      }
   35.85 -#endif
   35.86 -      os::yield_all(count);
   35.87 -    }
   35.88 -  }
   35.89 -  return 0;
   35.90 -}
   35.91 -
   35.92 -// reset _current_callback, spin if _current_callback is callback_in_progress
   35.93 -void OSThread::remove_interrupt_callback(Sync_Interrupt_Callback * cb) {
   35.94 -  int count = 0;
   35.95 -  while (compare_and_exchange_current_callback(
   35.96 -         (intptr_t)NULL, (intptr_t *)&_current_callback, (intptr_t)cb, _current_callback_lock) != (intptr_t)cb) {
   35.97 -#ifdef ASSERT
   35.98 -    intptr_t p = (intptr_t)_current_callback;
   35.99 -    assert(p == (intptr_t)callback_in_progress ||
  35.100 -           p == (intptr_t)cb, "wrong _current_callback value");
  35.101 -#endif
  35.102 -    while (_current_callback != cb) {
  35.103 -      count++;
  35.104 -#ifdef ASSERT
  35.105 -      if ((WarnOnStalledSpinLock > 0) &&
  35.106 -          (count % WarnOnStalledSpinLock == 0)) {
  35.107 -          warning("_current_callback seems to be stalled: %p", _current_callback);
  35.108 -      }
  35.109 -#endif
  35.110 -      os::yield_all(count);
  35.111 -    }
  35.112 -  }
  35.113 -}
  35.114 -
  35.115 -void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args) {
  35.116 -  Sync_Interrupt_Callback * cb;
  35.117 -  cb = (Sync_Interrupt_Callback *)exchange_current_callback(
  35.118 -        (intptr_t)callback_in_progress, (intptr_t *)&_current_callback, _current_callback_lock);
  35.119 -
  35.120 -  if (cb == NULL) {
  35.121 -    // signal is delivered too late (thread is masking interrupt signal??).
  35.122 -    // there is nothing we need to do because requesting thread has given up.
  35.123 -  } else if ((intptr_t)cb == (intptr_t)callback_in_progress) {
  35.124 -    fatal("invalid _current_callback state");
  35.125 -  } else {
  35.126 -    assert(cb->target()->osthread() == this, "wrong target");
  35.127 -    cb->execute(args);
  35.128 -    cb->leave_callback();             // notify the requester
  35.129 -  }
  35.130 -
  35.131 -  // restore original _current_callback value
  35.132 -  intptr_t p;
  35.133 -  p = exchange_current_callback((intptr_t)cb, (intptr_t *)&_current_callback, _current_callback_lock);
  35.134 -  assert(p == (intptr_t)callback_in_progress, "just checking");
  35.135 -}
  35.136 -
  35.137 -// Called by the requesting thread to send a signal to target thread and
  35.138 -// execute "this" callback from the signal handler.
  35.139 -int OSThread::Sync_Interrupt_Callback::interrupt(Thread * target, int timeout) {
  35.140 -  // Let signals to the vm_thread go even if the Threads_lock is not acquired
  35.141 -  assert(Threads_lock->owned_by_self() || (target == VMThread::vm_thread()),
  35.142 -         "must have threads lock to call this");
  35.143 -
  35.144 -  OSThread * osthread = target->osthread();
  35.145 -
  35.146 -  // may block if target thread already has a pending callback
  35.147 -  osthread->set_interrupt_callback(this);
  35.148 -
  35.149 -  _target = target;
  35.150 -
  35.151 -  int rslt = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  35.152 -  assert(rslt == 0, "thr_kill != 0");
  35.153 -
  35.154 -  bool status = false;
  35.155 -  jlong t1 = os::javaTimeMillis();
  35.156 -  { // don't use safepoint check because we might be the watcher thread.
  35.157 -    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
  35.158 -    while (!is_done()) {
  35.159 -      status = _sync->wait(Mutex::_no_safepoint_check_flag, timeout);
  35.160 -
  35.161 -      // status == true if timed out
  35.162 -      if (status) break;
  35.163 -
  35.164 -      // update timeout
  35.165 -      jlong t2 = os::javaTimeMillis();
  35.166 -      timeout -= t2 - t1;
  35.167 -      t1 = t2;
  35.168 -    }
  35.169 -  }
  35.170 -
  35.171 -  // reset current_callback
  35.172 -  osthread->remove_interrupt_callback(this);
  35.173 -
  35.174 -  return status;
  35.175 -}
  35.176 -
  35.177 -void OSThread::Sync_Interrupt_Callback::leave_callback() {
  35.178 -  if (!_sync->owned_by_self()) {
  35.179 -    // notify requesting thread
  35.180 -    MutexLockerEx ml(_sync, Mutex::_no_safepoint_check_flag);
  35.181 -    _is_done = true;
  35.182 -    _sync->notify_all();
  35.183 -  } else {
  35.184 -    // Current thread is interrupted while it is holding the _sync lock, trying
  35.185 -    // to grab it again will deadlock. The requester will timeout anyway,
  35.186 -    // so just return.
  35.187 -    _is_done = true;
  35.188 -  }
  35.189 -}
  35.190 -
  35.191  // copied from synchronizer.cpp
  35.192  
  35.193  void OSThread::handle_spinlock_contention(int tries) {
  35.194 @@ -229,3 +59,7 @@
  35.195      os::yield();          // Yield to threads of same or higher priority
  35.196    }
  35.197  }
  35.198 +
  35.199 +void OSThread::SR_handler(Thread* thread, ucontext_t* uc) {
  35.200 +  os::Solaris::SR_handler(thread, uc);
  35.201 +}
    36.1 --- a/src/os/solaris/vm/osThread_solaris.hpp	Fri Jun 07 09:33:01 2013 -0700
    36.2 +++ b/src/os/solaris/vm/osThread_solaris.hpp	Mon Jun 10 11:30:51 2013 +0200
    36.3 @@ -1,5 +1,5 @@
    36.4  /*
    36.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    36.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    36.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    36.8   *
    36.9   * This code is free software; you can redistribute it and/or modify it
   36.10 @@ -72,61 +72,15 @@
   36.11   // ***************************************************************
   36.12  
   36.13   public:
   36.14 -
   36.15 -  class InterruptArguments : StackObj {
   36.16 -   private:
   36.17 -    Thread*     _thread;   // the thread to signal was dispatched to
   36.18 -    ucontext_t* _ucontext; // the machine context at the time of the signal
   36.19 -
   36.20 -   public:
   36.21 -    InterruptArguments(Thread* thread, ucontext_t* ucontext) {
   36.22 -      _thread   = thread;
   36.23 -      _ucontext = ucontext;
   36.24 -    }
   36.25 -
   36.26 -    Thread*     thread()   const { return _thread;   }
   36.27 -    ucontext_t* ucontext() const { return _ucontext; }
   36.28 -  };
   36.29 -
   36.30 -  // There are currently no asynchronous callbacks - and we'd better not
   36.31 -  // support them in the future either, as they need to be deallocated from
   36.32 -  // the interrupt handler, which is not safe; they also require locks to
   36.33 -  // protect the callback queue.
   36.34 -
   36.35 -  class Sync_Interrupt_Callback : private StackObj {
   36.36 -   protected:
   36.37 -    volatile bool _is_done;
   36.38 -    Monitor*      _sync;
   36.39 -    Thread*       _target;
   36.40 -   public:
   36.41 -    Sync_Interrupt_Callback(Monitor * sync) {
   36.42 -      _is_done = false;  _target = NULL;  _sync = sync;
   36.43 -    }
   36.44 -
   36.45 -    bool is_done() const               { return _is_done; }
   36.46 -    Thread* target() const             { return _target;  }
   36.47 -
   36.48 -    int interrupt(Thread * target, int timeout);
   36.49 -
   36.50 -    // override to implement the callback.
   36.51 -    virtual void execute(InterruptArguments *args) = 0;
   36.52 -
   36.53 -    void leave_callback();
   36.54 -  };
   36.55 +  os::SuspendResume sr;
   36.56  
   36.57   private:
   36.58 -
   36.59 -  Sync_Interrupt_Callback * volatile _current_callback;
   36.60 -  enum {
   36.61 -    callback_in_progress = 1
   36.62 -  };
   36.63 -  Mutex * _current_callback_lock;       // only used on v8
   36.64 +  ucontext_t* _ucontext;
   36.65  
   36.66   public:
   36.67 -
   36.68 -  int set_interrupt_callback    (Sync_Interrupt_Callback * cb);
   36.69 -  void remove_interrupt_callback(Sync_Interrupt_Callback * cb);
   36.70 -  void do_interrupt_callbacks_at_interrupt(InterruptArguments *args);
   36.71 +  ucontext_t* ucontext() const { return _ucontext; }
   36.72 +  void set_ucontext(ucontext_t* ptr) { _ucontext = ptr; }
   36.73 +  static void SR_handler(Thread* thread, ucontext_t* uc);
   36.74  
   36.75   // ***************************************************************
   36.76   // java.lang.Thread.interrupt state.
    37.1 --- a/src/os/solaris/vm/os_share_solaris.hpp	Fri Jun 07 09:33:01 2013 -0700
    37.2 +++ b/src/os/solaris/vm/os_share_solaris.hpp	Mon Jun 10 11:30:51 2013 +0200
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    37.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -27,28 +27,6 @@
   37.11  
   37.12  // Defines the interfaces to Solaris operating systems that vary across platforms
   37.13  
   37.14 -
   37.15 -// This is a simple callback that just fetches a PC for an interrupted thread.
   37.16 -// The thread need not be suspended and the fetched PC is just a hint.
   37.17 -// Returned PC and nPC are not necessarily consecutive.
   37.18 -// This one is currently used for profiling the VMThread ONLY!
   37.19 -
   37.20 -// Must be synchronous
   37.21 -class GetThreadPC_Callback : public OSThread::Sync_Interrupt_Callback {
   37.22 - private:
   37.23 -  ExtendedPC _addr;
   37.24 -
   37.25 - public:
   37.26 -
   37.27 -  GetThreadPC_Callback(Monitor *sync) :
   37.28 -    OSThread::Sync_Interrupt_Callback(sync) { }
   37.29 -  ExtendedPC addr() const { return _addr; }
   37.30 -
   37.31 -  void set_addr(ExtendedPC addr) { _addr = addr; }
   37.32 -
   37.33 -  void execute(OSThread::InterruptArguments *args);
   37.34 -};
   37.35 -
   37.36  // misc
   37.37  extern "C" {
   37.38    void signalHandler(int, siginfo_t*, void*);
    38.1 --- a/src/os/solaris/vm/os_solaris.cpp	Fri Jun 07 09:33:01 2013 -0700
    38.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Mon Jun 10 11:30:51 2013 +0200
    38.3 @@ -240,6 +240,8 @@
    38.4    static int pthread_cond_default_init(cond_t *cv, int scope, void *arg){ memset(cv, 0, sizeof(cond_t)); return 0; }
    38.5  }
    38.6  
    38.7 +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
    38.8 +
    38.9  // Thread Local Storage
   38.10  // This is common to all Solaris platforms so it is defined here,
   38.11  // in this common file.
   38.12 @@ -2580,6 +2582,57 @@
   38.13    return CAST_FROM_FN_PTR(void*, UserHandler);
   38.14  }
   38.15  
   38.16 +class Semaphore : public StackObj {
   38.17 +  public:
   38.18 +    Semaphore();
   38.19 +    ~Semaphore();
   38.20 +    void signal();
   38.21 +    void wait();
   38.22 +    bool trywait();
   38.23 +    bool timedwait(unsigned int sec, int nsec);
   38.24 +  private:
   38.25 +    sema_t _semaphore;
   38.26 +};
   38.27 +
   38.28 +
   38.29 +Semaphore::Semaphore() {
   38.30 +  sema_init(&_semaphore, 0, NULL, NULL);
   38.31 +}
   38.32 +
   38.33 +Semaphore::~Semaphore() {
   38.34 +  sema_destroy(&_semaphore);
   38.35 +}
   38.36 +
   38.37 +void Semaphore::signal() {
   38.38 +  sema_post(&_semaphore);
   38.39 +}
   38.40 +
   38.41 +void Semaphore::wait() {
   38.42 +  sema_wait(&_semaphore);
   38.43 +}
   38.44 +
   38.45 +bool Semaphore::trywait() {
   38.46 +  return sema_trywait(&_semaphore) == 0;
   38.47 +}
   38.48 +
   38.49 +bool Semaphore::timedwait(unsigned int sec, int nsec) {
   38.50 +  struct timespec ts;
   38.51 +  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
   38.52 +
   38.53 +  while (1) {
   38.54 +    int result = sema_timedwait(&_semaphore, &ts);
   38.55 +    if (result == 0) {
   38.56 +      return true;
   38.57 +    } else if (errno == EINTR) {
   38.58 +      continue;
   38.59 +    } else if (errno == ETIME) {
   38.60 +      return false;
   38.61 +    } else {
   38.62 +      return false;
   38.63 +    }
   38.64 +  }
   38.65 +}
   38.66 +
   38.67  extern "C" {
   38.68    typedef void (*sa_handler_t)(int);
   38.69    typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
   38.70 @@ -4164,6 +4217,68 @@
   38.71    schedctl_start(schedctl_init());
   38.72  }
   38.73  
   38.74 +static void resume_clear_context(OSThread *osthread) {
   38.75 +  osthread->set_ucontext(NULL);
   38.76 +}
   38.77 +
   38.78 +static void suspend_save_context(OSThread *osthread, ucontext_t* context) {
   38.79 +  osthread->set_ucontext(context);
   38.80 +}
   38.81 +
   38.82 +static Semaphore sr_semaphore;
   38.83 +
   38.84 +void os::Solaris::SR_handler(Thread* thread, ucontext_t* uc) {
   38.85 +  // Save and restore errno to avoid confusing native code with EINTR
   38.86 +  // after sigsuspend.
   38.87 +  int old_errno = errno;
   38.88 +
   38.89 +  OSThread* osthread = thread->osthread();
   38.90 +  assert(thread->is_VM_thread() || thread->is_Java_thread(), "Must be VMThread or JavaThread");
   38.91 +
   38.92 +  os::SuspendResume::State current = osthread->sr.state();
   38.93 +  if (current == os::SuspendResume::SR_SUSPEND_REQUEST) {
   38.94 +    suspend_save_context(osthread, uc);
   38.95 +
   38.96 +    // attempt to switch the state, we assume we had a SUSPEND_REQUEST
   38.97 +    os::SuspendResume::State state = osthread->sr.suspended();
   38.98 +    if (state == os::SuspendResume::SR_SUSPENDED) {
   38.99 +      sigset_t suspend_set;  // signals for sigsuspend()
  38.100 +
  38.101 +      // get current set of blocked signals and unblock resume signal
  38.102 +      thr_sigsetmask(SIG_BLOCK, NULL, &suspend_set);
  38.103 +      sigdelset(&suspend_set, os::Solaris::SIGasync());
  38.104 +
  38.105 +      sr_semaphore.signal();
  38.106 +      // wait here until we are resumed
  38.107 +      while (1) {
  38.108 +        sigsuspend(&suspend_set);
  38.109 +
  38.110 +        os::SuspendResume::State result = osthread->sr.running();
  38.111 +        if (result == os::SuspendResume::SR_RUNNING) {
  38.112 +          sr_semaphore.signal();
  38.113 +          break;
  38.114 +        }
  38.115 +      }
  38.116 +
  38.117 +    } else if (state == os::SuspendResume::SR_RUNNING) {
  38.118 +      // request was cancelled, continue
  38.119 +    } else {
  38.120 +      ShouldNotReachHere();
  38.121 +    }
  38.122 +
  38.123 +    resume_clear_context(osthread);
  38.124 +  } else if (current == os::SuspendResume::SR_RUNNING) {
  38.125 +    // request was cancelled, continue
  38.126 +  } else if (current == os::SuspendResume::SR_WAKEUP_REQUEST) {
  38.127 +    // ignore
  38.128 +  } else {
  38.129 +    // ignore
  38.130 +  }
  38.131 +
  38.132 +  errno = old_errno;
  38.133 +}
  38.134 +
  38.135 +
  38.136  void os::interrupt(Thread* thread) {
  38.137    assert(Thread::current() == thread || Threads_lock->owned_by_self(), "possibility of dangling Thread pointer");
  38.138  
  38.139 @@ -4247,6 +4362,116 @@
  38.140    return buf[0] == 'y' || buf[0] == 'Y';
  38.141  }
  38.142  
  38.143 +static int sr_notify(OSThread* osthread) {
  38.144 +  int status = thr_kill(osthread->thread_id(), os::Solaris::SIGasync());
  38.145 +  assert_status(status == 0, status, "thr_kill");
  38.146 +  return status;
  38.147 +}
  38.148 +
  38.149 +// "Randomly" selected value for how long we want to spin
  38.150 +// before bailing out on suspending a thread, also how often
  38.151 +// we send a signal to a thread we want to resume
  38.152 +static const int RANDOMLY_LARGE_INTEGER = 1000000;
  38.153 +static const int RANDOMLY_LARGE_INTEGER2 = 100;
  38.154 +
  38.155 +static bool do_suspend(OSThread* osthread) {
  38.156 +  assert(osthread->sr.is_running(), "thread should be running");
  38.157 +  assert(!sr_semaphore.trywait(), "semaphore has invalid state");
  38.158 +
  38.159 +  // mark as suspended and send signal
  38.160 +  if (osthread->sr.request_suspend() != os::SuspendResume::SR_SUSPEND_REQUEST) {
  38.161 +    // failed to switch, state wasn't running?
  38.162 +    ShouldNotReachHere();
  38.163 +    return false;
  38.164 +  }
  38.165 +
  38.166 +  if (sr_notify(osthread) != 0) {
  38.167 +    ShouldNotReachHere();
  38.168 +  }
  38.169 +
  38.170 +  // managed to send the signal and switch to SUSPEND_REQUEST, now wait for SUSPENDED
  38.171 +  while (true) {
  38.172 +    if (sr_semaphore.timedwait(0, 2000 * NANOSECS_PER_MILLISEC)) {
  38.173 +      break;
  38.174 +    } else {
  38.175 +      // timeout
  38.176 +      os::SuspendResume::State cancelled = osthread->sr.cancel_suspend();
  38.177 +      if (cancelled == os::SuspendResume::SR_RUNNING) {
  38.178 +        return false;
  38.179 +      } else if (cancelled == os::SuspendResume::SR_SUSPENDED) {
  38.180 +        // make sure that we consume the signal on the semaphore as well
  38.181 +        sr_semaphore.wait();
  38.182 +        break;
  38.183 +      } else {
  38.184 +        ShouldNotReachHere();
  38.185 +        return false;
  38.186 +      }
  38.187 +    }
  38.188 +  }
  38.189 +
  38.190 +  guarantee(osthread->sr.is_suspended(), "Must be suspended");
  38.191 +  return true;
  38.192 +}
  38.193 +
  38.194 +static void do_resume(OSThread* osthread) {
  38.195 +  assert(osthread->sr.is_suspended(), "thread should be suspended");
  38.196 +  assert(!sr_semaphore.trywait(), "invalid semaphore state");
  38.197 +
  38.198 +  if (osthread->sr.request_wakeup() != os::SuspendResume::SR_WAKEUP_REQUEST) {
  38.199 +    // failed to switch to WAKEUP_REQUEST
  38.200 +    ShouldNotReachHere();
  38.201 +    return;
  38.202 +  }
  38.203 +
  38.204 +  while (true) {
  38.205 +    if (sr_notify(osthread) == 0) {
  38.206 +      if (sr_semaphore.timedwait(0, 2 * NANOSECS_PER_MILLISEC)) {
  38.207 +        if (osthread->sr.is_running()) {
  38.208 +          return;
  38.209 +        }
  38.210 +      }
  38.211 +    } else {
  38.212 +      ShouldNotReachHere();
  38.213 +    }
  38.214 +  }
  38.215 +
  38.216 +  guarantee(osthread->sr.is_running(), "Must be running!");
  38.217 +}
  38.218 +
  38.219 +void os::SuspendedThreadTask::internal_do_task() {
  38.220 +  if (do_suspend(_thread->osthread())) {
  38.221 +    SuspendedThreadTaskContext context(_thread, _thread->osthread()->ucontext());
  38.222 +    do_task(context);
  38.223 +    do_resume(_thread->osthread());
  38.224 +  }
  38.225 +}
  38.226 +
  38.227 +class PcFetcher : public os::SuspendedThreadTask {
  38.228 +public:
  38.229 +  PcFetcher(Thread* thread) : os::SuspendedThreadTask(thread) {}
  38.230 +  ExtendedPC result();
  38.231 +protected:
  38.232 +  void do_task(const os::SuspendedThreadTaskContext& context);
  38.233 +private:
  38.234 +  ExtendedPC _epc;
  38.235 +};
  38.236 +
  38.237 +ExtendedPC PcFetcher::result() {
  38.238 +  guarantee(is_done(), "task is not done yet.");
  38.239 +  return _epc;
  38.240 +}
  38.241 +
  38.242 +void PcFetcher::do_task(const os::SuspendedThreadTaskContext& context) {
  38.243 +  Thread* thread = context.thread();
  38.244 +  OSThread* osthread = thread->osthread();
  38.245 +  if (osthread->ucontext() != NULL) {
  38.246 +    _epc = os::Solaris::ucontext_get_pc((ucontext_t *) context.ucontext());
  38.247 +  } else {
  38.248 +    // NULL context is unexpected, double-check this is the VMThread
  38.249 +    guarantee(thread->is_VM_thread(), "can only be called for VMThread");
  38.250 +  }
  38.251 +}
  38.252 +
  38.253  // A lightweight implementation that does not suspend the target thread and
  38.254  // thus returns only a hint. Used for profiling only!
  38.255  ExtendedPC os::get_thread_pc(Thread* thread) {
  38.256 @@ -4254,21 +4479,9 @@
  38.257    assert(Thread::current()->is_Watcher_thread(), "Must be watcher and own Threads_lock");
  38.258    // For now, is only used to profile the VM Thread
  38.259    assert(thread->is_VM_thread(), "Can only be called for VMThread");
  38.260 -  ExtendedPC epc;
  38.261 -
  38.262 -  GetThreadPC_Callback  cb(ProfileVM_lock);
  38.263 -  OSThread *osthread = thread->osthread();
  38.264 -  const int time_to_wait = 400; // 400ms wait for initial response
  38.265 -  int status = cb.interrupt(thread, time_to_wait);
  38.266 -
  38.267 -  if (cb.is_done() ) {
  38.268 -    epc = cb.addr();
  38.269 -  } else {
  38.270 -    DEBUG_ONLY(tty->print_cr("Failed to get pc for thread: %d got %d status",
  38.271 -                              osthread->thread_id(), status););
  38.272 -    // epc is already NULL
  38.273 -  }
  38.274 -  return epc;
  38.275 +  PcFetcher fetcher(thread);
  38.276 +  fetcher.run();
  38.277 +  return fetcher.result();
  38.278  }
  38.279  
  38.280  
    39.1 --- a/src/os/solaris/vm/os_solaris.hpp	Fri Jun 07 09:33:01 2013 -0700
    39.2 +++ b/src/os/solaris/vm/os_solaris.hpp	Mon Jun 10 11:30:51 2013 +0200
    39.3 @@ -1,5 +1,5 @@
    39.4  /*
    39.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    39.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    39.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.8   *
    39.9   * This code is free software; you can redistribute it and/or modify it
   39.10 @@ -127,7 +127,6 @@
   39.11    static void set_SIGinterrupt(int newsig) { _SIGinterrupt = newsig; }
   39.12    static void set_SIGasync(int newsig) { _SIGasync = newsig; }
   39.13  
   39.14 -
   39.15   public:
   39.16    // Large Page Support--ISM.
   39.17    static bool largepage_range(char* addr, size_t size);
   39.18 @@ -145,6 +144,7 @@
   39.19    static intptr_t*   ucontext_get_sp(ucontext_t* uc);
   39.20    // ucontext_get_fp() is only used by Solaris X86 (see note below)
   39.21    static intptr_t*   ucontext_get_fp(ucontext_t* uc);
   39.22 +  static address    ucontext_get_pc(ucontext_t* uc);
   39.23  
   39.24    // For Analyzer Forte AsyncGetCallTrace profiling support:
   39.25    // Parameter ret_fp is only used by Solaris X86.
   39.26 @@ -157,6 +157,8 @@
   39.27  
   39.28    static void hotspot_sigmask(Thread* thread);
   39.29  
   39.30 +  // SR_handler
   39.31 +  static void SR_handler(Thread* thread, ucontext_t* uc);
   39.32   protected:
   39.33    // Solaris-specific interface goes here
   39.34    static julong available_memory();
    40.1 --- a/src/os/windows/vm/os_windows.cpp	Fri Jun 07 09:33:01 2013 -0700
    40.2 +++ b/src/os/windows/vm/os_windows.cpp	Mon Jun 10 11:30:51 2013 +0200
    40.3 @@ -5048,6 +5048,71 @@
    40.4    return ::setsockopt(fd, level, optname, optval, optlen);
    40.5  }
    40.6  
    40.7 +// WINDOWS CONTEXT Flags for THREAD_SAMPLING
    40.8 +#if defined(IA32)
    40.9 +#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
   40.10 +#elif defined (AMD64)
   40.11 +#  define sampling_context_flags (CONTEXT_FULL | CONTEXT_FLOATING_POINT)
   40.12 +#endif
   40.13 +
   40.14 +// returns true if thread could be suspended,
   40.15 +// false otherwise
   40.16 +static bool do_suspend(HANDLE* h) {
   40.17 +  if (h != NULL) {
   40.18 +    if (SuspendThread(*h) != ~0) {
   40.19 +      return true;
   40.20 +    }
   40.21 +  }
   40.22 +  return false;
   40.23 +}
   40.24 +
   40.25 +// resume the thread
   40.26 +// calling resume on an active thread is a no-op
   40.27 +static void do_resume(HANDLE* h) {
   40.28 +  if (h != NULL) {
   40.29 +    ResumeThread(*h);
   40.30 +  }
   40.31 +}
   40.32 +
   40.33 +// retrieve a suspend/resume context capable handle
   40.34 +// from the tid. Caller validates handle return value.
   40.35 +void get_thread_handle_for_extended_context(HANDLE* h, OSThread::thread_id_t tid) {
   40.36 +  if (h != NULL) {
   40.37 +    *h = OpenThread(THREAD_SUSPEND_RESUME | THREAD_GET_CONTEXT | THREAD_QUERY_INFORMATION, FALSE, tid);
   40.38 +  }
   40.39 +}
   40.40 +
   40.41 +//
   40.42 +// Thread sampling implementation
   40.43 +//
   40.44 +void os::SuspendedThreadTask::internal_do_task() {
   40.45 +  CONTEXT    ctxt;
   40.46 +  HANDLE     h = NULL;
   40.47 +
   40.48 +  // get context capable handle for thread
   40.49 +  get_thread_handle_for_extended_context(&h, _thread->osthread()->thread_id());
   40.50 +
   40.51 +  // sanity
   40.52 +  if (h == NULL || h == INVALID_HANDLE_VALUE) {
   40.53 +    return;
   40.54 +  }
   40.55 +
   40.56 +  // suspend the thread
   40.57 +  if (do_suspend(&h)) {
   40.58 +    ctxt.ContextFlags = sampling_context_flags;
   40.59 +    // get thread context
   40.60 +    GetThreadContext(h, &ctxt);
   40.61 +    SuspendedThreadTaskContext context(_thread, &ctxt);
   40.62 +    // pass context to Thread Sampling impl
   40.63 +    do_task(context);
   40.64 +    // resume thread
   40.65 +    do_resume(&h);
   40.66 +  }
   40.67 +
   40.68 +  // close handle
   40.69 +  CloseHandle(h);
   40.70 +}
   40.71 +
   40.72  
   40.73  // Kernel32 API
   40.74  typedef SIZE_T (WINAPI* GetLargePageMinimum_Fn)(void);
    41.1 --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    41.2 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    41.3 @@ -1,5 +1,5 @@
    41.4  /*
    41.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    41.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    41.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    41.8   *
    41.9   * This code is free software; you can redistribute it and/or modify it
   41.10 @@ -30,10 +30,16 @@
   41.11  // currently interrupted by SIGPROF
   41.12  bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
   41.13    void* ucontext, bool isInJava) {
   41.14 +  assert(Thread::current() == this, "caller must be current thread");
   41.15 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   41.16 +}
   41.17  
   41.18 -  assert(Thread::current() == this, "caller must be current thread");
   41.19 +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
   41.20 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   41.21 +}
   41.22 +
   41.23 +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
   41.24    assert(this->is_Java_thread(), "must be JavaThread");
   41.25 -
   41.26    JavaThread* jt = (JavaThread *)this;
   41.27  
   41.28    // If we have a last_Java_frame, then we should use it even if
    42.1 --- a/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Fri Jun 07 09:33:01 2013 -0700
    42.2 +++ b/src/os_cpu/bsd_x86/vm/thread_bsd_x86.hpp	Mon Jun 10 11:30:51 2013 +0200
    42.3 @@ -1,5 +1,5 @@
    42.4  /*
    42.5 - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    42.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    42.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    42.8   *
    42.9   * This code is free software; you can redistribute it and/or modify it
   42.10 @@ -61,6 +61,13 @@
   42.11    bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
   42.12      bool isInJava);
   42.13  
   42.14 +  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
   42.15 +    bool isInJava);
   42.16 +
   42.17 +private:
   42.18 +  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
   42.19 +public:
   42.20 +
   42.21    // These routines are only used on cpu architectures that
   42.22    // have separate register stacks (Itanium).
   42.23    static bool register_stack_overflow() { return false; }
    43.1 --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    43.2 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    43.3 @@ -1,5 +1,5 @@
    43.4  /*
    43.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    43.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    43.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    43.8   *
    43.9   * This code is free software; you can redistribute it and/or modify it
   43.10 @@ -32,8 +32,15 @@
   43.11    void* ucontext, bool isInJava) {
   43.12  
   43.13    assert(Thread::current() == this, "caller must be current thread");
   43.14 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   43.15 +}
   43.16 +
   43.17 +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
   43.18 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   43.19 +}
   43.20 +
   43.21 +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
   43.22    assert(this->is_Java_thread(), "must be JavaThread");
   43.23 -
   43.24    JavaThread* jt = (JavaThread *)this;
   43.25  
   43.26    // If we have a last_Java_frame, then we should use it even if
    44.1 --- a/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp	Fri Jun 07 09:33:01 2013 -0700
    44.2 +++ b/src/os_cpu/linux_x86/vm/thread_linux_x86.hpp	Mon Jun 10 11:30:51 2013 +0200
    44.3 @@ -1,5 +1,5 @@
    44.4  /*
    44.5 - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    44.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    44.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.8   *
    44.9   * This code is free software; you can redistribute it and/or modify it
   44.10 @@ -61,6 +61,11 @@
   44.11    bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
   44.12      bool isInJava);
   44.13  
   44.14 +  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
   44.15 +private:
   44.16 +  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
   44.17 +public:
   44.18 +
   44.19    // These routines are only used on cpu architectures that
   44.20    // have separate register stacks (Itanium).
   44.21    static bool register_stack_overflow() { return false; }
    45.1 --- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Fri Jun 07 09:33:01 2013 -0700
    45.2 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp	Mon Jun 10 11:30:51 2013 +0200
    45.3 @@ -1,5 +1,5 @@
    45.4  /*
    45.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    45.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    45.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    45.8   *
    45.9   * This code is free software; you can redistribute it and/or modify it
   45.10 @@ -194,6 +194,11 @@
   45.11    return NULL;
   45.12  }
   45.13  
   45.14 +address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
   45.15 +  return (address) uc->uc_mcontext.gregs[REG_PC];
   45.16 +}
   45.17 +
   45.18 +
   45.19  // For Forte Analyzer AsyncGetCallTrace profiling support - thread
   45.20  // is currently interrupted by SIGPROF.
   45.21  //
   45.22 @@ -265,22 +270,6 @@
   45.23    }
   45.24  }
   45.25  
   45.26 -
   45.27 -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
   45.28 -  Thread*     thread = args->thread();
   45.29 -  ucontext_t* uc     = args->ucontext();
   45.30 -  intptr_t* sp;
   45.31 -
   45.32 -  assert(ProfileVM && thread->is_VM_thread(), "just checking");
   45.33 -
   45.34 -  // Skip the mcontext corruption verification. If if occasionally
   45.35 -  // things get corrupt, it is ok for profiling - we will just get an unresolved
   45.36 -  // function name
   45.37 -  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
   45.38 -  _addr = new_addr;
   45.39 -}
   45.40 -
   45.41 -
   45.42  static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
   45.43    char lwpstatusfile[PROCFILE_LENGTH];
   45.44    int lwpfd, err;
   45.45 @@ -358,13 +347,8 @@
   45.46    guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
   45.47  
   45.48    if (sig == os::Solaris::SIGasync()) {
   45.49 -    if (thread) {
   45.50 -      OSThread::InterruptArguments args(thread, uc);
   45.51 -      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
   45.52 -      return true;
   45.53 -    } else if (vmthread) {
   45.54 -      OSThread::InterruptArguments args(vmthread, uc);
   45.55 -      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
   45.56 +    if (thread || vmthread) {
   45.57 +      OSThread::SR_handler(t, uc);
   45.58        return true;
   45.59      } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
   45.60        return true;
    46.1 --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Fri Jun 07 09:33:01 2013 -0700
    46.2 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.cpp	Mon Jun 10 11:30:51 2013 +0200
    46.3 @@ -1,5 +1,5 @@
    46.4  /*
    46.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    46.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    46.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.8   *
    46.9   * This code is free software; you can redistribute it and/or modify it
   46.10 @@ -36,11 +36,21 @@
   46.11    void* ucontext, bool isInJava) {
   46.12  
   46.13    assert(Thread::current() == this, "caller must be current thread");
   46.14 +  return pd_get_top_frame(fr_addr, ucontext, isInJava, true);
   46.15 +}
   46.16 +
   46.17 +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
   46.18 +  // get ucontext somehow
   46.19 +  return pd_get_top_frame(fr_addr, ucontext, isInJava, false);
   46.20 +}
   46.21 +
   46.22 +bool JavaThread::pd_get_top_frame(frame* fr_addr,
   46.23 +  void* ucontext, bool isInJava, bool makeWalkable) {
   46.24    assert(this->is_Java_thread(), "must be JavaThread");
   46.25  
   46.26    JavaThread* jt = (JavaThread *)this;
   46.27  
   46.28 -  if (!isInJava) {
   46.29 +  if (!isInJava && makeWalkable) {
   46.30      // make_walkable flushes register windows and grabs last_Java_pc
   46.31      // which can not be done if the ucontext sp matches last_Java_sp
   46.32      // stack walking utilities assume last_Java_pc set if marked flushed
    47.1 --- a/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Fri Jun 07 09:33:01 2013 -0700
    47.2 +++ b/src/os_cpu/solaris_sparc/vm/thread_solaris_sparc.hpp	Mon Jun 10 11:30:51 2013 +0200
    47.3 @@ -1,5 +1,5 @@
    47.4  /*
    47.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
    47.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    47.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.8   *
    47.9   * This code is free software; you can redistribute it and/or modify it
   47.10 @@ -93,6 +93,11 @@
   47.11    bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
   47.12      bool isInJava);
   47.13  
   47.14 +  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
   47.15 +private:
   47.16 +  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava, bool makeWalkable);
   47.17 +public:
   47.18 +
   47.19    // These routines are only used on cpu architectures that
   47.20    // have separate register stacks (Itanium).
   47.21    static bool register_stack_overflow() { return false; }
    48.1 --- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    48.2 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    48.3 @@ -1,5 +1,5 @@
    48.4  /*
    48.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    48.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    48.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    48.8   *
    48.9   * This code is free software; you can redistribute it and/or modify it
   48.10 @@ -183,6 +183,10 @@
   48.11    return (intptr_t*)uc->uc_mcontext.gregs[REG_FP];
   48.12  }
   48.13  
   48.14 +address os::Solaris::ucontext_get_pc(ucontext_t *uc) {
   48.15 +  return (address) uc->uc_mcontext.gregs[REG_PC];
   48.16 +}
   48.17 +
   48.18  // For Forte Analyzer AsyncGetCallTrace profiling support - thread
   48.19  // is currently interrupted by SIGPROF.
   48.20  //
   48.21 @@ -252,22 +256,6 @@
   48.22    }
   48.23  }
   48.24  
   48.25 -// This is a simple callback that just fetches a PC for an interrupted thread.
   48.26 -// The thread need not be suspended and the fetched PC is just a hint.
   48.27 -// This one is currently used for profiling the VMThread ONLY!
   48.28 -
   48.29 -// Must be synchronous
   48.30 -void GetThreadPC_Callback::execute(OSThread::InterruptArguments *args) {
   48.31 -  Thread*     thread = args->thread();
   48.32 -  ucontext_t* uc     = args->ucontext();
   48.33 -  intptr_t* sp;
   48.34 -
   48.35 -  assert(ProfileVM && thread->is_VM_thread(), "just checking");
   48.36 -
   48.37 -  ExtendedPC new_addr((address)uc->uc_mcontext.gregs[REG_PC]);
   48.38 -  _addr = new_addr;
   48.39 -}
   48.40 -
   48.41  static int threadgetstate(thread_t tid, int *flags, lwpid_t *lwp, stack_t *ss, gregset_t rs, lwpstatus_t *lwpstatus) {
   48.42    char lwpstatusfile[PROCFILE_LENGTH];
   48.43    int lwpfd, err;
   48.44 @@ -419,14 +407,8 @@
   48.45    guarantee(sig != os::Solaris::SIGinterrupt(), "Can not chain VM interrupt signal, try -XX:+UseAltSigs");
   48.46  
   48.47    if (sig == os::Solaris::SIGasync()) {
   48.48 -    if(thread){
   48.49 -      OSThread::InterruptArguments args(thread, uc);
   48.50 -      thread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
   48.51 -      return true;
   48.52 -    }
   48.53 -    else if(vmthread){
   48.54 -      OSThread::InterruptArguments args(vmthread, uc);
   48.55 -      vmthread->osthread()->do_interrupt_callbacks_at_interrupt(&args);
   48.56 +    if(thread || vmthread){
   48.57 +      OSThread::SR_handler(t, uc);
   48.58        return true;
   48.59      } else if (os::Solaris::chained_handler(sig, info, ucVoid)) {
   48.60        return true;
    49.1 --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    49.2 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    49.3 @@ -1,5 +1,5 @@
    49.4  /*
    49.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    49.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    49.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.8   *
    49.9   * This code is free software; you can redistribute it and/or modify it
   49.10 @@ -30,8 +30,17 @@
   49.11  // currently interrupted by SIGPROF
   49.12  bool JavaThread::pd_get_top_frame_for_signal_handler(frame* fr_addr,
   49.13    void* ucontext, bool isInJava) {
   49.14 +  assert(Thread::current() == this, "caller must be current thread");
   49.15 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   49.16 +}
   49.17  
   49.18 -  assert(Thread::current() == this, "caller must be current thread");
   49.19 +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr,
   49.20 +  void* ucontext, bool isInJava) {
   49.21 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   49.22 +}
   49.23 +
   49.24 +bool JavaThread::pd_get_top_frame(frame* fr_addr,
   49.25 +  void* ucontext, bool isInJava) {
   49.26    assert(this->is_Java_thread(), "must be JavaThread");
   49.27    JavaThread* jt = (JavaThread *)this;
   49.28  
    50.1 --- a/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp	Fri Jun 07 09:33:01 2013 -0700
    50.2 +++ b/src/os_cpu/solaris_x86/vm/thread_solaris_x86.hpp	Mon Jun 10 11:30:51 2013 +0200
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    50.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -54,6 +54,12 @@
   50.11  
   50.12    bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
   50.13      bool isInJava);
   50.14 +  bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext,
   50.15 +    bool isInJava);
   50.16 +private:
   50.17 +  bool pd_get_top_frame(frame* fr_addr, void* ucontext,
   50.18 +    bool isInJava);
   50.19 +public:
   50.20  
   50.21    // These routines are only used on cpu architectures that
   50.22    // have separate register stacks (Itanium).
    51.1 --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Fri Jun 07 09:33:01 2013 -0700
    51.2 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.cpp	Mon Jun 10 11:30:51 2013 +0200
    51.3 @@ -1,5 +1,5 @@
    51.4  /*
    51.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    51.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    51.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    51.8   *
    51.9   * This code is free software; you can redistribute it and/or modify it
   51.10 @@ -32,6 +32,15 @@
   51.11    void* ucontext, bool isInJava) {
   51.12  
   51.13    assert(Thread::current() == this, "caller must be current thread");
   51.14 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   51.15 +}
   51.16 +
   51.17 +bool JavaThread::pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava) {
   51.18 +  return pd_get_top_frame(fr_addr, ucontext, isInJava);
   51.19 +}
   51.20 +
   51.21 +bool JavaThread::pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava) {
   51.22 +
   51.23    assert(this->is_Java_thread(), "must be JavaThread");
   51.24  
   51.25    JavaThread* jt = (JavaThread *)this;
   51.26 @@ -87,4 +96,3 @@
   51.27  }
   51.28  
   51.29  void JavaThread::cache_global_variables() { }
   51.30 -
    52.1 --- a/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp	Fri Jun 07 09:33:01 2013 -0700
    52.2 +++ b/src/os_cpu/windows_x86/vm/thread_windows_x86.hpp	Mon Jun 10 11:30:51 2013 +0200
    52.3 @@ -1,5 +1,5 @@
    52.4  /*
    52.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    52.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    52.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    52.8   *
    52.9   * This code is free software; you can redistribute it and/or modify it
   52.10 @@ -58,6 +58,12 @@
   52.11    bool pd_get_top_frame_for_signal_handler(frame* fr_addr, void* ucontext,
   52.12      bool isInJava);
   52.13  
   52.14 +   bool pd_get_top_frame_for_profiling(frame* fr_addr, void* ucontext, bool isInJava);
   52.15 +
   52.16 +private:
   52.17 +  bool pd_get_top_frame(frame* fr_addr, void* ucontext, bool isInJava);
   52.18 +
   52.19 + public:
   52.20    // These routines are only used on cpu architectures that
   52.21    // have separate register stacks (Itanium).
   52.22    static bool register_stack_overflow() { return false; }
    53.1 --- a/src/share/tools/ProjectCreator/BuildConfig.java	Fri Jun 07 09:33:01 2013 -0700
    53.2 +++ b/src/share/tools/ProjectCreator/BuildConfig.java	Mon Jun 10 11:30:51 2013 +0200
    53.3 @@ -1,5 +1,5 @@
    53.4  /*
    53.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    53.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    53.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    53.8   *
    53.9   * This code is free software; you can redistribute it and/or modify it
   53.10 @@ -152,7 +152,7 @@
   53.11          sysDefines.add("_WINDOWS");
   53.12          sysDefines.add("HOTSPOT_BUILD_USER=\\\""+System.getProperty("user.name")+"\\\"");
   53.13          sysDefines.add("HOTSPOT_BUILD_TARGET=\\\""+get("Build")+"\\\"");
   53.14 -        sysDefines.add("INCLUDE_TRACE");
   53.15 +        sysDefines.add("INCLUDE_TRACE=1");
   53.16          sysDefines.add("_JNI_IMPLEMENTATION_");
   53.17          if (vars.get("PlatformName").equals("Win32")) {
   53.18              sysDefines.add("HOTSPOT_LIB_ARCH=\\\"i386\\\"");
    54.1 --- a/src/share/vm/classfile/classFileParser.cpp	Fri Jun 07 09:33:01 2013 -0700
    54.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Mon Jun 10 11:30:51 2013 +0200
    54.3 @@ -39,6 +39,7 @@
    54.4  #include "memory/gcLocker.hpp"
    54.5  #include "memory/metadataFactory.hpp"
    54.6  #include "memory/oopFactory.hpp"
    54.7 +#include "memory/referenceType.hpp"
    54.8  #include "memory/universe.inline.hpp"
    54.9  #include "oops/constantPool.hpp"
   54.10  #include "oops/fieldStreams.hpp"
    55.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Fri Jun 07 09:33:01 2013 -0700
    55.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Mon Jun 10 11:30:51 2013 +0200
    55.3 @@ -64,6 +64,11 @@
    55.4  #include "utilities/growableArray.hpp"
    55.5  #include "utilities/ostream.hpp"
    55.6  
    55.7 +#if INCLUDE_TRACE
    55.8 + #include "trace/tracing.hpp"
    55.9 +#endif
   55.10 +
   55.11 +
   55.12  ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
   55.13  
   55.14  ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Dependencies dependencies) :
   55.15 @@ -120,6 +125,12 @@
   55.16    }
   55.17  }
   55.18  
   55.19 +void ClassLoaderData::classes_do(void f(Klass * const)) {
   55.20 +  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
   55.21 +    f(k);
   55.22 +  }
   55.23 +}
   55.24 +
   55.25  void ClassLoaderData::classes_do(void f(InstanceKlass*)) {
   55.26    for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
   55.27      if (k->oop_is_instance()) {
   55.28 @@ -583,6 +594,19 @@
   55.29    }
   55.30  }
   55.31  
   55.32 +void ClassLoaderDataGraph::classes_do(void f(Klass* const)) {
   55.33 +  for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
   55.34 +    cld->classes_do(f);
   55.35 +  }
   55.36 +}
   55.37 +
   55.38 +void ClassLoaderDataGraph::classes_unloading_do(void f(Klass* const)) {
   55.39 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   55.40 +  for (ClassLoaderData* cld = _unloading; cld != NULL; cld = cld->next()) {
   55.41 +    cld->classes_do(f);
   55.42 +  }
   55.43 +}
   55.44 +
   55.45  GrowableArray<ClassLoaderData*>* ClassLoaderDataGraph::new_clds() {
   55.46    assert(_head == NULL || _saved_head != NULL, "remember_new_clds(true) not called?");
   55.47  
   55.48 @@ -687,6 +711,11 @@
   55.49      dead->set_next(_unloading);
   55.50      _unloading = dead;
   55.51    }
   55.52 +
   55.53 +  if (seen_dead_loader) {
   55.54 +    post_class_unload_events();
   55.55 +  }
   55.56 +
   55.57    return seen_dead_loader;
   55.58  }
   55.59  
   55.60 @@ -702,6 +731,20 @@
   55.61    Metaspace::purge();
   55.62  }
   55.63  
   55.64 +void ClassLoaderDataGraph::post_class_unload_events(void) {
   55.65 +#if INCLUDE_TRACE
   55.66 +  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint!");
   55.67 +  if (Tracing::enabled()) {
   55.68 +    if (Tracing::is_event_enabled(TraceClassUnloadEvent)) {
   55.69 +      assert(_unloading != NULL, "need class loader data unload list!");
   55.70 +      _class_unload_time = Tracing::time();
   55.71 +      classes_unloading_do(&class_unload_event);
   55.72 +    }
   55.73 +    Tracing::on_unloading_classes();
   55.74 +  }
   55.75 +#endif
   55.76 +}
   55.77 +
   55.78  // CDS support
   55.79  
   55.80  // Global metaspaces for writing information to the shared archive.  When
   55.81 @@ -769,3 +812,21 @@
   55.82      class_loader()->print_value_on(out);
   55.83    }
   55.84  }
   55.85 +
   55.86 +#if INCLUDE_TRACE
   55.87 +
   55.88 +TracingTime ClassLoaderDataGraph::_class_unload_time;
   55.89 +
   55.90 +void ClassLoaderDataGraph::class_unload_event(Klass* const k) {
   55.91 +
   55.92 +  // post class unload event
   55.93 +  EventClassUnload event(UNTIMED);
   55.94 +  event.set_endtime(_class_unload_time);
   55.95 +  event.set_unloadedClass(k);
   55.96 +  oop defining_class_loader = k->class_loader();
   55.97 +  event.set_definingClassLoader(defining_class_loader != NULL ?
   55.98 +                                defining_class_loader->klass() : (Klass*)NULL);
   55.99 +  event.commit();
  55.100 +}
  55.101 +
  55.102 +#endif /* INCLUDE_TRACE */
    56.1 --- a/src/share/vm/classfile/classLoaderData.hpp	Fri Jun 07 09:33:01 2013 -0700
    56.2 +++ b/src/share/vm/classfile/classLoaderData.hpp	Mon Jun 10 11:30:51 2013 +0200
    56.3 @@ -32,6 +32,10 @@
    56.4  #include "runtime/mutex.hpp"
    56.5  #include "utilities/growableArray.hpp"
    56.6  
    56.7 +#if INCLUDE_TRACE
    56.8 +# include "trace/traceTime.hpp"
    56.9 +#endif
   56.10 +
   56.11  //
   56.12  // A class loader represents a linkset. Conceptually, a linkset identifies
   56.13  // the complete transitive closure of resolved links that a dynamic linker can
   56.14 @@ -49,6 +53,7 @@
   56.15  class JNIMethodBlock;
   56.16  class JNIHandleBlock;
   56.17  class Metadebug;
   56.18 +
   56.19  // GC root for walking class loader data created
   56.20  
   56.21  class ClassLoaderDataGraph : public AllStatic {
   56.22 @@ -63,6 +68,7 @@
   56.23    static ClassLoaderData* _saved_head;
   56.24  
   56.25    static ClassLoaderData* add(Handle class_loader, bool anonymous, TRAPS);
   56.26 +  static void post_class_unload_events(void);
   56.27   public:
   56.28    static ClassLoaderData* find_or_create(Handle class_loader, TRAPS);
   56.29    static void purge();
   56.30 @@ -71,6 +77,8 @@
   56.31    static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   56.32    static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
   56.33    static void classes_do(KlassClosure* klass_closure);
   56.34 +  static void classes_do(void f(Klass* const));
   56.35 +  static void classes_unloading_do(void f(Klass* const));
   56.36    static bool do_unloading(BoolObjectClosure* is_alive);
   56.37  
   56.38    // CMS support.
   56.39 @@ -86,6 +94,12 @@
   56.40    static bool contains(address x);
   56.41    static bool contains_loader_data(ClassLoaderData* loader_data);
   56.42  #endif
   56.43 +
   56.44 +#if INCLUDE_TRACE
   56.45 + private:
   56.46 +  static TracingTime _class_unload_time;
   56.47 +  static void class_unload_event(Klass* const k);
   56.48 +#endif
   56.49  };
   56.50  
   56.51  // ClassLoaderData class
   56.52 @@ -171,7 +185,7 @@
   56.53    void unload();
   56.54    bool keep_alive() const       { return _keep_alive; }
   56.55    bool is_alive(BoolObjectClosure* is_alive_closure) const;
   56.56 -
   56.57 +  void classes_do(void f(Klass*));
   56.58    void classes_do(void f(InstanceKlass*));
   56.59  
   56.60    // Deallocate free list during class unloading.
    57.1 --- a/src/share/vm/classfile/javaClasses.cpp	Fri Jun 07 09:33:01 2013 -0700
    57.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Mon Jun 10 11:30:51 2013 +0200
    57.3 @@ -961,7 +961,7 @@
    57.4  
    57.5  // Read thread status value from threadStatus field in java.lang.Thread java class.
    57.6  java_lang_Thread::ThreadStatus java_lang_Thread::get_thread_status(oop java_thread) {
    57.7 -  assert(Thread::current()->is_VM_thread() ||
    57.8 +  assert(Thread::current()->is_Watcher_thread() || Thread::current()->is_VM_thread() ||
    57.9           JavaThread::current()->thread_state() == _thread_in_vm,
   57.10           "Java Thread is not running in vm");
   57.11    // The threadStatus is only present starting in 1.5
    58.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Fri Jun 07 09:33:01 2013 -0700
    58.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Mon Jun 10 11:30:51 2013 +0200
    58.3 @@ -56,6 +56,11 @@
    58.4  #include "services/classLoadingService.hpp"
    58.5  #include "services/threadService.hpp"
    58.6  
    58.7 +#if INCLUDE_TRACE
    58.8 + #include "trace/tracing.hpp"
    58.9 + #include "trace/traceMacros.hpp"
   58.10 +#endif
   58.11 +
   58.12  
   58.13  Dictionary*            SystemDictionary::_dictionary          = NULL;
   58.14  PlaceholderTable*      SystemDictionary::_placeholders        = NULL;
   58.15 @@ -586,10 +591,15 @@
   58.16  }
   58.17  
   58.18  
   58.19 -Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name, Handle class_loader, Handle protection_domain, TRAPS) {
   58.20 +Klass* SystemDictionary::resolve_instance_class_or_null(Symbol* name,
   58.21 +                                                        Handle class_loader,
   58.22 +                                                        Handle protection_domain,
   58.23 +                                                        TRAPS) {
   58.24    assert(name != NULL && !FieldType::is_array(name) &&
   58.25           !FieldType::is_obj(name), "invalid class name");
   58.26  
   58.27 +  TracingTime class_load_start_time = Tracing::time();
   58.28 +
   58.29    // UseNewReflection
   58.30    // Fix for 4474172; see evaluation for more details
   58.31    class_loader = Handle(THREAD, java_lang_ClassLoader::non_reflection_class_loader(class_loader()));
   58.32 @@ -804,8 +814,9 @@
   58.33              // during compilations.
   58.34              MutexLocker mu(Compile_lock, THREAD);
   58.35              update_dictionary(d_index, d_hash, p_index, p_hash,
   58.36 -                            k, class_loader, THREAD);
   58.37 +                              k, class_loader, THREAD);
   58.38            }
   58.39 +
   58.40            if (JvmtiExport::should_post_class_load()) {
   58.41              Thread *thread = THREAD;
   58.42              assert(thread->is_Java_thread(), "thread->is_Java_thread()");
   58.43 @@ -861,8 +872,8 @@
   58.44        // This brackets the SystemDictionary updates for both defining
   58.45        // and initiating loaders
   58.46        MutexLocker mu(SystemDictionary_lock, THREAD);
   58.47 -        placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
   58.48 -        SystemDictionary_lock->notify_all();
   58.49 +      placeholders()->find_and_remove(p_index, p_hash, name, loader_data, PlaceholderTable::LOAD_INSTANCE, THREAD);
   58.50 +      SystemDictionary_lock->notify_all();
   58.51      }
   58.52    }
   58.53  
   58.54 @@ -870,6 +881,8 @@
   58.55      return NULL;
   58.56    }
   58.57  
   58.58 +  post_class_load_event(class_load_start_time, k, class_loader);
   58.59 +
   58.60  #ifdef ASSERT
   58.61    {
   58.62      ClassLoaderData* loader_data = k->class_loader_data();
   58.63 @@ -993,6 +1006,8 @@
   58.64                                        TRAPS) {
   58.65    TempNewSymbol parsed_name = NULL;
   58.66  
   58.67 +  TracingTime class_load_start_time = Tracing::time();
   58.68 +
   58.69    ClassLoaderData* loader_data;
   58.70    if (host_klass.not_null()) {
   58.71      // Create a new CLD for anonymous class, that uses the same class loader
   58.72 @@ -1048,6 +1063,8 @@
   58.73          assert(THREAD->is_Java_thread(), "thread->is_Java_thread()");
   58.74          JvmtiExport::post_class_load((JavaThread *) THREAD, k());
   58.75      }
   58.76 +
   58.77 +    post_class_load_event(class_load_start_time, k, class_loader);
   58.78    }
   58.79    assert(host_klass.not_null() || cp_patches == NULL,
   58.80           "cp_patches only found with host_klass");
   58.81 @@ -1435,6 +1452,7 @@
   58.82        JvmtiExport::post_class_load((JavaThread *) THREAD, k());
   58.83  
   58.84    }
   58.85 +
   58.86  }
   58.87  
   58.88  // Support parallel classloading
   58.89 @@ -1678,6 +1696,7 @@
   58.90    }
   58.91    return newsize;
   58.92  }
   58.93 +
   58.94  // Assumes classes in the SystemDictionary are only unloaded at a safepoint
   58.95  // Note: anonymous classes are not in the SD.
   58.96  bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
   58.97 @@ -2024,12 +2043,6 @@
   58.98      }
   58.99    }
  58.100  
  58.101 -  // Assign a classid if one has not already been assigned.  The
  58.102 -  // counter does not need to be atomically incremented since this
  58.103 -  // is only done while holding the SystemDictionary_lock.
  58.104 -  // All loaded classes get a unique ID.
  58.105 -  TRACE_INIT_ID(k);
  58.106 -
  58.107    // Make a new system dictionary entry.
  58.108    Klass* sd_check = find_class(d_index, d_hash, name, loader_data);
  58.109    if (sd_check == NULL) {
  58.110 @@ -2612,6 +2625,27 @@
  58.111              "Loaded klasses should be in SystemDictionary");
  58.112  }
  58.113  
  58.114 +// utility function for class load event
  58.115 +void SystemDictionary::post_class_load_event(TracingTime start_time,
  58.116 +                                             instanceKlassHandle k,
  58.117 +                                             Handle initiating_loader) {
  58.118 +#if INCLUDE_TRACE
  58.119 +  EventClassLoad event(UNTIMED);
  58.120 +  if (event.should_commit()) {
  58.121 +    event.set_endtime(Tracing::time());
  58.122 +    event.set_starttime(start_time);
  58.123 +    event.set_loadedClass(k());
  58.124 +    oop defining_class_loader = k->class_loader();
  58.125 +    event.set_definingClassLoader(defining_class_loader !=  NULL ?
  58.126 +                                    defining_class_loader->klass() : (Klass*)NULL);
  58.127 +    oop class_loader = initiating_loader.is_null() ? (oop)NULL : initiating_loader();
  58.128 +    event.set_initiatingClassLoader(class_loader != NULL ?
  58.129 +                                      class_loader->klass() : (Klass*)NULL);
  58.130 +    event.commit();
  58.131 +  }
  58.132 +#endif /* INCLUDE_TRACE */
  58.133 +}
  58.134 +
  58.135  #ifndef PRODUCT
  58.136  
  58.137  // statistics code
    59.1 --- a/src/share/vm/classfile/systemDictionary.hpp	Fri Jun 07 09:33:01 2013 -0700
    59.2 +++ b/src/share/vm/classfile/systemDictionary.hpp	Mon Jun 10 11:30:51 2013 +0200
    59.3 @@ -31,9 +31,11 @@
    59.4  #include "oops/symbol.hpp"
    59.5  #include "runtime/java.hpp"
    59.6  #include "runtime/reflectionUtils.hpp"
    59.7 +#include "trace/traceTime.hpp"
    59.8  #include "utilities/hashtable.hpp"
    59.9  #include "utilities/hashtable.inline.hpp"
   59.10  
   59.11 +
   59.12  // The system dictionary stores all loaded classes and maps:
   59.13  //
   59.14  //   [class name,class loader] -> class   i.e.  [Symbol*,oop] -> Klass*
   59.15 @@ -636,6 +638,9 @@
   59.16    // Setup link to hierarchy
   59.17    static void add_to_hierarchy(instanceKlassHandle k, TRAPS);
   59.18  
   59.19 +  // event based tracing
   59.20 +  static void post_class_load_event(TracingTime start_time, instanceKlassHandle k,
   59.21 +                                    Handle initiating_loader);
   59.22    // We pass in the hashtable index so we can calculate it outside of
   59.23    // the SystemDictionary_lock.
   59.24  
    60.1 --- a/src/share/vm/code/codeCache.cpp	Fri Jun 07 09:33:01 2013 -0700
    60.2 +++ b/src/share/vm/code/codeCache.cpp	Mon Jun 10 11:30:51 2013 +0200
    60.3 @@ -1,5 +1,5 @@
    60.4  /*
    60.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    60.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    60.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    60.8   *
    60.9   * This code is free software; you can redistribute it and/or modify it
   60.10 @@ -45,6 +45,7 @@
   60.11  #include "runtime/java.hpp"
   60.12  #include "runtime/mutexLocker.hpp"
   60.13  #include "services/memoryService.hpp"
   60.14 +#include "trace/tracing.hpp"
   60.15  #include "utilities/xmlstream.hpp"
   60.16  
   60.17  // Helper class for printing in CodeCache
   60.18 @@ -114,7 +115,6 @@
   60.19    }
   60.20  };
   60.21  
   60.22 -
   60.23  // CodeCache implementation
   60.24  
   60.25  CodeHeap * CodeCache::_heap = new CodeHeap();
   60.26 @@ -126,6 +126,7 @@
   60.27  nmethod* CodeCache::_scavenge_root_nmethods = NULL;
   60.28  nmethod* CodeCache::_saved_nmethods = NULL;
   60.29  
   60.30 +int CodeCache::_codemem_full_count = 0;
   60.31  
   60.32  CodeBlob* CodeCache::first() {
   60.33    assert_locked_or_safepoint(CodeCache_lock);
   60.34 @@ -829,6 +830,22 @@
   60.35    }
   60.36  }
   60.37  
   60.38 +void CodeCache::report_codemem_full() {
   60.39 +  _codemem_full_count++;
   60.40 +  EventCodeCacheFull event;
   60.41 +  if (event.should_commit()) {
   60.42 +    event.set_startAddress((u8)low_bound());
   60.43 +    event.set_commitedTopAddress((u8)high());
   60.44 +    event.set_reservedTopAddress((u8)high_bound());
   60.45 +    event.set_entryCount(nof_blobs());
   60.46 +    event.set_methodCount(nof_nmethods());
   60.47 +    event.set_adaptorCount(nof_adapters());
   60.48 +    event.set_unallocatedCapacity(unallocated_capacity()/K);
   60.49 +    event.set_fullCount(_codemem_full_count);
   60.50 +    event.commit();
   60.51 +  }
   60.52 +}
   60.53 +
   60.54  //------------------------------------------------------------------------------------------------
   60.55  // Non-product version
   60.56  
    61.1 --- a/src/share/vm/code/codeCache.hpp	Fri Jun 07 09:33:01 2013 -0700
    61.2 +++ b/src/share/vm/code/codeCache.hpp	Mon Jun 10 11:30:51 2013 +0200
    61.3 @@ -1,5 +1,5 @@
    61.4  /*
    61.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    61.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    61.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.8   *
    61.9   * This code is free software; you can redistribute it and/or modify it
   61.10 @@ -64,11 +64,15 @@
   61.11    static void mark_scavenge_root_nmethods() PRODUCT_RETURN;
   61.12    static void verify_perm_nmethods(CodeBlobClosure* f_or_null) PRODUCT_RETURN;
   61.13  
   61.14 +  static int _codemem_full_count;
   61.15 +
   61.16   public:
   61.17  
   61.18    // Initialization
   61.19    static void initialize();
   61.20  
   61.21 +  static void report_codemem_full();
   61.22 +
   61.23    // Allocation/administration
   61.24    static CodeBlob* allocate(int size, bool is_critical = false); // allocates a new CodeBlob
   61.25    static void commit(CodeBlob* cb);                 // called when the allocated CodeBlob has been filled
   61.26 @@ -155,6 +159,7 @@
   61.27    // The full limits of the codeCache
   61.28    static address  low_bound()                    { return (address) _heap->low_boundary(); }
   61.29    static address  high_bound()                   { return (address) _heap->high_boundary(); }
   61.30 +  static address  high()                         { return (address) _heap->high(); }
   61.31  
   61.32    // Profiling
   61.33    static address first_address();                // first address used for CodeBlobs
   61.34 @@ -186,6 +191,8 @@
   61.35  
   61.36      // tells how many nmethods have dependencies
   61.37    static int number_of_nmethods_with_dependencies();
   61.38 +
   61.39 +  static int get_codemem_full_count() { return _codemem_full_count; }
   61.40  };
   61.41  
   61.42  #endif // SHARE_VM_CODE_CODECACHE_HPP
    62.1 --- a/src/share/vm/compiler/compileBroker.cpp	Fri Jun 07 09:33:01 2013 -0700
    62.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Mon Jun 10 11:30:51 2013 +0200
    62.3 @@ -1,5 +1,5 @@
    62.4  /*
    62.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    62.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    62.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.8   *
    62.9   * This code is free software; you can redistribute it and/or modify it
   62.10 @@ -43,6 +43,7 @@
   62.11  #include "runtime/os.hpp"
   62.12  #include "runtime/sharedRuntime.hpp"
   62.13  #include "runtime/sweeper.hpp"
   62.14 +#include "trace/tracing.hpp"
   62.15  #include "utilities/dtrace.hpp"
   62.16  #include "utilities/events.hpp"
   62.17  #ifdef COMPILER1
   62.18 @@ -179,9 +180,11 @@
   62.19  int CompileBroker::_sum_nmethod_size             = 0;
   62.20  int CompileBroker::_sum_nmethod_code_size        = 0;
   62.21  
   62.22 -CompileQueue* CompileBroker::_c2_method_queue   = NULL;
   62.23 -CompileQueue* CompileBroker::_c1_method_queue   = NULL;
   62.24 -CompileTask*  CompileBroker::_task_free_list = NULL;
   62.25 +long CompileBroker::_peak_compilation_time       = 0;
   62.26 +
   62.27 +CompileQueue* CompileBroker::_c2_method_queue    = NULL;
   62.28 +CompileQueue* CompileBroker::_c1_method_queue    = NULL;
   62.29 +CompileTask*  CompileBroker::_task_free_list     = NULL;
   62.30  
   62.31  GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
   62.32  
   62.33 @@ -1795,6 +1798,7 @@
   62.34      ciMethod* target = ci_env.get_method_from_handle(target_handle);
   62.35  
   62.36      TraceTime t1("compilation", &time);
   62.37 +    EventCompilation event;
   62.38  
   62.39      AbstractCompiler *comp = compiler(task_level);
   62.40      if (comp == NULL) {
   62.41 @@ -1836,6 +1840,16 @@
   62.42      }
   62.43      // simulate crash during compilation
   62.44      assert(task->compile_id() != CICrashAt, "just as planned");
   62.45 +    if (event.should_commit()) {
   62.46 +      event.set_method(target->get_Method());
   62.47 +      event.set_compileID(compile_id);
   62.48 +      event.set_compileLevel(task->comp_level());
   62.49 +      event.set_succeded(task->is_success());
   62.50 +      event.set_isOsr(is_osr);
   62.51 +      event.set_codeSize((task->code() == NULL) ? 0 : task->code()->total_size());
   62.52 +      event.set_inlinedBytes(task->num_inlined_bytecodes());
   62.53 +      event.commit();
   62.54 +    }
   62.55    }
   62.56    pop_jni_handle_block();
   62.57  
   62.58 @@ -1916,6 +1930,10 @@
   62.59      }
   62.60      warning("CodeCache is full. Compiler has been disabled.");
   62.61      warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
   62.62 +
   62.63 +    CodeCache::report_codemem_full();
   62.64 +
   62.65 +
   62.66  #ifndef PRODUCT
   62.67      if (CompileTheWorld || ExitOnFullCodeCache) {
   62.68        codecache_print(/* detailed= */ true);
   62.69 @@ -2073,8 +2091,10 @@
   62.70      // java.lang.management.CompilationMBean
   62.71      _perf_total_compilation->inc(time.ticks());
   62.72  
   62.73 +    _t_total_compilation.add(time);
   62.74 +    _peak_compilation_time = time.milliseconds() > _peak_compilation_time ? time.milliseconds() : _peak_compilation_time;
   62.75 +
   62.76      if (CITime) {
   62.77 -      _t_total_compilation.add(time);
   62.78        if (is_osr) {
   62.79          _t_osr_compilation.add(time);
   62.80          _sum_osr_bytes_compiled += method->code_size() + task->num_inlined_bytecodes();
   62.81 @@ -2172,7 +2192,6 @@
   62.82    tty->print_cr("  nmethod total size       : %6d bytes", CompileBroker::_sum_nmethod_size);
   62.83  }
   62.84  
   62.85 -
   62.86  // Debugging output for failure
   62.87  void CompileBroker::print_last_compile() {
   62.88    if ( _last_compile_level != CompLevel_none &&
    63.1 --- a/src/share/vm/compiler/compileBroker.hpp	Fri Jun 07 09:33:01 2013 -0700
    63.2 +++ b/src/share/vm/compiler/compileBroker.hpp	Mon Jun 10 11:30:51 2013 +0200
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    63.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -299,17 +299,17 @@
   63.11    static elapsedTimer _t_osr_compilation;
   63.12    static elapsedTimer _t_standard_compilation;
   63.13  
   63.14 +  static int _total_compile_count;
   63.15    static int _total_bailout_count;
   63.16    static int _total_invalidated_count;
   63.17 -  static int _total_compile_count;
   63.18    static int _total_native_compile_count;
   63.19    static int _total_osr_compile_count;
   63.20    static int _total_standard_compile_count;
   63.21 -
   63.22    static int _sum_osr_bytes_compiled;
   63.23    static int _sum_standard_bytes_compiled;
   63.24    static int _sum_nmethod_size;
   63.25    static int _sum_nmethod_code_size;
   63.26 +  static long _peak_compilation_time;
   63.27  
   63.28    static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
   63.29    static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
   63.30 @@ -421,6 +421,19 @@
   63.31  
   63.32    // compiler name for debugging
   63.33    static const char* compiler_name(int comp_level);
   63.34 +
   63.35 +  static int get_total_compile_count() {          return _total_compile_count; }
   63.36 +  static int get_total_bailout_count() {          return _total_bailout_count; }
   63.37 +  static int get_total_invalidated_count() {      return _total_invalidated_count; }
   63.38 +  static int get_total_native_compile_count() {   return _total_native_compile_count; }
   63.39 +  static int get_total_osr_compile_count() {      return _total_osr_compile_count; }
   63.40 +  static int get_total_standard_compile_count() { return _total_standard_compile_count; }
   63.41 +  static int get_sum_osr_bytes_compiled() {       return _sum_osr_bytes_compiled; }
   63.42 +  static int get_sum_standard_bytes_compiled() {  return _sum_standard_bytes_compiled; }
   63.43 +  static int get_sum_nmethod_size() {             return _sum_nmethod_size;}
   63.44 +  static int get_sum_nmethod_code_size() {        return _sum_nmethod_code_size; }
   63.45 +  static long get_peak_compilation_time() {       return _peak_compilation_time; }
   63.46 +  static long get_total_compilation_time() {      return _t_total_compilation.milliseconds(); }
   63.47  };
   63.48  
   63.49  #endif // SHARE_VM_COMPILER_COMPILEBROKER_HPP
    64.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Fri Jun 07 09:33:01 2013 -0700
    64.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Mon Jun 10 11:30:51 2013 +0200
    64.3 @@ -37,8 +37,12 @@
    64.4  #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
    64.5  #include "gc_implementation/parNew/parNewGeneration.hpp"
    64.6  #include "gc_implementation/shared/collectorCounters.hpp"
    64.7 +#include "gc_implementation/shared/gcTimer.hpp"
    64.8 +#include "gc_implementation/shared/gcTrace.hpp"
    64.9 +#include "gc_implementation/shared/gcTraceTime.hpp"
   64.10  #include "gc_implementation/shared/isGCActiveMark.hpp"
   64.11  #include "gc_interface/collectedHeap.inline.hpp"
   64.12 +#include "memory/allocation.hpp"
   64.13  #include "memory/cardTableRS.hpp"
   64.14  #include "memory/collectorPolicy.hpp"
   64.15  #include "memory/gcLocker.inline.hpp"
   64.16 @@ -60,7 +64,8 @@
   64.17  
   64.18  // statics
   64.19  CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
   64.20 -bool          CMSCollector::_full_gc_requested          = false;
   64.21 +bool CMSCollector::_full_gc_requested = false;
   64.22 +GCCause::Cause CMSCollector::_full_gc_cause = GCCause::_no_gc;
   64.23  
   64.24  //////////////////////////////////////////////////////////////////
   64.25  // In support of CMS/VM thread synchronization
   64.26 @@ -591,7 +596,10 @@
   64.27    _concurrent_cycles_since_last_unload(0),
   64.28    _roots_scanning_options(0),
   64.29    _inter_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
   64.30 -  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding)
   64.31 +  _intra_sweep_estimate(CMS_SweepWeight, CMS_SweepPadding),
   64.32 +  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) CMSTracer()),
   64.33 +  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
   64.34 +  _cms_start_registered(false)
   64.35  {
   64.36    if (ExplicitGCInvokesConcurrentAndUnloadsClasses) {
   64.37      ExplicitGCInvokesConcurrent = true;
   64.38 @@ -1676,18 +1684,38 @@
   64.39    _full_gcs_since_conc_gc++;
   64.40  }
   64.41  
   64.42 -void CMSCollector::request_full_gc(unsigned int full_gc_count) {
   64.43 +void CMSCollector::request_full_gc(unsigned int full_gc_count, GCCause::Cause cause) {
   64.44    GenCollectedHeap* gch = GenCollectedHeap::heap();
   64.45    unsigned int gc_count = gch->total_full_collections();
   64.46    if (gc_count == full_gc_count) {
   64.47      MutexLockerEx y(CGC_lock, Mutex::_no_safepoint_check_flag);
   64.48      _full_gc_requested = true;
   64.49 +    _full_gc_cause = cause;
   64.50      CGC_lock->notify();   // nudge CMS thread
   64.51    } else {
   64.52      assert(gc_count > full_gc_count, "Error: causal loop");
   64.53    }
   64.54  }
   64.55  
   64.56 +bool CMSCollector::is_external_interruption() {
   64.57 +  GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
   64.58 +  return GCCause::is_user_requested_gc(cause) ||
   64.59 +         GCCause::is_serviceability_requested_gc(cause);
   64.60 +}
   64.61 +
   64.62 +void CMSCollector::report_concurrent_mode_interruption() {
   64.63 +  if (is_external_interruption()) {
   64.64 +    if (PrintGCDetails) {
   64.65 +      gclog_or_tty->print(" (concurrent mode interrupted)");
   64.66 +    }
   64.67 +  } else {
   64.68 +    if (PrintGCDetails) {
   64.69 +      gclog_or_tty->print(" (concurrent mode failure)");
   64.70 +    }
   64.71 +    _gc_tracer_cm->report_concurrent_mode_failure();
   64.72 +  }
   64.73 +}
   64.74 +
   64.75  
   64.76  // The foreground and background collectors need to coordinate in order
   64.77  // to make sure that they do not mutually interfere with CMS collections.
   64.78 @@ -1845,14 +1873,8 @@
   64.79    }
   64.80  )
   64.81  
   64.82 -  if (PrintGCDetails && first_state > Idling) {
   64.83 -    GCCause::Cause cause = GenCollectedHeap::heap()->gc_cause();
   64.84 -    if (GCCause::is_user_requested_gc(cause) ||
   64.85 -        GCCause::is_serviceability_requested_gc(cause)) {
   64.86 -      gclog_or_tty->print(" (concurrent mode interrupted)");
   64.87 -    } else {
   64.88 -      gclog_or_tty->print(" (concurrent mode failure)");
   64.89 -    }
   64.90 +  if (first_state > Idling) {
   64.91 +    report_concurrent_mode_interruption();
   64.92    }
   64.93  
   64.94    set_did_compact(should_compact);
   64.95 @@ -1868,6 +1890,10 @@
   64.96      // Reference objects are active.
   64.97      ref_processor()->clean_up_discovered_references();
   64.98  
   64.99 +    if (first_state > Idling) {
  64.100 +      save_heap_summary();
  64.101 +    }
  64.102 +
  64.103      do_compaction_work(clear_all_soft_refs);
  64.104  
  64.105      // Has the GC time limit been exceeded?
  64.106 @@ -1971,7 +1997,14 @@
  64.107  // a mark-sweep-compact.
  64.108  void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
  64.109    GenCollectedHeap* gch = GenCollectedHeap::heap();
  64.110 -  TraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, gclog_or_tty);
  64.111 +
  64.112 +  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  64.113 +  gc_timer->register_gc_start(os::elapsed_counter());
  64.114 +
  64.115 +  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  64.116 +  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  64.117 +
  64.118 +  GCTraceTime t("CMS:MSC ", PrintGCDetails && Verbose, true, NULL);
  64.119    if (PrintGC && Verbose && !(GCCause::is_user_requested_gc(gch->gc_cause()))) {
  64.120      gclog_or_tty->print_cr("Compact ConcurrentMarkSweepGeneration after %d "
  64.121        "collections passed to foreground collector", _full_gcs_since_conc_gc);
  64.122 @@ -2062,6 +2095,10 @@
  64.123      size_policy()->msc_collection_end(gch->gc_cause());
  64.124    }
  64.125  
  64.126 +  gc_timer->register_gc_end(os::elapsed_counter());
  64.127 +
  64.128 +  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  64.129 +
  64.130    // For a mark-sweep-compact, compute_new_size() will be called
  64.131    // in the heap's do_collection() method.
  64.132  }
  64.133 @@ -2093,7 +2130,7 @@
  64.134        // required.
  64.135        _collectorState = FinalMarking;
  64.136    }
  64.137 -  collect_in_foreground(clear_all_soft_refs);
  64.138 +  collect_in_foreground(clear_all_soft_refs, GenCollectedHeap::heap()->gc_cause());
  64.139  
  64.140    // For a mark-sweep, compute_new_size() will be called
  64.141    // in the heap's do_collection() method.
  64.142 @@ -2153,7 +2190,7 @@
  64.143  // one "collect" method between the background collector and the foreground
  64.144  // collector but the if-then-else required made it cleaner to have
  64.145  // separate methods.
  64.146 -void CMSCollector::collect_in_background(bool clear_all_soft_refs) {
  64.147 +void CMSCollector::collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause) {
  64.148    assert(Thread::current()->is_ConcurrentGC_thread(),
  64.149      "A CMS asynchronous collection is only allowed on a CMS thread.");
  64.150  
  64.151 @@ -2172,6 +2209,7 @@
  64.152      } else {
  64.153        assert(_collectorState == Idling, "Should be idling before start.");
  64.154        _collectorState = InitialMarking;
  64.155 +      register_gc_start(cause);
  64.156        // Reset the expansion cause, now that we are about to begin
  64.157        // a new cycle.
  64.158        clear_expansion_cause();
  64.159 @@ -2184,6 +2222,7 @@
  64.160      // ensuing concurrent GC cycle.
  64.161      update_should_unload_classes();
  64.162      _full_gc_requested = false;           // acks all outstanding full gc requests
  64.163 +    _full_gc_cause = GCCause::_no_gc;
  64.164      // Signal that we are about to start a collection
  64.165      gch->increment_total_full_collections();  // ... starting a collection cycle
  64.166      _collection_count_start = gch->total_full_collections();
  64.167 @@ -2263,7 +2302,6 @@
  64.168          {
  64.169            ReleaseForegroundGC x(this);
  64.170            stats().record_cms_begin();
  64.171 -
  64.172            VM_CMS_Initial_Mark initial_mark_op(this);
  64.173            VMThread::execute(&initial_mark_op);
  64.174          }
  64.175 @@ -2343,6 +2381,7 @@
  64.176            CMSTokenSync        z(true);   // not strictly needed.
  64.177            if (_collectorState == Resizing) {
  64.178              compute_new_size();
  64.179 +            save_heap_summary();
  64.180              _collectorState = Resetting;
  64.181            } else {
  64.182              assert(_collectorState == Idling, "The state should only change"
  64.183 @@ -2401,7 +2440,39 @@
  64.184    }
  64.185  }
  64.186  
  64.187 -void CMSCollector::collect_in_foreground(bool clear_all_soft_refs) {
  64.188 +void CMSCollector::register_foreground_gc_start(GCCause::Cause cause) {
  64.189 +  if (!_cms_start_registered) {
  64.190 +    register_gc_start(cause);
  64.191 +  }
  64.192 +}
  64.193 +
  64.194 +void CMSCollector::register_gc_start(GCCause::Cause cause) {
  64.195 +  _cms_start_registered = true;
  64.196 +  _gc_timer_cm->register_gc_start(os::elapsed_counter());
  64.197 +  _gc_tracer_cm->report_gc_start(cause, _gc_timer_cm->gc_start());
  64.198 +}
  64.199 +
  64.200 +void CMSCollector::register_gc_end() {
  64.201 +  if (_cms_start_registered) {
  64.202 +    report_heap_summary(GCWhen::AfterGC);
  64.203 +
  64.204 +    _gc_timer_cm->register_gc_end(os::elapsed_counter());
  64.205 +    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  64.206 +    _cms_start_registered = false;
  64.207 +  }
  64.208 +}
  64.209 +
  64.210 +void CMSCollector::save_heap_summary() {
  64.211 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
  64.212 +  _last_heap_summary = gch->create_heap_summary();
  64.213 +  _last_metaspace_summary = gch->create_metaspace_summary();
  64.214 +}
  64.215 +
  64.216 +void CMSCollector::report_heap_summary(GCWhen::Type when) {
  64.217 +  _gc_tracer_cm->report_gc_heap_summary(when, _last_heap_summary, _last_metaspace_summary);
  64.218 +}
  64.219 +
  64.220 +void CMSCollector::collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause) {
  64.221    assert(_foregroundGCIsActive && !_foregroundGCShouldWait,
  64.222           "Foreground collector should be waiting, not executing");
  64.223    assert(Thread::current()->is_VM_thread(), "A foreground collection"
  64.224 @@ -2409,8 +2480,8 @@
  64.225    assert(ConcurrentMarkSweepThread::vm_thread_has_cms_token(),
  64.226           "VM thread should have CMS token");
  64.227  
  64.228 -  NOT_PRODUCT(TraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
  64.229 -    true, gclog_or_tty);)
  64.230 +  NOT_PRODUCT(GCTraceTime t("CMS:MS (foreground) ", PrintGCDetails && Verbose,
  64.231 +    true, NULL);)
  64.232    if (UseAdaptiveSizePolicy) {
  64.233      size_policy()->ms_collection_begin();
  64.234    }
  64.235 @@ -2434,6 +2505,7 @@
  64.236      }
  64.237      switch (_collectorState) {
  64.238        case InitialMarking:
  64.239 +        register_foreground_gc_start(cause);
  64.240          init_mark_was_synchronous = true;  // fact to be exploited in re-mark
  64.241          checkpointRootsInitial(false);
  64.242          assert(_collectorState == Marking, "Collector state should have changed"
  64.243 @@ -2482,6 +2554,7 @@
  64.244              GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
  64.245            Universe::verify("Verify before reset: ");
  64.246          }
  64.247 +        save_heap_summary();
  64.248          reset(false);
  64.249          assert(_collectorState == Idling, "Collector state should "
  64.250            "have changed");
  64.251 @@ -3504,6 +3577,9 @@
  64.252    check_correct_thread_executing();
  64.253    TraceCMSMemoryManagerStats tms(_collectorState,GenCollectedHeap::heap()->gc_cause());
  64.254  
  64.255 +  save_heap_summary();
  64.256 +  report_heap_summary(GCWhen::BeforeGC);
  64.257 +
  64.258    ReferenceProcessor* rp = ref_processor();
  64.259    SpecializationStats::clear();
  64.260    assert(_restart_addr == NULL, "Control point invariant");
  64.261 @@ -3549,8 +3625,8 @@
  64.262    // CMS collection cycle.
  64.263    setup_cms_unloading_and_verification_state();
  64.264  
  64.265 -  NOT_PRODUCT(TraceTime t("\ncheckpointRootsInitialWork",
  64.266 -    PrintGCDetails && Verbose, true, gclog_or_tty);)
  64.267 +  NOT_PRODUCT(GCTraceTime t("\ncheckpointRootsInitialWork",
  64.268 +    PrintGCDetails && Verbose, true, _gc_timer_cm);)
  64.269    if (UseAdaptiveSizePolicy) {
  64.270      size_policy()->checkpoint_roots_initial_begin();
  64.271    }
  64.272 @@ -4542,8 +4618,10 @@
  64.273      // The code in this method may need further
  64.274      // tweaking for better performance and some restructuring
  64.275      // for cleaner interfaces.
  64.276 +    GCTimer *gc_timer = NULL; // Currently not tracing concurrent phases
  64.277      rp->preclean_discovered_references(
  64.278 -          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl);
  64.279 +          rp->is_alive_non_header(), &keep_alive, &complete_trace, &yield_cl,
  64.280 +          gc_timer);
  64.281    }
  64.282  
  64.283    if (clean_survivor) {  // preclean the active survivor space(s)
  64.284 @@ -4885,8 +4963,8 @@
  64.285        // Temporarily set flag to false, GCH->do_collection will
  64.286        // expect it to be false and set to true
  64.287        FlagSetting fl(gch->_is_gc_active, false);
  64.288 -      NOT_PRODUCT(TraceTime t("Scavenge-Before-Remark",
  64.289 -        PrintGCDetails && Verbose, true, gclog_or_tty);)
  64.290 +      NOT_PRODUCT(GCTraceTime t("Scavenge-Before-Remark",
  64.291 +        PrintGCDetails && Verbose, true, _gc_timer_cm);)
  64.292        int level = _cmsGen->level() - 1;
  64.293        if (level >= 0) {
  64.294          gch->do_collection(true,        // full (i.e. force, see below)
  64.295 @@ -4915,7 +4993,7 @@
  64.296  void CMSCollector::checkpointRootsFinalWork(bool asynch,
  64.297    bool clear_all_soft_refs, bool init_mark_was_synchronous) {
  64.298  
  64.299 -  NOT_PRODUCT(TraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, gclog_or_tty);)
  64.300 +  NOT_PRODUCT(GCTraceTime tr("checkpointRootsFinalWork", PrintGCDetails, false, _gc_timer_cm);)
  64.301  
  64.302    assert(haveFreelistLocks(), "must have free list locks");
  64.303    assert_lock_strong(bitMapLock());
  64.304 @@ -4966,11 +5044,11 @@
  64.305        // the most recent young generation GC, minus those cleaned up by the
  64.306        // concurrent precleaning.
  64.307        if (CMSParallelRemarkEnabled && CollectedHeap::use_parallel_gc_threads()) {
  64.308 -        TraceTime t("Rescan (parallel) ", PrintGCDetails, false, gclog_or_tty);
  64.309 +        GCTraceTime t("Rescan (parallel) ", PrintGCDetails, false, _gc_timer_cm);
  64.310          do_remark_parallel();
  64.311        } else {
  64.312 -        TraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  64.313 -                    gclog_or_tty);
  64.314 +        GCTraceTime t("Rescan (non-parallel) ", PrintGCDetails, false,
  64.315 +                    _gc_timer_cm);
  64.316          do_remark_non_parallel();
  64.317        }
  64.318      }
  64.319 @@ -4983,7 +5061,7 @@
  64.320    verify_overflow_empty();
  64.321  
  64.322    {
  64.323 -    NOT_PRODUCT(TraceTime ts("refProcessingWork", PrintGCDetails, false, gclog_or_tty);)
  64.324 +    NOT_PRODUCT(GCTraceTime ts("refProcessingWork", PrintGCDetails, false, _gc_timer_cm);)
  64.325      refProcessingWork(asynch, clear_all_soft_refs);
  64.326    }
  64.327    verify_work_stacks_empty();
  64.328 @@ -5044,6 +5122,8 @@
  64.329      verify_after_remark();
  64.330    }
  64.331  
  64.332 +  _gc_tracer_cm->report_object_count_after_gc(&_is_alive_closure);
  64.333 +
  64.334    // Change under the freelistLocks.
  64.335    _collectorState = Sweeping;
  64.336    // Call isAllClear() under bitMapLock
  64.337 @@ -5697,7 +5777,7 @@
  64.338                                NULL,  // space is set further below
  64.339                                &_markBitMap, &_markStack, &mrias_cl);
  64.340    {
  64.341 -    TraceTime t("grey object rescan", PrintGCDetails, false, gclog_or_tty);
  64.342 +    GCTraceTime t("grey object rescan", PrintGCDetails, false, _gc_timer_cm);
  64.343      // Iterate over the dirty cards, setting the corresponding bits in the
  64.344      // mod union table.
  64.345      {
  64.346 @@ -5734,7 +5814,7 @@
  64.347      Universe::verify();
  64.348    }
  64.349    {
  64.350 -    TraceTime t("root rescan", PrintGCDetails, false, gclog_or_tty);
  64.351 +    GCTraceTime t("root rescan", PrintGCDetails, false, _gc_timer_cm);
  64.352  
  64.353      verify_work_stacks_empty();
  64.354  
  64.355 @@ -5756,7 +5836,7 @@
  64.356    }
  64.357  
  64.358    {
  64.359 -    TraceTime t("visit unhandled CLDs", PrintGCDetails, false, gclog_or_tty);
  64.360 +    GCTraceTime t("visit unhandled CLDs", PrintGCDetails, false, _gc_timer_cm);
  64.361  
  64.362      verify_work_stacks_empty();
  64.363  
  64.364 @@ -5775,7 +5855,7 @@
  64.365    }
  64.366  
  64.367    {
  64.368 -    TraceTime t("dirty klass scan", PrintGCDetails, false, gclog_or_tty);
  64.369 +    GCTraceTime t("dirty klass scan", PrintGCDetails, false, _gc_timer_cm);
  64.370  
  64.371      verify_work_stacks_empty();
  64.372  
  64.373 @@ -5977,7 +6057,9 @@
  64.374                                  _span, &_markBitMap, &_markStack,
  64.375                                  &cmsKeepAliveClosure, false /* !preclean */);
  64.376    {
  64.377 -    TraceTime t("weak refs processing", PrintGCDetails, false, gclog_or_tty);
  64.378 +    GCTraceTime t("weak refs processing", PrintGCDetails, false, _gc_timer_cm);
  64.379 +
  64.380 +    ReferenceProcessorStats stats;
  64.381      if (rp->processing_is_mt()) {
  64.382        // Set the degree of MT here.  If the discovery is done MT, there
  64.383        // may have been a different number of threads doing the discovery
  64.384 @@ -5996,16 +6078,20 @@
  64.385        }
  64.386        rp->set_active_mt_degree(active_workers);
  64.387        CMSRefProcTaskExecutor task_executor(*this);
  64.388 -      rp->process_discovered_references(&_is_alive_closure,
  64.389 +      stats = rp->process_discovered_references(&_is_alive_closure,
  64.390                                          &cmsKeepAliveClosure,
  64.391                                          &cmsDrainMarkingStackClosure,
  64.392 -                                        &task_executor);
  64.393 +                                        &task_executor,
  64.394 +                                        _gc_timer_cm);
  64.395      } else {
  64.396 -      rp->process_discovered_references(&_is_alive_closure,
  64.397 +      stats = rp->process_discovered_references(&_is_alive_closure,
  64.398                                          &cmsKeepAliveClosure,
  64.399                                          &cmsDrainMarkingStackClosure,
  64.400 -                                        NULL);
  64.401 -    }
  64.402 +                                        NULL,
  64.403 +                                        _gc_timer_cm);
  64.404 +    }
  64.405 +    _gc_tracer_cm->report_gc_reference_stats(stats);
  64.406 +
  64.407    }
  64.408  
  64.409    // This is the point where the entire marking should have completed.
  64.410 @@ -6013,7 +6099,7 @@
  64.411  
  64.412    if (should_unload_classes()) {
  64.413      {
  64.414 -      TraceTime t("class unloading", PrintGCDetails, false, gclog_or_tty);
  64.415 +      GCTraceTime t("class unloading", PrintGCDetails, false, _gc_timer_cm);
  64.416  
  64.417        // Unload classes and purge the SystemDictionary.
  64.418        bool purged_class = SystemDictionary::do_unloading(&_is_alive_closure);
  64.419 @@ -6026,7 +6112,7 @@
  64.420      }
  64.421  
  64.422      {
  64.423 -      TraceTime t("scrub symbol table", PrintGCDetails, false, gclog_or_tty);
  64.424 +      GCTraceTime t("scrub symbol table", PrintGCDetails, false, _gc_timer_cm);
  64.425        // Clean up unreferenced symbols in symbol table.
  64.426        SymbolTable::unlink();
  64.427      }
  64.428 @@ -6035,7 +6121,7 @@
  64.429    // CMS doesn't use the StringTable as hard roots when class unloading is turned off.
  64.430    // Need to check if we really scanned the StringTable.
  64.431    if ((roots_scanning_options() & SharedHeap::SO_Strings) == 0) {
  64.432 -    TraceTime t("scrub string table", PrintGCDetails, false, gclog_or_tty);
  64.433 +    GCTraceTime t("scrub string table", PrintGCDetails, false, _gc_timer_cm);
  64.434      // Delete entries for dead interned strings.
  64.435      StringTable::unlink(&_is_alive_closure);
  64.436    }
  64.437 @@ -6380,12 +6466,14 @@
  64.438        _cmsGen->rotate_debug_collection_type();
  64.439      }
  64.440    )
  64.441 +
  64.442 +  register_gc_end();
  64.443  }
  64.444  
  64.445  void CMSCollector::do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause) {
  64.446    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  64.447    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  64.448 -  TraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
  64.449 +  GCTraceTime t(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
  64.450    TraceCollectorStats tcs(counters());
  64.451  
  64.452    switch (op) {
    65.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Fri Jun 07 09:33:01 2013 -0700
    65.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Mon Jun 10 11:30:51 2013 +0200
    65.3 @@ -25,8 +25,10 @@
    65.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
    65.5  #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_HPP
    65.6  
    65.7 +#include "gc_implementation/shared/gcHeapSummary.hpp"
    65.8  #include "gc_implementation/shared/gSpaceCounters.hpp"
    65.9  #include "gc_implementation/shared/gcStats.hpp"
   65.10 +#include "gc_implementation/shared/gcWhen.hpp"
   65.11  #include "gc_implementation/shared/generationCounters.hpp"
   65.12  #include "memory/freeBlockDictionary.hpp"
   65.13  #include "memory/generation.hpp"
   65.14 @@ -53,6 +55,8 @@
   65.15  class CMSAdaptiveSizePolicy;
   65.16  class CMSConcMarkingTask;
   65.17  class CMSGCAdaptivePolicyCounters;
   65.18 +class CMSTracer;
   65.19 +class ConcurrentGCTimer;
   65.20  class ConcurrentMarkSweepGeneration;
   65.21  class ConcurrentMarkSweepPolicy;
   65.22  class ConcurrentMarkSweepThread;
   65.23 @@ -61,6 +65,7 @@
   65.24  class PromotionInfo;
   65.25  class ScanMarkedObjectsAgainCarefullyClosure;
   65.26  class TenuredGeneration;
   65.27 +class SerialOldTracer;
   65.28  
   65.29  // A generic CMS bit map. It's the basis for both the CMS marking bit map
   65.30  // as well as for the mod union table (in each case only a subset of the
   65.31 @@ -567,8 +572,9 @@
   65.32    bool _completed_initialization;
   65.33  
   65.34    // In support of ExplicitGCInvokesConcurrent
   65.35 -  static   bool _full_gc_requested;
   65.36 -  unsigned int  _collection_count_start;
   65.37 +  static bool _full_gc_requested;
   65.38 +  static GCCause::Cause _full_gc_cause;
   65.39 +  unsigned int _collection_count_start;
   65.40  
   65.41    // Should we unload classes this concurrent cycle?
   65.42    bool _should_unload_classes;
   65.43 @@ -609,6 +615,20 @@
   65.44    AdaptivePaddedAverage _inter_sweep_estimate;
   65.45    AdaptivePaddedAverage _intra_sweep_estimate;
   65.46  
   65.47 +  CMSTracer* _gc_tracer_cm;
   65.48 +  ConcurrentGCTimer* _gc_timer_cm;
   65.49 +
   65.50 +  bool _cms_start_registered;
   65.51 +
   65.52 +  GCHeapSummary _last_heap_summary;
   65.53 +  MetaspaceSummary _last_metaspace_summary;
   65.54 +
   65.55 +  void register_foreground_gc_start(GCCause::Cause cause);
   65.56 +  void register_gc_start(GCCause::Cause cause);
   65.57 +  void register_gc_end();
   65.58 +  void save_heap_summary();
   65.59 +  void report_heap_summary(GCWhen::Type when);
   65.60 +
   65.61   protected:
   65.62    ConcurrentMarkSweepGeneration* _cmsGen;  // old gen (CMS)
   65.63    MemRegion                      _span;    // span covering above two
   65.64 @@ -827,6 +847,10 @@
   65.65    void do_mark_sweep_work(bool clear_all_soft_refs,
   65.66      CollectorState first_state, bool should_start_over);
   65.67  
   65.68 +  // Work methods for reporting concurrent mode interruption or failure
   65.69 +  bool is_external_interruption();
   65.70 +  void report_concurrent_mode_interruption();
   65.71 +
   65.72    // If the backgrould GC is active, acquire control from the background
   65.73    // GC and do the collection.
   65.74    void acquire_control_and_collect(bool   full, bool clear_all_soft_refs);
   65.75 @@ -876,11 +900,11 @@
   65.76                 bool   clear_all_soft_refs,
   65.77                 size_t size,
   65.78                 bool   tlab);
   65.79 -  void collect_in_background(bool clear_all_soft_refs);
   65.80 -  void collect_in_foreground(bool clear_all_soft_refs);
   65.81 +  void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
   65.82 +  void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
   65.83  
   65.84    // In support of ExplicitGCInvokesConcurrent
   65.85 -  static void request_full_gc(unsigned int full_gc_count);
   65.86 +  static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
   65.87    // Should we unload classes in a particular concurrent cycle?
   65.88    bool should_unload_classes() const {
   65.89      return _should_unload_classes;
    66.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Fri Jun 07 09:33:01 2013 -0700
    66.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.cpp	Mon Jun 10 11:30:51 2013 +0200
    66.3 @@ -140,7 +140,9 @@
    66.4    while (!_should_terminate) {
    66.5      sleepBeforeNextCycle();
    66.6      if (_should_terminate) break;
    66.7 -    _collector->collect_in_background(false);  // !clear_all_soft_refs
    66.8 +    GCCause::Cause cause = _collector->_full_gc_requested ?
    66.9 +      _collector->_full_gc_cause : GCCause::_cms_concurrent_mark;
   66.10 +    _collector->collect_in_background(false, cause);
   66.11    }
   66.12    assert(_should_terminate, "just checking");
   66.13    // Check that the state of any protocol for synchronization
    67.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Fri Jun 07 09:33:01 2013 -0700
    67.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/vmCMSOperations.cpp	Mon Jun 10 11:30:51 2013 +0200
    67.3 @@ -1,5 +1,5 @@
    67.4  /*
    67.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    67.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    67.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    67.8   *
    67.9   * This code is free software; you can redistribute it and/or modify it
   67.10 @@ -26,9 +26,12 @@
   67.11  #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
   67.12  #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
   67.13  #include "gc_implementation/concurrentMarkSweep/vmCMSOperations.hpp"
   67.14 +#include "gc_implementation/shared/gcTimer.hpp"
   67.15 +#include "gc_implementation/shared/gcTraceTime.hpp"
   67.16  #include "gc_implementation/shared/isGCActiveMark.hpp"
   67.17  #include "memory/gcLocker.inline.hpp"
   67.18  #include "runtime/interfaceSupport.hpp"
   67.19 +#include "runtime/os.hpp"
   67.20  #include "utilities/dtrace.hpp"
   67.21  
   67.22  
   67.23 @@ -60,6 +63,7 @@
   67.24  void VM_CMS_Operation::verify_before_gc() {
   67.25    if (VerifyBeforeGC &&
   67.26        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
   67.27 +    GCTraceTime tm("Verify Before", false, false, _collector->_gc_timer_cm);
   67.28      HandleMark hm;
   67.29      FreelistLocker x(_collector);
   67.30      MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
   67.31 @@ -71,6 +75,7 @@
   67.32  void VM_CMS_Operation::verify_after_gc() {
   67.33    if (VerifyAfterGC &&
   67.34        GenCollectedHeap::heap()->total_collections() >= VerifyGCStartAt) {
   67.35 +    GCTraceTime tm("Verify After", false, false, _collector->_gc_timer_cm);
   67.36      HandleMark hm;
   67.37      FreelistLocker x(_collector);
   67.38      MutexLockerEx  y(_collector->bitMapLock(), Mutex::_no_safepoint_check_flag);
   67.39 @@ -140,6 +145,8 @@
   67.40                                  );
   67.41  #endif /* USDT2 */
   67.42  
   67.43 +  _collector->_gc_timer_cm->register_gc_pause_start("Initial Mark", os::elapsed_counter());
   67.44 +
   67.45    GenCollectedHeap* gch = GenCollectedHeap::heap();
   67.46    GCCauseSetter gccs(gch, GCCause::_cms_initial_mark);
   67.47  
   67.48 @@ -149,6 +156,9 @@
   67.49    _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsInitial, gch->gc_cause());
   67.50  
   67.51    VM_CMS_Operation::verify_after_gc();
   67.52 +
   67.53 +  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
   67.54 +
   67.55  #ifndef USDT2
   67.56    HS_DTRACE_PROBE(hs_private, cms__initmark__end);
   67.57  #else /* USDT2 */
   67.58 @@ -172,6 +182,8 @@
   67.59                                  );
   67.60  #endif /* USDT2 */
   67.61  
   67.62 +  _collector->_gc_timer_cm->register_gc_pause_start("Final Mark", os::elapsed_counter());
   67.63 +
   67.64    GenCollectedHeap* gch = GenCollectedHeap::heap();
   67.65    GCCauseSetter gccs(gch, GCCause::_cms_final_remark);
   67.66  
   67.67 @@ -181,6 +193,10 @@
   67.68    _collector->do_CMS_operation(CMSCollector::CMS_op_checkpointRootsFinal, gch->gc_cause());
   67.69  
   67.70    VM_CMS_Operation::verify_after_gc();
   67.71 +
   67.72 +  _collector->save_heap_summary();
   67.73 +  _collector->_gc_timer_cm->register_gc_pause_end(os::elapsed_counter());
   67.74 +
   67.75  #ifndef USDT2
   67.76    HS_DTRACE_PROBE(hs_private, cms__remark__end);
   67.77  #else /* USDT2 */
   67.78 @@ -225,7 +241,7 @@
   67.79      // In case CMS thread was in icms_wait(), wake it up.
   67.80      CMSCollector::start_icms();
   67.81      // Nudge the CMS thread to start a concurrent collection.
   67.82 -    CMSCollector::request_full_gc(_full_gc_count_before);
   67.83 +    CMSCollector::request_full_gc(_full_gc_count_before, _gc_cause);
   67.84    } else {
   67.85      assert(_full_gc_count_before < gch->total_full_collections(), "Error");
   67.86      FullGCCount_lock->notify_all();  // Inform the Java thread its work is done
    68.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Jun 07 09:33:01 2013 -0700
    68.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Mon Jun 10 11:30:51 2013 +0200
    68.3 @@ -36,6 +36,9 @@
    68.4  #include "gc_implementation/g1/heapRegionRemSet.hpp"
    68.5  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    68.6  #include "gc_implementation/shared/vmGCOperations.hpp"
    68.7 +#include "gc_implementation/shared/gcTimer.hpp"
    68.8 +#include "gc_implementation/shared/gcTrace.hpp"
    68.9 +#include "gc_implementation/shared/gcTraceTime.hpp"
   68.10  #include "memory/genOopClosures.inline.hpp"
   68.11  #include "memory/referencePolicy.hpp"
   68.12  #include "memory/resourceArea.hpp"
   68.13 @@ -1342,6 +1345,9 @@
   68.14    _remark_times.add((now - start) * 1000.0);
   68.15  
   68.16    g1p->record_concurrent_mark_remark_end();
   68.17 +
   68.18 +  G1CMIsAliveClosure is_alive(g1h);
   68.19 +  g1h->gc_tracer_cm()->report_object_count_after_gc(&is_alive);
   68.20  }
   68.21  
   68.22  // Base class of the closures that finalize and verify the
   68.23 @@ -2129,6 +2135,7 @@
   68.24    }
   68.25  
   68.26    g1h->verify_region_sets_optional();
   68.27 +  g1h->trace_heap_after_concurrent_cycle();
   68.28  }
   68.29  
   68.30  void ConcurrentMark::completeCleanup() {
   68.31 @@ -2439,7 +2446,7 @@
   68.32      if (G1Log::finer()) {
   68.33        gclog_or_tty->put(' ');
   68.34      }
   68.35 -    TraceTime t("GC ref-proc", G1Log::finer(), false, gclog_or_tty);
   68.36 +    GCTraceTime t("GC ref-proc", G1Log::finer(), false, g1h->gc_timer_cm());
   68.37  
   68.38      ReferenceProcessor* rp = g1h->ref_processor_cm();
   68.39  
   68.40 @@ -2491,10 +2498,13 @@
   68.41      rp->set_active_mt_degree(active_workers);
   68.42  
   68.43      // Process the weak references.
   68.44 -    rp->process_discovered_references(&g1_is_alive,
   68.45 -                                      &g1_keep_alive,
   68.46 -                                      &g1_drain_mark_stack,
   68.47 -                                      executor);
   68.48 +    const ReferenceProcessorStats& stats =
   68.49 +        rp->process_discovered_references(&g1_is_alive,
   68.50 +                                          &g1_keep_alive,
   68.51 +                                          &g1_drain_mark_stack,
   68.52 +                                          executor,
   68.53 +                                          g1h->gc_timer_cm());
   68.54 +    g1h->gc_tracer_cm()->report_gc_reference_stats(stats);
   68.55  
   68.56      // The do_oop work routines of the keep_alive and drain_marking_stack
   68.57      // oop closures will set the has_overflown flag if we overflow the
   68.58 @@ -3227,6 +3237,9 @@
   68.59    satb_mq_set.set_active_all_threads(
   68.60                                   false, /* new active value */
   68.61                                   satb_mq_set.is_active() /* expected_active */);
   68.62 +
   68.63 +  _g1h->trace_heap_after_concurrent_cycle();
   68.64 +  _g1h->register_concurrent_cycle_end();
   68.65  }
   68.66  
   68.67  static void print_ms_time_info(const char* prefix, const char* name,
    69.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Fri Jun 07 09:33:01 2013 -0700
    69.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Mon Jun 10 11:30:51 2013 +0200
    69.3 @@ -569,8 +569,6 @@
    69.4    void clear_has_overflown()     { _has_overflown = false; }
    69.5    bool restart_for_overflow()    { return _restart_for_overflow; }
    69.6  
    69.7 -  bool has_aborted()             { return _has_aborted; }
    69.8 -
    69.9    // Methods to enter the two overflow sync barriers
   69.10    void enter_first_sync_barrier(uint worker_id);
   69.11    void enter_second_sync_barrier(uint worker_id);
   69.12 @@ -821,6 +819,8 @@
   69.13    // Called to abort the marking cycle after a Full GC takes palce.
   69.14    void abort();
   69.15  
   69.16 +  bool has_aborted()      { return _has_aborted; }
   69.17 +
   69.18    // This prints the global/local fingers. It is used for debugging.
   69.19    NOT_PRODUCT(void print_finger();)
   69.20  
    70.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Fri Jun 07 09:33:01 2013 -0700
    70.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp	Mon Jun 10 11:30:51 2013 +0200
    70.3 @@ -93,7 +93,6 @@
    70.4        ResourceMark rm;
    70.5        HandleMark   hm;
    70.6        double cycle_start = os::elapsedVTime();
    70.7 -      char verbose_str[128];
    70.8  
    70.9        // We have to ensure that we finish scanning the root regions
   70.10        // before the next GC takes place. To ensure this we have to
   70.11 @@ -155,8 +154,7 @@
   70.12            }
   70.13  
   70.14            CMCheckpointRootsFinalClosure final_cl(_cm);
   70.15 -          sprintf(verbose_str, "GC remark");
   70.16 -          VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
   70.17 +          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
   70.18            VMThread::execute(&op);
   70.19          }
   70.20          if (cm()->restart_for_overflow()) {
   70.21 @@ -187,8 +185,7 @@
   70.22          }
   70.23  
   70.24          CMCleanUp cl_cl(_cm);
   70.25 -        sprintf(verbose_str, "GC cleanup");
   70.26 -        VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
   70.27 +        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
   70.28          VMThread::execute(&op);
   70.29        } else {
   70.30          // We don't want to update the marking status if a GC pause
   70.31 @@ -292,6 +289,7 @@
   70.32      // called System.gc() with +ExplicitGCInvokesConcurrent).
   70.33      _sts.join();
   70.34      g1h->increment_old_marking_cycles_completed(true /* concurrent */);
   70.35 +    g1h->register_concurrent_cycle_end();
   70.36      _sts.leave();
   70.37    }
   70.38    assert(_should_terminate, "just checking");
    71.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    71.2 +++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp	Mon Jun 10 11:30:51 2013 +0200
    71.3 @@ -0,0 +1,81 @@
    71.4 +/*
    71.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    71.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.7 + *
    71.8 + * This code is free software; you can redistribute it and/or modify it
    71.9 + * under the terms of the GNU General Public License version 2 only, as
   71.10 + * published by the Free Software Foundation.
   71.11 + *
   71.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   71.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   71.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   71.15 + * version 2 for more details (a copy is included in the LICENSE file that
   71.16 + * accompanied this code).
   71.17 + *
   71.18 + * You should have received a copy of the GNU General Public License version
   71.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   71.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   71.21 + *
   71.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   71.23 + * or visit www.oracle.com if you need additional information or have any
   71.24 + * questions.
   71.25 + *
   71.26 + */
   71.27 +
   71.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
   71.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
   71.30 +
   71.31 +#include "memory/allocation.hpp"
   71.32 +
   71.33 +class EvacuationInfo : public StackObj {
   71.34 +  uint _collectionset_regions;
   71.35 +  uint _allocation_regions;
   71.36 +  size_t _collectionset_used_before;
   71.37 +  size_t _collectionset_used_after;
   71.38 +  size_t _alloc_regions_used_before;
   71.39 +  size_t _bytes_copied;
   71.40 +  uint   _regions_freed;
   71.41 +
   71.42 +public:
   71.43 +  EvacuationInfo() : _collectionset_regions(0), _allocation_regions(0), _collectionset_used_before(0),
   71.44 +                     _collectionset_used_after(0), _alloc_regions_used_before(0),
   71.45 +                     _bytes_copied(0), _regions_freed(0) { }
   71.46 +
   71.47 +  void set_collectionset_regions(uint collectionset_regions) {
   71.48 +    _collectionset_regions = collectionset_regions;
   71.49 +  }
   71.50 +
   71.51 +  void set_allocation_regions(uint allocation_regions) {
   71.52 +    _allocation_regions = allocation_regions;
   71.53 +  }
   71.54 +
   71.55 +  void set_collectionset_used_before(size_t used) {
   71.56 +    _collectionset_used_before = used;
   71.57 +  }
   71.58 +
   71.59 +  void increment_collectionset_used_after(size_t used) {
   71.60 +    _collectionset_used_after += used;
   71.61 +  }
   71.62 +
   71.63 +  void set_alloc_regions_used_before(size_t used) {
   71.64 +    _alloc_regions_used_before = used;
   71.65 +  }
   71.66 +
   71.67 +  void set_bytes_copied(size_t copied) {
   71.68 +    _bytes_copied = copied;
   71.69 +  }
   71.70 +
   71.71 +  void set_regions_freed(uint freed) {
   71.72 +    _regions_freed += freed;
   71.73 +  }
   71.74 +
   71.75 +  uint   collectionset_regions()     { return _collectionset_regions; }
   71.76 +  uint   allocation_regions()        { return _allocation_regions; }
   71.77 +  size_t collectionset_used_before() { return _collectionset_used_before; }
   71.78 +  size_t collectionset_used_after()  { return _collectionset_used_after; }
   71.79 +  size_t alloc_regions_used_before() { return _alloc_regions_used_before; }
   71.80 +  size_t bytes_copied()              { return _bytes_copied; }
   71.81 +  uint   regions_freed()             { return _regions_freed; }
   71.82 +};
   71.83 +
   71.84 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_EVACUATIONINFO_HPP
    72.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Jun 07 09:33:01 2013 -0700
    72.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Mon Jun 10 11:30:51 2013 +0200
    72.3 @@ -38,10 +38,15 @@
    72.4  #include "gc_implementation/g1/g1MarkSweep.hpp"
    72.5  #include "gc_implementation/g1/g1OopClosures.inline.hpp"
    72.6  #include "gc_implementation/g1/g1RemSet.inline.hpp"
    72.7 +#include "gc_implementation/g1/g1YCTypes.hpp"
    72.8  #include "gc_implementation/g1/heapRegion.inline.hpp"
    72.9  #include "gc_implementation/g1/heapRegionRemSet.hpp"
   72.10  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
   72.11  #include "gc_implementation/g1/vm_operations_g1.hpp"
   72.12 +#include "gc_implementation/shared/gcHeapSummary.hpp"
   72.13 +#include "gc_implementation/shared/gcTimer.hpp"
   72.14 +#include "gc_implementation/shared/gcTrace.hpp"
   72.15 +#include "gc_implementation/shared/gcTraceTime.hpp"
   72.16  #include "gc_implementation/shared/isGCActiveMark.hpp"
   72.17  #include "memory/gcLocker.inline.hpp"
   72.18  #include "memory/genOopClosures.inline.hpp"
   72.19 @@ -76,7 +81,7 @@
   72.20  // The number of GC workers is passed to heap_region_par_iterate_chunked().
   72.21  // It does use run_task() which sets _n_workers in the task.
   72.22  // G1ParTask executes g1_process_strong_roots() ->
   72.23 -// SharedHeap::process_strong_roots() which calls eventuall to
   72.24 +// SharedHeap::process_strong_roots() which calls eventually to
   72.25  // CardTableModRefBS::par_non_clean_card_iterate_work() which uses
   72.26  // SequentialSubTasksDone.  SharedHeap::process_strong_roots() also
   72.27  // directly uses SubTasksDone (_process_strong_tasks field in SharedHeap).
   72.28 @@ -457,7 +462,7 @@
   72.29  #endif
   72.30  
   72.31  // Returns true if the reference points to an object that
   72.32 -// can move in an incremental collecction.
   72.33 +// can move in an incremental collection.
   72.34  bool G1CollectedHeap::is_scavengable(const void* p) {
   72.35    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   72.36    G1CollectorPolicy* g1p = g1h->g1_policy();
   72.37 @@ -548,7 +553,7 @@
   72.38        return res;
   72.39      }
   72.40  
   72.41 -    // Wait here until we get notifed either when (a) there are no
   72.42 +    // Wait here until we get notified either when (a) there are no
   72.43      // more free regions coming or (b) some regions have been moved on
   72.44      // the secondary_free_list.
   72.45      SecondaryFreeList_lock->wait(Mutex::_no_safepoint_check_flag);
   72.46 @@ -623,7 +628,7 @@
   72.47    uint first = G1_NULL_HRS_INDEX;
   72.48    if (num_regions == 1) {
   72.49      // Only one region to allocate, no need to go through the slower
   72.50 -    // path. The caller will attempt the expasion if this fails, so
   72.51 +    // path. The caller will attempt the expansion if this fails, so
   72.52      // let's not try to expand here too.
   72.53      HeapRegion* hr = new_region(word_size, false /* do_expand */);
   72.54      if (hr != NULL) {
   72.55 @@ -688,7 +693,7 @@
   72.56    // the first region.
   72.57    HeapWord* new_obj = first_hr->bottom();
   72.58    // This will be the new end of the first region in the series that
   72.59 -  // should also match the end of the last region in the seriers.
   72.60 +  // should also match the end of the last region in the series.
   72.61    HeapWord* new_end = new_obj + word_size_sum;
   72.62    // This will be the new top of the first region that will reflect
   72.63    // this allocation.
   72.64 @@ -863,7 +868,7 @@
   72.65                                bool*  gc_overhead_limit_was_exceeded) {
   72.66    assert_heap_not_locked_and_not_at_safepoint();
   72.67  
   72.68 -  // Loop until the allocation is satisified, or unsatisfied after GC.
   72.69 +  // Loop until the allocation is satisfied, or unsatisfied after GC.
   72.70    for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
   72.71      unsigned int gc_count_before;
   72.72  
   72.73 @@ -1003,7 +1008,7 @@
   72.74        (*gclocker_retry_count_ret) += 1;
   72.75      }
   72.76  
   72.77 -    // We can reach here if we were unsuccessul in scheduling a
   72.78 +    // We can reach here if we were unsuccessful in scheduling a
   72.79      // collection (because another thread beat us to it) or if we were
   72.80      // stalled due to the GC locker. In either can we should retry the
   72.81      // allocation attempt in case another thread successfully
   72.82 @@ -1128,7 +1133,7 @@
   72.83        (*gclocker_retry_count_ret) += 1;
   72.84      }
   72.85  
   72.86 -    // We can reach here if we were unsuccessul in scheduling a
   72.87 +    // We can reach here if we were unsuccessful in scheduling a
   72.88      // collection (because another thread beat us to it) or if we were
   72.89      // stalled due to the GC locker. In either can we should retry the
   72.90      // allocation attempt in case another thread successfully
   72.91 @@ -1298,10 +1303,17 @@
   72.92      return false;
   72.93    }
   72.94  
   72.95 +  STWGCTimer* gc_timer = G1MarkSweep::gc_timer();
   72.96 +  gc_timer->register_gc_start(os::elapsed_counter());
   72.97 +
   72.98 +  SerialOldTracer* gc_tracer = G1MarkSweep::gc_tracer();
   72.99 +  gc_tracer->report_gc_start(gc_cause(), gc_timer->gc_start());
  72.100 +
  72.101    SvcGCMarker sgcm(SvcGCMarker::FULL);
  72.102    ResourceMark rm;
  72.103  
  72.104    print_heap_before_gc();
  72.105 +  trace_heap_before_gc(gc_tracer);
  72.106  
  72.107    size_t metadata_prev_used = MetaspaceAux::allocated_used_bytes();
  72.108  
  72.109 @@ -1322,7 +1334,7 @@
  72.110      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  72.111  
  72.112      {
  72.113 -      TraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, gclog_or_tty);
  72.114 +      GCTraceTime t(GCCauseString("Full GC", gc_cause()), G1Log::fine(), true, NULL);
  72.115        TraceCollectorStats tcs(g1mm()->full_collection_counters());
  72.116        TraceMemoryManagerStats tms(true /* fullGC */, gc_cause());
  72.117  
  72.118 @@ -1351,7 +1363,7 @@
  72.119  
  72.120        verify_before_gc();
  72.121  
  72.122 -      pre_full_gc_dump();
  72.123 +      pre_full_gc_dump(gc_timer);
  72.124  
  72.125        COMPILER2_PRESENT(DerivedPointerTable::clear());
  72.126  
  72.127 @@ -1433,7 +1445,7 @@
  72.128  
  72.129        reset_gc_time_stamp();
  72.130        // Since everything potentially moved, we will clear all remembered
  72.131 -      // sets, and clear all cards.  Later we will rebuild remebered
  72.132 +      // sets, and clear all cards.  Later we will rebuild remembered
  72.133        // sets. We will also reset the GC time stamps of the regions.
  72.134        clear_rsets_post_compaction();
  72.135        check_gc_time_stamps();
  72.136 @@ -1553,8 +1565,12 @@
  72.137      }
  72.138  
  72.139      print_heap_after_gc();
  72.140 -
  72.141 -    post_full_gc_dump();
  72.142 +    trace_heap_after_gc(gc_tracer);
  72.143 +
  72.144 +    post_full_gc_dump(gc_timer);
  72.145 +
  72.146 +    gc_timer->register_gc_end(os::elapsed_counter());
  72.147 +    gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());
  72.148    }
  72.149  
  72.150    return true;
  72.151 @@ -1919,7 +1935,7 @@
  72.152    _ref_processor_stw(NULL),
  72.153    _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  72.154    _bot_shared(NULL),
  72.155 -  _evac_failure_scan_stack(NULL) ,
  72.156 +  _evac_failure_scan_stack(NULL),
  72.157    _mark_in_progress(false),
  72.158    _cg1r(NULL), _summary_bytes_used(0),
  72.159    _g1mm(NULL),
  72.160 @@ -1939,12 +1955,18 @@
  72.161    _surviving_young_words(NULL),
  72.162    _old_marking_cycles_started(0),
  72.163    _old_marking_cycles_completed(0),
  72.164 +  _concurrent_cycle_started(false),
  72.165    _in_cset_fast_test(NULL),
  72.166    _in_cset_fast_test_base(NULL),
  72.167    _dirty_cards_region_list(NULL),
  72.168    _worker_cset_start_region(NULL),
  72.169 -  _worker_cset_start_region_time_stamp(NULL) {
  72.170 -  _g1h = this; // To catch bugs.
  72.171 +  _worker_cset_start_region_time_stamp(NULL),
  72.172 +  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  72.173 +  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  72.174 +  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  72.175 +  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
  72.176 +
  72.177 +  _g1h = this;
  72.178    if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
  72.179      vm_exit_during_initialization("Failed necessary allocation.");
  72.180    }
  72.181 @@ -1959,13 +1981,14 @@
  72.182  
  72.183    _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
  72.184    _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
  72.185 +  _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
  72.186  
  72.187    for (int i = 0; i < n_queues; i++) {
  72.188      RefToScanQueue* q = new RefToScanQueue();
  72.189      q->initialize();
  72.190      _task_queues->register_queue(i, q);
  72.191 -  }
  72.192 -
  72.193 +    ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
  72.194 +  }
  72.195    clear_cset_start_regions();
  72.196  
  72.197    // Initialize the G1EvacuationFailureALot counters and flags.
  72.198 @@ -2025,7 +2048,7 @@
  72.199                                                   HeapRegion::GrainBytes);
  72.200  
  72.201    // It is important to do this in a way such that concurrent readers can't
  72.202 -  // temporarily think somethings in the heap.  (I've actually seen this
  72.203 +  // temporarily think something is in the heap.  (I've actually seen this
  72.204    // happen in asserts: DLD.)
  72.205    _reserved.set_word_size(0);
  72.206    _reserved.set_start((HeapWord*)heap_rs.base());
  72.207 @@ -2462,7 +2485,7 @@
  72.208    // We need to clear the "in_progress" flag in the CM thread before
  72.209    // we wake up any waiters (especially when ExplicitInvokesConcurrent
  72.210    // is set) so that if a waiter requests another System.gc() it doesn't
  72.211 -  // incorrectly see that a marking cyle is still in progress.
  72.212 +  // incorrectly see that a marking cycle is still in progress.
  72.213    if (concurrent) {
  72.214      _cmThread->clear_in_progress();
  72.215    }
  72.216 @@ -2474,6 +2497,49 @@
  72.217    FullGCCount_lock->notify_all();
  72.218  }
  72.219  
  72.220 +void G1CollectedHeap::register_concurrent_cycle_start(jlong start_time) {
  72.221 +  _concurrent_cycle_started = true;
  72.222 +  _gc_timer_cm->register_gc_start(start_time);
  72.223 +
  72.224 +  _gc_tracer_cm->report_gc_start(gc_cause(), _gc_timer_cm->gc_start());
  72.225 +  trace_heap_before_gc(_gc_tracer_cm);
  72.226 +}
  72.227 +
  72.228 +void G1CollectedHeap::register_concurrent_cycle_end() {
  72.229 +  if (_concurrent_cycle_started) {
  72.230 +    _gc_timer_cm->register_gc_end(os::elapsed_counter());
  72.231 +
  72.232 +    if (_cm->has_aborted()) {
  72.233 +      _gc_tracer_cm->report_concurrent_mode_failure();
  72.234 +    }
  72.235 +    _gc_tracer_cm->report_gc_end(_gc_timer_cm->gc_end(), _gc_timer_cm->time_partitions());
  72.236 +
  72.237 +    _concurrent_cycle_started = false;
  72.238 +  }
  72.239 +}
  72.240 +
  72.241 +void G1CollectedHeap::trace_heap_after_concurrent_cycle() {
  72.242 +  if (_concurrent_cycle_started) {
  72.243 +    trace_heap_after_gc(_gc_tracer_cm);
  72.244 +  }
  72.245 +}
  72.246 +
  72.247 +G1YCType G1CollectedHeap::yc_type() {
  72.248 +  bool is_young = g1_policy()->gcs_are_young();
  72.249 +  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
  72.250 +  bool is_during_mark = mark_in_progress();
  72.251 +
  72.252 +  if (is_initial_mark) {
  72.253 +    return InitialMark;
  72.254 +  } else if (is_during_mark) {
  72.255 +    return DuringMark;
  72.256 +  } else if (is_young) {
  72.257 +    return Normal;
  72.258 +  } else {
  72.259 +    return Mixed;
  72.260 +  }
  72.261 +}
  72.262 +
  72.263  void G1CollectedHeap::collect(GCCause::Cause cause) {
  72.264    assert_heap_not_locked();
  72.265  
  72.266 @@ -2676,13 +2742,13 @@
  72.267              break;
  72.268            }
  72.269  
  72.270 -          // Noone should have claimed it directly. We can given
  72.271 +          // No one should have claimed it directly. We can given
  72.272            // that we claimed its "starts humongous" region.
  72.273            assert(chr->claim_value() != claim_value, "sanity");
  72.274            assert(chr->humongous_start_region() == r, "sanity");
  72.275  
  72.276            if (chr->claimHeapRegion(claim_value)) {
  72.277 -            // we should always be able to claim it; noone else should
  72.278 +            // we should always be able to claim it; no one else should
  72.279              // be trying to claim this region
  72.280  
  72.281              bool res2 = cl->doHeapRegion(chr);
  72.282 @@ -2976,7 +3042,7 @@
  72.283    // the min TLAB size.
  72.284  
  72.285    // Also, this value can be at most the humongous object threshold,
  72.286 -  // since we can't allow tlabs to grow big enough to accomodate
  72.287 +  // since we can't allow tlabs to grow big enough to accommodate
  72.288    // humongous objects.
  72.289  
  72.290    HeapRegion* hr = _mutator_alloc_region.get();
  72.291 @@ -3743,10 +3809,15 @@
  72.292      return false;
  72.293    }
  72.294  
  72.295 +  _gc_timer_stw->register_gc_start(os::elapsed_counter());
  72.296 +
  72.297 +  _gc_tracer_stw->report_gc_start(gc_cause(), _gc_timer_stw->gc_start());
  72.298 +
  72.299    SvcGCMarker sgcm(SvcGCMarker::MINOR);
  72.300    ResourceMark rm;
  72.301  
  72.302    print_heap_before_gc();
  72.303 +  trace_heap_before_gc(_gc_tracer_stw);
  72.304  
  72.305    HRSPhaseSetter x(HRSPhaseEvacuation);
  72.306    verify_region_sets_optional();
  72.307 @@ -3771,11 +3842,17 @@
  72.308  
  72.309    // Inner scope for scope based logging, timers, and stats collection
  72.310    {
  72.311 +    EvacuationInfo evacuation_info;
  72.312 +
  72.313      if (g1_policy()->during_initial_mark_pause()) {
  72.314        // We are about to start a marking cycle, so we increment the
  72.315        // full collection counter.
  72.316        increment_old_marking_cycles_started();
  72.317 +      register_concurrent_cycle_start(_gc_timer_stw->gc_start());
  72.318      }
  72.319 +
  72.320 +    _gc_tracer_stw->report_yc_type(yc_type());
  72.321 +
  72.322      TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
  72.323  
  72.324      int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
  72.325 @@ -3885,7 +3962,7 @@
  72.326          g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
  72.327  #endif // YOUNG_LIST_VERBOSE
  72.328  
  72.329 -        g1_policy()->finalize_cset(target_pause_time_ms);
  72.330 +        g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
  72.331  
  72.332          _cm->note_start_of_gc();
  72.333          // We should not verify the per-thread SATB buffers given that
  72.334 @@ -3921,10 +3998,10 @@
  72.335          setup_surviving_young_words();
  72.336  
  72.337          // Initialize the GC alloc regions.
  72.338 -        init_gc_alloc_regions();
  72.339 +        init_gc_alloc_regions(evacuation_info);
  72.340  
  72.341          // Actually do the work...
  72.342 -        evacuate_collection_set();
  72.343 +        evacuate_collection_set(evacuation_info);
  72.344  
  72.345          // We do this to mainly verify the per-thread SATB buffers
  72.346          // (which have been filtered by now) since we didn't verify
  72.347 @@ -3936,7 +4013,7 @@
  72.348                                   true  /* verify_thread_buffers */,
  72.349                                   true  /* verify_fingers */);
  72.350  
  72.351 -        free_collection_set(g1_policy()->collection_set());
  72.352 +        free_collection_set(g1_policy()->collection_set(), evacuation_info);
  72.353          g1_policy()->clear_collection_set();
  72.354  
  72.355          cleanup_surviving_young_words();
  72.356 @@ -3964,13 +4041,19 @@
  72.357  #endif // YOUNG_LIST_VERBOSE
  72.358  
  72.359          g1_policy()->record_survivor_regions(_young_list->survivor_length(),
  72.360 -                                            _young_list->first_survivor_region(),
  72.361 -                                            _young_list->last_survivor_region());
  72.362 +                                             _young_list->first_survivor_region(),
  72.363 +                                             _young_list->last_survivor_region());
  72.364  
  72.365          _young_list->reset_auxilary_lists();
  72.366  
  72.367          if (evacuation_failed()) {
  72.368            _summary_bytes_used = recalculate_used();
  72.369 +          uint n_queues = MAX2((int)ParallelGCThreads, 1);
  72.370 +          for (uint i = 0; i < n_queues; i++) {
  72.371 +            if (_evacuation_failed_info_array[i].has_failed()) {
  72.372 +              _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
  72.373 +            }
  72.374 +          }
  72.375          } else {
  72.376            // The "used" of the the collection set have already been subtracted
  72.377            // when they were freed.  Add in the bytes evacuated.
  72.378 @@ -4013,7 +4096,7 @@
  72.379            }
  72.380          }
  72.381  
  72.382 -        // We redo the verificaiton but now wrt to the new CSet which
  72.383 +        // We redo the verification but now wrt to the new CSet which
  72.384          // has just got initialized after the previous CSet was freed.
  72.385          _cm->verify_no_cset_oops(true  /* verify_stacks */,
  72.386                                   true  /* verify_enqueued_buffers */,
  72.387 @@ -4026,7 +4109,7 @@
  72.388          // investigate this in CR 7178365.
  72.389          double sample_end_time_sec = os::elapsedTime();
  72.390          double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
  72.391 -        g1_policy()->record_collection_pause_end(pause_time_ms);
  72.392 +        g1_policy()->record_collection_pause_end(pause_time_ms, evacuation_info);
  72.393  
  72.394          MemoryService::track_memory_usage();
  72.395  
  72.396 @@ -4093,14 +4176,19 @@
  72.397      TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  72.398  
  72.399      print_heap_after_gc();
  72.400 +    trace_heap_after_gc(_gc_tracer_stw);
  72.401  
  72.402      // We must call G1MonitoringSupport::update_sizes() in the same scoping level
  72.403      // as an active TraceMemoryManagerStats object (i.e. before the destructor for the
  72.404      // TraceMemoryManagerStats is called) so that the G1 memory pools are updated
  72.405      // before any GC notifications are raised.
  72.406      g1mm()->update_sizes();
  72.407 -  }
  72.408 -
  72.409 +
  72.410 +    _gc_tracer_stw->report_evacuation_info(&evacuation_info);
  72.411 +    _gc_tracer_stw->report_tenuring_threshold(_g1_policy->tenuring_threshold());
  72.412 +    _gc_timer_stw->register_gc_end(os::elapsed_counter());
  72.413 +    _gc_tracer_stw->report_gc_end(_gc_timer_stw->gc_end(), _gc_timer_stw->time_partitions());
  72.414 +  }
  72.415    // It should now be safe to tell the concurrent mark thread to start
  72.416    // without its logging output interfering with the logging output
  72.417    // that came from the pause.
  72.418 @@ -4152,7 +4240,7 @@
  72.419    assert(_mutator_alloc_region.get() == NULL, "post-condition");
  72.420  }
  72.421  
  72.422 -void G1CollectedHeap::init_gc_alloc_regions() {
  72.423 +void G1CollectedHeap::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
  72.424    assert_at_safepoint(true /* should_be_vm_thread */);
  72.425  
  72.426    _survivor_gc_alloc_region.init();
  72.427 @@ -4167,7 +4255,7 @@
  72.428    // a cleanup and it should be on the free list now), or
  72.429    // d) it's humongous (this means that it was emptied
  72.430    // during a cleanup and was added to the free list, but
  72.431 -  // has been subseqently used to allocate a humongous
  72.432 +  // has been subsequently used to allocate a humongous
  72.433    // object that may be less than the region size).
  72.434    if (retained_region != NULL &&
  72.435        !retained_region->in_collection_set() &&
  72.436 @@ -4184,10 +4272,13 @@
  72.437      retained_region->note_start_of_copying(during_im);
  72.438      _old_gc_alloc_region.set(retained_region);
  72.439      _hr_printer.reuse(retained_region);
  72.440 -  }
  72.441 -}
  72.442 -
  72.443 -void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers) {
  72.444 +    evacuation_info.set_alloc_regions_used_before(retained_region->used());
  72.445 +  }
  72.446 +}
  72.447 +
  72.448 +void G1CollectedHeap::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
  72.449 +  evacuation_info.set_allocation_regions(_survivor_gc_alloc_region.count() +
  72.450 +                                         _old_gc_alloc_region.count());
  72.451    _survivor_gc_alloc_region.release();
  72.452    // If we have an old GC alloc region to release, we'll save it in
  72.453    // _retained_old_gc_alloc_region. If we don't
  72.454 @@ -4270,7 +4361,7 @@
  72.455  }
  72.456  
  72.457  oop
  72.458 -G1CollectedHeap::handle_evacuation_failure_par(OopsInHeapRegionClosure* cl,
  72.459 +G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
  72.460                                                 oop old) {
  72.461    assert(obj_in_cs(old),
  72.462           err_msg("obj: "PTR_FORMAT" should still be in the CSet",
  72.463 @@ -4279,7 +4370,12 @@
  72.464    oop forward_ptr = old->forward_to_atomic(old);
  72.465    if (forward_ptr == NULL) {
  72.466      // Forward-to-self succeeded.
  72.467 -
  72.468 +    assert(_par_scan_state != NULL, "par scan state");
  72.469 +    OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  72.470 +    uint queue_num = _par_scan_state->queue_num();
  72.471 +
  72.472 +    _evacuation_failed = true;
  72.473 +    _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
  72.474      if (_evac_failure_closure != cl) {
  72.475        MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
  72.476        assert(!_drain_in_progress,
  72.477 @@ -4310,8 +4406,6 @@
  72.478  }
  72.479  
  72.480  void G1CollectedHeap::handle_evacuation_failure_common(oop old, markOop m) {
  72.481 -  set_evacuation_failed(true);
  72.482 -
  72.483    preserve_mark_if_necessary(old, m);
  72.484  
  72.485    HeapRegion* r = heap_region_containing(old);
  72.486 @@ -4561,8 +4655,7 @@
  72.487    if (obj_ptr == NULL) {
  72.488      // This will either forward-to-self, or detect that someone else has
  72.489      // installed a forwarding pointer.
  72.490 -    OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
  72.491 -    return _g1->handle_evacuation_failure_par(cl, old);
  72.492 +    return _g1->handle_evacuation_failure_par(_par_scan_state, old);
  72.493    }
  72.494  
  72.495    oop obj = oop(obj_ptr);
  72.496 @@ -5166,7 +5259,7 @@
  72.497        // will be copied, the reference field set to point to the
  72.498        // new location, and the RSet updated. Otherwise we need to
  72.499        // use the the non-heap or metadata closures directly to copy
  72.500 -      // the refernt object and update the pointer, while avoiding
  72.501 +      // the referent object and update the pointer, while avoiding
  72.502        // updating the RSet.
  72.503  
  72.504        if (_g1h->is_in_g1_reserved(p)) {
  72.505 @@ -5334,7 +5427,7 @@
  72.506    }
  72.507  };
  72.508  
  72.509 -// Driver routine for parallel reference enqueing.
  72.510 +// Driver routine for parallel reference enqueueing.
  72.511  // Creates an instance of the ref enqueueing gang
  72.512  // task and has the worker threads execute it.
  72.513  
  72.514 @@ -5463,7 +5556,7 @@
  72.515    // processor would have seen that the reference object had already
  72.516    // been 'discovered' and would have skipped discovering the reference,
  72.517    // but would not have treated the reference object as a regular oop.
  72.518 -  // As a reult the copy closure would not have been applied to the
  72.519 +  // As a result the copy closure would not have been applied to the
  72.520    // referent object.
  72.521    //
  72.522    // We need to explicitly copy these referent objects - the references
  72.523 @@ -5539,21 +5632,28 @@
  72.524    // Setup the soft refs policy...
  72.525    rp->setup_policy(false);
  72.526  
  72.527 +  ReferenceProcessorStats stats;
  72.528    if (!rp->processing_is_mt()) {
  72.529      // Serial reference processing...
  72.530 -    rp->process_discovered_references(&is_alive,
  72.531 -                                      &keep_alive,
  72.532 -                                      &drain_queue,
  72.533 -                                      NULL);
  72.534 +    stats = rp->process_discovered_references(&is_alive,
  72.535 +                                              &keep_alive,
  72.536 +                                              &drain_queue,
  72.537 +                                              NULL,
  72.538 +                                              _gc_timer_stw);
  72.539    } else {
  72.540      // Parallel reference processing
  72.541      assert(rp->num_q() == no_of_gc_workers, "sanity");
  72.542      assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
  72.543  
  72.544      G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
  72.545 -    rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
  72.546 -  }
  72.547 -
  72.548 +    stats = rp->process_discovered_references(&is_alive,
  72.549 +                                              &keep_alive,
  72.550 +                                              &drain_queue,
  72.551 +                                              &par_task_executor,
  72.552 +                                              _gc_timer_stw);
  72.553 +  }
  72.554 +
  72.555 +  _gc_tracer_stw->report_gc_reference_stats(stats);
  72.556    // We have completed copying any necessary live referent objects
  72.557    // (that were not copied during the actual pause) so we can
  72.558    // retire any active alloc buffers
  72.559 @@ -5577,7 +5677,7 @@
  72.560      // Serial reference processing...
  72.561      rp->enqueue_discovered_references();
  72.562    } else {
  72.563 -    // Parallel reference enqueuing
  72.564 +    // Parallel reference enqueueing
  72.565  
  72.566      assert(no_of_gc_workers == workers()->active_workers(),
  72.567             "Need to reset active workers");
  72.568 @@ -5594,15 +5694,15 @@
  72.569    // FIXME
  72.570    // CM's reference processing also cleans up the string and symbol tables.
  72.571    // Should we do that here also? We could, but it is a serial operation
  72.572 -  // and could signicantly increase the pause time.
  72.573 +  // and could significantly increase the pause time.
  72.574  
  72.575    double ref_enq_time = os::elapsedTime() - ref_enq_start;
  72.576    g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
  72.577  }
  72.578  
  72.579 -void G1CollectedHeap::evacuate_collection_set() {
  72.580 +void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
  72.581    _expand_heap_after_alloc_failure = true;
  72.582 -  set_evacuation_failed(false);
  72.583 +  _evacuation_failed = false;
  72.584  
  72.585    // Should G1EvacuationFailureALot be in effect for this GC?
  72.586    NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
  72.587 @@ -5691,7 +5791,7 @@
  72.588      JNIHandles::weak_oops_do(&is_alive, &keep_alive);
  72.589    }
  72.590  
  72.591 -  release_gc_alloc_regions(n_workers);
  72.592 +  release_gc_alloc_regions(n_workers, evacuation_info);
  72.593    g1_rem_set()->cleanup_after_oops_into_collection_set_do();
  72.594  
  72.595    // Reset and re-enable the hot card cache.
  72.596 @@ -5714,7 +5814,7 @@
  72.597    // Enqueue any remaining references remaining on the STW
  72.598    // reference processor's discovered lists. We need to do
  72.599    // this after the card table is cleaned (and verified) as
  72.600 -  // the act of enqueuing entries on to the pending list
  72.601 +  // the act of enqueueing entries on to the pending list
  72.602    // will log these updates (and dirty their associated
  72.603    // cards). We need these updates logged to update any
  72.604    // RSets.
  72.605 @@ -5942,7 +6042,7 @@
  72.606    g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
  72.607  }
  72.608  
  72.609 -void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
  72.610 +void G1CollectedHeap::free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info) {
  72.611    size_t pre_used = 0;
  72.612    FreeRegionList local_free_list("Local List for CSet Freeing");
  72.613  
  72.614 @@ -6028,10 +6128,12 @@
  72.615        cur->set_evacuation_failed(false);
  72.616        // The region is now considered to be old.
  72.617        _old_set.add(cur);
  72.618 +      evacuation_info.increment_collectionset_used_after(cur->used());
  72.619      }
  72.620      cur = next;
  72.621    }
  72.622  
  72.623 +  evacuation_info.set_regions_freed(local_free_list.length());
  72.624    policy->record_max_rs_lengths(rs_lengths);
  72.625    policy->cset_regions_freed();
  72.626  
    73.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Jun 07 09:33:01 2013 -0700
    73.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Mon Jun 10 11:30:51 2013 +0200
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -26,10 +26,12 @@
   73.11  #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
   73.12  
   73.13  #include "gc_implementation/g1/concurrentMark.hpp"
   73.14 +#include "gc_implementation/g1/evacuationInfo.hpp"
   73.15  #include "gc_implementation/g1/g1AllocRegion.hpp"
   73.16  #include "gc_implementation/g1/g1HRPrinter.hpp"
   73.17 +#include "gc_implementation/g1/g1MonitoringSupport.hpp"
   73.18  #include "gc_implementation/g1/g1RemSet.hpp"
   73.19 -#include "gc_implementation/g1/g1MonitoringSupport.hpp"
   73.20 +#include "gc_implementation/g1/g1YCTypes.hpp"
   73.21  #include "gc_implementation/g1/heapRegionSeq.hpp"
   73.22  #include "gc_implementation/g1/heapRegionSets.hpp"
   73.23  #include "gc_implementation/shared/hSpaceCounters.hpp"
   73.24 @@ -61,7 +63,12 @@
   73.25  class ConcurrentMark;
   73.26  class ConcurrentMarkThread;
   73.27  class ConcurrentG1Refine;
   73.28 +class ConcurrentGCTimer;
   73.29  class GenerationCounters;
   73.30 +class STWGCTimer;
   73.31 +class G1NewTracer;
   73.32 +class G1OldTracer;
   73.33 +class EvacuationFailedInfo;
   73.34  
   73.35  typedef OverflowTaskQueue<StarTask, mtGC>         RefToScanQueue;
   73.36  typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
   73.37 @@ -160,7 +167,7 @@
   73.38  // An instance is embedded into the G1CH and used as the
   73.39  // (optional) _is_alive_non_header closure in the STW
   73.40  // reference processor. It is also extensively used during
   73.41 -// refence processing during STW evacuation pauses.
   73.42 +// reference processing during STW evacuation pauses.
   73.43  class G1STWIsAliveClosure: public BoolObjectClosure {
   73.44    G1CollectedHeap* _g1;
   73.45  public:
   73.46 @@ -323,10 +330,10 @@
   73.47    void release_mutator_alloc_region();
   73.48  
   73.49    // It initializes the GC alloc regions at the start of a GC.
   73.50 -  void init_gc_alloc_regions();
   73.51 +  void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
   73.52  
   73.53    // It releases the GC alloc regions at the end of a GC.
   73.54 -  void release_gc_alloc_regions(uint no_of_gc_workers);
   73.55 +  void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
   73.56  
   73.57    // It does any cleanup that needs to be done on the GC alloc regions
   73.58    // before a Full GC.
   73.59 @@ -389,6 +396,8 @@
   73.60    // concurrent cycles) we have completed.
   73.61    volatile unsigned int _old_marking_cycles_completed;
   73.62  
   73.63 +  bool _concurrent_cycle_started;
   73.64 +
   73.65    // This is a non-product method that is helpful for testing. It is
   73.66    // called at the end of a GC and artificially expands the heap by
   73.67    // allocating a number of dead regions. This way we can induce very
   73.68 @@ -734,6 +743,12 @@
   73.69      return _old_marking_cycles_completed;
   73.70    }
   73.71  
   73.72 +  void register_concurrent_cycle_start(jlong start_time);
   73.73 +  void register_concurrent_cycle_end();
   73.74 +  void trace_heap_after_concurrent_cycle();
   73.75 +
   73.76 +  G1YCType yc_type();
   73.77 +
   73.78    G1HRPrinter* hr_printer() { return &_hr_printer; }
   73.79  
   73.80  protected:
   73.81 @@ -769,7 +784,7 @@
   73.82    bool do_collection_pause_at_safepoint(double target_pause_time_ms);
   73.83  
   73.84    // Actually do the work of evacuating the collection set.
   73.85 -  void evacuate_collection_set();
   73.86 +  void evacuate_collection_set(EvacuationInfo& evacuation_info);
   73.87  
   73.88    // The g1 remembered set of the heap.
   73.89    G1RemSet* _g1_rem_set;
   73.90 @@ -794,7 +809,7 @@
   73.91  
   73.92    // After a collection pause, make the regions in the CS into free
   73.93    // regions.
   73.94 -  void free_collection_set(HeapRegion* cs_head);
   73.95 +  void free_collection_set(HeapRegion* cs_head, EvacuationInfo& evacuation_info);
   73.96  
   73.97    // Abandon the current collection set without recording policy
   73.98    // statistics or updating free lists.
   73.99 @@ -863,9 +878,7 @@
  73.100    // True iff a evacuation has failed in the current collection.
  73.101    bool _evacuation_failed;
  73.102  
  73.103 -  // Set the attribute indicating whether evacuation has failed in the
  73.104 -  // current collection.
  73.105 -  void set_evacuation_failed(bool b) { _evacuation_failed = b; }
  73.106 +  EvacuationFailedInfo* _evacuation_failed_info_array;
  73.107  
  73.108    // Failed evacuations cause some logical from-space objects to have
  73.109    // forwarding pointers to themselves.  Reset them.
  73.110 @@ -907,7 +920,7 @@
  73.111    void finalize_for_evac_failure();
  73.112  
  73.113    // An attempt to evacuate "obj" has failed; take necessary steps.
  73.114 -  oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
  73.115 +  oop handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state, oop obj);
  73.116    void handle_evacuation_failure_common(oop obj, markOop m);
  73.117  
  73.118  #ifndef PRODUCT
  73.119 @@ -939,13 +952,13 @@
  73.120    inline bool evacuation_should_fail();
  73.121  
  73.122    // Reset the G1EvacuationFailureALot counters.  Should be called at
  73.123 -  // the end of an evacuation pause in which an evacuation failure ocurred.
  73.124 +  // the end of an evacuation pause in which an evacuation failure occurred.
  73.125    inline void reset_evacuation_should_fail();
  73.126  #endif // !PRODUCT
  73.127  
  73.128    // ("Weak") Reference processing support.
  73.129    //
  73.130 -  // G1 has 2 instances of the referece processor class. One
  73.131 +  // G1 has 2 instances of the reference processor class. One
  73.132    // (_ref_processor_cm) handles reference object discovery
  73.133    // and subsequent processing during concurrent marking cycles.
  73.134    //
  73.135 @@ -995,6 +1008,12 @@
  73.136    // The (stw) reference processor...
  73.137    ReferenceProcessor* _ref_processor_stw;
  73.138  
  73.139 +  STWGCTimer* _gc_timer_stw;
  73.140 +  ConcurrentGCTimer* _gc_timer_cm;
  73.141 +
  73.142 +  G1OldTracer* _gc_tracer_cm;
  73.143 +  G1NewTracer* _gc_tracer_stw;
  73.144 +
  73.145    // During reference object discovery, the _is_alive_non_header
  73.146    // closure (if non-null) is applied to the referent object to
  73.147    // determine whether the referent is live. If so then the
  73.148 @@ -1140,9 +1159,12 @@
  73.149    // The STW reference processor....
  73.150    ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
  73.151  
  73.152 -  // The Concurent Marking reference processor...
  73.153 +  // The Concurrent Marking reference processor...
  73.154    ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
  73.155  
  73.156 +  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  73.157 +  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }
  73.158 +
  73.159    virtual size_t capacity() const;
  73.160    virtual size_t used() const;
  73.161    // This should be called when we're not holding the heap lock. The
  73.162 @@ -1200,7 +1222,7 @@
  73.163  
  73.164    // verify_region_sets_optional() is planted in the code for
  73.165    // list verification in non-product builds (and it can be enabled in
  73.166 -  // product builds by definning HEAP_REGION_SET_FORCE_VERIFY to be 1).
  73.167 +  // product builds by defining HEAP_REGION_SET_FORCE_VERIFY to be 1).
  73.168  #if HEAP_REGION_SET_FORCE_VERIFY
  73.169    void verify_region_sets_optional() {
  73.170      verify_region_sets();
  73.171 @@ -1266,7 +1288,7 @@
  73.172    // The same as above but assume that the caller holds the Heap_lock.
  73.173    void collect_locked(GCCause::Cause cause);
  73.174  
  73.175 -  // True iff a evacuation has failed in the most-recent collection.
  73.176 +  // True iff an evacuation has failed in the most-recent collection.
  73.177    bool evacuation_failed() { return _evacuation_failed; }
  73.178  
  73.179    // It will free a region if it has allocated objects in it that are
  73.180 @@ -1554,6 +1576,7 @@
  73.181  
  73.182    // Override; it uses the "prev" marking information
  73.183    virtual void verify(bool silent);
  73.184 +
  73.185    virtual void print_on(outputStream* st) const;
  73.186    virtual void print_extended_on(outputStream* st) const;
  73.187    virtual void print_on_error(outputStream* st) const;
  73.188 @@ -1839,7 +1862,7 @@
  73.189    G1ParScanHeapEvacClosure*     _evac_cl;
  73.190    G1ParScanPartialArrayClosure* _partial_scan_cl;
  73.191  
  73.192 -  int _hash_seed;
  73.193 +  int  _hash_seed;
  73.194    uint _queue_num;
  73.195  
  73.196    size_t _term_attempts;
    74.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Jun 07 09:33:01 2013 -0700
    74.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Mon Jun 10 11:30:51 2013 +0200
    74.3 @@ -909,7 +909,7 @@
    74.4  // Anything below that is considered to be zero
    74.5  #define MIN_TIMER_GRANULARITY 0.0000001
    74.6  
    74.7 -void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms) {
    74.8 +void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info) {
    74.9    double end_time_sec = os::elapsedTime();
   74.10    assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
   74.11           "otherwise, the subtraction below does not make sense");
   74.12 @@ -941,6 +941,9 @@
   74.13    _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
   74.14                            end_time_sec, false);
   74.15  
   74.16 +  evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
   74.17 +  evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
   74.18 +
   74.19    if (update_stats) {
   74.20      _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
   74.21      // this is where we update the allocation rate of the application
   74.22 @@ -1896,7 +1899,7 @@
   74.23  }
   74.24  
   74.25  
   74.26 -void G1CollectorPolicy::finalize_cset(double target_pause_time_ms) {
   74.27 +void G1CollectorPolicy::finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info) {
   74.28    double young_start_time_sec = os::elapsedTime();
   74.29  
   74.30    YoungList* young_list = _g1->young_list();
   74.31 @@ -2102,6 +2105,7 @@
   74.32  
   74.33    double non_young_end_time_sec = os::elapsedTime();
   74.34    phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
   74.35 +  evacuation_info.set_collectionset_regions(cset_region_length());
   74.36  }
   74.37  
   74.38  void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
    75.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Fri Jun 07 09:33:01 2013 -0700
    75.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp	Mon Jun 10 11:30:51 2013 +0200
    75.3 @@ -671,7 +671,7 @@
    75.4  
    75.5    // Record the start and end of an evacuation pause.
    75.6    void record_collection_pause_start(double start_time_sec);
    75.7 -  void record_collection_pause_end(double pause_time_ms);
    75.8 +  void record_collection_pause_end(double pause_time_ms, EvacuationInfo& evacuation_info);
    75.9  
   75.10    // Record the start and end of a full collection.
   75.11    void record_full_collection_start();
   75.12 @@ -720,7 +720,7 @@
   75.13    // Choose a new collection set.  Marks the chosen regions as being
   75.14    // "in_collection_set", and links them together.  The head and number of
   75.15    // the collection set are available via access methods.
   75.16 -  void finalize_cset(double target_pause_time_ms);
   75.17 +  void finalize_cset(double target_pause_time_ms, EvacuationInfo& evacuation_info);
   75.18  
   75.19    // The head of the list (via "next_in_collection_set()") representing the
   75.20    // current collection set.
   75.21 @@ -879,6 +879,7 @@
   75.22    ageTable _survivors_age_table;
   75.23  
   75.24  public:
   75.25 +  uint tenuring_threshold() const { return _tenuring_threshold; }
   75.26  
   75.27    inline GCAllocPurpose
   75.28      evacuation_destination(HeapRegion* src_region, uint age, size_t word_sz) {
    76.1 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Fri Jun 07 09:33:01 2013 -0700
    76.2 +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp	Mon Jun 10 11:30:51 2013 +0200
    76.3 @@ -38,7 +38,7 @@
    76.4    NOT_PRODUCT(static const T _uninitialized;)
    76.5  
    76.6    // We are caching the sum and average to only have to calculate them once.
    76.7 -  // This is not done in an MT-safe way. It is intetened to allow single
    76.8 +  // This is not done in an MT-safe way. It is intended to allow single
    76.9    // threaded code to call sum() and average() multiple times in any order
   76.10    // without having to worry about the cost.
   76.11    bool   _has_new_data;
    77.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Jun 07 09:33:01 2013 -0700
    77.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Mon Jun 10 11:30:51 2013 +0200
    77.3 @@ -1,5 +1,5 @@
    77.4  /*
    77.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    77.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    77.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.8   *
    77.9   * This code is free software; you can redistribute it and/or modify it
   77.10 @@ -31,6 +31,10 @@
   77.11  #include "code/icBuffer.hpp"
   77.12  #include "gc_implementation/g1/g1Log.hpp"
   77.13  #include "gc_implementation/g1/g1MarkSweep.hpp"
   77.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
   77.15 +#include "gc_implementation/shared/gcTimer.hpp"
   77.16 +#include "gc_implementation/shared/gcTrace.hpp"
   77.17 +#include "gc_implementation/shared/gcTraceTime.hpp"
   77.18  #include "memory/gcLocker.hpp"
   77.19  #include "memory/genCollectedHeap.hpp"
   77.20  #include "memory/modRefBarrierSet.hpp"
   77.21 @@ -119,7 +123,7 @@
   77.22  void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
   77.23                                      bool clear_all_softrefs) {
   77.24    // Recursively traverse all live objects and mark them
   77.25 -  TraceTime tm("phase 1", G1Log::fine() && Verbose, true, gclog_or_tty);
   77.26 +  GCTraceTime tm("phase 1", G1Log::fine() && Verbose, true, gc_timer());
   77.27    GenMarkSweep::trace(" 1");
   77.28  
   77.29    SharedHeap* sh = SharedHeap::heap();
   77.30 @@ -139,10 +143,13 @@
   77.31    assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
   77.32  
   77.33    rp->setup_policy(clear_all_softrefs);
   77.34 -  rp->process_discovered_references(&GenMarkSweep::is_alive,
   77.35 -                                    &GenMarkSweep::keep_alive,
   77.36 -                                    &GenMarkSweep::follow_stack_closure,
   77.37 -                                    NULL);
   77.38 +  const ReferenceProcessorStats& stats =
   77.39 +    rp->process_discovered_references(&GenMarkSweep::is_alive,
   77.40 +                                      &GenMarkSweep::keep_alive,
   77.41 +                                      &GenMarkSweep::follow_stack_closure,
   77.42 +                                      NULL,
   77.43 +                                      gc_timer());
   77.44 +  gc_tracer()->report_gc_reference_stats(stats);
   77.45  
   77.46  
   77.47    // This is the point where the entire marking should have completed.
   77.48 @@ -185,6 +192,8 @@
   77.49        gclog_or_tty->print_cr("]");
   77.50      }
   77.51    }
   77.52 +
   77.53 +  gc_tracer()->report_object_count_after_gc(&GenMarkSweep::is_alive);
   77.54  }
   77.55  
   77.56  class G1PrepareCompactClosure: public HeapRegionClosure {
   77.57 @@ -257,7 +266,7 @@
   77.58  
   77.59    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   77.60  
   77.61 -  TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
   77.62 +  GCTraceTime tm("phase 2", G1Log::fine() && Verbose, true, gc_timer());
   77.63    GenMarkSweep::trace("2");
   77.64  
   77.65    // find the first region
   77.66 @@ -294,7 +303,7 @@
   77.67    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   77.68  
   77.69    // Adjust the pointers to reflect the new locations
   77.70 -  TraceTime tm("phase 3", G1Log::fine() && Verbose, true, gclog_or_tty);
   77.71 +  GCTraceTime tm("phase 3", G1Log::fine() && Verbose, true, gc_timer());
   77.72    GenMarkSweep::trace("3");
   77.73  
   77.74    SharedHeap* sh = SharedHeap::heap();
   77.75 @@ -353,7 +362,7 @@
   77.76    // to use a higher index (saved from phase2) when verifying perm_gen.
   77.77    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   77.78  
   77.79 -  TraceTime tm("phase 4", G1Log::fine() && Verbose, true, gclog_or_tty);
   77.80 +  GCTraceTime tm("phase 4", G1Log::fine() && Verbose, true, gc_timer());
   77.81    GenMarkSweep::trace("4");
   77.82  
   77.83    G1SpaceCompactClosure blk;
    78.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Fri Jun 07 09:33:01 2013 -0700
    78.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp	Mon Jun 10 11:30:51 2013 +0200
    78.3 @@ -1,5 +1,5 @@
    78.4  /*
    78.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    78.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    78.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.8   *
    78.9   * This code is free software; you can redistribute it and/or modify it
   78.10 @@ -54,6 +54,9 @@
   78.11    static void invoke_at_safepoint(ReferenceProcessor* rp,
   78.12                                    bool clear_all_softrefs);
   78.13  
   78.14 +  static STWGCTimer* gc_timer() { return GenMarkSweep::_gc_timer; }
   78.15 +  static SerialOldTracer* gc_tracer() { return GenMarkSweep::_gc_tracer; }
   78.16 +
   78.17   private:
   78.18  
   78.19    // Mark live objects
    79.1 --- a/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Fri Jun 07 09:33:01 2013 -0700
    79.2 +++ b/src/share/vm/gc_implementation/g1/g1MonitoringSupport.hpp	Mon Jun 10 11:30:51 2013 +0200
    79.3 @@ -1,5 +1,5 @@
    79.4  /*
    79.5 - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    79.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
    79.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    79.8   *
    79.9   * This code is free software; you can redistribute it and/or modify it
   79.10 @@ -224,6 +224,7 @@
   79.11    // Monitoring support used by
   79.12    //   MemoryService
   79.13    //   jstat counters
   79.14 +  //   Tracing
   79.15  
   79.16    size_t overall_reserved()           { return _overall_reserved;     }
   79.17    size_t overall_committed()          { return _overall_committed;    }
    80.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    80.2 +++ b/src/share/vm/gc_implementation/g1/g1YCTypes.hpp	Mon Jun 10 11:30:51 2013 +0200
    80.3 @@ -0,0 +1,51 @@
    80.4 +/*
    80.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    80.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    80.7 + *
    80.8 + * This code is free software; you can redistribute it and/or modify it
    80.9 + * under the terms of the GNU General Public License version 2 only, as
   80.10 + * published by the Free Software Foundation.
   80.11 + *
   80.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   80.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   80.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   80.15 + * version 2 for more details (a copy is included in the LICENSE file that
   80.16 + * accompanied this code).
   80.17 + *
   80.18 + * You should have received a copy of the GNU General Public License version
   80.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   80.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   80.21 + *
   80.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   80.23 + * or visit www.oracle.com if you need additional information or have any
   80.24 + * questions.
   80.25 + *
   80.26 + */
   80.27 +
   80.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
   80.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
   80.30 +
   80.31 +#include "utilities/debug.hpp"
   80.32 +
   80.33 +enum G1YCType {
   80.34 +  Normal,
   80.35 +  InitialMark,
   80.36 +  DuringMark,
   80.37 +  Mixed,
   80.38 +  G1YCTypeEndSentinel
   80.39 +};
   80.40 +
   80.41 +class G1YCTypeHelper {
   80.42 + public:
   80.43 +  static const char* to_string(G1YCType type) {
   80.44 +    switch(type) {
   80.45 +      case Normal: return "Normal";
   80.46 +      case InitialMark: return "Initial Mark";
   80.47 +      case DuringMark: return "During Mark";
   80.48 +      case Mixed: return "Mixed";
   80.49 +      default: ShouldNotReachHere(); return NULL;
   80.50 +    }
   80.51 +  }
   80.52 +};
   80.53 +
   80.54 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1YCTYPES_HPP
    81.1 --- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Fri Jun 07 09:33:01 2013 -0700
    81.2 +++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp	Mon Jun 10 11:30:51 2013 +0200
    81.3 @@ -1,5 +1,5 @@
    81.4  /*
    81.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    81.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    81.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    81.8   *
    81.9   * This code is free software; you can redistribute it and/or modify it
   81.10 @@ -28,6 +28,8 @@
   81.11  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
   81.12  #include "gc_implementation/g1/g1Log.hpp"
   81.13  #include "gc_implementation/g1/vm_operations_g1.hpp"
   81.14 +#include "gc_implementation/shared/gcTimer.hpp"
   81.15 +#include "gc_implementation/shared/gcTraceTime.hpp"
   81.16  #include "gc_implementation/shared/isGCActiveMark.hpp"
   81.17  #include "gc_implementation/g1/vm_operations_g1.hpp"
   81.18  #include "runtime/interfaceSupport.hpp"
   81.19 @@ -227,7 +229,7 @@
   81.20  void VM_CGC_Operation::doit() {
   81.21    gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
   81.22    TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
   81.23 -  TraceTime t(_printGCMessage, G1Log::fine(), true, gclog_or_tty);
   81.24 +  GCTraceTime t(_printGCMessage, G1Log::fine(), true, G1CollectedHeap::heap()->gc_timer_cm());
   81.25    SharedHeap* sh = SharedHeap::heap();
   81.26    // This could go away if CollectedHeap gave access to _gc_is_active...
   81.27    if (sh != NULL) {
    82.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Fri Jun 07 09:33:01 2013 -0700
    82.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Mon Jun 10 11:30:51 2013 +0200
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    82.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -29,6 +29,11 @@
   82.11  #include "gc_implementation/shared/adaptiveSizePolicy.hpp"
   82.12  #include "gc_implementation/shared/ageTable.hpp"
   82.13  #include "gc_implementation/shared/parGCAllocBuffer.hpp"
   82.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
   82.15 +#include "gc_implementation/shared/gcTimer.hpp"
   82.16 +#include "gc_implementation/shared/gcTrace.hpp"
   82.17 +#include "gc_implementation/shared/gcTraceTime.hpp"
   82.18 +#include "gc_implementation/shared/copyFailedInfo.hpp"
   82.19  #include "gc_implementation/shared/spaceDecorator.hpp"
   82.20  #include "memory/defNewGeneration.inline.hpp"
   82.21  #include "memory/genCollectedHeap.hpp"
   82.22 @@ -75,7 +80,6 @@
   82.23                        work_queue_set_, &term_),
   82.24    _is_alive_closure(gen_), _scan_weak_ref_closure(gen_, this),
   82.25    _keep_alive_closure(&_scan_weak_ref_closure),
   82.26 -  _promotion_failure_size(0),
   82.27    _strong_roots_time(0.0), _term_time(0.0)
   82.28  {
   82.29    #if TASKQUEUE_STATS
   82.30 @@ -279,13 +283,10 @@
   82.31    }
   82.32  }
   82.33  
   82.34 -void ParScanThreadState::print_and_clear_promotion_failure_size() {
   82.35 -  if (_promotion_failure_size != 0) {
   82.36 -    if (PrintPromotionFailure) {
   82.37 -      gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
   82.38 -        _thread_num, _promotion_failure_size);
   82.39 -    }
   82.40 -    _promotion_failure_size = 0;
   82.41 +void ParScanThreadState::print_promotion_failure_size() {
   82.42 +  if (_promotion_failed_info.has_failed() && PrintPromotionFailure) {
   82.43 +    gclog_or_tty->print(" (%d: promotion failure size = " SIZE_FORMAT ") ",
   82.44 +                        _thread_num, _promotion_failed_info.first_size());
   82.45    }
   82.46  }
   82.47  
   82.48 @@ -305,6 +306,7 @@
   82.49  
   82.50    inline ParScanThreadState& thread_state(int i);
   82.51  
   82.52 +  void trace_promotion_failed(YoungGCTracer& gc_tracer);
   82.53    void reset(int active_workers, bool promotion_failed);
   82.54    void flush();
   82.55  
   82.56 @@ -353,13 +355,21 @@
   82.57    return ((ParScanThreadState*)_data)[i];
   82.58  }
   82.59  
   82.60 +void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
   82.61 +  for (int i = 0; i < length(); ++i) {
   82.62 +    if (thread_state(i).promotion_failed()) {
   82.63 +      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
   82.64 +      thread_state(i).promotion_failed_info().reset();
   82.65 +    }
   82.66 +  }
   82.67 +}
   82.68  
   82.69  void ParScanThreadStateSet::reset(int active_threads, bool promotion_failed)
   82.70  {
   82.71    _term.reset_for_reuse(active_threads);
   82.72    if (promotion_failed) {
   82.73      for (int i = 0; i < length(); ++i) {
   82.74 -      thread_state(i).print_and_clear_promotion_failure_size();
   82.75 +      thread_state(i).print_promotion_failure_size();
   82.76      }
   82.77    }
   82.78  }
   82.79 @@ -583,14 +593,6 @@
   82.80    gch->set_n_termination(active_workers);
   82.81  }
   82.82  
   82.83 -// The "i" passed to this method is the part of the work for
   82.84 -// this thread.  It is not the worker ID.  The "i" is derived
   82.85 -// from _started_workers which is incremented in internal_note_start()
   82.86 -// called in GangWorker loop() and which is called under the
   82.87 -// which is  called under the protection of the gang monitor and is
   82.88 -// called after a task is started.  So "i" is based on
   82.89 -// first-come-first-served.
   82.90 -
   82.91  void ParNewGenTask::work(uint worker_id) {
   82.92    GenCollectedHeap* gch = GenCollectedHeap::heap();
   82.93    // Since this is being done in a separate thread, need new resource
   82.94 @@ -876,16 +878,45 @@
   82.95  }
   82.96  
   82.97  
   82.98 +// A Generation that does parallel young-gen collection.
   82.99 +
  82.100  bool ParNewGeneration::_avoid_promotion_undo = false;
  82.101  
  82.102 -// A Generation that does parallel young-gen collection.
  82.103 +void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
  82.104 +  assert(_promo_failure_scan_stack.is_empty(), "post condition");
  82.105 +  _promo_failure_scan_stack.clear(true); // Clear cached segments.
  82.106 +
  82.107 +  remove_forwarding_pointers();
  82.108 +  if (PrintGCDetails) {
  82.109 +    gclog_or_tty->print(" (promotion failed)");
  82.110 +  }
  82.111 +  // All the spaces are in play for mark-sweep.
  82.112 +  swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  82.113 +  from()->set_next_compaction_space(to());
  82.114 +  gch->set_incremental_collection_failed();
  82.115 +  // Inform the next generation that a promotion failure occurred.
  82.116 +  _next_gen->promotion_failure_occurred();
  82.117 +
  82.118 +  // Trace promotion failure in the parallel GC threads
  82.119 +  thread_state_set.trace_promotion_failed(gc_tracer);
  82.120 +  // Single threaded code may have reported promotion failure to the global state
  82.121 +  if (_promotion_failed_info.has_failed()) {
  82.122 +    gc_tracer.report_promotion_failed(_promotion_failed_info);
  82.123 +  }
  82.124 +  // Reset the PromotionFailureALot counters.
  82.125 +  NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  82.126 +}
  82.127  
  82.128  void ParNewGeneration::collect(bool   full,
  82.129                                 bool   clear_all_soft_refs,
  82.130                                 size_t size,
  82.131                                 bool   is_tlab) {
  82.132    assert(full || size > 0, "otherwise we don't want to collect");
  82.133 +
  82.134    GenCollectedHeap* gch = GenCollectedHeap::heap();
  82.135 +
  82.136 +  _gc_timer->register_gc_start(os::elapsed_counter());
  82.137 +
  82.138    assert(gch->kind() == CollectedHeap::GenCollectedHeap,
  82.139      "not a CMS generational heap");
  82.140    AdaptiveSizePolicy* size_policy = gch->gen_policy()->size_policy();
  82.141 @@ -906,7 +937,7 @@
  82.142      set_avoid_promotion_undo(true);
  82.143    }
  82.144  
  82.145 -  // If the next generation is too full to accomodate worst-case promotion
  82.146 +  // If the next generation is too full to accommodate worst-case promotion
  82.147    // from this generation, pass on collection; let the next generation
  82.148    // do it.
  82.149    if (!collection_attempt_is_safe()) {
  82.150 @@ -915,6 +946,10 @@
  82.151    }
  82.152    assert(to()->is_empty(), "Else not collection_attempt_is_safe");
  82.153  
  82.154 +  ParNewTracer gc_tracer;
  82.155 +  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  82.156 +  gch->trace_heap_before_gc(&gc_tracer);
  82.157 +
  82.158    init_assuming_no_promotion_failure();
  82.159  
  82.160    if (UseAdaptiveSizePolicy) {
  82.161 @@ -922,7 +957,7 @@
  82.162      size_policy->minor_collection_begin();
  82.163    }
  82.164  
  82.165 -  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
  82.166 +  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  82.167    // Capture heap used before collection (for printing).
  82.168    size_t gch_prev_used = gch->used();
  82.169  
  82.170 @@ -975,17 +1010,21 @@
  82.171    rp->setup_policy(clear_all_soft_refs);
  82.172    // Can  the mt_degree be set later (at run_task() time would be best)?
  82.173    rp->set_active_mt_degree(active_workers);
  82.174 +  ReferenceProcessorStats stats;
  82.175    if (rp->processing_is_mt()) {
  82.176      ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
  82.177 -    rp->process_discovered_references(&is_alive, &keep_alive,
  82.178 -                                      &evacuate_followers, &task_executor);
  82.179 +    stats = rp->process_discovered_references(&is_alive, &keep_alive,
  82.180 +                                              &evacuate_followers, &task_executor,
  82.181 +                                              _gc_timer);
  82.182    } else {
  82.183      thread_state_set.flush();
  82.184      gch->set_par_threads(0);  // 0 ==> non-parallel.
  82.185      gch->save_marks();
  82.186 -    rp->process_discovered_references(&is_alive, &keep_alive,
  82.187 -                                      &evacuate_followers, NULL);
  82.188 +    stats = rp->process_discovered_references(&is_alive, &keep_alive,
  82.189 +                                              &evacuate_followers, NULL,
  82.190 +                                              _gc_timer);
  82.191    }
  82.192 +  gc_tracer.report_gc_reference_stats(stats);
  82.193    if (!promotion_failed()) {
  82.194      // Swap the survivor spaces.
  82.195      eden()->clear(SpaceDecorator::Mangle);
  82.196 @@ -1010,22 +1049,7 @@
  82.197  
  82.198      adjust_desired_tenuring_threshold();
  82.199    } else {
  82.200 -    assert(_promo_failure_scan_stack.is_empty(), "post condition");
  82.201 -    _promo_failure_scan_stack.clear(true); // Clear cached segments.
  82.202 -
  82.203 -    remove_forwarding_pointers();
  82.204 -    if (PrintGCDetails) {
  82.205 -      gclog_or_tty->print(" (promotion failed)");
  82.206 -    }
  82.207 -    // All the spaces are in play for mark-sweep.
  82.208 -    swap_spaces();  // Make life simpler for CMS || rescan; see 6483690.
  82.209 -    from()->set_next_compaction_space(to());
  82.210 -    gch->set_incremental_collection_failed();
  82.211 -    // Inform the next generation that a promotion failure occurred.
  82.212 -    _next_gen->promotion_failure_occurred();
  82.213 -
  82.214 -    // Reset the PromotionFailureALot counters.
  82.215 -    NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  82.216 +    handle_promotion_failed(gch, thread_state_set, gc_tracer);
  82.217    }
  82.218    // set new iteration safe limit for the survivor spaces
  82.219    from()->set_concurrent_iteration_safe_limit(from()->top());
  82.220 @@ -1065,6 +1089,13 @@
  82.221      rp->enqueue_discovered_references(NULL);
  82.222    }
  82.223    rp->verify_no_references_recorded();
  82.224 +
  82.225 +  gch->trace_heap_after_gc(&gc_tracer);
  82.226 +  gc_tracer.report_tenuring_threshold(tenuring_threshold());
  82.227 +
  82.228 +  _gc_timer->register_gc_end(os::elapsed_counter());
  82.229 +
  82.230 +  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
  82.231  }
  82.232  
  82.233  static int sum;
  82.234 @@ -1174,8 +1205,7 @@
  82.235        new_obj = old;
  82.236  
  82.237        preserve_mark_if_necessary(old, m);
  82.238 -      // Log the size of the maiden promotion failure
  82.239 -      par_scan_state->log_promotion_failure(sz);
  82.240 +      par_scan_state->register_promotion_failure(sz);
  82.241      }
  82.242  
  82.243      old->forward_to(new_obj);
  82.244 @@ -1300,8 +1330,7 @@
  82.245        failed_to_promote = true;
  82.246  
  82.247        preserve_mark_if_necessary(old, m);
  82.248 -      // Log the size of the maiden promotion failure
  82.249 -      par_scan_state->log_promotion_failure(sz);
  82.250 +      par_scan_state->register_promotion_failure(sz);
  82.251      }
  82.252    } else {
  82.253      // Is in to-space; do copying ourselves.
  82.254 @@ -1599,8 +1628,7 @@
  82.255  }
  82.256  #undef BUSY
  82.257  
  82.258 -void ParNewGeneration::ref_processor_init()
  82.259 -{
  82.260 +void ParNewGeneration::ref_processor_init() {
  82.261    if (_ref_processor == NULL) {
  82.262      // Allocate and initialize a reference processor
  82.263      _ref_processor =
    83.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Fri Jun 07 09:33:01 2013 -0700
    83.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.hpp	Mon Jun 10 11:30:51 2013 +0200
    83.3 @@ -25,7 +25,9 @@
    83.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
    83.5  #define SHARE_VM_GC_IMPLEMENTATION_PARNEW_PARNEWGENERATION_HPP
    83.6  
    83.7 +#include "gc_implementation/shared/gcTrace.hpp"
    83.8  #include "gc_implementation/shared/parGCAllocBuffer.hpp"
    83.9 +#include "gc_implementation/shared/copyFailedInfo.hpp"
   83.10  #include "memory/defNewGeneration.hpp"
   83.11  #include "utilities/taskqueue.hpp"
   83.12  
   83.13 @@ -105,7 +107,7 @@
   83.14  #endif // TASKQUEUE_STATS
   83.15  
   83.16    // Stats for promotion failure
   83.17 -  size_t _promotion_failure_size;
   83.18 +  PromotionFailedInfo _promotion_failed_info;
   83.19  
   83.20    // Timing numbers.
   83.21    double _start;
   83.22 @@ -180,13 +182,16 @@
   83.23    void undo_alloc_in_to_space(HeapWord* obj, size_t word_sz);
   83.24  
   83.25    // Promotion failure stats
   83.26 -  size_t promotion_failure_size() { return promotion_failure_size(); }
   83.27 -  void log_promotion_failure(size_t sz) {
   83.28 -    if (_promotion_failure_size == 0) {
   83.29 -      _promotion_failure_size = sz;
   83.30 -    }
   83.31 +  void register_promotion_failure(size_t sz) {
   83.32 +    _promotion_failed_info.register_copy_failure(sz);
   83.33    }
   83.34 -  void print_and_clear_promotion_failure_size();
   83.35 +  PromotionFailedInfo& promotion_failed_info() {
   83.36 +    return _promotion_failed_info;
   83.37 +  }
   83.38 +  bool promotion_failed() {
   83.39 +    return _promotion_failed_info.has_failed();
   83.40 +  }
   83.41 +  void print_promotion_failure_size();
   83.42  
   83.43  #if TASKQUEUE_STATS
   83.44    TaskQueueStats & taskqueue_stats() const { return _work_queue->stats; }
   83.45 @@ -337,6 +342,8 @@
   83.46    // word being overwritten with a self-forwarding-pointer.
   83.47    void preserve_mark_if_necessary(oop obj, markOop m);
   83.48  
   83.49 +  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
   83.50 +
   83.51   protected:
   83.52  
   83.53    bool _survivor_overflow;
    84.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Fri Jun 07 09:33:01 2013 -0700
    84.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Mon Jun 10 11:30:51 2013 +0200
    84.3 @@ -1,5 +1,5 @@
    84.4  /*
    84.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    84.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    84.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    84.8   *
    84.9   * This code is free software; you can redistribute it and/or modify it
   84.10 @@ -35,6 +35,8 @@
   84.11  #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
   84.12  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
   84.13  #include "gc_implementation/parallelScavenge/vmPSOperations.hpp"
   84.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
   84.15 +#include "gc_implementation/shared/gcWhen.hpp"
   84.16  #include "memory/gcLocker.inline.hpp"
   84.17  #include "oops/oop.inline.hpp"
   84.18  #include "runtime/handles.inline.hpp"
   84.19 @@ -642,6 +644,29 @@
   84.20    ensure_parsability(false);  // no need to retire TLABs for verification
   84.21  }
   84.22  
   84.23 +PSHeapSummary ParallelScavengeHeap::create_ps_heap_summary() {
   84.24 +  PSOldGen* old = old_gen();
   84.25 +  HeapWord* old_committed_end = (HeapWord*)old->virtual_space()->committed_high_addr();
   84.26 +  VirtualSpaceSummary old_summary(old->reserved().start(), old_committed_end, old->reserved().end());
   84.27 +  SpaceSummary old_space(old->reserved().start(), old_committed_end, old->used_in_bytes());
   84.28 +
   84.29 +  PSYoungGen* young = young_gen();
   84.30 +  VirtualSpaceSummary young_summary(young->reserved().start(),
   84.31 +    (HeapWord*)young->virtual_space()->committed_high_addr(), young->reserved().end());
   84.32 +
   84.33 +  MutableSpace* eden = young_gen()->eden_space();
   84.34 +  SpaceSummary eden_space(eden->bottom(), eden->end(), eden->used_in_bytes());
   84.35 +
   84.36 +  MutableSpace* from = young_gen()->from_space();
   84.37 +  SpaceSummary from_space(from->bottom(), from->end(), from->used_in_bytes());
   84.38 +
   84.39 +  MutableSpace* to = young_gen()->to_space();
   84.40 +  SpaceSummary to_space(to->bottom(), to->end(), to->used_in_bytes());
   84.41 +
   84.42 +  VirtualSpaceSummary heap_summary = create_heap_space_summary();
   84.43 +  return PSHeapSummary(heap_summary, used(), old_summary, old_space, young_summary, eden_space, from_space, to_space);
   84.44 +}
   84.45 +
   84.46  void ParallelScavengeHeap::print_on(outputStream* st) const {
   84.47    young_gen()->print_on(st);
   84.48    old_gen()->print_on(st);
   84.49 @@ -706,6 +731,12 @@
   84.50    }
   84.51  }
   84.52  
   84.53 +void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
   84.54 +  const PSHeapSummary& heap_summary = create_ps_heap_summary();
   84.55 +  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
   84.56 +  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
   84.57 +}
   84.58 +
   84.59  ParallelScavengeHeap* ParallelScavengeHeap::heap() {
   84.60    assert(_psh != NULL, "Uninitialized access to ParallelScavengeHeap::heap()");
   84.61    assert(_psh->kind() == CollectedHeap::ParallelScavengeHeap, "not a parallel scavenge heap");
    85.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Fri Jun 07 09:33:01 2013 -0700
    85.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Mon Jun 10 11:30:51 2013 +0200
    85.3 @@ -1,5 +1,5 @@
    85.4  /*
    85.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    85.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    85.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    85.8   *
    85.9   * This code is free software; you can redistribute it and/or modify it
   85.10 @@ -30,14 +30,18 @@
   85.11  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
   85.12  #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
   85.13  #include "gc_implementation/shared/gcPolicyCounters.hpp"
   85.14 +#include "gc_implementation/shared/gcWhen.hpp"
   85.15  #include "gc_interface/collectedHeap.inline.hpp"
   85.16  #include "utilities/ostream.hpp"
   85.17  
   85.18  class AdjoiningGenerations;
   85.19 +class CollectorPolicy;
   85.20 +class GCHeapSummary;
   85.21  class GCTaskManager;
   85.22 -class PSAdaptiveSizePolicy;
   85.23  class GenerationSizer;
   85.24  class CollectorPolicy;
   85.25 +class PSAdaptiveSizePolicy;
   85.26 +class PSHeapSummary;
   85.27  
   85.28  class ParallelScavengeHeap : public CollectedHeap {
   85.29    friend class VMStructs;
   85.30 @@ -65,6 +69,8 @@
   85.31  
   85.32    static GCTaskManager*          _gc_task_manager;      // The task manager.
   85.33  
   85.34 +  void trace_heap(GCWhen::Type when, GCTracer* tracer);
   85.35 +
   85.36   protected:
   85.37    static inline size_t total_invocations();
   85.38    HeapWord* allocate_new_tlab(size_t size);
   85.39 @@ -219,6 +225,7 @@
   85.40    jlong millis_since_last_gc();
   85.41  
   85.42    void prepare_for_verify();
   85.43 +  PSHeapSummary create_ps_heap_summary();
   85.44    virtual void print_on(outputStream* st) const;
   85.45    virtual void print_on_error(outputStream* st) const;
   85.46    virtual void print_gc_threads_on(outputStream* st) const;
    86.1 --- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Fri Jun 07 09:33:01 2013 -0700
    86.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	Mon Jun 10 11:30:51 2013 +0200
    86.3 @@ -27,6 +27,8 @@
    86.4  #include "code/codeCache.hpp"
    86.5  #include "gc_implementation/parallelScavenge/pcTasks.hpp"
    86.6  #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
    86.7 +#include "gc_implementation/shared/gcTimer.hpp"
    86.8 +#include "gc_implementation/shared/gcTraceTime.hpp"
    86.9  #include "gc_interface/collectedHeap.hpp"
   86.10  #include "memory/universe.hpp"
   86.11  #include "oops/objArrayKlass.inline.hpp"
   86.12 @@ -48,8 +50,8 @@
   86.13  
   86.14    ResourceMark rm;
   86.15  
   86.16 -  NOT_PRODUCT(TraceTime tm("ThreadRootsMarkingTask",
   86.17 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.18 +  NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
   86.19 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.20    ParCompactionManager* cm =
   86.21      ParCompactionManager::gc_thread_compaction_manager(which);
   86.22  
   86.23 @@ -77,8 +79,8 @@
   86.24  void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
   86.25    assert(Universe::heap()->is_gc_active(), "called outside gc");
   86.26  
   86.27 -  NOT_PRODUCT(TraceTime tm("MarkFromRootsTask",
   86.28 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.29 +  NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
   86.30 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.31    ParCompactionManager* cm =
   86.32      ParCompactionManager::gc_thread_compaction_manager(which);
   86.33    PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   86.34 @@ -148,8 +150,8 @@
   86.35  {
   86.36    assert(Universe::heap()->is_gc_active(), "called outside gc");
   86.37  
   86.38 -  NOT_PRODUCT(TraceTime tm("RefProcTask",
   86.39 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.40 +  NOT_PRODUCT(GCTraceTime tm("RefProcTask",
   86.41 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.42    ParCompactionManager* cm =
   86.43      ParCompactionManager::gc_thread_compaction_manager(which);
   86.44    PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
   86.45 @@ -204,8 +206,8 @@
   86.46  void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
   86.47    assert(Universe::heap()->is_gc_active(), "called outside gc");
   86.48  
   86.49 -  NOT_PRODUCT(TraceTime tm("StealMarkingTask",
   86.50 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.51 +  NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
   86.52 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.53  
   86.54    ParCompactionManager* cm =
   86.55      ParCompactionManager::gc_thread_compaction_manager(which);
   86.56 @@ -237,8 +239,8 @@
   86.57  void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
   86.58    assert(Universe::heap()->is_gc_active(), "called outside gc");
   86.59  
   86.60 -  NOT_PRODUCT(TraceTime tm("StealRegionCompactionTask",
   86.61 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.62 +  NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
   86.63 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.64  
   86.65    ParCompactionManager* cm =
   86.66      ParCompactionManager::gc_thread_compaction_manager(which);
   86.67 @@ -304,8 +306,8 @@
   86.68  
   86.69  void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
   86.70  
   86.71 -  NOT_PRODUCT(TraceTime tm("UpdateDensePrefixTask",
   86.72 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.73 +  NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
   86.74 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.75  
   86.76    ParCompactionManager* cm =
   86.77      ParCompactionManager::gc_thread_compaction_manager(which);
   86.78 @@ -319,8 +321,8 @@
   86.79  void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
   86.80    assert(Universe::heap()->is_gc_active(), "called outside gc");
   86.81  
   86.82 -  NOT_PRODUCT(TraceTime tm("DrainStacksCompactionTask",
   86.83 -    PrintGCDetails && TraceParallelOldGCTasks, true, gclog_or_tty));
   86.84 +  NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
   86.85 +    PrintGCDetails && TraceParallelOldGCTasks, true, NULL));
   86.86  
   86.87    ParCompactionManager* cm =
   86.88      ParCompactionManager::gc_thread_compaction_manager(which);
    87.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Fri Jun 07 09:33:01 2013 -0700
    87.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psMarkSweep.cpp	Mon Jun 10 11:30:51 2013 +0200
    87.3 @@ -34,6 +34,10 @@
    87.4  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
    87.5  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
    87.6  #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
    87.7 +#include "gc_implementation/shared/gcHeapSummary.hpp"
    87.8 +#include "gc_implementation/shared/gcTimer.hpp"
    87.9 +#include "gc_implementation/shared/gcTrace.hpp"
   87.10 +#include "gc_implementation/shared/gcTraceTime.hpp"
   87.11  #include "gc_implementation/shared/isGCActiveMark.hpp"
   87.12  #include "gc_implementation/shared/markSweep.hpp"
   87.13  #include "gc_implementation/shared/spaceDecorator.hpp"
   87.14 @@ -108,8 +112,12 @@
   87.15    }
   87.16  
   87.17    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   87.18 +  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   87.19    GCCause::Cause gc_cause = heap->gc_cause();
   87.20 -  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
   87.21 +
   87.22 +  _gc_timer->register_gc_start(os::elapsed_counter());
   87.23 +  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());
   87.24 +
   87.25    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   87.26  
   87.27    // The scope of casr should end after code that can change
   87.28 @@ -131,6 +139,7 @@
   87.29    AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
   87.30  
   87.31    heap->print_heap_before_gc();
   87.32 +  heap->trace_heap_before_gc(_gc_tracer);
   87.33  
   87.34    // Fill in TLABs
   87.35    heap->accumulate_statistics_all_tlabs();
   87.36 @@ -147,7 +156,7 @@
   87.37      old_gen->verify_object_start_array();
   87.38    }
   87.39  
   87.40 -  heap->pre_full_gc_dump();
   87.41 +  heap->pre_full_gc_dump(_gc_timer);
   87.42  
   87.43    // Filled in below to track the state of the young gen after the collection.
   87.44    bool eden_empty;
   87.45 @@ -159,7 +168,7 @@
   87.46  
   87.47      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   87.48      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   87.49 -    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
   87.50 +    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   87.51      TraceCollectorStats tcs(counters());
   87.52      TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
   87.53  
   87.54 @@ -374,13 +383,18 @@
   87.55    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
   87.56  
   87.57    heap->print_heap_after_gc();
   87.58 +  heap->trace_heap_after_gc(_gc_tracer);
   87.59  
   87.60 -  heap->post_full_gc_dump();
   87.61 +  heap->post_full_gc_dump(_gc_timer);
   87.62  
   87.63  #ifdef TRACESPINNING
   87.64    ParallelTaskTerminator::print_termination_counts();
   87.65  #endif
   87.66  
   87.67 +  _gc_timer->register_gc_end(os::elapsed_counter());
   87.68 +
   87.69 +  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
   87.70 +
   87.71    return true;
   87.72  }
   87.73  
   87.74 @@ -498,7 +512,7 @@
   87.75  
   87.76  void PSMarkSweep::mark_sweep_phase1(bool clear_all_softrefs) {
   87.77    // Recursively traverse all live objects and mark them
   87.78 -  TraceTime tm("phase 1", PrintGCDetails && Verbose, true, gclog_or_tty);
   87.79 +  GCTraceTime tm("phase 1", PrintGCDetails && Verbose, true, _gc_timer);
   87.80    trace(" 1");
   87.81  
   87.82    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
   87.83 @@ -531,8 +545,10 @@
   87.84    // Process reference objects found during marking
   87.85    {
   87.86      ref_processor()->setup_policy(clear_all_softrefs);
   87.87 -    ref_processor()->process_discovered_references(
   87.88 -      is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL);
   87.89 +    const ReferenceProcessorStats& stats =
   87.90 +      ref_processor()->process_discovered_references(
   87.91 +        is_alive_closure(), mark_and_push_closure(), follow_stack_closure(), NULL, _gc_timer);
   87.92 +    gc_tracer()->report_gc_reference_stats(stats);
   87.93    }
   87.94  
   87.95    // This is the point where the entire marking should have completed.
   87.96 @@ -552,11 +568,12 @@
   87.97  
   87.98    // Clean up unreferenced symbols in symbol table.
   87.99    SymbolTable::unlink();
  87.100 +  _gc_tracer->report_object_count_after_gc(is_alive_closure());
  87.101  }
  87.102  
  87.103  
  87.104  void PSMarkSweep::mark_sweep_phase2() {
  87.105 -  TraceTime tm("phase 2", PrintGCDetails && Verbose, true, gclog_or_tty);
  87.106 +  GCTraceTime tm("phase 2", PrintGCDetails && Verbose, true, _gc_timer);
  87.107    trace("2");
  87.108  
  87.109    // Now all live objects are marked, compute the new object addresses.
  87.110 @@ -586,7 +603,7 @@
  87.111  
  87.112  void PSMarkSweep::mark_sweep_phase3() {
  87.113    // Adjust the pointers to reflect the new locations
  87.114 -  TraceTime tm("phase 3", PrintGCDetails && Verbose, true, gclog_or_tty);
  87.115 +  GCTraceTime tm("phase 3", PrintGCDetails && Verbose, true, _gc_timer);
  87.116    trace("3");
  87.117  
  87.118    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  87.119 @@ -629,7 +646,7 @@
  87.120  
  87.121  void PSMarkSweep::mark_sweep_phase4() {
  87.122    EventMark m("4 compact heap");
  87.123 -  TraceTime tm("phase 4", PrintGCDetails && Verbose, true, gclog_or_tty);
  87.124 +  GCTraceTime tm("phase 4", PrintGCDetails && Verbose, true, _gc_timer);
  87.125    trace("4");
  87.126  
  87.127    // All pointers are now adjusted, move objects accordingly
    88.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Fri Jun 07 09:33:01 2013 -0700
    88.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	Mon Jun 10 11:30:51 2013 +0200
    88.3 @@ -39,6 +39,10 @@
    88.4  #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
    88.5  #include "gc_implementation/parallelScavenge/psScavenge.hpp"
    88.6  #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
    88.7 +#include "gc_implementation/shared/gcHeapSummary.hpp"
    88.8 +#include "gc_implementation/shared/gcTimer.hpp"
    88.9 +#include "gc_implementation/shared/gcTrace.hpp"
   88.10 +#include "gc_implementation/shared/gcTraceTime.hpp"
   88.11  #include "gc_implementation/shared/isGCActiveMark.hpp"
   88.12  #include "gc_interface/gcCause.hpp"
   88.13  #include "memory/gcLocker.inline.hpp"
   88.14 @@ -799,6 +803,8 @@
   88.15  }
   88.16  #endif  // #ifdef ASSERT
   88.17  
   88.18 +STWGCTimer          PSParallelCompact::_gc_timer;
   88.19 +ParallelOldTracer   PSParallelCompact::_gc_tracer;
   88.20  elapsedTimer        PSParallelCompact::_accumulated_time;
   88.21  unsigned int        PSParallelCompact::_total_invocations = 0;
   88.22  unsigned int        PSParallelCompact::_maximum_compaction_gc_num = 0;
   88.23 @@ -972,7 +978,7 @@
   88.24    // at each young gen gc.  Do the update unconditionally (even though a
   88.25    // promotion failure does not swap spaces) because an unknown number of minor
   88.26    // collections will have swapped the spaces an unknown number of times.
   88.27 -  TraceTime tm("pre compact", print_phases(), true, gclog_or_tty);
   88.28 +  GCTraceTime tm("pre compact", print_phases(), true, &_gc_timer);
   88.29    ParallelScavengeHeap* heap = gc_heap();
   88.30    _space_info[from_space_id].set_space(heap->young_gen()->from_space());
   88.31    _space_info[to_space_id].set_space(heap->young_gen()->to_space());
   88.32 @@ -989,6 +995,7 @@
   88.33    _total_invocations++;
   88.34  
   88.35    heap->print_heap_before_gc();
   88.36 +  heap->trace_heap_before_gc(&_gc_tracer);
   88.37  
   88.38    // Fill in TLABs
   88.39    heap->accumulate_statistics_all_tlabs();
   88.40 @@ -1014,7 +1021,7 @@
   88.41  
   88.42  void PSParallelCompact::post_compact()
   88.43  {
   88.44 -  TraceTime tm("post compact", print_phases(), true, gclog_or_tty);
   88.45 +  GCTraceTime tm("post compact", print_phases(), true, &_gc_timer);
   88.46  
   88.47    for (unsigned int id = old_space_id; id < last_space_id; ++id) {
   88.48      // Clear the marking bitmap, summary data and split info.
   88.49 @@ -1840,7 +1847,7 @@
   88.50  void PSParallelCompact::summary_phase(ParCompactionManager* cm,
   88.51                                        bool maximum_compaction)
   88.52  {
   88.53 -  TraceTime tm("summary phase", print_phases(), true, gclog_or_tty);
   88.54 +  GCTraceTime tm("summary phase", print_phases(), true, &_gc_timer);
   88.55    // trace("2");
   88.56  
   88.57  #ifdef  ASSERT
   88.58 @@ -1998,11 +2005,15 @@
   88.59      return false;
   88.60    }
   88.61  
   88.62 +  ParallelScavengeHeap* heap = gc_heap();
   88.63 +
   88.64 +  _gc_timer.register_gc_start(os::elapsed_counter());
   88.65 +  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
   88.66 +
   88.67    TimeStamp marking_start;
   88.68    TimeStamp compaction_start;
   88.69    TimeStamp collection_exit;
   88.70  
   88.71 -  ParallelScavengeHeap* heap = gc_heap();
   88.72    GCCause::Cause gc_cause = heap->gc_cause();
   88.73    PSYoungGen* young_gen = heap->young_gen();
   88.74    PSOldGen* old_gen = heap->old_gen();
   88.75 @@ -2018,7 +2029,7 @@
   88.76      heap->record_gen_tops_before_GC();
   88.77    }
   88.78  
   88.79 -  heap->pre_full_gc_dump();
   88.80 +  heap->pre_full_gc_dump(&_gc_timer);
   88.81  
   88.82    _print_phases = PrintGCDetails && PrintParallelOldGCPhaseTimes;
   88.83  
   88.84 @@ -2045,7 +2056,7 @@
   88.85  
   88.86      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   88.87      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   88.88 -    TraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
   88.89 +    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   88.90      TraceCollectorStats tcs(counters());
   88.91      TraceMemoryManagerStats tms(true /* Full GC */,gc_cause);
   88.92  
   88.93 @@ -2065,7 +2076,7 @@
   88.94      bool marked_for_unloading = false;
   88.95  
   88.96      marking_start.update();
   88.97 -    marking_phase(vmthread_cm, maximum_heap_compaction);
   88.98 +    marking_phase(vmthread_cm, maximum_heap_compaction, &_gc_tracer);
   88.99  
  88.100      bool max_on_system_gc = UseMaximumCompactionOnSystemGC
  88.101        && gc_cause == GCCause::_java_lang_system_gc;
  88.102 @@ -2218,6 +2229,8 @@
  88.103    collection_exit.update();
  88.104  
  88.105    heap->print_heap_after_gc();
  88.106 +  heap->trace_heap_after_gc(&_gc_tracer);
  88.107 +
  88.108    if (PrintGCTaskTimeStamps) {
  88.109      gclog_or_tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " "
  88.110                             INT64_FORMAT,
  88.111 @@ -2226,12 +2239,17 @@
  88.112      gc_task_manager()->print_task_time_stamps();
  88.113    }
  88.114  
  88.115 -  heap->post_full_gc_dump();
  88.116 +  heap->post_full_gc_dump(&_gc_timer);
  88.117  
  88.118  #ifdef TRACESPINNING
  88.119    ParallelTaskTerminator::print_termination_counts();
  88.120  #endif
  88.121  
  88.122 +  _gc_timer.register_gc_end(os::elapsed_counter());
  88.123 +
  88.124 +  _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
  88.125 +  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
  88.126 +
  88.127    return true;
  88.128  }
  88.129  
  88.130 @@ -2330,9 +2348,10 @@
  88.131  }
  88.132  
  88.133  void PSParallelCompact::marking_phase(ParCompactionManager* cm,
  88.134 -                                      bool maximum_heap_compaction) {
  88.135 +                                      bool maximum_heap_compaction,
  88.136 +                                      ParallelOldTracer *gc_tracer) {
  88.137    // Recursively traverse all live objects and mark them
  88.138 -  TraceTime tm("marking phase", print_phases(), true, gclog_or_tty);
  88.139 +  GCTraceTime tm("marking phase", print_phases(), true, &_gc_timer);
  88.140  
  88.141    ParallelScavengeHeap* heap = gc_heap();
  88.142    uint parallel_gc_threads = heap->gc_task_manager()->workers();
  88.143 @@ -2347,7 +2366,8 @@
  88.144    ClassLoaderDataGraph::clear_claimed_marks();
  88.145  
  88.146    {
  88.147 -    TraceTime tm_m("par mark", print_phases(), true, gclog_or_tty);
  88.148 +    GCTraceTime tm_m("par mark", print_phases(), true, &_gc_timer);
  88.149 +
  88.150      ParallelScavengeHeap::ParStrongRootsScope psrs;
  88.151  
  88.152      GCTaskQueue* q = GCTaskQueue::create();
  88.153 @@ -2375,19 +2395,24 @@
  88.154  
  88.155    // Process reference objects found during marking
  88.156    {
  88.157 -    TraceTime tm_r("reference processing", print_phases(), true, gclog_or_tty);
  88.158 +    GCTraceTime tm_r("reference processing", print_phases(), true, &_gc_timer);
  88.159 +
  88.160 +    ReferenceProcessorStats stats;
  88.161      if (ref_processor()->processing_is_mt()) {
  88.162        RefProcTaskExecutor task_executor;
  88.163 -      ref_processor()->process_discovered_references(
  88.164 +      stats = ref_processor()->process_discovered_references(
  88.165          is_alive_closure(), &mark_and_push_closure, &follow_stack_closure,
  88.166 -        &task_executor);
  88.167 +        &task_executor, &_gc_timer);
  88.168      } else {
  88.169 -      ref_processor()->process_discovered_references(
  88.170 -        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL);
  88.171 +      stats = ref_processor()->process_discovered_references(
  88.172 +        is_alive_closure(), &mark_and_push_closure, &follow_stack_closure, NULL,
  88.173 +        &_gc_timer);
  88.174      }
  88.175 +
  88.176 +    gc_tracer->report_gc_reference_stats(stats);
  88.177    }
  88.178  
  88.179 -  TraceTime tm_c("class unloading", print_phases(), true, gclog_or_tty);
  88.180 +  GCTraceTime tm_c("class unloading", print_phases(), true, &_gc_timer);
  88.181  
  88.182    // This is the point where the entire marking should have completed.
  88.183    assert(cm->marking_stacks_empty(), "Marking should have completed");
  88.184 @@ -2406,6 +2431,7 @@
  88.185  
  88.186    // Clean up unreferenced symbols in symbol table.
  88.187    SymbolTable::unlink();
  88.188 +  _gc_tracer.report_object_count_after_gc(is_alive_closure());
  88.189  }
  88.190  
  88.191  void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
  88.192 @@ -2446,7 +2472,7 @@
  88.193  
  88.194  void PSParallelCompact::adjust_roots() {
  88.195    // Adjust the pointers to reflect the new locations
  88.196 -  TraceTime tm("adjust roots", print_phases(), true, gclog_or_tty);
  88.197 +  GCTraceTime tm("adjust roots", print_phases(), true, &_gc_timer);
  88.198  
  88.199    // Need new claim bits when tracing through and adjusting pointers.
  88.200    ClassLoaderDataGraph::clear_claimed_marks();
  88.201 @@ -2482,7 +2508,7 @@
  88.202  void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
  88.203                                                        uint parallel_gc_threads)
  88.204  {
  88.205 -  TraceTime tm("drain task setup", print_phases(), true, gclog_or_tty);
  88.206 +  GCTraceTime tm("drain task setup", print_phases(), true, &_gc_timer);
  88.207  
  88.208    // Find the threads that are active
  88.209    unsigned int which = 0;
  88.210 @@ -2556,7 +2582,7 @@
  88.211  
  88.212  void PSParallelCompact::enqueue_dense_prefix_tasks(GCTaskQueue* q,
  88.213                                                      uint parallel_gc_threads) {
  88.214 -  TraceTime tm("dense prefix task setup", print_phases(), true, gclog_or_tty);
  88.215 +  GCTraceTime tm("dense prefix task setup", print_phases(), true, &_gc_timer);
  88.216  
  88.217    ParallelCompactData& sd = PSParallelCompact::summary_data();
  88.218  
  88.219 @@ -2638,7 +2664,7 @@
  88.220                                       GCTaskQueue* q,
  88.221                                       ParallelTaskTerminator* terminator_ptr,
  88.222                                       uint parallel_gc_threads) {
  88.223 -  TraceTime tm("steal task setup", print_phases(), true, gclog_or_tty);
  88.224 +  GCTraceTime tm("steal task setup", print_phases(), true, &_gc_timer);
  88.225  
  88.226    // Once a thread has drained it's stack, it should try to steal regions from
  88.227    // other threads.
  88.228 @@ -2686,7 +2712,7 @@
  88.229  
  88.230  void PSParallelCompact::compact() {
  88.231    // trace("5");
  88.232 -  TraceTime tm("compaction phase", print_phases(), true, gclog_or_tty);
  88.233 +  GCTraceTime tm("compaction phase", print_phases(), true, &_gc_timer);
  88.234  
  88.235    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  88.236    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  88.237 @@ -2703,7 +2729,7 @@
  88.238    enqueue_region_stealing_tasks(q, &terminator, active_gc_threads);
  88.239  
  88.240    {
  88.241 -    TraceTime tm_pc("par compact", print_phases(), true, gclog_or_tty);
  88.242 +    GCTraceTime tm_pc("par compact", print_phases(), true, &_gc_timer);
  88.243  
  88.244      gc_task_manager()->execute_and_wait(q);
  88.245  
  88.246 @@ -2717,7 +2743,7 @@
  88.247  
  88.248    {
  88.249      // Update the deferred objects, if any.  Any compaction manager can be used.
  88.250 -    TraceTime tm_du("deferred updates", print_phases(), true, gclog_or_tty);
  88.251 +    GCTraceTime tm_du("deferred updates", print_phases(), true, &_gc_timer);
  88.252      ParCompactionManager* cm = ParCompactionManager::manager_array(0);
  88.253      for (unsigned int id = old_space_id; id < last_space_id; ++id) {
  88.254        update_deferred_objects(cm, SpaceId(id));
    89.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Fri Jun 07 09:33:01 2013 -0700
    89.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	Mon Jun 10 11:30:51 2013 +0200
    89.3 @@ -46,6 +46,8 @@
    89.4  class PreGCValues;
    89.5  class MoveAndUpdateClosure;
    89.6  class RefProcTaskExecutor;
    89.7 +class ParallelOldTracer;
    89.8 +class STWGCTimer;
    89.9  
   89.10  // The SplitInfo class holds the information needed to 'split' a source region
   89.11  // so that the live data can be copied to two destination *spaces*.  Normally,
   89.12 @@ -972,6 +974,8 @@
   89.13    friend class RefProcTaskProxy;
   89.14  
   89.15   private:
   89.16 +  static STWGCTimer           _gc_timer;
   89.17 +  static ParallelOldTracer    _gc_tracer;
   89.18    static elapsedTimer         _accumulated_time;
   89.19    static unsigned int         _total_invocations;
   89.20    static unsigned int         _maximum_compaction_gc_num;
   89.21 @@ -1015,7 +1019,8 @@
   89.22  
   89.23    // Mark live objects
   89.24    static void marking_phase(ParCompactionManager* cm,
   89.25 -                            bool maximum_heap_compaction);
   89.26 +                            bool maximum_heap_compaction,
   89.27 +                            ParallelOldTracer *gc_tracer);
   89.28  
   89.29    template <class T>
   89.30    static inline void follow_root(ParCompactionManager* cm, T* p);
   89.31 @@ -1284,6 +1289,8 @@
   89.32    // Reference Processing
   89.33    static ReferenceProcessor* const ref_processor() { return _ref_processor; }
   89.34  
   89.35 +  static STWGCTimer* gc_timer() { return &_gc_timer; }
   89.36 +
   89.37    // Return the SpaceId for the given address.
   89.38    static SpaceId space_id(HeapWord* addr);
   89.39  
    90.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Fri Jun 07 09:33:01 2013 -0700
    90.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Mon Jun 10 11:30:51 2013 +0200
    90.3 @@ -1,5 +1,5 @@
    90.4  /*
    90.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    90.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    90.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    90.8   *
    90.9   * This code is free software; you can redistribute it and/or modify it
   90.10 @@ -27,6 +27,7 @@
   90.11  #include "gc_implementation/parallelScavenge/psOldGen.hpp"
   90.12  #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
   90.13  #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
   90.14 +#include "gc_implementation/shared/gcTrace.hpp"
   90.15  #include "gc_implementation/shared/mutableSpace.hpp"
   90.16  #include "memory/memRegion.hpp"
   90.17  #include "oops/oop.inline.hpp"
   90.18 @@ -49,7 +50,7 @@
   90.19    guarantee(_manager_array != NULL, "Could not initialize promotion manager");
   90.20  
   90.21    _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
   90.22 -  guarantee(_stack_array_depth != NULL, "Cound not initialize promotion manager");
   90.23 +  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");
   90.24  
   90.25    // Create and register the PSPromotionManager(s) for the worker threads.
   90.26    for(uint i=0; i<ParallelGCThreads; i++) {
   90.27 @@ -86,13 +87,20 @@
   90.28    }
   90.29  }
   90.30  
   90.31 -void PSPromotionManager::post_scavenge() {
   90.32 +bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
   90.33 +  bool promotion_failure_occurred = false;
   90.34 +
   90.35    TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
   90.36    for (uint i = 0; i < ParallelGCThreads + 1; i++) {
   90.37      PSPromotionManager* manager = manager_array(i);
   90.38      assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
   90.39 +    if (manager->_promotion_failed_info.has_failed()) {
   90.40 +      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
   90.41 +      promotion_failure_occurred = true;
   90.42 +    }
   90.43      manager->flush_labs();
   90.44    }
   90.45 +  return promotion_failure_occurred;
   90.46  }
   90.47  
   90.48  #if TASKQUEUE_STATS
   90.49 @@ -187,6 +195,8 @@
   90.50    _old_lab.initialize(MemRegion(lab_base, (size_t)0));
   90.51    _old_gen_is_full = false;
   90.52  
   90.53 +  _promotion_failed_info.reset();
   90.54 +
   90.55    TASKQUEUE_STATS_ONLY(reset_stats());
   90.56  }
   90.57  
   90.58 @@ -305,6 +315,8 @@
   90.59      // We won any races, we "own" this object.
   90.60      assert(obj == obj->forwardee(), "Sanity");
   90.61  
   90.62 +    _promotion_failed_info.register_copy_failure(obj->size());
   90.63 +
   90.64      obj->push_contents(this);
   90.65  
   90.66      // Save the mark if needed
    91.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Fri Jun 07 09:33:01 2013 -0700
    91.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Mon Jun 10 11:30:51 2013 +0200
    91.3 @@ -1,5 +1,5 @@
    91.4  /*
    91.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    91.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    91.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    91.8   *
    91.9   * This code is free software; you can redistribute it and/or modify it
   91.10 @@ -26,6 +26,8 @@
   91.11  #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_HPP
   91.12  
   91.13  #include "gc_implementation/parallelScavenge/psPromotionLAB.hpp"
   91.14 +#include "gc_implementation/shared/gcTrace.hpp"
   91.15 +#include "gc_implementation/shared/copyFailedInfo.hpp"
   91.16  #include "memory/allocation.hpp"
   91.17  #include "utilities/taskqueue.hpp"
   91.18  
   91.19 @@ -33,7 +35,7 @@
   91.20  // psPromotionManager is used by a single thread to manage object survival
   91.21  // during a scavenge. The promotion manager contains thread local data only.
   91.22  //
   91.23 -// NOTE! Be carefull when allocating the stacks on cheap. If you are going
   91.24 +// NOTE! Be careful when allocating the stacks on cheap. If you are going
   91.25  // to use a promotion manager in more than one thread, the stacks MUST be
   91.26  // on cheap. This can lead to memory leaks, though, as they are not auto
   91.27  // deallocated.
   91.28 @@ -85,6 +87,8 @@
   91.29    uint                                _array_chunk_size;
   91.30    uint                                _min_array_size_for_chunking;
   91.31  
   91.32 +  PromotionFailedInfo                 _promotion_failed_info;
   91.33 +
   91.34    // Accessors
   91.35    static PSOldGen* old_gen()         { return _old_gen; }
   91.36    static MutableSpace* young_space() { return _young_space; }
   91.37 @@ -149,7 +153,7 @@
   91.38    static void initialize();
   91.39  
   91.40    static void pre_scavenge();
   91.41 -  static void post_scavenge();
   91.42 +  static bool post_scavenge(YoungGCTracer& gc_tracer);
   91.43  
   91.44    static PSPromotionManager* gc_thread_promotion_manager(int index);
   91.45    static PSPromotionManager* vm_thread_promotion_manager();
    92.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Fri Jun 07 09:33:01 2013 -0700
    92.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Mon Jun 10 11:30:51 2013 +0200
    92.3 @@ -1,5 +1,5 @@
    92.4  /*
    92.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    92.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    92.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    92.8   *
    92.9   * This code is free software; you can redistribute it and/or modify it
   92.10 @@ -152,7 +152,7 @@
   92.11  
   92.12          // This is the promotion failed test, and code handling.
   92.13          // The code belongs here for two reasons. It is slightly
   92.14 -        // different thatn the code below, and cannot share the
   92.15 +        // different than the code below, and cannot share the
   92.16          // CAS testing code. Keeping the code here also minimizes
   92.17          // the impact on the common case fast path code.
   92.18  
    93.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Fri Jun 07 09:33:01 2013 -0700
    93.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp	Mon Jun 10 11:30:51 2013 +0200
    93.3 @@ -34,6 +34,10 @@
    93.4  #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
    93.5  #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
    93.6  #include "gc_implementation/parallelScavenge/psTasks.hpp"
    93.7 +#include "gc_implementation/shared/gcHeapSummary.hpp"
    93.8 +#include "gc_implementation/shared/gcTimer.hpp"
    93.9 +#include "gc_implementation/shared/gcTrace.hpp"
   93.10 +#include "gc_implementation/shared/gcTraceTime.hpp"
   93.11  #include "gc_implementation/shared/isGCActiveMark.hpp"
   93.12  #include "gc_implementation/shared/spaceDecorator.hpp"
   93.13  #include "gc_interface/gcCause.hpp"
   93.14 @@ -63,10 +67,11 @@
   93.15  HeapWord*                  PSScavenge::_young_generation_boundary = NULL;
   93.16  uintptr_t                  PSScavenge::_young_generation_boundary_compressed = 0;
   93.17  elapsedTimer               PSScavenge::_accumulated_time;
   93.18 +STWGCTimer                 PSScavenge::_gc_timer;
   93.19 +ParallelScavengeTracer     PSScavenge::_gc_tracer;
   93.20  Stack<markOop, mtGC>       PSScavenge::_preserved_mark_stack;
   93.21  Stack<oop, mtGC>           PSScavenge::_preserved_oop_stack;
   93.22  CollectorCounters*         PSScavenge::_counters = NULL;
   93.23 -bool                       PSScavenge::_promotion_failed = false;
   93.24  
   93.25  // Define before use
   93.26  class PSIsAliveClosure: public BoolObjectClosure {
   93.27 @@ -259,6 +264,8 @@
   93.28    assert(_preserved_mark_stack.is_empty(), "should be empty");
   93.29    assert(_preserved_oop_stack.is_empty(), "should be empty");
   93.30  
   93.31 +  _gc_timer.register_gc_start(os::elapsed_counter());
   93.32 +
   93.33    TimeStamp scavenge_entry;
   93.34    TimeStamp scavenge_midpoint;
   93.35    TimeStamp scavenge_exit;
   93.36 @@ -278,11 +285,14 @@
   93.37      return false;
   93.38    }
   93.39  
   93.40 +  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());
   93.41 +
   93.42    bool promotion_failure_occurred = false;
   93.43  
   93.44    PSYoungGen* young_gen = heap->young_gen();
   93.45    PSOldGen* old_gen = heap->old_gen();
   93.46    PSAdaptiveSizePolicy* size_policy = heap->size_policy();
   93.47 +
   93.48    heap->increment_total_collections();
   93.49  
   93.50    AdaptiveSizePolicyOutput(size_policy, heap->total_collections());
   93.51 @@ -299,12 +309,12 @@
   93.52    }
   93.53  
   93.54    heap->print_heap_before_gc();
   93.55 +  heap->trace_heap_before_gc(&_gc_tracer);
   93.56  
   93.57    assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
   93.58    assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");
   93.59  
   93.60    size_t prev_used = heap->used();
   93.61 -  assert(promotion_failed() == false, "Sanity");
   93.62  
   93.63    // Fill in TLABs
   93.64    heap->accumulate_statistics_all_tlabs();
   93.65 @@ -321,7 +331,7 @@
   93.66  
   93.67      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
   93.68      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
   93.69 -    TraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, gclog_or_tty);
   93.70 +    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
   93.71      TraceCollectorStats tcs(counters());
   93.72      TraceMemoryManagerStats tms(false /* not full GC */,gc_cause);
   93.73  
   93.74 @@ -387,7 +397,7 @@
   93.75      // We'll use the promotion manager again later.
   93.76      PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
   93.77      {
   93.78 -      // TraceTime("Roots");
   93.79 +      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
   93.80        ParallelScavengeHeap::ParStrongRootsScope psrs;
   93.81  
   93.82        GCTaskQueue* q = GCTaskQueue::create();
   93.83 @@ -429,36 +439,41 @@
   93.84  
   93.85      // Process reference objects discovered during scavenge
   93.86      {
   93.87 +      GCTraceTime tm("References", false, false, &_gc_timer);
   93.88 +
   93.89        reference_processor()->setup_policy(false); // not always_clear
   93.90        reference_processor()->set_active_mt_degree(active_workers);
   93.91        PSKeepAliveClosure keep_alive(promotion_manager);
   93.92        PSEvacuateFollowersClosure evac_followers(promotion_manager);
   93.93 +      ReferenceProcessorStats stats;
   93.94        if (reference_processor()->processing_is_mt()) {
   93.95          PSRefProcTaskExecutor task_executor;
   93.96 -        reference_processor()->process_discovered_references(
   93.97 -          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor);
   93.98 +        stats = reference_processor()->process_discovered_references(
   93.99 +          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
  93.100 +          &_gc_timer);
  93.101        } else {
  93.102 -        reference_processor()->process_discovered_references(
  93.103 -          &_is_alive_closure, &keep_alive, &evac_followers, NULL);
  93.104 +        stats = reference_processor()->process_discovered_references(
  93.105 +          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
  93.106 +      }
  93.107 +
  93.108 +      _gc_tracer.report_gc_reference_stats(stats);
  93.109 +
  93.110 +      // Enqueue reference objects discovered during scavenge.
  93.111 +      if (reference_processor()->processing_is_mt()) {
  93.112 +        PSRefProcTaskExecutor task_executor;
  93.113 +        reference_processor()->enqueue_discovered_references(&task_executor);
  93.114 +      } else {
  93.115 +        reference_processor()->enqueue_discovered_references(NULL);
  93.116        }
  93.117      }
  93.118  
  93.119 -    // Enqueue reference objects discovered during scavenge.
  93.120 -    if (reference_processor()->processing_is_mt()) {
  93.121 -      PSRefProcTaskExecutor task_executor;
  93.122 -      reference_processor()->enqueue_discovered_references(&task_executor);
  93.123 -    } else {
  93.124 -      reference_processor()->enqueue_discovered_references(NULL);
  93.125 -    }
  93.126 -
  93.127 +    GCTraceTime tm("StringTable", false, false, &_gc_timer);
  93.128      // Unlink any dead interned Strings and process the remaining live ones.
  93.129      PSScavengeRootsClosure root_closure(promotion_manager);
  93.130      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
  93.131  
  93.132      // Finally, flush the promotion_manager's labs, and deallocate its stacks.
  93.133 -    PSPromotionManager::post_scavenge();
  93.134 -
  93.135 -    promotion_failure_occurred = promotion_failed();
  93.136 +    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
  93.137      if (promotion_failure_occurred) {
  93.138        clean_up_failed_promotion();
  93.139        if (PrintGC) {
  93.140 @@ -473,8 +488,6 @@
  93.141  
  93.142      if (!promotion_failure_occurred) {
  93.143        // Swap the survivor spaces.
  93.144 -
  93.145 -
  93.146        young_gen->eden_space()->clear(SpaceDecorator::Mangle);
  93.147        young_gen->from_space()->clear(SpaceDecorator::Mangle);
  93.148        young_gen->swap_spaces();
  93.149 @@ -612,7 +625,11 @@
  93.150  
  93.151      NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
  93.152  
  93.153 -    CodeCache::prune_scavenge_root_nmethods();
  93.154 +    {
  93.155 +      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);
  93.156 +
  93.157 +      CodeCache::prune_scavenge_root_nmethods();
  93.158 +    }
  93.159  
  93.160      // Re-verify object start arrays
  93.161      if (VerifyObjectStartArray &&
  93.162 @@ -652,6 +669,8 @@
  93.163    }
  93.164  
  93.165    heap->print_heap_after_gc();
  93.166 +  heap->trace_heap_after_gc(&_gc_tracer);
  93.167 +  _gc_tracer.report_tenuring_threshold(tenuring_threshold());
  93.168  
  93.169    if (ZapUnusedHeapArea) {
  93.170      young_gen->eden_space()->check_mangled_unused_area_complete();
  93.171 @@ -672,6 +691,11 @@
  93.172    ParallelTaskTerminator::print_termination_counts();
  93.173  #endif
  93.174  
  93.175 +
  93.176 +  _gc_timer.register_gc_end(os::elapsed_counter());
  93.177 +
  93.178 +  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
  93.179 +
  93.180    return !promotion_failure_occurred;
  93.181  }
  93.182  
  93.183 @@ -681,7 +705,6 @@
  93.184  void PSScavenge::clean_up_failed_promotion() {
  93.185    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  93.186    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  93.187 -  assert(promotion_failed(), "Sanity");
  93.188  
  93.189    PSYoungGen* young_gen = heap->young_gen();
  93.190  
  93.191 @@ -706,7 +729,6 @@
  93.192      // Clear the preserved mark and oop stack caches.
  93.193      _preserved_mark_stack.clear(true);
  93.194      _preserved_oop_stack.clear(true);
  93.195 -    _promotion_failed = false;
  93.196    }
  93.197  
  93.198    // Reset the PromotionFailureALot counters.
  93.199 @@ -717,11 +739,10 @@
  93.200  // fails. Some markOops will need preservation, some will not. Note
  93.201  // that the entire eden is traversed after a failed promotion, with
  93.202  // all forwarded headers replaced by the default markOop. This means
  93.203 -// it is not neccessary to preserve most markOops.
  93.204 +// it is not necessary to preserve most markOops.
  93.205  void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  93.206 -  _promotion_failed = true;
  93.207    if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
  93.208 -    // Should use per-worker private stakcs hetre rather than
  93.209 +    // Should use per-worker private stacks here rather than
  93.210      // locking a common pair of stacks.
  93.211      ThreadCritical tc;
  93.212      _preserved_oop_stack.push(obj);
    94.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Fri Jun 07 09:33:01 2013 -0700
    94.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.hpp	Mon Jun 10 11:30:51 2013 +0200
    94.3 @@ -1,5 +1,5 @@
    94.4  /*
    94.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    94.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    94.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    94.8   *
    94.9   * This code is free software; you can redistribute it and/or modify it
   94.10 @@ -28,6 +28,7 @@
   94.11  #include "gc_implementation/parallelScavenge/cardTableExtension.hpp"
   94.12  #include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
   94.13  #include "gc_implementation/shared/collectorCounters.hpp"
   94.14 +#include "gc_implementation/shared/gcTrace.hpp"
   94.15  #include "memory/allocation.hpp"
   94.16  #include "oops/oop.hpp"
   94.17  #include "utilities/stack.hpp"
   94.18 @@ -37,8 +38,10 @@
   94.19  class OopStack;
   94.20  class ReferenceProcessor;
   94.21  class ParallelScavengeHeap;
   94.22 +class ParallelScavengeTracer;
   94.23  class PSIsAliveClosure;
   94.24  class PSRefProcTaskExecutor;
   94.25 +class STWGCTimer;
   94.26  
   94.27  class PSScavenge: AllStatic {
   94.28    friend class PSIsAliveClosure;
   94.29 @@ -68,6 +71,8 @@
   94.30    static bool                 _survivor_overflow;    // Overflow this collection
   94.31    static uint                 _tenuring_threshold;   // tenuring threshold for next scavenge
   94.32    static elapsedTimer         _accumulated_time;     // total time spent on scavenge
   94.33 +  static STWGCTimer           _gc_timer;             // GC time book keeper
   94.34 +  static ParallelScavengeTracer _gc_tracer;          // GC tracing
   94.35    // The lowest address possible for the young_gen.
   94.36    // This is used to decide if an oop should be scavenged,
   94.37    // cards should be marked, etc.
   94.38 @@ -77,7 +82,6 @@
   94.39    static Stack<markOop, mtGC> _preserved_mark_stack; // List of marks to be restored after failed promotion
   94.40    static Stack<oop, mtGC>     _preserved_oop_stack;  // List of oops that need their mark restored.
   94.41    static CollectorCounters*   _counters;             // collector performance counters
   94.42 -  static bool                 _promotion_failed;
   94.43  
   94.44    static void clean_up_failed_promotion();
   94.45  
   94.46 @@ -93,7 +97,6 @@
   94.47    // Accessors
   94.48    static uint             tenuring_threshold()  { return _tenuring_threshold; }
   94.49    static elapsedTimer*    accumulated_time()    { return &_accumulated_time; }
   94.50 -  static bool             promotion_failed()    { return _promotion_failed; }
   94.51    static int              consecutive_skipped_scavenges()
   94.52      { return _consecutive_skipped_scavenges; }
   94.53  
    95.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    95.2 +++ b/src/share/vm/gc_implementation/shared/copyFailedInfo.hpp	Mon Jun 10 11:30:51 2013 +0200
    95.3 @@ -0,0 +1,90 @@
    95.4 +/*
    95.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    95.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    95.7 + *
    95.8 + * This code is free software; you can redistribute it and/or modify it
    95.9 + * under the terms of the GNU General Public License version 2 only, as
   95.10 + * published by the Free Software Foundation.
   95.11 + *
   95.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   95.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   95.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   95.15 + * version 2 for more details (a copy is included in the LICENSE file that
   95.16 + * accompanied this code).
   95.17 + *
   95.18 + * You should have received a copy of the GNU General Public License version
   95.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   95.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   95.21 + *
   95.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   95.23 + * or visit www.oracle.com if you need additional information or have any
   95.24 + * questions.
   95.25 + *
   95.26 + */
   95.27 +
   95.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
   95.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP
   95.30 +
   95.31 +#include "runtime/thread.hpp"
   95.32 +#include "utilities/globalDefinitions.hpp"
   95.33 +
   95.34 +class CopyFailedInfo : public CHeapObj<mtGC> {
   95.35 +  size_t    _first_size;
   95.36 +  size_t    _smallest_size;
   95.37 +  size_t    _total_size;
   95.38 +  uint      _count;
   95.39 +
   95.40 + public:
   95.41 +  CopyFailedInfo() : _first_size(0), _smallest_size(0), _total_size(0), _count(0) {}
   95.42 +
   95.43 +  virtual void register_copy_failure(size_t size) {
   95.44 +    if (_first_size == 0) {
   95.45 +      _first_size = size;
   95.46 +      _smallest_size = size;
   95.47 +    } else if (size < _smallest_size) {
   95.48 +      _smallest_size = size;
   95.49 +    }
   95.50 +    _total_size += size;
   95.51 +    _count++;
   95.52 +  }
   95.53 +
   95.54 +  virtual void reset() {
   95.55 +    _first_size = 0;
   95.56 +    _smallest_size = 0;
   95.57 +    _total_size = 0;
   95.58 +    _count = 0;
   95.59 +  }
   95.60 +
   95.61 +  bool has_failed() const { return _count != 0; }
   95.62 +  size_t first_size() const { return _first_size; }
   95.63 +  size_t smallest_size() const { return _smallest_size; }
   95.64 +  size_t total_size() const { return _total_size; }
   95.65 +  uint failed_count() const { return _count; }
   95.66 +};
   95.67 +
   95.68 +class PromotionFailedInfo : public CopyFailedInfo {
   95.69 +  OSThread* _thread;
   95.70 +
   95.71 + public:
   95.72 +  PromotionFailedInfo() : CopyFailedInfo(), _thread(NULL) {}
   95.73 +
   95.74 +  void register_copy_failure(size_t size) {
   95.75 +    CopyFailedInfo::register_copy_failure(size);
   95.76 +    if (_thread == NULL) {
   95.77 +      _thread = Thread::current()->osthread();
   95.78 +    } else {
   95.79 +      assert(_thread == Thread::current()->osthread(), "The PromotionFailedInfo should be thread local.");
   95.80 +    }
   95.81 +  }
   95.82 +
   95.83 +  void reset() {
   95.84 +    CopyFailedInfo::reset();
   95.85 +    _thread = NULL;
   95.86 +  }
   95.87 +
   95.88 +  OSThread* thread() const { return _thread; }
   95.89 +};
   95.90 +
   95.91 +class EvacuationFailedInfo : public CopyFailedInfo {};
   95.92 +
   95.93 +#endif /* SHARE_VM_GC_IMPLEMENTATION_SHARED_COPYFAILEDINFO_HPP */
    96.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    96.2 +++ b/src/share/vm/gc_implementation/shared/gcHeapSummary.hpp	Mon Jun 10 11:30:51 2013 +0200
    96.3 @@ -0,0 +1,142 @@
    96.4 +/*
    96.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    96.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.7 + *
    96.8 + * This code is free software; you can redistribute it and/or modify it
    96.9 + * under the terms of the GNU General Public License version 2 only, as
   96.10 + * published by the Free Software Foundation.
   96.11 + *
   96.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   96.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   96.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   96.15 + * version 2 for more details (a copy is included in the LICENSE file that
   96.16 + * accompanied this code).
   96.17 + *
   96.18 + * You should have received a copy of the GNU General Public License version
   96.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   96.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   96.21 + *
   96.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   96.23 + * or visit www.oracle.com if you need additional information or have any
   96.24 + * questions.
   96.25 + *
   96.26 + */
   96.27 +
   96.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
   96.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
   96.30 +
   96.31 +#include "memory/allocation.hpp"
   96.32 +
   96.33 +class VirtualSpaceSummary : public StackObj {
   96.34 +  HeapWord* _start;
   96.35 +  HeapWord* _committed_end;
   96.36 +  HeapWord* _reserved_end;
   96.37 +public:
   96.38 +  VirtualSpaceSummary() :
   96.39 +      _start(NULL), _committed_end(NULL), _reserved_end(NULL) { }
   96.40 +  VirtualSpaceSummary(HeapWord* start, HeapWord* committed_end, HeapWord* reserved_end) :
   96.41 +      _start(start), _committed_end(committed_end), _reserved_end(reserved_end) { }
   96.42 +
   96.43 +  HeapWord* start() const { return _start; }
   96.44 +  HeapWord* committed_end() const { return _committed_end; }
   96.45 +  HeapWord* reserved_end() const { return _reserved_end; }
   96.46 +  size_t committed_size() const { return (uintptr_t)_committed_end - (uintptr_t)_start;  }
   96.47 +  size_t reserved_size() const { return (uintptr_t)_reserved_end - (uintptr_t)_start; }
   96.48 +};
   96.49 +
   96.50 +class SpaceSummary : public StackObj {
   96.51 +  HeapWord* _start;
   96.52 +  HeapWord* _end;
   96.53 +  size_t    _used;
   96.54 +public:
   96.55 +  SpaceSummary() :
   96.56 +      _start(NULL), _end(NULL), _used(0) { }
   96.57 +  SpaceSummary(HeapWord* start, HeapWord* end, size_t used) :
   96.58 +      _start(start), _end(end), _used(used) { }
   96.59 +
   96.60 +  HeapWord* start() const { return _start; }
   96.61 +  HeapWord* end() const { return _end; }
   96.62 +  size_t used() const { return _used; }
   96.63 +  size_t size() const { return (uintptr_t)_end - (uintptr_t)_start; }
   96.64 +};
   96.65 +
   96.66 +class MetaspaceSizes : public StackObj {
   96.67 +  size_t _capacity;
   96.68 +  size_t _used;
   96.69 +  size_t _reserved;
   96.70 +
   96.71 + public:
   96.72 +  MetaspaceSizes() : _capacity(0), _used(0), _reserved(0) {}
   96.73 +  MetaspaceSizes(size_t capacity, size_t used, size_t reserved) :
   96.74 +    _capacity(capacity), _used(used), _reserved(reserved) {}
   96.75 +
   96.76 +  size_t capacity() const { return _capacity; }
   96.77 +  size_t used() const { return _used; }
   96.78 +  size_t reserved() const { return _reserved; }
   96.79 +};
   96.80 +
   96.81 +class GCHeapSummary;
   96.82 +class PSHeapSummary;
   96.83 +
   96.84 +class GCHeapSummaryVisitor {
   96.85 + public:
   96.86 +  virtual void visit(const GCHeapSummary* heap_summary) const = 0;
   96.87 +  virtual void visit(const PSHeapSummary* heap_summary) const {}
   96.88 +};
   96.89 +
   96.90 +class GCHeapSummary : public StackObj {
   96.91 +  VirtualSpaceSummary _heap;
   96.92 +  size_t _used;
   96.93 +
   96.94 + public:
   96.95 +   GCHeapSummary() :
   96.96 +       _heap(), _used(0) { }
   96.97 +   GCHeapSummary(VirtualSpaceSummary& heap_space, size_t used) :
   96.98 +       _heap(heap_space), _used(used) { }
   96.99 +
  96.100 +  const VirtualSpaceSummary& heap() const { return _heap; }
  96.101 +  size_t used() const { return _used; }
  96.102 +
  96.103 +   virtual void accept(GCHeapSummaryVisitor* visitor) const {
  96.104 +     visitor->visit(this);
  96.105 +   }
  96.106 +};
  96.107 +
  96.108 +class PSHeapSummary : public GCHeapSummary {
  96.109 +  VirtualSpaceSummary  _old;
  96.110 +  SpaceSummary         _old_space;
  96.111 +  VirtualSpaceSummary  _young;
  96.112 +  SpaceSummary         _eden;
  96.113 +  SpaceSummary         _from;
  96.114 +  SpaceSummary         _to;
  96.115 + public:
  96.116 +   PSHeapSummary(VirtualSpaceSummary& heap_space, size_t heap_used, VirtualSpaceSummary old, SpaceSummary old_space, VirtualSpaceSummary young, SpaceSummary eden, SpaceSummary from, SpaceSummary to) :
  96.117 +       GCHeapSummary(heap_space, heap_used), _old(old), _old_space(old_space), _young(young), _eden(eden), _from(from), _to(to) { }
  96.118 +   const VirtualSpaceSummary& old() const { return _old; }
  96.119 +   const SpaceSummary& old_space() const { return _old_space; }
  96.120 +   const VirtualSpaceSummary& young() const { return _young; }
  96.121 +   const SpaceSummary& eden() const { return _eden; }
  96.122 +   const SpaceSummary& from() const { return _from; }
  96.123 +   const SpaceSummary& to() const { return _to; }
  96.124 +
  96.125 +   virtual void accept(GCHeapSummaryVisitor* visitor) const {
  96.126 +     visitor->visit(this);
  96.127 +   }
  96.128 +};
  96.129 +
  96.130 +class MetaspaceSummary : public StackObj {
  96.131 +  MetaspaceSizes _meta_space;
  96.132 +  MetaspaceSizes _data_space;
  96.133 +  MetaspaceSizes _class_space;
  96.134 +
  96.135 + public:
  96.136 +  MetaspaceSummary() : _meta_space(), _data_space(), _class_space() {}
  96.137 +  MetaspaceSummary(const MetaspaceSizes& meta_space, const MetaspaceSizes& data_space, const MetaspaceSizes& class_space) :
  96.138 +       _meta_space(meta_space), _data_space(data_space), _class_space(class_space) { }
  96.139 +
  96.140 +  const MetaspaceSizes& meta_space() const { return _meta_space; }
  96.141 +  const MetaspaceSizes& data_space() const { return _data_space; }
  96.142 +  const MetaspaceSizes& class_space() const { return _class_space; }
  96.143 +};
  96.144 +
  96.145 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCHEAPSUMMARY_HPP
    97.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    97.2 +++ b/src/share/vm/gc_implementation/shared/gcTimer.cpp	Mon Jun 10 11:30:51 2013 +0200
    97.3 @@ -0,0 +1,374 @@
    97.4 +/*
    97.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    97.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    97.7 + *
    97.8 + * This code is free software; you can redistribute it and/or modify it
    97.9 + * under the terms of the GNU General Public License version 2 only, as
   97.10 + * published by the Free Software Foundation.
   97.11 + *
   97.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   97.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   97.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   97.15 + * version 2 for more details (a copy is included in the LICENSE file that
   97.16 + * accompanied this code).
   97.17 + *
   97.18 + * You should have received a copy of the GNU General Public License version
   97.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   97.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   97.21 + *
   97.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   97.23 + * or visit www.oracle.com if you need additional information or have any
   97.24 + * questions.
   97.25 + *
   97.26 + */
   97.27 +
   97.28 +#include "precompiled.hpp"
   97.29 +#include "gc_implementation/shared/gcTimer.hpp"
   97.30 +#include "utilities/growableArray.hpp"
   97.31 +
   97.32 +void GCTimer::register_gc_start(jlong time) {
   97.33 +  _time_partitions.clear();
   97.34 +  _gc_start = time;
   97.35 +}
   97.36 +
   97.37 +void GCTimer::register_gc_end(jlong time) {
   97.38 +  assert(!_time_partitions.has_active_phases(),
   97.39 +      "We should have ended all started phases, before ending the GC");
   97.40 +
   97.41 +  _gc_end = time;
   97.42 +}
   97.43 +
   97.44 +void GCTimer::register_gc_pause_start(const char* name, jlong time) {
   97.45 +  _time_partitions.report_gc_phase_start(name, time);
   97.46 +}
   97.47 +
   97.48 +void GCTimer::register_gc_pause_end(jlong time) {
   97.49 +  _time_partitions.report_gc_phase_end(time);
   97.50 +}
   97.51 +
   97.52 +void GCTimer::register_gc_phase_start(const char* name, jlong time) {
   97.53 +  _time_partitions.report_gc_phase_start(name, time);
   97.54 +}
   97.55 +
   97.56 +void GCTimer::register_gc_phase_end(jlong time) {
   97.57 +  _time_partitions.report_gc_phase_end(time);
   97.58 +}
   97.59 +
   97.60 +
   97.61 +void STWGCTimer::register_gc_start(jlong time) {
   97.62 +  GCTimer::register_gc_start(time);
   97.63 +  register_gc_pause_start("GC Pause", time);
   97.64 +}
   97.65 +
   97.66 +void STWGCTimer::register_gc_end(jlong time) {
   97.67 +  register_gc_pause_end(time);
   97.68 +  GCTimer::register_gc_end(time);
   97.69 +}
   97.70 +
   97.71 +void ConcurrentGCTimer::register_gc_pause_start(const char* name, jlong time) {
   97.72 +  GCTimer::register_gc_pause_start(name, time);
   97.73 +}
   97.74 +
   97.75 +void ConcurrentGCTimer::register_gc_pause_end(jlong time) {
   97.76 +  GCTimer::register_gc_pause_end(time);
   97.77 +}
   97.78 +
   97.79 +void PhasesStack::clear() {
   97.80 +  _next_phase_level = 0;
   97.81 +}
   97.82 +
   97.83 +void PhasesStack::push(int phase_index) {
   97.84 +  assert(_next_phase_level < PHASE_LEVELS, "Overflow");
   97.85 +
   97.86 +  _phase_indices[_next_phase_level] = phase_index;
   97.87 +
   97.88 +  _next_phase_level++;
   97.89 +}
   97.90 +
   97.91 +int PhasesStack::pop() {
   97.92 +  assert(_next_phase_level > 0, "Underflow");
   97.93 +
   97.94 +  _next_phase_level--;
   97.95 +
   97.96 +  return _phase_indices[_next_phase_level];
   97.97 +}
   97.98 +
   97.99 +int PhasesStack::count() const {
  97.100 +  return _next_phase_level;
  97.101 +}
  97.102 +
  97.103 +
  97.104 +TimePartitions::TimePartitions() {
  97.105 +  _phases = new (ResourceObj::C_HEAP, mtGC) GrowableArray<PausePhase>(INITIAL_CAPACITY, true, mtGC);
  97.106 +  clear();
  97.107 +}
  97.108 +
  97.109 +TimePartitions::~TimePartitions() {
  97.110 +  delete _phases;
  97.111 +  _phases = NULL;
  97.112 +}
  97.113 +
  97.114 +void TimePartitions::clear() {
  97.115 +  _phases->clear();
  97.116 +  _active_phases.clear();
  97.117 +  _sum_of_pauses = 0;
  97.118 +  _longest_pause = 0;
  97.119 +}
  97.120 +
  97.121 +void TimePartitions::report_gc_phase_start(const char* name, jlong time) {
  97.122 +  assert(_phases->length() <= 1000, "Too many recored phases?");
  97.123 +
  97.124 +  int level = _active_phases.count();
  97.125 +
  97.126 +  PausePhase phase;
  97.127 +  phase.set_level(level);
  97.128 +  phase.set_name(name);
  97.129 +  phase.set_start(time);
  97.130 +
  97.131 +  int index = _phases->append(phase);
  97.132 +
  97.133 +  _active_phases.push(index);
  97.134 +}
  97.135 +
  97.136 +void TimePartitions::update_statistics(GCPhase* phase) {
  97.137 +  // FIXME: This should only be done for pause phases
  97.138 +  if (phase->level() == 0) {
  97.139 +    jlong pause = phase->end() - phase->start();
  97.140 +    _sum_of_pauses += pause;
  97.141 +    _longest_pause = MAX2(pause, _longest_pause);
  97.142 +  }
  97.143 +}
  97.144 +
  97.145 +void TimePartitions::report_gc_phase_end(jlong time) {
  97.146 +  int phase_index = _active_phases.pop();
  97.147 +  GCPhase* phase = _phases->adr_at(phase_index);
  97.148 +  phase->set_end(time);
  97.149 +  update_statistics(phase);
  97.150 +}
  97.151 +
  97.152 +int TimePartitions::num_phases() const {
  97.153 +  return _phases->length();
  97.154 +}
  97.155 +
  97.156 +GCPhase* TimePartitions::phase_at(int index) const {
  97.157 +  assert(index >= 0, "Out of bounds");
  97.158 +  assert(index < _phases->length(), "Out of bounds");
  97.159 +
  97.160 +  return _phases->adr_at(index);
  97.161 +}
  97.162 +
  97.163 +jlong TimePartitions::sum_of_pauses() {
  97.164 +  return _sum_of_pauses;
  97.165 +}
  97.166 +
  97.167 +jlong TimePartitions::longest_pause() {
  97.168 +  return _longest_pause;
  97.169 +}
  97.170 +
  97.171 +bool TimePartitions::has_active_phases() {
  97.172 +  return _active_phases.count() > 0;
  97.173 +}
  97.174 +
  97.175 +bool TimePartitionPhasesIterator::has_next() {
  97.176 +  return _next < _time_partitions->num_phases();
  97.177 +}
  97.178 +
  97.179 +GCPhase* TimePartitionPhasesIterator::next() {
  97.180 +  assert(has_next(), "Must have phases left");
  97.181 +  return _time_partitions->phase_at(_next++);
  97.182 +}
  97.183 +
  97.184 +
  97.185 +/////////////// Unit tests ///////////////
  97.186 +
  97.187 +#ifndef PRODUCT
  97.188 +
  97.189 +class TimePartitionPhasesIteratorTest {
  97.190 + public:
  97.191 +  static void all() {
  97.192 +    one_pause();
  97.193 +    two_pauses();
  97.194 +    one_sub_pause_phase();
  97.195 +    many_sub_pause_phases();
  97.196 +    many_sub_pause_phases2();
  97.197 +    max_nested_pause_phases();
  97.198 +  }
  97.199 +
  97.200 +  static void validate_pause_phase(GCPhase* phase, int level, const char* name, jlong start, jlong end) {
  97.201 +    assert(phase->level() == level, "Incorrect level");
  97.202 +    assert(strcmp(phase->name(), name) == 0, "Incorrect name");
  97.203 +    assert(phase->start() == start, "Incorrect start");
  97.204 +    assert(phase->end() == end, "Incorrect end");
  97.205 +  }
  97.206 +
  97.207 +  static void one_pause() {
  97.208 +    TimePartitions time_partitions;
  97.209 +    time_partitions.report_gc_phase_start("PausePhase", 2);
  97.210 +    time_partitions.report_gc_phase_end(8);
  97.211 +
  97.212 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.213 +
  97.214 +    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 8);
  97.215 +    assert(time_partitions.sum_of_pauses() == 8-2, "Incorrect");
  97.216 +    assert(time_partitions.longest_pause() == 8-2, "Incorrect");
  97.217 +
  97.218 +    assert(!iter.has_next(), "Too many elements");
  97.219 +  }
  97.220 +
  97.221 +  static void two_pauses() {
  97.222 +    TimePartitions time_partitions;
  97.223 +    time_partitions.report_gc_phase_start("PausePhase1", 2);
  97.224 +    time_partitions.report_gc_phase_end(3);
  97.225 +    time_partitions.report_gc_phase_start("PausePhase2", 4);
  97.226 +    time_partitions.report_gc_phase_end(6);
  97.227 +
  97.228 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.229 +
  97.230 +    validate_pause_phase(iter.next(), 0, "PausePhase1", 2, 3);
  97.231 +    validate_pause_phase(iter.next(), 0, "PausePhase2", 4, 6);
  97.232 +
  97.233 +    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
  97.234 +    assert(time_partitions.longest_pause() == 2, "Incorrect");
  97.235 +
  97.236 +    assert(!iter.has_next(), "Too many elements");
  97.237 +  }
  97.238 +
  97.239 +  static void one_sub_pause_phase() {
  97.240 +    TimePartitions time_partitions;
  97.241 +    time_partitions.report_gc_phase_start("PausePhase", 2);
  97.242 +    time_partitions.report_gc_phase_start("SubPhase", 3);
  97.243 +    time_partitions.report_gc_phase_end(4);
  97.244 +    time_partitions.report_gc_phase_end(5);
  97.245 +
  97.246 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.247 +
  97.248 +    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 5);
  97.249 +    validate_pause_phase(iter.next(), 1, "SubPhase", 3, 4);
  97.250 +
  97.251 +    assert(time_partitions.sum_of_pauses() == 3, "Incorrect");
  97.252 +    assert(time_partitions.longest_pause() == 3, "Incorrect");
  97.253 +
  97.254 +    assert(!iter.has_next(), "Too many elements");
  97.255 +  }
  97.256 +
  97.257 +  static void max_nested_pause_phases() {
  97.258 +    TimePartitions time_partitions;
  97.259 +    time_partitions.report_gc_phase_start("PausePhase", 2);
  97.260 +    time_partitions.report_gc_phase_start("SubPhase1", 3);
  97.261 +    time_partitions.report_gc_phase_start("SubPhase2", 4);
  97.262 +    time_partitions.report_gc_phase_start("SubPhase3", 5);
  97.263 +    time_partitions.report_gc_phase_end(6);
  97.264 +    time_partitions.report_gc_phase_end(7);
  97.265 +    time_partitions.report_gc_phase_end(8);
  97.266 +    time_partitions.report_gc_phase_end(9);
  97.267 +
  97.268 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.269 +
  97.270 +    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 9);
  97.271 +    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
  97.272 +    validate_pause_phase(iter.next(), 2, "SubPhase2", 4, 7);
  97.273 +    validate_pause_phase(iter.next(), 3, "SubPhase3", 5, 6);
  97.274 +
  97.275 +    assert(time_partitions.sum_of_pauses() == 7, "Incorrect");
  97.276 +    assert(time_partitions.longest_pause() == 7, "Incorrect");
  97.277 +
  97.278 +    assert(!iter.has_next(), "Too many elements");
  97.279 +  }
  97.280 +
  97.281 +  static void many_sub_pause_phases() {
  97.282 +    TimePartitions time_partitions;
  97.283 +    time_partitions.report_gc_phase_start("PausePhase", 2);
  97.284 +
  97.285 +    time_partitions.report_gc_phase_start("SubPhase1", 3);
  97.286 +    time_partitions.report_gc_phase_end(4);
  97.287 +    time_partitions.report_gc_phase_start("SubPhase2", 5);
  97.288 +    time_partitions.report_gc_phase_end(6);
  97.289 +    time_partitions.report_gc_phase_start("SubPhase3", 7);
  97.290 +    time_partitions.report_gc_phase_end(8);
  97.291 +    time_partitions.report_gc_phase_start("SubPhase4", 9);
  97.292 +    time_partitions.report_gc_phase_end(10);
  97.293 +
  97.294 +    time_partitions.report_gc_phase_end(11);
  97.295 +
  97.296 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.297 +
  97.298 +    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 11);
  97.299 +    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 4);
  97.300 +    validate_pause_phase(iter.next(), 1, "SubPhase2", 5, 6);
  97.301 +    validate_pause_phase(iter.next(), 1, "SubPhase3", 7, 8);
  97.302 +    validate_pause_phase(iter.next(), 1, "SubPhase4", 9, 10);
  97.303 +
  97.304 +    assert(time_partitions.sum_of_pauses() == 9, "Incorrect");
  97.305 +    assert(time_partitions.longest_pause() == 9, "Incorrect");
  97.306 +
  97.307 +    assert(!iter.has_next(), "Too many elements");
  97.308 +  }
  97.309 +
  97.310 +  static void many_sub_pause_phases2() {
  97.311 +    TimePartitions time_partitions;
  97.312 +    time_partitions.report_gc_phase_start("PausePhase", 2);
  97.313 +
  97.314 +    time_partitions.report_gc_phase_start("SubPhase1", 3);
  97.315 +    time_partitions.report_gc_phase_start("SubPhase11", 4);
  97.316 +    time_partitions.report_gc_phase_end(5);
  97.317 +    time_partitions.report_gc_phase_start("SubPhase12", 6);
  97.318 +    time_partitions.report_gc_phase_end(7);
  97.319 +    time_partitions.report_gc_phase_end(8);
  97.320 +    time_partitions.report_gc_phase_start("SubPhase2", 9);
  97.321 +    time_partitions.report_gc_phase_start("SubPhase21", 10);
  97.322 +    time_partitions.report_gc_phase_end(11);
  97.323 +    time_partitions.report_gc_phase_start("SubPhase22", 12);
  97.324 +    time_partitions.report_gc_phase_end(13);
  97.325 +    time_partitions.report_gc_phase_end(14);
  97.326 +    time_partitions.report_gc_phase_start("SubPhase3", 15);
  97.327 +    time_partitions.report_gc_phase_end(16);
  97.328 +
  97.329 +    time_partitions.report_gc_phase_end(17);
  97.330 +
  97.331 +    TimePartitionPhasesIterator iter(&time_partitions);
  97.332 +
  97.333 +    validate_pause_phase(iter.next(), 0, "PausePhase", 2, 17);
  97.334 +    validate_pause_phase(iter.next(), 1, "SubPhase1", 3, 8);
  97.335 +    validate_pause_phase(iter.next(), 2, "SubPhase11", 4, 5);
  97.336 +    validate_pause_phase(iter.next(), 2, "SubPhase12", 6, 7);
  97.337 +    validate_pause_phase(iter.next(), 1, "SubPhase2", 9, 14);
  97.338 +    validate_pause_phase(iter.next(), 2, "SubPhase21", 10, 11);
  97.339 +    validate_pause_phase(iter.next(), 2, "SubPhase22", 12, 13);
  97.340 +    validate_pause_phase(iter.next(), 1, "SubPhase3", 15, 16);
  97.341 +
  97.342 +    assert(time_partitions.sum_of_pauses() == 15, "Incorrect");
  97.343 +    assert(time_partitions.longest_pause() == 15, "Incorrect");
  97.344 +
  97.345 +    assert(!iter.has_next(), "Too many elements");
  97.346 +  }
  97.347 +};
  97.348 +
  97.349 +class GCTimerTest {
  97.350 +public:
  97.351 +  static void all() {
  97.352 +    gc_start();
  97.353 +    gc_end();
  97.354 +  }
  97.355 +
  97.356 +  static void gc_start() {
  97.357 +    GCTimer gc_timer;
  97.358 +    gc_timer.register_gc_start(1);
  97.359 +
  97.360 +    assert(gc_timer.gc_start() == 1, "Incorrect");
  97.361 +  }
  97.362 +
  97.363 +  static void gc_end() {
  97.364 +    GCTimer gc_timer;
  97.365 +    gc_timer.register_gc_start(1);
  97.366 +    gc_timer.register_gc_end(2);
  97.367 +
  97.368 +    assert(gc_timer.gc_end() == 2, "Incorrect");
  97.369 +  }
  97.370 +};
  97.371 +
  97.372 +void GCTimerAllTest::all() {
  97.373 +  GCTimerTest::all();
  97.374 +  TimePartitionPhasesIteratorTest::all();
  97.375 +}
  97.376 +
  97.377 +#endif
    98.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    98.2 +++ b/src/share/vm/gc_implementation/shared/gcTimer.hpp	Mon Jun 10 11:30:51 2013 +0200
    98.3 @@ -0,0 +1,195 @@
    98.4 +/*
    98.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    98.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    98.7 + *
    98.8 + * This code is free software; you can redistribute it and/or modify it
    98.9 + * under the terms of the GNU General Public License version 2 only, as
   98.10 + * published by the Free Software Foundation.
   98.11 + *
   98.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   98.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   98.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   98.15 + * version 2 for more details (a copy is included in the LICENSE file that
   98.16 + * accompanied this code).
   98.17 + *
   98.18 + * You should have received a copy of the GNU General Public License version
   98.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   98.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   98.21 + *
   98.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   98.23 + * or visit www.oracle.com if you need additional information or have any
   98.24 + * questions.
   98.25 + *
   98.26 + */
   98.27 +
   98.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
   98.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
   98.30 +
   98.31 +#include "memory/allocation.hpp"
   98.32 +#include "prims/jni_md.h"
   98.33 +#include "utilities/macros.hpp"
   98.34 +
   98.35 +class ConcurrentPhase;
   98.36 +class GCPhase;
   98.37 +class PausePhase;
   98.38 +
   98.39 +template <class E> class GrowableArray;
   98.40 +
   98.41 +class PhaseVisitor {
   98.42 + public:
   98.43 +  virtual void visit(GCPhase* phase) = 0;
   98.44 +  virtual void visit(PausePhase* phase) { visit((GCPhase*)phase); }
   98.45 +  virtual void visit(ConcurrentPhase* phase) { visit((GCPhase*)phase); }
   98.46 +};
   98.47 +
   98.48 +class GCPhase {
   98.49 +  const char* _name;
   98.50 +  int _level;
   98.51 +  jlong _start;
   98.52 +  jlong _end;
   98.53 +
   98.54 + public:
   98.55 +  void set_name(const char* name) { _name = name; }
   98.56 +  const char* name() { return _name; }
   98.57 +
   98.58 +  int level() { return _level; }
   98.59 +  void set_level(int level) { _level = level; }
   98.60 +
   98.61 +  jlong start() { return _start; }
   98.62 +  void set_start(jlong time) { _start = time; }
   98.63 +
   98.64 +  jlong end() { return _end; }
   98.65 +  void set_end(jlong time) { _end = time; }
   98.66 +
   98.67 +  virtual void accept(PhaseVisitor* visitor) = 0;
   98.68 +};
   98.69 +
   98.70 +class PausePhase : public GCPhase {
   98.71 + public:
   98.72 +  void accept(PhaseVisitor* visitor) {
   98.73 +    visitor->visit(this);
   98.74 +  }
   98.75 +};
   98.76 +
   98.77 +class ConcurrentPhase : public GCPhase {
   98.78 +  void accept(PhaseVisitor* visitor) {
   98.79 +    visitor->visit(this);
   98.80 +  }
   98.81 +};
   98.82 +
   98.83 +class PhasesStack {
   98.84 + public:
   98.85 +  // FIXME: Temporary set to 5 (used to be 4), since Reference processing needs it.
   98.86 +  static const int PHASE_LEVELS = 5;
   98.87 +
   98.88 + private:
   98.89 +  int _phase_indices[PHASE_LEVELS];
   98.90 +  int _next_phase_level;
   98.91 +
   98.92 + public:
   98.93 +  PhasesStack() { clear(); }
   98.94 +  void clear();
   98.95 +
   98.96 +  void push(int phase_index);
   98.97 +  int pop();
   98.98 +  int count() const;
   98.99 +};
  98.100 +
  98.101 +class TimePartitions {
  98.102 +  static const int INITIAL_CAPACITY = 10;
  98.103 +
  98.104 +  // Currently we only support pause phases.
  98.105 +  GrowableArray<PausePhase>* _phases;
  98.106 +  PhasesStack _active_phases;
  98.107 +
  98.108 +  jlong _sum_of_pauses;
  98.109 +  jlong _longest_pause;
  98.110 +
  98.111 + public:
  98.112 +  TimePartitions();
  98.113 +  ~TimePartitions();
  98.114 +  void clear();
  98.115 +
  98.116 +  void report_gc_phase_start(const char* name, jlong time);
  98.117 +  void report_gc_phase_end(jlong time);
  98.118 +
  98.119 +  int num_phases() const;
  98.120 +  GCPhase* phase_at(int index) const;
  98.121 +
  98.122 +  jlong sum_of_pauses();
  98.123 +  jlong longest_pause();
  98.124 +
  98.125 +  bool has_active_phases();
  98.126 + private:
  98.127 +  void update_statistics(GCPhase* phase);
  98.128 +};
  98.129 +
  98.130 +class PhasesIterator {
  98.131 + public:
  98.132 +  virtual bool has_next() = 0;
  98.133 +  virtual GCPhase* next() = 0;
  98.134 +};
  98.135 +
  98.136 +class GCTimer : public ResourceObj {
  98.137 +  NOT_PRODUCT(friend class GCTimerTest;)
  98.138 + protected:
  98.139 +  jlong _gc_start;
  98.140 +  jlong _gc_end;
  98.141 +  TimePartitions _time_partitions;
  98.142 +
  98.143 + public:
  98.144 +  virtual void register_gc_start(jlong time);
  98.145 +  virtual void register_gc_end(jlong time);
  98.146 +
  98.147 +  void register_gc_phase_start(const char* name, jlong time);
  98.148 +  void register_gc_phase_end(jlong time);
  98.149 +
  98.150 +  jlong gc_start() { return _gc_start; }
  98.151 +  jlong gc_end() { return _gc_end; }
  98.152 +
  98.153 +  TimePartitions* time_partitions() { return &_time_partitions; }
  98.154 +
  98.155 +  long longest_pause();
  98.156 +  long sum_of_pauses();
  98.157 +
  98.158 + protected:
  98.159 +  void register_gc_pause_start(const char* name, jlong time);
  98.160 +  void register_gc_pause_end(jlong time);
  98.161 +};
  98.162 +
  98.163 +class STWGCTimer : public GCTimer {
  98.164 + public:
  98.165 +  virtual void register_gc_start(jlong time);
  98.166 +  virtual void register_gc_end(jlong time);
  98.167 +};
  98.168 +
  98.169 +class ConcurrentGCTimer : public GCTimer {
  98.170 + public:
  98.171 +  void register_gc_pause_start(const char* name, jlong time);
  98.172 +  void register_gc_pause_end(jlong time);
  98.173 +};
  98.174 +
  98.175 +class TimePartitionPhasesIterator {
  98.176 +  TimePartitions* _time_partitions;
  98.177 +  int _next;
  98.178 +
  98.179 + public:
  98.180 +  TimePartitionPhasesIterator(TimePartitions* time_partitions) : _time_partitions(time_partitions), _next(0) { }
  98.181 +
  98.182 +  virtual bool has_next();
  98.183 +  virtual GCPhase* next();
  98.184 +};
  98.185 +
  98.186 +
  98.187 +/////////////// Unit tests ///////////////
  98.188 +
  98.189 +#ifndef PRODUCT
  98.190 +
  98.191 +class GCTimerAllTest {
  98.192 + public:
  98.193 +  static void all();
  98.194 +};
  98.195 +
  98.196 +#endif
  98.197 +
  98.198 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTIMER_HPP
    99.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    99.2 +++ b/src/share/vm/gc_implementation/shared/gcTrace.cpp	Mon Jun 10 11:30:51 2013 +0200
    99.3 @@ -0,0 +1,207 @@
    99.4 +/*
    99.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    99.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.7 + *
    99.8 + * This code is free software; you can redistribute it and/or modify it
    99.9 + * under the terms of the GNU General Public License version 2 only, as
   99.10 + * published by the Free Software Foundation.
   99.11 + *
   99.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   99.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   99.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   99.15 + * version 2 for more details (a copy is included in the LICENSE file that
   99.16 + * accompanied this code).
   99.17 + *
   99.18 + * You should have received a copy of the GNU General Public License version
   99.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   99.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   99.21 + *
   99.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   99.23 + * or visit www.oracle.com if you need additional information or have any
   99.24 + * questions.
   99.25 + *
   99.26 + */
   99.27 +
   99.28 +#include "precompiled.hpp"
   99.29 +#include "gc_implementation/shared/gcHeapSummary.hpp"
   99.30 +#include "gc_implementation/shared/gcTimer.hpp"
   99.31 +#include "gc_implementation/shared/gcTrace.hpp"
   99.32 +#include "gc_implementation/shared/copyFailedInfo.hpp"
   99.33 +#include "memory/heapInspection.hpp"
   99.34 +#include "memory/referenceProcessorStats.hpp"
   99.35 +#include "utilities/globalDefinitions.hpp"
   99.36 +
   99.37 +#if INCLUDE_ALL_GCS
   99.38 +#include "gc_implementation/g1/evacuationInfo.hpp"
   99.39 +#endif
   99.40 +
   99.41 +#define assert_unset_gc_id() assert(_shared_gc_info.id() == SharedGCInfo::UNSET_GCID, "GC already started?")
   99.42 +#define assert_set_gc_id() assert(_shared_gc_info.id() != SharedGCInfo::UNSET_GCID, "GC not started?")
   99.43 +
   99.44 +static jlong GCTracer_next_gc_id = 0;
   99.45 +static GCId create_new_gc_id() {
   99.46 +  return GCTracer_next_gc_id++;
   99.47 +}
   99.48 +
   99.49 +void GCTracer::report_gc_start_impl(GCCause::Cause cause, jlong timestamp) {
   99.50 +  assert_unset_gc_id();
   99.51 +
   99.52 +  GCId gc_id = create_new_gc_id();
   99.53 +  _shared_gc_info.set_id(gc_id);
   99.54 +  _shared_gc_info.set_cause(cause);
   99.55 +  _shared_gc_info.set_start_timestamp(timestamp);
   99.56 +}
   99.57 +
   99.58 +void GCTracer::report_gc_start(GCCause::Cause cause, jlong timestamp) {
   99.59 +  assert_unset_gc_id();
   99.60 +
   99.61 +  report_gc_start_impl(cause, timestamp);
   99.62 +}
   99.63 +
   99.64 +bool GCTracer::has_reported_gc_start() const {
   99.65 +  return _shared_gc_info.id() != SharedGCInfo::UNSET_GCID;
   99.66 +}
   99.67 +
   99.68 +void GCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
   99.69 +  assert_set_gc_id();
   99.70 +
   99.71 +  _shared_gc_info.set_sum_of_pauses(time_partitions->sum_of_pauses());
   99.72 +  _shared_gc_info.set_longest_pause(time_partitions->longest_pause());
   99.73 +  _shared_gc_info.set_end_timestamp(timestamp);
   99.74 +
   99.75 +  send_phase_events(time_partitions);
   99.76 +  send_garbage_collection_event();
   99.77 +}
   99.78 +
   99.79 +void GCTracer::report_gc_end(jlong timestamp, TimePartitions* time_partitions) {
   99.80 +  assert_set_gc_id();
   99.81 +
   99.82 +  report_gc_end_impl(timestamp, time_partitions);
   99.83 +
   99.84 +  _shared_gc_info.set_id(SharedGCInfo::UNSET_GCID);
   99.85 +}
   99.86 +
   99.87 +void GCTracer::report_gc_reference_stats(const ReferenceProcessorStats& rps) const {
   99.88 +  assert_set_gc_id();
   99.89 +
   99.90 +  send_reference_stats_event(REF_SOFT, rps.soft_count());
   99.91 +  send_reference_stats_event(REF_WEAK, rps.weak_count());
   99.92 +  send_reference_stats_event(REF_FINAL, rps.final_count());
   99.93 +  send_reference_stats_event(REF_PHANTOM, rps.phantom_count());
   99.94 +}
   99.95 +
   99.96 +#if INCLUDE_SERVICES
   99.97 +void ObjectCountEventSenderClosure::do_cinfo(KlassInfoEntry* entry) {
   99.98 +  if (should_send_event(entry)) {
   99.99 +    send_event(entry);
  99.100 +  }
  99.101 +}
  99.102 +
  99.103 +void ObjectCountEventSenderClosure::send_event(KlassInfoEntry* entry) {
  99.104 +  _gc_tracer->send_object_count_after_gc_event(entry->klass(), entry->count(),
  99.105 +                                               entry->words() * BytesPerWord);
  99.106 +}
  99.107 +
  99.108 +bool ObjectCountEventSenderClosure::should_send_event(KlassInfoEntry* entry) const {
  99.109 +  double percentage_of_heap = ((double) entry->words()) / _total_size_in_words;
  99.110 +  return percentage_of_heap > _size_threshold_percentage;
  99.111 +}
  99.112 +
  99.113 +void GCTracer::report_object_count_after_gc(BoolObjectClosure* is_alive_cl) {
  99.114 +  assert_set_gc_id();
  99.115 +
  99.116 +  if (should_send_object_count_after_gc_event()) {
  99.117 +    ResourceMark rm;
  99.118 +
  99.119 +    KlassInfoTable cit(false);
  99.120 +    if (!cit.allocation_failed()) {
  99.121 +      HeapInspection hi(false, false, false, NULL);
  99.122 +      hi.populate_table(&cit, is_alive_cl);
  99.123 +
  99.124 +      ObjectCountEventSenderClosure event_sender(this, cit.size_of_instances_in_words());
  99.125 +      cit.iterate(&event_sender);
  99.126 +    }
  99.127 +  }
  99.128 +}
  99.129 +#endif
  99.130 +
  99.131 +void GCTracer::report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const {
  99.132 +  assert_set_gc_id();
  99.133 +
  99.134 +  send_gc_heap_summary_event(when, heap_summary);
  99.135 +  send_meta_space_summary_event(when, meta_space_summary);
  99.136 +}
  99.137 +
  99.138 +void YoungGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
  99.139 +  assert_set_gc_id();
  99.140 +  assert(_tenuring_threshold != UNSET_TENURING_THRESHOLD, "Tenuring threshold has not been reported");
  99.141 +
  99.142 +  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  99.143 +  send_young_gc_event();
  99.144 +
  99.145 +  _tenuring_threshold = UNSET_TENURING_THRESHOLD;
  99.146 +}
  99.147 +
  99.148 +void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) {
  99.149 +  assert_set_gc_id();
  99.150 +
  99.151 +  send_promotion_failed_event(pf_info);
  99.152 +}
  99.153 +
  99.154 +void YoungGCTracer::report_tenuring_threshold(const uint tenuring_threshold) {
  99.155 +  _tenuring_threshold = tenuring_threshold;
  99.156 +}
  99.157 +
  99.158 +void OldGCTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
  99.159 +  assert_set_gc_id();
  99.160 +
  99.161 +  GCTracer::report_gc_end_impl(timestamp, time_partitions);
  99.162 +  send_old_gc_event();
  99.163 +}
  99.164 +
  99.165 +void ParallelOldTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
  99.166 +  assert_set_gc_id();
  99.167 +
  99.168 +  OldGCTracer::report_gc_end_impl(timestamp, time_partitions);
  99.169 +  send_parallel_old_event();
  99.170 +}
  99.171 +
  99.172 +void ParallelOldTracer::report_dense_prefix(void* dense_prefix) {
  99.173 +  assert_set_gc_id();
  99.174 +
  99.175 +  _parallel_old_gc_info.report_dense_prefix(dense_prefix);
  99.176 +}
  99.177 +
  99.178 +void OldGCTracer::report_concurrent_mode_failure() {
  99.179 +  assert_set_gc_id();
  99.180 +
  99.181 +  send_concurrent_mode_failure_event();
  99.182 +}
  99.183 +
  99.184 +#if INCLUDE_ALL_GCS
  99.185 +void G1NewTracer::report_yc_type(G1YCType type) {
  99.186 +  assert_set_gc_id();
  99.187 +
  99.188 +  _g1_young_gc_info.set_type(type);
  99.189 +}
  99.190 +
  99.191 +void G1NewTracer::report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions) {
  99.192 +  assert_set_gc_id();
  99.193 +
  99.194 +  YoungGCTracer::report_gc_end_impl(timestamp, time_partitions);
  99.195 +  send_g1_young_gc_event();
  99.196 +}
  99.197 +
  99.198 +void G1NewTracer::report_evacuation_info(EvacuationInfo* info) {
  99.199 +  assert_set_gc_id();
  99.200 +
  99.201 +  send_evacuation_info_event(info);
  99.202 +}
  99.203 +
  99.204 +void G1NewTracer::report_evacuation_failed(EvacuationFailedInfo& ef_info) {
  99.205 +  assert_set_gc_id();
  99.206 +
  99.207 +  send_evacuation_failed_event(ef_info);
  99.208 +  ef_info.reset();
  99.209 +}
  99.210 +#endif
   100.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   100.2 +++ b/src/share/vm/gc_implementation/shared/gcTrace.hpp	Mon Jun 10 11:30:51 2013 +0200
   100.3 @@ -0,0 +1,255 @@
   100.4 +/*
   100.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   100.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   100.7 + *
   100.8 + * This code is free software; you can redistribute it and/or modify it
   100.9 + * under the terms of the GNU General Public License version 2 only, as
  100.10 + * published by the Free Software Foundation.
  100.11 + *
  100.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  100.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  100.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  100.15 + * version 2 for more details (a copy is included in the LICENSE file that
  100.16 + * accompanied this code).
  100.17 + *
  100.18 + * You should have received a copy of the GNU General Public License version
  100.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  100.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  100.21 + *
  100.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  100.23 + * or visit www.oracle.com if you need additional information or have any
  100.24 + * questions.
  100.25 + *
  100.26 + */
  100.27 +
  100.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
  100.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
  100.30 +
  100.31 +#include "gc_interface/gcCause.hpp"
  100.32 +#include "gc_interface/gcName.hpp"
  100.33 +#include "gc_implementation/shared/gcWhen.hpp"
  100.34 +#include "gc_implementation/shared/copyFailedInfo.hpp"
  100.35 +#include "memory/allocation.hpp"
  100.36 +#include "memory/klassInfoClosure.hpp"
  100.37 +#include "memory/referenceType.hpp"
  100.38 +#if INCLUDE_ALL_GCS
  100.39 +#include "gc_implementation/g1/g1YCTypes.hpp"
  100.40 +#endif
  100.41 +#include "utilities/macros.hpp"
  100.42 +
  100.43 +typedef uint GCId;
  100.44 +
  100.45 +class EvacuationInfo;
  100.46 +class GCHeapSummary;
  100.47 +class MetaspaceSummary;
  100.48 +class PSHeapSummary;
  100.49 +class ReferenceProcessorStats;
  100.50 +class TimePartitions;
  100.51 +class BoolObjectClosure;
  100.52 +
  100.53 +class SharedGCInfo VALUE_OBJ_CLASS_SPEC {
  100.54 +  static const jlong UNSET_TIMESTAMP = -1;
  100.55 +
  100.56 + public:
  100.57 +  static const GCId UNSET_GCID = (GCId)-1;
  100.58 +
  100.59 + private:
  100.60 +  GCId _id;
  100.61 +  GCName _name;
  100.62 +  GCCause::Cause _cause;
  100.63 +  jlong _start_timestamp;
  100.64 +  jlong _end_timestamp;
  100.65 +  jlong _sum_of_pauses;
  100.66 +  jlong _longest_pause;
  100.67 +
  100.68 + public:
  100.69 +  SharedGCInfo(GCName name) : _id(UNSET_GCID), _name(name), _cause(GCCause::_last_gc_cause),
  100.70 +      _start_timestamp(UNSET_TIMESTAMP), _end_timestamp(UNSET_TIMESTAMP), _sum_of_pauses(0), _longest_pause(0) {}
  100.71 +
  100.72 +  void set_id(GCId id) { _id = id; }
  100.73 +  GCId id() const { return _id; }
  100.74 +
  100.75 +  void set_start_timestamp(jlong timestamp) { _start_timestamp = timestamp; }
  100.76 +  jlong start_timestamp() const { return _start_timestamp; }
  100.77 +
  100.78 +  void set_end_timestamp(jlong timestamp) { _end_timestamp = timestamp; }
  100.79 +  jlong end_timestamp() const { return _end_timestamp; }
  100.80 +
  100.81 +  void set_name(GCName name) { _name = name; }
  100.82 +  GCName name() const { return _name; }
  100.83 +
  100.84 +  void set_cause(GCCause::Cause cause) { _cause = cause; }
  100.85 +  GCCause::Cause cause() const { return _cause; }
  100.86 +
  100.87 +  void set_sum_of_pauses(jlong duration) { _sum_of_pauses = duration; }
  100.88 +  jlong sum_of_pauses() const { return _sum_of_pauses; }
  100.89 +
  100.90 +  void set_longest_pause(jlong duration) { _longest_pause = duration; }
  100.91 +  jlong longest_pause() const { return _longest_pause; }
  100.92 +};
  100.93 +
  100.94 +class ParallelOldGCInfo VALUE_OBJ_CLASS_SPEC {
  100.95 +  void* _dense_prefix;
  100.96 + public:
  100.97 +  ParallelOldGCInfo() : _dense_prefix(NULL) {}
  100.98 +  void report_dense_prefix(void* addr) {
  100.99 +    _dense_prefix = addr;
 100.100 +  }
 100.101 +  void* dense_prefix() const { return _dense_prefix; }
 100.102 +};
 100.103 +
 100.104 +#if INCLUDE_ALL_GCS
 100.105 +
 100.106 +class G1YoungGCInfo VALUE_OBJ_CLASS_SPEC {
 100.107 +  G1YCType _type;
 100.108 + public:
 100.109 +  G1YoungGCInfo() : _type(G1YCTypeEndSentinel) {}
 100.110 +  void set_type(G1YCType type) {
 100.111 +    _type = type;
 100.112 +  }
 100.113 +  G1YCType type() const { return _type; }
 100.114 +};
 100.115 +
 100.116 +#endif // INCLUDE_ALL_GCS
 100.117 +
 100.118 +class GCTracer : public ResourceObj {
 100.119 +  friend class ObjectCountEventSenderClosure;
 100.120 + protected:
 100.121 +  SharedGCInfo _shared_gc_info;
 100.122 +
 100.123 + public:
 100.124 +  void report_gc_start(GCCause::Cause cause, jlong timestamp);
 100.125 +  void report_gc_end(jlong timestamp, TimePartitions* time_partitions);
 100.126 +  void report_gc_heap_summary(GCWhen::Type when, const GCHeapSummary& heap_summary, const MetaspaceSummary& meta_space_summary) const;
 100.127 +  void report_gc_reference_stats(const ReferenceProcessorStats& rp) const;
 100.128 +  void report_object_count_after_gc(BoolObjectClosure* object_filter) NOT_SERVICES_RETURN;
 100.129 +
 100.130 +  bool has_reported_gc_start() const;
 100.131 +
 100.132 + protected:
 100.133 +  GCTracer(GCName name) : _shared_gc_info(name) {}
 100.134 +  virtual void report_gc_start_impl(GCCause::Cause cause, jlong timestamp);
 100.135 +  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
 100.136 +
 100.137 + private:
 100.138 +  void send_garbage_collection_event() const;
 100.139 +  void send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const;
 100.140 +  void send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const;
 100.141 +  void send_reference_stats_event(ReferenceType type, size_t count) const;
 100.142 +  void send_phase_events(TimePartitions* time_partitions) const;
 100.143 +  void send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const NOT_SERVICES_RETURN;
 100.144 +  bool should_send_object_count_after_gc_event() const;
 100.145 +};
 100.146 +
 100.147 +class ObjectCountEventSenderClosure : public KlassInfoClosure {
 100.148 +  GCTracer* _gc_tracer;
 100.149 +  const double _size_threshold_percentage;
 100.150 +  const size_t _total_size_in_words;
 100.151 + public:
 100.152 +  ObjectCountEventSenderClosure(GCTracer* gc_tracer, size_t total_size_in_words) :
 100.153 +    _gc_tracer(gc_tracer),
 100.154 +    _size_threshold_percentage(ObjectCountCutOffPercent / 100),
 100.155 +    _total_size_in_words(total_size_in_words)
 100.156 +  {}
 100.157 +  virtual void do_cinfo(KlassInfoEntry* entry);
 100.158 + protected:
 100.159 +  virtual void send_event(KlassInfoEntry* entry);
 100.160 + private:
 100.161 +  bool should_send_event(KlassInfoEntry* entry) const;
 100.162 +};
 100.163 +
 100.164 +class YoungGCTracer : public GCTracer {
 100.165 +  static const uint UNSET_TENURING_THRESHOLD = (uint) -1;
 100.166 +
 100.167 +  uint _tenuring_threshold;
 100.168 +
 100.169 + protected:
 100.170 +  YoungGCTracer(GCName name) : GCTracer(name), _tenuring_threshold(UNSET_TENURING_THRESHOLD) {}
 100.171 +  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
 100.172 +
 100.173 + public:
 100.174 +  void report_promotion_failed(const PromotionFailedInfo& pf_info);
 100.175 +  void report_tenuring_threshold(const uint tenuring_threshold);
 100.176 +
 100.177 + private:
 100.178 +  void send_young_gc_event() const;
 100.179 +  void send_promotion_failed_event(const PromotionFailedInfo& pf_info) const;
 100.180 +};
 100.181 +
 100.182 +class OldGCTracer : public GCTracer {
 100.183 + protected:
 100.184 +  OldGCTracer(GCName name) : GCTracer(name) {}
 100.185 +  virtual void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
 100.186 +
 100.187 + public:
 100.188 +  void report_concurrent_mode_failure();
 100.189 +
 100.190 + private:
 100.191 +  void send_old_gc_event() const;
 100.192 +  void send_concurrent_mode_failure_event();
 100.193 +};
 100.194 +
 100.195 +class ParallelOldTracer : public OldGCTracer {
 100.196 +  ParallelOldGCInfo _parallel_old_gc_info;
 100.197 +
 100.198 + public:
 100.199 +  ParallelOldTracer() : OldGCTracer(ParallelOld) {}
 100.200 +  void report_dense_prefix(void* dense_prefix);
 100.201 +
 100.202 + protected:
 100.203 +  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
 100.204 +
 100.205 + private:
 100.206 +  void send_parallel_old_event() const;
 100.207 +};
 100.208 +
 100.209 +class SerialOldTracer : public OldGCTracer {
 100.210 + public:
 100.211 +  SerialOldTracer() : OldGCTracer(SerialOld) {}
 100.212 +};
 100.213 +
 100.214 +class ParallelScavengeTracer : public YoungGCTracer {
 100.215 + public:
 100.216 +  ParallelScavengeTracer() : YoungGCTracer(ParallelScavenge) {}
 100.217 +};
 100.218 +
 100.219 +class DefNewTracer : public YoungGCTracer {
 100.220 + public:
 100.221 +  DefNewTracer() : YoungGCTracer(DefNew) {}
 100.222 +};
 100.223 +
 100.224 +class ParNewTracer : public YoungGCTracer {
 100.225 + public:
 100.226 +  ParNewTracer() : YoungGCTracer(ParNew) {}
 100.227 +};
 100.228 +
 100.229 +#if INCLUDE_ALL_GCS
 100.230 +class G1NewTracer : public YoungGCTracer {
 100.231 +  G1YoungGCInfo _g1_young_gc_info;
 100.232 +
 100.233 + public:
 100.234 +  G1NewTracer() : YoungGCTracer(G1New) {}
 100.235 +
 100.236 +  void report_yc_type(G1YCType type);
 100.237 +  void report_gc_end_impl(jlong timestamp, TimePartitions* time_partitions);
 100.238 +  void report_evacuation_info(EvacuationInfo* info);
 100.239 +  void report_evacuation_failed(EvacuationFailedInfo& ef_info);
 100.240 +
 100.241 + private:
 100.242 +  void send_g1_young_gc_event();
 100.243 +  void send_evacuation_info_event(EvacuationInfo* info);
 100.244 +  void send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const;
 100.245 +};
 100.246 +#endif
 100.247 +
 100.248 +class CMSTracer : public OldGCTracer {
 100.249 + public:
 100.250 +  CMSTracer() : OldGCTracer(ConcurrentMarkSweep) {}
 100.251 +};
 100.252 +
 100.253 +class G1OldTracer : public OldGCTracer {
 100.254 + public:
 100.255 +  G1OldTracer() : OldGCTracer(G1Old) {}
 100.256 +};
 100.257 +
 100.258 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACE_HPP
   101.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   101.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceSend.cpp	Mon Jun 10 11:30:51 2013 +0200
   101.3 @@ -0,0 +1,318 @@
   101.4 +/*
   101.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   101.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   101.7 + *
   101.8 + * This code is free software; you can redistribute it and/or modify it
   101.9 + * under the terms of the GNU General Public License version 2 only, as
  101.10 + * published by the Free Software Foundation.
  101.11 + *
  101.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  101.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  101.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  101.15 + * version 2 for more details (a copy is included in the LICENSE file that
  101.16 + * accompanied this code).
  101.17 + *
  101.18 + * You should have received a copy of the GNU General Public License version
  101.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  101.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  101.21 + *
  101.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  101.23 + * or visit www.oracle.com if you need additional information or have any
  101.24 + * questions.
  101.25 + *
  101.26 + */
  101.27 +
  101.28 +#include "precompiled.hpp"
  101.29 +#include "gc_implementation/shared/gcHeapSummary.hpp"
  101.30 +#include "gc_implementation/shared/gcTimer.hpp"
  101.31 +#include "gc_implementation/shared/gcTrace.hpp"
  101.32 +#include "gc_implementation/shared/gcWhen.hpp"
  101.33 +#include "gc_implementation/shared/copyFailedInfo.hpp"
  101.34 +#include "trace/tracing.hpp"
  101.35 +#include "trace/traceBackend.hpp"
  101.36 +#if INCLUDE_ALL_GCS
  101.37 +#include "gc_implementation/g1/evacuationInfo.hpp"
  101.38 +#include "gc_implementation/g1/g1YCTypes.hpp"
  101.39 +#endif
  101.40 +
   101.41 +// All GC dependencies against the trace framework are contained within this file.
  101.42 +
  101.43 +typedef uintptr_t TraceAddress;
  101.44 +
  101.45 +void GCTracer::send_garbage_collection_event() const {
  101.46 +  EventGCGarbageCollection event(UNTIMED);
  101.47 +  if (event.should_commit()) {
  101.48 +    event.set_gcId(_shared_gc_info.id());
  101.49 +    event.set_name(_shared_gc_info.name());
  101.50 +    event.set_cause((u2) _shared_gc_info.cause());
  101.51 +    event.set_sumOfPauses(_shared_gc_info.sum_of_pauses());
  101.52 +    event.set_longestPause(_shared_gc_info.longest_pause());
  101.53 +    event.set_starttime(_shared_gc_info.start_timestamp());
  101.54 +    event.set_endtime(_shared_gc_info.end_timestamp());
  101.55 +    event.commit();
  101.56 +  }
  101.57 +}
  101.58 +
  101.59 +void GCTracer::send_reference_stats_event(ReferenceType type, size_t count) const {
  101.60 +  EventGCReferenceStatistics e;
  101.61 +  if (e.should_commit()) {
  101.62 +      e.set_gcId(_shared_gc_info.id());
  101.63 +      e.set_type((u1)type);
  101.64 +      e.set_count(count);
  101.65 +      e.commit();
  101.66 +  }
  101.67 +}
  101.68 +
  101.69 +void ParallelOldTracer::send_parallel_old_event() const {
  101.70 +  EventGCParallelOld e(UNTIMED);
  101.71 +  if (e.should_commit()) {
  101.72 +    e.set_gcId(_shared_gc_info.id());
  101.73 +    e.set_densePrefix((TraceAddress)_parallel_old_gc_info.dense_prefix());
  101.74 +    e.set_starttime(_shared_gc_info.start_timestamp());
  101.75 +    e.set_endtime(_shared_gc_info.end_timestamp());
  101.76 +    e.commit();
  101.77 +  }
  101.78 +}
  101.79 +
  101.80 +void YoungGCTracer::send_young_gc_event() const {
  101.81 +  EventGCYoungGarbageCollection e(UNTIMED);
  101.82 +  if (e.should_commit()) {
  101.83 +    e.set_gcId(_shared_gc_info.id());
  101.84 +    e.set_tenuringThreshold(_tenuring_threshold);
  101.85 +    e.set_starttime(_shared_gc_info.start_timestamp());
  101.86 +    e.set_endtime(_shared_gc_info.end_timestamp());
  101.87 +    e.commit();
  101.88 +  }
  101.89 +}
  101.90 +
  101.91 +void OldGCTracer::send_old_gc_event() const {
  101.92 +  EventGCOldGarbageCollection e(UNTIMED);
  101.93 +  if (e.should_commit()) {
  101.94 +    e.set_gcId(_shared_gc_info.id());
  101.95 +    e.set_starttime(_shared_gc_info.start_timestamp());
  101.96 +    e.set_endtime(_shared_gc_info.end_timestamp());
  101.97 +    e.commit();
  101.98 +  }
  101.99 +}
 101.100 +
 101.101 +static TraceStructCopyFailed to_trace_struct(const CopyFailedInfo& cf_info) {
 101.102 +  TraceStructCopyFailed failed_info;
 101.103 +  failed_info.set_objectCount(cf_info.failed_count());
 101.104 +  failed_info.set_firstSize(cf_info.first_size());
 101.105 +  failed_info.set_smallestSize(cf_info.smallest_size());
 101.106 +  failed_info.set_totalSize(cf_info.total_size());
 101.107 +  return failed_info;
 101.108 +}
 101.109 +
 101.110 +void YoungGCTracer::send_promotion_failed_event(const PromotionFailedInfo& pf_info) const {
 101.111 +  EventPromotionFailed e;
 101.112 +  if (e.should_commit()) {
 101.113 +    e.set_gcId(_shared_gc_info.id());
 101.114 +    e.set_data(to_trace_struct(pf_info));
 101.115 +    e.set_thread(pf_info.thread()->thread_id());
 101.116 +    e.commit();
 101.117 +  }
 101.118 +}
 101.119 +
 101.120 +// Common to CMS and G1
 101.121 +void OldGCTracer::send_concurrent_mode_failure_event() {
 101.122 +  EventConcurrentModeFailure e;
 101.123 +  if (e.should_commit()) {
 101.124 +    e.set_gcId(_shared_gc_info.id());
 101.125 +    e.commit();
 101.126 +  }
 101.127 +}
 101.128 +
 101.129 +#if INCLUDE_SERVICES
 101.130 +void GCTracer::send_object_count_after_gc_event(Klass* klass, jlong count, julong total_size) const {
 101.131 +  EventObjectCountAfterGC e;
 101.132 +  if (e.should_commit()) {
 101.133 +    e.set_gcId(_shared_gc_info.id());
 101.134 +    e.set_class(klass);
 101.135 +    e.set_count(count);
 101.136 +    e.set_totalSize(total_size);
 101.137 +    e.commit();
 101.138 +  }
 101.139 +}
 101.140 +#endif
 101.141 +
 101.142 +bool GCTracer::should_send_object_count_after_gc_event() const {
 101.143 +#if INCLUDE_TRACE
 101.144 +  return Tracing::is_event_enabled(EventObjectCountAfterGC::eventId);
 101.145 +#else
 101.146 +  return false;
 101.147 +#endif
 101.148 +}
 101.149 +
 101.150 +#if INCLUDE_ALL_GCS
 101.151 +void G1NewTracer::send_g1_young_gc_event() {
 101.152 +  EventGCG1GarbageCollection e(UNTIMED);
 101.153 +  if (e.should_commit()) {
 101.154 +    e.set_gcId(_shared_gc_info.id());
 101.155 +    e.set_type(_g1_young_gc_info.type());
 101.156 +    e.set_starttime(_shared_gc_info.start_timestamp());
 101.157 +    e.set_endtime(_shared_gc_info.end_timestamp());
 101.158 +    e.commit();
 101.159 +  }
 101.160 +}
 101.161 +
 101.162 +void G1NewTracer::send_evacuation_info_event(EvacuationInfo* info) {
 101.163 +  EventEvacuationInfo e;
 101.164 +  if (e.should_commit()) {
 101.165 +    e.set_gcId(_shared_gc_info.id());
 101.166 +    e.set_cSetRegions(info->collectionset_regions());
 101.167 +    e.set_cSetUsedBefore(info->collectionset_used_before());
 101.168 +    e.set_cSetUsedAfter(info->collectionset_used_after());
 101.169 +    e.set_allocationRegions(info->allocation_regions());
 101.170 +    e.set_allocRegionsUsedBefore(info->alloc_regions_used_before());
 101.171 +    e.set_allocRegionsUsedAfter(info->alloc_regions_used_before() + info->bytes_copied());
 101.172 +    e.set_bytesCopied(info->bytes_copied());
 101.173 +    e.set_regionsFreed(info->regions_freed());
 101.174 +    e.commit();
 101.175 +  }
 101.176 +}
 101.177 +
 101.178 +void G1NewTracer::send_evacuation_failed_event(const EvacuationFailedInfo& ef_info) const {
 101.179 +  EventEvacuationFailed e;
 101.180 +  if (e.should_commit()) {
 101.181 +    e.set_gcId(_shared_gc_info.id());
 101.182 +    e.set_data(to_trace_struct(ef_info));
 101.183 +    e.commit();
 101.184 +  }
 101.185 +}
 101.186 +#endif
 101.187 +
 101.188 +static TraceStructVirtualSpace to_trace_struct(const VirtualSpaceSummary& summary) {
 101.189 +  TraceStructVirtualSpace space;
 101.190 +  space.set_start((TraceAddress)summary.start());
 101.191 +  space.set_committedEnd((TraceAddress)summary.committed_end());
 101.192 +  space.set_committedSize(summary.committed_size());
 101.193 +  space.set_reservedEnd((TraceAddress)summary.reserved_end());
 101.194 +  space.set_reservedSize(summary.reserved_size());
 101.195 +  return space;
 101.196 +}
 101.197 +
 101.198 +static TraceStructObjectSpace to_trace_struct(const SpaceSummary& summary) {
 101.199 +  TraceStructObjectSpace space;
 101.200 +  space.set_start((TraceAddress)summary.start());
 101.201 +  space.set_end((TraceAddress)summary.end());
 101.202 +  space.set_used(summary.used());
 101.203 +  space.set_size(summary.size());
 101.204 +  return space;
 101.205 +}
 101.206 +
 101.207 +class GCHeapSummaryEventSender : public GCHeapSummaryVisitor {
 101.208 +  GCId _id;
 101.209 +  GCWhen::Type _when;
 101.210 + public:
 101.211 +  GCHeapSummaryEventSender(GCId id, GCWhen::Type when) : _id(id), _when(when) {}
 101.212 +
 101.213 +  void visit(const GCHeapSummary* heap_summary) const {
 101.214 +    const VirtualSpaceSummary& heap_space = heap_summary->heap();
 101.215 +
 101.216 +    EventGCHeapSummary e;
 101.217 +    if (e.should_commit()) {
 101.218 +      e.set_gcId(_id);
 101.219 +      e.set_when((u1)_when);
 101.220 +      e.set_heapSpace(to_trace_struct(heap_space));
 101.221 +      e.set_heapUsed(heap_summary->used());
 101.222 +      e.commit();
 101.223 +    }
 101.224 +  }
 101.225 +
 101.226 +  void visit(const PSHeapSummary* ps_heap_summary) const {
 101.227 +    visit((GCHeapSummary*)ps_heap_summary);
 101.228 +
 101.229 +    const VirtualSpaceSummary& old_summary = ps_heap_summary->old();
 101.230 +    const SpaceSummary& old_space = ps_heap_summary->old_space();
 101.231 +    const VirtualSpaceSummary& young_summary = ps_heap_summary->young();
 101.232 +    const SpaceSummary& eden_space = ps_heap_summary->eden();
 101.233 +    const SpaceSummary& from_space = ps_heap_summary->from();
 101.234 +    const SpaceSummary& to_space = ps_heap_summary->to();
 101.235 +
 101.236 +    EventPSHeapSummary e;
 101.237 +    if (e.should_commit()) {
 101.238 +      e.set_gcId(_id);
 101.239 +      e.set_when((u1)_when);
 101.240 +
 101.241 +      e.set_oldSpace(to_trace_struct(ps_heap_summary->old()));
 101.242 +      e.set_oldObjectSpace(to_trace_struct(ps_heap_summary->old_space()));
 101.243 +      e.set_youngSpace(to_trace_struct(ps_heap_summary->young()));
 101.244 +      e.set_edenSpace(to_trace_struct(ps_heap_summary->eden()));
 101.245 +      e.set_fromSpace(to_trace_struct(ps_heap_summary->from()));
 101.246 +      e.set_toSpace(to_trace_struct(ps_heap_summary->to()));
 101.247 +      e.commit();
 101.248 +    }
 101.249 +  }
 101.250 +};
 101.251 +
 101.252 +void GCTracer::send_gc_heap_summary_event(GCWhen::Type when, const GCHeapSummary& heap_summary) const {
 101.253 +  GCHeapSummaryEventSender visitor(_shared_gc_info.id(), when);
 101.254 +  heap_summary.accept(&visitor);
 101.255 +}
 101.256 +
 101.257 +static TraceStructMetaspaceSizes to_trace_struct(const MetaspaceSizes& sizes) {
 101.258 +  TraceStructMetaspaceSizes meta_sizes;
 101.259 +
 101.260 +  meta_sizes.set_capacity(sizes.capacity());
 101.261 +  meta_sizes.set_used(sizes.used());
 101.262 +  meta_sizes.set_reserved(sizes.reserved());
 101.263 +
 101.264 +  return meta_sizes;
 101.265 +}
 101.266 +
 101.267 +void GCTracer::send_meta_space_summary_event(GCWhen::Type when, const MetaspaceSummary& meta_space_summary) const {
 101.268 +  EventMetaspaceSummary e;
 101.269 +  if (e.should_commit()) {
 101.270 +    e.set_gcId(_shared_gc_info.id());
 101.271 +    e.set_when((u1) when);
 101.272 +    e.set_metaspace(to_trace_struct(meta_space_summary.meta_space()));
 101.273 +    e.set_dataSpace(to_trace_struct(meta_space_summary.data_space()));
 101.274 +    e.set_classSpace(to_trace_struct(meta_space_summary.class_space()));
 101.275 +    e.commit();
 101.276 +  }
 101.277 +}
 101.278 +
 101.279 +class PhaseSender : public PhaseVisitor {
 101.280 +  GCId _gc_id;
 101.281 + public:
 101.282 +  PhaseSender(GCId gc_id) : _gc_id(gc_id) {}
 101.283 +
 101.284 +  template<typename T>
 101.285 +  void send_phase(PausePhase* pause) {
 101.286 +    T event(UNTIMED);
 101.287 +    if (event.should_commit()) {
 101.288 +      event.set_gcId(_gc_id);
 101.289 +      event.set_name(pause->name());
 101.290 +      event.set_starttime(pause->start());
 101.291 +      event.set_endtime(pause->end());
 101.292 +      event.commit();
 101.293 +    }
 101.294 +  }
 101.295 +
 101.296 +  void visit(GCPhase* pause) { ShouldNotReachHere(); }
 101.297 +  void visit(ConcurrentPhase* pause) { Unimplemented(); }
 101.298 +  void visit(PausePhase* pause) {
 101.299 +    assert(PhasesStack::PHASE_LEVELS == 5, "Need more event types");
 101.300 +
 101.301 +    switch (pause->level()) {
 101.302 +      case 0: send_phase<EventGCPhasePause>(pause); break;
 101.303 +      case 1: send_phase<EventGCPhasePauseLevel1>(pause); break;
 101.304 +      case 2: send_phase<EventGCPhasePauseLevel2>(pause); break;
 101.305 +      case 3: send_phase<EventGCPhasePauseLevel3>(pause); break;
 101.306 +      default: /* Ignore sending this phase */ break;
 101.307 +    }
 101.308 +  }
 101.309 +
 101.310 +#undef send_phase
 101.311 +};
 101.312 +
 101.313 +void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
 101.314 +  PhaseSender phase_reporter(_shared_gc_info.id());
 101.315 +
 101.316 +  TimePartitionPhasesIterator iter(time_partitions);
 101.317 +  while (iter.has_next()) {
 101.318 +    GCPhase* phase = iter.next();
 101.319 +    phase->accept(&phase_reporter);
 101.320 +  }
 101.321 +}
   102.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   102.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.cpp	Mon Jun 10 11:30:51 2013 +0200
   102.3 @@ -0,0 +1,79 @@
   102.4 +/*
   102.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   102.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   102.7 + *
   102.8 + * This code is free software; you can redistribute it and/or modify it
   102.9 + * under the terms of the GNU General Public License version 2 only, as
  102.10 + * published by the Free Software Foundation.
  102.11 + *
  102.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  102.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  102.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  102.15 + * version 2 for more details (a copy is included in the LICENSE file that
  102.16 + * accompanied this code).
  102.17 + *
  102.18 + * You should have received a copy of the GNU General Public License version
  102.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  102.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  102.21 + *
  102.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  102.23 + * or visit www.oracle.com if you need additional information or have any
  102.24 + * questions.
  102.25 + *
  102.26 + */
  102.27 +
  102.28 +#include "precompiled.hpp"
  102.29 +#include "gc_implementation/shared/gcTimer.hpp"
  102.30 +#include "gc_implementation/shared/gcTraceTime.hpp"
  102.31 +#include "runtime/globals.hpp"
  102.32 +#include "runtime/os.hpp"
  102.33 +#include "runtime/safepoint.hpp"
  102.34 +#include "runtime/thread.inline.hpp"
  102.35 +#include "runtime/timer.hpp"
  102.36 +#include "utilities/ostream.hpp"
  102.37 +
  102.38 +
  102.39 +GCTraceTime::GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer) :
  102.40 +    _title(title), _doit(doit), _print_cr(print_cr), _timer(timer) {
  102.41 +  if (_doit || _timer != NULL) {
  102.42 +    _start_counter = os::elapsed_counter();
  102.43 +  }
  102.44 +
  102.45 +  if (_timer != NULL) {
  102.46 +    assert(SafepointSynchronize::is_at_safepoint(), "Tracing currently only supported at safepoints");
  102.47 +    assert(Thread::current()->is_VM_thread(), "Tracing currently only supported from the VM thread");
  102.48 +
  102.49 +    _timer->register_gc_phase_start(title, _start_counter);
  102.50 +  }
  102.51 +
  102.52 +  if (_doit) {
  102.53 +    if (PrintGCTimeStamps) {
  102.54 +      gclog_or_tty->stamp();
  102.55 +      gclog_or_tty->print(": ");
  102.56 +    }
  102.57 +    gclog_or_tty->print("[%s", title);
  102.58 +    gclog_or_tty->flush();
  102.59 +  }
  102.60 +}
  102.61 +
  102.62 +GCTraceTime::~GCTraceTime() {
  102.63 +  jlong stop_counter = 0;
  102.64 +
  102.65 +  if (_doit || _timer != NULL) {
  102.66 +    stop_counter = os::elapsed_counter();
  102.67 +  }
  102.68 +
  102.69 +  if (_timer != NULL) {
  102.70 +    _timer->register_gc_phase_end(stop_counter);
  102.71 +  }
  102.72 +
  102.73 +  if (_doit) {
  102.74 +    double seconds = TimeHelper::counter_to_seconds(stop_counter - _start_counter);
  102.75 +    if (_print_cr) {
  102.76 +      gclog_or_tty->print_cr(", %3.7f secs]", seconds);
  102.77 +    } else {
  102.78 +      gclog_or_tty->print(", %3.7f secs]", seconds);
  102.79 +    }
  102.80 +    gclog_or_tty->flush();
  102.81 +  }
  102.82 +}
   103.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   103.2 +++ b/src/share/vm/gc_implementation/shared/gcTraceTime.hpp	Mon Jun 10 11:30:51 2013 +0200
   103.3 @@ -0,0 +1,44 @@
   103.4 +/*
   103.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   103.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   103.7 + *
   103.8 + * This code is free software; you can redistribute it and/or modify it
   103.9 + * under the terms of the GNU General Public License version 2 only, as
  103.10 + * published by the Free Software Foundation.
  103.11 + *
  103.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  103.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  103.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  103.15 + * version 2 for more details (a copy is included in the LICENSE file that
  103.16 + * accompanied this code).
  103.17 + *
  103.18 + * You should have received a copy of the GNU General Public License version
  103.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  103.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  103.21 + *
  103.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  103.23 + * or visit www.oracle.com if you need additional information or have any
  103.24 + * questions.
  103.25 + *
  103.26 + */
  103.27 +
  103.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
  103.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
  103.30 +
  103.31 +#include "prims/jni_md.h"
  103.32 +
  103.33 +class GCTimer;
  103.34 +
  103.35 +class GCTraceTime {
  103.36 +  const char* _title;
  103.37 +  bool _doit;
  103.38 +  bool _print_cr;
  103.39 +  GCTimer* _timer;
  103.40 +  jlong _start_counter;
  103.41 +
  103.42 + public:
  103.43 +  GCTraceTime(const char* title, bool doit, bool print_cr, GCTimer* timer);
  103.44 +  ~GCTraceTime();
  103.45 +};
  103.46 +
  103.47 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCTRACETIME_HPP
   104.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   104.2 +++ b/src/share/vm/gc_implementation/shared/gcWhen.hpp	Mon Jun 10 11:30:51 2013 +0200
   104.3 @@ -0,0 +1,48 @@
   104.4 +/*
   104.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   104.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   104.7 + *
   104.8 + * This code is free software; you can redistribute it and/or modify it
   104.9 + * under the terms of the GNU General Public License version 2 only, as
  104.10 + * published by the Free Software Foundation.
  104.11 + *
  104.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  104.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  104.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  104.15 + * version 2 for more details (a copy is included in the LICENSE file that
  104.16 + * accompanied this code).
  104.17 + *
  104.18 + * You should have received a copy of the GNU General Public License version
  104.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  104.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  104.21 + *
  104.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  104.23 + * or visit www.oracle.com if you need additional information or have any
  104.24 + * questions.
  104.25 + *
  104.26 + */
  104.27 +
  104.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
  104.29 +#define SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
  104.30 +
  104.31 +#include "memory/allocation.hpp"
  104.32 +#include "utilities/debug.hpp"
  104.33 +
  104.34 +class GCWhen : AllStatic {
  104.35 + public:
  104.36 +  enum Type {
  104.37 +    BeforeGC,
  104.38 +    AfterGC,
  104.39 +    GCWhenEndSentinel
  104.40 +  };
  104.41 +
  104.42 +  static const char* to_string(GCWhen::Type when) {
  104.43 +    switch (when) {
  104.44 +    case BeforeGC: return "Before GC";
  104.45 +    case AfterGC:  return "After GC";
  104.46 +    default: ShouldNotReachHere(); return NULL;
  104.47 +    }
  104.48 +  }
  104.49 +};
  104.50 +
  104.51 +#endif // SHARE_VM_GC_IMPLEMENTATION_SHARED_GCWHEN_HPP
   105.1 --- a/src/share/vm/gc_implementation/shared/markSweep.cpp	Fri Jun 07 09:33:01 2013 -0700
   105.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.cpp	Mon Jun 10 11:30:51 2013 +0200
   105.3 @@ -24,6 +24,8 @@
   105.4  
   105.5  #include "precompiled.hpp"
   105.6  #include "compiler/compileBroker.hpp"
   105.7 +#include "gc_implementation/shared/gcTimer.hpp"
   105.8 +#include "gc_implementation/shared/gcTrace.hpp"
   105.9  #include "gc_implementation/shared/markSweep.inline.hpp"
  105.10  #include "gc_interface/collectedHeap.inline.hpp"
  105.11  #include "oops/methodData.hpp"
  105.12 @@ -41,6 +43,8 @@
  105.13  size_t                  MarkSweep::_preserved_count_max = 0;
  105.14  PreservedMark*          MarkSweep::_preserved_marks = NULL;
  105.15  ReferenceProcessor*     MarkSweep::_ref_processor   = NULL;
  105.16 +STWGCTimer*             MarkSweep::_gc_timer        = NULL;
  105.17 +SerialOldTracer*        MarkSweep::_gc_tracer       = NULL;
  105.18  
  105.19  MarkSweep::FollowRootClosure  MarkSweep::follow_root_closure;
  105.20  CodeBlobToOopClosure MarkSweep::follow_code_root_closure(&MarkSweep::follow_root_closure, /*do_marking=*/ true);
  105.21 @@ -173,7 +177,10 @@
  105.22  void MarkSweep::KeepAliveClosure::do_oop(oop* p)       { MarkSweep::KeepAliveClosure::do_oop_work(p); }
  105.23  void MarkSweep::KeepAliveClosure::do_oop(narrowOop* p) { MarkSweep::KeepAliveClosure::do_oop_work(p); }
  105.24  
  105.25 -void marksweep_init() { /* empty */ }
  105.26 +void marksweep_init() {
  105.27 +  MarkSweep::_gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  105.28 +  MarkSweep::_gc_tracer = new (ResourceObj::C_HEAP, mtGC) SerialOldTracer();
  105.29 +}
  105.30  
  105.31  #ifndef PRODUCT
  105.32  
   106.1 --- a/src/share/vm/gc_implementation/shared/markSweep.hpp	Fri Jun 07 09:33:01 2013 -0700
   106.2 +++ b/src/share/vm/gc_implementation/shared/markSweep.hpp	Mon Jun 10 11:30:51 2013 +0200
   106.3 @@ -1,5 +1,5 @@
   106.4  /*
   106.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   106.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   106.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   106.8   *
   106.9   * This code is free software; you can redistribute it and/or modify it
  106.10 @@ -36,6 +36,8 @@
  106.11  
  106.12  class ReferenceProcessor;
  106.13  class DataLayout;
  106.14 +class SerialOldTracer;
  106.15 +class STWGCTimer;
  106.16  
  106.17  // MarkSweep takes care of global mark-compact garbage collection for a
  106.18  // GenCollectedHeap using a four-phase pointer forwarding algorithm.  All
  106.19 @@ -128,6 +130,9 @@
  106.20    // Reference processing (used in ...follow_contents)
  106.21    static ReferenceProcessor*             _ref_processor;
  106.22  
  106.23 +  static STWGCTimer*                     _gc_timer;
  106.24 +  static SerialOldTracer*                _gc_tracer;
  106.25 +
  106.26    // Non public closures
  106.27    static KeepAliveClosure keep_alive;
  106.28  
  106.29 @@ -151,6 +156,9 @@
  106.30    // Reference Processing
  106.31    static ReferenceProcessor* const ref_processor() { return _ref_processor; }
  106.32  
  106.33 +  static STWGCTimer* gc_timer() { return _gc_timer; }
  106.34 +  static SerialOldTracer* gc_tracer() { return _gc_tracer; }
  106.35 +
  106.36    // Call backs for marking
  106.37    static void mark_object(oop obj);
  106.38    // Mark pointer and follow contents.  Empty marking stack afterwards.
   107.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Fri Jun 07 09:33:01 2013 -0700
   107.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp	Mon Jun 10 11:30:51 2013 +0200
   107.3 @@ -145,32 +145,37 @@
   107.4    return false;
   107.5  }
   107.6  
   107.7 +bool VM_GC_HeapInspection::collect() {
   107.8 +  if (GC_locker::is_active()) {
   107.9 +    return false;
  107.10 +  }
  107.11 +  Universe::heap()->collect_as_vm_thread(GCCause::_heap_inspection);
  107.12 +  return true;
  107.13 +}
  107.14 +
  107.15  void VM_GC_HeapInspection::doit() {
  107.16    HandleMark hm;
  107.17 -  CollectedHeap* ch = Universe::heap();
  107.18 -  ch->ensure_parsability(false); // must happen, even if collection does
  107.19 -                                 // not happen (e.g. due to GC_locker)
  107.20 +  Universe::heap()->ensure_parsability(false); // must happen, even if collection does
  107.21 +                                               // not happen (e.g. due to GC_locker)
  107.22 +                                               // or _full_gc being false
  107.23    if (_full_gc) {
  107.24 -    // The collection attempt below would be skipped anyway if
  107.25 -    // the gc locker is held. The following dump may then be a tad
  107.26 -    // misleading to someone expecting only live objects to show
  107.27 -    // up in the dump (see CR 6944195). Just issue a suitable warning
  107.28 -    // in that case and do not attempt to do a collection.
  107.29 -    // The latter is a subtle point, because even a failed attempt
  107.30 -    // to GC will, in fact, induce one in the future, which we
  107.31 -    // probably want to avoid in this case because the GC that we may
  107.32 -    // be about to attempt holds value for us only
  107.33 -    // if it happens now and not if it happens in the eventual
  107.34 -    // future.
  107.35 -    if (GC_locker::is_active()) {
  107.36 +    if (!collect()) {
  107.37 +      // The collection attempt was skipped because the gc locker is held.
  107.38 +      // The following dump may then be a tad misleading to someone expecting
  107.39 +      // only live objects to show up in the dump (see CR 6944195). Just issue
  107.40 +      // a suitable warning in that case and do not attempt to do a collection.
  107.41 +      // The latter is a subtle point, because even a failed attempt
  107.42 +      // to GC will, in fact, induce one in the future, which we
  107.43 +      // probably want to avoid in this case because the GC that we may
  107.44 +      // be about to attempt holds value for us only
  107.45 +      // if it happens now and not if it happens in the eventual
  107.46 +      // future.
  107.47        warning("GC locker is held; pre-dump GC was skipped");
  107.48 -    } else {
  107.49 -      ch->collect_as_vm_thread(GCCause::_heap_inspection);
  107.50      }
  107.51    }
  107.52    HeapInspection inspect(_csv_format, _print_help, _print_class_stats,
  107.53                           _columns);
  107.54 -  inspect.heap_inspection(_out, _need_prologue /* need_prologue */);
  107.55 +  inspect.heap_inspection(_out);
  107.56  }
  107.57  
  107.58  
   108.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Fri Jun 07 09:33:01 2013 -0700
   108.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Mon Jun 10 11:30:51 2013 +0200
   108.3 @@ -129,21 +129,18 @@
   108.4   private:
   108.5    outputStream* _out;
   108.6    bool _full_gc;
   108.7 -  bool _need_prologue;
   108.8    bool _csv_format; // "comma separated values" format for spreadsheet.
   108.9    bool _print_help;
  108.10    bool _print_class_stats;
  108.11    const char* _columns;
  108.12   public:
  108.13 -  VM_GC_HeapInspection(outputStream* out, bool request_full_gc,
  108.14 -                       bool need_prologue) :
  108.15 +  VM_GC_HeapInspection(outputStream* out, bool request_full_gc) :
  108.16      VM_GC_Operation(0 /* total collections,      dummy, ignored */,
  108.17                      GCCause::_heap_inspection /* GC Cause */,
  108.18                      0 /* total full collections, dummy, ignored */,
  108.19                      request_full_gc) {
  108.20      _out = out;
  108.21      _full_gc = request_full_gc;
  108.22 -    _need_prologue = need_prologue;
  108.23      _csv_format = false;
  108.24      _print_help = false;
  108.25      _print_class_stats = false;
  108.26 @@ -159,6 +156,8 @@
  108.27    void set_print_help(bool value) {_print_help = value;}
  108.28    void set_print_class_stats(bool value) {_print_class_stats = value;}
  108.29    void set_columns(const char* value) {_columns = value;}
  108.30 + protected:
  108.31 +  bool collect();
  108.32  };
  108.33  
  108.34  
   109.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   109.2 +++ b/src/share/vm/gc_interface/allocTracer.cpp	Mon Jun 10 11:30:51 2013 +0200
   109.3 @@ -0,0 +1,48 @@
   109.4 +/*
   109.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   109.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   109.7 + *
   109.8 + * This code is free software; you can redistribute it and/or modify it
   109.9 + * under the terms of the GNU General Public License version 2 only, as
  109.10 + * published by the Free Software Foundation.
  109.11 + *
  109.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  109.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  109.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  109.15 + * version 2 for more details (a copy is included in the LICENSE file that
  109.16 + * accompanied this code).
  109.17 + *
  109.18 + * You should have received a copy of the GNU General Public License version
  109.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  109.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  109.21 + *
  109.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  109.23 + * or visit www.oracle.com if you need additional information or have any
  109.24 + * questions.
  109.25 + *
  109.26 + */
  109.27 +
  109.28 +#include "precompiled.hpp"
  109.29 +#include "gc_interface/allocTracer.hpp"
  109.30 +#include "trace/tracing.hpp"
  109.31 +#include "runtime/handles.hpp"
  109.32 +#include "utilities/globalDefinitions.hpp"
  109.33 +
  109.34 +void AllocTracer::send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size) {
  109.35 +  EventAllocObjectOutsideTLAB event;
  109.36 +  if (event.should_commit()) {
  109.37 +    event.set_class(klass());
  109.38 +    event.set_allocationSize(alloc_size);
  109.39 +    event.commit();
  109.40 +  }
  109.41 +}
  109.42 +
  109.43 +void AllocTracer::send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size) {
  109.44 +  EventAllocObjectInNewTLAB event;
  109.45 +  if (event.should_commit()) {
  109.46 +    event.set_class(klass());
  109.47 +    event.set_allocationSize(alloc_size);
  109.48 +    event.set_tlabSize(tlab_size);
  109.49 +    event.commit();
  109.50 +  }
  109.51 +}
   110.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   110.2 +++ b/src/share/vm/gc_interface/allocTracer.hpp	Mon Jun 10 11:30:51 2013 +0200
   110.3 @@ -0,0 +1,37 @@
   110.4 +/*
   110.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   110.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   110.7 + *
   110.8 + * This code is free software; you can redistribute it and/or modify it
   110.9 + * under the terms of the GNU General Public License version 2 only, as
  110.10 + * published by the Free Software Foundation.
  110.11 + *
  110.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  110.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  110.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  110.15 + * version 2 for more details (a copy is included in the LICENSE file that
  110.16 + * accompanied this code).
  110.17 + *
  110.18 + * You should have received a copy of the GNU General Public License version
  110.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  110.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  110.21 + *
  110.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  110.23 + * or visit www.oracle.com if you need additional information or have any
  110.24 + * questions.
  110.25 + *
  110.26 + */
  110.27 +
  110.28 +#ifndef SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP
  110.29 +#define SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP
  110.30 +
  110.31 +#include "memory/allocation.hpp"
  110.32 +#include "runtime/handles.hpp"
  110.33 +
  110.34 +class AllocTracer : AllStatic {
  110.35 +  public:
  110.36 +    static void send_allocation_outside_tlab_event(KlassHandle klass, size_t alloc_size);
  110.37 +    static void send_allocation_in_new_tlab_event(KlassHandle klass, size_t tlab_size, size_t alloc_size);
  110.38 +};
  110.39 +
  110.40 +#endif /* SHARE_VM_GC_INTERFACE_ALLOCTRACER_HPP */
   111.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Fri Jun 07 09:33:01 2013 -0700
   111.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Mon Jun 10 11:30:51 2013 +0200
   111.3 @@ -1,5 +1,5 @@
   111.4  /*
   111.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   111.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   111.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   111.8   *
   111.9   * This code is free software; you can redistribute it and/or modify it
  111.10 @@ -24,9 +24,15 @@
  111.11  
  111.12  #include "precompiled.hpp"
  111.13  #include "classfile/systemDictionary.hpp"
  111.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
  111.15 +#include "gc_implementation/shared/gcTrace.hpp"
  111.16 +#include "gc_implementation/shared/gcTraceTime.hpp"
  111.17 +#include "gc_implementation/shared/gcWhen.hpp"
  111.18  #include "gc_implementation/shared/vmGCOperations.hpp"
  111.19 +#include "gc_interface/allocTracer.hpp"
  111.20  #include "gc_interface/collectedHeap.hpp"
  111.21  #include "gc_interface/collectedHeap.inline.hpp"
  111.22 +#include "memory/metaspace.hpp"
  111.23  #include "oops/oop.inline.hpp"
  111.24  #include "oops/instanceMirrorKlass.hpp"
  111.25  #include "runtime/init.hpp"
  111.26 @@ -65,11 +71,71 @@
  111.27    }
  111.28  }
  111.29  
  111.30 +VirtualSpaceSummary CollectedHeap::create_heap_space_summary() {
  111.31 +  size_t capacity_in_words = capacity() / HeapWordSize;
  111.32 +
  111.33 +  return VirtualSpaceSummary(
  111.34 +    reserved_region().start(), reserved_region().start() + capacity_in_words, reserved_region().end());
  111.35 +}
  111.36 +
  111.37 +GCHeapSummary CollectedHeap::create_heap_summary() {
  111.38 +  VirtualSpaceSummary heap_space = create_heap_space_summary();
  111.39 +  return GCHeapSummary(heap_space, used());
  111.40 +}
  111.41 +
  111.42 +MetaspaceSummary CollectedHeap::create_metaspace_summary() {
  111.43 +  const MetaspaceSizes meta_space(
  111.44 +      0, /*MetaspaceAux::capacity_in_bytes(),*/
  111.45 +      0, /*MetaspaceAux::used_in_bytes(),*/
  111.46 +      MetaspaceAux::reserved_in_bytes());
  111.47 +  const MetaspaceSizes data_space(
  111.48 +      0, /*MetaspaceAux::capacity_in_bytes(Metaspace::NonClassType),*/
  111.49 +      0, /*MetaspaceAux::used_in_bytes(Metaspace::NonClassType),*/
  111.50 +      MetaspaceAux::reserved_in_bytes(Metaspace::NonClassType));
  111.51 +  const MetaspaceSizes class_space(
  111.52 +      0, /*MetaspaceAux::capacity_in_bytes(Metaspace::ClassType),*/
  111.53 +      0, /*MetaspaceAux::used_in_bytes(Metaspace::ClassType),*/
  111.54 +      MetaspaceAux::reserved_in_bytes(Metaspace::ClassType));
  111.55 +
  111.56 +  return MetaspaceSummary(meta_space, data_space, class_space);
  111.57 +}
  111.58 +
  111.59 +void CollectedHeap::print_heap_before_gc() {
  111.60 +  if (PrintHeapAtGC) {
  111.61 +    Universe::print_heap_before_gc();
  111.62 +  }
  111.63 +  if (_gc_heap_log != NULL) {
  111.64 +    _gc_heap_log->log_heap_before();
  111.65 +  }
  111.66 +}
  111.67 +
  111.68 +void CollectedHeap::print_heap_after_gc() {
  111.69 +  if (PrintHeapAtGC) {
  111.70 +    Universe::print_heap_after_gc();
  111.71 +  }
  111.72 +  if (_gc_heap_log != NULL) {
  111.73 +    _gc_heap_log->log_heap_after();
  111.74 +  }
  111.75 +}
  111.76 +
  111.77 +void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
  111.78 +  const GCHeapSummary& heap_summary = create_heap_summary();
  111.79 +  const MetaspaceSummary& metaspace_summary = create_metaspace_summary();
  111.80 +  gc_tracer->report_gc_heap_summary(when, heap_summary, metaspace_summary);
  111.81 +}
  111.82 +
  111.83 +void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
  111.84 +  trace_heap(GCWhen::BeforeGC, gc_tracer);
  111.85 +}
  111.86 +
  111.87 +void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
  111.88 +  trace_heap(GCWhen::AfterGC, gc_tracer);
  111.89 +}
  111.90 +
  111.91  // Memory state functions.
  111.92  
  111.93  
  111.94  CollectedHeap::CollectedHeap() : _n_par_threads(0)
  111.95 -
  111.96  {
  111.97    const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
  111.98    const size_t elements_per_word = HeapWordSize / sizeof(jint);
  111.99 @@ -185,7 +251,7 @@
 111.100  }
 111.101  #endif
 111.102  
 111.103 -HeapWord* CollectedHeap::allocate_from_tlab_slow(Thread* thread, size_t size) {
 111.104 +HeapWord* CollectedHeap::allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size) {
 111.105  
 111.106    // Retain tlab and allocate object in shared space if
 111.107    // the amount free in the tlab is too large to discard.
 111.108 @@ -209,6 +275,9 @@
 111.109    if (obj == NULL) {
 111.110      return NULL;
 111.111    }
 111.112 +
 111.113 +  AllocTracer::send_allocation_in_new_tlab_event(klass, new_tlab_size * HeapWordSize, size * HeapWordSize);
 111.114 +
 111.115    if (ZeroTLAB) {
 111.116      // ..and clear it.
 111.117      Copy::zero_to_words(obj, new_tlab_size);
 111.118 @@ -458,28 +527,28 @@
 111.119    }
 111.120  }
 111.121  
 111.122 -void CollectedHeap::pre_full_gc_dump() {
 111.123 +void CollectedHeap::pre_full_gc_dump(GCTimer* timer) {
 111.124    if (HeapDumpBeforeFullGC) {
 111.125 -    TraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, gclog_or_tty);
 111.126 +    GCTraceTime tt("Heap Dump (before full gc): ", PrintGCDetails, false, timer);
 111.127      // We are doing a "major" collection and a heap dump before
 111.128      // major collection has been requested.
 111.129      HeapDumper::dump_heap();
 111.130    }
 111.131    if (PrintClassHistogramBeforeFullGC) {
 111.132 -    TraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, gclog_or_tty);
 111.133 -    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
 111.134 +    GCTraceTime tt("Class Histogram (before full gc): ", PrintGCDetails, true, timer);
 111.135 +    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
 111.136      inspector.doit();
 111.137    }
 111.138  }
 111.139  
 111.140 -void CollectedHeap::post_full_gc_dump() {
 111.141 +void CollectedHeap::post_full_gc_dump(GCTimer* timer) {
 111.142    if (HeapDumpAfterFullGC) {
 111.143 -    TraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, gclog_or_tty);
 111.144 +    GCTraceTime tt("Heap Dump (after full gc): ", PrintGCDetails, false, timer);
 111.145      HeapDumper::dump_heap();
 111.146    }
 111.147    if (PrintClassHistogramAfterFullGC) {
 111.148 -    TraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, gclog_or_tty);
 111.149 -    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */, false /* ! prologue */);
 111.150 +    GCTraceTime tt("Class Histogram (after full gc): ", PrintGCDetails, true, timer);
 111.151 +    VM_GC_HeapInspection inspector(gclog_or_tty, false /* ! full gc */);
 111.152      inspector.doit();
 111.153    }
 111.154  }
 111.155 @@ -490,7 +559,7 @@
 111.156    assert(size >= 0, "int won't convert to size_t");
 111.157    HeapWord* obj;
 111.158      assert(ScavengeRootsInCode > 0, "must be");
 111.159 -    obj = common_mem_allocate_init(size, CHECK_NULL);
 111.160 +    obj = common_mem_allocate_init(real_klass, size, CHECK_NULL);
 111.161    post_allocation_setup_common(klass, obj);
 111.162    assert(Universe::is_bootstrapping() ||
 111.163           !((oop)obj)->is_array(), "must not be an array");
   112.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Fri Jun 07 09:33:01 2013 -0700
   112.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Mon Jun 10 11:30:51 2013 +0200
   112.3 @@ -1,5 +1,5 @@
   112.4  /*
   112.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   112.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   112.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   112.8   *
   112.9   * This code is free software; you can redistribute it and/or modify it
  112.10 @@ -26,6 +26,7 @@
  112.11  #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_HPP
  112.12  
  112.13  #include "gc_interface/gcCause.hpp"
  112.14 +#include "gc_implementation/shared/gcWhen.hpp"
  112.15  #include "memory/allocation.hpp"
  112.16  #include "memory/barrierSet.hpp"
  112.17  #include "runtime/handles.hpp"
  112.18 @@ -38,11 +39,16 @@
  112.19  // class defines the functions that a heap must implement, and contains
  112.20  // infrastructure common to all heaps.
  112.21  
  112.22 +class AdaptiveSizePolicy;
  112.23  class BarrierSet;
  112.24 +class CollectorPolicy;
  112.25 +class GCHeapSummary;
  112.26 +class GCTimer;
  112.27 +class GCTracer;
  112.28 +class MetaspaceSummary;
  112.29 +class Thread;
  112.30  class ThreadClosure;
  112.31 -class AdaptiveSizePolicy;
  112.32 -class Thread;
  112.33 -class CollectorPolicy;
  112.34 +class VirtualSpaceSummary;
  112.35  
  112.36  class GCMessage : public FormatBuffer<1024> {
  112.37   public:
  112.38 @@ -128,16 +134,16 @@
  112.39    virtual void resize_all_tlabs();
  112.40  
  112.41    // Allocate from the current thread's TLAB, with broken-out slow path.
  112.42 -  inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
  112.43 -  static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
  112.44 +  inline static HeapWord* allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size);
  112.45 +  static HeapWord* allocate_from_tlab_slow(KlassHandle klass, Thread* thread, size_t size);
  112.46  
  112.47    // Allocate an uninitialized block of the given size, or returns NULL if
  112.48    // this is impossible.
  112.49 -  inline static HeapWord* common_mem_allocate_noinit(size_t size, TRAPS);
  112.50 +  inline static HeapWord* common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS);
  112.51  
  112.52    // Like allocate_init, but the block returned by a successful allocation
  112.53    // is guaranteed initialized to zeros.
  112.54 -  inline static HeapWord* common_mem_allocate_init(size_t size, TRAPS);
  112.55 +  inline static HeapWord* common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS);
  112.56  
  112.57    // Helper functions for (VM) allocation.
  112.58    inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
  112.59 @@ -166,6 +172,8 @@
  112.60    // Fill with a single object (either an int array or a java.lang.Object).
  112.61    static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);
  112.62  
  112.63 +  virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
  112.64 +
  112.65    // Verification functions
  112.66    virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
  112.67      PRODUCT_RETURN;
  112.68 @@ -202,8 +210,6 @@
  112.69    MemRegion reserved_region() const { return _reserved; }
  112.70    address base() const { return (address)reserved_region().start(); }
  112.71  
  112.72 -  // Future cleanup here. The following functions should specify bytes or
  112.73 -  // heapwords as part of their signature.
  112.74    virtual size_t capacity() const = 0;
  112.75    virtual size_t used() const = 0;
  112.76  
  112.77 @@ -550,8 +556,13 @@
  112.78    virtual void prepare_for_verify() = 0;
  112.79  
  112.80    // Generate any dumps preceding or following a full gc
  112.81 -  void pre_full_gc_dump();
  112.82 -  void post_full_gc_dump();
  112.83 +  void pre_full_gc_dump(GCTimer* timer);
  112.84 +  void post_full_gc_dump(GCTimer* timer);
  112.85 +
  112.86 +  VirtualSpaceSummary create_heap_space_summary();
  112.87 +  GCHeapSummary create_heap_summary();
  112.88 +
  112.89 +  MetaspaceSummary create_metaspace_summary();
  112.90  
  112.91    // Print heap information on the given outputStream.
  112.92    virtual void print_on(outputStream* st) const = 0;
  112.93 @@ -560,7 +571,7 @@
  112.94      print_on(tty);
  112.95    }
  112.96    // Print more detailed heap information on the given
  112.97 -  // outputStream. The default behaviour is to call print_on(). It is
  112.98 +  // outputStream. The default behavior is to call print_on(). It is
  112.99    // up to each subclass to override it and add any additional output
 112.100    // it needs.
 112.101    virtual void print_extended_on(outputStream* st) const {
 112.102 @@ -589,23 +600,11 @@
 112.103    // Default implementation does nothing.
 112.104    virtual void print_tracing_info() const = 0;
 112.105  
 112.106 -  // If PrintHeapAtGC is set call the appropriate routi
 112.107 -  void print_heap_before_gc() {
 112.108 -    if (PrintHeapAtGC) {
 112.109 -      Universe::print_heap_before_gc();
 112.110 -    }
 112.111 -    if (_gc_heap_log != NULL) {
 112.112 -      _gc_heap_log->log_heap_before();
 112.113 -    }
 112.114 -  }
 112.115 -  void print_heap_after_gc() {
 112.116 -    if (PrintHeapAtGC) {
 112.117 -      Universe::print_heap_after_gc();
 112.118 -    }
 112.119 -    if (_gc_heap_log != NULL) {
 112.120 -      _gc_heap_log->log_heap_after();
 112.121 -    }
 112.122 -  }
 112.123 +  void print_heap_before_gc();
 112.124 +  void print_heap_after_gc();
 112.125 +
 112.126 +  void trace_heap_before_gc(GCTracer* gc_tracer);
 112.127 +  void trace_heap_after_gc(GCTracer* gc_tracer);
 112.128  
 112.129    // Heap verification
 112.130    virtual void verify(bool silent, VerifyOption option) = 0;
 112.131 @@ -619,7 +618,7 @@
 112.132    inline bool promotion_should_fail();
 112.133  
 112.134    // Reset the PromotionFailureALot counters.  Should be called at the end of a
 112.135 -  // GC in which promotion failure ocurred.
 112.136 +  // GC in which promotion failure occurred.
 112.137    inline void reset_promotion_should_fail(volatile size_t* count);
 112.138    inline void reset_promotion_should_fail();
 112.139  #endif  // #ifndef PRODUCT
   113.1 --- a/src/share/vm/gc_interface/collectedHeap.inline.hpp	Fri Jun 07 09:33:01 2013 -0700
   113.2 +++ b/src/share/vm/gc_interface/collectedHeap.inline.hpp	Mon Jun 10 11:30:51 2013 +0200
   113.3 @@ -25,6 +25,7 @@
   113.4  #ifndef SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
   113.5  #define SHARE_VM_GC_INTERFACE_COLLECTEDHEAP_INLINE_HPP
   113.6  
   113.7 +#include "gc_interface/allocTracer.hpp"
   113.8  #include "gc_interface/collectedHeap.hpp"
   113.9  #include "memory/threadLocalAllocBuffer.inline.hpp"
  113.10  #include "memory/universe.hpp"
  113.11 @@ -107,7 +108,7 @@
  113.12    post_allocation_notify(klass, (oop)obj);
  113.13  }
  113.14  
  113.15 -HeapWord* CollectedHeap::common_mem_allocate_noinit(size_t size, TRAPS) {
  113.16 +HeapWord* CollectedHeap::common_mem_allocate_noinit(KlassHandle klass, size_t size, TRAPS) {
  113.17  
  113.18    // Clear unhandled oops for memory allocation.  Memory allocation might
  113.19    // not take out a lock if from tlab, so clear here.
  113.20 @@ -120,7 +121,7 @@
  113.21  
  113.22    HeapWord* result = NULL;
  113.23    if (UseTLAB) {
  113.24 -    result = CollectedHeap::allocate_from_tlab(THREAD, size);
  113.25 +    result = allocate_from_tlab(klass, THREAD, size);
  113.26      if (result != NULL) {
  113.27        assert(!HAS_PENDING_EXCEPTION,
  113.28               "Unexpected exception, will result in uninitialized storage");
  113.29 @@ -136,6 +137,9 @@
  113.30      assert(!HAS_PENDING_EXCEPTION,
  113.31             "Unexpected exception, will result in uninitialized storage");
  113.32      THREAD->incr_allocated_bytes(size * HeapWordSize);
  113.33 +
  113.34 +    AllocTracer::send_allocation_outside_tlab_event(klass, size * HeapWordSize);
  113.35 +
  113.36      return result;
  113.37    }
  113.38  
  113.39 @@ -165,13 +169,13 @@
  113.40    }
  113.41  }
  113.42  
  113.43 -HeapWord* CollectedHeap::common_mem_allocate_init(size_t size, TRAPS) {
  113.44 -  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
  113.45 +HeapWord* CollectedHeap::common_mem_allocate_init(KlassHandle klass, size_t size, TRAPS) {
  113.46 +  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  113.47    init_obj(obj, size);
  113.48    return obj;
  113.49  }
  113.50  
  113.51 -HeapWord* CollectedHeap::allocate_from_tlab(Thread* thread, size_t size) {
  113.52 +HeapWord* CollectedHeap::allocate_from_tlab(KlassHandle klass, Thread* thread, size_t size) {
  113.53    assert(UseTLAB, "should use UseTLAB");
  113.54  
  113.55    HeapWord* obj = thread->tlab().allocate(size);
  113.56 @@ -179,7 +183,7 @@
  113.57      return obj;
  113.58    }
  113.59    // Otherwise...
  113.60 -  return allocate_from_tlab_slow(thread, size);
  113.61 +  return allocate_from_tlab_slow(klass, thread, size);
  113.62  }
  113.63  
  113.64  void CollectedHeap::init_obj(HeapWord* obj, size_t size) {
  113.65 @@ -194,7 +198,7 @@
  113.66    debug_only(check_for_valid_allocation_state());
  113.67    assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  113.68    assert(size >= 0, "int won't convert to size_t");
  113.69 -  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
  113.70 +  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  113.71    post_allocation_setup_obj(klass, obj);
  113.72    NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  113.73    return (oop)obj;
  113.74 @@ -207,7 +211,7 @@
  113.75    debug_only(check_for_valid_allocation_state());
  113.76    assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  113.77    assert(size >= 0, "int won't convert to size_t");
  113.78 -  HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
  113.79 +  HeapWord* obj = common_mem_allocate_init(klass, size, CHECK_NULL);
  113.80    post_allocation_setup_array(klass, obj, length);
  113.81    NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
  113.82    return (oop)obj;
  113.83 @@ -220,7 +224,7 @@
  113.84    debug_only(check_for_valid_allocation_state());
  113.85    assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
  113.86    assert(size >= 0, "int won't convert to size_t");
  113.87 -  HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
  113.88 +  HeapWord* obj = common_mem_allocate_noinit(klass, size, CHECK_NULL);
  113.89    ((oop)obj)->set_klass_gap(0);
  113.90    post_allocation_setup_array(klass, obj, length);
  113.91  #ifndef PRODUCT
   114.1 --- a/src/share/vm/gc_interface/gcCause.cpp	Fri Jun 07 09:33:01 2013 -0700
   114.2 +++ b/src/share/vm/gc_interface/gcCause.cpp	Mon Jun 10 11:30:51 2013 +0200
   114.3 @@ -72,6 +72,9 @@
   114.4      case _cms_final_remark:
   114.5        return "CMS Final Remark";
   114.6  
   114.7 +    case _cms_concurrent_mark:
   114.8 +      return "CMS Concurrent Mark";
   114.9 +
  114.10      case _old_generation_expanded_on_last_scavenge:
  114.11        return "Old Generation Expanded On Last Scavenge";
  114.12  
   115.1 --- a/src/share/vm/gc_interface/gcCause.hpp	Fri Jun 07 09:33:01 2013 -0700
   115.2 +++ b/src/share/vm/gc_interface/gcCause.hpp	Mon Jun 10 11:30:51 2013 +0200
   115.3 @@ -60,6 +60,7 @@
   115.4      _cms_generation_full,
   115.5      _cms_initial_mark,
   115.6      _cms_final_remark,
   115.7 +    _cms_concurrent_mark,
   115.8  
   115.9      _old_generation_expanded_on_last_scavenge,
  115.10      _old_generation_too_full_to_scavenge,
   116.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   116.2 +++ b/src/share/vm/gc_interface/gcName.hpp	Mon Jun 10 11:30:51 2013 +0200
   116.3 @@ -0,0 +1,61 @@
   116.4 +/*
   116.5 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
   116.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   116.7 + *
   116.8 + * This code is free software; you can redistribute it and/or modify it
   116.9 + * under the terms of the GNU General Public License version 2 only, as
  116.10 + * published by the Free Software Foundation.
  116.11 + *
  116.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  116.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  116.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  116.15 + * version 2 for more details (a copy is included in the LICENSE file that
  116.16 + * accompanied this code).
  116.17 + *
  116.18 + * You should have received a copy of the GNU General Public License version
  116.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  116.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  116.21 + *
  116.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  116.23 + * or visit www.oracle.com if you need additional information or have any
  116.24 + * questions.
  116.25 + *
  116.26 + */
  116.27 +
  116.28 +#ifndef SHARE_VM_GC_INTERFACE_GCNAME_HPP
  116.29 +#define SHARE_VM_GC_INTERFACE_GCNAME_HPP
  116.30 +
  116.31 +#include "utilities/debug.hpp"
  116.32 +
  116.33 +enum GCName {
  116.34 +  ParallelOld,
  116.35 +  SerialOld,
  116.36 +  PSMarkSweep,
  116.37 +  ParallelScavenge,
  116.38 +  DefNew,
  116.39 +  ParNew,
  116.40 +  G1New,
  116.41 +  ConcurrentMarkSweep,
  116.42 +  G1Old,
  116.43 +  GCNameEndSentinel
  116.44 +};
  116.45 +
  116.46 +class GCNameHelper {
  116.47 + public:
  116.48 +  static const char* to_string(GCName name) {
  116.49 +    switch(name) {
  116.50 +      case ParallelOld: return "ParallelOld";
  116.51 +      case SerialOld: return "SerialOld";
  116.52 +      case PSMarkSweep: return "PSMarkSweep";
  116.53 +      case ParallelScavenge: return "ParallelScavenge";
  116.54 +      case DefNew: return "DefNew";
  116.55 +      case ParNew: return "ParNew";
  116.56 +      case G1New: return "G1New";
  116.57 +      case ConcurrentMarkSweep: return "ConcurrentMarkSweep";
  116.58 +      case G1Old: return "G1Old";
  116.59 +      default: ShouldNotReachHere(); return NULL;
  116.60 +    }
  116.61 +  }
  116.62 +};
  116.63 +
  116.64 +#endif // SHARE_VM_GC_INTERFACE_GCNAME_HPP
   117.1 --- a/src/share/vm/memory/allocation.hpp	Fri Jun 07 09:33:01 2013 -0700
   117.2 +++ b/src/share/vm/memory/allocation.hpp	Mon Jun 10 11:30:51 2013 +0200
   117.3 @@ -157,7 +157,8 @@
   117.4    mtJavaHeap          = 0x0C00,  // Java heap
   117.5    mtClassShared       = 0x0D00,  // class data sharing
   117.6    mtTest              = 0x0E00,  // Test type for verifying NMT
   117.7 -  mt_number_of_types  = 0x000E,  // number of memory types (mtDontTrack
   117.8 +  mtTracing           = 0x0F00,  // memory used for Tracing
   117.9 +  mt_number_of_types  = 0x000F,  // number of memory types (mtDontTrack
  117.10                                   // is not included as validate type)
  117.11    mtDontTrack         = 0x0F00,  // memory we do not or cannot track
  117.12    mt_masks            = 0x7F00,
   118.1 --- a/src/share/vm/memory/defNewGeneration.cpp	Fri Jun 07 09:33:01 2013 -0700
   118.2 +++ b/src/share/vm/memory/defNewGeneration.cpp	Mon Jun 10 11:30:51 2013 +0200
   118.3 @@ -1,5 +1,5 @@
   118.4  /*
   118.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   118.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   118.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   118.8   *
   118.9   * This code is free software; you can redistribute it and/or modify it
  118.10 @@ -25,6 +25,10 @@
  118.11  #include "precompiled.hpp"
  118.12  #include "gc_implementation/shared/collectorCounters.hpp"
  118.13  #include "gc_implementation/shared/gcPolicyCounters.hpp"
  118.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
  118.15 +#include "gc_implementation/shared/gcTimer.hpp"
  118.16 +#include "gc_implementation/shared/gcTraceTime.hpp"
  118.17 +#include "gc_implementation/shared/gcTrace.hpp"
  118.18  #include "gc_implementation/shared/spaceDecorator.hpp"
  118.19  #include "memory/defNewGeneration.inline.hpp"
  118.20  #include "memory/gcLocker.inline.hpp"
  118.21 @@ -223,6 +227,8 @@
  118.22    _next_gen = NULL;
  118.23    _tenuring_threshold = MaxTenuringThreshold;
  118.24    _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
  118.25 +
  118.26 +  _gc_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  118.27  }
  118.28  
  118.29  void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
  118.30 @@ -558,12 +564,18 @@
  118.31                                 size_t size,
  118.32                                 bool   is_tlab) {
  118.33    assert(full || size > 0, "otherwise we don't want to collect");
  118.34 +
  118.35    GenCollectedHeap* gch = GenCollectedHeap::heap();
  118.36 +
  118.37 +  _gc_timer->register_gc_start(os::elapsed_counter());
  118.38 +  DefNewTracer gc_tracer;
  118.39 +  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
  118.40 +
  118.41    _next_gen = gch->next_gen(this);
  118.42    assert(_next_gen != NULL,
  118.43      "This must be the youngest gen, and not the only gen");
  118.44  
  118.45 -  // If the next generation is too full to accomodate promotion
  118.46 +  // If the next generation is too full to accommodate promotion
  118.47    // from this generation, pass on collection; let the next generation
  118.48    // do it.
  118.49    if (!collection_attempt_is_safe()) {
  118.50 @@ -577,10 +589,12 @@
  118.51  
  118.52    init_assuming_no_promotion_failure();
  118.53  
  118.54 -  TraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
  118.55 +  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  118.56    // Capture heap used before collection (for printing).
  118.57    size_t gch_prev_used = gch->used();
  118.58  
  118.59 +  gch->trace_heap_before_gc(&gc_tracer);
  118.60 +
  118.61    SpecializationStats::clear();
  118.62  
  118.63    // These can be shared for all code paths
  118.64 @@ -631,9 +645,12 @@
  118.65    FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
  118.66    ReferenceProcessor* rp = ref_processor();
  118.67    rp->setup_policy(clear_all_soft_refs);
  118.68 +  const ReferenceProcessorStats& stats =
  118.69    rp->process_discovered_references(&is_alive, &keep_alive, &evacuate_followers,
  118.70 -                                    NULL);
  118.71 -  if (!promotion_failed()) {
  118.72 +                                    NULL, _gc_timer);
  118.73 +  gc_tracer.report_gc_reference_stats(stats);
  118.74 +
  118.75 +  if (!_promotion_failed) {
  118.76      // Swap the survivor spaces.
  118.77      eden()->clear(SpaceDecorator::Mangle);
  118.78      from()->clear(SpaceDecorator::Mangle);
  118.79 @@ -680,6 +697,7 @@
  118.80  
  118.81      // Inform the next generation that a promotion failure occurred.
  118.82      _next_gen->promotion_failure_occurred();
  118.83 +    gc_tracer.report_promotion_failed(_promotion_failed_info);
  118.84  
  118.85      // Reset the PromotionFailureALot counters.
  118.86      NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
  118.87 @@ -689,11 +707,18 @@
  118.88    to()->set_concurrent_iteration_safe_limit(to()->top());
  118.89    SpecializationStats::print();
  118.90  
  118.91 -  // We need to use a monotonically non-deccreasing time in ms
  118.92 +  // We need to use a monotonically non-decreasing time in ms
  118.93    // or we will see time-warp warnings and os::javaTimeMillis()
  118.94    // does not guarantee monotonicity.
  118.95    jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  118.96    update_time_of_last_gc(now);
  118.97 +
  118.98 +  gch->trace_heap_after_gc(&gc_tracer);
  118.99 +  gc_tracer.report_tenuring_threshold(tenuring_threshold());
 118.100 +
 118.101 +  _gc_timer->register_gc_end(os::elapsed_counter());
 118.102 +
 118.103 +  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 118.104  }
 118.105  
 118.106  class RemoveForwardPointerClosure: public ObjectClosure {
 118.107 @@ -705,6 +730,7 @@
 118.108  
 118.109  void DefNewGeneration::init_assuming_no_promotion_failure() {
 118.110    _promotion_failed = false;
 118.111 +  _promotion_failed_info.reset();
 118.112    from()->set_next_compaction_space(NULL);
 118.113  }
 118.114  
 118.115 @@ -726,7 +752,7 @@
 118.116  }
 118.117  
 118.118  void DefNewGeneration::preserve_mark(oop obj, markOop m) {
 118.119 -  assert(promotion_failed() && m->must_be_preserved_for_promotion_failure(obj),
 118.120 +  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
 118.121           "Oversaving!");
 118.122    _objs_with_preserved_marks.push(obj);
 118.123    _preserved_marks_of_objs.push(m);
 118.124 @@ -744,6 +770,7 @@
 118.125                          old->size());
 118.126    }
 118.127    _promotion_failed = true;
 118.128 +  _promotion_failed_info.register_copy_failure(old->size());
 118.129    preserve_mark_if_necessary(old, old->mark());
 118.130    // forward to self
 118.131    old->forward_to(old);
 118.132 @@ -962,6 +989,10 @@
 118.133    from()->set_top_for_allocations();
 118.134  }
 118.135  
 118.136 +void DefNewGeneration::ref_processor_init() {
 118.137 +  Generation::ref_processor_init();
 118.138 +}
 118.139 +
 118.140  
 118.141  void DefNewGeneration::update_counters() {
 118.142    if (UsePerfData) {
   119.1 --- a/src/share/vm/memory/defNewGeneration.hpp	Fri Jun 07 09:33:01 2013 -0700
   119.2 +++ b/src/share/vm/memory/defNewGeneration.hpp	Mon Jun 10 11:30:51 2013 +0200
   119.3 @@ -1,5 +1,5 @@
   119.4  /*
   119.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   119.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   119.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   119.8   *
   119.9   * This code is free software; you can redistribute it and/or modify it
  119.10 @@ -28,12 +28,14 @@
  119.11  #include "gc_implementation/shared/ageTable.hpp"
  119.12  #include "gc_implementation/shared/cSpaceCounters.hpp"
  119.13  #include "gc_implementation/shared/generationCounters.hpp"
  119.14 +#include "gc_implementation/shared/copyFailedInfo.hpp"
  119.15  #include "memory/generation.inline.hpp"
  119.16  #include "utilities/stack.hpp"
  119.17  
  119.18  class EdenSpace;
  119.19  class ContiguousSpace;
  119.20  class ScanClosure;
  119.21 +class STWGCTimer;
  119.22  
  119.23  // DefNewGeneration is a young generation containing eden, from- and
  119.24  // to-space.
  119.25 @@ -46,15 +48,17 @@
  119.26    uint        _tenuring_threshold;   // Tenuring threshold for next collection.
  119.27    ageTable    _age_table;
  119.28    // Size of object to pretenure in words; command line provides bytes
  119.29 -  size_t        _pretenure_size_threshold_words;
  119.30 +  size_t      _pretenure_size_threshold_words;
  119.31  
  119.32    ageTable*   age_table() { return &_age_table; }
  119.33 +
  119.34    // Initialize state to optimistically assume no promotion failure will
  119.35    // happen.
  119.36    void   init_assuming_no_promotion_failure();
  119.37    // True iff a promotion has failed in the current collection.
  119.38    bool   _promotion_failed;
  119.39    bool   promotion_failed() { return _promotion_failed; }
  119.40 +  PromotionFailedInfo _promotion_failed_info;
  119.41  
  119.42    // Handling promotion failure.  A young generation collection
  119.43    // can fail if a live object cannot be copied out of its
  119.44 @@ -132,6 +136,8 @@
  119.45    ContiguousSpace* _from_space;
  119.46    ContiguousSpace* _to_space;
  119.47  
  119.48 +  STWGCTimer* _gc_timer;
  119.49 +
  119.50    enum SomeProtectedConstants {
  119.51      // Generations are GenGrain-aligned and have size that are multiples of
  119.52      // GenGrain.
  119.53 @@ -203,6 +209,8 @@
  119.54    DefNewGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
  119.55                     const char* policy="Copy");
  119.56  
  119.57 +  virtual void ref_processor_init();
  119.58 +
  119.59    virtual Generation::Name kind() { return Generation::DefNew; }
  119.60  
  119.61    // Accessing spaces
   120.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Fri Jun 07 09:33:01 2013 -0700
   120.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Mon Jun 10 11:30:51 2013 +0200
   120.3 @@ -1,5 +1,5 @@
   120.4  /*
   120.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
   120.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
   120.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   120.8   *
   120.9   * This code is free software; you can redistribute it and/or modify it
  120.10 @@ -28,6 +28,7 @@
  120.11  #include "classfile/vmSymbols.hpp"
  120.12  #include "code/icBuffer.hpp"
  120.13  #include "gc_implementation/shared/collectorCounters.hpp"
  120.14 +#include "gc_implementation/shared/gcTraceTime.hpp"
  120.15  #include "gc_implementation/shared/vmGCOperations.hpp"
  120.16  #include "gc_interface/collectedHeap.inline.hpp"
  120.17  #include "memory/filemap.hpp"
  120.18 @@ -388,7 +389,7 @@
  120.19      const char* gc_cause_prefix = complete ? "Full GC" : "GC";
  120.20      gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
  120.21      TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
  120.22 -    TraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, gclog_or_tty);
  120.23 +    GCTraceTime t(GCCauseString(gc_cause_prefix, gc_cause()), PrintGCDetails, false, NULL);
  120.24  
  120.25      gc_prologue(complete);
  120.26      increment_total_collections(complete);
  120.27 @@ -417,10 +418,11 @@
  120.28              // The full_collections increment was missed above.
  120.29              increment_total_full_collections();
  120.30            }
  120.31 -          pre_full_gc_dump();    // do any pre full gc dumps
  120.32 +          pre_full_gc_dump(NULL);    // do any pre full gc dumps
  120.33          }
  120.34          // Timer for individual generations. Last argument is false: no CR
  120.35 -        TraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, gclog_or_tty);
  120.36 +        // FIXME: We should try to start the timing earlier to cover more of the GC pause
  120.37 +        GCTraceTime t1(_gens[i]->short_name(), PrintGCDetails, false, NULL);
  120.38          TraceCollectorStats tcs(_gens[i]->counters());
  120.39          TraceMemoryManagerStats tmms(_gens[i]->kind(),gc_cause());
  120.40  
  120.41 @@ -534,7 +536,8 @@
  120.42      complete = complete || (max_level_collected == n_gens() - 1);
  120.43  
  120.44      if (complete) { // We did a "major" collection
  120.45 -      post_full_gc_dump();   // do any post full gc dumps
  120.46 +      // FIXME: See comment at pre_full_gc_dump call
  120.47 +      post_full_gc_dump(NULL);   // do any post full gc dumps
  120.48      }
  120.49  
  120.50      if (PrintGCDetails) {
   121.1 --- a/src/share/vm/memory/genMarkSweep.cpp	Fri Jun 07 09:33:01 2013 -0700
   121.2 +++ b/src/share/vm/memory/genMarkSweep.cpp	Mon Jun 10 11:30:51 2013 +0200
   121.3 @@ -1,5 +1,5 @@
   121.4  /*
   121.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   121.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   121.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   121.8   *
   121.9   * This code is free software; you can redistribute it and/or modify it
  121.10 @@ -29,6 +29,10 @@
  121.11  #include "classfile/vmSymbols.hpp"
  121.12  #include "code/codeCache.hpp"
  121.13  #include "code/icBuffer.hpp"
  121.14 +#include "gc_implementation/shared/gcHeapSummary.hpp"
  121.15 +#include "gc_implementation/shared/gcTimer.hpp"
  121.16 +#include "gc_implementation/shared/gcTrace.hpp"
  121.17 +#include "gc_implementation/shared/gcTraceTime.hpp"
  121.18  #include "gc_interface/collectedHeap.inline.hpp"
  121.19  #include "memory/genCollectedHeap.hpp"
  121.20  #include "memory/genMarkSweep.hpp"
  121.21 @@ -65,7 +69,9 @@
  121.22    _ref_processor = rp;
  121.23    rp->setup_policy(clear_all_softrefs);
  121.24  
  121.25 -  TraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, gclog_or_tty);
  121.26 +  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL);
  121.27 +
  121.28 +  gch->trace_heap_before_gc(_gc_tracer);
  121.29  
  121.30    // When collecting the permanent generation Method*s may be moving,
  121.31    // so we either have to flush all bcp data or convert it into bci.
  121.32 @@ -155,6 +161,8 @@
  121.33    // does not guarantee monotonicity.
  121.34    jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  121.35    gch->update_time_of_last_gc(now);
  121.36 +
  121.37 +  gch->trace_heap_after_gc(_gc_tracer);
  121.38  }
  121.39  
  121.40  void GenMarkSweep::allocate_stacks() {
  121.41 @@ -192,7 +200,7 @@
  121.42  void GenMarkSweep::mark_sweep_phase1(int level,
  121.43                                    bool clear_all_softrefs) {
  121.44    // Recursively traverse all live objects and mark them
  121.45 -  TraceTime tm("phase 1", PrintGC && Verbose, true, gclog_or_tty);
  121.46 +  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer);
  121.47    trace(" 1");
  121.48  
  121.49    GenCollectedHeap* gch = GenCollectedHeap::heap();
  121.50 @@ -219,8 +227,10 @@
  121.51    // Process reference objects found during marking
  121.52    {
  121.53      ref_processor()->setup_policy(clear_all_softrefs);
  121.54 -    ref_processor()->process_discovered_references(
  121.55 -      &is_alive, &keep_alive, &follow_stack_closure, NULL);
  121.56 +    const ReferenceProcessorStats& stats =
  121.57 +      ref_processor()->process_discovered_references(
  121.58 +        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer);
  121.59 +    gc_tracer()->report_gc_reference_stats(stats);
  121.60    }
  121.61  
  121.62    // This is the point where the entire marking should have completed.
  121.63 @@ -240,6 +250,8 @@
  121.64  
  121.65    // Clean up unreferenced symbols in symbol table.
  121.66    SymbolTable::unlink();
  121.67 +
  121.68 +  gc_tracer()->report_object_count_after_gc(&is_alive);
  121.69  }
  121.70  
  121.71  
  121.72 @@ -259,7 +271,7 @@
  121.73  
  121.74    GenCollectedHeap* gch = GenCollectedHeap::heap();
  121.75  
  121.76 -  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  121.77 +  GCTraceTime tm("phase 2", PrintGC && Verbose, true, _gc_timer);
  121.78    trace("2");
  121.79  
  121.80    gch->prepare_for_compaction();
  121.81 @@ -276,7 +288,7 @@
  121.82    GenCollectedHeap* gch = GenCollectedHeap::heap();
  121.83  
  121.84    // Adjust the pointers to reflect the new locations
  121.85 -  TraceTime tm("phase 3", PrintGC && Verbose, true, gclog_or_tty);
  121.86 +  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer);
  121.87    trace("3");
  121.88  
  121.89    // Need new claim bits for the pointer adjustment tracing.
  121.90 @@ -331,7 +343,7 @@
  121.91    // to use a higher index (saved from phase2) when verifying perm_gen.
  121.92    GenCollectedHeap* gch = GenCollectedHeap::heap();
  121.93  
  121.94 -  TraceTime tm("phase 4", PrintGC && Verbose, true, gclog_or_tty);
  121.95 +  GCTraceTime tm("phase 4", PrintGC && Verbose, true, _gc_timer);
  121.96    trace("4");
  121.97  
  121.98    GenCompactClosure blk;
   122.1 --- a/src/share/vm/memory/generation.cpp	Fri Jun 07 09:33:01 2013 -0700
   122.2 +++ b/src/share/vm/memory/generation.cpp	Mon Jun 10 11:30:51 2013 +0200
   122.3 @@ -1,5 +1,5 @@
   122.4  /*
   122.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   122.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   122.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   122.8   *
   122.9   * This code is free software; you can redistribute it and/or modify it
  122.10 @@ -23,6 +23,8 @@
  122.11   */
  122.12  
  122.13  #include "precompiled.hpp"
  122.14 +#include "gc_implementation/shared/gcTimer.hpp"
  122.15 +#include "gc_implementation/shared/gcTrace.hpp"
  122.16  #include "gc_implementation/shared/spaceDecorator.hpp"
  122.17  #include "gc_interface/collectedHeap.inline.hpp"
  122.18  #include "memory/allocation.inline.hpp"
  122.19 @@ -624,12 +626,26 @@
  122.20                                             bool   clear_all_soft_refs,
  122.21                                             size_t size,
  122.22                                             bool   is_tlab) {
  122.23 +  GenCollectedHeap* gch = GenCollectedHeap::heap();
  122.24 +
  122.25    SpecializationStats::clear();
  122.26    // Temporarily expand the span of our ref processor, so
  122.27    // refs discovery is over the entire heap, not just this generation
  122.28    ReferenceProcessorSpanMutator
  122.29 -    x(ref_processor(), GenCollectedHeap::heap()->reserved_region());
  122.30 +    x(ref_processor(), gch->reserved_region());
  122.31 +
  122.32 +  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  122.33 +  gc_timer->register_gc_start(os::elapsed_counter());
  122.34 +
  122.35 +  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  122.36 +  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());
  122.37 +
  122.38    GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);
  122.39 +
  122.40 +  gc_timer->register_gc_end(os::elapsed_counter());
  122.41 +
  122.42 +  gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());
  122.43 +
  122.44    SpecializationStats::print();
  122.45  }
  122.46  
   123.1 --- a/src/share/vm/memory/heapInspection.cpp	Fri Jun 07 09:33:01 2013 -0700
   123.2 +++ b/src/share/vm/memory/heapInspection.cpp	Mon Jun 10 11:30:51 2013 +0200
   123.3 @@ -95,7 +95,7 @@
   123.4      }
   123.5      elt = elt->next();
   123.6    }
   123.7 -  elt = new KlassInfoEntry(k, list());
   123.8 +  elt = new (std::nothrow) KlassInfoEntry(k, list());
   123.9    // We may be out of space to allocate the new entry.
  123.10    if (elt != NULL) {
  123.11      set_list(elt);
  123.12 @@ -127,13 +127,15 @@
  123.13    _table->lookup(k);
  123.14  }
  123.15  
  123.16 -KlassInfoTable::KlassInfoTable(int size, HeapWord* ref,
  123.17 -                               bool need_class_stats) {
  123.18 +KlassInfoTable::KlassInfoTable(bool need_class_stats) {
  123.19 +  _size_of_instances_in_words = 0;
  123.20    _size = 0;
  123.21 -  _ref = ref;
  123.22 -  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal);
  123.23 +  _ref = (HeapWord*) Universe::boolArrayKlassObj();
  123.24 +  _buckets =
  123.25 +    (KlassInfoBucket*)  AllocateHeap(sizeof(KlassInfoBucket) * _num_buckets,
  123.26 +                                            mtInternal, 0, AllocFailStrategy::RETURN_NULL);
  123.27    if (_buckets != NULL) {
  123.28 -    _size = size;
  123.29 +    _size = _num_buckets;
  123.30      for (int index = 0; index < _size; index++) {
  123.31        _buckets[index].initialize();
  123.32      }
  123.33 @@ -179,6 +181,7 @@
  123.34    if (elt != NULL) {
  123.35      elt->set_count(elt->count() + 1);
  123.36      elt->set_words(elt->words() + obj->size());
  123.37 +    _size_of_instances_in_words += obj->size();
  123.38      return true;
  123.39    } else {
  123.40      return false;
  123.41 @@ -192,14 +195,18 @@
  123.42    }
  123.43  }
  123.44  
  123.45 +size_t KlassInfoTable::size_of_instances_in_words() const {
  123.46 +  return _size_of_instances_in_words;
  123.47 +}
  123.48 +
  123.49  int KlassInfoHisto::sort_helper(KlassInfoEntry** e1, KlassInfoEntry** e2) {
  123.50    return (*e1)->compare(*e1,*e2);
  123.51  }
  123.52  
  123.53 -KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title, int estimatedCount) :
  123.54 +KlassInfoHisto::KlassInfoHisto(KlassInfoTable* cit, const char* title) :
  123.55    _cit(cit),
  123.56    _title(title) {
  123.57 -  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(estimatedCount,true);
  123.58 +  _elements = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<KlassInfoEntry*>(_histo_initial_size, true);
  123.59  }
  123.60  
  123.61  KlassInfoHisto::~KlassInfoHisto() {
  123.62 @@ -444,25 +451,37 @@
  123.63   private:
  123.64    KlassInfoTable* _cit;
  123.65    size_t _missed_count;
  123.66 +  BoolObjectClosure* _filter;
  123.67   public:
  123.68 -  RecordInstanceClosure(KlassInfoTable* cit) :
  123.69 -    _cit(cit), _missed_count(0) {}
  123.70 +  RecordInstanceClosure(KlassInfoTable* cit, BoolObjectClosure* filter) :
  123.71 +    _cit(cit), _missed_count(0), _filter(filter) {}
  123.72  
  123.73    void do_object(oop obj) {
  123.74 -    if (!_cit->record_instance(obj)) {
  123.75 -      _missed_count++;
  123.76 +    if (should_visit(obj)) {
  123.77 +      if (!_cit->record_instance(obj)) {
  123.78 +        _missed_count++;
  123.79 +      }
  123.80      }
  123.81    }
  123.82  
  123.83    size_t missed_count() { return _missed_count; }
  123.84 +
  123.85 + private:
  123.86 +  bool should_visit(oop obj) {
  123.87 +    return _filter == NULL || _filter->do_object_b(obj);
  123.88 +  }
  123.89  };
  123.90  
  123.91 -void HeapInspection::heap_inspection(outputStream* st, bool need_prologue) {
  123.92 +size_t HeapInspection::populate_table(KlassInfoTable* cit, BoolObjectClosure *filter) {
  123.93    ResourceMark rm;
  123.94 -  // Get some random number for ref (the hash key)
  123.95 -  HeapWord* ref = (HeapWord*) Universe::boolArrayKlassObj();
  123.96 -  CollectedHeap* heap = Universe::heap();
  123.97 -  bool is_shared_heap = false;
  123.98 +
  123.99 +  RecordInstanceClosure ric(cit, filter);
 123.100 +  Universe::heap()->object_iterate(&ric);
 123.101 +  return ric.missed_count();
 123.102 +}
 123.103 +
 123.104 +void HeapInspection::heap_inspection(outputStream* st) {
 123.105 +  ResourceMark rm;
 123.106  
 123.107    if (_print_help) {
 123.108      for (int c=0; c<KlassSizeStats::_num_columns; c++) {
 123.109 @@ -482,39 +501,30 @@
 123.110      return;
 123.111    }
 123.112  
 123.113 -  // Collect klass instance info
 123.114 -  KlassInfoTable cit(KlassInfoTable::cit_size, ref, _print_class_stats);
 123.115 +  KlassInfoTable cit(_print_class_stats);
 123.116    if (!cit.allocation_failed()) {
 123.117 -    // Iterate over objects in the heap
 123.118 -    RecordInstanceClosure ric(&cit);
 123.119 -    Universe::heap()->object_iterate(&ric);
 123.120 -
 123.121 -    // Report if certain classes are not counted because of
 123.122 -    // running out of C-heap for the histogram.
 123.123 -    size_t missed_count = ric.missed_count();
 123.124 +    size_t missed_count = populate_table(&cit);
 123.125      if (missed_count != 0) {
 123.126        st->print_cr("WARNING: Ran out of C-heap; undercounted " SIZE_FORMAT
 123.127                     " total instances in data below",
 123.128                     missed_count);
 123.129      }
 123.130 +
 123.131      // Sort and print klass instance info
 123.132      const char *title = "\n"
 123.133                " num     #instances         #bytes  class name\n"
 123.134                "----------------------------------------------";
 123.135 -    KlassInfoHisto histo(&cit, title, KlassInfoHisto::histo_initial_size);
 123.136 +    KlassInfoHisto histo(&cit, title);
 123.137      HistoClosure hc(&histo);
 123.138 +
 123.139      cit.iterate(&hc);
 123.140 +
 123.141      histo.sort();
 123.142      histo.print_histo_on(st, _print_class_stats, _csv_format, _columns);
 123.143    } else {
 123.144      st->print_cr("WARNING: Ran out of C-heap; histogram not generated");
 123.145    }
 123.146    st->flush();
 123.147 -
 123.148 -  if (need_prologue && is_shared_heap) {
 123.149 -    SharedHeap* sh = (SharedHeap*)heap;
 123.150 -    sh->gc_epilogue(false /* !full */); // release all acquired locks, etc.
 123.151 -  }
 123.152  }
 123.153  
 123.154  class FindInstanceClosure : public ObjectClosure {
   124.1 --- a/src/share/vm/memory/heapInspection.hpp	Fri Jun 07 09:33:01 2013 -0700
   124.2 +++ b/src/share/vm/memory/heapInspection.hpp	Mon Jun 10 11:30:51 2013 +0200
   124.3 @@ -26,6 +26,7 @@
   124.4  #define SHARE_VM_MEMORY_HEAPINSPECTION_HPP
   124.5  
   124.6  #include "memory/allocation.inline.hpp"
   124.7 +#include "memory/klassInfoClosure.hpp"
   124.8  #include "oops/oop.inline.hpp"
   124.9  #include "oops/annotations.hpp"
  124.10  #include "utilities/macros.hpp"
  124.11 @@ -203,12 +204,6 @@
  124.12    const char* name() const;
  124.13  };
  124.14  
  124.15 -class KlassInfoClosure: public StackObj {
  124.16 - public:
  124.17 -  // Called for each KlassInfoEntry.
  124.18 -  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
  124.19 -};
  124.20 -
  124.21  class KlassInfoBucket: public CHeapObj<mtInternal> {
  124.22   private:
  124.23    KlassInfoEntry* _list;
  124.24 @@ -224,6 +219,8 @@
  124.25  class KlassInfoTable: public StackObj {
  124.26   private:
  124.27    int _size;
  124.28 +  static const int _num_buckets = 20011;
  124.29 +  size_t _size_of_instances_in_words;
  124.30  
  124.31    // An aligned reference address (typically the least
  124.32    // address in the perm gen) used for hashing klass
  124.33 @@ -242,21 +239,19 @@
  124.34    };
  124.35  
  124.36   public:
  124.37 -  // Table size
  124.38 -  enum {
  124.39 -    cit_size = 20011
  124.40 -  };
  124.41 -  KlassInfoTable(int size, HeapWord* ref, bool need_class_stats);
  124.42 +  KlassInfoTable(bool need_class_stats);
  124.43    ~KlassInfoTable();
  124.44    bool record_instance(const oop obj);
  124.45    void iterate(KlassInfoClosure* cic);
  124.46    bool allocation_failed() { return _buckets == NULL; }
  124.47 +  size_t size_of_instances_in_words() const;
  124.48  
  124.49    friend class KlassInfoHisto;
  124.50  };
  124.51  
  124.52  class KlassInfoHisto : public StackObj {
  124.53   private:
  124.54 +  static const int _histo_initial_size = 1000;
  124.55    KlassInfoTable *_cit;
  124.56    GrowableArray<KlassInfoEntry*>* _elements;
  124.57    GrowableArray<KlassInfoEntry*>* elements() const { return _elements; }
  124.58 @@ -334,11 +329,7 @@
  124.59    }
  124.60  
  124.61   public:
  124.62 -  enum {
  124.63 -    histo_initial_size = 1000
  124.64 -  };
  124.65 -  KlassInfoHisto(KlassInfoTable* cit, const char* title,
  124.66 -             int estimatedCount);
  124.67 +  KlassInfoHisto(KlassInfoTable* cit, const char* title);
  124.68    ~KlassInfoHisto();
  124.69    void add(KlassInfoEntry* cie);
  124.70    void print_histo_on(outputStream* st, bool print_class_stats, bool csv_format, const char *columns);
  124.71 @@ -347,6 +338,11 @@
  124.72  
  124.73  #endif // INCLUDE_SERVICES
  124.74  
  124.75 +// These declarations are needed since the declarations of KlassInfoTable and
  124.76 +// KlassInfoClosure are guarded by #if INCLUDE_SERVICES
  124.77 +class KlassInfoTable;
  124.78 +class KlassInfoClosure;
  124.79 +
  124.80  class HeapInspection : public StackObj {
  124.81    bool _csv_format; // "comma separated values" format for spreadsheet.
  124.82    bool _print_help;
  124.83 @@ -357,8 +353,11 @@
  124.84                   bool print_class_stats, const char *columns) :
  124.85        _csv_format(csv_format), _print_help(print_help),
  124.86        _print_class_stats(print_class_stats), _columns(columns) {}
  124.87 -  void heap_inspection(outputStream* st, bool need_prologue) NOT_SERVICES_RETURN;
  124.88 +  void heap_inspection(outputStream* st) NOT_SERVICES_RETURN;
  124.89 +  size_t populate_table(KlassInfoTable* cit, BoolObjectClosure* filter = NULL) NOT_SERVICES_RETURN;
  124.90    static void find_instances_at_safepoint(Klass* k, GrowableArray<oop>* result) NOT_SERVICES_RETURN;
  124.91 + private:
  124.92 +  void iterate_over_heap(KlassInfoTable* cit, BoolObjectClosure* filter = NULL);
  124.93  };
  124.94  
  124.95  #endif // SHARE_VM_MEMORY_HEAPINSPECTION_HPP
   125.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   125.2 +++ b/src/share/vm/memory/klassInfoClosure.hpp	Mon Jun 10 11:30:51 2013 +0200
   125.3 @@ -0,0 +1,36 @@
   125.4 +/*
   125.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   125.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   125.7 + *
   125.8 + * This code is free software; you can redistribute it and/or modify it
   125.9 + * under the terms of the GNU General Public License version 2 only, as
  125.10 + * published by the Free Software Foundation.
  125.11 + *
  125.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  125.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  125.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  125.15 + * version 2 for more details (a copy is included in the LICENSE file that
  125.16 + * accompanied this code).
  125.17 + *
  125.18 + * You should have received a copy of the GNU General Public License version
  125.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  125.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  125.21 + *
  125.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  125.23 + * or visit www.oracle.com if you need additional information or have any
  125.24 + * questions.
  125.25 + *
  125.26 + */
  125.27 +
  125.28 +#ifndef SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
  125.29 +#define SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
  125.30 +
  125.31 +class KlassInfoEntry;
  125.32 +
  125.33 +class KlassInfoClosure : public StackObj {
  125.34 + public:
  125.35 +  // Called for each KlassInfoEntry.
  125.36 +  virtual void do_cinfo(KlassInfoEntry* cie) = 0;
  125.37 +};
  125.38 +
  125.39 +#endif // SHARE_VM_MEMORY_KLASSINFOCLOSURE_HPP
   126.1 --- a/src/share/vm/memory/metaspace.hpp	Fri Jun 07 09:33:01 2013 -0700
   126.2 +++ b/src/share/vm/memory/metaspace.hpp	Mon Jun 10 11:30:51 2013 +0200
   126.3 @@ -193,7 +193,10 @@
   126.4  };
   126.5  
   126.6  class MetaspaceAux : AllStatic {
   126.7 +  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
   126.8 +  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
   126.9  
  126.10 + public:
  126.11    // Statistics for class space and data space in metaspace.
  126.12  
  126.13    // These methods iterate over the classloader data graph
  126.14 @@ -205,10 +208,6 @@
  126.15    // Iterates over the virtual space list.
  126.16    static size_t reserved_in_bytes(Metaspace::MetadataType mdtype);
  126.17  
  126.18 -  static size_t free_chunks_total(Metaspace::MetadataType mdtype);
  126.19 -  static size_t free_chunks_total_in_bytes(Metaspace::MetadataType mdtype);
  126.20 -
  126.21 - public:
  126.22    // Running sum of space in all Metachunks that has been
  126.23    // allocated to a Metaspace.  This is used instead of
  126.24    // iterating over all the classloaders. One for each
   127.1 --- a/src/share/vm/memory/oopFactory.hpp	Fri Jun 07 09:33:01 2013 -0700
   127.2 +++ b/src/share/vm/memory/oopFactory.hpp	Mon Jun 10 11:30:51 2013 +0200
   127.3 @@ -1,5 +1,5 @@
   127.4  /*
   127.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   127.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   127.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   127.8   *
   127.9   * This code is free software; you can redistribute it and/or modify it
  127.10 @@ -27,6 +27,7 @@
  127.11  
  127.12  #include "classfile/symbolTable.hpp"
  127.13  #include "classfile/systemDictionary.hpp"
  127.14 +#include "memory/referenceType.hpp"
  127.15  #include "memory/universe.hpp"
  127.16  #include "oops/objArrayKlass.hpp"
  127.17  #include "oops/oop.hpp"
   128.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Fri Jun 07 09:33:01 2013 -0700
   128.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Mon Jun 10 11:30:51 2013 +0200
   128.3 @@ -1,5 +1,5 @@
   128.4  /*
   128.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   128.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   128.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   128.8   *
   128.9   * This code is free software; you can redistribute it and/or modify it
  128.10 @@ -25,6 +25,8 @@
  128.11  #include "precompiled.hpp"
  128.12  #include "classfile/javaClasses.hpp"
  128.13  #include "classfile/systemDictionary.hpp"
  128.14 +#include "gc_implementation/shared/gcTimer.hpp"
  128.15 +#include "gc_implementation/shared/gcTraceTime.hpp"
  128.16  #include "gc_interface/collectedHeap.hpp"
  128.17  #include "gc_interface/collectedHeap.inline.hpp"
  128.18  #include "memory/referencePolicy.hpp"
  128.19 @@ -180,11 +182,20 @@
  128.20    // past clock value.
  128.21  }
  128.22  
  128.23 -void ReferenceProcessor::process_discovered_references(
  128.24 +size_t ReferenceProcessor::total_count(DiscoveredList lists[]) {
  128.25 +  size_t total = 0;
  128.26 +  for (uint i = 0; i < _max_num_q; ++i) {
  128.27 +    total += lists[i].length();
  128.28 +  }
  128.29 +  return total;
  128.30 +}
  128.31 +
  128.32 +ReferenceProcessorStats ReferenceProcessor::process_discovered_references(
  128.33    BoolObjectClosure*           is_alive,
  128.34    OopClosure*                  keep_alive,
  128.35    VoidClosure*                 complete_gc,
  128.36 -  AbstractRefProcTaskExecutor* task_executor) {
  128.37 +  AbstractRefProcTaskExecutor* task_executor,
  128.38 +  GCTimer*                     gc_timer) {
  128.39    NOT_PRODUCT(verify_ok_to_handle_reflists());
  128.40  
  128.41    assert(!enqueuing_is_done(), "If here enqueuing should not be complete");
  128.42 @@ -202,34 +213,43 @@
  128.43    _soft_ref_timestamp_clock = java_lang_ref_SoftReference::clock();
  128.44  
  128.45    bool trace_time = PrintGCDetails && PrintReferenceGC;
  128.46 +
  128.47    // Soft references
  128.48 +  size_t soft_count = 0;
  128.49    {
  128.50 -    TraceTime tt("SoftReference", trace_time, false, gclog_or_tty);
  128.51 -    process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
  128.52 -                               is_alive, keep_alive, complete_gc, task_executor);
  128.53 +    GCTraceTime tt("SoftReference", trace_time, false, gc_timer);
  128.54 +    soft_count =
  128.55 +      process_discovered_reflist(_discoveredSoftRefs, _current_soft_ref_policy, true,
  128.56 +                                 is_alive, keep_alive, complete_gc, task_executor);
  128.57    }
  128.58  
  128.59    update_soft_ref_master_clock();
  128.60  
  128.61    // Weak references
  128.62 +  size_t weak_count = 0;
  128.63    {
  128.64 -    TraceTime tt("WeakReference", trace_time, false, gclog_or_tty);
  128.65 -    process_discovered_reflist(_discoveredWeakRefs, NULL, true,
  128.66 -                               is_alive, keep_alive, complete_gc, task_executor);
  128.67 +    GCTraceTime tt("WeakReference", trace_time, false, gc_timer);
  128.68 +    weak_count =
  128.69 +      process_discovered_reflist(_discoveredWeakRefs, NULL, true,
  128.70 +                                 is_alive, keep_alive, complete_gc, task_executor);
  128.71    }
  128.72  
  128.73    // Final references
  128.74 +  size_t final_count = 0;
  128.75    {
  128.76 -    TraceTime tt("FinalReference", trace_time, false, gclog_or_tty);
  128.77 -    process_discovered_reflist(_discoveredFinalRefs, NULL, false,
  128.78 -                               is_alive, keep_alive, complete_gc, task_executor);
  128.79 +    GCTraceTime tt("FinalReference", trace_time, false, gc_timer);
  128.80 +    final_count =
  128.81 +      process_discovered_reflist(_discoveredFinalRefs, NULL, false,
  128.82 +                                 is_alive, keep_alive, complete_gc, task_executor);
  128.83    }
  128.84  
  128.85    // Phantom references
  128.86 +  size_t phantom_count = 0;
  128.87    {
  128.88 -    TraceTime tt("PhantomReference", trace_time, false, gclog_or_tty);
  128.89 -    process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
  128.90 -                               is_alive, keep_alive, complete_gc, task_executor);
  128.91 +    GCTraceTime tt("PhantomReference", trace_time, false, gc_timer);
  128.92 +    phantom_count =
  128.93 +      process_discovered_reflist(_discoveredPhantomRefs, NULL, false,
  128.94 +                                 is_alive, keep_alive, complete_gc, task_executor);
  128.95    }
  128.96  
  128.97    // Weak global JNI references. It would make more sense (semantically) to
  128.98 @@ -238,12 +258,14 @@
  128.99    // thus use JNI weak references to circumvent the phantom references and
 128.100    // resurrect a "post-mortem" object.
 128.101    {
 128.102 -    TraceTime tt("JNI Weak Reference", trace_time, false, gclog_or_tty);
 128.103 +    GCTraceTime tt("JNI Weak Reference", trace_time, false, gc_timer);
 128.104      if (task_executor != NULL) {
 128.105        task_executor->set_single_threaded_mode();
 128.106      }
 128.107      process_phaseJNI(is_alive, keep_alive, complete_gc);
 128.108    }
 128.109 +
 128.110 +  return ReferenceProcessorStats(soft_count, weak_count, final_count, phantom_count);
 128.111  }
 128.112  
 128.113  #ifndef PRODUCT
 128.114 @@ -878,7 +900,7 @@
 128.115    balance_queues(_discoveredPhantomRefs);
 128.116  }
 128.117  
 128.118 -void
 128.119 +size_t
 128.120  ReferenceProcessor::process_discovered_reflist(
 128.121    DiscoveredList               refs_lists[],
 128.122    ReferencePolicy*             policy,
 128.123 @@ -901,12 +923,11 @@
 128.124        must_balance) {
 128.125      balance_queues(refs_lists);
 128.126    }
 128.127 +
 128.128 +  size_t total_list_count = total_count(refs_lists);
 128.129 +
 128.130    if (PrintReferenceGC && PrintGCDetails) {
 128.131 -    size_t total = 0;
 128.132 -    for (uint i = 0; i < _max_num_q; ++i) {
 128.133 -      total += refs_lists[i].length();
 128.134 -    }
 128.135 -    gclog_or_tty->print(", %u refs", total);
 128.136 +    gclog_or_tty->print(", %u refs", total_list_count);
 128.137    }
 128.138  
 128.139    // Phase 1 (soft refs only):
 128.140 @@ -951,6 +972,8 @@
 128.141                       is_alive, keep_alive, complete_gc);
 128.142      }
 128.143    }
 128.144 +
 128.145 +  return total_list_count;
 128.146  }
 128.147  
 128.148  void ReferenceProcessor::clean_up_discovered_references() {
 128.149 @@ -1266,14 +1289,15 @@
 128.150    BoolObjectClosure* is_alive,
 128.151    OopClosure* keep_alive,
 128.152    VoidClosure* complete_gc,
 128.153 -  YieldClosure* yield) {
 128.154 +  YieldClosure* yield,
 128.155 +  GCTimer* gc_timer) {
 128.156  
 128.157    NOT_PRODUCT(verify_ok_to_handle_reflists());
 128.158  
 128.159    // Soft references
 128.160    {
 128.161 -    TraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
 128.162 -              false, gclog_or_tty);
 128.163 +    GCTraceTime tt("Preclean SoftReferences", PrintGCDetails && PrintReferenceGC,
 128.164 +              false, gc_timer);
 128.165      for (uint i = 0; i < _max_num_q; i++) {
 128.166        if (yield->should_return()) {
 128.167          return;
 128.168 @@ -1285,8 +1309,8 @@
 128.169  
 128.170    // Weak references
 128.171    {
 128.172 -    TraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
 128.173 -              false, gclog_or_tty);
 128.174 +    GCTraceTime tt("Preclean WeakReferences", PrintGCDetails && PrintReferenceGC,
 128.175 +              false, gc_timer);
 128.176      for (uint i = 0; i < _max_num_q; i++) {
 128.177        if (yield->should_return()) {
 128.178          return;
 128.179 @@ -1298,8 +1322,8 @@
 128.180  
 128.181    // Final references
 128.182    {
 128.183 -    TraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
 128.184 -              false, gclog_or_tty);
 128.185 +    GCTraceTime tt("Preclean FinalReferences", PrintGCDetails && PrintReferenceGC,
 128.186 +              false, gc_timer);
 128.187      for (uint i = 0; i < _max_num_q; i++) {
 128.188        if (yield->should_return()) {
 128.189          return;
 128.190 @@ -1311,8 +1335,8 @@
 128.191  
 128.192    // Phantom references
 128.193    {
 128.194 -    TraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
 128.195 -              false, gclog_or_tty);
 128.196 +    GCTraceTime tt("Preclean PhantomReferences", PrintGCDetails && PrintReferenceGC,
 128.197 +              false, gc_timer);
 128.198      for (uint i = 0; i < _max_num_q; i++) {
 128.199        if (yield->should_return()) {
 128.200          return;
   129.1 --- a/src/share/vm/memory/referenceProcessor.hpp	Fri Jun 07 09:33:01 2013 -0700
   129.2 +++ b/src/share/vm/memory/referenceProcessor.hpp	Mon Jun 10 11:30:51 2013 +0200
   129.3 @@ -1,5 +1,5 @@
   129.4  /*
   129.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   129.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   129.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   129.8   *
   129.9   * This code is free software; you can redistribute it and/or modify it
  129.10 @@ -26,8 +26,12 @@
  129.11  #define SHARE_VM_MEMORY_REFERENCEPROCESSOR_HPP
  129.12  
  129.13  #include "memory/referencePolicy.hpp"
  129.14 +#include "memory/referenceProcessorStats.hpp"
  129.15 +#include "memory/referenceType.hpp"
  129.16  #include "oops/instanceRefKlass.hpp"
  129.17  
  129.18 +class GCTimer;
  129.19 +
  129.20  // ReferenceProcessor class encapsulates the per-"collector" processing
  129.21  // of java.lang.Reference objects for GC. The interface is useful for supporting
  129.22  // a generational abstraction, in particular when there are multiple
  129.23 @@ -204,6 +208,10 @@
  129.24  };
  129.25  
  129.26  class ReferenceProcessor : public CHeapObj<mtGC> {
  129.27 +
  129.28 + private:
  129.29 +  size_t total_count(DiscoveredList lists[]);
  129.30 +
  129.31   protected:
  129.32    // Compatibility with pre-4965777 JDK's
  129.33    static bool _pending_list_uses_discovered_field;
  129.34 @@ -282,13 +290,13 @@
  129.35    }
  129.36  
  129.37    // Process references with a certain reachability level.
  129.38 -  void process_discovered_reflist(DiscoveredList               refs_lists[],
  129.39 -                                  ReferencePolicy*             policy,
  129.40 -                                  bool                         clear_referent,
  129.41 -                                  BoolObjectClosure*           is_alive,
  129.42 -                                  OopClosure*                  keep_alive,
  129.43 -                                  VoidClosure*                 complete_gc,
  129.44 -                                  AbstractRefProcTaskExecutor* task_executor);
  129.45 +  size_t process_discovered_reflist(DiscoveredList               refs_lists[],
  129.46 +                                    ReferencePolicy*             policy,
  129.47 +                                    bool                         clear_referent,
  129.48 +                                    BoolObjectClosure*           is_alive,
  129.49 +                                    OopClosure*                  keep_alive,
  129.50 +                                    VoidClosure*                 complete_gc,
  129.51 +                                    AbstractRefProcTaskExecutor* task_executor);
  129.52  
  129.53    void process_phaseJNI(BoolObjectClosure* is_alive,
  129.54                          OopClosure*        keep_alive,
  129.55 @@ -349,7 +357,8 @@
  129.56    void preclean_discovered_references(BoolObjectClosure* is_alive,
  129.57                                        OopClosure*        keep_alive,
  129.58                                        VoidClosure*       complete_gc,
  129.59 -                                      YieldClosure*      yield);
  129.60 +                                      YieldClosure*      yield,
  129.61 +                                      GCTimer*           gc_timer);
  129.62  
  129.63    // Delete entries in the discovered lists that have
  129.64    // either a null referent or are not active. Such
  129.65 @@ -500,12 +509,13 @@
  129.66    bool discover_reference(oop obj, ReferenceType rt);
  129.67  
  129.68    // Process references found during GC (called by the garbage collector)
  129.69 -  void process_discovered_references(BoolObjectClosure*           is_alive,
  129.70 -                                     OopClosure*                  keep_alive,
  129.71 -                                     VoidClosure*                 complete_gc,
  129.72 -                                     AbstractRefProcTaskExecutor* task_executor);
  129.73 +  ReferenceProcessorStats
  129.74 +  process_discovered_references(BoolObjectClosure*           is_alive,
  129.75 +                                OopClosure*                  keep_alive,
  129.76 +                                VoidClosure*                 complete_gc,
  129.77 +                                AbstractRefProcTaskExecutor* task_executor,
  129.78 +                                GCTimer *gc_timer);
  129.79  
  129.80 - public:
  129.81    // Enqueue references at end of GC (called by the garbage collector)
  129.82    bool enqueue_discovered_references(AbstractRefProcTaskExecutor* task_executor = NULL);
  129.83  
   130.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   130.2 +++ b/src/share/vm/memory/referenceProcessorStats.hpp	Mon Jun 10 11:30:51 2013 +0200
   130.3 @@ -0,0 +1,73 @@
   130.4 +/*
   130.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   130.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   130.7 + *
   130.8 + * This code is free software; you can redistribute it and/or modify it
   130.9 + * under the terms of the GNU General Public License version 2 only, as
  130.10 + * published by the Free Software Foundation.
  130.11 + *
  130.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  130.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  130.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  130.15 + * version 2 for more details (a copy is included in the LICENSE file that
  130.16 + * accompanied this code).
  130.17 + *
  130.18 + * You should have received a copy of the GNU General Public License version
  130.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  130.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  130.21 + *
  130.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  130.23 + * or visit www.oracle.com if you need additional information or have any
  130.24 + * questions.
  130.25 + *
  130.26 + */
  130.27 +
  130.28 +#ifndef SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
  130.29 +#define SHARE_VM_MEMORY_REFERENCEPROCESSORSTATS_HPP
  130.30 +
  130.31 +#include "utilities/globalDefinitions.hpp"
  130.32 +
  130.33 +class ReferenceProcessor;
  130.34 +
  130.35 +// ReferenceProcessorStats contains statistics about how many references that
  130.36 +// have been traversed when processing references during garbage collection.
  130.37 +class ReferenceProcessorStats {
  130.38 +  size_t _soft_count;
  130.39 +  size_t _weak_count;
  130.40 +  size_t _final_count;
  130.41 +  size_t _phantom_count;
  130.42 +
  130.43 + public:
  130.44 +  ReferenceProcessorStats() :
  130.45 +    _soft_count(0),
  130.46 +    _weak_count(0),
  130.47 +    _final_count(0),
  130.48 +    _phantom_count(0) {}
  130.49 +
  130.50 +  ReferenceProcessorStats(size_t soft_count,
  130.51 +                          size_t weak_count,
  130.52 +                          size_t final_count,
  130.53 +                          size_t phantom_count) :
  130.54 +    _soft_count(soft_count),
  130.55 +    _weak_count(weak_count),
  130.56 +    _final_count(final_count),
  130.57 +    _phantom_count(phantom_count)
  130.58 +  {}
  130.59 +
  130.60 +  size_t soft_count() const {
  130.61 +    return _soft_count;
  130.62 +  }
  130.63 +
  130.64 +  size_t weak_count() const {
  130.65 +    return _weak_count;
  130.66 +  }
  130.67 +
  130.68 +  size_t final_count() const {
  130.69 +    return _final_count;
  130.70 +  }
  130.71 +
  130.72 +  size_t phantom_count() const {
  130.73 +    return _phantom_count;
  130.74 +  }
  130.75 +};
  130.76 +#endif
   131.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   131.2 +++ b/src/share/vm/memory/referenceType.hpp	Mon Jun 10 11:30:51 2013 +0200
   131.3 @@ -0,0 +1,41 @@
   131.4 +/*
   131.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   131.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   131.7 + *
   131.8 + * This code is free software; you can redistribute it and/or modify it
   131.9 + * under the terms of the GNU General Public License version 2 only, as
  131.10 + * published by the Free Software Foundation.
  131.11 + *
  131.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  131.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  131.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  131.15 + * version 2 for more details (a copy is included in the LICENSE file that
  131.16 + * accompanied this code).
  131.17 + *
  131.18 + * You should have received a copy of the GNU General Public License version
  131.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  131.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  131.21 + *
  131.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  131.23 + * or visit www.oracle.com if you need additional information or have any
  131.24 + * questions.
  131.25 + *
  131.26 + */
  131.27 +
  131.28 +#ifndef SHARE_VM_MEMORY_REFERENCETYPE_HPP
  131.29 +#define SHARE_VM_MEMORY_REFERENCETYPE_HPP
  131.30 +
  131.31 +#include "utilities/debug.hpp"
  131.32 +
  131.33 +// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
  131.34 +
  131.35 +enum ReferenceType {
  131.36 +  REF_NONE,      // Regular class
  131.37 +  REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
  131.38 +  REF_SOFT,      // Subclass of java/lang/ref/SoftReference
  131.39 +  REF_WEAK,      // Subclass of java/lang/ref/WeakReference
  131.40 +  REF_FINAL,     // Subclass of java/lang/ref/FinalReference
  131.41 +  REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
  131.42 +};
  131.43 +
  131.44 +#endif // SHARE_VM_MEMORY_REFERENCETYPE_HPP
   132.1 --- a/src/share/vm/memory/universe.cpp	Fri Jun 07 09:33:01 2013 -0700
   132.2 +++ b/src/share/vm/memory/universe.cpp	Mon Jun 10 11:30:51 2013 +0200
   132.3 @@ -819,12 +819,14 @@
   132.4        // keep the Universe::narrow_oop_base() set in Universe::reserve_heap()
   132.5        Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
   132.6        if (verbose) {
   132.7 -        tty->print(", Compressed Oops with base: "PTR_FORMAT, Universe::narrow_oop_base());
   132.8 +        tty->print(", %s: "PTR_FORMAT,
   132.9 +            narrow_oop_mode_to_string(HeapBasedNarrowOop),
  132.10 +            Universe::narrow_oop_base());
  132.11        }
  132.12      } else {
  132.13        Universe::set_narrow_oop_base(0);
  132.14        if (verbose) {
  132.15 -        tty->print(", zero based Compressed Oops");
  132.16 +        tty->print(", %s", narrow_oop_mode_to_string(ZeroBasedNarrowOop));
  132.17        }
  132.18  #ifdef _WIN64
  132.19        if (!Universe::narrow_oop_use_implicit_null_checks()) {
  132.20 @@ -839,7 +841,7 @@
  132.21        } else {
  132.22          Universe::set_narrow_oop_shift(0);
  132.23          if (verbose) {
  132.24 -          tty->print(", 32-bits Oops");
  132.25 +          tty->print(", %s", narrow_oop_mode_to_string(UnscaledNarrowOop));
  132.26          }
  132.27        }
  132.28      }
  132.29 @@ -946,6 +948,33 @@
  132.30  }
  132.31  
  132.32  
  132.33 +const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
  132.34 +  switch (mode) {
  132.35 +    case UnscaledNarrowOop:
  132.36 +      return "32-bits Oops";
  132.37 +    case ZeroBasedNarrowOop:
  132.38 +      return "zero based Compressed Oops";
  132.39 +    case HeapBasedNarrowOop:
  132.40 +      return "Compressed Oops with base";
  132.41 +  }
  132.42 +
  132.43 +  ShouldNotReachHere();
  132.44 +  return "";
  132.45 +}
  132.46 +
  132.47 +
  132.48 +Universe::NARROW_OOP_MODE Universe::narrow_oop_mode() {
  132.49 +  if (narrow_oop_base() != 0) {
  132.50 +    return HeapBasedNarrowOop;
  132.51 +  }
  132.52 +
  132.53 +  if (narrow_oop_shift() != 0) {
  132.54 +    return ZeroBasedNarrowOop;
  132.55 +  }
  132.56 +
  132.57 +  return UnscaledNarrowOop;
  132.58 +}
  132.59 +
  132.60  
  132.61  void universe2_init() {
  132.62    EXCEPTION_MARK;
   133.1 --- a/src/share/vm/memory/universe.hpp	Fri Jun 07 09:33:01 2013 -0700
   133.2 +++ b/src/share/vm/memory/universe.hpp	Mon Jun 10 11:30:51 2013 +0200
   133.3 @@ -253,19 +253,6 @@
   133.4      return m;
   133.5    }
   133.6  
   133.7 -  // Narrow Oop encoding mode:
   133.8 -  // 0 - Use 32-bits oops without encoding when
   133.9 -  //     NarrowOopHeapBaseMin + heap_size < 4Gb
  133.10 -  // 1 - Use zero based compressed oops with encoding when
  133.11 -  //     NarrowOopHeapBaseMin + heap_size < 32Gb
  133.12 -  // 2 - Use compressed oops with heap base + encoding.
  133.13 -  enum NARROW_OOP_MODE {
  133.14 -    UnscaledNarrowOop  = 0,
  133.15 -    ZeroBasedNarrowOop = 1,
  133.16 -    HeapBasedNarrowOop = 2
  133.17 -  };
  133.18 -  static char*    preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
  133.19 -  static char*    preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
  133.20    static void     set_narrow_oop_base(address base) {
  133.21      assert(UseCompressedOops, "no compressed oops?");
  133.22      _narrow_oop._base    = base;
  133.23 @@ -380,6 +367,21 @@
  133.24    static CollectedHeap* heap() { return _collectedHeap; }
  133.25  
  133.26    // For UseCompressedOops
  133.27 +  // Narrow Oop encoding mode:
  133.28 +  // 0 - Use 32-bits oops without encoding when
  133.29 +  //     NarrowOopHeapBaseMin + heap_size < 4Gb
  133.30 +  // 1 - Use zero based compressed oops with encoding when
  133.31 +  //     NarrowOopHeapBaseMin + heap_size < 32Gb
  133.32 +  // 2 - Use compressed oops with heap base + encoding.
  133.33 +  enum NARROW_OOP_MODE {
  133.34 +    UnscaledNarrowOop  = 0,
  133.35 +    ZeroBasedNarrowOop = 1,
  133.36 +    HeapBasedNarrowOop = 2
  133.37 +  };
  133.38 +  static NARROW_OOP_MODE narrow_oop_mode();
  133.39 +  static const char* narrow_oop_mode_to_string(NARROW_OOP_MODE mode);
  133.40 +  static char*    preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode);
  133.41 +  static char*    preferred_metaspace_base(size_t heap_size, NARROW_OOP_MODE mode);
  133.42    static address  narrow_oop_base()                       { return  _narrow_oop._base; }
  133.43    static bool  is_narrow_oop_base(void* addr)             { return (narrow_oop_base() == (address)addr); }
  133.44    static int      narrow_oop_shift()                      { return  _narrow_oop._shift; }
   134.1 --- a/src/share/vm/oops/instanceKlass.hpp	Fri Jun 07 09:33:01 2013 -0700
   134.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Mon Jun 10 11:30:51 2013 +0200
   134.3 @@ -26,6 +26,7 @@
   134.4  #define SHARE_VM_OOPS_INSTANCEKLASS_HPP
   134.5  
   134.6  #include "classfile/classLoaderData.hpp"
   134.7 +#include "memory/referenceType.hpp"
   134.8  #include "oops/annotations.hpp"
   134.9  #include "oops/constMethod.hpp"
  134.10  #include "oops/fieldInfo.hpp"
  134.11 @@ -37,6 +38,7 @@
  134.12  #include "utilities/accessFlags.hpp"
  134.13  #include "utilities/bitMap.inline.hpp"
  134.14  #include "utilities/macros.hpp"
  134.15 +#include "trace/traceMacros.hpp"
  134.16  
  134.17  // An InstanceKlass is the VM level representation of a Java class.
  134.18  // It contains all information needed for at class at execution runtime.
   135.1 --- a/src/share/vm/oops/klass.cpp	Fri Jun 07 09:33:01 2013 -0700
   135.2 +++ b/src/share/vm/oops/klass.cpp	Mon Jun 10 11:30:51 2013 +0200
   135.3 @@ -37,6 +37,7 @@
   135.4  #include "oops/klass.inline.hpp"
   135.5  #include "oops/oop.inline2.hpp"
   135.6  #include "runtime/atomic.hpp"
   135.7 +#include "trace/traceMacros.hpp"
   135.8  #include "utilities/stack.hpp"
   135.9  #include "utilities/macros.hpp"
  135.10  #if INCLUDE_ALL_GCS
  135.11 @@ -168,7 +169,7 @@
  135.12    set_next_sibling(NULL);
  135.13    set_next_link(NULL);
  135.14    set_alloc_count(0);
  135.15 -  TRACE_SET_KLASS_TRACE_ID(this, 0);
  135.16 +  TRACE_INIT_ID(this);
  135.17  
  135.18    set_prototype_header(markOopDesc::prototype());
  135.19    set_biased_lock_revocation_count(0);
   136.1 --- a/src/share/vm/opto/compile.cpp	Fri Jun 07 09:33:01 2013 -0700
   136.2 +++ b/src/share/vm/opto/compile.cpp	Mon Jun 10 11:30:51 2013 +0200
   136.3 @@ -1,5 +1,5 @@
   136.4  /*
   136.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   136.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   136.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   136.8   *
   136.9   * This code is free software; you can redistribute it and/or modify it
  136.10 @@ -63,6 +63,7 @@
  136.11  #include "runtime/signature.hpp"
  136.12  #include "runtime/stubRoutines.hpp"
  136.13  #include "runtime/timer.hpp"
  136.14 +#include "trace/tracing.hpp"
  136.15  #include "utilities/copy.hpp"
  136.16  #ifdef TARGET_ARCH_MODEL_x86_32
  136.17  # include "adfiles/ad_x86_32.hpp"
  136.18 @@ -786,7 +787,7 @@
  136.19  
  136.20      if (failing())  return;
  136.21  
  136.22 -    print_method("Before RemoveUseless", 3);
  136.23 +    print_method(PHASE_BEFORE_REMOVEUSELESS, 3);
  136.24  
  136.25      // Remove clutter produced by parsing.
  136.26      if (!failing()) {
  136.27 @@ -1801,9 +1802,9 @@
  136.28  
  136.29    {
  136.30      ResourceMark rm;
  136.31 -    print_method("Before StringOpts", 3);
  136.32 +    print_method(PHASE_BEFORE_STRINGOPTS, 3);
  136.33      PhaseStringOpts pso(initial_gvn(), for_igvn());
  136.34 -    print_method("After StringOpts", 3);
  136.35 +    print_method(PHASE_AFTER_STRINGOPTS, 3);
  136.36    }
  136.37  
  136.38    // now inline anything that we skipped the first time around
  136.39 @@ -1958,7 +1959,7 @@
  136.40  
  136.41    NOT_PRODUCT( verify_graph_edges(); )
  136.42  
  136.43 -  print_method("After Parsing");
  136.44 +  print_method(PHASE_AFTER_PARSING);
  136.45  
  136.46   {
  136.47    // Iterative Global Value Numbering, including ideal transforms
  136.48 @@ -1969,7 +1970,7 @@
  136.49      igvn.optimize();
  136.50    }
  136.51  
  136.52 -  print_method("Iter GVN 1", 2);
  136.53 +  print_method(PHASE_ITER_GVN1, 2);
  136.54  
  136.55    if (failing())  return;
  136.56  
  136.57 @@ -1978,7 +1979,7 @@
  136.58      inline_incrementally(igvn);
  136.59    }
  136.60  
  136.61 -  print_method("Incremental Inline", 2);
  136.62 +  print_method(PHASE_INCREMENTAL_INLINE, 2);
  136.63  
  136.64    if (failing())  return;
  136.65  
  136.66 @@ -1987,7 +1988,7 @@
  136.67      // Inline valueOf() methods now.
  136.68      inline_boxing_calls(igvn);
  136.69  
  136.70 -    print_method("Incremental Boxing Inline", 2);
  136.71 +    print_method(PHASE_INCREMENTAL_BOXING_INLINE, 2);
  136.72  
  136.73      if (failing())  return;
  136.74    }
  136.75 @@ -2002,7 +2003,7 @@
  136.76        // Cleanup graph (remove dead nodes).
  136.77        TracePhase t2("idealLoop", &_t_idealLoop, true);
  136.78        PhaseIdealLoop ideal_loop( igvn, false, true );
  136.79 -      if (major_progress()) print_method("PhaseIdealLoop before EA", 2);
  136.80 +      if (major_progress()) print_method(PHASE_PHASEIDEAL_BEFORE_EA, 2);
  136.81        if (failing())  return;
  136.82      }
  136.83      ConnectionGraph::do_analysis(this, &igvn);
  136.84 @@ -2011,7 +2012,7 @@
  136.85  
  136.86      // Optimize out fields loads from scalar replaceable allocations.
  136.87      igvn.optimize();
  136.88 -    print_method("Iter GVN after EA", 2);
  136.89 +    print_method(PHASE_ITER_GVN_AFTER_EA, 2);
  136.90  
  136.91      if (failing())  return;
  136.92  
  136.93 @@ -2022,7 +2023,7 @@
  136.94        igvn.set_delay_transform(false);
  136.95  
  136.96        igvn.optimize();
  136.97 -      print_method("Iter GVN after eliminating allocations and locks", 2);
  136.98 +      print_method(PHASE_ITER_GVN_AFTER_ELIMINATION, 2);
  136.99  
 136.100        if (failing())  return;
 136.101      }
 136.102 @@ -2038,7 +2039,7 @@
 136.103        TracePhase t2("idealLoop", &_t_idealLoop, true);
 136.104        PhaseIdealLoop ideal_loop( igvn, true );
 136.105        loop_opts_cnt--;
 136.106 -      if (major_progress()) print_method("PhaseIdealLoop 1", 2);
 136.107 +      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP1, 2);
 136.108        if (failing())  return;
 136.109      }
 136.110      // Loop opts pass if partial peeling occurred in previous pass
 136.111 @@ -2046,7 +2047,7 @@
 136.112        TracePhase t3("idealLoop", &_t_idealLoop, true);
 136.113        PhaseIdealLoop ideal_loop( igvn, false );
 136.114        loop_opts_cnt--;
 136.115 -      if (major_progress()) print_method("PhaseIdealLoop 2", 2);
 136.116 +      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP2, 2);
 136.117        if (failing())  return;
 136.118      }
 136.119      // Loop opts pass for loop-unrolling before CCP
 136.120 @@ -2054,7 +2055,7 @@
 136.121        TracePhase t4("idealLoop", &_t_idealLoop, true);
 136.122        PhaseIdealLoop ideal_loop( igvn, false );
 136.123        loop_opts_cnt--;
 136.124 -      if (major_progress()) print_method("PhaseIdealLoop 3", 2);
 136.125 +      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP3, 2);
 136.126      }
 136.127      if (!failing()) {
 136.128        // Verify that last round of loop opts produced a valid graph
 136.129 @@ -2071,7 +2072,7 @@
 136.130      TracePhase t2("ccp", &_t_ccp, true);
 136.131      ccp.do_transform();
 136.132    }
 136.133 -  print_method("PhaseCPP 1", 2);
 136.134 +  print_method(PHASE_CPP1, 2);
 136.135  
 136.136    assert( true, "Break here to ccp.dump_old2new_map()");
 136.137  
 136.138 @@ -2082,7 +2083,7 @@
 136.139      igvn.optimize();
 136.140    }
 136.141  
 136.142 -  print_method("Iter GVN 2", 2);
 136.143 +  print_method(PHASE_ITER_GVN2, 2);
 136.144  
 136.145    if (failing())  return;
 136.146  
 136.147 @@ -2095,7 +2096,7 @@
 136.148        assert( cnt++ < 40, "infinite cycle in loop optimization" );
 136.149        PhaseIdealLoop ideal_loop( igvn, true);
 136.150        loop_opts_cnt--;
 136.151 -      if (major_progress()) print_method("PhaseIdealLoop iterations", 2);
 136.152 +      if (major_progress()) print_method(PHASE_PHASEIDEALLOOP_ITERATIONS, 2);
 136.153        if (failing())  return;
 136.154      }
 136.155    }
 136.156 @@ -2128,7 +2129,7 @@
 136.157      }
 136.158    }
 136.159  
 136.160 -  print_method("Optimize finished", 2);
 136.161 +  print_method(PHASE_OPTIMIZE_FINISHED, 2);
 136.162  }
 136.163  
 136.164  
 136.165 @@ -2176,7 +2177,7 @@
 136.166      cfg.GlobalCodeMotion(m,unique(),proj_list);
 136.167      if (failing())  return;
 136.168  
 136.169 -    print_method("Global code motion", 2);
 136.170 +    print_method(PHASE_GLOBAL_CODE_MOTION, 2);
 136.171  
 136.172      NOT_PRODUCT( verify_graph_edges(); )
 136.173  
 136.174 @@ -2229,7 +2230,7 @@
 136.175      Output();
 136.176    }
 136.177  
 136.178 -  print_method("Final Code");
 136.179 +  print_method(PHASE_FINAL_CODE);
 136.180  
 136.181    // He's dead, Jim.
 136.182    _cfg     = (PhaseCFG*)0xdeadbeef;
 136.183 @@ -3316,8 +3317,16 @@
 136.184      // Record the first failure reason.
 136.185      _failure_reason = reason;
 136.186    }
 136.187 +
 136.188 +  EventCompilerFailure event;
 136.189 +  if (event.should_commit()) {
 136.190 +    event.set_compileID(Compile::compile_id());
 136.191 +    event.set_failure(reason);
 136.192 +    event.commit();
 136.193 +  }
 136.194 +
 136.195    if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
 136.196 -    C->print_method(_failure_reason);
 136.197 +    C->print_method(PHASE_FAILURE);
 136.198    }
 136.199    _root = NULL;  // flush the graph, too
 136.200  }
   137.1 --- a/src/share/vm/opto/compile.hpp	Fri Jun 07 09:33:01 2013 -0700
   137.2 +++ b/src/share/vm/opto/compile.hpp	Mon Jun 10 11:30:51 2013 +0200
   137.3 @@ -1,5 +1,5 @@
   137.4  /*
   137.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   137.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   137.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   137.8   *
   137.9   * This code is free software; you can redistribute it and/or modify it
  137.10 @@ -36,10 +36,12 @@
  137.11  #include "libadt/vectset.hpp"
  137.12  #include "memory/resourceArea.hpp"
  137.13  #include "opto/idealGraphPrinter.hpp"
  137.14 +#include "opto/phasetype.hpp"
  137.15  #include "opto/phase.hpp"
  137.16  #include "opto/regmask.hpp"
  137.17  #include "runtime/deoptimization.hpp"
  137.18  #include "runtime/vmThread.hpp"
  137.19 +#include "trace/tracing.hpp"
  137.20  
  137.21  class Block;
  137.22  class Bundle;
  137.23 @@ -322,6 +324,7 @@
  137.24    IdealGraphPrinter*    _printer;
  137.25  #endif
  137.26  
  137.27 +
  137.28    // Node management
  137.29    uint                  _unique;                // Counter for unique Node indices
  137.30    VectorSet             _dead_node_list;        // Set of dead nodes
  137.31 @@ -573,17 +576,43 @@
  137.32    bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
  137.33    void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }
  137.34  
  137.35 +  jlong _latest_stage_start_counter;
  137.36 +
  137.37    void begin_method() {
  137.38  #ifndef PRODUCT
  137.39      if (_printer) _printer->begin_method(this);
  137.40  #endif
  137.41 +    C->_latest_stage_start_counter = os::elapsed_counter();
  137.42    }
  137.43 -  void print_method(const char * name, int level = 1) {
  137.44 +
  137.45 +  void print_method(CompilerPhaseType cpt, int level = 1) {
  137.46 +    EventCompilerPhase event(UNTIMED);
  137.47 +    if (event.should_commit()) {
  137.48 +      event.set_starttime(C->_latest_stage_start_counter);
  137.49 +      event.set_endtime(os::elapsed_counter());
  137.50 +      event.set_phase((u1) cpt);
  137.51 +      event.set_compileID(C->_compile_id);
  137.52 +      event.set_phaseLevel(level);
  137.53 +      event.commit();
  137.54 +    }
  137.55 +
  137.56 +
  137.57  #ifndef PRODUCT
  137.58 -    if (_printer) _printer->print_method(this, name, level);
  137.59 +    if (_printer) _printer->print_method(this, CompilerPhaseTypeHelper::to_string(cpt), level);
  137.60  #endif
  137.61 +    C->_latest_stage_start_counter = os::elapsed_counter();
  137.62    }
  137.63 -  void end_method() {
  137.64 +
  137.65 +  void end_method(int level = 1) {
  137.66 +    EventCompilerPhase event(UNTIMED);
  137.67 +    if (event.should_commit()) {
  137.68 +      event.set_starttime(C->_latest_stage_start_counter);
  137.69 +      event.set_endtime(os::elapsed_counter());
  137.70 +      event.set_phase((u1) PHASE_END);
  137.71 +      event.set_compileID(C->_compile_id);
  137.72 +      event.set_phaseLevel(level);
  137.73 +      event.commit();
  137.74 +    }
  137.75  #ifndef PRODUCT
  137.76      if (_printer) _printer->end_method();
  137.77  #endif
   138.1 --- a/src/share/vm/opto/escape.cpp	Fri Jun 07 09:33:01 2013 -0700
   138.2 +++ b/src/share/vm/opto/escape.cpp	Mon Jun 10 11:30:51 2013 +0200
   138.3 @@ -1,5 +1,5 @@
   138.4  /*
   138.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
   138.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
   138.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   138.8   *
   138.9   * This code is free software; you can redistribute it and/or modify it
  138.10 @@ -277,7 +277,7 @@
  138.11      // scalar replaceable objects.
  138.12      split_unique_types(alloc_worklist);
  138.13      if (C->failing())  return false;
  138.14 -    C->print_method("After Escape Analysis", 2);
  138.15 +    C->print_method(PHASE_AFTER_EA, 2);
  138.16  
  138.17  #ifdef ASSERT
  138.18    } else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
   139.1 --- a/src/share/vm/opto/library_call.cpp	Fri Jun 07 09:33:01 2013 -0700
   139.2 +++ b/src/share/vm/opto/library_call.cpp	Mon Jun 10 11:30:51 2013 +0200
   139.3 @@ -1,5 +1,5 @@
   139.4  /*
   139.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
   139.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
   139.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   139.8   *
   139.9   * This code is free software; you can redistribute it and/or modify it
  139.10 @@ -38,6 +38,7 @@
  139.11  #include "opto/subnode.hpp"
  139.12  #include "prims/nativeLookup.hpp"
  139.13  #include "runtime/sharedRuntime.hpp"
  139.14 +#include "trace/traceMacros.hpp"
  139.15  
  139.16  class LibraryIntrinsic : public InlineCallGenerator {
  139.17    // Extend the set of intrinsics known to the runtime:
   140.1 --- a/src/share/vm/opto/loopnode.cpp	Fri Jun 07 09:33:01 2013 -0700
   140.2 +++ b/src/share/vm/opto/loopnode.cpp	Mon Jun 10 11:30:51 2013 +0200
   140.3 @@ -1,5 +1,5 @@
   140.4  /*
   140.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
   140.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
   140.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   140.8   *
   140.9   * This code is free software; you can redistribute it and/or modify it
  140.10 @@ -440,7 +440,7 @@
  140.11    // ---- SUCCESS!   Found A Trip-Counted Loop!  -----
  140.12    //
  140.13    assert(x->Opcode() == Op_Loop, "regular loops only");
  140.14 -  C->print_method("Before CountedLoop", 3);
  140.15 +  C->print_method(PHASE_BEFORE_CLOOPS, 3);
  140.16  
  140.17    Node *hook = new (C) Node(6);
  140.18  
  140.19 @@ -791,7 +791,7 @@
  140.20    }
  140.21  #endif
  140.22  
  140.23 -  C->print_method("After CountedLoop", 3);
  140.24 +  C->print_method(PHASE_AFTER_CLOOPS, 3);
  140.25  
  140.26    return true;
  140.27  }
  140.28 @@ -2164,7 +2164,7 @@
  140.29    // Split shared headers and insert loop landing pads.
  140.30    // Do not bother doing this on the Root loop of course.
  140.31    if( !_verify_me && !_verify_only && _ltree_root->_child ) {
  140.32 -    C->print_method("Before beautify loops", 3);
  140.33 +    C->print_method(PHASE_BEFORE_BEAUTIFY_LOOPS, 3);
  140.34      if( _ltree_root->_child->beautify_loops( this ) ) {
  140.35        // Re-build loop tree!
  140.36        _ltree_root->_child = NULL;
  140.37 @@ -2178,7 +2178,7 @@
  140.38        // Reset loop nesting depth
  140.39        _ltree_root->set_nest( 0 );
  140.40  
  140.41 -      C->print_method("After beautify loops", 3);
  140.42 +      C->print_method(PHASE_AFTER_BEAUTIFY_LOOPS, 3);
  140.43      }
  140.44    }
  140.45  
   141.1 --- a/src/share/vm/opto/matcher.cpp	Fri Jun 07 09:33:01 2013 -0700
   141.2 +++ b/src/share/vm/opto/matcher.cpp	Mon Jun 10 11:30:51 2013 +0200
   141.3 @@ -1,5 +1,5 @@
   141.4  /*
   141.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   141.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   141.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   141.8   *
   141.9   * This code is free software; you can redistribute it and/or modify it
  141.10 @@ -317,7 +317,7 @@
  141.11    find_shared( C->root() );
  141.12    find_shared( C->top() );
  141.13  
  141.14 -  C->print_method("Before Matching");
  141.15 +  C->print_method(PHASE_BEFORE_MATCHING);
  141.16  
  141.17    // Create new ideal node ConP #NULL even if it does exist in old space
  141.18    // to avoid false sharing if the corresponding mach node is not used.
  141.19 @@ -1848,7 +1848,7 @@
  141.20  
  141.21    for( uint i=0; kid != NULL && i<2; kid = s->_kids[1], i++ ) {   // binary tree
  141.22      int newrule;
  141.23 -    if( i == 0 )
  141.24 +    if( i == 0)
  141.25        newrule = kid->_rule[_leftOp[rule]];
  141.26      else
  141.27        newrule = kid->_rule[_rightOp[rule]];
   142.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   142.2 +++ b/src/share/vm/opto/phasetype.hpp	Mon Jun 10 11:30:51 2013 +0200
   142.3 @@ -0,0 +1,98 @@
   142.4 +/*
   142.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   142.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   142.7 + *
   142.8 + * This code is free software; you can redistribute it and/or modify it
   142.9 + * under the terms of the GNU General Public License version 2 only, as
  142.10 + * published by the Free Software Foundation.
  142.11 + *
  142.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  142.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  142.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  142.15 + * version 2 for more details (a copy is included in the LICENSE file that
  142.16 + * accompanied this code).
  142.17 + *
  142.18 + * You should have received a copy of the GNU General Public License version
  142.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  142.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  142.21 + *
  142.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  142.23 + * or visit www.oracle.com if you need additional information or have any
  142.24 + * questions.
  142.25 + *
  142.26 + */
  142.27 +
  142.28 +#ifndef SHARE_VM_OPTO_PHASETYPE_HPP
  142.29 +#define SHARE_VM_OPTO_PHASETYPE_HPP
  142.30 +
  142.31 +enum CompilerPhaseType {
  142.32 +  PHASE_BEFORE_STRINGOPTS,
  142.33 +  PHASE_AFTER_STRINGOPTS,
  142.34 +  PHASE_BEFORE_REMOVEUSELESS,
  142.35 +  PHASE_AFTER_PARSING,
  142.36 +  PHASE_ITER_GVN1,
  142.37 +  PHASE_PHASEIDEAL_BEFORE_EA,
  142.38 +  PHASE_ITER_GVN_AFTER_EA,
  142.39 +  PHASE_ITER_GVN_AFTER_ELIMINATION,
  142.40 +  PHASE_PHASEIDEALLOOP1,
  142.41 +  PHASE_PHASEIDEALLOOP2,
  142.42 +  PHASE_PHASEIDEALLOOP3,
  142.43 +  PHASE_CPP1,
  142.44 +  PHASE_ITER_GVN2,
  142.45 +  PHASE_PHASEIDEALLOOP_ITERATIONS,
  142.46 +  PHASE_OPTIMIZE_FINISHED,
  142.47 +  PHASE_GLOBAL_CODE_MOTION,
  142.48 +  PHASE_FINAL_CODE,
  142.49 +  PHASE_AFTER_EA,
  142.50 +  PHASE_BEFORE_CLOOPS,
  142.51 +  PHASE_AFTER_CLOOPS,
  142.52 +  PHASE_BEFORE_BEAUTIFY_LOOPS,
  142.53 +  PHASE_AFTER_BEAUTIFY_LOOPS,
  142.54 +  PHASE_BEFORE_MATCHING,
  142.55 +  PHASE_INCREMENTAL_INLINE,
  142.56 +  PHASE_INCREMENTAL_BOXING_INLINE,
  142.57 +  PHASE_END,
  142.58 +  PHASE_FAILURE,
  142.59 +
  142.60 +  PHASE_NUM_TYPES
  142.61 +};
  142.62 +
  142.63 +class CompilerPhaseTypeHelper {
  142.64 +  public:
  142.65 +  static const char* to_string(CompilerPhaseType cpt) {
  142.66 +    switch (cpt) {
  142.67 +      case PHASE_BEFORE_STRINGOPTS:          return "Before StringOpts";
  142.68 +      case PHASE_AFTER_STRINGOPTS:           return "After StringOpts";
  142.69 +      case PHASE_BEFORE_REMOVEUSELESS:       return "Before RemoveUseless";
  142.70 +      case PHASE_AFTER_PARSING:              return "After Parsing";
  142.71 +      case PHASE_ITER_GVN1:                  return "Iter GVN 1";
  142.72 +      case PHASE_PHASEIDEAL_BEFORE_EA:       return "PhaseIdealLoop before EA";
  142.73 +      case PHASE_ITER_GVN_AFTER_EA:          return "Iter GVN after EA";
  142.74 +      case PHASE_ITER_GVN_AFTER_ELIMINATION: return "Iter GVN after eliminating allocations and locks";
  142.75 +      case PHASE_PHASEIDEALLOOP1:            return "PhaseIdealLoop 1";
  142.76 +      case PHASE_PHASEIDEALLOOP2:            return "PhaseIdealLoop 2";
  142.77 +      case PHASE_PHASEIDEALLOOP3:            return "PhaseIdealLoop 3";
  142.78 +      case PHASE_CPP1:                       return "PhaseCPP 1";
  142.79 +      case PHASE_ITER_GVN2:                  return "Iter GVN 2";
  142.80 +      case PHASE_PHASEIDEALLOOP_ITERATIONS:  return "PhaseIdealLoop iterations";
  142.81 +      case PHASE_OPTIMIZE_FINISHED:          return "Optimize finished";
  142.82 +      case PHASE_GLOBAL_CODE_MOTION:         return "Global code motion";
  142.83 +      case PHASE_FINAL_CODE:                 return "Final Code";
  142.84 +      case PHASE_AFTER_EA:                   return "After Escape Analysis";
  142.85 +      case PHASE_BEFORE_CLOOPS:              return "Before CountedLoop";
  142.86 +      case PHASE_AFTER_CLOOPS:               return "After CountedLoop";
  142.87 +      case PHASE_BEFORE_BEAUTIFY_LOOPS:      return "Before beautify loops";
  142.88 +      case PHASE_AFTER_BEAUTIFY_LOOPS:       return "After beautify loops";
  142.89 +      case PHASE_BEFORE_MATCHING:            return "Before Matching";
  142.90 +      case PHASE_INCREMENTAL_INLINE:         return "Incremental Inline";
  142.91 +      case PHASE_INCREMENTAL_BOXING_INLINE:  return "Incremental Boxing Inline";
  142.92 +      case PHASE_END:                        return "End";
  142.93 +      case PHASE_FAILURE:                    return "Failure";
  142.94 +      default:
  142.95 +        ShouldNotReachHere();
  142.96 +        return NULL;
  142.97 +    }
  142.98 +  }
  142.99 +};
 142.100 +
 142.101 +#endif //SHARE_VM_OPTO_PHASETYPE_HPP
   143.1 --- a/src/share/vm/precompiled/precompiled.hpp	Fri Jun 07 09:33:01 2013 -0700
   143.2 +++ b/src/share/vm/precompiled/precompiled.hpp	Mon Jun 10 11:30:51 2013 +0200
   143.3 @@ -26,7 +26,6 @@
   143.4  // or if the user passes USE_PRECOMPILED_HEADER=0 to the makefiles.
   143.5  
   143.6  #ifndef DONT_USE_PRECOMPILED_HEADER
   143.7 -
   143.8  # include "asm/assembler.hpp"
   143.9  # include "asm/assembler.inline.hpp"
  143.10  # include "asm/codeBuffer.hpp"
   144.1 --- a/src/share/vm/prims/jni.cpp	Fri Jun 07 09:33:01 2013 -0700
   144.2 +++ b/src/share/vm/prims/jni.cpp	Mon Jun 10 11:30:51 2013 +0200
   144.3 @@ -74,7 +74,6 @@
   144.4  #include "runtime/vm_operations.hpp"
   144.5  #include "services/runtimeService.hpp"
   144.6  #include "trace/tracing.hpp"
   144.7 -#include "trace/traceEventTypes.hpp"
   144.8  #include "utilities/defaultStream.hpp"
   144.9  #include "utilities/dtrace.hpp"
  144.10  #include "utilities/events.hpp"
  144.11 @@ -5014,6 +5013,7 @@
  144.12  
  144.13  #ifndef PRODUCT
  144.14  
  144.15 +#include "gc_implementation/shared/gcTimer.hpp"
  144.16  #include "gc_interface/collectedHeap.hpp"
  144.17  #if INCLUDE_ALL_GCS
  144.18  #include "gc_implementation/g1/heapRegionRemSet.hpp"
  144.19 @@ -5031,6 +5031,7 @@
  144.20    if (ExecuteInternalVMTests) {
  144.21      tty->print_cr("Running internal VM tests");
  144.22      run_unit_test(GlobalDefinitions::test_globals());
  144.23 +    run_unit_test(GCTimerAllTest::all());
  144.24      run_unit_test(arrayOopDesc::test_max_array_length());
  144.25      run_unit_test(CollectedHeap::test_is_in());
  144.26      run_unit_test(QuickSort::test_quick_sort());
  144.27 @@ -5131,9 +5132,11 @@
  144.28         JvmtiExport::post_thread_start(thread);
  144.29      }
  144.30  
  144.31 -    EVENT_BEGIN(TraceEventThreadStart, event);
  144.32 -    EVENT_COMMIT(event,
  144.33 -        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
  144.34 +    EventThreadStart event;
  144.35 +    if (event.should_commit()) {
  144.36 +      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
  144.37 +      event.commit();
  144.38 +    }
  144.39  
  144.40      // Check if we should compile all classes on bootclasspath
  144.41      NOT_PRODUCT(if (CompileTheWorld) ClassLoader::compile_the_world();)
  144.42 @@ -5334,9 +5337,11 @@
  144.43      JvmtiExport::post_thread_start(thread);
  144.44    }
  144.45  
  144.46 -  EVENT_BEGIN(TraceEventThreadStart, event);
  144.47 -  EVENT_COMMIT(event,
  144.48 -      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
  144.49 +  EventThreadStart event;
  144.50 +  if (event.should_commit()) {
  144.51 +    event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
  144.52 +    event.commit();
  144.53 +  }
  144.54  
  144.55    *(JNIEnv**)penv = thread->jni_environment();
  144.56  
   145.1 --- a/src/share/vm/prims/jvm.cpp	Fri Jun 07 09:33:01 2013 -0700
   145.2 +++ b/src/share/vm/prims/jvm.cpp	Mon Jun 10 11:30:51 2013 +0200
   145.3 @@ -59,6 +59,7 @@
   145.4  #include "services/attachListener.hpp"
   145.5  #include "services/management.hpp"
   145.6  #include "services/threadService.hpp"
   145.7 +#include "trace/tracing.hpp"
   145.8  #include "utilities/copy.hpp"
   145.9  #include "utilities/defaultStream.hpp"
  145.10  #include "utilities/dtrace.hpp"
  145.11 @@ -2999,6 +3000,8 @@
  145.12                               millis);
  145.13  #endif /* USDT2 */
  145.14  
  145.15 +  EventThreadSleep event;
  145.16 +
  145.17    if (millis == 0) {
  145.18      // When ConvertSleepToYield is on, this matches the classic VM implementation of
  145.19      // JVM_Sleep. Critical for similar threading behaviour (Win32)
  145.20 @@ -3019,6 +3022,10 @@
  145.21        // An asynchronous exception (e.g., ThreadDeathException) could have been thrown on
  145.22        // us while we were sleeping. We do not overwrite those.
  145.23        if (!HAS_PENDING_EXCEPTION) {
  145.24 +        if (event.should_commit()) {
  145.25 +          event.set_time(millis);
  145.26 +          event.commit();
  145.27 +        }
  145.28  #ifndef USDT2
  145.29          HS_DTRACE_PROBE1(hotspot, thread__sleep__end,1);
  145.30  #else /* USDT2 */
  145.31 @@ -3032,6 +3039,10 @@
  145.32      }
  145.33      thread->osthread()->set_state(old_state);
  145.34    }
  145.35 +  if (event.should_commit()) {
  145.36 +    event.set_time(millis);
  145.37 +    event.commit();
  145.38 +  }
  145.39  #ifndef USDT2
  145.40    HS_DTRACE_PROBE1(hotspot, thread__sleep__end,0);
  145.41  #else /* USDT2 */
   146.1 --- a/src/share/vm/prims/jvmtiGen.java	Fri Jun 07 09:33:01 2013 -0700
   146.2 +++ b/src/share/vm/prims/jvmtiGen.java	Mon Jun 10 11:30:51 2013 +0200
   146.3 @@ -1,5 +1,5 @@
   146.4  /*
   146.5 - * Copyright (c) 2003, 2005, Oracle and/or its affiliates. All rights reserved.
   146.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
   146.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   146.8   *
   146.9   * This code is free software; you can redistribute it and/or modify it
  146.10 @@ -31,7 +31,6 @@
  146.11  import org.xml.sax.SAXParseException;
  146.12  import org.w3c.dom.Document;
  146.13  import org.w3c.dom.DOMException;
  146.14 -
  146.15  // For write operation
  146.16  import javax.xml.transform.Transformer;
  146.17  import javax.xml.transform.TransformerException;
  146.18 @@ -129,6 +128,7 @@
  146.19  
  146.20          factory.setNamespaceAware(true);
  146.21          factory.setValidating(true);
  146.22 +        factory.setXIncludeAware(true);
  146.23  
  146.24          try {
  146.25              File datafile   = new File(inFileName);
   147.1 --- a/src/share/vm/prims/jvmtiImpl.cpp	Fri Jun 07 09:33:01 2013 -0700
   147.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp	Mon Jun 10 11:30:51 2013 +0200
   147.3 @@ -360,19 +360,14 @@
   147.4    case CLEAR_BREAKPOINT:
   147.5      _breakpoints->clear_at_safepoint(*_bp);
   147.6      break;
   147.7 -  case CLEAR_ALL_BREAKPOINT:
   147.8 -    _breakpoints->clearall_at_safepoint();
   147.9 -    break;
  147.10    default:
  147.11      assert(false, "Unknown operation");
  147.12    }
  147.13  }
  147.14  
  147.15  void VM_ChangeBreakpoints::oops_do(OopClosure* f) {
  147.16 -  // This operation keeps breakpoints alive
  147.17 -  if (_breakpoints != NULL) {
  147.18 -    _breakpoints->oops_do(f);
  147.19 -  }
  147.20 +  // The JvmtiBreakpoints in _breakpoints will be visited via
  147.21 +  // JvmtiExport::oops_do.
  147.22    if (_bp != NULL) {
  147.23      _bp->oops_do(f);
  147.24    }
  147.25 @@ -433,23 +428,13 @@
  147.26    }
  147.27  }
  147.28  
  147.29 -void JvmtiBreakpoints::clearall_at_safepoint() {
  147.30 -  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
  147.31 -
  147.32 -  int len = _bps.length();
  147.33 -  for (int i=0; i<len; i++) {
  147.34 -    _bps.at(i).clear();
  147.35 -  }
  147.36 -  _bps.clear();
  147.37 -}
  147.38 -
  147.39  int JvmtiBreakpoints::length() { return _bps.length(); }
  147.40  
  147.41  int JvmtiBreakpoints::set(JvmtiBreakpoint& bp) {
  147.42    if ( _bps.find(bp) != -1) {
  147.43       return JVMTI_ERROR_DUPLICATE;
  147.44    }
  147.45 -  VM_ChangeBreakpoints set_breakpoint(this,VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  147.46 +  VM_ChangeBreakpoints set_breakpoint(VM_ChangeBreakpoints::SET_BREAKPOINT, &bp);
  147.47    VMThread::execute(&set_breakpoint);
  147.48    return JVMTI_ERROR_NONE;
  147.49  }
  147.50 @@ -459,7 +444,7 @@
  147.51       return JVMTI_ERROR_NOT_FOUND;
  147.52    }
  147.53  
  147.54 -  VM_ChangeBreakpoints clear_breakpoint(this,VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  147.55 +  VM_ChangeBreakpoints clear_breakpoint(VM_ChangeBreakpoints::CLEAR_BREAKPOINT, &bp);
  147.56    VMThread::execute(&clear_breakpoint);
  147.57    return JVMTI_ERROR_NONE;
  147.58  }
  147.59 @@ -490,11 +475,6 @@
  147.60    }
  147.61  }
  147.62  
  147.63 -void JvmtiBreakpoints::clearall() {
  147.64 -  VM_ChangeBreakpoints clearall_breakpoint(this,VM_ChangeBreakpoints::CLEAR_ALL_BREAKPOINT);
  147.65 -  VMThread::execute(&clearall_breakpoint);
  147.66 -}
  147.67 -
  147.68  //
  147.69  // class JvmtiCurrentBreakpoints
  147.70  //
   148.1 --- a/src/share/vm/prims/jvmtiImpl.hpp	Fri Jun 07 09:33:01 2013 -0700
   148.2 +++ b/src/share/vm/prims/jvmtiImpl.hpp	Mon Jun 10 11:30:51 2013 +0200
   148.3 @@ -204,47 +204,6 @@
   148.4  
   148.5  ///////////////////////////////////////////////////////////////
   148.6  //
   148.7 -// class VM_ChangeBreakpoints
   148.8 -// Used by              : JvmtiBreakpoints
   148.9 -// Used by JVMTI methods: none directly.
  148.10 -// Note: A Helper class.
  148.11 -//
  148.12 -// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
  148.13 -//
  148.14 -
  148.15 -class VM_ChangeBreakpoints : public VM_Operation {
  148.16 -private:
  148.17 -  JvmtiBreakpoints* _breakpoints;
  148.18 -  int               _operation;
  148.19 -  JvmtiBreakpoint*  _bp;
  148.20 -
  148.21 -public:
  148.22 -  enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1, CLEAR_ALL_BREAKPOINT=2 };
  148.23 -
  148.24 -  VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation) {
  148.25 -    _breakpoints = breakpoints;
  148.26 -    _bp = NULL;
  148.27 -    _operation = operation;
  148.28 -    assert(breakpoints != NULL, "breakpoints != NULL");
  148.29 -    assert(operation == CLEAR_ALL_BREAKPOINT, "unknown breakpoint operation");
  148.30 -  }
  148.31 -  VM_ChangeBreakpoints(JvmtiBreakpoints* breakpoints, int operation, JvmtiBreakpoint *bp) {
  148.32 -    _breakpoints = breakpoints;
  148.33 -    _bp = bp;
  148.34 -    _operation = operation;
  148.35 -    assert(breakpoints != NULL, "breakpoints != NULL");
  148.36 -    assert(bp != NULL, "bp != NULL");
  148.37 -    assert(operation == SET_BREAKPOINT || operation == CLEAR_BREAKPOINT , "unknown breakpoint operation");
  148.38 -  }
  148.39 -
  148.40 -  VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
  148.41 -  void doit();
  148.42 -  void oops_do(OopClosure* f);
  148.43 -};
  148.44 -
  148.45 -
  148.46 -///////////////////////////////////////////////////////////////
  148.47 -//
  148.48  // class JvmtiBreakpoints
  148.49  // Used by              : JvmtiCurrentBreakpoints
  148.50  // Used by JVMTI methods: none directly
  148.51 @@ -271,7 +230,6 @@
  148.52    friend class VM_ChangeBreakpoints;
  148.53    void set_at_safepoint(JvmtiBreakpoint& bp);
  148.54    void clear_at_safepoint(JvmtiBreakpoint& bp);
  148.55 -  void clearall_at_safepoint();
  148.56  
  148.57    static void do_element(GrowableElement *e);
  148.58  
  148.59 @@ -286,7 +244,6 @@
  148.60    int  set(JvmtiBreakpoint& bp);
  148.61    int  clear(JvmtiBreakpoint& bp);
  148.62    void clearall_in_class_at_safepoint(Klass* klass);
  148.63 -  void clearall();
  148.64    void gc_epilogue();
  148.65  };
  148.66  
  148.67 @@ -344,6 +301,40 @@
  148.68      return false;
  148.69  }
  148.70  
  148.71 +
  148.72 +///////////////////////////////////////////////////////////////
  148.73 +//
  148.74 +// class VM_ChangeBreakpoints
  148.75 +// Used by              : JvmtiBreakpoints
  148.76 +// Used by JVMTI methods: none directly.
  148.77 +// Note: A Helper class.
  148.78 +//
  148.79 +// VM_ChangeBreakpoints implements a VM_Operation for ALL modifications to the JvmtiBreakpoints class.
  148.80 +//
  148.81 +
  148.82 +class VM_ChangeBreakpoints : public VM_Operation {
  148.83 +private:
  148.84 +  JvmtiBreakpoints* _breakpoints;
  148.85 +  int               _operation;
  148.86 +  JvmtiBreakpoint*  _bp;
  148.87 +
  148.88 +public:
  148.89 +  enum { SET_BREAKPOINT=0, CLEAR_BREAKPOINT=1 };
  148.90 +
  148.91 +  VM_ChangeBreakpoints(int operation, JvmtiBreakpoint *bp) {
  148.92 +    JvmtiBreakpoints& current_bps = JvmtiCurrentBreakpoints::get_jvmti_breakpoints();
  148.93 +    _breakpoints = &current_bps;
  148.94 +    _bp = bp;
  148.95 +    _operation = operation;
  148.96 +    assert(bp != NULL, "bp != NULL");
  148.97 +  }
  148.98 +
  148.99 +  VMOp_Type type() const { return VMOp_ChangeBreakpoints; }
 148.100 +  void doit();
 148.101 +  void oops_do(OopClosure* f);
 148.102 +};
 148.103 +
 148.104 +
 148.105  ///////////////////////////////////////////////////////////////
 148.106  // The get/set local operations must only be done by the VM thread
 148.107  // because the interpreter version needs to access oop maps, which can
   149.1 --- a/src/share/vm/prims/unsafe.cpp	Fri Jun 07 09:33:01 2013 -0700
   149.2 +++ b/src/share/vm/prims/unsafe.cpp	Mon Jun 10 11:30:51 2013 +0200
   149.3 @@ -1,5 +1,5 @@
   149.4  /*
   149.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
   149.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
   149.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   149.8   *
   149.9   * This code is free software; you can redistribute it and/or modify it
  149.10 @@ -36,6 +36,7 @@
  149.11  #include "runtime/reflection.hpp"
  149.12  #include "runtime/synchronizer.hpp"
  149.13  #include "services/threadService.hpp"
  149.14 +#include "trace/tracing.hpp"
  149.15  #include "utilities/copy.hpp"
  149.16  #include "utilities/dtrace.hpp"
  149.17  
  149.18 @@ -1204,6 +1205,7 @@
  149.19  
  149.20  UNSAFE_ENTRY(void, Unsafe_Park(JNIEnv *env, jobject unsafe, jboolean isAbsolute, jlong time))
  149.21    UnsafeWrapper("Unsafe_Park");
  149.22 +  EventThreadPark event;
  149.23  #ifndef USDT2
  149.24    HS_DTRACE_PROBE3(hotspot, thread__park__begin, thread->parker(), (int) isAbsolute, time);
  149.25  #else /* USDT2 */
  149.26 @@ -1218,6 +1220,13 @@
  149.27    HOTSPOT_THREAD_PARK_END(
  149.28                            (uintptr_t) thread->parker());
  149.29  #endif /* USDT2 */
  149.30 +  if (event.should_commit()) {
  149.31 +    oop obj = thread->current_park_blocker();
  149.32 +    event.set_klass(obj ? obj->klass() : NULL);
  149.33 +    event.set_timeout(time);
  149.34 +    event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0);
  149.35 +    event.commit();
  149.36 +  }
  149.37  UNSAFE_END
  149.38  
  149.39  UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
   150.1 --- a/src/share/vm/runtime/frame.hpp	Fri Jun 07 09:33:01 2013 -0700
   150.2 +++ b/src/share/vm/runtime/frame.hpp	Mon Jun 10 11:30:51 2013 +0200
   150.3 @@ -1,5 +1,5 @@
   150.4  /*
   150.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   150.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   150.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   150.8   *
   150.9   * This code is free software; you can redistribute it and/or modify it
  150.10 @@ -134,6 +134,7 @@
  150.11    bool is_interpreted_frame()    const;
  150.12    bool is_java_frame()           const;
  150.13    bool is_entry_frame()          const;             // Java frame called from C?
  150.14 +  bool is_stub_frame()           const;
  150.15    bool is_ignored_frame()        const;
  150.16    bool is_native_frame()         const;
  150.17    bool is_runtime_frame()        const;
   151.1 --- a/src/share/vm/runtime/frame.inline.hpp	Fri Jun 07 09:33:01 2013 -0700
   151.2 +++ b/src/share/vm/runtime/frame.inline.hpp	Mon Jun 10 11:30:51 2013 +0200
   151.3 @@ -1,5 +1,5 @@
   151.4  /*
   151.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   151.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   151.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   151.8   *
   151.9   * This code is free software; you can redistribute it and/or modify it
  151.10 @@ -79,6 +79,10 @@
  151.11    return StubRoutines::returns_to_call_stub(pc());
  151.12  }
  151.13  
  151.14 +inline bool frame::is_stub_frame() const {
  151.15 +  return StubRoutines::is_stub_code(pc()) || (_cb != NULL && _cb->is_adapter_blob());
  151.16 +}
  151.17 +
  151.18  inline bool frame::is_first_frame() const {
  151.19    return is_entry_frame() && entry_frame_is_first();
  151.20  }
   152.1 --- a/src/share/vm/runtime/globals.hpp	Fri Jun 07 09:33:01 2013 -0700
   152.2 +++ b/src/share/vm/runtime/globals.hpp	Mon Jun 10 11:30:51 2013 +0200
   152.3 @@ -2311,6 +2311,10 @@
   152.4            "Print diagnostic message when GC is stalled"                     \
   152.5            "by JNI critical section")                                        \
   152.6                                                                              \
   152.7 +  experimental(double, ObjectCountCutOffPercent, 0.5,                       \
   152.8 +          "The percentage of the used heap that the instances of a class "  \
   152.9 +          "must occupy for the class to generate a trace event.")           \
  152.10 +                                                                            \
  152.11    /* GC log rotation setting */                                             \
  152.12                                                                              \
  152.13    product(bool, UseGCLogFileRotation, false,                                \
  152.14 @@ -3688,7 +3692,13 @@
  152.15    experimental(uintx, ArrayAllocatorMallocLimit,                            \
  152.16            SOLARIS_ONLY(64*K) NOT_SOLARIS(max_uintx),                        \
  152.17            "Allocation less than this value will be allocated "              \
  152.18 -          "using malloc. Larger allocations will use mmap.")
  152.19 +          "using malloc. Larger allocations will use mmap.")                \
  152.20 +                                                                            \
  152.21 +  product(bool, EnableTracing, false,                                       \
  152.22 +          "Enable event-based tracing")                                     \
  152.23 +  product(bool, UseLockedTracing, false,                                    \
  152.24 +          "Use locked-tracing when doing event-based tracing")
  152.25 +
  152.26  
  152.27  /*
  152.28   *  Macros for factoring of globals
   153.1 --- a/src/share/vm/runtime/java.cpp	Fri Jun 07 09:33:01 2013 -0700
   153.2 +++ b/src/share/vm/runtime/java.cpp	Mon Jun 10 11:30:51 2013 +0200
   153.3 @@ -1,5 +1,5 @@
   153.4  /*
   153.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   153.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   153.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   153.8   *
   153.9   * This code is free software; you can redistribute it and/or modify it
  153.10 @@ -60,7 +60,6 @@
  153.11  #include "services/memReporter.hpp"
  153.12  #include "services/memTracker.hpp"
  153.13  #include "trace/tracing.hpp"
  153.14 -#include "trace/traceEventTypes.hpp"
  153.15  #include "utilities/dtrace.hpp"
  153.16  #include "utilities/globalDefinitions.hpp"
  153.17  #include "utilities/histogram.hpp"
  153.18 @@ -528,9 +527,12 @@
  153.19      JvmtiExport::post_thread_end(thread);
  153.20    }
  153.21  
  153.22 -  EVENT_BEGIN(TraceEventThreadEnd, event);
  153.23 -  EVENT_COMMIT(event,
  153.24 -      EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(thread->threadObj())));
  153.25 +
  153.26 +  EventThreadEnd event;
  153.27 +  if (event.should_commit()) {
  153.28 +      event.set_javalangthread(java_lang_Thread::thread_id(thread->threadObj()));
  153.29 +      event.commit();
  153.30 +  }
  153.31  
  153.32    // Always call even when there are not JVMTI environments yet, since environments
  153.33    // may be attached late and JVMTI must track phases of VM execution
   154.1 --- a/src/share/vm/runtime/mutexLocker.cpp	Fri Jun 07 09:33:01 2013 -0700
   154.2 +++ b/src/share/vm/runtime/mutexLocker.cpp	Mon Jun 10 11:30:51 2013 +0200
   154.3 @@ -1,5 +1,5 @@
   154.4  /*
   154.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   154.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   154.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   154.8   *
   154.9   * This code is free software; you can redistribute it and/or modify it
  154.10 @@ -270,13 +270,12 @@
  154.11    def(MethodCompileQueue_lock      , Monitor, nonleaf+4,   true );
  154.12    def(Debug2_lock                  , Mutex  , nonleaf+4,   true );
  154.13    def(Debug3_lock                  , Mutex  , nonleaf+4,   true );
  154.14 -  def(ProfileVM_lock               , Monitor, nonleaf+4,   false); // used for profiling of the VMThread
  154.15 +  def(ProfileVM_lock               , Monitor, special,   false); // used for profiling of the VMThread
  154.16    def(CompileThread_lock           , Monitor, nonleaf+5,   false );
  154.17  
  154.18 -  def(JfrQuery_lock                , Monitor, nonleaf,     true);  // JFR locks, keep these in consecutive order
  154.19 -  def(JfrMsg_lock                  , Monitor, nonleaf+2,   true);
  154.20 -  def(JfrBuffer_lock               , Mutex,   nonleaf+3,   true);
  154.21 -  def(JfrStream_lock               , Mutex,   nonleaf+4,   true);
  154.22 +  def(JfrMsg_lock                  , Monitor, leaf,        true);
  154.23 +  def(JfrBuffer_lock               , Mutex,   nonleaf+1,   true);
  154.24 +  def(JfrStream_lock               , Mutex,   nonleaf+2,   true);
  154.25    def(PeriodicTask_lock            , Monitor, nonleaf+5,   true);
  154.26  }
  154.27  
   155.1 --- a/src/share/vm/runtime/objectMonitor.cpp	Fri Jun 07 09:33:01 2013 -0700
   155.2 +++ b/src/share/vm/runtime/objectMonitor.cpp	Mon Jun 10 11:30:51 2013 +0200
   155.3 @@ -36,7 +36,10 @@
   155.4  #include "runtime/stubRoutines.hpp"
   155.5  #include "runtime/thread.inline.hpp"
   155.6  #include "services/threadService.hpp"
   155.7 +#include "trace/tracing.hpp"
   155.8 +#include "trace/traceMacros.hpp"
   155.9  #include "utilities/dtrace.hpp"
  155.10 +#include "utilities/macros.hpp"
  155.11  #include "utilities/preserveException.hpp"
  155.12  #ifdef TARGET_OS_FAMILY_linux
  155.13  # include "os_linux.inline.hpp"
  155.14 @@ -371,6 +374,8 @@
  155.15    // Ensure the object-monitor relationship remains stable while there's contention.
  155.16    Atomic::inc_ptr(&_count);
  155.17  
  155.18 +  EventJavaMonitorEnter event;
  155.19 +
  155.20    { // Change java thread status to indicate blocked on monitor enter.
  155.21      JavaThreadBlockedOnMonitorEnterState jtbmes(jt, this);
  155.22  
  155.23 @@ -402,7 +407,7 @@
  155.24        //
  155.25            _recursions = 0 ;
  155.26        _succ = NULL ;
  155.27 -      exit (Self) ;
  155.28 +      exit (false, Self) ;
  155.29  
  155.30        jt->java_suspend_self();
  155.31      }
  155.32 @@ -435,6 +440,14 @@
  155.33    if (JvmtiExport::should_post_monitor_contended_entered()) {
  155.34      JvmtiExport::post_monitor_contended_entered(jt, this);
  155.35    }
  155.36 +
  155.37 +  if (event.should_commit()) {
  155.38 +    event.set_klass(((oop)this->object())->klass());
  155.39 +    event.set_previousOwner((TYPE_JAVALANGTHREAD)_previous_owner_tid);
  155.40 +    event.set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
  155.41 +    event.commit();
  155.42 +  }
  155.43 +
  155.44    if (ObjectMonitor::_sync_ContendedLockAttempts != NULL) {
  155.45       ObjectMonitor::_sync_ContendedLockAttempts->inc() ;
  155.46    }
  155.47 @@ -917,7 +930,7 @@
  155.48  // Both impinge on OS scalability.  Given that, at most one thread parked on
  155.49  // a monitor will use a timer.
  155.50  
  155.51 -void ATTR ObjectMonitor::exit(TRAPS) {
  155.52 +void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
  155.53     Thread * Self = THREAD ;
  155.54     if (THREAD != _owner) {
  155.55       if (THREAD->is_lock_owned((address) _owner)) {
  155.56 @@ -954,6 +967,14 @@
  155.57        _Responsible = NULL ;
  155.58     }
  155.59  
  155.60 +#if INCLUDE_TRACE
  155.61 +   // get the owner's thread id for the MonitorEnter event
  155.62 +   // if it is enabled and the thread isn't suspended
  155.63 +   if (not_suspended && Tracing::is_event_enabled(TraceJavaMonitorEnterEvent)) {
  155.64 +     _previous_owner_tid = SharedRuntime::get_java_tid(Self);
  155.65 +   }
  155.66 +#endif
  155.67 +
  155.68     for (;;) {
  155.69        assert (THREAD == _owner, "invariant") ;
  155.70  
  155.71 @@ -1343,7 +1364,7 @@
  155.72     guarantee(Self == _owner, "complete_exit not owner");
  155.73     intptr_t save = _recursions; // record the old recursion count
  155.74     _recursions = 0;        // set the recursion level to be 0
  155.75 -   exit (Self) ;           // exit the monitor
  155.76 +   exit (true, Self) ;           // exit the monitor
  155.77     guarantee (_owner != Self, "invariant");
  155.78     return save;
  155.79  }
  155.80 @@ -1397,6 +1418,20 @@
  155.81    for (v = *adr ; Atomic::cmpxchg (v + dx, adr, v) != v; v = *adr) ;
  155.82    return v ;
  155.83  }
  155.84 +
  155.85 +// helper method for posting a monitor wait event
  155.86 +void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
  155.87 +                                                           jlong notifier_tid,
  155.88 +                                                           jlong timeout,
  155.89 +                                                           bool timedout) {
  155.90 +  event->set_klass(((oop)this->object())->klass());
  155.91 +  event->set_timeout((TYPE_ULONG)timeout);
  155.92 +  event->set_address((TYPE_ADDRESS)(uintptr_t)(this->object_addr()));
  155.93 +  event->set_notifier((TYPE_OSTHREAD)notifier_tid);
  155.94 +  event->set_timedOut((TYPE_BOOLEAN)timedout);
  155.95 +  event->commit();
  155.96 +}
  155.97 +
  155.98  // -----------------------------------------------------------------------------
  155.99  // Wait/Notify/NotifyAll
 155.100  //
 155.101 @@ -1412,6 +1447,8 @@
 155.102     // Throw IMSX or IEX.
 155.103     CHECK_OWNER();
 155.104  
 155.105 +   EventJavaMonitorWait event;
 155.106 +
 155.107     // check for a pending interrupt
 155.108     if (interruptible && Thread::is_interrupted(Self, true) && !HAS_PENDING_EXCEPTION) {
 155.109       // post monitor waited event.  Note that this is past-tense, we are done waiting.
 155.110 @@ -1420,10 +1457,14 @@
 155.111          // wait was not timed out due to thread interrupt.
 155.112          JvmtiExport::post_monitor_waited(jt, this, false);
 155.113       }
 155.114 +     if (event.should_commit()) {
 155.115 +       post_monitor_wait_event(&event, 0, millis, false);
 155.116 +     }
 155.117       TEVENT (Wait - Throw IEX) ;
 155.118       THROW(vmSymbols::java_lang_InterruptedException());
 155.119       return ;
 155.120     }
 155.121 +
 155.122     TEVENT (Wait) ;
 155.123  
 155.124     assert (Self->_Stalled == 0, "invariant") ;
 155.125 @@ -1455,7 +1496,7 @@
 155.126     intptr_t save = _recursions; // record the old recursion count
 155.127     _waiters++;                  // increment the number of waiters
 155.128     _recursions = 0;             // set the recursion level to be 1
 155.129 -   exit (Self) ;                    // exit the monitor
 155.130 +   exit (true, Self) ;                    // exit the monitor
 155.131     guarantee (_owner != Self, "invariant") ;
 155.132  
 155.133     // As soon as the ObjectMonitor's ownership is dropped in the exit()
 155.134 @@ -1555,6 +1596,11 @@
 155.135       if (JvmtiExport::should_post_monitor_waited()) {
 155.136         JvmtiExport::post_monitor_waited(jt, this, ret == OS_TIMEOUT);
 155.137       }
 155.138 +
 155.139 +     if (event.should_commit()) {
 155.140 +       post_monitor_wait_event(&event, node._notifier_tid, millis, ret == OS_TIMEOUT);
 155.141 +     }
 155.142 +
 155.143       OrderAccess::fence() ;
 155.144  
 155.145       assert (Self->_Stalled != 0, "invariant") ;
 155.146 @@ -1634,6 +1680,8 @@
 155.147          iterator->TState = ObjectWaiter::TS_ENTER ;
 155.148       }
 155.149       iterator->_notified = 1 ;
 155.150 +     Thread * Self = THREAD;
 155.151 +     iterator->_notifier_tid = Self->osthread()->thread_id();
 155.152  
 155.153       ObjectWaiter * List = _EntryList ;
 155.154       if (List != NULL) {
 155.155 @@ -1758,6 +1806,8 @@
 155.156       guarantee (iterator->TState == ObjectWaiter::TS_WAIT, "invariant") ;
 155.157       guarantee (iterator->_notified == 0, "invariant") ;
 155.158       iterator->_notified = 1 ;
 155.159 +     Thread * Self = THREAD;
 155.160 +     iterator->_notifier_tid = Self->osthread()->thread_id();
 155.161       if (Policy != 4) {
 155.162          iterator->TState = ObjectWaiter::TS_ENTER ;
 155.163       }
   156.1 --- a/src/share/vm/runtime/objectMonitor.hpp	Fri Jun 07 09:33:01 2013 -0700
   156.2 +++ b/src/share/vm/runtime/objectMonitor.hpp	Mon Jun 10 11:30:51 2013 +0200
   156.3 @@ -1,5 +1,5 @@
   156.4  /*
   156.5 - * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
   156.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
   156.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   156.8   *
   156.9   * This code is free software; you can redistribute it and/or modify it
  156.10 @@ -29,7 +29,6 @@
  156.11  #include "runtime/park.hpp"
  156.12  #include "runtime/perfData.hpp"
  156.13  
  156.14 -
  156.15  // ObjectWaiter serves as a "proxy" or surrogate thread.
  156.16  // TODO-FIXME: Eliminate ObjectWaiter and use the thread-specific
  156.17  // ParkEvent instead.  Beware, however, that the JVMTI code
  156.18 @@ -43,6 +42,7 @@
  156.19    ObjectWaiter * volatile _next;
  156.20    ObjectWaiter * volatile _prev;
  156.21    Thread*       _thread;
  156.22 +  jlong         _notifier_tid;
  156.23    ParkEvent *   _event;
  156.24    volatile int  _notified ;
  156.25    volatile TStates TState ;
  156.26 @@ -55,6 +55,9 @@
  156.27    void wait_reenter_end(ObjectMonitor *mon);
  156.28  };
  156.29  
  156.30 +// forward declaration to avoid include tracing.hpp
  156.31 +class EventJavaMonitorWait;
  156.32 +
  156.33  // WARNING:
  156.34  //   This is a very sensitive and fragile class. DO NOT make any
  156.35  // change unless you are fully aware of the underlying semantics.
  156.36 @@ -151,6 +154,7 @@
  156.37      _SpinFreq     = 0 ;
  156.38      _SpinClock    = 0 ;
  156.39      OwnerIsThread = 0 ;
  156.40 +    _previous_owner_tid = 0;
  156.41    }
  156.42  
  156.43    ~ObjectMonitor() {
  156.44 @@ -192,7 +196,7 @@
  156.45  
  156.46    bool      try_enter (TRAPS) ;
  156.47    void      enter(TRAPS);
  156.48 -  void      exit(TRAPS);
  156.49 +  void      exit(bool not_suspended, TRAPS);
  156.50    void      wait(jlong millis, bool interruptable, TRAPS);
  156.51    void      notify(TRAPS);
  156.52    void      notifyAll(TRAPS);
  156.53 @@ -218,6 +222,10 @@
  156.54    void      ctAsserts () ;
  156.55    void      ExitEpilog (Thread * Self, ObjectWaiter * Wakee) ;
  156.56    bool      ExitSuspendEquivalent (JavaThread * Self) ;
  156.57 +  void      post_monitor_wait_event(EventJavaMonitorWait * event,
  156.58 +                                                   jlong notifier_tid,
  156.59 +                                                   jlong timeout,
  156.60 +                                                   bool timedout);
  156.61  
  156.62   private:
  156.63    friend class ObjectSynchronizer;
  156.64 @@ -240,6 +248,7 @@
  156.65  
  156.66   protected:                         // protected for jvmtiRawMonitor
  156.67    void *  volatile _owner;          // pointer to owning thread OR BasicLock
  156.68 +  volatile jlong _previous_owner_tid; // thread id of the previous owner of the monitor
  156.69    volatile intptr_t  _recursions;   // recursion count, 0 for first entry
  156.70   private:
  156.71    int OwnerIsThread ;               // _owner is (Thread *) vs SP/BasicLock
   157.1 --- a/src/share/vm/runtime/os.cpp	Fri Jun 07 09:33:01 2013 -0700
   157.2 +++ b/src/share/vm/runtime/os.cpp	Mon Jun 10 11:30:51 2013 +0200
   157.3 @@ -265,8 +265,7 @@
   157.4          VMThread::execute(&op1);
   157.5          Universe::print_heap_at_SIGBREAK();
   157.6          if (PrintClassHistogram) {
   157.7 -          VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */,
   157.8 -                                   true /* need_prologue */);
   157.9 +          VM_GC_HeapInspection op1(gclog_or_tty, true /* force full GC before heap inspection */);
  157.10            VMThread::execute(&op1);
  157.11          }
  157.12          if (JvmtiExport::should_post_data_dump()) {
  157.13 @@ -1444,11 +1443,16 @@
  157.14    return (int) i;
  157.15  }
  157.16  
  157.17 +void os::SuspendedThreadTask::run() {
  157.18 +  assert(Threads_lock->owned_by_self() || (_thread == VMThread::vm_thread()), "must have threads lock to call this");
  157.19 +  internal_do_task();
  157.20 +  _done = true;
  157.21 +}
  157.22 +
  157.23  bool os::create_stack_guard_pages(char* addr, size_t bytes) {
  157.24    return os::pd_create_stack_guard_pages(addr, bytes);
  157.25  }
  157.26  
  157.27 -
  157.28  char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
  157.29    char* result = pd_reserve_memory(bytes, addr, alignment_hint);
  157.30    if (result != NULL) {
  157.31 @@ -1551,3 +1555,19 @@
  157.32    pd_realign_memory(addr, bytes, alignment_hint);
  157.33  }
  157.34  
  157.35 +#ifndef TARGET_OS_FAMILY_windows
  157.36 +/* try to switch state from state "from" to state "to"
  157.37 + * returns the state set after the method is complete
  157.38 + */
  157.39 +os::SuspendResume::State os::SuspendResume::switch_state(os::SuspendResume::State from,
  157.40 +                                                         os::SuspendResume::State to)
  157.41 +{
  157.42 +  os::SuspendResume::State result =
  157.43 +    (os::SuspendResume::State) Atomic::cmpxchg((jint) to, (jint *) &_state, (jint) from);
  157.44 +  if (result == from) {
  157.45 +    // success
  157.46 +    return to;
  157.47 +  }
  157.48 +  return result;
  157.49 +}
  157.50 +#endif
   158.1 --- a/src/share/vm/runtime/os.hpp	Fri Jun 07 09:33:01 2013 -0700
   158.2 +++ b/src/share/vm/runtime/os.hpp	Mon Jun 10 11:30:51 2013 +0200
   158.3 @@ -781,6 +781,104 @@
   158.4    // ResumeThread call)
   158.5    static void pause();
   158.6  
   158.7 +  class SuspendedThreadTaskContext {
   158.8 +  public:
   158.9 +    SuspendedThreadTaskContext(Thread* thread, void *ucontext) : _thread(thread), _ucontext(ucontext) {}
  158.10 +    Thread* thread() const { return _thread; }
  158.11 +    void* ucontext() const { return _ucontext; }
  158.12 +  private:
  158.13 +    Thread* _thread;
  158.14 +    void* _ucontext;
  158.15 +  };
  158.16 +
  158.17 +  class SuspendedThreadTask {
  158.18 +  public:
  158.19 +    SuspendedThreadTask(Thread* thread) : _thread(thread), _done(false) {}
  158.20 +    virtual ~SuspendedThreadTask() {}
  158.21 +    void run();
  158.22 +    bool is_done() { return _done; }
  158.23 +    virtual void do_task(const SuspendedThreadTaskContext& context) = 0;
  158.24 +  protected:
  158.25 +  private:
  158.26 +    void internal_do_task();
  158.27 +    Thread* _thread;
  158.28 +    bool _done;
  158.29 +  };
  158.30 +
  158.31 +#ifndef TARGET_OS_FAMILY_windows
  158.32 +  // Suspend/resume support
  158.33 +  // Protocol:
  158.34 +  //
  158.35 +  // a thread starts in SR_RUNNING
  158.36 +  //
  158.37 +  // SR_RUNNING can go to
  158.38 +  //   * SR_SUSPEND_REQUEST when the WatcherThread wants to suspend it
  158.39 +  // SR_SUSPEND_REQUEST can go to
  158.40 +  //   * SR_RUNNING if WatcherThread decides it waited for SR_SUSPENDED too long (timeout)
  158.41 +  //   * SR_SUSPENDED if the stopped thread receives the signal and switches state
  158.42 +  // SR_SUSPENDED can go to
  158.43 +  //   * SR_WAKEUP_REQUEST when the WatcherThread has done the work and wants to resume
  158.44 +  // SR_WAKEUP_REQUEST can go to
  158.45 +  //   * SR_RUNNING when the stopped thread receives the signal
  158.46 +  //   * SR_WAKEUP_REQUEST on timeout (resend the signal and try again)
  158.47 +  class SuspendResume {
  158.48 +   public:
  158.49 +    enum State {
  158.50 +      SR_RUNNING,
  158.51 +      SR_SUSPEND_REQUEST,
  158.52 +      SR_SUSPENDED,
  158.53 +      SR_WAKEUP_REQUEST
  158.54 +    };
  158.55 +
  158.56 +  private:
  158.57 +    volatile State _state;
  158.58 +
  158.59 +  private:
  158.60 +    /* try to switch state from state "from" to state "to"
  158.61 +     * returns the state set after the method is complete
  158.62 +     */
  158.63 +    State switch_state(State from, State to);
  158.64 +
  158.65 +  public:
  158.66 +    SuspendResume() : _state(SR_RUNNING) { }
  158.67 +
  158.68 +    State state() const { return _state; }
  158.69 +
  158.70 +    State request_suspend() {
  158.71 +      return switch_state(SR_RUNNING, SR_SUSPEND_REQUEST);
  158.72 +    }
  158.73 +
  158.74 +    State cancel_suspend() {
  158.75 +      return switch_state(SR_SUSPEND_REQUEST, SR_RUNNING);
  158.76 +    }
  158.77 +
  158.78 +    State suspended() {
  158.79 +      return switch_state(SR_SUSPEND_REQUEST, SR_SUSPENDED);
  158.80 +    }
  158.81 +
  158.82 +    State request_wakeup() {
  158.83 +      return switch_state(SR_SUSPENDED, SR_WAKEUP_REQUEST);
  158.84 +    }
  158.85 +
  158.86 +    State running() {
  158.87 +      return switch_state(SR_WAKEUP_REQUEST, SR_RUNNING);
  158.88 +    }
  158.89 +
  158.90 +    bool is_running() const {
  158.91 +      return _state == SR_RUNNING;
  158.92 +    }
  158.93 +
  158.94 +    bool is_suspend_request() const {
  158.95 +      return _state == SR_SUSPEND_REQUEST;
  158.96 +    }
  158.97 +
  158.98 +    bool is_suspended() const {
  158.99 +      return _state == SR_SUSPENDED;
 158.100 +    }
 158.101 +  };
 158.102 +#endif
 158.103 +
 158.104 +
 158.105   protected:
 158.106    static long _rand_seed;                   // seed for random number generator
 158.107    static int _processor_count;              // number of processors
   159.1 --- a/src/share/vm/runtime/perfData.cpp	Fri Jun 07 09:33:01 2013 -0700
   159.2 +++ b/src/share/vm/runtime/perfData.cpp	Mon Jun 10 11:30:51 2013 +0200
   159.3 @@ -323,6 +323,10 @@
   159.4    }
   159.5  }
   159.6  
   159.7 +PerfData* PerfDataManager::find_by_name(const char* name) {
   159.8 +  return _all->find_by_name(name);
   159.9 +}
  159.10 +
  159.11  PerfDataList* PerfDataManager::all() {
  159.12  
  159.13    MutexLocker ml(PerfDataManager_lock);
   160.1 --- a/src/share/vm/runtime/perfData.hpp	Fri Jun 07 09:33:01 2013 -0700
   160.2 +++ b/src/share/vm/runtime/perfData.hpp	Mon Jun 10 11:30:51 2013 +0200
   160.3 @@ -693,6 +693,9 @@
   160.4      // the given name.
   160.5      static bool exists(const char* name) { return _all->contains(name); }
   160.6  
   160.7 +    // method to search for a instrumentation object by name
   160.8 +    static PerfData* find_by_name(const char* name);
   160.9 +
  160.10      // method to map a CounterNS enumeration to a namespace string
  160.11      static const char* ns_to_string(CounterNS ns) {
  160.12        return _name_spaces[ns];
   161.1 --- a/src/share/vm/runtime/stubRoutines.hpp	Fri Jun 07 09:33:01 2013 -0700
   161.2 +++ b/src/share/vm/runtime/stubRoutines.hpp	Mon Jun 10 11:30:51 2013 +0200
   161.3 @@ -1,5 +1,5 @@
   161.4  /*
   161.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   161.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   161.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   161.8   *
   161.9   * This code is free software; you can redistribute it and/or modify it
  161.10 @@ -223,6 +223,8 @@
  161.11    static void    initialize1();                            // must happen before universe::genesis
  161.12    static void    initialize2();                            // must happen after  universe::genesis
  161.13  
  161.14 +  static bool is_stub_code(address addr)                   { return contains(addr); }
  161.15 +
  161.16    static bool contains(address addr) {
  161.17      return
  161.18        (_code1 != NULL && _code1->blob_contains(addr)) ||
   162.1 --- a/src/share/vm/runtime/sweeper.cpp	Fri Jun 07 09:33:01 2013 -0700
   162.2 +++ b/src/share/vm/runtime/sweeper.cpp	Mon Jun 10 11:30:51 2013 +0200
   162.3 @@ -1,5 +1,5 @@
   162.4  /*
   162.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   162.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   162.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   162.8   *
   162.9   * This code is free software; you can redistribute it and/or modify it
  162.10 @@ -36,6 +36,7 @@
  162.11  #include "runtime/os.hpp"
  162.12  #include "runtime/sweeper.hpp"
  162.13  #include "runtime/vm_operations.hpp"
  162.14 +#include "trace/tracing.hpp"
  162.15  #include "utilities/events.hpp"
  162.16  #include "utilities/xmlstream.hpp"
  162.17  
  162.18 @@ -130,6 +131,9 @@
  162.19  long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
  162.20  nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
  162.21  int       NMethodSweeper::_seen = 0 ;        // No. of nmethods we have currently processed in current pass of CodeCache
  162.22 +int       NMethodSweeper::_flushed_count = 0;   // Nof. nmethods flushed in current sweep
  162.23 +int       NMethodSweeper::_zombified_count = 0; // Nof. nmethods made zombie in current sweep
  162.24 +int       NMethodSweeper::_marked_count = 0;    // Nof. nmethods marked for reclaim in current sweep
  162.25  
  162.26  volatile int NMethodSweeper::_invocations = 0;   // No. of invocations left until we are completed with this pass
  162.27  volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress.
  162.28 @@ -143,6 +147,15 @@
  162.29  int       NMethodSweeper::_dead_compile_ids = 0;
  162.30  long      NMethodSweeper::_last_flush_traversal_id = 0;
  162.31  
  162.32 +int       NMethodSweeper::_number_of_flushes = 0; // Total of full traversals caused by full cache
  162.33 +int       NMethodSweeper::_total_nof_methods_reclaimed = 0;
  162.34 +jlong     NMethodSweeper::_total_time_sweeping = 0;
  162.35 +jlong     NMethodSweeper::_total_time_this_sweep = 0;
  162.36 +jlong     NMethodSweeper::_peak_sweep_time = 0;
  162.37 +jlong     NMethodSweeper::_peak_sweep_fraction_time = 0;
  162.38 +jlong     NMethodSweeper::_total_disconnect_time = 0;
  162.39 +jlong     NMethodSweeper::_peak_disconnect_time = 0;
  162.40 +
  162.41  class MarkActivationClosure: public CodeBlobClosure {
  162.42  public:
  162.43    virtual void do_code_blob(CodeBlob* cb) {
  162.44 @@ -176,6 +189,8 @@
  162.45      _invocations = NmethodSweepFraction;
  162.46      _current     = CodeCache::first_nmethod();
  162.47      _traversals  += 1;
  162.48 +    _total_time_this_sweep = 0;
  162.49 +
  162.50      if (PrintMethodFlushing) {
  162.51        tty->print_cr("### Sweep: stack traversal %d", _traversals);
  162.52      }
  162.53 @@ -229,12 +244,13 @@
  162.54  }
  162.55  
  162.56  void NMethodSweeper::sweep_code_cache() {
  162.57 -#ifdef ASSERT
  162.58 -  jlong sweep_start;
  162.59 -  if (PrintMethodFlushing) {
  162.60 -    sweep_start = os::javaTimeMillis();
  162.61 -  }
  162.62 -#endif
  162.63 +
  162.64 +  jlong sweep_start_counter = os::elapsed_counter();
  162.65 +
  162.66 +  _flushed_count   = 0;
  162.67 +  _zombified_count = 0;
  162.68 +  _marked_count    = 0;
  162.69 +
  162.70    if (PrintMethodFlushing && Verbose) {
  162.71      tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
  162.72    }
  162.73 @@ -302,14 +318,34 @@
  162.74      }
  162.75    }
  162.76  
  162.77 +  jlong sweep_end_counter = os::elapsed_counter();
  162.78 +  jlong sweep_time = sweep_end_counter - sweep_start_counter;
  162.79 +  _total_time_sweeping  += sweep_time;
  162.80 +  _total_time_this_sweep += sweep_time;
  162.81 +  _peak_sweep_fraction_time = MAX2(sweep_time, _peak_sweep_fraction_time);
  162.82 +  _total_nof_methods_reclaimed += _flushed_count;
  162.83 +
  162.84 +  EventSweepCodeCache event(UNTIMED);
  162.85 +  if (event.should_commit()) {
  162.86 +    event.set_starttime(sweep_start_counter);
  162.87 +    event.set_endtime(sweep_end_counter);
  162.88 +    event.set_sweepIndex(_traversals);
  162.89 +    event.set_sweepFractionIndex(NmethodSweepFraction - _invocations + 1);
  162.90 +    event.set_sweptCount(todo);
  162.91 +    event.set_flushedCount(_flushed_count);
  162.92 +    event.set_markedCount(_marked_count);
  162.93 +    event.set_zombifiedCount(_zombified_count);
  162.94 +    event.commit();
  162.95 +  }
  162.96 +
  162.97  #ifdef ASSERT
  162.98    if(PrintMethodFlushing) {
  162.99 -    jlong sweep_end             = os::javaTimeMillis();
 162.100 -    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
 162.101 +    tty->print_cr("### sweeper:      sweep time(%d): " INT64_FORMAT, _invocations, (jlong)sweep_time);
 162.102    }
 162.103  #endif
 162.104  
 162.105    if (_invocations == 1) {
 162.106 +    _peak_sweep_time = MAX2(_peak_sweep_time, _total_time_this_sweep);
 162.107      log_sweep("finished");
 162.108    }
 162.109  
 162.110 @@ -388,12 +424,14 @@
 162.111          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
 162.112        }
 162.113        release_nmethod(nm);
 162.114 +      _flushed_count++;
 162.115      } else {
 162.116        if (PrintMethodFlushing && Verbose) {
 162.117          tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
 162.118        }
 162.119        nm->mark_for_reclamation();
 162.120        _resweep = true;
 162.121 +      _marked_count++;
 162.122        SWEEP(nm);
 162.123      }
 162.124    } else if (nm->is_not_entrant()) {
 162.125 @@ -405,6 +443,7 @@
 162.126        }
 162.127        nm->make_zombie();
 162.128        _resweep = true;
 162.129 +      _zombified_count++;
 162.130        SWEEP(nm);
 162.131      } else {
 162.132        // Still alive, clean up its inline caches
 162.133 @@ -420,13 +459,16 @@
 162.134      // Unloaded code, just make it a zombie
 162.135      if (PrintMethodFlushing && Verbose)
 162.136        tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
 162.137 +
 162.138      if (nm->is_osr_method()) {
 162.139        SWEEP(nm);
 162.140        // No inline caches will ever point to osr methods, so we can just remove it
 162.141        release_nmethod(nm);
 162.142 +      _flushed_count++;
 162.143      } else {
 162.144        nm->make_zombie();
 162.145        _resweep = true;
 162.146 +      _zombified_count++;
 162.147        SWEEP(nm);
 162.148      }
 162.149    } else {
 162.150 @@ -484,7 +526,7 @@
 162.151    // If there was a race in detecting full code cache, only run
 162.152    // one vm op for it or keep the compiler shut off
 162.153  
 162.154 -  debug_only(jlong start = os::javaTimeMillis();)
 162.155 +  jlong disconnect_start_counter = os::elapsed_counter();
 162.156  
 162.157    // Traverse the code cache trying to dump the oldest nmethods
 162.158    int curr_max_comp_id = CompileBroker::get_compilation_id();
 162.159 @@ -541,13 +583,28 @@
 162.160      _last_full_flush_time = os::javaTimeMillis();
 162.161    }
 162.162  
 162.163 +  jlong disconnect_end_counter = os::elapsed_counter();
 162.164 +  jlong disconnect_time = disconnect_end_counter - disconnect_start_counter;
 162.165 +  _total_disconnect_time += disconnect_time;
 162.166 +  _peak_disconnect_time = MAX2(disconnect_time, _peak_disconnect_time);
 162.167 +
 162.168 +  EventCleanCodeCache event(UNTIMED);
 162.169 +  if (event.should_commit()) {
 162.170 +    event.set_starttime(disconnect_start_counter);
 162.171 +    event.set_endtime(disconnect_end_counter);
 162.172 +    event.set_disconnectedCount(disconnected);
 162.173 +    event.set_madeNonEntrantCount(made_not_entrant);
 162.174 +    event.commit();
 162.175 +  }
 162.176 +  _number_of_flushes++;
 162.177 +
 162.178    // After two more traversals the sweeper will get rid of unrestored nmethods
 162.179    _last_flush_traversal_id = _traversals;
 162.180    _resweep = true;
 162.181  #ifdef ASSERT
 162.182 -  jlong end = os::javaTimeMillis();
 162.183 +
 162.184    if(PrintMethodFlushing && Verbose) {
 162.185 -    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, end-start);
 162.186 +    tty->print_cr("### sweeper: unload time: " INT64_FORMAT, (jlong)disconnect_time);
 162.187    }
 162.188  #endif
 162.189  }
   163.1 --- a/src/share/vm/runtime/sweeper.hpp	Fri Jun 07 09:33:01 2013 -0700
   163.2 +++ b/src/share/vm/runtime/sweeper.hpp	Mon Jun 10 11:30:51 2013 +0200
   163.3 @@ -1,5 +1,5 @@
   163.4  /*
   163.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   163.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   163.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   163.8   *
   163.9   * This code is free software; you can redistribute it and/or modify it
  163.10 @@ -31,9 +31,12 @@
  163.11  //
  163.12  
  163.13  class NMethodSweeper : public AllStatic {
  163.14 -  static long      _traversals;   // Stack traversal count
  163.15 -  static nmethod*  _current;      // Current nmethod
  163.16 -  static int       _seen;         // Nof. nmethod we have currently processed in current pass of CodeCache
  163.17 +  static long      _traversals;      // Stack scan count, also sweep ID.
  163.18 +  static nmethod*  _current;         // Current nmethod
  163.19 +  static int       _seen;            // Nof. nmethod we have currently processed in current pass of CodeCache
  163.20 +  static int       _flushed_count;   // Nof. nmethods flushed in current sweep
  163.21 +  static int       _zombified_count; // Nof. nmethods made zombie in current sweep
  163.22 +  static int       _marked_count;    // Nof. nmethods marked for reclaim in current sweep
  163.23  
  163.24    static volatile int  _invocations;   // No. of invocations left until we are completed with this pass
  163.25    static volatile int  _sweep_started; // Flag to control conc sweeper
  163.26 @@ -53,6 +56,16 @@
  163.27    static int       _highest_marked;   // highest compile id dumped at last emergency unloading
  163.28    static int       _dead_compile_ids; // number of compile ids that where not in the cache last flush
  163.29  
  163.30 +  // Stat counters
  163.31 +  static int       _number_of_flushes;            // Total of full traversals caused by full cache
  163.32 +  static int       _total_nof_methods_reclaimed;  // Accumulated nof methods flushed
  163.33 +  static jlong     _total_time_sweeping;          // Accumulated time sweeping
  163.34 +  static jlong     _total_time_this_sweep;        // Total time this sweep
  163.35 +  static jlong     _peak_sweep_time;              // Peak time for a full sweep
  163.36 +  static jlong     _peak_sweep_fraction_time;     // Peak time sweeping one fraction
  163.37 +  static jlong     _total_disconnect_time;        // Total time cleaning code mem
  163.38 +  static jlong     _peak_disconnect_time;         // Peak time cleaning code mem
  163.39 +
  163.40    static void process_nmethod(nmethod *nm);
  163.41    static void release_nmethod(nmethod* nm);
  163.42  
  163.43 @@ -60,7 +73,14 @@
  163.44    static bool sweep_in_progress();
  163.45  
  163.46   public:
  163.47 -  static long traversal_count() { return _traversals; }
  163.48 +  static long traversal_count()              { return _traversals; }
  163.49 +  static int  number_of_flushes()            { return _number_of_flushes; }
  163.50 +  static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
  163.51 +  static jlong total_time_sweeping()         { return _total_time_sweeping; }
  163.52 +  static jlong peak_sweep_time()             { return _peak_sweep_time; }
  163.53 +  static jlong peak_sweep_fraction_time()    { return _peak_sweep_fraction_time; }
  163.54 +  static jlong total_disconnect_time()       { return _total_disconnect_time; }
  163.55 +  static jlong peak_disconnect_time()        { return _peak_disconnect_time; }
  163.56  
  163.57  #ifdef ASSERT
  163.58    // Keep track of sweeper activity in the ring buffer
   164.1 --- a/src/share/vm/runtime/synchronizer.cpp	Fri Jun 07 09:33:01 2013 -0700
   164.2 +++ b/src/share/vm/runtime/synchronizer.cpp	Mon Jun 10 11:30:51 2013 +0200
   164.3 @@ -213,7 +213,7 @@
   164.4       }
   164.5    }
   164.6  
   164.7 -  ObjectSynchronizer::inflate(THREAD, object)->exit (THREAD) ;
   164.8 +  ObjectSynchronizer::inflate(THREAD, object)->exit (true, THREAD) ;
   164.9  }
  164.10  
  164.11  // -----------------------------------------------------------------------------
  164.12 @@ -343,7 +343,7 @@
  164.13    // If this thread has locked the object, exit the monitor.  Note:  can't use
  164.14    // monitor->check(CHECK); must exit even if an exception is pending.
  164.15    if (monitor->check(THREAD)) {
  164.16 -     monitor->exit(THREAD);
  164.17 +     monitor->exit(true, THREAD);
  164.18    }
  164.19  }
  164.20  
   165.1 --- a/src/share/vm/runtime/task.cpp	Fri Jun 07 09:33:01 2013 -0700
   165.2 +++ b/src/share/vm/runtime/task.cpp	Mon Jun 10 11:30:51 2013 +0200
   165.3 @@ -1,5 +1,5 @@
   165.4  /*
   165.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   165.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   165.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   165.8   *
   165.9   * This code is free software; you can redistribute it and/or modify it
  165.10 @@ -114,9 +114,11 @@
  165.11    disenroll();
  165.12  }
  165.13  
  165.14 +/* enroll could be called from a JavaThread, so we have to check for
  165.15 + * safepoint when taking the lock to avoid deadlocking */
  165.16  void PeriodicTask::enroll() {
  165.17    MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
  165.18 -                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
  165.19 +                     NULL : PeriodicTask_lock);
  165.20  
  165.21    if (_num_tasks == PeriodicTask::max_tasks) {
  165.22      fatal("Overflow in PeriodicTask table");
  165.23 @@ -131,9 +133,11 @@
  165.24    }
  165.25  }
  165.26  
  165.27 +/* disenroll could be called from a JavaThread, so we have to check for
  165.28 + * safepoint when taking the lock to avoid deadlocking */
  165.29  void PeriodicTask::disenroll() {
  165.30    MutexLockerEx ml(PeriodicTask_lock->owned_by_self() ?
  165.31 -                     NULL : PeriodicTask_lock, Mutex::_no_safepoint_check_flag);
  165.32 +                     NULL : PeriodicTask_lock);
  165.33  
  165.34    int index;
  165.35    for(index = 0; index < _num_tasks && _tasks[index] != this; index++)
   166.1 --- a/src/share/vm/runtime/thread.cpp	Fri Jun 07 09:33:01 2013 -0700
   166.2 +++ b/src/share/vm/runtime/thread.cpp	Mon Jun 10 11:30:51 2013 +0200
   166.3 @@ -77,7 +77,8 @@
   166.4  #include "services/management.hpp"
   166.5  #include "services/memTracker.hpp"
   166.6  #include "services/threadService.hpp"
   166.7 -#include "trace/traceEventTypes.hpp"
   166.8 +#include "trace/tracing.hpp"
   166.9 +#include "trace/traceMacros.hpp"
  166.10  #include "utilities/defaultStream.hpp"
  166.11  #include "utilities/dtrace.hpp"
  166.12  #include "utilities/events.hpp"
  166.13 @@ -238,7 +239,6 @@
  166.14    CHECK_UNHANDLED_OOPS_ONLY(_gc_locked_out_count = 0;)
  166.15    _jvmti_env_iteration_count = 0;
  166.16    set_allocated_bytes(0);
  166.17 -  set_trace_buffer(NULL);
  166.18    _vm_operation_started_count = 0;
  166.19    _vm_operation_completed_count = 0;
  166.20    _current_pending_monitor = NULL;
  166.21 @@ -1659,9 +1659,11 @@
  166.22      JvmtiExport::post_thread_start(this);
  166.23    }
  166.24  
  166.25 -  EVENT_BEGIN(TraceEventThreadStart, event);
  166.26 -  EVENT_COMMIT(event,
  166.27 -     EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
  166.28 +  EventThreadStart event;
  166.29 +  if (event.should_commit()) {
  166.30 +     event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
  166.31 +     event.commit();
  166.32 +  }
  166.33  
  166.34    // We call another function to do the rest so we are sure that the stack addresses used
  166.35    // from there will be lower than the stack base just computed
  166.36 @@ -1791,9 +1793,11 @@
  166.37  
  166.38      // Called before the java thread exit since we want to read info
  166.39      // from java_lang_Thread object
  166.40 -    EVENT_BEGIN(TraceEventThreadEnd, event);
  166.41 -    EVENT_COMMIT(event,
  166.42 -        EVENT_SET(event, javalangthread, java_lang_Thread::thread_id(this->threadObj())));
  166.43 +    EventThreadEnd event;
  166.44 +    if (event.should_commit()) {
  166.45 +        event.set_javalangthread(java_lang_Thread::thread_id(this->threadObj()));
  166.46 +        event.commit();
  166.47 +    }
  166.48  
  166.49      // Call after last event on thread
  166.50      EVENT_THREAD_EXIT(this);
  166.51 @@ -3648,8 +3652,8 @@
  166.52    // Notify JVMTI agents that VM initialization is complete - nop if no agents.
  166.53    JvmtiExport::post_vm_initialized();
  166.54  
  166.55 -  if (!TRACE_START()) {
  166.56 -    vm_exit_during_initialization(Handle(THREAD, PENDING_EXCEPTION));
  166.57 +  if (TRACE_START() != JNI_OK) {
  166.58 +    vm_exit_during_initialization("Failed to start tracing backend.");
  166.59    }
  166.60  
  166.61    if (CleanChunkPoolAsync) {
   167.1 --- a/src/share/vm/runtime/thread.hpp	Fri Jun 07 09:33:01 2013 -0700
   167.2 +++ b/src/share/vm/runtime/thread.hpp	Mon Jun 10 11:30:51 2013 +0200
   167.3 @@ -47,7 +47,8 @@
   167.4  #include "services/memRecorder.hpp"
   167.5  #endif // INCLUDE_NMT
   167.6  
   167.7 -#include "trace/tracing.hpp"
   167.8 +#include "trace/traceBackend.hpp"
   167.9 +#include "trace/traceMacros.hpp"
  167.10  #include "utilities/exceptions.hpp"
  167.11  #include "utilities/top.hpp"
  167.12  #if INCLUDE_ALL_GCS
  167.13 @@ -258,7 +259,7 @@
  167.14    jlong _allocated_bytes;                       // Cumulative number of bytes allocated on
  167.15                                                  // the Java heap
  167.16  
  167.17 -  TRACE_BUFFER _trace_buffer;                   // Thread-local buffer for tracing
  167.18 +  TRACE_DATA _trace_data;                       // Thread-local data for tracing
  167.19  
  167.20    int   _vm_operation_started_count;            // VM_Operation support
  167.21    int   _vm_operation_completed_count;          // VM_Operation support
  167.22 @@ -449,8 +450,7 @@
  167.23      return allocated_bytes;
  167.24    }
  167.25  
  167.26 -  TRACE_BUFFER trace_buffer()              { return _trace_buffer; }
  167.27 -  void set_trace_buffer(TRACE_BUFFER buf)  { _trace_buffer = buf; }
  167.28 +  TRACE_DATA* trace_data()              { return &_trace_data; }
  167.29  
  167.30    // VM operation support
  167.31    int vm_operation_ticket()                      { return ++_vm_operation_started_count; }
   168.1 --- a/src/share/vm/runtime/timer.cpp	Fri Jun 07 09:33:01 2013 -0700
   168.2 +++ b/src/share/vm/runtime/timer.cpp	Mon Jun 10 11:30:51 2013 +0200
   168.3 @@ -1,5 +1,5 @@
   168.4  /*
   168.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   168.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   168.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   168.8   *
   168.9   * This code is free software; you can redistribute it and/or modify it
  168.10 @@ -39,6 +39,11 @@
  168.11  # include "os_bsd.inline.hpp"
  168.12  #endif
  168.13  
  168.14 +double TimeHelper::counter_to_seconds(jlong counter) {
  168.15 +  double count = (double) counter;
  168.16 +  double freq  = (double) os::elapsed_frequency();
  168.17 +  return counter/freq;
  168.18 +}
  168.19  
  168.20  void elapsedTimer::add(elapsedTimer t) {
  168.21    _counter += t._counter;
  168.22 @@ -59,9 +64,7 @@
  168.23  }
  168.24  
  168.25  double elapsedTimer::seconds() const {
  168.26 -  double count = (double) _counter;
  168.27 -  double freq  = (double) os::elapsed_frequency();
  168.28 -  return count/freq;
  168.29 + return TimeHelper::counter_to_seconds(_counter);
  168.30  }
  168.31  
  168.32  jlong elapsedTimer::milliseconds() const {
  168.33 @@ -90,9 +93,7 @@
  168.34  double TimeStamp::seconds() const {
  168.35    assert(is_updated(), "must not be clear");
  168.36    jlong new_count = os::elapsed_counter();
  168.37 -  double count = (double) new_count - _counter;
  168.38 -  double freq  = (double) os::elapsed_frequency();
  168.39 -  return count/freq;
  168.40 +  return TimeHelper::counter_to_seconds(new_count - _counter);
  168.41  }
  168.42  
  168.43  jlong TimeStamp::milliseconds() const {
  168.44 @@ -110,19 +111,15 @@
  168.45  }
  168.46  
  168.47  TraceTime::TraceTime(const char* title,
  168.48 -                     bool doit,
  168.49 -                     bool print_cr,
  168.50 -                     outputStream* logfile) {
  168.51 +                     bool doit) {
  168.52    _active   = doit;
  168.53    _verbose  = true;
  168.54 -  _print_cr = print_cr;
  168.55 -  _logfile = (logfile != NULL) ? logfile : tty;
  168.56  
  168.57    if (_active) {
  168.58      _accum = NULL;
  168.59 -    _logfile->stamp(PrintGCTimeStamps);
  168.60 -    _logfile->print("[%s", title);
  168.61 -    _logfile->flush();
  168.62 +    tty->stamp(PrintGCTimeStamps);
  168.63 +    tty->print("[%s", title);
  168.64 +    tty->flush();
  168.65      _t.start();
  168.66    }
  168.67  }
  168.68 @@ -130,17 +127,14 @@
  168.69  TraceTime::TraceTime(const char* title,
  168.70                       elapsedTimer* accumulator,
  168.71                       bool doit,
  168.72 -                     bool verbose,
  168.73 -                     outputStream* logfile) {
  168.74 +                     bool verbose) {
  168.75    _active = doit;
  168.76    _verbose = verbose;
  168.77 -  _print_cr = true;
  168.78 -  _logfile = (logfile != NULL) ? logfile : tty;
  168.79    if (_active) {
  168.80      if (_verbose) {
  168.81 -      _logfile->stamp(PrintGCTimeStamps);
  168.82 -      _logfile->print("[%s", title);
  168.83 -      _logfile->flush();
  168.84 +      tty->stamp(PrintGCTimeStamps);
  168.85 +      tty->print("[%s", title);
  168.86 +      tty->flush();
  168.87      }
  168.88      _accum = accumulator;
  168.89      _t.start();
  168.90 @@ -152,12 +146,8 @@
  168.91      _t.stop();
  168.92      if (_accum!=NULL) _accum->add(_t);
  168.93      if (_verbose) {
  168.94 -      if (_print_cr) {
  168.95 -        _logfile->print_cr(", %3.7f secs]", _t.seconds());
  168.96 -      } else {
  168.97 -        _logfile->print(", %3.7f secs]", _t.seconds());
  168.98 -      }
  168.99 -      _logfile->flush();
 168.100 +      tty->print_cr(", %3.7f secs]", _t.seconds());
 168.101 +      tty->flush();
 168.102      }
 168.103    }
 168.104  }
   169.1 --- a/src/share/vm/runtime/timer.hpp	Fri Jun 07 09:33:01 2013 -0700
   169.2 +++ b/src/share/vm/runtime/timer.hpp	Mon Jun 10 11:30:51 2013 +0200
   169.3 @@ -1,5 +1,5 @@
   169.4  /*
   169.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   169.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   169.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   169.8   *
   169.9   * This code is free software; you can redistribute it and/or modify it
  169.10 @@ -82,21 +82,16 @@
  169.11   private:
  169.12    bool          _active;    // do timing
  169.13    bool          _verbose;   // report every timing
  169.14 -  bool          _print_cr;  // add a CR to the end of the timer report
  169.15    elapsedTimer  _t;         // timer
  169.16    elapsedTimer* _accum;     // accumulator
  169.17 -  outputStream* _logfile;   // output log file
  169.18   public:
  169.19 -  // Constuctors
  169.20 +  // Constructors
  169.21    TraceTime(const char* title,
  169.22 -            bool doit = true,
  169.23 -            bool print_cr = true,
  169.24 -            outputStream *logfile = NULL);
  169.25 +            bool doit = true);
  169.26    TraceTime(const char* title,
  169.27              elapsedTimer* accumulator,
  169.28              bool doit = true,
  169.29 -            bool verbose = false,
  169.30 -            outputStream *logfile = NULL );
  169.31 +            bool verbose = false);
  169.32    ~TraceTime();
  169.33  
  169.34    // Accessors
  169.35 @@ -125,4 +120,9 @@
  169.36    ~TraceCPUTime();
  169.37  };
  169.38  
  169.39 +class TimeHelper {
  169.40 + public:
  169.41 +  static double counter_to_seconds(jlong counter);
  169.42 +};
  169.43 +
  169.44  #endif // SHARE_VM_RUNTIME_TIMER_HPP
   170.1 --- a/src/share/vm/runtime/vmStructs.cpp	Fri Jun 07 09:33:01 2013 -0700
   170.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Mon Jun 10 11:30:51 2013 +0200
   170.3 @@ -60,6 +60,7 @@
   170.4  #include "memory/generationSpec.hpp"
   170.5  #include "memory/heap.hpp"
   170.6  #include "memory/metablock.hpp"
   170.7 +#include "memory/referenceType.hpp"
   170.8  #include "memory/space.hpp"
   170.9  #include "memory/tenuredGeneration.hpp"
  170.10  #include "memory/universe.hpp"
   171.1 --- a/src/share/vm/runtime/vmThread.cpp	Fri Jun 07 09:33:01 2013 -0700
   171.2 +++ b/src/share/vm/runtime/vmThread.cpp	Mon Jun 10 11:30:51 2013 +0200
   171.3 @@ -35,6 +35,7 @@
   171.4  #include "runtime/vmThread.hpp"
   171.5  #include "runtime/vm_operations.hpp"
   171.6  #include "services/runtimeService.hpp"
   171.7 +#include "trace/tracing.hpp"
   171.8  #include "utilities/dtrace.hpp"
   171.9  #include "utilities/events.hpp"
  171.10  #include "utilities/xmlstream.hpp"
  171.11 @@ -365,7 +366,23 @@
  171.12                       (char *) op->name(), strlen(op->name()),
  171.13                       op->evaluation_mode());
  171.14  #endif /* USDT2 */
  171.15 +
  171.16 +    EventExecuteVMOperation event;
  171.17 +
  171.18      op->evaluate();
  171.19 +
  171.20 +    if (event.should_commit()) {
  171.21 +      bool is_concurrent = op->evaluate_concurrently();
  171.22 +      event.set_operation(op->type());
  171.23 +      event.set_safepoint(op->evaluate_at_safepoint());
  171.24 +      event.set_blocking(!is_concurrent);
  171.25 +      // Only write caller thread information for non-concurrent vm operations.
  171.26 +      // For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
  171.27 +      // This is because the caller thread could have exited already.
  171.28 +      event.set_caller(is_concurrent ? 0 : op->calling_thread()->osthread()->thread_id());
  171.29 +      event.commit();
  171.30 +    }
  171.31 +
  171.32  #ifndef USDT2
  171.33      HS_DTRACE_PROBE3(hotspot, vmops__end, op->name(), strlen(op->name()),
  171.34                       op->evaluation_mode());
  171.35 @@ -601,7 +618,7 @@
  171.36      {
  171.37        VMOperationQueue_lock->lock_without_safepoint_check();
  171.38        bool ok = _vm_queue->add(op);
  171.39 -      op->set_timestamp(os::javaTimeMillis());
  171.40 +    op->set_timestamp(os::javaTimeMillis());
  171.41        VMOperationQueue_lock->notify();
  171.42        VMOperationQueue_lock->unlock();
  171.43        // VM_Operation got skipped
   172.1 --- a/src/share/vm/runtime/vm_operations.cpp	Fri Jun 07 09:33:01 2013 -0700
   172.2 +++ b/src/share/vm/runtime/vm_operations.cpp	Mon Jun 10 11:30:51 2013 +0200
   172.3 @@ -1,5 +1,5 @@
   172.4  /*
   172.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
   172.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   172.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   172.8   *
   172.9   * This code is free software; you can redistribute it and/or modify it
  172.10 @@ -37,6 +37,7 @@
  172.11  #include "runtime/thread.inline.hpp"
  172.12  #include "runtime/vm_operations.hpp"
  172.13  #include "services/threadService.hpp"
  172.14 +#include "trace/tracing.hpp"
  172.15  
  172.16  #define VM_OP_NAME_INITIALIZE(name) #name,
  172.17  
  172.18 @@ -62,19 +63,21 @@
  172.19    }
  172.20  }
  172.21  
  172.22 +const char* VM_Operation::mode_to_string(Mode mode) {
  172.23 +  switch(mode) {
  172.24 +    case _safepoint      : return "safepoint";
  172.25 +    case _no_safepoint   : return "no safepoint";
  172.26 +    case _concurrent     : return "concurrent";
  172.27 +    case _async_safepoint: return "async safepoint";
  172.28 +    default              : return "unknown";
  172.29 +  }
  172.30 +}
  172.31  // Called by fatal error handler.
  172.32  void VM_Operation::print_on_error(outputStream* st) const {
  172.33    st->print("VM_Operation (" PTR_FORMAT "): ", this);
  172.34    st->print("%s", name());
  172.35  
  172.36 -  const char* mode;
  172.37 -  switch(evaluation_mode()) {
  172.38 -    case _safepoint      : mode = "safepoint";       break;
  172.39 -    case _no_safepoint   : mode = "no safepoint";    break;
  172.40 -    case _concurrent     : mode = "concurrent";      break;
  172.41 -    case _async_safepoint: mode = "async safepoint"; break;
  172.42 -    default              : mode = "unknown";         break;
  172.43 -  }
  172.44 +  const char* mode = mode_to_string(evaluation_mode());
  172.45    st->print(", mode: %s", mode);
  172.46  
  172.47    if (calling_thread()) {
   173.1 --- a/src/share/vm/runtime/vm_operations.hpp	Fri Jun 07 09:33:01 2013 -0700
   173.2 +++ b/src/share/vm/runtime/vm_operations.hpp	Mon Jun 10 11:30:51 2013 +0200
   173.3 @@ -1,5 +1,5 @@
   173.4  /*
   173.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   173.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   173.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   173.8   *
   173.9   * This code is free software; you can redistribute it and/or modify it
  173.10 @@ -178,6 +178,8 @@
  173.11             evaluation_mode() == _async_safepoint;
  173.12    }
  173.13  
  173.14 +  static const char* mode_to_string(Mode mode);
  173.15 +
  173.16    // Debugging
  173.17    void print_on_error(outputStream* st) const;
  173.18    const char* name() const { return _names[type()]; }
   174.1 --- a/src/share/vm/services/attachListener.cpp	Fri Jun 07 09:33:01 2013 -0700
   174.2 +++ b/src/share/vm/services/attachListener.cpp	Mon Jun 10 11:30:51 2013 +0200
   174.3 @@ -227,7 +227,7 @@
   174.4      }
   174.5      live_objects_only = strcmp(arg0, "-live") == 0;
   174.6    }
   174.7 -  VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */, true /* need_prologue */);
   174.8 +  VM_GC_HeapInspection heapop(out, live_objects_only /* request full gc */);
   174.9    VMThread::execute(&heapop);
  174.10    return JNI_OK;
  174.11  }
   175.1 --- a/src/share/vm/services/diagnosticArgument.cpp	Fri Jun 07 09:33:01 2013 -0700
   175.2 +++ b/src/share/vm/services/diagnosticArgument.cpp	Mon Jun 10 11:30:51 2013 +0200
   175.3 @@ -24,6 +24,7 @@
   175.4  
   175.5  #include "precompiled.hpp"
   175.6  #include "memory/allocation.inline.hpp"
   175.7 +#include "memory/resourceArea.hpp"
   175.8  #include "runtime/thread.hpp"
   175.9  #include "services/diagnosticArgument.hpp"
  175.10  
  175.11 @@ -86,9 +87,18 @@
  175.12  
  175.13  template <> void DCmdArgument<jlong>::parse_value(const char* str,
  175.14                                                    size_t len, TRAPS) {
  175.15 -    if (str == NULL || sscanf(str, JLONG_FORMAT, &_value) != 1) {
  175.16 -    THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
  175.17 -      "Integer parsing error in diagnostic command arguments\n");
  175.18 +  int scanned = -1;
  175.19 +  if (str == NULL
  175.20 +      || sscanf(str, JLONG_FORMAT"%n", &_value, &scanned) != 1
  175.21 +      || (size_t)scanned != len)
  175.22 +  {
  175.23 +    ResourceMark rm;
  175.24 +
  175.25 +    char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
  175.26 +    strncpy(buf, str, len);
  175.27 +    buf[len] = '\0';
  175.28 +    Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
  175.29 +      "Integer parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
  175.30    }
  175.31  }
  175.32  
  175.33 @@ -96,7 +106,7 @@
  175.34    if (has_default()) {
  175.35      this->parse_value(_default_string, strlen(_default_string), THREAD);
  175.36      if (HAS_PENDING_EXCEPTION) {
  175.37 -      fatal("Default string must be parsable");
  175.38 +      fatal("Default string must be parseable");
  175.39      }
  175.40    } else {
  175.41      set_value(0);
  175.42 @@ -116,8 +126,13 @@
  175.43      } else if (len == strlen("false") && strncasecmp(str, "false", len) == 0) {
  175.44         set_value(false);
  175.45      } else {
  175.46 -      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
  175.47 -        "Boolean parsing error in diagnostic command arguments");
  175.48 +      ResourceMark rm;
  175.49 +
  175.50 +      char* buf = NEW_RESOURCE_ARRAY(char, len + 1);
  175.51 +      strncpy(buf, str, len);
  175.52 +      buf[len] = '\0';
  175.53 +      Exceptions::fthrow(THREAD_AND_LOCATION, vmSymbols::java_lang_IllegalArgumentException(),
  175.54 +        "Boolean parsing error in command argument '%s'. Could not parse: %s.", _name, buf);
  175.55      }
  175.56    }
  175.57  }
  175.58 @@ -168,7 +183,7 @@
  175.59                                                   size_t len, TRAPS) {
  175.60    if (str == NULL) {
  175.61      THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(),
  175.62 -              "Integer parsing error nanotime value: syntax error");
  175.63 +              "Integer parsing error nanotime value: syntax error, value is null");
  175.64    }
  175.65  
  175.66    int argc = sscanf(str, JLONG_FORMAT, &_value._time);
   176.1 --- a/src/share/vm/services/diagnosticCommand.cpp	Fri Jun 07 09:33:01 2013 -0700
   176.2 +++ b/src/share/vm/services/diagnosticCommand.cpp	Mon Jun 10 11:30:51 2013 +0200
   176.3 @@ -320,8 +320,7 @@
   176.4  
   176.5  void ClassHistogramDCmd::execute(DCmdSource source, TRAPS) {
   176.6    VM_GC_HeapInspection heapop(output(),
   176.7 -                              !_all.value() /* request full gc if false */,
   176.8 -                              true /* need_prologue */);
   176.9 +                              !_all.value() /* request full gc if false */);
  176.10    VMThread::execute(&heapop);
  176.11  }
  176.12  
  176.13 @@ -361,8 +360,7 @@
  176.14    }
  176.15  
  176.16    VM_GC_HeapInspection heapop(output(),
  176.17 -                              true, /* request_full_gc */
  176.18 -                              true /* need_prologue */);
  176.19 +                              true /* request_full_gc */);
  176.20    heapop.set_csv_format(_csv.value());
  176.21    heapop.set_print_help(_help.value());
  176.22    heapop.set_print_class_stats(true);
   177.1 --- a/src/share/vm/services/memBaseline.cpp	Fri Jun 07 09:33:01 2013 -0700
   177.2 +++ b/src/share/vm/services/memBaseline.cpp	Mon Jun 10 11:30:51 2013 +0200
   177.3 @@ -41,6 +41,7 @@
   177.4    {mtOther,      "Other"},
   177.5    {mtSymbol,     "Symbol"},
   177.6    {mtNMT,        "Memory Tracking"},
   177.7 +  {mtTracing,    "Tracing"},
   177.8    {mtChunk,      "Pooled Free Chunks"},
   177.9    {mtClassShared,"Shared spaces for classes"},
  177.10    {mtTest,       "Test"},
   178.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   178.2 +++ b/src/share/vm/trace/noTraceBackend.hpp	Mon Jun 10 11:30:51 2013 +0200
   178.3 @@ -0,0 +1,48 @@
   178.4 +/*
   178.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   178.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   178.7 + *
   178.8 + * This code is free software; you can redistribute it and/or modify it
   178.9 + * under the terms of the GNU General Public License version 2 only, as
  178.10 + * published by the Free Software Foundation.
  178.11 + *
  178.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  178.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  178.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  178.15 + * version 2 for more details (a copy is included in the LICENSE file that
  178.16 + * accompanied this code).
  178.17 + *
  178.18 + * You should have received a copy of the GNU General Public License version
  178.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  178.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  178.21 + *
  178.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  178.23 + * or visit www.oracle.com if you need additional information or have any
  178.24 + * questions.
  178.25 + *
  178.26 + */
  178.27 +#ifndef SHARE_VM_TRACE_NOTRACEBACKEND_HPP
  178.28 +#define SHARE_VM_TRACE_NOTRACEBACKEND_HPP
  178.29 +
  178.30 +#include "prims/jni.h"
  178.31 +
  178.32 +typedef jlong TracingTime;
  178.33 +typedef jlong RelativeTracingTime;
  178.34 +
  178.35 +class NoTraceBackend {
  178.36 +public:
  178.37 +  static TracingTime time() {
  178.38 +    return 0;
  178.39 +  }
  178.40 +};
  178.41 +
  178.42 +class TraceThreadData {
  178.43 +public:
  178.44 +    TraceThreadData() {}
  178.45 +};
  178.46 +
  178.47 +typedef NoTraceBackend Tracing;
  178.48 +
  178.49 +#endif
  178.50 +
  178.51 +
   179.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   179.2 +++ b/src/share/vm/trace/trace.dtd	Mon Jun 10 11:30:51 2013 +0200
   179.3 @@ -0,0 +1,86 @@
   179.4 +<?xml version="1.0" encoding="UTF-8"?>
   179.5 +<!--
   179.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   179.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   179.8 +
   179.9 + This code is free software; you can redistribute it and/or modify it
  179.10 + under the terms of the GNU General Public License version 2 only, as
  179.11 + published by the Free Software Foundation.
  179.12 +
  179.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  179.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  179.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  179.16 + version 2 for more details (a copy is included in the LICENSE file that
  179.17 + accompanied this code).
  179.18 +
  179.19 + You should have received a copy of the GNU General Public License version
  179.20 + 2 along with this work; if not, write to the Free Software Foundation,
  179.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  179.22 +
  179.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  179.24 + or visit www.oracle.com if you need additional information or have any
  179.25 + questions.
  179.26 +  
  179.27 +-->
  179.28 +
  179.29 +<!ELEMENT trace (xi:include, relation_decls, events*, xi:include, xi:include)>
  179.30 +<!ELEMENT types (content_types, primary_types)>
  179.31 +<!ELEMENT content_types (content_type|struct_type)*>
  179.32 +<!ELEMENT content_type (value|structvalue|structarray|array)*>
  179.33 +<!ELEMENT struct_type (value*)>
  179.34 +<!ELEMENT primary_types (primary_type*)>
  179.35 +<!ELEMENT primary_type EMPTY>
  179.36 +<!ELEMENT relation_decls (relation_decl*)>
  179.37 +<!ELEMENT relation_decl EMPTY>
  179.38 +<!ELEMENT events (event|struct)*>
  179.39 +<!ELEMENT event (value|structvalue)*>
  179.40 +<!ELEMENT struct (value|structvalue)*>
  179.41 +<!ELEMENT value EMPTY>
  179.42 +<!ELEMENT structvalue EMPTY>
  179.43 +<!ELEMENT structarray EMPTY>
  179.44 +<!ELEMENT array EMPTY>
  179.45 +<!ATTLIST content_type  id             CDATA #REQUIRED
  179.46 +                        hr_name        CDATA #REQUIRED
  179.47 +                        type           CDATA #REQUIRED
  179.48 +                        jvm_type       CDATA #IMPLIED
  179.49 +                        builtin_type   CDATA #IMPLIED>
  179.50 +<!ATTLIST struct_type   id             CDATA #REQUIRED>
  179.51 +<!ATTLIST structarray   type           CDATA #REQUIRED
  179.52 +                        field          CDATA #REQUIRED
  179.53 +                        label          CDATA #REQUIRED>
  179.54 +<!ATTLIST primary_type  symbol         CDATA #REQUIRED
  179.55 +                        datatype       CDATA #REQUIRED
  179.56 +                        contenttype    CDATA #REQUIRED
  179.57 +                        type           CDATA #REQUIRED
  179.58 +                        sizeop         CDATA #REQUIRED>
  179.59 +<!ATTLIST relation_decl id             CDATA #REQUIRED
  179.60 +                        uri            CDATA #REQUIRED>
  179.61 +<!ATTLIST event         id             CDATA #REQUIRED
  179.62 +                        path           CDATA #REQUIRED
  179.63 +                        label          CDATA #REQUIRED
  179.64 +                        description    CDATA #IMPLIED
  179.65 +                        has_thread     CDATA "false"
  179.66 +                        ignore_check   CDATA "false"
  179.67 +                        has_stacktrace CDATA "false"
  179.68 +                        is_instant     CDATA "false"
  179.69 +                        is_constant    CDATA "false"
  179.70 +                        is_requestable CDATA "false">
  179.71 +<!ATTLIST struct        id             CDATA #REQUIRED>
  179.72 +<!ATTLIST value         type           CDATA #REQUIRED
  179.73 +                        field          CDATA #REQUIRED
  179.74 +                        label          CDATA #REQUIRED
  179.75 +                        description    CDATA #IMPLIED
  179.76 +                        relation       CDATA "NOT_AVAILABLE"
  179.77 +                        transition     CDATA "NONE">
  179.78 +<!ATTLIST array         type           CDATA #REQUIRED
  179.79 +                        field          CDATA #REQUIRED
  179.80 +                        label          CDATA #REQUIRED
  179.81 +                        description    CDATA #IMPLIED>
  179.82 +<!ATTLIST structarray   type           CDATA #REQUIRED
  179.83 +                        field          CDATA #REQUIRED
  179.84 +                        label          CDATA #REQUIRED
  179.85 +                        description    CDATA #IMPLIED>
  179.86 +<!ATTLIST structvalue   type           CDATA #REQUIRED
  179.87 +                        field          CDATA #REQUIRED
  179.88 +                        label          CDATA #REQUIRED
  179.89 +                        description    CDATA #IMPLIED>
   180.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   180.2 +++ b/src/share/vm/trace/trace.xml	Mon Jun 10 11:30:51 2013 +0200
   180.3 @@ -0,0 +1,367 @@
   180.4 +<?xml version="1.0" encoding="utf-8"?>
   180.5 +<!--
   180.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   180.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   180.8 +
   180.9 + This code is free software; you can redistribute it and/or modify it
  180.10 + under the terms of the GNU General Public License version 2 only, as
  180.11 + published by the Free Software Foundation.
  180.12 +
  180.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  180.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  180.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  180.16 + version 2 for more details (a copy is included in the LICENSE file that
  180.17 + accompanied this code).
  180.18 +
  180.19 + You should have received a copy of the GNU General Public License version
  180.20 + 2 along with this work; if not, write to the Free Software Foundation,
  180.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  180.22 +
  180.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  180.24 + or visit www.oracle.com if you need additional information or have any
  180.25 + questions.
  180.26 +
  180.27 +-->
  180.28 +
  180.29 +
  180.30 +<!DOCTYPE trace SYSTEM "trace.dtd" [
  180.31 +<!ENTITY % xinclude SYSTEM "xinclude.mod">
  180.32 +%xinclude;
  180.33 +]>
  180.34 +
  180.35 +<trace>
  180.36 +  <xi:include href="tracetypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude"/>
  180.37 +
  180.38 +  <relation_decls>
  180.39 +    <relation_decl id="GC_ID" uri="vm/gc/id"/>
  180.40 +    <relation_decl id="COMP_ID" uri="vm/compiler/id"/>
  180.41 +    <relation_decl id="SWEEP_ID" uri="vm/code_sweeper/id"/>
  180.42 +    <relation_decl id="JAVA_MONITOR_ADDRESS" uri="java/monitor/address"/>
  180.43 +  </relation_decls>
  180.44 +
  180.45 +<!--
  180.46 +
  180.47 +Events in the JVM are by default timed (it's more common)
  180.48 +Perhaps a little strange. Might change.
  180.49 +
  180.50 +EVENTS
  180.51 +
  180.52 +Declard with the 'event' tag.
  180.53 +
  180.54 +<value fields> can be one or more of
  180.55 +   value            - a simple primitive or constant type value
  180.56 +   structvalue      - value is a sub-struct. This type must be previously defined
  180.57 +                      with 'struct'
  180.58 +All these require you to declare type, field and label of the field. They also accept
  180.59 +an optional description of the field. If the meaning of the field is not obvious
  180.60 +from the label you should provide a description. If an event however is not actually
  180.61 +meant for end-users, you should probably _not_ write descriptions at all, since you
  180.62 +might just add more concepts the user has no notion of/interest in.
  180.63 +
  180.64 +Events should be modeled after what conceptual process you are expressing, _NOT_
  180.65 +from whatever data structures you might use inside the JVM for expressing a process.
  180.66 +
  180.67 +
  180.68 +STRUCT
  180.69 +
  180.70 +Declared with the 'struct' tag.
  180.71 +
  180.72 +Declares a structure type that can be used in other events.
  180.73 +
  180.74 +-->
  180.75 +
  180.76 +  <events>
  180.77 +    <event id="ThreadStart" path="java/thread_start" label="Java Thread Start"
  180.78 +           has_thread="true" is_instant="true">
  180.79 +      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
  180.80 +    </event>
  180.81 +
  180.82 +    <event id="ThreadEnd" path="java/thread_end" label="Java Thread End"
  180.83 +           has_thread="true" is_instant="true">
  180.84 +      <value type="JAVALANGTHREAD" field="javalangthread" label="Java Thread"/>
  180.85 +    </event>
  180.86 +
  180.87 +    <event id="ThreadSleep" path="java/thread_sleep" label="Java Thread Sleep"
  180.88 +            has_thread="true" has_stacktrace="true" is_instant="false">
  180.89 +      <value type="MILLIS" field="time" label="Sleep Time"/>
  180.90 +    </event>
  180.91 +
  180.92 +    <event id="ThreadPark" path="java/thread_park" label="Java Thread Park"
  180.93 +            has_thread="true" has_stacktrace="true" is_instant="false">
  180.94 +      <value type="CLASS" field="klass" label="Class Parked On"/>
  180.95 +      <value type="MILLIS" field="timeout" label="Park Timeout"/>
  180.96 +      <value type="ADDRESS" field="address" label="Address of Object Parked" relation="JAVA_MONITOR_ADDRESS"/>
  180.97 +    </event>
  180.98 +
  180.99 +    <event id="JavaMonitorEnter" path="java/monitor_enter" label="Java Monitor Blocked"
 180.100 +            has_thread="true" has_stacktrace="true" is_instant="false">
 180.101 +      <value type="CLASS" field="klass" label="Monitor Class"/>
 180.102 +      <value type="JAVALANGTHREAD" field="previousOwner" label="Previous Monitor Owner"/>
 180.103 +      <value type="ADDRESS" field="address" label="Monitor Address" relation="JAVA_MONITOR_ADDRESS"/>
 180.104 +    </event>
 180.105 +
 180.106 +    <event id="JavaMonitorWait" path="java/monitor_wait" label="Java Monitor Wait" description="Waiting on a Java monitor"
 180.107 +            has_thread="true" has_stacktrace="true" is_instant="false">
 180.108 +      <value type="CLASS" field="klass" label="Monitor Class" description="Class of object waited on"/>
 180.109 +      <value type="OSTHREAD" field="notifier" label="Notifier Thread" description="Notifying Thread"/>
 180.110 +      <value type="MILLIS" field="timeout" label="Timeout" description="Maximum wait time"/>
 180.111 +      <value type="BOOLEAN" field="timedOut" label="Timed Out" description="Wait has been timed out"/>
 180.112 +      <value type="ADDRESS" field="address" label="Monitor Address" description="Address of object waited on" relation="JAVA_MONITOR_ADDRESS"/>
 180.113 +    </event>
 180.114 +
 180.115 +    <event id="ClassLoad" path="vm/class/load" label="Class Load"
 180.116 +            has_thread="true" has_stacktrace="true" is_instant="false">
 180.117 +      <value type="CLASS" field="loadedClass" label="Loaded Class"/>
 180.118 +      <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
 180.119 +      <value type="CLASS" field="initiatingClassLoader" label="Initiating Class Loader"/>
 180.120 +    </event>
 180.121 +
 180.122 +    <event id="ClassUnload" path="vm/class/unload" label="Class Unload"
 180.123 +        has_thread="true" is_instant="true">
 180.124 +      <value type="CLASS" field="unloadedClass" label="Unloaded Class"/>
 180.125 +      <value type="CLASS" field="definingClassLoader" label="Defining Class Loader"/>
 180.126 +    </event>
 180.127 +
 180.128 +    <struct id="VirtualSpace">
 180.129 +      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the virtual space" />
 180.130 +      <value type="ADDRESS" field="committedEnd" label="Committed End Address" description="End address of the committed memory for the virtual space" />
 180.131 +      <value type="BYTES64" field="committedSize" label="Committed Size" description="Size of the committed memory for the virtual space" />
 180.132 +      <value type="ADDRESS" field="reservedEnd" label="Reserved End Address" description="End address of the reserved memory for the virtual space" />
 180.133 +      <value type="BYTES64" field="reservedSize" label="Reserved Size" description="Size of the reserved memory for the virtual space" />
 180.134 +    </struct>
 180.135 +
 180.136 +    <struct id="ObjectSpace">
 180.137 +      <value type="ADDRESS" field="start" label="Start Address" description="Start address of the space" />
 180.138 +      <value type="ADDRESS" field="end" label="End Address" description="End address of the space" />
 180.139 +      <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
 180.140 +      <value type="BYTES64" field="size" label="Size" description="Size of the space" />
 180.141 +    </struct>
 180.142 +
 180.143 +    <event id="GCHeapSummary" path="vm/gc/heap/summary" label="Heap Summary" is_instant="true">
 180.144 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.145 +      <value type="GCWHEN" field="when" label="When" />
 180.146 +      <structvalue type="VirtualSpace" field="heapSpace" label="Heap Space"/>
 180.147 +      <value type="BYTES64" field="heapUsed" label="Heap Used" description="Bytes allocated by objects in the heap"/>
 180.148 +    </event>
 180.149 +
 180.150 +    <struct id="MetaspaceSizes">
 180.151 +      <value type="BYTES64" field="capacity" label="Capacity" description="Total available memory to allocate in" />
 180.152 +      <value type="BYTES64" field="used" label="Used" description="Bytes allocated by objects in the space" />
 180.153 +      <value type="BYTES64" field="reserved" label="Reserved" description="Reserved memory for this space" />
 180.154 +    </struct>
 180.155 +
 180.156 +    <event id="MetaspaceSummary" path="vm/gc/heap/metaspace_summary" label="Metaspace Summary" is_instant="true">
 180.157 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.158 +      <value type="GCWHEN" field="when" label="When" />
 180.159 +      <structvalue type="MetaspaceSizes" field="metaspace" label="Total"/>
 180.160 +      <structvalue type="MetaspaceSizes" field="dataSpace" label="Data"/>
 180.161 +      <structvalue type="MetaspaceSizes" field="classSpace" label="Class"/>
 180.162 +    </event>
 180.163 +
 180.164 +    <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="ParallelScavengeHeap Summary" is_instant="true">
 180.165 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.166 +      <value type="GCWHEN" field="when" label="When" />
 180.167 +
 180.168 +      <structvalue type="VirtualSpace" field="oldSpace" label="Old Space"/>
 180.169 +      <structvalue type="ObjectSpace" field="oldObjectSpace" label="Old Object Space"/>
 180.170 +
 180.171 +      <structvalue type="VirtualSpace" field="youngSpace" label="Young Space"/>
 180.172 +      <structvalue type="ObjectSpace" field="edenSpace" label="Eden Space"/>
 180.173 +      <structvalue type="ObjectSpace" field="fromSpace" label="From Space"/>
 180.174 +      <structvalue type="ObjectSpace" field="toSpace" label="To Space"/>
 180.175 +    </event>
 180.176 +
 180.177 +    <event id="GCGarbageCollection" path="vm/gc/collector/garbage_collection" label="Garbage Collection"
 180.178 +           description="Garbage collection performed by the JVM">
 180.179 +      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
 180.180 +      <value type="GCNAME" field="name" label="Name" description="The name of the Garbage Collector" />
 180.181 +      <value type="GCCAUSE" field="cause" label="Cause" description="The reason for triggering this Garbage Collection" />
 180.182 +      <value type="RELATIVE_TICKS" field="sumOfPauses" label="Sum of Pauses" description="Sum of all the times in which Java execution was paused during the garbage collection" />
 180.183 +      <value type="RELATIVE_TICKS" field="longestPause" label="Longest Pause" description="Longest individual pause during the garbage collection" />
 180.184 +    </event>
 180.185 +
 180.186 +    <event id="GCParallelOld" path="vm/gc/collector/parold_garbage_collection" label="Parallel Old Garbage Collection"
 180.187 +           description="Extra information specific to Parallel Old Garbage Collections">
 180.188 +      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
 180.189 +      <value type="ADDRESS" field="densePrefix" label="Dense Prefix" description="The address of the dense prefix, used when compacting" />
 180.190 +    </event>
 180.191 +
 180.192 +    <event id="GCYoungGarbageCollection" path="vm/gc/collector/young_garbage_collection" label="Young Garbage Collection"
 180.193 +           description="Extra information specific to Young Garbage Collections">
 180.194 +      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
 180.195 +      <value type="UINT" field="tenuringThreshold" label="Tenuring Threshold" />
 180.196 +    </event>
 180.197 +
 180.198 +    <event id="GCOldGarbageCollection" path="vm/gc/collector/old_garbage_collection" label="Old Garbage Collection"
 180.199 +           description="Extra information specific to Old Garbage Collections">
 180.200 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.201 +    </event>
 180.202 +
 180.203 +    <event id="GCG1GarbageCollection" path="vm/gc/collector/g1_garbage_collection" label="G1 Garbage Collection"
 180.204 +           description="Extra information specific to G1 Garbage Collections">
 180.205 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.206 +      <value type="G1YCTYPE" field="type" label="Type" />
 180.207 +    </event>
 180.208 +
 180.209 +    <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Info" is_instant="true">
 180.210 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.211 +      <value type="UINT" field="cSetRegions" label="Collection Set Regions"/>
 180.212 +      <value type="BYTES64" field="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions"/>
 180.213 +      <value type="BYTES64" field="cSetUsedAfter" label="Collection Set After" description="Memory usage after GC in the collection set regions"/>
 180.214 +      <value type="UINT" field="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)"/>
 180.215 +      <value type="BYTES64" field="allocRegionsUsedBefore" label="Alloc Regions Before" description="Memory usage before GC in allocation regions"/>
 180.216 +      <value type="BYTES64" field="allocRegionsUsedAfter" label="Alloc Regions After" description="Memory usage after GC in allocation regions"/>
 180.217 +      <value type="BYTES64" field="bytesCopied" label="BytesCopied"/>
 180.218 +      <value type="UINT" field="regionsFreed" label="Regions Freed"/>
 180.219 +    </event>
 180.220 +
 180.221 +    <event id="GCReferenceStatistics" path="vm/gc/reference/statistics"
 180.222 +           label="GC Reference Statistics" is_instant="true"
 180.223 +           description="Total count of processed references during GC">
 180.224 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.225 +      <value type="REFERENCETYPE" field="type" label="Type" />
 180.226 +      <value type="ULONG" field="count" label="Total Count" />
 180.227 +    </event>
 180.228 +
 180.229 +    <struct id="CopyFailed">
 180.230 +      <value type="ULONG" field="objectCount" label="Object Count"/>
 180.231 +      <value type="BYTES64" field="firstSize" label="First Failed Object Size"/>
 180.232 +      <value type="BYTES64" field="smallestSize" label="Smallest Failed Object Size"/>
 180.233 +      <value type="BYTES64" field="totalSize" label="Total Object Size"/>
 180.234 +    </struct>
 180.235 +
 180.236 +    <event id="ObjectCountAfterGC" path="vm/gc/detailed/object_count_after_gc" is_instant="true" label="Object Count after GC">
 180.237 +      <value type="UINT" field="gcId"  label="GC ID" relation="GC_ID" />
 180.238 +      <value type="CLASS" field="class" label="Class" />
 180.239 +      <value type="LONG" field="count" label="Count" />
 180.240 +      <value type="BYTES64" field="totalSize" label="Total Size" />
 180.241 +    </event>
 180.242 +
 180.243 +    <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true"
 180.244 +           description="Promotion of an object failed">
 180.245 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.246 +      <structvalue type="CopyFailed" field="data" label="data"/>
 180.247 +      <value type="OSTHREAD" field="thread" label="Running thread"/>
 180.248 +    </event>
 180.249 +
 180.250 +    <event id="EvacuationFailed" path="vm/gc/detailed/evacuation_failed" label="Evacuation Failed" is_instant="true"
 180.251 +           description="Evacuation of an object failed">
 180.252 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.253 +      <structvalue type="CopyFailed" field="data" label="data"/>
 180.254 +    </event>
 180.255 +
 180.256 +    <event id="ConcurrentModeFailure" path="vm/gc/detailed/concurrent_mode_failure" label="Concurrent Mode Failure"
 180.257 +           is_instant="true" description="Concurrent Mode failed">
 180.258 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.259 +    </event>
 180.260 +
 180.261 +    <event id="GCPhasePause" path="vm/gc/phases/pause" label="GC Phase Pause">
 180.262 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.263 +      <value type="UTF8" field="name" label="Name" />
 180.264 +    </event>
 180.265 +
 180.266 +    <event id="GCPhasePauseLevel1" path="vm/gc/phases/pause_level_1" label="GC Phase Pause Level 1">
 180.267 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.268 +      <value type="UTF8" field="name" label="Name" />
 180.269 +    </event>
 180.270 +
 180.271 +    <event id="GCPhasePauseLevel2" path="vm/gc/phases/pause_level_2" label="GC Phase Pause Level 2">
 180.272 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.273 +      <value type="UTF8" field="name" label="Name" />
 180.274 +    </event>
 180.275 +
 180.276 +    <event id="GCPhasePauseLevel3" path="vm/gc/phases/pause_level_3" label="GC Phase Pause Level 3">
 180.277 +      <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/>
 180.278 +      <value type="UTF8" field="name" label="Name" />
 180.279 +    </event>
 180.280 +
 180.281 +    <!-- Compiler events -->
 180.282 +
 180.283 +    <event id="Compilation" path="vm/compiler/compilation" label="Compilation"
 180.284 +         has_thread="true" is_requestable="false" is_constant="false">
 180.285 +      <value type="METHOD" field="method" label="Java Method"/>
 180.286 +      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
 180.287 +      <value type="USHORT" field="compileLevel" label="Compilation Level"/>
 180.288 +      <value type="BOOLEAN" field="succeded" label="Succeeded"/>
 180.289 +      <value type="BOOLEAN" field="isOsr" label="On Stack Replacement"/>
 180.290 +      <value type="BYTES" field="codeSize" label="Compiled Code Size"/>
 180.291 +      <value type="BYTES" field="inlinedBytes" label="Inlined Code Size"/>
 180.292 +    </event>
 180.293 +
 180.294 +    <event id="CompilerPhase" path="vm/compiler/phase" label="Compiler Phase"
 180.295 +            has_thread="true" is_requestable="false" is_constant="false">
 180.296 +      <value type="COMPILERPHASETYPE" field="phase" label="Compile Phase"/>
 180.297 +      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
 180.298 +      <value type="USHORT" field="phaseLevel" label="Phase Level"/>
 180.299 +    </event>
 180.300 +
 180.301 +    <event id="CompilerFailure" path="vm/compiler/failure" label="Compilation Failure"
 180.302 +            has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
 180.303 +      <value type="UTF8" field="failure" label="Message"/>
 180.304 +      <value type="UINT" field="compileID" label="Compilation ID" relation="COMP_ID"/>
 180.305 +    </event>
 180.306 +
 180.307 +    <!-- Code sweeper events -->
 180.308 +
 180.309 +    <event id="SweepCodeCache" path="vm/code_sweeper/sweep" label="Sweep Code Cache"
 180.310 +       has_thread="true" is_requestable="false" is_constant="false">
 180.311 +      <value type="INTEGER" field="sweepIndex" label="Sweep Index" relation="SWEEP_ID"/>
 180.312 +      <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/>
 180.313 +      <value type="UINT" field="sweptCount" label="Methods Swept"/>
 180.314 +      <value type="UINT" field="flushedCount" label="Methods Flushed"/>
 180.315 +      <value type="UINT" field="markedCount" label="Methods Reclaim"/>
 180.316 +      <value type="UINT" field="zombifiedCount" label="Methods Zombified"/>
 180.317 +    </event>
 180.318 +
 180.319 +    <event id="CleanCodeCache" path="vm/code_sweeper/clean" label="Clean Code Cache"
 180.320 +             description="Clean code cache from oldest methods"
 180.321 +             has_thread="true" is_requestable="false" is_constant="false">
 180.322 +      <value type="UINT" field="disconnectedCount" label="Methods Disconnected"/>
 180.323 +      <value type="UINT" field="madeNonEntrantCount" label="Methods Made Non-Entrant"/>
 180.324 +    </event>
 180.325 +
 180.326 +    <!-- Code cache events -->
 180.327 +
 180.328 +    <event id="CodeCacheFull" path="vm/code_cache/full" label="Code Cache Full"
 180.329 +         has_thread="true" is_requestable="false" is_constant="false" is_instant="true">
 180.330 +      <value type="ADDRESS" field="startAddress" label="Start Address"/>
  180.331 +      <value type="ADDRESS" field="commitedTopAddress" label="Committed Top"/>
 180.332 +      <value type="ADDRESS" field="reservedTopAddress" label="Reserved Top"/>
 180.333 +      <value type="INTEGER" field="entryCount" label="Entries"/>
 180.334 +      <value type="INTEGER" field="methodCount" label="Methods"/>
 180.335 +      <value type="INTEGER" field="adaptorCount" label="Adaptors"/>
 180.336 +      <value type="BYTES64" field="unallocatedCapacity" label="Unallocated"/>
 180.337 +      <value type="INTEGER" field="fullCount" label="Full Count"/>
 180.338 +    </event>
 180.339 +
 180.340 +    <event id="ExecuteVMOperation" path="vm/runtime/execute_vm_operation" label="VM Operation"
 180.341 +        description="Execution of a VM Operation" has_thread="true">
 180.342 +      <value type="VMOPERATIONTYPE" field="operation" label="Operation" />
  180.343 +      <value type="BOOLEAN" field="safepoint" label="At Safepoint" description="If the operation occurred at a safepoint."/>
 180.344 +      <value type="BOOLEAN" field="blocking" label="Caller Blocked" description="If the calling thread was blocked until the operation was complete."/>
 180.345 +      <value type="OSTHREAD" field="caller" label="Caller" transition="FROM" description="Thread requesting operation. If non-blocking, will be set to 0 indicating thread is unknown."/>
 180.346 +    </event>
 180.347 +
 180.348 +    <!-- Allocation events -->
 180.349 +    <event id="AllocObjectInNewTLAB" path="java/object_alloc_in_new_TLAB" label="Allocation in new TLAB"
 180.350 +        description="Allocation in new Thread Local Allocation Buffer" has_thread="true" has_stacktrace="true" is_instant="true">
 180.351 +      <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
 180.352 +      <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
 180.353 +      <value type="BYTES64" field="tlabSize" label="TLAB Size"/>
 180.354 +    </event>
 180.355 +
 180.356 +    <event id="AllocObjectOutsideTLAB" path="java/object_alloc_outside_TLAB" label="Allocation outside TLAB"
 180.357 +        description="Allocation outside Thread Local Allocation Buffers" has_thread="true" has_stacktrace="true" is_instant="true">
 180.358 +      <value type="CLASS" field="class" label="Class" description="Class of allocated object"/>
 180.359 +      <value type="BYTES64" field="allocationSize" label="Allocation Size"/>
 180.360 +    </event>
 180.361 +  </events>
 180.362 +
 180.363 +  <xi:include href="../../../closed/share/vm/trace/traceeventtypes.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
 180.364 +    <xi:fallback/>
 180.365 +  </xi:include>
 180.366 +
 180.367 +  <xi:include href="../../../closed/share/vm/trace/traceevents.xml" xmlns:xi="http://www.w3.org/2001/XInclude">
 180.368 +    <xi:fallback/>
 180.369 +  </xi:include>
 180.370 +</trace>
   181.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   181.2 +++ b/src/share/vm/trace/traceBackend.hpp	Mon Jun 10 11:30:51 2013 +0200
   181.3 @@ -0,0 +1,70 @@
   181.4 +/*
   181.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   181.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   181.7 + *
   181.8 + * This code is free software; you can redistribute it and/or modify it
   181.9 + * under the terms of the GNU General Public License version 2 only, as
  181.10 + * published by the Free Software Foundation.
  181.11 + *
  181.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  181.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  181.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  181.15 + * version 2 for more details (a copy is included in the LICENSE file that
  181.16 + * accompanied this code).
  181.17 + *
  181.18 + * You should have received a copy of the GNU General Public License version
  181.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  181.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  181.21 + *
  181.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  181.23 + * or visit www.oracle.com if you need additional information or have any
  181.24 + * questions.
  181.25 + *
  181.26 + */
  181.27 +#ifndef SHARE_VM_TRACE_TRACEBACKEND_HPP
  181.28 +#define SHARE_VM_TRACE_TRACEBACKEND_HPP
  181.29 +
  181.30 +#include "utilities/macros.hpp"
  181.31 +
  181.32 +#if INCLUDE_TRACE
  181.33 +
  181.34 +#include "runtime/globals.hpp"
  181.35 +#include "runtime/os.hpp"
  181.36 +#include "trace/traceTime.hpp"
  181.37 +#include "tracefiles/traceEventIds.hpp"
  181.38 +
  181.39 +class TraceBackend {
  181.40 +public:
  181.41 +  static bool enabled(void) {
  181.42 +    return EnableTracing;
  181.43 +  }
  181.44 +
  181.45 +  static bool is_event_enabled(TraceEventId id) {
  181.46 +    return enabled();
  181.47 +  }
  181.48 +
  181.49 +  static TracingTime time() {
  181.50 +    return os::elapsed_counter();
  181.51 +  }
  181.52 +
  181.53 +  static TracingTime time_adjustment(jlong time) {
  181.54 +    return time;
  181.55 +  }
  181.56 +
  181.57 +  static void on_unloading_classes(void) {
  181.58 +  }
  181.59 +};
  181.60 +
  181.61 +class TraceThreadData {
  181.62 +public:
  181.63 +    TraceThreadData() {}
  181.64 +};
  181.65 +
  181.66 +typedef TraceBackend Tracing;
  181.67 +
  181.68 +#else /* INCLUDE_TRACE */
  181.69 +
  181.70 +#include "trace/noTraceBackend.hpp"
  181.71 +
  181.72 +#endif /* INCLUDE_TRACE */
  181.73 +#endif /* SHARE_VM_TRACE_TRACEBACKEND_HPP */
   182.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   182.2 +++ b/src/share/vm/trace/traceDataTypes.hpp	Mon Jun 10 11:30:51 2013 +0200
   182.3 @@ -0,0 +1,67 @@
   182.4 +/*
   182.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   182.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   182.7 + *
   182.8 + * This code is free software; you can redistribute it and/or modify it
   182.9 + * under the terms of the GNU General Public License version 2 only, as
  182.10 + * published by the Free Software Foundation.
  182.11 + *
  182.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  182.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  182.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  182.15 + * version 2 for more details (a copy is included in the LICENSE file that
  182.16 + * accompanied this code).
  182.17 + *
  182.18 + * You should have received a copy of the GNU General Public License version
  182.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  182.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  182.21 + *
  182.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  182.23 + * or visit www.oracle.com if you need additional information or have any
  182.24 + * questions.
  182.25 + *
  182.26 + */
  182.27 +
  182.28 +#ifndef SHARE_VM_TRACE_TRACEDATATYPES_HPP
  182.29 +#define SHARE_VM_TRACE_TRACEDATATYPES_HPP
  182.30 +
  182.31 +#include <stddef.h>
  182.32 +
  182.33 +#include "utilities/globalDefinitions.hpp"
  182.34 +
  182.35 +enum {
  182.36 +  CONTENT_TYPE_NONE             = 0,
  182.37 +  CONTENT_TYPE_BYTES            = 1,
  182.38 +  CONTENT_TYPE_EPOCHMILLIS      = 2,
  182.39 +  CONTENT_TYPE_MILLIS           = 3,
  182.40 +  CONTENT_TYPE_NANOS            = 4,
  182.41 +  CONTENT_TYPE_TICKS            = 5,
  182.42 +  CONTENT_TYPE_ADDRESS          = 6,
  182.43 +
  182.44 +  CONTENT_TYPE_OSTHREAD,
  182.45 +  CONTENT_TYPE_JAVALANGTHREAD,
  182.46 +  CONTENT_TYPE_STACKTRACE,
  182.47 +  CONTENT_TYPE_CLASS,
  182.48 +  CONTENT_TYPE_PERCENTAGE,
  182.49 +
  182.50 +  JVM_CONTENT_TYPES_START       = 30,
  182.51 +  JVM_CONTENT_TYPES_END         = 100
  182.52 +};
  182.53 +
  182.54 +enum ReservedEvent {
  182.55 +  EVENT_PRODUCERS,
  182.56 +  EVENT_CHECKPOINT,
  182.57 +  EVENT_BUFFERLOST,
  182.58 +
  182.59 +  NUM_RESERVED_EVENTS
  182.60 +};
  182.61 +
  182.62 +typedef enum ReservedEvent ReservedEvent;
  182.63 +
  182.64 +typedef u8 classid;
  182.65 +typedef u8 stacktraceid;
  182.66 +typedef u8 methodid;
  182.67 +typedef u8 fieldid;
  182.68 +
  182.69 +#endif // SHARE_VM_TRACE_TRACEDATATYPES_HPP
  182.70 +
   183.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   183.2 +++ b/src/share/vm/trace/traceEvent.hpp	Mon Jun 10 11:30:51 2013 +0200
   183.3 @@ -0,0 +1,150 @@
   183.4 +/*
   183.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   183.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   183.7 + *
   183.8 + * This code is free software; you can redistribute it and/or modify it
   183.9 + * under the terms of the GNU General Public License version 2 only, as
  183.10 + * published by the Free Software Foundation.
  183.11 + *
  183.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  183.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  183.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  183.15 + * version 2 for more details (a copy is included in the LICENSE file that
  183.16 + * accompanied this code).
  183.17 + *
  183.18 + * You should have received a copy of the GNU General Public License version
  183.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  183.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  183.21 + *
  183.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  183.23 + * or visit www.oracle.com if you need additional information or have any
  183.24 + * questions.
  183.25 + *
  183.26 + */
  183.27 +
  183.28 +#ifndef SHARE_VM_TRACE_TRACEEVENT_HPP
  183.29 +#define SHARE_VM_TRACE_TRACEEVENT_HPP
  183.30 +
  183.31 +enum EventStartTime {
  183.32 +  UNTIMED,
  183.33 +  TIMED
  183.34 +};
  183.35 +
  183.36 +#include "utilities/macros.hpp"
  183.37 +
  183.38 +#if INCLUDE_TRACE
  183.39 +
  183.40 +#include "trace/traceBackend.hpp"
  183.41 +#include "trace/tracing.hpp"
  183.42 +#include "tracefiles/traceEventIds.hpp"
  183.43 +#include "tracefiles/traceTypes.hpp"
  183.44 +
  183.45 +template<typename T>
  183.46 +class TraceEvent : public StackObj {
  183.47 + protected:
  183.48 +  jlong _startTime;
  183.49 +  jlong _endTime;
  183.50 +
  183.51 + private:
  183.52 +  bool _started;
  183.53 +#ifdef ASSERT
  183.54 +  bool _committed;
  183.55 +  bool _cancelled;
  183.56 + protected:
  183.57 +  bool _ignore_check;
  183.58 +#endif
  183.59 +
  183.60 + public:
  183.61 +  TraceEvent(EventStartTime timing=TIMED) :
  183.62 +    _startTime(0),
  183.63 +    _endTime(0),
  183.64 +    _started(false)
  183.65 +#ifdef ASSERT
  183.66 +    ,
  183.67 +    _committed(false),
  183.68 +    _cancelled(false),
  183.69 +    _ignore_check(false)
  183.70 +#endif
  183.71 +  {
  183.72 +    if (T::is_enabled()) {
  183.73 +      _started = true;
  183.74 +      if (timing == TIMED && !T::isInstant) {
  183.75 +        static_cast<T *>(this)->set_starttime(Tracing::time());
  183.76 +      }
  183.77 +    }
  183.78 +  }
  183.79 +
  183.80 +  static bool is_enabled() {
  183.81 +    return Tracing::is_event_enabled(T::eventId);
  183.82 +  }
  183.83 +
  183.84 +  bool should_commit() {
  183.85 +    return _started;
  183.86 +  }
  183.87 +
  183.88 +  void ignoreCheck() {
  183.89 +    DEBUG_ONLY(_ignore_check = true);
  183.90 +  }
  183.91 +
  183.92 +  void commit() {
  183.93 +    if (!should_commit()) {
  183.94 +        cancel();
  183.95 +        return;
  183.96 +    }
  183.97 +    if (_endTime == 0) {
  183.98 +      static_cast<T*>(this)->set_endtime(Tracing::time());
  183.99 +    }
 183.100 +    if (static_cast<T*>(this)->should_write()) {
 183.101 +      static_cast<T*>(this)->writeEvent();
 183.102 +    }
 183.103 +    set_commited();
 183.104 +  }
 183.105 +
 183.106 +  void set_starttime(jlong time) {
 183.107 +    _startTime = time;
 183.108 +  }
 183.109 +
 183.110 +  void set_endtime(jlong time) {
 183.111 +    _endTime = time;
 183.112 +  }
 183.113 +
 183.114 +  TraceEventId id() const {
 183.115 +    return T::eventId;
 183.116 +  }
 183.117 +
 183.118 +  bool is_instant() const {
 183.119 +    return T::isInstant;
 183.120 +  }
 183.121 +
 183.122 +  bool is_requestable() const {
 183.123 +    return T::isRequestable;
 183.124 +  }
 183.125 +
 183.126 +  bool has_thread() const {
 183.127 +    return T::hasThread;
 183.128 +  }
 183.129 +
 183.130 +  bool has_stacktrace() const {
 183.131 +    return T::hasStackTrace;
 183.132 +  }
 183.133 +
 183.134 +  void cancel() {
 183.135 +    assert(!_committed && !_cancelled, "event was already committed/cancelled");
 183.136 +    DEBUG_ONLY(_cancelled = true);
 183.137 +  }
 183.138 +
 183.139 +  void set_commited() {
 183.140 +    assert(!_committed, "event has already been committed");
 183.141 +    DEBUG_ONLY(_committed = true);
 183.142 +  }
 183.143 +
 183.144 +  ~TraceEvent() {
 183.145 +    if (_started) {
 183.146 +      assert(_ignore_check || _committed || _cancelled, "event was not committed/cancelled");
 183.147 +    }
 183.148 +  }
 183.149 +};
 183.150 +
 183.151 +#endif /* INCLUDE_TRACE */
 183.152 +
 183.153 +#endif /* SHARE_VM_TRACE_TRACEEVENT_HPP */
   184.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   184.2 +++ b/src/share/vm/trace/traceEventClasses.xsl	Mon Jun 10 11:30:51 2013 +0200
   184.3 @@ -0,0 +1,246 @@
   184.4 +<?xml version="1.0" encoding="utf-8"?>
   184.5 +<!--
   184.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   184.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   184.8 +
   184.9 + This code is free software; you can redistribute it and/or modify it
  184.10 + under the terms of the GNU General Public License version 2 only, as
  184.11 + published by the Free Software Foundation.
  184.12 +
  184.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  184.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  184.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  184.16 + version 2 for more details (a copy is included in the LICENSE file that
  184.17 + accompanied this code).
  184.18 +
  184.19 + You should have received a copy of the GNU General Public License version
  184.20 + 2 along with this work; if not, write to the Free Software Foundation,
  184.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  184.22 +
  184.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  184.24 + or visit www.oracle.com if you need additional information or have any
  184.25 + questions.
  184.26 +-->
  184.27 +
  184.28 +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  184.29 +<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
  184.30 +<xsl:import href="xsl_util.xsl"/>
  184.31 +
  184.32 +<xsl:template match="/">
  184.33 +  <xsl:call-template name="file-header"/>
  184.34 +
  184.35 +#ifndef TRACEFILES_TRACEEVENTCLASSES_HPP
  184.36 +#define TRACEFILES_TRACEEVENTCLASSES_HPP
  184.37 +
  184.38 +// On purpose outside the INCLUDE_TRACE
  184.39 +// Some parts of traceEvent.hpp are used outside of
  184.40 +// INCLUDE_TRACE
  184.41 +
  184.42 +#include "memory/resourceArea.hpp"
  184.43 +#include "tracefiles/traceTypes.hpp"
  184.44 +#include "trace/traceEvent.hpp"
  184.45 +#include "utilities/macros.hpp"
  184.46 +
  184.47 +#if INCLUDE_TRACE
  184.48 +
  184.49 +
  184.50 +#include "trace/traceStream.hpp"
  184.51 +#include "utilities/ostream.hpp"
  184.52 +
  184.53 +  <xsl:apply-templates select="trace/events/struct" mode="trace"/>
  184.54 +  <xsl:apply-templates select="trace/events/event" mode="trace"/>
  184.55 +
  184.56 +#else
  184.57 +
  184.58 +class TraceEvent {
  184.59 +public:
  184.60 +  TraceEvent() {}
  184.61 +  void set_starttime(jlong time) const {}
  184.62 +  void set_endtime(jlong time) const {}
  184.63 +  bool should_commit() const { return false; }
  184.64 +  void commit() const {}
  184.65 +};
  184.66 +
  184.67 +  <xsl:apply-templates select="trace/events/struct" mode="empty"/>
  184.68 +  <xsl:apply-templates select="trace/events/event" mode="empty"/>
  184.69 +
  184.70 +#endif
  184.71 +
  184.72 +#endif
  184.73 +</xsl:template>
  184.74 +
  184.75 +<xsl:template match="struct" mode="trace">
  184.76 +struct TraceStruct<xsl:value-of select="@id"/>
  184.77 +{
  184.78 +private:
  184.79 +<xsl:apply-templates select="value" mode="write-fields"/>
  184.80 +public:
  184.81 +<xsl:apply-templates select="value" mode="write-setters"/>
  184.82 +
  184.83 +  void writeStruct(TraceStream&amp; ts) {
  184.84 +<xsl:apply-templates select="value" mode="write-data"/>
  184.85 +  }
  184.86 +};
  184.87 +
  184.88 +</xsl:template>
  184.89 +
  184.90 +<xsl:template match="struct" mode="empty">
  184.91 +struct TraceStruct<xsl:value-of select="@id"/> 
  184.92 +{
  184.93 +public:
  184.94 +<xsl:apply-templates select="value" mode="write-empty-setters"/>
  184.95 +};
  184.96 +</xsl:template>
  184.97 +
  184.98 +
  184.99 +<xsl:template match="event" mode="empty">
 184.100 +  <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent')"/>
 184.101 +{
 184.102 + public:
 184.103 +<xsl:value-of select="concat('  Event', @id, '(bool ignore=true) {}')"/>
 184.104 +<xsl:text>
 184.105 +</xsl:text>
 184.106 +
 184.107 +<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-empty-setters"/>
 184.108 +};
 184.109 +
 184.110 +</xsl:template>
 184.111 +
 184.112 +
 184.113 +<xsl:template match="event" mode="trace">
 184.114 +  <xsl:value-of select="concat('class Event', @id, ' : public TraceEvent&lt;Event', @id, '&gt;')"/>
 184.115 +{
 184.116 + public:
 184.117 +  static const bool hasThread = <xsl:value-of select="@has_thread"/>;
 184.118 +  static const bool hasStackTrace = <xsl:value-of select="@has_stacktrace"/>;
 184.119 +  static const bool isInstant = <xsl:value-of select="@is_instant"/>;
 184.120 +  static const bool isRequestable = <xsl:value-of select="@is_requestable"/>;
 184.121 +  static const TraceEventId eventId = <xsl:value-of select="concat('Trace', @id, 'Event')"/>;
 184.122 +
 184.123 + private:
 184.124 +<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-fields"/>
 184.125 +
 184.126 +  void writeEventContent(void) {
 184.127 +    TraceStream ts(*tty);
 184.128 +    ts.print("<xsl:value-of select="@label"/>: [");
 184.129 +<xsl:apply-templates select="value|structvalue" mode="write-data"/>
 184.130 +    ts.print("]\n");
 184.131 +  }
 184.132 +
 184.133 + public:
 184.134 +<xsl:apply-templates select="value|structvalue|transition_value|relation" mode="write-setters"/>
 184.135 +
 184.136 +  bool should_write(void) {
 184.137 +    return true;
 184.138 +  }
 184.139 +<xsl:text>
 184.140 +
 184.141 +</xsl:text>
 184.142 +  <xsl:value-of select="concat('  Event', @id, '(EventStartTime timing=TIMED) : TraceEvent&lt;Event', @id, '&gt;(timing) {}', $newline)"/>
 184.143 +  void writeEvent(void) {
 184.144 +    ResourceMark rm;
 184.145 +    if (UseLockedTracing) {
 184.146 +      ttyLocker lock;
 184.147 +      writeEventContent();
 184.148 +    } else {
 184.149 +      writeEventContent();
 184.150 +    }
 184.151 +  }
 184.152 +};
 184.153 +
 184.154 +</xsl:template>
 184.155 +
 184.156 +<xsl:template match="value|transition_value|relation" mode="write-empty-setters">
 184.157 +  <xsl:param name="cls"/>
 184.158 +  <xsl:variable name="type" select="@type"/>
 184.159 +  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
 184.160 +  <xsl:value-of select="concat('  void set_', @field, '(', $wt, ' value) { }')"/>
 184.161 +  <xsl:if test="position() != last()">
 184.162 +    <xsl:text>
 184.163 +</xsl:text>
 184.164 +  </xsl:if>
 184.165 +</xsl:template>
 184.166 +
 184.167 +<xsl:template match="structvalue" mode="write-empty-setters">
 184.168 +  <xsl:param name="cls"/>
 184.169 +  <xsl:value-of select="concat('  void set_', @field, '(const TraceStruct', @type, '&amp; value) { }')"/>
 184.170 +  <xsl:if test="position() != last()">
 184.171 +    <xsl:text>
 184.172 +</xsl:text>
 184.173 +  </xsl:if>
 184.174 +</xsl:template>
 184.175 +
 184.176 +
 184.177 +<xsl:template match="value[@type='TICKS']" mode="write-setters">
 184.178 +#if INCLUDE_TRACE
 184.179 +  <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
 184.180 +#else
 184.181 +  <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
 184.182 +#endif
 184.183 +</xsl:template>
 184.184 +
 184.185 +<xsl:template match="value[@type='RELATIVE_TICKS']" mode="write-setters">
 184.186 +#if INCLUDE_TRACE
 184.187 +  <xsl:value-of select="concat('void set_', @field, '(jlong time) { _', @field, ' = time; }')"/>
 184.188 +#else
 184.189 +  <xsl:value-of select="concat('void set_', @field, '(jlong ignore) {}')"/>
 184.190 +#endif
 184.191 +</xsl:template>
 184.192 +
 184.193 +<xsl:template match="value" mode="write-fields">
 184.194 +  <xsl:variable name="type" select="@type"/>
 184.195 +  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
 184.196 +  <xsl:value-of select="concat('  ', $wt, ' _', @field, ';')"/>
 184.197 +  <xsl:if test="position() != last()">
 184.198 +    <xsl:text> 
 184.199 +</xsl:text>
 184.200 +  </xsl:if>
 184.201 +</xsl:template>
 184.202 +
 184.203 +<xsl:template match="structvalue" mode="write-fields">
 184.204 +  <xsl:value-of select="concat('  TraceStruct', @type, ' _', @field, ';')"/>
 184.205 +  <xsl:text>
 184.206 +</xsl:text>
 184.207 +</xsl:template>
 184.208 +
 184.209 +<xsl:template match="value|transition_value|relation" mode="write-setters">
 184.210 +  <xsl:param name="cls"/>
 184.211 +  <xsl:variable name="type" select="@type"/>
 184.212 +  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@type"/>
 184.213 +  <xsl:value-of select="concat('  void set_', @field, '(', $wt, ' value) { this->_', @field, ' = value; }')"/>
 184.214 +  <xsl:if test="position() != last()">
 184.215 +    <xsl:text>
 184.216 +</xsl:text>
 184.217 +  </xsl:if>
 184.218 +</xsl:template>
 184.219 +
 184.220 +<xsl:template match="structvalue" mode="write-setters">
 184.221 +  <xsl:param name="cls"/>
 184.222 +  <xsl:value-of select="concat('  void set_', @field, '(const TraceStruct', @type, '&amp; value) { this->_', @field, ' = value; }')"/>
 184.223 +  <xsl:if test="position() != last()">
 184.224 +    <xsl:text>
 184.225 +</xsl:text>
 184.226 +  </xsl:if>
 184.227 +</xsl:template>
 184.228 +
 184.229 +<xsl:template match="value" mode="write-data">
 184.230 +  <xsl:variable name="type" select="@type"/>
 184.231 +  <xsl:variable name="wt" select="//primary_type[@symbol=$type]/@writetype"/>
 184.232 +  <xsl:value-of select="concat('    ts.print_val(&quot;', @label, '&quot;, _', @field, ');')"/>
 184.233 +  <xsl:if test="position() != last()">
 184.234 +    <xsl:text>
 184.235 +    ts.print(", ");
 184.236 +</xsl:text>
 184.237 +  </xsl:if>
 184.238 +</xsl:template>
 184.239 +
 184.240 +<xsl:template match="structvalue" mode="write-data">
 184.241 +  <xsl:value-of select="concat('    _', @field, '.writeStruct(ts);')"/>
 184.242 +  <xsl:if test="position() != last()">
 184.243 +    <xsl:text>
 184.244 +    ts.print(", ");
 184.245 +</xsl:text>
 184.246 +  </xsl:if>
 184.247 +</xsl:template>
 184.248 +
 184.249 +</xsl:stylesheet>
   185.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   185.2 +++ b/src/share/vm/trace/traceEventIds.xsl	Mon Jun 10 11:30:51 2013 +0200
   185.3 @@ -0,0 +1,74 @@
   185.4 +<?xml version="1.0" encoding="utf-8"?>
   185.5 +<!--
   185.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   185.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   185.8 +
   185.9 + This code is free software; you can redistribute it and/or modify it
  185.10 + under the terms of the GNU General Public License version 2 only, as
  185.11 + published by the Free Software Foundation.
  185.12 +
  185.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  185.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  185.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  185.16 + version 2 for more details (a copy is included in the LICENSE file that
  185.17 + accompanied this code).
  185.18 +
  185.19 + You should have received a copy of the GNU General Public License version
  185.20 + 2 along with this work; if not, write to the Free Software Foundation,
  185.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  185.22 +
  185.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  185.24 + or visit www.oracle.com if you need additional information or have any
  185.25 + questions.
  185.26 +-->
  185.27 +
  185.28 +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  185.29 +<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
  185.30 +<xsl:import href="xsl_util.xsl"/>
  185.31 +
  185.32 +<xsl:template match="/">
  185.33 +  <xsl:call-template name="file-header"/>
  185.34 +
  185.35 +#ifndef TRACEFILES_JFREVENTIDS_HPP
  185.36 +#define TRACEFILES_JFREVENTIDS_HPP
  185.37 +
  185.38 +#include "utilities/macros.hpp"
  185.39 +
  185.40 +#if INCLUDE_TRACE
  185.41 +
  185.42 +#include "trace/traceDataTypes.hpp"
  185.43 +
  185.44 +/**
  185.45 + * Enum of the event types in the JVM
  185.46 + */
  185.47 +enum TraceEventId {
  185.48 +  _traceeventbase = (NUM_RESERVED_EVENTS-1), // Make sure we start at right index.
  185.49 +  
  185.50 +  // Events -> enum entry
  185.51 +<xsl:for-each select="trace/events/event">
  185.52 +  <xsl:value-of select="concat('  Trace', @id, 'Event,', $newline)"/>
  185.53 +</xsl:for-each>
  185.54 +  MaxTraceEventId
  185.55 +};
  185.56 +
  185.57 +/**
  185.58 + * Struct types in the JVM
  185.59 + */
  185.60 +enum TraceStructId {
  185.61 +<xsl:for-each select="trace/types/content_types/*">
  185.62 +  <xsl:value-of select="concat('  Trace', @id, 'Struct,', $newline)"/>
  185.63 +</xsl:for-each>
  185.64 +<xsl:for-each select="trace/events/*">
  185.65 +  <xsl:value-of select="concat('  Trace', @id, 'Struct,', $newline)"/>
  185.66 +</xsl:for-each>
  185.67 +  MaxTraceStructId
  185.68 +};
  185.69 +
  185.70 +typedef enum TraceEventId  TraceEventId;
  185.71 +typedef enum TraceStructId TraceStructId;
  185.72 +
  185.73 +#endif
  185.74 +#endif
  185.75 +</xsl:template>
  185.76 +
  185.77 +</xsl:stylesheet>
   186.1 --- a/src/share/vm/trace/traceEventTypes.hpp	Fri Jun 07 09:33:01 2013 -0700
   186.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
   186.3 @@ -1,30 +0,0 @@
   186.4 -/*
   186.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   186.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   186.7 - *
   186.8 - * This code is free software; you can redistribute it and/or modify it
   186.9 - * under the terms of the GNU General Public License version 2 only, as
  186.10 - * published by the Free Software Foundation.
  186.11 - *
  186.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
  186.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  186.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  186.15 - * version 2 for more details (a copy is included in the LICENSE file that
  186.16 - * accompanied this code).
  186.17 - *
  186.18 - * You should have received a copy of the GNU General Public License version
  186.19 - * 2 along with this work; if not, write to the Free Software Foundation,
  186.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  186.21 - *
  186.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  186.23 - * or visit www.oracle.com if you need additional information or have any
  186.24 - * questions.
  186.25 - *
  186.26 - */
  186.27 -
  186.28 -#ifndef SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
  186.29 -#define SHARE_VM_TRACE_TRACE_EVENT_TYPES_HPP
  186.30 -
  186.31 -/* Empty, just a placeholder for tracing events */
  186.32 -
  186.33 -#endif
   187.1 --- a/src/share/vm/trace/traceMacros.hpp	Fri Jun 07 09:33:01 2013 -0700
   187.2 +++ b/src/share/vm/trace/traceMacros.hpp	Mon Jun 10 11:30:51 2013 +0200
   187.3 @@ -1,5 +1,5 @@
   187.4  /*
   187.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   187.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   187.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   187.8   *
   187.9   * This code is free software; you can redistribute it and/or modify it
  187.10 @@ -25,22 +25,14 @@
  187.11  #ifndef SHARE_VM_TRACE_TRACE_MACRO_HPP
  187.12  #define SHARE_VM_TRACE_TRACE_MACRO_HPP
  187.13  
  187.14 -#define EVENT_BEGIN(type, name)
  187.15 -#define EVENT_SET(name, field, value)
  187.16 -#define EVENT_COMMIT(name, ...)
  187.17 -#define EVENT_STARTED(name, time)
  187.18 -#define EVENT_ENDED(name, time)
  187.19  #define EVENT_THREAD_EXIT(thread)
  187.20  
  187.21 -#define TRACE_ENABLED 0
  187.22 +#define TRACE_INIT_ID(k)
  187.23 +#define TRACE_DATA TraceThreadData
  187.24  
  187.25 -#define TRACE_INIT_ID(k)
  187.26 -#define TRACE_BUFFER void*
  187.27 +#define TRACE_START() JNI_OK
  187.28 +#define TRACE_INITIALIZE() JNI_OK
  187.29  
  187.30 -#define TRACE_START() true
  187.31 -#define TRACE_INITIALIZE() 0
  187.32 -
  187.33 -#define TRACE_SET_KLASS_TRACE_ID(x1, x2) do { } while (0)
  187.34  #define TRACE_DEFINE_KLASS_METHODS typedef int ___IGNORED_hs_trace_type1
  187.35  #define TRACE_DEFINE_KLASS_TRACE_ID typedef int ___IGNORED_hs_trace_type2
  187.36  #define TRACE_DEFINE_OFFSET typedef int ___IGNORED_hs_trace_type3
   188.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   188.2 +++ b/src/share/vm/trace/traceStream.hpp	Mon Jun 10 11:30:51 2013 +0200
   188.3 @@ -0,0 +1,121 @@
   188.4 +/*
   188.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   188.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   188.7 + *
   188.8 + * This code is free software; you can redistribute it and/or modify it
   188.9 + * under the terms of the GNU General Public License version 2 only, as
  188.10 + * published by the Free Software Foundation.
  188.11 + *
  188.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  188.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  188.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  188.15 + * version 2 for more details (a copy is included in the LICENSE file that
  188.16 + * accompanied this code).
  188.17 + *
  188.18 + * You should have received a copy of the GNU General Public License version
  188.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  188.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  188.21 + *
  188.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  188.23 + * or visit www.oracle.com if you need additional information or have any
  188.24 + * questions.
  188.25 + *
  188.26 + */
  188.27 +
  188.28 +#ifndef SHARE_VM_TRACE_TRACESTREAM_HPP
  188.29 +#define SHARE_VM_TRACE_TRACESTREAM_HPP
  188.30 +
  188.31 +#include "utilities/macros.hpp"
  188.32 +
  188.33 +#if INCLUDE_TRACE
  188.34 +
  188.35 +#include "oops/klass.hpp"
  188.36 +#include "oops/method.hpp"
  188.37 +#include "oops/symbol.hpp"
  188.38 +#include "utilities/ostream.hpp"
  188.39 +
  188.40 +class TraceStream : public StackObj {
  188.41 + private:
  188.42 +  outputStream& _st;
  188.43 +
  188.44 + public:
  188.45 +  TraceStream(outputStream& stream): _st(stream) {}
  188.46 +
  188.47 +  void print_val(const char* label, u1 val) {
  188.48 +    _st.print("%s = "UINT32_FORMAT, label, val);
  188.49 +  }
  188.50 +
  188.51 +  void print_val(const char* label, u2 val) {
  188.52 +    _st.print("%s = "UINT32_FORMAT, label, val);
  188.53 +  }
  188.54 +
  188.55 +  void print_val(const char* label, s2 val) {
  188.56 +    _st.print("%s = "INT32_FORMAT, label, val);
  188.57 +  }
  188.58 +
  188.59 +  void print_val(const char* label, u4 val) {
  188.60 +    _st.print("%s = "UINT32_FORMAT, label, val);
  188.61 +  }
  188.62 +
  188.63 +  void print_val(const char* label, s4 val) {
  188.64 +    _st.print("%s = "INT32_FORMAT, label, val);
  188.65 +  }
  188.66 +
  188.67 +  void print_val(const char* label, u8 val) {
  188.68 +    _st.print("%s = "UINT64_FORMAT, label, val);
  188.69 +  }
  188.70 +
  188.71 +  void print_val(const char* label, s8 val) {
  188.72 +    _st.print("%s = "INT64_FORMAT, label, val);
  188.73 +  }
  188.74 +
  188.75 +  void print_val(const char* label, bool val) {
  188.76 +    _st.print("%s = %s", label, val ? "true" : "false");
  188.77 +  }
  188.78 +
  188.79 +  void print_val(const char* label, float val) {
  188.80 +    _st.print("%s = %f", label, val);
  188.81 +  }
  188.82 +
  188.83 +  void print_val(const char* label, double val) {
  188.84 +    _st.print("%s = %f", label, val);
  188.85 +  }
  188.86 +
  188.87 +  // Caller is machine generated code located in traceEventClasses.hpp
  188.88 +  // Event<TraceId>::writeEvent() (pseudocode) contains the
  188.89 +  // necessary ResourceMark for the resource allocations below.
  188.90 +  // See traceEventClasses.xsl for details.
  188.91 +  void print_val(const char* label, const Klass* const val) {
  188.92 +    const char* description = "NULL";
  188.93 +    if (val != NULL) {
  188.94 +      Symbol* name = val->name();
  188.95 +      if (name != NULL) {
  188.96 +        description = name->as_C_string();
  188.97 +      }
  188.98 +    }
  188.99 +    _st.print("%s = %s", label, description);
 188.100 +  }
 188.101 +
 188.102 +  // Caller is machine generated code located in traceEventClasses.hpp
 188.103 +  // Event<TraceId>::writeEvent() (pseudocode) contains the
 188.104 +  // necessary ResourceMark for the resource allocations below.
 188.105 +  // See traceEventClasses.xsl for details.
 188.106 +  void print_val(const char* label, const Method* const val) {
 188.107 +    const char* description = "NULL";
 188.108 +    if (val != NULL) {
 188.109 +      description = val->name_and_sig_as_C_string();
 188.110 +    }
 188.111 +    _st.print("%s = %s", label, description);
 188.112 +  }
 188.113 +
 188.114 +  void print_val(const char* label, const char* val) {
 188.115 +    _st.print("%s = '%s'", label, val);
 188.116 +  }
 188.117 +
 188.118 +  void print(const char* val) {
 188.119 +    _st.print(val);
 188.120 +  }
 188.121 +};
 188.122 +
 188.123 +#endif /* INCLUDE_TRACE */
 188.124 +#endif /* SHARE_VM_TRACE_TRACESTREAM_HPP */
   189.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   189.2 +++ b/src/share/vm/trace/traceTime.hpp	Mon Jun 10 11:30:51 2013 +0200
   189.3 @@ -0,0 +1,33 @@
   189.4 +/*
   189.5 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   189.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   189.7 + *
   189.8 + * This code is free software; you can redistribute it and/or modify it
   189.9 + * under the terms of the GNU General Public License version 2 only, as
  189.10 + * published by the Free Software Foundation.
  189.11 + *
  189.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  189.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  189.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  189.15 + * version 2 for more details (a copy is included in the LICENSE file that
  189.16 + * accompanied this code).
  189.17 + *
  189.18 + * You should have received a copy of the GNU General Public License version
  189.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  189.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  189.21 + *
  189.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  189.23 + * or visit www.oracle.com if you need additional information or have any
  189.24 + * questions.
  189.25 + *
  189.26 + */
  189.27 +
  189.28 +#ifndef SHARE_VM_TRACE_TRACETIME_HPP
  189.29 +#define SHARE_VM_TRACE_TRACETIME_HPP
  189.30 +
  189.31 +#include "prims/jni.h"
  189.32 +
  189.33 +typedef jlong TracingTime;
  189.34 +typedef jlong RelativeTracingTime;
  189.35 +
  189.36 +#endif
   190.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   190.2 +++ b/src/share/vm/trace/traceTypes.xsl	Mon Jun 10 11:30:51 2013 +0200
   190.3 @@ -0,0 +1,72 @@
   190.4 +<?xml version="1.0" encoding="utf-8"?>
   190.5 +<!--
   190.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   190.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   190.8 +
   190.9 + This code is free software; you can redistribute it and/or modify it
  190.10 + under the terms of the GNU General Public License version 2 only, as
  190.11 + published by the Free Software Foundation.
  190.12 +
  190.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  190.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  190.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  190.16 + version 2 for more details (a copy is included in the LICENSE file that
  190.17 + accompanied this code).
  190.18 +
  190.19 + You should have received a copy of the GNU General Public License version
  190.20 + 2 along with this work; if not, write to the Free Software Foundation,
  190.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  190.22 +
  190.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  190.24 + or visit www.oracle.com if you need additional information or have any
  190.25 + questions.
  190.26 +-->
  190.27 +
  190.28 +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  190.29 +<xsl:output method="text" indent="no" omit-xml-declaration="yes"/>
  190.30 +<xsl:import href="xsl_util.xsl"/>
  190.31 +
  190.32 +<xsl:template match="/">
  190.33 +  <xsl:call-template name="file-header"/>
  190.34 +
  190.35 +#ifndef TRACEFILES_JFRTYPES_HPP
  190.36 +#define TRACEFILES_JFRTYPES_HPP
  190.37 +
  190.38 +#include "trace/traceDataTypes.hpp"
  190.39 +#include "utilities/globalDefinitions.hpp"
  190.40 +#include "oops/symbol.hpp"
  190.41 +
  190.42 +enum JVMContentType {
  190.43 +  _not_a_content_type = (JVM_CONTENT_TYPES_START - 1),
  190.44 +  
  190.45 +<xsl:for-each select="trace/types/content_types/content_type[@jvm_type]">
  190.46 +  <xsl:value-of select="concat('  CONTENT_TYPE_', @jvm_type, ',',  $newline)"/>
  190.47 +</xsl:for-each>
  190.48 +  NUM_JVM_CONTENT_TYPES
  190.49 +};
  190.50 +
  190.51 +
  190.52 +enum JVMEventRelations {
  190.53 +  JVM_REL_NOT_AVAILABLE = 0,
  190.54 +  
  190.55 +<xsl:for-each select="trace/relation_decls/relation_decl">
  190.56 +  <xsl:value-of select="concat('  JVM_REL_', @id, ',', $newline)"/>
  190.57 +</xsl:for-each>
  190.58 +  NUM_EVENT_RELATIONS
  190.59 +};
  190.60 +
  190.61 +/**
  190.62 + * Create typedefs for the JRA types:
  190.63 + *   typedef s8 TYPE_LONG;
  190.64 + *   typedef s4 TYPE_INTEGER;
  190.65 + *   typedef const char * TYPE_STRING;
  190.66 + *   ...
  190.67 + */
  190.68 +<xsl:for-each select="trace/types/primary_types/primary_type">
  190.69 +typedef <xsl:value-of select="@type"/>  TYPE_<xsl:value-of select="@symbol"/>;
  190.70 +</xsl:for-each>
  190.71 +
  190.72 +#endif // TRACEFILES_JFRTYPES_HPP
  190.73 +</xsl:template>
  190.74 +
  190.75 +</xsl:stylesheet>
   191.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   191.2 +++ b/src/share/vm/trace/tracetypes.xml	Mon Jun 10 11:30:51 2013 +0200
   191.3 @@ -0,0 +1,368 @@
   191.4 +<?xml version="1.0" encoding="utf-8"?>
   191.5 +<!--
   191.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   191.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   191.8 +
   191.9 + This code is free software; you can redistribute it and/or modify it
  191.10 + under the terms of the GNU General Public License version 2 only, as
  191.11 + published by the Free Software Foundation.
  191.12 +
  191.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  191.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  191.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  191.16 + version 2 for more details (a copy is included in the LICENSE file that
  191.17 + accompanied this code).
  191.18 +
  191.19 + You should have received a copy of the GNU General Public License version
  191.20 + 2 along with this work; if not, write to the Free Software Foundation,
  191.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  191.22 +
  191.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  191.24 + or visit www.oracle.com if you need additional information or have any
  191.25 + questions.
  191.26 +-->
  191.27 +
  191.28 +<!DOCTYPE types SYSTEM "trace.dtd">
  191.29 +
  191.30 +<!--
  191.31 +
  191.32 +Content types (complex) should create constant pool data
  191.33 +in the recording.
  191.34 +Currently at least, there is _NO_ verification that whatever
  191.35 +writer you have is actually writing correctly. So BE CAREFUL!
  191.36 +
  191.37 +Declared with the 'content_type' tag.
  191.38 +
  191.39 +<type> is the ID type, i.e the integer type that resolves this. Most often
  191.40 +U4 or U8, but for example really small number constants, like GCTYPE uses U1.
  191.41 +
  191.42 +<content-type> is where it gets interesting. 'builtin_type' means we're
  191.43 +defining how we resolve one of the trace built-in types (Class, Thread etc),
  191.44 +jvm_type means defining a new one for our own use.
  191.45 +
  191.46 +Example: (GCMode)
  191.47 +
  191.48 +<content_type id="GCMode" hr_name="GC mode" type="U1" jvm_type="GCMODE">
  191.49 +  <value type="UTF8" field="desc" description="Description"/>
  191.50 +</content_type>
  191.51 +
  191.52 +This creates a content type CONTENT_TYPE_GCMODE
  191.53 +The field type referencing it is u1 (U1), and the constant pool struct has one field, the name.
  191.54 +
  191.55 +Before we can use it we need also define a primary field data type:
  191.56 +
  191.57 +<primary_type symbol="GCMODE" datatype="U1" contenttype="NONE"
  191.58 +              type="u8" sizeop="sizeof(u1)"/>
  191.59 +
  191.60 +Now we can use the content + data type in declaring event fields.
  191.61 +Remember however, that for us to be able to resolve the value later we must also add
  191.62 +creating the constant pool data in  VM_JFRCheckpoint::write_checkpoint
  191.63 +
  191.64 +   ...
  191.65 +   //GCMODE
  191.66 +   w->be_uint(CONTENT_TYPE_GCMODE);
  191.67 +   w->be_uint(MM_GC_MODE_UNINITIALIZED);
  191.68 +   for (i = 0; i < MM_GC_MODE_UNINITIALIZED; i++) {
  191.69 +      w->uchar(i);
  191.70 +      w->write_utf8(gcModeGetName(i));
  191.71 +   }
  191.72 +
  191.73 + -->
  191.74 +
  191.75 + <types>
  191.76 +  <content_types>
  191.77 +    <content_type id="Thread" hr_name="Thread"
  191.78 +                  type="U4" builtin_type="OSTHREAD">
  191.79 +      <value type="UTF8" field="name" label="Thread name"/>
  191.80 +    </content_type>
  191.81 +
  191.82 +    <content_type id="VMThread" hr_name="VM Thread"
  191.83 +                  type="U8" jvm_type="VMTHREAD">
  191.84 +      <value type="OSTHREAD" field="thread" label="VM Thread"/>
  191.85 +    </content_type>
  191.86 +
  191.87 +    <!-- The first argument ("JavaThread") is misleading, it's really a
  191.88 +         java.lang.Thread id (long), but Mission Control depends on the name
  191.89 +         being "JavaThread" so it shouldn't be changed.
  191.90 +    -->
  191.91 +    <content_type id="JavaThread" hr_name="Java thread"
  191.92 +                  type="U8" builtin_type="JAVALANGTHREAD">
  191.93 +      <value type="OSTHREAD" field="thread" label="OS Thread ID"/>
  191.94 +      <value type="BYTES64" field="allocInsideTla"
  191.95 +             label="Allocated bytes inside TLAs"/>
  191.96 +      <value type="BYTES64" field="allocOutsideTla"
  191.97 +             label="Allocated bytes outside TLAs"/>
  191.98 +      <value type="THREADGROUP" field="group" label="Java Thread Group"/>
  191.99 +    </content_type>
 191.100 +
 191.101 +    <content_type id="ThreadGroup" hr_name="Thread group"
 191.102 +                  type="U4" jvm_type="THREADGROUP">
 191.103 +      <value type="THREADGROUP" field="parent" label="Parent"/>
 191.104 +      <value type="UTF8" field="name" label="Name"/>
 191.105 +    </content_type>
 191.106 +
 191.107 +    <content_type id="StackTrace" hr_name="Stacktrace"
 191.108 +                  type="U8" builtin_type="STACKTRACE">
 191.109 +      <value type="BOOLEAN" field="truncated" label="Truncated"/>
 191.110 +      <structarray type="StackFrame" field="frames" label="Stack frames"/>
 191.111 +    </content_type>
 191.112 +
 191.113 +    <content_type id="Class" hr_name="Java class"
 191.114 +                  type="U8" builtin_type="CLASS">
 191.115 +      <value type="CLASS" field="loaderClass" label="ClassLoader"/>
 191.116 +      <value type="SYMBOL" field="name" label="Name"/>
 191.117 +      <value type="SHORT" field="modifiers" label="Access modifiers"/>
 191.118 +    </content_type>
 191.119 +
 191.120 +    <content_type id="Method" hr_name="Java method"
 191.121 +                  type="U8" jvm_type="METHOD">
 191.122 +      <value type="CLASS" field="class" label="Class"/>
 191.123 +      <value type="SYMBOL" field="name" label="Name"/>
 191.124 +      <value type="SYMBOL" field="signature" label="Signature"/>
 191.125 +      <value type="SHORT" field="modifiers" label="Access modifiers"/>
 191.126 +    </content_type>
 191.127 +
 191.128 +    <content_type id="UTFConstant" hr_name="UTF constant"
 191.129 +                  type="U8" jvm_type="SYMBOL">
 191.130 +      <value type="UTF8" field="utf8" label="UTF8 data"/>
 191.131 +    </content_type>
 191.132 +
 191.133 +    <content_type id="ThreadState" hr_name="Java Thread State"
 191.134 +                  type="U2" jvm_type="THREADSTATE">
 191.135 +      <value type="UTF8" field="name" label="Name"/>
 191.136 +    </content_type>
 191.137 +
 191.138 +    <content_type id="FrameType" hr_name="Frame type"
 191.139 +                  type="U1" jvm_type="FRAMETYPE">
 191.140 +      <value type="UTF8" field="desc" label="Description"/>
 191.141 +    </content_type>
 191.142 +
 191.143 +    <struct_type id="StackFrame">
 191.144 +      <value type="METHOD" field="method" label="Java Method"/>
 191.145 +      <value type="INTEGER" field="line" label="Line number"/>
 191.146 +      <value type="FRAMETYPE" field="type" label="Frame type"/>
 191.147 +    </struct_type>
 191.148 +
 191.149 +    <content_type id="GCName" hr_name="GC Name"
 191.150 +                  type="U1" jvm_type="GCNAME">
 191.151 +      <value type="UTF8" field="name" label="name" />
 191.152 +    </content_type>
 191.153 +
 191.154 +    <content_type id="GCCause" hr_name="GC Cause"
 191.155 +                  type="U2" jvm_type="GCCAUSE">
 191.156 +      <value type="UTF8" field="cause" label="cause" />
 191.157 +    </content_type>
 191.158 +
 191.159 +    <content_type id="GCWhen" hr_name="GC When"
 191.160 +                  type="U1" jvm_type="GCWHEN">
 191.161 +      <value type="UTF8" field="when" label="when" />
 191.162 +    </content_type>
 191.163 +
 191.164 +    <content_type id="G1YCType" hr_name="G1 YC Type"
 191.165 +                  type="U1" jvm_type="G1YCTYPE">
 191.166 +      <value type="UTF8" field="type" label="type" />
 191.167 +    </content_type>
 191.168 +
 191.169 +    <content_type id="ReferenceType" hr_name="Reference Type"
 191.170 +                  type="U1" jvm_type="REFERENCETYPE">
 191.171 +      <value type="UTF8" field="type" label="type" />
 191.172 +    </content_type>
 191.173 +
 191.174 +    <content_type id="NARROW_OOP_MODE" hr_name="Narrow Oop Mode"
 191.175 +                  type="U1" jvm_type="NARROWOOPMODE">
 191.176 +      <value type="UTF8" field="mode" label="mode" />
 191.177 +    </content_type>
 191.178 +
 191.179 +    <content_type id="VMOperationType" hr_name="VM Operation Type"
 191.180 +                  type="U2" jvm_type="VMOPERATIONTYPE">
 191.181 +      <value type="UTF8" field="type" label="type" />
 191.182 +    </content_type>
 191.183 +
 191.184 +    <content_type id="CompilerPhaseType" hr_name="Compiler Phase Type"
 191.185 +                  type="U1" jvm_type="COMPILERPHASETYPE">
 191.186 +      <value type="UTF8" field="phase" label="phase" />
 191.187 +    </content_type>
 191.188 +
 191.189 +  </content_types>
 191.190 +
 191.191 +
 191.192 +  <primary_types>
 191.193 +    <!--
 191.194 +      - primary_type takes these attributes:
 191.195 +      -   symbol      INTEGER, LONG etc
 191.196 +      -   datatype    The trace datatype, see enum DataType
 191.197 +      -   contenttype Either resolved content type or the semantic meaning
 191.198 +      -   type        The actual type as used in structures etc
 191.199 +      -   sizeop      A function/macro that can be applied on a single
 191.200 +      -               struct value of type "type" and yield the factual byte
 191.201 +      -               size we need to write.  The % is replaced by the value
 191.202 +      -->
 191.203 +
 191.204 +    <!-- SIGNED 64bit -->
 191.205 +    <primary_type symbol="LONG" datatype="LONG" contenttype="NONE"
 191.206 +                  type="s8" sizeop="sizeof(s8)"/>
 191.207 +
 191.208 +    <!-- UNSIGNED 64bit -->
 191.209 +    <primary_type symbol="ULONG" datatype="U8" contenttype="NONE"
 191.210 +                  type="u8" sizeop="sizeof(u8)"/>
 191.211 +
 191.212 +    <!-- SIGNED 32bit -->
 191.213 +    <primary_type symbol="INTEGER" datatype="INT" contenttype="NONE"
 191.214 +                  type="s4" sizeop="sizeof(s4)"/>
 191.215 +
 191.216 +    <!-- UNSIGNED 32bit -->
 191.217 +    <primary_type symbol="UINT" datatype="U4" contenttype="NONE"
 191.218 +                  type="unsigned" sizeop="sizeof(unsigned)"/>
 191.219 +
 191.220 +    <!-- UNSIGNED 16bit -->
 191.221 +    <primary_type symbol="USHORT" datatype="U2" contenttype="NONE"
 191.222 +                  type="u2" sizeop="sizeof(u2)"/>
 191.223 +
 191.224 +    <!--  SIGNED 16bit -->
 191.225 +    <primary_type symbol="SHORT" datatype="SHORT" contenttype="NONE"
 191.226 +                  type="s2" sizeop="sizeof(s2)"/>
 191.227 +
 191.228 +    <!--  SIGNED 8bit -->
 191.229 +    <primary_type symbol="BYTE" datatype="BYTE" contenttype="NONE"
 191.230 +                  type="s1" sizeop="sizeof(s1)"/>
 191.231 +
 191.232 +    <!--  UNSIGNED 8bit -->
 191.233 +    <primary_type symbol="UBYTE" datatype="U1" contenttype="NONE"
 191.234 +                  type="u1" sizeop="sizeof(u1)"/>
 191.235 +
 191.236 +    <!--  float 32bit -->
 191.237 +    <primary_type symbol="FLOAT" datatype="FLOAT" contenttype="NONE"
 191.238 +                  type="float" sizeop="sizeof(float)"/>
 191.239 +
 191.240 +    <!--  float 64bit -->
 191.241 +    <primary_type symbol="DOUBLE" datatype="DOUBLE" contenttype="NONE"
 191.242 +                  type="double" sizeop="sizeof(double)"/>
 191.243 +
 191.244 +    <!-- boolean type (1-byte) -->
 191.245 +    <primary_type symbol="BOOLEAN" datatype="BOOLEAN" contenttype="NONE"
 191.246 +                  type="bool" sizeop="1"/>
 191.247 +
 191.248 +    <!-- 32-bit unsigned integer, SEMANTIC value BYTES -->
 191.249 +    <primary_type symbol="BYTES" datatype="U4" contenttype="BYTES"
 191.250 +                  type="u4" sizeop="sizeof(u4)"/>
 191.251 +
 191.252 +    <primary_type symbol="IOBYTES" datatype="U4" contenttype="BYTES"
 191.253 +                  type="u4" sizeop="sizeof(u4)"/>
 191.254 +
 191.255 +    <!-- 64-bit unsigned integer, SEMANTIC value BYTES -->
 191.256 +    <primary_type symbol="BYTES64" datatype="U8" contenttype="BYTES"
 191.257 +                  type="u8" sizeop="sizeof(u8)"/>
 191.258 +
  191.259 +    <!-- 64-bit signed integer, SEMANTIC value ABSOLUTE MILLISECONDS -->
 191.260 +    <primary_type symbol="EPOCHMILLIS" datatype="LONG" contenttype="EPOCHMILLIS"
 191.261 +                  type="s8" sizeop="sizeof(s8)"/>
 191.262 +
  191.263 +    <!-- 64-bit signed integer, SEMANTIC value RELATIVE MILLISECONDS -->
 191.264 +    <primary_type symbol="MILLIS" datatype="LONG" contenttype="MILLIS"
 191.265 +                  type="s8" sizeop="sizeof(s8)"/>
 191.266 +
  191.267 +    <!-- 64-bit signed integer, SEMANTIC value RELATIVE NANOSECONDS -->
 191.268 +    <primary_type symbol="NANOS" datatype="LONG" contenttype="NANOS"
 191.269 +                  type="s8" sizeop="sizeof(s8)"/>
 191.270 +
 191.271 +    <!-- 64-bit signed integer, SEMANTIC value ABSOLUTE TICKS -->
 191.272 +    <primary_type symbol="TICKS" datatype="LONG" contenttype="TICKS"
 191.273 +                  type="s8" sizeop="sizeof(s8)"/>
 191.274 +
 191.275 +    <!-- 64-bit signed integer, SEMANTIC value RELATIVE TICKS -->
 191.276 +    <primary_type symbol="RELATIVE_TICKS" datatype="LONG" contenttype="TICKS"
 191.277 +                  type="s8" sizeop="sizeof(s8)"/>
 191.278 +
 191.279 +    <!-- 64-bit unsigned integer, SEMANTIC value ADDRESS (mem loc) -->
 191.280 +    <primary_type symbol="ADDRESS" datatype="U8" contenttype="ADDRESS"
 191.281 +                  type="u8" sizeop="sizeof(u8)"/>
 191.282 +
 191.283 +    <!-- 32-bit float, SEMANTIC value PERCENTAGE (0.0-1.0) -->
 191.284 +    <primary_type symbol="PERCENT" datatype="FLOAT" contenttype="PERCENTAGE"
 191.285 +                  type="float" sizeop="sizeof(float)"/>
 191.286 +
 191.287 +    <!-- UTF-encoded string, max length 64k -->
 191.288 +    <primary_type symbol="UTF8" datatype="UTF8" contenttype="NONE"
 191.289 +                  type="const char *" sizeop="sizeof_utf(%)"/>
 191.290 +
  191.291 +    <!-- Symbol* constant. Note that this may currently ONLY be used by
  191.292 +          classes, methods and fields.  This restriction might be lifted. -->
 191.293 +    <primary_type symbol="SYMBOL" datatype="U8" contenttype="SYMBOL"
 191.294 +                  type="Symbol *" sizeop="sizeof(u8)"/>
 191.295 +
 191.296 +    <!-- A Klass *. The actual class is marked as "used" and will
 191.297 +         eventually be written into the recording constant pool -->
 191.298 +    <primary_type symbol="CLASS" datatype="U8" contenttype="CLASS"
 191.299 +                  type="Klass *" sizeop="sizeof(u8)"/>
 191.300 +
 191.301 +    <!-- A Method *. The method is marked as "used" and will eventually be
 191.302 +         written into the recording constant pool. -->
 191.303 +    <primary_type symbol="METHOD" datatype="U8" contenttype="METHOD"
 191.304 +                  type="Method *" sizeop="sizeof(u8)"/>
 191.305 +
  191.306 +    <!--  The type for stacktraces in the recording. Should not be used by
  191.307 +          events explicitly -->
 191.308 +    <primary_type symbol="STACKTRACE" datatype="U8" contenttype="STACKTRACE"
 191.309 +                  type="u8" sizeop="sizeof(u8)"/>
 191.310 +
 191.311 +    <!-- OS Thread ID -->
 191.312 +    <primary_type symbol="OSTHREAD" datatype="U4" contenttype="OSTHREAD"
 191.313 +                  type="u4" sizeop="sizeof(u4)"/>
 191.314 +
 191.315 +    <!-- VM Thread ID Note: changed from U2 to U8 for hotspot -->
 191.316 +    <primary_type symbol="VMTHREAD" datatype="U8" contenttype="VMTHREAD"
 191.317 +                  type="u8"  sizeop="sizeof(u8)"/>
 191.318 +
 191.319 +    <!-- Java Thread ID -->
 191.320 +    <primary_type symbol="JAVALANGTHREAD" datatype="LONG"
 191.321 +                  contenttype="JAVALANGTHREAD" type="s8"
 191.322 +                  sizeop="sizeof(s8)"/>
 191.323 +
 191.324 +    <!-- Threadgroup THIS TYPE MAY NOT BE USED IN NORMAL EVENTS (ATM). Only
 191.325 +          for thread constant pool // KK TODO: u8 should be ObjectP -->
 191.326 +    <primary_type symbol="THREADGROUP" datatype="U4" contenttype="THREADGROUP"
 191.327 +                  type="u8"
 191.328 +                  sizeop="sizeof(u4)"/>
 191.329 +
 191.330 +    <!-- FRAMETYPE enum -->
 191.331 +    <primary_type symbol="FRAMETYPE" datatype="U1" contenttype="FRAMETYPE"
 191.332 +                  type="u1" sizeop="sizeof(u1)"/>
 191.333 +
 191.334 +    <!-- THREADSTATE enum -->
 191.335 +    <primary_type symbol="THREADSTATE" datatype="U2" contenttype="THREADSTATE"
 191.336 +                  type="u2" sizeop="sizeof(u2)"/>
 191.337 +
 191.338 +    <!-- GCName -->
 191.339 +    <primary_type symbol="GCNAME" datatype="U1" contenttype="GCNAME"
 191.340 +                  type="u1" sizeop="sizeof(u1)" />
 191.341 +
 191.342 +    <!-- GCCAUSE -->
 191.343 +    <primary_type symbol="GCCAUSE" datatype="U2" contenttype="GCCAUSE"
 191.344 +                  type="u2" sizeop="sizeof(u2)" />
 191.345 +
 191.346 +    <!-- GCWHEN -->
 191.347 +    <primary_type symbol="GCWHEN" datatype="U1" contenttype="GCWHEN"
 191.348 +                  type="u1" sizeop="sizeof(u1)" />
 191.349 +
 191.350 +    <!-- G1YCType -->
 191.351 +    <primary_type symbol="G1YCTYPE" datatype="U1" contenttype="G1YCTYPE"
 191.352 +                  type="u1" sizeop="sizeof(u1)" />
 191.353 +
 191.354 +    <!-- REFERENCETYPE -->
 191.355 +    <primary_type symbol="REFERENCETYPE" datatype="U1"
 191.356 +                  contenttype="REFERENCETYPE" type="u1" sizeop="sizeof(u1)" />
 191.357 +
 191.358 +    <!-- NARROWOOPMODE -->
 191.359 +    <primary_type symbol="NARROWOOPMODE" datatype="U1"
 191.360 +                  contenttype="NARROWOOPMODE" type="u1" sizeop="sizeof(u1)" />
 191.361 +
 191.362 +    <!-- COMPILERPHASETYPE -->
 191.363 +    <primary_type symbol="COMPILERPHASETYPE" datatype="U1"
 191.364 +                  contenttype="COMPILERPHASETYPE" type="u1" sizeop="sizeof(u1)" />
 191.365 +
 191.366 +    <!-- VMOPERATIONTYPE -->
 191.367 +    <primary_type symbol="VMOPERATIONTYPE" datatype="U2" contenttype="VMOPERATIONTYPE"
 191.368 +                  type="u2" sizeop="sizeof(u2)" />
 191.369 +
 191.370 +  </primary_types>
 191.371 +</types>
   192.1 --- a/src/share/vm/trace/tracing.hpp	Fri Jun 07 09:33:01 2013 -0700
   192.2 +++ b/src/share/vm/trace/tracing.hpp	Mon Jun 10 11:30:51 2013 +0200
   192.3 @@ -1,5 +1,5 @@
   192.4  /*
   192.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   192.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   192.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   192.8   *
   192.9   * This code is free software; you can redistribute it and/or modify it
  192.10 @@ -25,6 +25,7 @@
  192.11  #ifndef SHARE_VM_TRACE_TRACING_HPP
  192.12  #define SHARE_VM_TRACE_TRACING_HPP
  192.13  
  192.14 -#include "trace/traceMacros.hpp"
  192.15 +#include "tracefiles/traceEventClasses.hpp"
  192.16 +#include "tracefiles/traceEventIds.hpp"
  192.17  
  192.18  #endif
   193.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   193.2 +++ b/src/share/vm/trace/xinclude.mod	Mon Jun 10 11:30:51 2013 +0200
   193.3 @@ -0,0 +1,61 @@
   193.4 +<?xml version="1.0" encoding="UTF-8"?>
   193.5 +<!--
   193.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   193.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   193.8 +
   193.9 + This code is free software; you can redistribute it and/or modify it
  193.10 + under the terms of the GNU General Public License version 2 only, as
  193.11 + published by the Free Software Foundation.
  193.12 +
  193.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  193.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  193.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  193.16 + version 2 for more details (a copy is included in the LICENSE file that
  193.17 + accompanied this code).
  193.18 +
  193.19 + You should have received a copy of the GNU General Public License version
  193.20 + 2 along with this work; if not, write to the Free Software Foundation,
  193.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  193.22 +
  193.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  193.24 + or visit www.oracle.com if you need additional information or have any
  193.25 + questions.
  193.26 +  
  193.27 +-->
  193.28 +
  193.29 +<!--
  193.30 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
  193.31 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  193.32 +
  193.33 + This code is free software; you can redistribute it and/or modify it
  193.34 + under the terms of the GNU General Public License version 2 only, as
  193.35 + published by the Free Software Foundation.
  193.36 +
  193.37 + This code is distributed in the hope that it will be useful, but WITHOUT
  193.38 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  193.39 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  193.40 + version 2 for more details (a copy is included in the LICENSE file that
  193.41 + accompanied this code).
  193.42 +
  193.43 + You should have received a copy of the GNU General Public License version
  193.44 + 2 along with this work; if not, write to the Free Software Foundation,
  193.45 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  193.46 +
  193.47 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  193.48 + or visit www.oracle.com if you need additional information or have any
  193.49 + questions.
  193.50 +  
  193.51 +-->
  193.52 +<!ELEMENT xi:include (xi:fallback?) >
  193.53 +<!ATTLIST xi:include
  193.54 +    xmlns:xi   CDATA       #FIXED    "http://www.w3.org/2001/XInclude"
  193.55 +    href       CDATA       #IMPLIED
  193.56 +    parse      (xml|text)  "xml"
  193.57 +    xpointer   CDATA       #IMPLIED
  193.58 +    encoding   CDATA       #IMPLIED 
  193.59 +    accept     CDATA       #IMPLIED
  193.60 +    accept-language CDATA  #IMPLIED >
  193.61 +
  193.62 +<!ELEMENT xi:fallback ANY>
  193.63 +<!ATTLIST xi:fallback
  193.64 +    xmlns:xi   CDATA   #FIXED   "http://www.w3.org/2001/XInclude" >
   194.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   194.2 +++ b/src/share/vm/trace/xsl_util.xsl	Mon Jun 10 11:30:51 2013 +0200
   194.3 @@ -0,0 +1,78 @@
   194.4 +<?xml version="1.0" encoding="utf-8"?>
   194.5 +<!--
   194.6 + Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
   194.7 + DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   194.8 +
   194.9 + This code is free software; you can redistribute it and/or modify it
  194.10 + under the terms of the GNU General Public License version 2 only, as
  194.11 + published by the Free Software Foundation.
  194.12 +
  194.13 + This code is distributed in the hope that it will be useful, but WITHOUT
  194.14 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  194.15 + FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  194.16 + version 2 for more details (a copy is included in the LICENSE file that
  194.17 + accompanied this code).
  194.18 +
  194.19 + You should have received a copy of the GNU General Public License version
  194.20 + 2 along with this work; if not, write to the Free Software Foundation,
  194.21 + Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  194.22 +
  194.23 + Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  194.24 + or visit www.oracle.com if you need additional information or have any
  194.25 + questions.
  194.26 +-->
  194.27 +
  194.28 +<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
  194.29 +
  194.30 +<!-- utilities used when generating code -->
  194.31 +
  194.32 +<xsl:variable name="newline">
  194.33 +  <xsl:text>&#xA;</xsl:text>
  194.34 +</xsl:variable>
  194.35 +
  194.36 +<xsl:variable name="indent1">
  194.37 +  <xsl:text>&#xA;  </xsl:text>
  194.38 +</xsl:variable>
  194.39 +
  194.40 +<xsl:variable name="indent2">
  194.41 +  <xsl:text>&#xA;    </xsl:text>
  194.42 +</xsl:variable>
  194.43 +
  194.44 +<xsl:variable name="indent3">
  194.45 +  <xsl:text>&#xA;      </xsl:text>
  194.46 +</xsl:variable>
  194.47 +
  194.48 +<xsl:variable name="indent4">
  194.49 +  <xsl:text>&#xA;        </xsl:text>
  194.50 +</xsl:variable>
  194.51 +
  194.52 +<xsl:variable name="quote">
  194.53 +  <xsl:text>"</xsl:text>
  194.54 +</xsl:variable>
  194.55 +
  194.56 +<xsl:template name="file-header">
  194.57 +  <xsl:text>/* AUTOMATICALLY GENERATED FILE - DO NOT EDIT */</xsl:text>
  194.58 +</xsl:template>
  194.59 +
  194.60 +<xsl:template name="string-replace-all">
  194.61 +  <xsl:param name="text" />
  194.62 +  <xsl:param name="replace" />
  194.63 +  <xsl:param name="by" />
  194.64 +  <xsl:choose>
  194.65 +    <xsl:when test="contains($text, $replace)">
  194.66 +      <xsl:value-of select="substring-before($text,$replace)" />
  194.67 +      <xsl:value-of select="$by" />
  194.68 +      <xsl:call-template name="string-replace-all">
  194.69 +        <xsl:with-param name="text" select="substring-after($text,$replace)" />
  194.70 +        <xsl:with-param name="replace" select="$replace" />
  194.71 +        <xsl:with-param name="by" select="$by" />
  194.72 +      </xsl:call-template>
  194.73 +    </xsl:when>
  194.74 +    <xsl:otherwise>
  194.75 +      <xsl:value-of select="$text" />
  194.76 +    </xsl:otherwise>
  194.77 +  </xsl:choose>
  194.78 +</xsl:template>
  194.79 +
  194.80 +
  194.81 +</xsl:stylesheet>
   195.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Fri Jun 07 09:33:01 2013 -0700
   195.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Mon Jun 10 11:30:51 2013 +0200
   195.3 @@ -763,18 +763,6 @@
   195.4  TosState as_TosState(BasicType type);
   195.5  
   195.6  
   195.7 -// ReferenceType is used to distinguish between java/lang/ref/Reference subclasses
   195.8 -
   195.9 -enum ReferenceType {
  195.10 - REF_NONE,      // Regular class
  195.11 - REF_OTHER,     // Subclass of java/lang/ref/Reference, but not subclass of one of the classes below
  195.12 - REF_SOFT,      // Subclass of java/lang/ref/SoftReference
  195.13 - REF_WEAK,      // Subclass of java/lang/ref/WeakReference
  195.14 - REF_FINAL,     // Subclass of java/lang/ref/FinalReference
  195.15 - REF_PHANTOM    // Subclass of java/lang/ref/PhantomReference
  195.16 -};
  195.17 -
  195.18 -
  195.19  // JavaThreadState keeps track of which part of the code a thread is executing in. This
  195.20  // information is needed by the safepoint code.
  195.21  //
   196.1 --- a/src/share/vm/utilities/macros.hpp	Fri Jun 07 09:33:01 2013 -0700
   196.2 +++ b/src/share/vm/utilities/macros.hpp	Mon Jun 10 11:30:51 2013 +0200
   196.3 @@ -160,6 +160,10 @@
   196.4  #define NOT_NMT_RETURN_(code) { return code; }
   196.5  #endif // INCLUDE_NMT
   196.6  
   196.7 +#ifndef INCLUDE_TRACE
   196.8 +#define INCLUDE_TRACE 1
   196.9 +#endif // INCLUDE_TRACE
  196.10 +
  196.11  // COMPILER1 variant
  196.12  #ifdef COMPILER1
  196.13  #ifdef COMPILER2

mercurial