Merge

Wed, 22 Sep 2010 12:54:51 -0400

author
kamg
date
Wed, 22 Sep 2010 12:54:51 -0400
changeset 2160
a25394352030
parent 2159
2966dab85b3e
parent 2151
18c378513575
child 2161
9bdbd693dbaa

Merge

src/share/vm/runtime/arguments.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/make/linux/Makefile	Tue Sep 21 06:58:44 2010 -0700
     1.2 +++ b/make/linux/Makefile	Wed Sep 22 12:54:51 2010 -0400
     1.3 @@ -19,7 +19,7 @@
     1.4  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
     1.5  # or visit www.oracle.com if you need additional information or have any
     1.6  # questions.
     1.7 -#  
     1.8 +#
     1.9  #
    1.10  
    1.11  # This makefile creates a build tree and lights off a build.
    1.12 @@ -45,13 +45,13 @@
    1.13  #
    1.14  #    make REMOTE="rsh -l me myotherlinuxbox"
    1.15  
    1.16 -# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. 
    1.17 -# JDI binding on SA produces two binaries: 
    1.18 +# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
    1.19 +# JDI binding on SA produces two binaries:
    1.20  #  1. sa-jdi.jar       - This is build before building libjvm[_g].so
    1.21  #                        Please refer to ./makefiles/sa.make
    1.22  #  2. libsa[_g].so     - Native library for SA - This is built after
    1.23  #                        libjsig[_g].so (signal interposition library)
    1.24 -#                        Please refer to ./makefiles/vm.make 
    1.25 +#                        Please refer to ./makefiles/vm.make
    1.26  # If $(GAMMADIR)/agent dir is not present, SA components are not built.
    1.27  
    1.28  ifeq ($(GAMMADIR),)
    1.29 @@ -61,11 +61,9 @@
    1.30  endif
    1.31  include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
    1.32  
    1.33 -ifndef LP64
    1.34  ifndef CC_INTERP
    1.35  FORCE_TIERED=1
    1.36  endif
    1.37 -endif
    1.38  
    1.39  ifdef LP64
    1.40    ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
     2.1 --- a/make/solaris/Makefile	Tue Sep 21 06:58:44 2010 -0700
     2.2 +++ b/make/solaris/Makefile	Wed Sep 22 12:54:51 2010 -0400
     2.3 @@ -1,5 +1,5 @@
     2.4  #
     2.5 -# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
     2.6 +# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
     2.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8  #
     2.9  # This code is free software; you can redistribute it and/or modify it
    2.10 @@ -19,7 +19,7 @@
    2.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    2.12  # or visit www.oracle.com if you need additional information or have any
    2.13  # questions.
    2.14 -#  
    2.15 +#
    2.16  #
    2.17  
    2.18  # This makefile creates a build tree and lights off a build.
    2.19 @@ -36,13 +36,13 @@
    2.20  # or BOOTDIR has to be set. We do *not* search javac, javah, rmic etc.
    2.21  # from the PATH.
    2.22  
    2.23 -# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding. 
    2.24 -# JDI binding on SA produces two binaries: 
    2.25 +# Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
    2.26 +# JDI binding on SA produces two binaries:
    2.27  #  1. sa-jdi.jar       - This is build before building libjvm[_g].so
    2.28  #                        Please refer to ./makefiles/sa.make
    2.29  #  2. libsaproc[_g].so - Native library for SA - This is built after
    2.30  #                        libjsig[_g].so (signal interposition library)
    2.31 -#                        Please refer to ./makefiles/vm.make 
    2.32 +#                        Please refer to ./makefiles/vm.make
    2.33  # If $(GAMMADIR)/agent dir is not present, SA components are not built.
    2.34  
    2.35  ifeq ($(GAMMADIR),)
    2.36 @@ -52,11 +52,9 @@
    2.37  endif
    2.38  include $(GAMMADIR)/make/$(OSNAME)/makefiles/rules.make
    2.39  
    2.40 -ifndef LP64
    2.41  ifndef CC_INTERP
    2.42  FORCE_TIERED=1
    2.43  endif
    2.44 -endif
    2.45  
    2.46  ifdef LP64
    2.47    ifeq ("$(filter $(LP64_ARCH),$(BUILDARCH))","")
     3.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     3.2 +++ b/make/solaris/makefiles/reorder_TIERED_sparcv9	Wed Sep 22 12:54:51 2010 -0400
     3.3 @@ -0,0 +1,4477 @@
     3.4 +data = R0x2000;
     3.5 +text = LOAD ?RXO;
     3.6 +
     3.7 +
     3.8 +text: .text%__1cCosOjavaTimeMillis6F_x_;
     3.9 +text: .text%__1cQIndexSetIteratorQadvance_and_next6M_I_;
    3.10 +text: .text%__1cNinstanceKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
    3.11 +text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_;
    3.12 +text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_;
    3.13 +text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
    3.14 +text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_;
    3.15 +text: .text%__1cIPhaseIFGIadd_edge6MII_i_;
    3.16 +text: .text%__1cQIndexSetIterator2t6MpnIIndexSet__v_;
    3.17 +text: .text%__1cENodeEjvms6kM_pnIJVMState__;
    3.18 +text: .text%__1cIIndexSetWalloc_block_containing6MI_pn0AIBitBlock__;
    3.19 +text: .text%__1cETypeDcmp6Fkpk03_i_;
    3.20 +text: .text%__1cENodeHlatency6MI_I_;
    3.21 +text: .text%__1cHRegMaskJis_bound16kM_i_;
    3.22 +text: .text%__1cDff16FI_i_;
    3.23 +text: .text%__1cHRegMaskESize6kM_I_;
    3.24 +text: .text%__1cXresource_allocate_bytes6FI_pc_;
    3.25 +text: .text%__1cENodeIpipeline6kM_pknIPipeline__;
    3.26 +text: .text%__1cJVectorSet2R6MI_rnDSet__;
    3.27 +text: .text%__1cHRegMaskJis_bound26kM_i_;
    3.28 +text: .text%__1cNSharedRuntimeElmul6Fxx_x_;
    3.29 +text: .text%__1cIMachNodeGOpcode6kM_i_;
    3.30 +text: .text%__1cJiRegIOperEtype6kM_pknEType__: ad_sparc.o;
    3.31 +text: .text%__1cIIndexSetKinitialize6MI_v_;
    3.32 +text: .text%__1cITypeNodeLbottom_type6kM_pknEType__;
    3.33 +text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_;
    3.34 +text: .text%__1cKTypeOopPtrFklass6kM_pnHciKlass__: type.o;
    3.35 +text: .text%__1cETypeFuhash6Fkpk0_i_;
    3.36 +text: .text%__1cQIndexSetIteratorEnext6M_I_: chaitin.o;
    3.37 +text: .text%__1cENodeIout_grow6MI_v_;
    3.38 +text: .text%__1cOloadConI13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
    3.39 +text: .text%__1cNobjArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
    3.40 +text: .text%__1cENodeHadd_req6Mp0_v_;
    3.41 +text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
    3.42 +text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_;
    3.43 +text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
    3.44 +text: .text%__1cOloadConI13NodeErule6kM_I_: ad_sparc_misc.o;
    3.45 +text: .text%__1cICallNodeKmatch_edge6kMI_I_;
    3.46 +text: .text%__1cINodeHashQhash_find_insert6MpnENode__2_;
    3.47 +text: .text%__1cHPhiNodeGOpcode6kM_i_;
    3.48 +text: .text%__1cKbranchNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
    3.49 +text: .text%__1cIProjNodeGOpcode6kM_i_;
    3.50 +text: .text%__1cETypeIhashcons6M_pk0_;
    3.51 +text: .text%__1cOPhaseIdealLoopUbuild_loop_late_post6MpnENode_pk0_v_;
    3.52 +text: .text%__1cMPhaseChaitinTinterfere_with_live6MIpnIIndexSet__v_;
    3.53 +text: .text%__1cWNode_Backward_IteratorEnext6M_pnENode__;
    3.54 +text: .text%__1cNIdealLoopTreeJis_member6kMpk0_i_;
    3.55 +text: .text%__1cMMachCallNodeKin_RegMask6kMI_rknHRegMask__;
    3.56 +text: .text%__1cHCompileNnode_bundling6MpknENode__pnGBundle__;
    3.57 +text: .text%__1cGIfNodeGOpcode6kM_i_;
    3.58 +text: .text%__1cOPhaseIdealLoopYsplit_if_with_blocks_pre6MpnENode__2_;
    3.59 +text: .text%__1cOPhaseIdealLoopZsplit_if_with_blocks_post6MpnENode__v_;
    3.60 +text: .text%__1cIUniverseMnon_oop_word6F_pv_;
    3.61 +text: .text%__1cDLRGOcompute_degree6kMr0_i_;
    3.62 +text: .text%__1cFArenaIArealloc6MpvII_1_;
    3.63 +text: .text%__1cIConINodeGOpcode6kM_i_;
    3.64 +text: .text%__1cETypeEmeet6kMpk0_2_;
    3.65 +text: .text%__1cENode2t6MI_v_;
    3.66 +text: .text%__1cRMachSpillCopyNodeJideal_reg6kM_I_: ad_sparc.o;
    3.67 +text: .text%__1cIPipelineXfunctional_unit_latency6kMIpk0_I_;
    3.68 +text: .text%__1cWPSScavengeRootsClosureGdo_oop6MppnHoopDesc__v_: psTasks.o;
    3.69 +text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_;
    3.70 +text: .text%__1cJCProjNodeNis_block_proj6kM_pknENode__: cfgnode.o;
    3.71 +text: .text%__1cKIfTrueNodeGOpcode6kM_i_;
    3.72 +text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_;
    3.73 +text: .text%__1cIMachNodeKin_RegMask6kMI_rknHRegMask__;
    3.74 +text: .text%__1cJloadPNodeErule6kM_I_: ad_sparc_misc.o;
    3.75 +text: .text%__1cIPhaseIFGQeffective_degree6kMI_i_;
    3.76 +text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_;
    3.77 +text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_;
    3.78 +text: .text%__1cIAddPNodeGOpcode6kM_i_;
    3.79 +text: .text%__1cIPhaseIFGJre_insert6MI_v_;
    3.80 +text: .text%__1cIPhaseIFGLremove_node6MI_pnIIndexSet__;
    3.81 +text: .text%__1cKNode_ArrayGinsert6MIpnENode__v_;
    3.82 +text: .text%__1cHTypeIntEhash6kM_i_;
    3.83 +text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_;
    3.84 +text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_;
    3.85 +text: .text%__1cMPhaseIterGVNNtransform_old6MpnENode__2_;
    3.86 +text: .text%__1cDfh16FI_i_;
    3.87 +text: .text%__1cNMachIdealNodeErule6kM_I_: ad_sparc.o;
    3.88 +text: .text%__1cIIndexSetKfree_block6MI_v_;
    3.89 +text: .text%__1cWShouldNotReachHereNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
    3.90 +text: .text%__1cLIfFalseNodeGOpcode6kM_i_;
    3.91 +text: .text%__1cSCallStaticJavaNodeGOpcode6kM_i_;
    3.92 +text: .text%__1cENodeEhash6kM_I_;
    3.93 +text: .text%__1cOPhaseIdealLoopEsort6MpnNIdealLoopTree_2_2_;
    3.94 +text: .text%__1cMMachProjNodeLbottom_type6kM_pknEType__;
    3.95 +text: .text%JVM_ArrayCopy;
    3.96 +text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
    3.97 +text: .text%__1cNSharedRuntimeDl2f6Fx_f_;
    3.98 +text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__;
    3.99 +text: .text%__1cHConNodeGOpcode6kM_i_;
   3.100 +text: .text%__1cMPhaseIterGVNWadd_users_to_worklist06MpnENode__v_;
   3.101 +text: .text%__1cMMachProjNodeGOpcode6kM_i_;
   3.102 +text: .text%__1cJiRegPOperEtype6kM_pknEType__: ad_sparc.o;
   3.103 +text: .text%__1cXPipeline_Use_Cycle_Mask2L6Mi_r0_: ad_sparc_pipeline.o;
   3.104 +text: .text%__1cIBoolNodeGOpcode6kM_i_;
   3.105 +text: .text%__1cYCallStaticJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.106 +text: .text%__1cENodeEgrow6MI_v_;
   3.107 +text: .text%__1cIciObjectEhash6M_i_;
   3.108 +text: .text%__1cKRegionNodeGOpcode6kM_i_;
   3.109 +text: .text%__1cOPhaseIdealLoopUbuild_loop_tree_impl6MpnENode_i_i_;
   3.110 +text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
   3.111 +text: .text%__1cRMachSpillCopyNodeLbottom_type6kM_pknEType__: ad_sparc.o;
   3.112 +text: .text%__1cOPhaseIdealLoopOget_early_ctrl6MpnENode__2_;
   3.113 +text: .text%__1cIIndexSetKinitialize6MIpnFArena__v_;
   3.114 +text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_;
   3.115 +text: .text%__1cIPhaseGVNJtransform6MpnENode__2_;
   3.116 +text: .text%__1cOoop_RelocationLunpack_data6M_v_;
   3.117 +text: .text%__1cRmethodDataOopDescHdata_at6Mi_pnLProfileData__;
   3.118 +text: .text%__1cPJavaFrameAnchorNmake_walkable6MpnKJavaThread__v_;
   3.119 +text: .text%__1cENodeNis_block_proj6kM_pk0_;
   3.120 +text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__;
   3.121 +text: .text%__1cIProjNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.122 +text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_;
   3.123 +text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_;
   3.124 +text: .text%__1cLTypeInstPtrEhash6kM_i_;
   3.125 +text: .text%__1cYCallStaticJavaDirectNodeIpipeline6kM_pknIPipeline__;
   3.126 +text: .text%__1cOPhaseIdealLoopThas_local_phi_input6MpnENode__2_;
   3.127 +text: .text%__1cJloadINodeErule6kM_I_: ad_sparc_misc.o;
   3.128 +text: .text%__1cRMachSpillCopyNodeLout_RegMask6kM_rknHRegMask__: ad_sparc.o;
   3.129 +text: .text%__1cKbranchNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.130 +text: .text%__1cMMachProjNodeJideal_reg6kM_I_: classes.o;
   3.131 +text: .text%__1cMMachProjNodeLout_RegMask6kM_rknHRegMask__: classes.o;
   3.132 +text: .text%__1cRMachSpillCopyNodeKin_RegMask6kMI_rknHRegMask__: ad_sparc.o;
   3.133 +text: .text%__1cbAfinal_graph_reshaping_impl6FpnENode_rnUFinal_Reshape_Counts__v_: compile.o;
   3.134 +text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__;
   3.135 +text: .text%__1cUParallelScavengeHeapVlarge_typearray_limit6M_I_: parallelScavengeHeap.o;
   3.136 +text: .text%__1cIPhaseCCPOtransform_once6MpnENode__2_;
   3.137 +text: .text%__1cGciTypeEmake6FnJBasicType__p0_;
   3.138 +text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__;
   3.139 +text: .text%__1cENodeFclone6kM_p0_;
   3.140 +text: .text%__1cITypeNodeEhash6kM_I_;
   3.141 +text: .text%__1cMPipeline_UseMfull_latency6kMIrk0_I_;
   3.142 +text: .text%__1cRMachSpillCopyNodePoper_input_base6kM_I_: ad_sparc.o;
   3.143 +text: .text%__1cENodeKmatch_edge6kMI_I_;
   3.144 +text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.145 +text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.146 +text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_;
   3.147 +text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_;
   3.148 +text: .text%__1cOPhaseIdealLoopZremix_address_expressions6MpnENode__2_;
   3.149 +text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_;
   3.150 +text: .text%__1cICallNodeLbottom_type6kM_pknEType__;
   3.151 +text: .text%__1cOPhaseIdealLoopNget_late_ctrl6MpnENode_2_2_;
   3.152 +text: .text%JVM_CurrentTimeMillis;
   3.153 +text: .text%__1cENodeIIdentity6MpnOPhaseTransform__p0_;
   3.154 +text: .text%__1cIPipelinePoperand_latency6kMIpk0_I_;
   3.155 +text: .text%__1cKTypeAryPtrEhash6kM_i_;
   3.156 +text: .text%__1cETypeFxmeet6kMpk0_2_;
   3.157 +text: .text%__1cILRG_ListGextend6MII_v_;
   3.158 +text: .text%__1cJVectorSet2F6kMI_i_;
   3.159 +text: .text%__1cENodeQIdeal_DU_postCCP6MpnIPhaseCCP__p0_;
   3.160 +text: .text%__1cOtypeArrayKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
   3.161 +text: .text%__1cIProjNodeEhash6kM_I_;
   3.162 +text: .text%__1cIAddINodeGOpcode6kM_i_;
   3.163 +text: .text%__1cIIndexSet2t6Mp0_v_;
   3.164 +text: .text%__1cRmethodDataOopDescJnext_data6MpnLProfileData__2_;
   3.165 +text: .text%__1cITypeNodeJideal_reg6kM_I_;
   3.166 +text: .text%__1cYCallStaticJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.167 +text: .text%__1cMloadConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.168 +text: .text%__1cHPhiNodeLout_RegMask6kM_rknHRegMask__;
   3.169 +text: .text%__1cENodeHsize_of6kM_I_;
   3.170 +text: .text%__1cICmpPNodeGOpcode6kM_i_;
   3.171 +text: .text%__1cKNode_ArrayGremove6MI_v_;
   3.172 +text: .text%__1cHPhiNodeEhash6kM_I_;
   3.173 +text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__;
   3.174 +text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__;
   3.175 +text: .text%__1cKmethodOperJnum_edges6kM_I_: ad_sparc.o;
   3.176 +text: .text%__1cJStartNodeLbottom_type6kM_pknEType__;
   3.177 +text: .text%__1cHTypeIntFxmeet6kMpknEType__3_;
   3.178 +text: .text%__1cIProjNodeLbottom_type6kM_pknEType__;
   3.179 +text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__;
   3.180 +text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_;
   3.181 +text: .text%__1cICmpINodeGOpcode6kM_i_;
   3.182 +text: .text%Unsafe_CompareAndSwapLong;
   3.183 +text: .text%__1cNCatchProjNodeGOpcode6kM_i_;
   3.184 +text: .text%__1cQUnique_Node_ListGremove6MpnENode__v_;
   3.185 +text: .text%__1cENode2t6Mp0_v_;
   3.186 +text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_;
   3.187 +text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_;
   3.188 +text: .text%__1cTCreateExceptionNodeErule6kM_I_: ad_sparc_misc.o;
   3.189 +text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_;
   3.190 +text: .text%__1cHRegMaskMSmearToPairs6M_v_;
   3.191 +text: .text%__1cMPhaseIterGVNVadd_users_to_worklist6MpnENode__v_;
   3.192 +text: .text%__1cMloadConPNodeErule6kM_I_: ad_sparc_misc.o;
   3.193 +text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__;
   3.194 +text: .text%__1cMPipeline_UseJadd_usage6Mrk0_v_;
   3.195 +text: .text%__1cIAddPNodeKmatch_edge6kMI_I_;
   3.196 +text: .text%__1cJiRegIOperKin_RegMask6kMi_pknHRegMask__;
   3.197 +text: .text%__1cGIfNodeLbottom_type6kM_pknEType__: classes.o;
   3.198 +text: .text%__1cGcmpkey6Fpkv1_i_;
   3.199 +text: .text%__1cMMergeMemNodeGOpcode6kM_i_;
   3.200 +text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__;
   3.201 +text: .text%__1cIParmNodeGOpcode6kM_i_;
   3.202 +text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_;
   3.203 +text: .text%__1cHTypeIntEmake6Fiii_pk0_;
   3.204 +text: .text%__1cENodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
   3.205 +text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_;
   3.206 +text: .text%__1cKSchedulingWAddNodeToAvailableList6MpnENode__v_;
   3.207 +text: .text%__1cKSchedulingSChooseNodeToBundle6M_pnENode__;
   3.208 +text: .text%__1cKSchedulingPAddNodeToBundle6MpnENode_pknFBlock__v_;
   3.209 +text: .text%__1cICallNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.210 +text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
   3.211 +text: .text%__1cJLoadPNodeGOpcode6kM_i_;
   3.212 +text: .text%__1cMMutableSpaceIallocate6MI_pnIHeapWord__;
   3.213 +text: .text%__1cJPSPermGenSallocate_permanent6MI_pnIHeapWord__;
   3.214 +text: .text%__1cUParallelScavengeHeapWpermanent_mem_allocate6MI_pnIHeapWord__;
   3.215 +text: .text%__1cIMachNodeIpipeline6kM_pknIPipeline__;
   3.216 +text: .text%__1cMMutableSpaceMcas_allocate6MI_pnIHeapWord__;
   3.217 +text: .text%__1cNflagsRegPOperEtype6kM_pknEType__: ad_sparc.o;
   3.218 +text: .text%__1cHPhiNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.219 +text: .text%__1cMMachTypeNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.220 +text: .text%__1cJCatchNodeGOpcode6kM_i_;
   3.221 +text: .text%__1cIJVMStateLdebug_start6kM_I_;
   3.222 +text: .text%__1cENodeHdel_req6MI_v_;
   3.223 +text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_;
   3.224 +text: .text%__1cOAbstractICachePinvalidate_word6FpC_v_;
   3.225 +text: .text%__1cFBlockIis_Empty6kM_i_;
   3.226 +text: .text%__1cOThreadCritical2T6M_v_;
   3.227 +text: .text%__1cOThreadCritical2t6M_v_;
   3.228 +text: .text%method_compare: methodOop.o;
   3.229 +text: .text%__1cICodeHeapKfind_start6kMpv_1_;
   3.230 +text: .text%__1cETypeEhash6kM_i_;
   3.231 +text: .text%__1cRNativeInstructionLset_long_at6Mii_v_;
   3.232 +text: .text%__1cIAddPNodeLbottom_type6kM_pknEType__;
   3.233 +text: .text%__1cJCProjNodeEhash6kM_I_: classes.o;
   3.234 +text: .text%__1cIHaltNodeGOpcode6kM_i_;
   3.235 +text: .text%__1cFStateRMachNodeGenerator6MipnHCompile__pnIMachNode__;
   3.236 +text: .text%__1cHMatcherKReduceInst6MpnFState_irpnENode__pnIMachNode__;
   3.237 +text: .text%__1cICmpUNodeGOpcode6kM_i_;
   3.238 +text: .text%__1cOPhaseIdealLoopbIdom_lca_for_get_late_ctrl_internal6MpnENode_22_2_;
   3.239 +text: .text%__1cXPipeline_Use_Cycle_MaskCOr6Mrk0_v_;
   3.240 +text: .text%__1cILoadNodeEhash6kM_I_;
   3.241 +text: .text%__1cKTypeAryPtrKadd_offset6kMi_pknHTypePtr__;
   3.242 +text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_;
   3.243 +text: .text%__1cKHandleMark2T6M_v_;
   3.244 +text: .text%__1cZPhaseConservativeCoalesceIcoalesce6MpnFBlock__v_;
   3.245 +text: .text%__1cMPhaseIterGVNZremove_globally_dead_node6MpnENode__v_;
   3.246 +text: .text%__1cWShouldNotReachHereNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.247 +text: .text%__1cHPhiNodeKin_RegMask6kMI_rknHRegMask__;
   3.248 +text: .text%__1cILoadNodeLbottom_type6kM_pknEType__;
   3.249 +text: .text%JVM_ReleaseUTF;
   3.250 +text: .text%__1cJloadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.251 +text: .text%__1cJTypeTupleEhash6kM_i_;
   3.252 +text: .text%__1cMflagsRegOperEtype6kM_pknEType__: ad_sparc.o;
   3.253 +text: .text%__1cObranchConPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.254 +text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__;
   3.255 +text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__;
   3.256 +text: .text%__1cMOopMapStreamJfind_next6M_v_;
   3.257 +text: .text%__1cFDictI2i6M_v_;
   3.258 +text: .text%__1cKNode_ArrayEgrow6MI_v_;
   3.259 +text: .text%__1cHTypeIntEmake6Fi_pk0_;
   3.260 +text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_;
   3.261 +text: .text%__1cJloadPNodeIpipeline6kM_pknIPipeline__;
   3.262 +text: .text%__1cMMergeMemNodeLbottom_type6kM_pknEType__: memnode.o;
   3.263 +text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_;
   3.264 +text: .text%__1cOPSPromotionLABKinitialize6MnJMemRegion__v_;
   3.265 +text: .text%__1cJMultiNodeIproj_out6kMI_pnIProjNode__;
   3.266 +text: .text%__1cPindOffset13OperKin_RegMask6kMi_pknHRegMask__;
   3.267 +text: .text%__1cUcompI_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.268 +text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o;
   3.269 +text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_;
   3.270 +text: .text%__1cKCastPPNodeGOpcode6kM_i_;
   3.271 +text: .text%__1cOoop_RelocationFvalue6M_pC_: relocInfo.o;
   3.272 +text: .text%__1cOoop_RelocationGoffset6M_i_: relocInfo.o;
   3.273 +text: .text%__1cPSignatureStreamEnext6M_v_;
   3.274 +text: .text%__1cLLShiftINodeGOpcode6kM_i_;
   3.275 +text: .text%__1cMPhaseChaitinSuse_prior_register6MpnENode_I2pnFBlock_rnJNode_List_6_i_;
   3.276 +text: .text%__1cGIfNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.277 +text: .text%__1cGBitMapJset_union6M0_v_;
   3.278 +text: .text%__1cIConPNodeGOpcode6kM_i_;
   3.279 +text: .text%__1cJLoadINodeGOpcode6kM_i_;
   3.280 +text: .text%JVM_GetMethodIxExceptionTableLength;
   3.281 +text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__;
   3.282 +text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__;
   3.283 +text: .text%__1cNSharedRuntimeDd2i6Fd_i_;
   3.284 +text: .text%__1cVcompP_iRegP_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.285 +text: .text%__1cKRegionNodeEhash6kM_I_: classes.o;
   3.286 +text: .text%__1cNbranchConNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.287 +text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_;
   3.288 +text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_;
   3.289 +text: .text%__1cIAddPNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.290 +text: .text%__1cGBitMap2t6MpII_v_;
   3.291 +text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_;
   3.292 +text: .text%__1cMMachCallNodeLbottom_type6kM_pknEType__;
   3.293 +text: .text%__1cFParsePdo_one_bytecode6M_v_;
   3.294 +text: .text%__1cFParseNdo_exceptions6M_v_;
   3.295 +text: .text%__1cHPhiNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.296 +text: .text%__1cHMatcherKmatch_tree6MpknENode__pnIMachNode__;
   3.297 +text: .text%__1cMPhaseIterGVNKis_IterGVN6M_p0_: phaseX.o;
   3.298 +text: .text%__1cKimmI13OperIconstant6kM_i_: ad_sparc_clone.o;
   3.299 +text: .text%__1cCosVcurrent_stack_pointer6F_pC_;
   3.300 +text: .text%__1cEDict2F6kMpkv_pv_;
   3.301 +text: .text%__1cKRegionNodeLbottom_type6kM_pknEType__: classes.o;
   3.302 +text: .text%__1cENodeIdestruct6M_v_;
   3.303 +text: .text%__1cMCreateExNodeGOpcode6kM_i_;
   3.304 +text: .text%__1cIBoolNodeEhash6kM_I_;
   3.305 +text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
   3.306 +text: .text%__1cLTypeInstPtrFxmeet6kMpknEType__3_;
   3.307 +text: .text%__1cKNode_ArrayFclear6M_v_;
   3.308 +text: .text%__1cObranchConPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.309 +text: .text%__1cIProjNodeHsize_of6kM_I_;
   3.310 +text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_;
   3.311 +text: .text%__1cMloadConINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.312 +text: .text%__1cIHaltNodeKmatch_edge6kMI_I_: classes.o;
   3.313 +text: .text%__1cJloadBNodeErule6kM_I_: ad_sparc_misc.o;
   3.314 +text: .text%__1cHhashptr6Fpkv_i_;
   3.315 +text: .text%__1cMMachHaltNodeEjvms6kM_pnIJVMState__;
   3.316 +text: .text%__1cHhashkey6Fpkv_i_;
   3.317 +text: .text%__1cMPhaseChaitinHnew_lrg6MpknENode_I_v_;
   3.318 +text: .text%__1cIJVMStateJdebug_end6kM_I_;
   3.319 +text: .text%__1cIPhaseIFGMtest_edge_sq6kMII_i_;
   3.320 +text: .text%__1cJloadPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.321 +text: .text%__1cHSubNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.322 +text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_;
   3.323 +text: .text%__1cSaddP_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
   3.324 +text: .text%__1cIMachNodeHtwo_adr6kM_I_: ad_sparc.o;
   3.325 +text: .text%__1cNSafePointNodeHsize_of6kM_I_;
   3.326 +text: .text%__1cLTypeInstPtrKadd_offset6kMi_pknHTypePtr__;
   3.327 +text: .text%__1cHCmpNodeLbottom_type6kM_pknEType__: classes.o;
   3.328 +text: .text%__1cPcheckCastPPNodeIpipeline6kM_pknIPipeline__;
   3.329 +text: .text%__1cNLoadRangeNodeGOpcode6kM_i_;
   3.330 +text: .text%__1cNbranchConNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.331 +text: .text%__1cENode2t6Mp011_v_;
   3.332 +text: .text%__1cJStoreNodeKmatch_edge6kMI_I_;
   3.333 +text: .text%__1cOPSPromotionLABFflush6M_v_;
   3.334 +text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o;
   3.335 +text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o;
   3.336 +text: .text%__1cOcompU_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.337 +text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__;
   3.338 +text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_;
   3.339 +text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__;
   3.340 +text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_;
   3.341 +text: .text%__1cSCountedLoopEndNodeGOpcode6kM_i_;
   3.342 +text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__;
   3.343 +text: .text%__1cPcheckCastPPNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.344 +text: .text%__1cNCellTypeStateFmerge6kM0i_0_;
   3.345 +text: .text%__1cMPhaseIterGVNMsubsume_node6MpnENode_2_v_;
   3.346 +text: .text%__1cILoadNodeKmatch_edge6kMI_I_;
   3.347 +text: .text%__1cJloadINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.348 +text: .text%__1cNExceptionMark2T6M_v_;
   3.349 +text: .text%__1cNExceptionMark2t6MrpnGThread__v_;
   3.350 +text: .text%__1cITypeLongEhash6kM_i_;
   3.351 +text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__;
   3.352 +text: .text%__1cJiRegLOperEtype6kM_pknEType__: ad_sparc.o;
   3.353 +text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__;
   3.354 +text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__;
   3.355 +text: .text%__1cOloadConI13NodeLout_RegMask6kM_rknHRegMask__;
   3.356 +text: .text%__1cOloadConI13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.357 +text: .text%__1cObranchConPNodeIpipeline6kM_pknIPipeline__;
   3.358 +text: .text%__1cKStoreINodeGOpcode6kM_i_;
   3.359 +text: .text%__1cJcmpOpOperJnum_edges6kM_I_: ad_sparc_clone.o;
   3.360 +text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_;
   3.361 +text: .text%__1cJiRegPOperKin_RegMask6kMi_pknHRegMask__;
   3.362 +text: .text%__1cKRegionNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.363 +text: .text%__1cKstorePNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.364 +text: .text%__1cHPhiNodeHsize_of6kM_I_: cfgnode.o;
   3.365 +text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_;
   3.366 +text: .text%__1cQaddP_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.367 +text: .text%__1cSaddI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.368 +text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__;
   3.369 +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode_i_i_;
   3.370 +text: .text%__1cIBoolNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.371 +text: .text%__1cLCounterDataKcell_count6M_i_: ciMethodData.o;
   3.372 +text: .text%__1cHRegMaskMClearToPairs6M_v_;
   3.373 +text: .text%__1cRshlI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.374 +text: .text%__1cIMachOperDreg6kMpnNPhaseRegAlloc_pknENode__i_;
   3.375 +text: .text%__1cNPhaseCoalesceRcombine_these_two6MpnENode_2_v_;
   3.376 +text: .text%__1cKcmpOpPOperJnum_edges6kM_I_: ad_sparc_clone.o;
   3.377 +text: .text%__1cKTypeRawPtrKadd_offset6kMi_pknHTypePtr__;
   3.378 +text: .text%__1cMloadConINodeErule6kM_I_: ad_sparc_misc.o;
   3.379 +text: .text%__1cFArenaEgrow6MI_pv_;
   3.380 +text: .text%__1cMPhaseChaitinLinsert_proj6MpnFBlock_IpnENode_I_v_;
   3.381 +text: .text%__1cILoadNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.382 +text: .text%__1cJStoreNodeLbottom_type6kM_pknEType__;
   3.383 +text: .text%__1cIBoolNodeLbottom_type6kM_pknEType__: subnode.o;
   3.384 +text: .text%__1cNSafePointNodeSset_next_exception6Mp0_v_;
   3.385 +text: .text%__1cQaddP_reg_regNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.386 +text: .text%__1cIHaltNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.387 +text: .text%__1cPCheckCastPPNodeGOpcode6kM_i_;
   3.388 +text: .text%__1cKStorePNodeGOpcode6kM_i_;
   3.389 +text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o;
   3.390 +text: .text%__1cNflagsRegUOperEtype6kM_pknEType__: ad_sparc.o;
   3.391 +text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__;
   3.392 +text: .text%__1cPcheckCastPPNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
   3.393 +text: .text%__1cIAddPNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.394 +text: .text%__1cRInvocationCounterEinit6M_v_;
   3.395 +text: .text%__1cKNode_Array2t6MpnFArena__v_: block.o;
   3.396 +text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__;
   3.397 +text: .text%__1cXPhaseAggressiveCoalesceIcoalesce6MpnFBlock__v_;
   3.398 +text: .text%__1cFBlockScall_catch_cleanup6MrnLBlock_Array__v_;
   3.399 +text: .text%__1cObranchConUNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.400 +text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_;
   3.401 +text: .text%__1cIAddINodeLbottom_type6kM_pknEType__: classes.o;
   3.402 +text: .text%__1cHRetNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
   3.403 +text: .text%__1cKRegionNodeLout_RegMask6kM_rknHRegMask__;
   3.404 +text: .text%__1cKstorePNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.405 +text: .text%__1cMObjectLocker2T6M_v_;
   3.406 +text: .text%__1cOcompI_iRegNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.407 +text: .text%__1cICallNodeIIdentity6MpnOPhaseTransform__pnENode__: callnode.o;
   3.408 +text: .text%__1cMURShiftINodeGOpcode6kM_i_;
   3.409 +text: .text%__1cRmethodDataOopDescPinitialize_data6MpnOBytecodeStream_i_i_;
   3.410 +text: .text%__1cNRelocIteratorKset_limits6MpC1_v_;
   3.411 +text: .text%__1cIRootNodeGOpcode6kM_i_;
   3.412 +text: .text%__1cOloadConI13NodeIpipeline6kM_pknIPipeline__;
   3.413 +text: .text%__1cJloadPNodeLout_RegMask6kM_rknHRegMask__;
   3.414 +text: .text%__1cILoadNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.415 +text: .text%__1cTCreateExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.416 +text: .text%__1cFStateM_sub_Op_ConI6MpknENode__v_;
   3.417 +text: .text%__1cPcheckCastPPNodeErule6kM_I_: ad_sparc_misc.o;
   3.418 +text: .text%__1cISubINodeGOpcode6kM_i_;
   3.419 +text: .text%__1cNbranchConNodeIpipeline6kM_pknIPipeline__;
   3.420 +text: .text%__1cJTypeTupleEmake6FIppknEType__pk0_;
   3.421 +text: .text%__1cJTypeTupleGfields6FI_ppknEType__;
   3.422 +text: .text%__1cENodeFValue6kMpnOPhaseTransform__pknEType__;
   3.423 +text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__;
   3.424 +text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__;
   3.425 +text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_;
   3.426 +text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_;
   3.427 +text: .text%__1cKbranchNodeHsize_of6kM_I_: ad_sparc_misc.o;
   3.428 +text: .text%__1cHPhiNodeIadr_type6kM_pknHTypePtr__: cfgnode.o;
   3.429 +text: .text%__1cHAddNodeEhash6kM_I_;
   3.430 +text: .text%__1cENodeRdisconnect_inputs6Mp0_i_;
   3.431 +text: .text%__1cPsplit_flow_path6FpnIPhaseGVN_pnHPhiNode__pnENode__: cfgnode.o;
   3.432 +text: .text%__1cSaddI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
   3.433 +text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__;
   3.434 +text: .text%__1cHConNodeEhash6kM_I_;
   3.435 +text: .text%__1cLLShiftINodeLbottom_type6kM_pknEType__: classes.o;
   3.436 +text: .text%__1cNmethodOopDescIbci_from6kMpC_i_;
   3.437 +text: .text%__1cOMachReturnNodeIadr_type6kM_pknHTypePtr__;
   3.438 +text: .text%__1cNidealize_test6FpnIPhaseGVN_pnGIfNode__3_: ifnode.o;
   3.439 +text: .text%__1cITypeNodeHsize_of6kM_I_;
   3.440 +text: .text%__1cNSafePointNodeLbottom_type6kM_pknEType__: callnode.o;
   3.441 +text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__;
   3.442 +text: .text%__1cJloadINodeIpipeline6kM_pknIPipeline__;
   3.443 +text: .text%JVM_GetClassModifiers;
   3.444 +text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__;
   3.445 +text: .text%__1cNSafePointNodeOnext_exception6kM_p0_;
   3.446 +text: .text%JVM_GetClassAccessFlags;
   3.447 +text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_;
   3.448 +text: .text%__1cIsplit_if6FpnGIfNode_pnMPhaseIterGVN__pnENode__: ifnode.o;
   3.449 +text: .text%__1cHTypeAryEhash6kM_i_;
   3.450 +text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_;
   3.451 +text: .text%__1cJMultiNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
   3.452 +text: .text%__1cJCProjNodeLout_RegMask6kM_rknHRegMask__;
   3.453 +text: .text%__1cPPerfLongVariantGsample6M_v_;
   3.454 +text: .text%__1cJStoreNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.455 +text: .text%__1cMPhaseChaitinMyank_if_dead6MpnENode_pnFBlock_pnJNode_List_6_i_;
   3.456 +text: .text%__1cJCatchNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.457 +text: .text%__1cIMachOperNconstant_disp6kM_i_;
   3.458 +text: .text%__1cIMachOperFscale6kM_i_;
   3.459 +text: .text%__1cENode2t6Mp0111_v_;
   3.460 +text: .text%__1cFPhase2t6Mn0ALPhaseNumber__v_;
   3.461 +text: .text%__1cNCompileBrokerLmaybe_block6F_v_;
   3.462 +text: .text%__1cFBlockOcode_alignment6M_I_;
   3.463 +text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__;
   3.464 +text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__;
   3.465 +text: .text%__1cFStateM_sub_Op_RegP6MpknENode__v_;
   3.466 +text: .text%JVM_GetCPMethodSignatureUTF;
   3.467 +text: .text%__1cFChunkJnext_chop6M_v_;
   3.468 +text: .text%__1cMMergeMemNodeEhash6kM_I_;
   3.469 +text: .text%__1cKSchedulingbFComputeRegisterAntidependencies6MpnFBlock__v_;
   3.470 +text: .text%__1cKSchedulingPComputeUseCount6MpknFBlock__v_;
   3.471 +text: .text%__1cHTypePtrHget_con6kM_i_;
   3.472 +text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o;
   3.473 +text: .text%__1cIMachNodePcompute_padding6kMi_i_: ad_sparc.o;
   3.474 +text: .text%__1cIMachNodeSalignment_required6kM_i_: ad_sparc.o;
   3.475 +text: .text%__1cMPhaseChaitinSget_spillcopy_wide6MpnENode_2I_2_;
   3.476 +text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__;
   3.477 +text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_;
   3.478 +text: .text%__1cQaddI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.479 +text: .text%__1cObranchConUNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.480 +text: .text%__1cFBlockJfind_node6kMpknENode__I_;
   3.481 +text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o;
   3.482 +text: .text%__1cHCmpNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.483 +text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__;
   3.484 +text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_;
   3.485 +text: .text%__1cHTypePtrEhash6kM_i_;
   3.486 +text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__;
   3.487 +text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_;
   3.488 +text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_;
   3.489 +text: .text%__1cUParallelScavengeHeapRallocate_new_tlab6MI_pnIHeapWord__;
   3.490 +text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_;
   3.491 +text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_;
   3.492 +text: .text%__1cFBlockLfind_remove6MpknENode__v_;
   3.493 +text: .text%__1cIIndexSetJlrg_union6MIIkIpknIPhaseIFG_rknHRegMask__I_;
   3.494 +text: .text%__1cKMemBarNodeKmatch_edge6kMI_I_: classes.o;
   3.495 +text: .text%__1cUcompI_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
   3.496 +text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_;
   3.497 +text: .text%__1cNObjectMonitorEexit6MpnGThread__v_;
   3.498 +text: .text%__1cIimmPOperEtype6kM_pknEType__: ad_sparc_clone.o;
   3.499 +text: .text%__1cMloadConPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.500 +text: .text%__1cLMachNopNodeIpipeline6kM_pknIPipeline__;
   3.501 +text: .text%__1cJloadINodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.502 +text: .text%__1cNloadRangeNodeErule6kM_I_: ad_sparc_misc.o;
   3.503 +text: .text%__1cVCompressedWriteStream2t6Mi_v_;
   3.504 +text: .text%__1cNObjectMonitorFenter6MpnGThread__v_;
   3.505 +text: .text%__1cENodeKreplace_by6Mp0_v_;
   3.506 +text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
   3.507 +text: .text%__1cMMergeMemNodePiteration_setup6Mpk0_v_;
   3.508 +text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
   3.509 +text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
   3.510 +text: .text%__1cRMachSpillCopyNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
   3.511 +text: .text%__1cKRegionNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.512 +text: .text%__1cJStoreNodeEhash6kM_I_;
   3.513 +text: .text%__1cSaddP_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.514 +text: .text%__1cQaddI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
   3.515 +text: .text%__1cIGraphKitJclone_map6M_pnNSafePointNode__;
   3.516 +text: .text%__1cKIfTrueNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.517 +text: .text%__1cRMemBarReleaseNodeGOpcode6kM_i_;
   3.518 +text: .text%__1cKbranchNodeIpipeline6kM_pknIPipeline__;
   3.519 +text: .text%__1cIMachOperIconstant6kM_i_;
   3.520 +text: .text%__1cWMutableSpaceUsedHelperLtake_sample6M_x_: spaceCounters.o;
   3.521 +text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_;
   3.522 +text: .text%__1cRPSOldPromotionLABFflush6M_v_;
   3.523 +text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__;
   3.524 +text: .text%__1cPcompP_iRegPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.525 +text: .text%__1cLBoxLockNodeGOpcode6kM_i_;
   3.526 +text: .text%__1cIciObjectJset_ident6MI_v_;
   3.527 +text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__;
   3.528 +text: .text%__1cKTypeRawPtrEhash6kM_i_;
   3.529 +text: .text%__1cIBoolNodeKmatch_edge6kMI_I_: subnode.o;
   3.530 +text: .text%__1cMMergeMemNodePset_base_memory6MpnENode__v_;
   3.531 +text: .text%__1cLIfFalseNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.532 +text: .text%__1cCosPelapsed_counter6F_x_;
   3.533 +text: .text%__1cGBitMapOset_difference6M0_v_;
   3.534 +text: .text%__1cNSafePointNodeEjvms6kM_pnIJVMState__: callnode.o;
   3.535 +text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
   3.536 +text: .text%__1cMMergeMemNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.537 +text: .text%JVM_GetMethodIxLocalsCount;
   3.538 +text: .text%__1cNloadRangeNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.539 +text: .text%JVM_CurrentThread;
   3.540 +text: .text%__1cENodeHget_ptr6kM_i_;
   3.541 +text: .text%__1cRcmpFastUnlockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.542 +text: .text%__1cIAndINodeGOpcode6kM_i_;
   3.543 +text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_;
   3.544 +text: .text%__1cENodeHins_req6MIp0_v_;
   3.545 +text: .text%__1cMPhaseChaitinFUnion6MpknENode_3_v_;
   3.546 +text: .text%__1cMloadConLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.547 +text: .text%__1cHAddNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.548 +text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
   3.549 +text: .text%__1cKstoreINodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.550 +text: .text%__1cOFastUnlockNodeGOpcode6kM_i_;
   3.551 +text: .text%__1cITypeNodeDcmp6kMrknENode__I_;
   3.552 +text: .text%__1cIHaltNodeLbottom_type6kM_pknEType__;
   3.553 +text: .text%__1cKstorePNodeIpipeline6kM_pknIPipeline__;
   3.554 +text: .text%__1cKcmpOpUOperJnum_edges6kM_I_: ad_sparc_clone.o;
   3.555 +text: .text%__1cLstoreI0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.556 +text: .text%__1cIciObject2t6MnGHandle__v_;
   3.557 +text: .text%__1cNSafePointNodeKmatch_edge6kMI_I_;
   3.558 +text: .text%__1cIMachOperOindex_position6kM_i_;
   3.559 +text: .text%__1cXmembar_release_lockNodeIpipeline6kM_pknIPipeline__;
   3.560 +text: .text%__1cJVectorSet2L6MI_rnDSet__;
   3.561 +text: .text%__1cOcompU_iRegNodeIpipeline6kM_pknIPipeline__;
   3.562 +text: .text%__1cMMergeMemNodeJmemory_at6kMI_pnENode__;
   3.563 +text: .text%__1cSaddP_reg_imm13NodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.564 +text: .text%__1cPindOffset13OperNconstant_disp6kM_i_: ad_sparc.o;
   3.565 +text: .text%__1cPindOffset13OperFscale6kM_i_: ad_sparc.o;
   3.566 +text: .text%__1cPindOffset13OperNbase_position6kM_i_: ad_sparc.o;
   3.567 +text: .text%__1cWShouldNotReachHereNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.568 +text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__;
   3.569 +text: .text%__1cUcompI_iReg_imm13NodeIpipeline6kM_pknIPipeline__;
   3.570 +text: .text%__1cVcompP_iRegP_imm13NodeIpipeline6kM_pknIPipeline__;
   3.571 +text: .text%__1cQaddP_reg_regNodeIpipeline6kM_pknIPipeline__;
   3.572 +text: .text%__1cQaddP_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
   3.573 +text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_;
   3.574 +text: .text%__1cIJVMStateIof_depth6kMi_p0_;
   3.575 +text: .text%__1cNSharedRuntimeElrem6Fxx_x_;
   3.576 +text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_;
   3.577 +text: .text%__1cMciMethodDataLbci_to_data6Mi_pnLProfileData__;
   3.578 +text: .text%__1cRMemBarAcquireNodeGOpcode6kM_i_;
   3.579 +text: .text%__1cKo0RegPOperEtype6kM_pknEType__: ad_sparc.o;
   3.580 +text: .text%__1cSaddI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
   3.581 +text: .text%__1cObranchConUNodeIpipeline6kM_pknIPipeline__;
   3.582 +text: .text%__1cJVectorSet2t6MpnFArena__v_;
   3.583 +text: .text%__1cKTypeAryPtrFxmeet6kMpknEType__3_;
   3.584 +text: .text%__1cVcompP_iRegP_imm13NodeErule6kM_I_: ad_sparc_misc.o;
   3.585 +text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_;
   3.586 +text: .text%__1cICallNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
   3.587 +text: .text%__1cJTraceTime2T6M_v_;
   3.588 +text: .text%__1cITypeNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.589 +text: .text%__1cPcheckCastPPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.590 +text: .text%__1cKMemBarNodeLbottom_type6kM_pknEType__: classes.o;
   3.591 +text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_;
   3.592 +text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_;
   3.593 +text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_;
   3.594 +text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__;
   3.595 +text: .text%__1cMMergeMemNodeNset_memory_at6MIpnENode__v_;
   3.596 +text: .text%__1cLstoreI0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.597 +text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o;
   3.598 +text: .text%__1cQaddI_reg_regNodeIpipeline6kM_pknIPipeline__;
   3.599 +text: .text%__1cENode2t6Mp01_v_;
   3.600 +text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
   3.601 +text: .text%__1cKstoreINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.602 +text: .text%__1cRshrI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.603 +text: .text%__1cFStateM_sub_Op_AddP6MpknENode__v_;
   3.604 +text: .text%__1cTCreateExceptionNodeLout_RegMask6kM_rknHRegMask__;
   3.605 +text: .text%__1cITypeFuncEhash6kM_i_;
   3.606 +text: .text%__1cLBoxLockNodeJideal_reg6kM_I_: classes.o;
   3.607 +text: .text%__1cMTypeKlassPtrEhash6kM_i_;
   3.608 +text: .text%__1cMCallLeafNodeGOpcode6kM_i_;
   3.609 +text: .text%__1cSCallLeafDirectNodeIpipeline6kM_pknIPipeline__;
   3.610 +text: .text%__1cHPhiNodeEmake6FpnENode_2pknEType_pknHTypePtr__p0_;
   3.611 +text: .text%__1cIAddPNodeQmach_bottom_type6FpknIMachNode__pknEType__;
   3.612 +text: .text%__1cOcompU_iRegNodeErule6kM_I_: ad_sparc_misc.o;
   3.613 +text: .text%__1cJiRegLOperKin_RegMask6kMi_pknHRegMask__;
   3.614 +text: .text%__1cNflagsRegPOperKin_RegMask6kMi_pknHRegMask__;
   3.615 +text: .text%__1cHOrINodeGOpcode6kM_i_;
   3.616 +text: .text%__1cXmembar_acquire_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.617 +text: .text%JVM_GetCPMethodClassNameUTF;
   3.618 +text: .text%__1cMloadConDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.619 +text: .text%__1cMflagsRegOperKin_RegMask6kMi_pknHRegMask__;
   3.620 +text: .text%__1cLProfileDataPfollow_contents6M_v_: ciMethodData.o;
   3.621 +text: .text%__1cLProfileDataPadjust_pointers6M_v_: ciMethodData.o;
   3.622 +text: .text%__1cFStateM_sub_Op_RegI6MpknENode__v_;
   3.623 +text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_;
   3.624 +text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
   3.625 +text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_;
   3.626 +text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.627 +text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_;
   3.628 +text: .text%__1cHCompileYout_preserve_stack_slots6F_I_;
   3.629 +text: .text%__1cIGraphKitLclean_stack6Mi_v_;
   3.630 +text: .text%__1cKStoreBNodeGOpcode6kM_i_;
   3.631 +text: .text%__1cLklassVtableToop_adjust_pointers6M_v_;
   3.632 +text: .text%__1cLklassVtableToop_follow_contents6M_v_;
   3.633 +text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_;
   3.634 +text: .text%__1cJlabelOperFlabel6kM_pnFLabel__: ad_sparc.o;
   3.635 +text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__;
   3.636 +text: .text%__1cIMachNodeIadr_type6kM_pknHTypePtr__;
   3.637 +text: .text%__1cIMachOperMdisp_as_type6kM_pknHTypePtr__: ad_sparc.o;
   3.638 +text: .text%__1cRshlI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
   3.639 +text: .text%JVM_IsNaN;
   3.640 +text: .text%__1cNloadRangeNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.641 +text: .text%__1cKbranchNodeLout_RegMask6kM_rknHRegMask__;
   3.642 +text: .text%__1cJStartNodeGOpcode6kM_i_;
   3.643 +text: .text%__1cQregF_to_stkINodeErule6kM_I_: ad_sparc_misc.o;
   3.644 +text: .text%__1cENodeDcmp6kMrk0_I_;
   3.645 +text: .text%__1cHTypeIntFxdual6kM_pknEType__;
   3.646 +text: .text%__1cIciObjectIencoding6M_pnI_jobject__;
   3.647 +text: .text%__1cMmerge_region6FpnKRegionNode_pnIPhaseGVN__pnENode__: cfgnode.o;
   3.648 +text: .text%__1cJAssemblerOpatched_branch6Fiii_i_;
   3.649 +text: .text%__1cJAssemblerSbranch_destination6Fii_i_;
   3.650 +text: .text%__1cRshlI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
   3.651 +text: .text%__1cENodeIadd_prec6Mp0_v_;
   3.652 +text: .text%__1cLBoxLockNodeLbottom_type6kM_pknEType__: classes.o;
   3.653 +text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__;
   3.654 +text: .text%__1cSaddP_reg_imm13NodeIpipeline6kM_pknIPipeline__;
   3.655 +text: .text%__1cWMachCallStaticJavaNodePret_addr_offset6M_i_;
   3.656 +text: .text%__1cITypeFuncEmake6FpknJTypeTuple_3_pk0_;
   3.657 +text: .text%__1cMloadConDNodeErule6kM_I_: ad_sparc_misc.o;
   3.658 +text: .text%__1cSCallLeafDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.659 +text: .text%__1cKTypeOopPtrHget_con6kM_i_;
   3.660 +text: .text%__1cQsubI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.661 +text: .text%__1cIRootNodeLbottom_type6kM_pknEType__: classes.o;
   3.662 +text: .text%__1cJloadLNodeErule6kM_I_: ad_sparc_misc.o;
   3.663 +text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_;
   3.664 +text: .text%__1cJLoadBNodeGOpcode6kM_i_;
   3.665 +text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_;
   3.666 +text: .text%__1cSvframeStreamCommonEnext6M_v_;
   3.667 +text: .text%__1cIAddINodeGadd_id6kM_pknEType__: classes.o;
   3.668 +text: .text%__1cIRootNodeNis_block_proj6kM_pknENode__: classes.o;
   3.669 +text: .text%__1cMMergeMemNode2t6MpnENode__v_;
   3.670 +text: .text%__1cOcompI_iRegNodeIpipeline6kM_pknIPipeline__;
   3.671 +text: .text%__1cRMachSafePointNodeKin_RegMask6kMI_rknHRegMask__;
   3.672 +text: .text%__1cJloadINodeLout_RegMask6kM_rknHRegMask__;
   3.673 +text: .text%__1cPindOffset13OperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
   3.674 +text: .text%__1cPindOffset13OperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
   3.675 +text: .text%__1cPindOffset13OperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
   3.676 +text: .text%__1cICmpPNodeDsub6kMpknEType_3_3_;
   3.677 +text: .text%__1cHMemNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__;
   3.678 +text: .text%__1cIGraphKitQkill_dead_locals6M_v_;
   3.679 +text: .text%__1cCosMvm_page_size6F_i_;
   3.680 +text: .text%__1cRlock_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__;
   3.681 +text: .text%__1cVcompP_iRegP_imm13NodeLout_RegMask6kM_rknHRegMask__;
   3.682 +text: .text%__1cUcompI_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__;
   3.683 +text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o;
   3.684 +text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_;
   3.685 +text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_;
   3.686 +text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.687 +text: .text%__1cLklassItableToop_adjust_pointers6M_v_;
   3.688 +text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_;
   3.689 +text: .text%__1cLklassItableToop_follow_contents6M_v_;
   3.690 +text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_;
   3.691 +text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
   3.692 +text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.693 +text: .text%__1cNSafePointNodeGOpcode6kM_i_;
   3.694 +text: .text%__1cJLoadPNodeJideal_reg6kM_I_: classes.o;
   3.695 +text: .text%__1cMPhaseChaitinPset_was_spilled6MpnENode__v_;
   3.696 +text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__;
   3.697 +text: .text%__1cMloadConPNodeIpipeline6kM_pknIPipeline__;
   3.698 +text: .text%__1cIGraphKit2t6MpnIJVMState__v_;
   3.699 +text: .text%__1cPconvI2L_regNodeErule6kM_I_: ad_sparc_misc.o;
   3.700 +text: .text%__1cQPreserveJVMState2T6M_v_;
   3.701 +text: .text%__1cRshrI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
   3.702 +text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_;
   3.703 +text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.704 +text: .text%__1cTCreateExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.705 +text: .text%__1cXmembar_release_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.706 +text: .text%__1cMloadConLNodeErule6kM_I_: ad_sparc_misc.o;
   3.707 +text: .text%__1cLConvI2LNodeGOpcode6kM_i_;
   3.708 +text: .text%__1cITypeLongFxmeet6kMpknEType__3_;
   3.709 +text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_;
   3.710 +text: .text%__1cFParseMmerge_common6Mpn0AFBlock_i_v_;
   3.711 +text: .text%__1cPciInstanceKlassYunique_concrete_subklass6M_p0_;
   3.712 +text: .text%__1cLBoxLockNodeHsize_of6kM_I_;
   3.713 +text: .text%__1cOPhaseIdealLoopIset_idom6MpnENode_2I_v_;
   3.714 +text: .text%JVM_GetCPFieldClassNameUTF;
   3.715 +text: .text%__1cSaddI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
   3.716 +text: .text%__1cNLoadKlassNodeGOpcode6kM_i_;
   3.717 +text: .text%__1cRcmpFastUnlockNodeIpipeline6kM_pknIPipeline__;
   3.718 +text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__;
   3.719 +text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__;
   3.720 +text: .text%__1cOcompI_iRegNodeErule6kM_I_: ad_sparc_misc.o;
   3.721 +text: .text%__1cRshlI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
   3.722 +text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_;
   3.723 +text: .text%__1cICmpINodeDsub6kMpknEType_3_3_;
   3.724 +text: .text%__1cLRShiftINodeGOpcode6kM_i_;
   3.725 +text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__;
   3.726 +text: .text%__1cSCallLeafDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.727 +text: .text%__1cPcheckCastPPNodeLout_RegMask6kM_rknHRegMask__;
   3.728 +text: .text%__1cOPhaseIdealLoopQconditional_move6MpnENode__2_;
   3.729 +text: .text%__1cJStoreNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.730 +text: .text%__1cITypeFuncEmake6FpnIciMethod__pk0_;
   3.731 +text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_;
   3.732 +text: .text%__1cJloadSNodeErule6kM_I_: ad_sparc_misc.o;
   3.733 +text: .text%__1cKStoreCNodeGOpcode6kM_i_;
   3.734 +text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_;
   3.735 +text: .text%__1cMstringStreamFwrite6MpkcI_v_;
   3.736 +text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__;
   3.737 +text: .text%__1cHRetNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.738 +text: .text%__1cPcmpFastLockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.739 +text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_;
   3.740 +text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_;
   3.741 +text: .text%__1cIBoolNodeJideal_reg6kM_I_: subnode.o;
   3.742 +text: .text%__1cHCmpNodeJideal_reg6kM_I_: classes.o;
   3.743 +text: .text%__1cRloadConP_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.744 +text: .text%__1cETypeFwiden6kMpk0_2_: type.o;
   3.745 +text: .text%__1cLstoreI0NodeIpipeline6kM_pknIPipeline__;
   3.746 +text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
   3.747 +text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
   3.748 +text: .text%__1cMloadConPNodeLout_RegMask6kM_rknHRegMask__;
   3.749 +text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__;
   3.750 +text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__;
   3.751 +text: .text%__1cPcompP_iRegPNodeIpipeline6kM_pknIPipeline__;
   3.752 +text: .text%__1cNloadRangeNodeLout_RegMask6kM_rknHRegMask__;
   3.753 +text: .text%__1cNCatchProjNodeLbottom_type6kM_pknEType__: cfgnode.o;
   3.754 +text: .text%__1cNCatchProjNodeHsize_of6kM_I_: cfgnode.o;
   3.755 +text: .text%__1cFStateK_sub_Op_If6MpknENode__v_;
   3.756 +text: .text%__1cTciConstantPoolCacheDget6Mi_pv_;
   3.757 +text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_;
   3.758 +text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_;
   3.759 +text: .text%__1cQsubI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
   3.760 +text: .text%__1cXmembar_acquire_lockNodeIpipeline6kM_pknIPipeline__;
   3.761 +text: .text%__1cQaddP_reg_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.762 +text: .text%__1cPCountedLoopNodeGOpcode6kM_i_;
   3.763 +text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_;
   3.764 +text: .text%__1cIAndLNodeGOpcode6kM_i_;
   3.765 +text: .text%__1cIGraphKitOset_all_memory6MpnENode__v_;
   3.766 +text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
   3.767 +text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_;
   3.768 +text: .text%__1cFParseFBlockKinit_graph6Mp0_v_;
   3.769 +text: .text%__1cMTypeKlassPtrEmake6FnHTypePtrDPTR_pnHciKlass_i_pk0_;
   3.770 +text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__;
   3.771 +text: .text%__1cCosGmalloc6FI_pv_;
   3.772 +text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_;
   3.773 +text: .text%__1cIGraphKitTadd_exception_state6MpnNSafePointNode__v_;
   3.774 +text: .text%__1cIimmPOperIconstant6kM_i_: ad_sparc_clone.o;
   3.775 +text: .text%__1cIregDOperEtype6kM_pknEType__: ad_sparc.o;
   3.776 +text: .text%__1cKstoreINodeIpipeline6kM_pknIPipeline__;
   3.777 +text: .text%__1cICodeHeapLheader_size6F_I_;
   3.778 +text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_;
   3.779 +text: .text%__1cNloadRangeNodeIpipeline6kM_pknIPipeline__;
   3.780 +text: .text%__1cFParseMdo_one_block6M_v_;
   3.781 +text: .text%__1cOPhaseIdealLoopRregister_new_node6MpnENode_2_v_;
   3.782 +text: .text%__1cLstoreB0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.783 +text: .text%__1cIAddINodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.784 +text: .text%__1cIJVMStateLdebug_depth6kM_I_;
   3.785 +text: .text%__1cENodeNadd_req_batch6Mp0I_v_;
   3.786 +text: .text%__1cKciTypeFlowLStateVectorOpush_translate6MpnGciType__v_;
   3.787 +text: .text%__1cJloadFNodeErule6kM_I_: ad_sparc_misc.o;
   3.788 +text: .text%__1cPVirtualCallDataKcell_count6M_i_: ciMethodData.o;
   3.789 +text: .text%__1cIMachNodeOpipeline_class6F_pknIPipeline__;
   3.790 +text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
   3.791 +text: .text%__1cIPhaseGVNUtransform_no_reclaim6MpnENode__2_;
   3.792 +text: .text%__1cIAddLNodeGOpcode6kM_i_;
   3.793 +text: .text%__1cLLShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
   3.794 +text: .text%__1cOMethodLivenessKBasicBlockJpropagate6Mp0_v_;
   3.795 +text: .text%__1cKciTypeFlowGJsrSet2t6MpnFArena_i_v_;
   3.796 +text: .text%__1cHMatcherKmatch_sfpt6MpnNSafePointNode__pnIMachNode__;
   3.797 +text: .text%__1cMFastLockNodeGOpcode6kM_i_;
   3.798 +text: .text%__1cLConvL2INodeGOpcode6kM_i_;
   3.799 +text: .text%__1cIXorINodeGOpcode6kM_i_;
   3.800 +text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_;
   3.801 +text: .text%__1cOcompU_iRegNodeLout_RegMask6kM_rknHRegMask__;
   3.802 +text: .text%__1cPorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.803 +text: .text%__1cKTypeAryPtrFklass6kM_pnHciKlass__;
   3.804 +text: .text%__1cIGraphKitbDtransfer_exceptions_into_jvms6M_pnIJVMState__;
   3.805 +text: .text%__1cLTypeInstPtrFxdual6kM_pknEType__;
   3.806 +text: .text%__1cNLoadRangeNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.807 +text: .text%__1cFBlockKsched_call6MrnHMatcher_rnLBlock_Array_IrnJNode_List_pipnMMachCallNode_rnJVectorSet__I_;
   3.808 +text: .text%__1cSsafePoint_pollNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.809 +text: .text%__1cILoadNodeHsize_of6kM_I_;
   3.810 +text: .text%__1cRInterpretedRFrameKtop_method6kM_nMmethodHandle__: rframe.o;
   3.811 +text: .text%__1cIGraphKitJsync_jvms6kM_pnIJVMState__;
   3.812 +text: .text%__1cICmpUNodeDsub6kMpknEType_3_3_;
   3.813 +text: .text%__1cEUTF8Hstrrchr6FpWiW_1_;
   3.814 +text: .text%__1cPcompP_iRegPNodeErule6kM_I_: ad_sparc_misc.o;
   3.815 +text: .text%__1cPsp_ptr_RegPOperKin_RegMask6kMi_pknHRegMask__;
   3.816 +text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_;
   3.817 +text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_;
   3.818 +text: .text%__1cRshrP_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.819 +text: .text%__1cLBoxLockNodeLout_RegMask6kM_rknHRegMask__;
   3.820 +text: .text%__1cITypeLongEmake6Fxxi_pk0_;
   3.821 +text: .text%__1cNloadKlassNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.822 +text: .text%JVM_GetCPMethodNameUTF;
   3.823 +text: .text%__1cMtlsLoadPNodeErule6kM_I_: ad_sparc_misc.o;
   3.824 +text: .text%__1cLstoreB0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.825 +text: .text%__1cIimmIOperIconstant6kM_i_: ad_sparc_clone.o;
   3.826 +text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
   3.827 +text: .text%__1cHBitDataKcell_count6M_i_: ciMethodData.o;
   3.828 +text: .text%__1cURethrowExceptionNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
   3.829 +text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__;
   3.830 +text: .text%__1cQsubI_reg_regNodeIpipeline6kM_pknIPipeline__;
   3.831 +text: .text%__1cJloadBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.832 +text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_;
   3.833 +text: .text%__1cQaddP_reg_regNodeLout_RegMask6kM_rknHRegMask__;
   3.834 +text: .text%__1cKmethodOperGmethod6kM_i_: ad_sparc.o;
   3.835 +text: .text%__1cFKlassIsubklass6kM_p0_;
   3.836 +text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__;
   3.837 +text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o;
   3.838 +text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__;
   3.839 +text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_;
   3.840 +text: .text%__1cIMulLNodeGOpcode6kM_i_;
   3.841 +text: .text%__1cKReturnNodeKmatch_edge6kMI_I_;
   3.842 +text: .text%__1cGOopMap2t6Mii_v_;
   3.843 +text: .text%__1cNloadConP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.844 +text: .text%__1cJLoadSNodeGOpcode6kM_i_;
   3.845 +text: .text%__1cLPCTableNodeLbottom_type6kM_pknEType__;
   3.846 +text: .text%__1cKBranchDataKcell_count6M_i_: ciMethodData.o;
   3.847 +text: .text%__1cMCreateExNodeKmatch_edge6kMI_I_: classes.o;
   3.848 +text: .text%__1cRloadConP_pollNodeErule6kM_I_: ad_sparc_misc.o;
   3.849 +text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_;
   3.850 +text: .text%__1cRcmpFastUnlockNodeErule6kM_I_: ad_sparc_misc.o;
   3.851 +text: .text%__1cJLoadLNodeGOpcode6kM_i_;
   3.852 +text: .text%__1cMciMethodDataLhas_trap_at6MpnLProfileData_i_i_;
   3.853 +text: .text%__1cPThreadLocalNodeLbottom_type6kM_pknEType__: classes.o;
   3.854 +text: .text%__1cKReturnNodeGOpcode6kM_i_;
   3.855 +text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_;
   3.856 +text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__;
   3.857 +text: .text%__1cNflagsRegUOperKin_RegMask6kMi_pknHRegMask__;
   3.858 +text: .text%__1cMloadConINodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.859 +text: .text%__1cNCatchProjNodeEhash6kM_I_;
   3.860 +text: .text%__1cEUTF8Ounicode_length6Fpkci_i_;
   3.861 +text: .text%__1cHCompileTProcess_OopMap_Node6MpnIMachNode_i_v_;
   3.862 +text: .text%__1cNCallGenerator2t6MpnIciMethod__v_;
   3.863 +text: .text%__1cKCompiledIC2t6MpnKRelocation__v_;
   3.864 +text: .text%__1cKCompiledICOic_destination6kM_pC_;
   3.865 +text: .text%__1cHTypeAryFxmeet6kMpknEType__3_;
   3.866 +text: .text%__1cICallNodeJideal_reg6kM_I_: callnode.o;
   3.867 +text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__;
   3.868 +text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_;
   3.869 +text: .text%__1cPmethodDataKlassIoop_size6kMpnHoopDesc__i_;
   3.870 +text: .text%__1cKciTypeFlowQadd_to_work_list6Mpn0AFBlock__v_;
   3.871 +text: .text%__1cKciTypeFlowKflow_block6Mpn0AFBlock_pn0ALStateVector_pn0AGJsrSet__v_;
   3.872 +text: .text%__1cEUTF8Enext6FpkcpH_pc_;
   3.873 +text: .text%__1cJVectorSetFClear6M_v_;
   3.874 +text: .text%__1cHCompileSflatten_alias_type6kMpknHTypePtr__3_;
   3.875 +text: .text%__1cCosEfree6Fpv_v_;
   3.876 +text: .text%__1cRshrI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
   3.877 +text: .text%__1cPcmpFastLockNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.878 +text: .text%__1cYciExceptionHandlerStreamFcount6M_i_;
   3.879 +text: .text%__1cKciTypeFlowFBlockScompute_exceptions6M_v_;
   3.880 +text: .text%__1cIPhaseIFGFUnion6MII_v_;
   3.881 +text: .text%__1cYCallStaticJavaDirectNodeLout_RegMask6kM_rknHRegMask__;
   3.882 +text: .text%__1cILoopNodeGOpcode6kM_i_;
   3.883 +text: .text%__1cICmpLNodeGOpcode6kM_i_;
   3.884 +text: .text%__1cOPhaseIdealLoopGspinup6MpnENode_2222pnLsmall_cache__2_;
   3.885 +text: .text%__1cQaddI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
   3.886 +text: .text%__1cMindIndexOperJnum_edges6kM_I_: ad_sparc.o;
   3.887 +text: .text%__1cIConLNodeGOpcode6kM_i_;
   3.888 +text: .text%JVM_GetCPFieldSignatureUTF;
   3.889 +text: .text%__1cENodeLnonnull_req6kM_p0_;
   3.890 +text: .text%__1cYCallStaticJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
   3.891 +text: .text%__1cYCallStaticJavaDirectNodeKmethod_set6Mi_v_;
   3.892 +text: .text%__1cMelapsedTimerFstart6M_v_;
   3.893 +text: .text%__1cMelapsedTimerEstop6M_v_;
   3.894 +text: .text%__1cMURShiftINodeLbottom_type6kM_pknEType__: classes.o;
   3.895 +text: .text%__1cSaddP_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
   3.896 +text: .text%__1cOPhaseIdealLoopOfind_use_block6MpnENode_22222_2_;
   3.897 +text: .text%__1cOPhaseIdealLoopKhandle_use6MpnENode_2pnLsmall_cache_22222_v_;
   3.898 +text: .text%jni_DeleteLocalRef: jni.o;
   3.899 +text: .text%__1cIGraphKit2t6M_v_;
   3.900 +text: .text%__1cMoutputStreamDput6Mc_v_;
   3.901 +text: .text%__1cIGraphKitNset_map_clone6MpnNSafePointNode__v_;
   3.902 +text: .text%__1cRInterpretedRFrameEinit6M_v_;
   3.903 +text: .text%__1cHMulNodeEhash6kM_I_;
   3.904 +text: .text%__1cENodeJset_req_X6MIp0pnMPhaseIterGVN__v_;
   3.905 +text: .text%__1cJLoadINodeJideal_reg6kM_I_: classes.o;
   3.906 +text: .text%__1cINodeHashLhash_insert6MpnENode__v_;
   3.907 +text: .text%__1cKstoreCNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.908 +text: .text%__1cENodeLbottom_type6kM_pknEType__;
   3.909 +text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__;
   3.910 +text: .text%__1cKstoreCNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.911 +text: .text%__1cIAddPNodeJideal_reg6kM_I_: classes.o;
   3.912 +text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__;
   3.913 +text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__;
   3.914 +text: .text%__1cKMemBarNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.915 +text: .text%__1cGvframe2t6MpknFframe_pknLRegisterMap_pnKJavaThread__v_;
   3.916 +text: .text%__1cLRegisterMap2t6Mpk0_v_;
   3.917 +text: .text%__1cXmembar_acquire_lockNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.918 +text: .text%__1cOcompI_iRegNodeLout_RegMask6kM_rknHRegMask__;
   3.919 +text: .text%__1cIciSymbolEmake6Fpkc_p0_;
   3.920 +text: .text%__1cPorI_reg_regNodeIpipeline6kM_pknIPipeline__;
   3.921 +text: .text%__1cGPcDesc2t6Miii_v_;
   3.922 +text: .text%__1cHCompileKalias_type6MpnHciField__pn0AJAliasType__;
   3.923 +text: .text%__1cGvframeKnew_vframe6FpknFframe_pknLRegisterMap_pnKJavaThread__p0_;
   3.924 +text: .text%__1cPconvI2L_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.925 +text: .text%__1cMtlsLoadPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.926 +text: .text%__1cYcompareAndSwapL_boolNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.927 +text: .text%__1cIAddINodeJideal_reg6kM_I_: classes.o;
   3.928 +text: .text%__1cIciMethodRget_flow_analysis6M_pnKciTypeFlow__;
   3.929 +text: .text%__1cWCallLeafNoFPDirectNodeIpipeline6kM_pknIPipeline__;
   3.930 +text: .text%__1cSmembar_acquireNodeIpipeline6kM_pknIPipeline__;
   3.931 +text: .text%__1cKbranchNodeJlabel_set6MrnFLabel_I_v_;
   3.932 +text: .text%__1cKbranchNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
   3.933 +text: .text%__1cOloadConI13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
   3.934 +text: .text%__1cSaddL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
   3.935 +text: .text%jni_GetObjectField: jni.o;
   3.936 +text: .text%__1cSMemBarCPUOrderNodeGOpcode6kM_i_;
   3.937 +text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__;
   3.938 +text: .text%__1cQandL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.939 +text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
   3.940 +text: .text%__1cQaddL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
   3.941 +text: .text%__1cNloadKlassNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.942 +text: .text%__1cPmethodDataKlassToop_follow_contents6MpnHoopDesc__v_;
   3.943 +text: .text%__1cPmethodDataKlassToop_adjust_pointers6MpnHoopDesc__i_;
   3.944 +text: .text%__1cJloadBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.945 +text: .text%__1cYcompareAndSwapL_boolNodeIpipeline6kM_pknIPipeline__;
   3.946 +text: .text%__1cRMachNullCheckNodeKin_RegMask6kMI_rknHRegMask__;
   3.947 +text: .text%__1cOPhaseIdealLoopIsink_use6MpnENode_2_v_;
   3.948 +text: .text%__1cIGraphKitOreplace_in_map6MpnENode_2_v_;
   3.949 +text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
   3.950 +text: .text%__1cHCompileKTracePhase2T6M_v_;
   3.951 +text: .text%__1cMPhaseChaitinLclone_projs6MpnFBlock_IpnENode_4rI_i_;
   3.952 +text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__;
   3.953 +text: .text%__1cIJVMState2t6MpnIciMethod_p0_v_;
   3.954 +text: .text%__1cIHaltNode2t6MpnENode_2_v_;
   3.955 +text: .text%__1cLOptoRuntimeSuncommon_trap_Type6F_pknITypeFunc__;
   3.956 +text: .text%__1cJloadLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.957 +text: .text%__1cSsafePoint_pollNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.958 +text: .text%__1cINodeHashJhash_find6MpknENode__p1_;
   3.959 +text: .text%__1cQmulL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.960 +text: .text%__1cSaddP_reg_imm13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
   3.961 +text: .text%__1cOMethodLivenessKBasicBlock2t6Mp0ii_v_;
   3.962 +text: .text%__1cOMethodLivenessKBasicBlockQcompute_gen_kill6MpnIciMethod__v_;
   3.963 +text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_;
   3.964 +text: .text%__1cJTypeTupleKmake_range6FpnLciSignature__pk0_;
   3.965 +text: .text%__1cJTypeTupleLmake_domain6FpnPciInstanceKlass_pnLciSignature__pk0_;
   3.966 +text: .text%__1cSmembar_acquireNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.967 +text: .text%__1cMWarmCallInfoKalways_hot6F_p0_;
   3.968 +text: .text%__1cTCreateExceptionNodeIpipeline6kM_pknIPipeline__;
   3.969 +text: .text%__1cLstoreB0NodeIpipeline6kM_pknIPipeline__;
   3.970 +text: .text%__1cMtlsLoadPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.971 +text: .text%__1cLBoxLockNodeKin_RegMask6kMI_rknHRegMask__;
   3.972 +text: .text%__1cITypeLongEmake6Fx_pk0_;
   3.973 +text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_;
   3.974 +text: .text%__1cKimmI13OperJnum_edges6kM_I_: ad_sparc_clone.o;
   3.975 +text: .text%__1cJloadBNodeIpipeline6kM_pknIPipeline__;
   3.976 +text: .text%__1cIGraphKitZadd_exception_states_from6MpnIJVMState__v_;
   3.977 +text: .text%__1cMPhaseChaitinNFind_compress6MpknENode__I_;
   3.978 +text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
   3.979 +text: .text%__1cHPhiNodeEmake6FpnENode_2_p0_;
   3.980 +text: .text%__1cNCatchProjNodeIIdentity6MpnOPhaseTransform__pnENode__;
   3.981 +text: .text%__1cWCallLeafNoFPDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
   3.982 +text: .text%__1cIciMethodTcall_profile_at_bci6Mi_nNciCallProfile__;
   3.983 +text: .text%__1cIProjNodeDcmp6kMrknENode__I_;
   3.984 +text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_;
   3.985 +text: .text%__1cFParseMprofile_call6MpnENode__v_;
   3.986 +text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__;
   3.987 +text: .text%__1cIGraphKitWround_double_arguments6MpnIciMethod__v_;
   3.988 +text: .text%__1cIGraphKitTround_double_result6MpnIciMethod__v_;
   3.989 +text: .text%__1cFParseHdo_call6M_v_;
   3.990 +text: .text%__1cNloadConP0NodeErule6kM_I_: ad_sparc_misc.o;
   3.991 +text: .text%__1cHMulNodeFValue6kMpnOPhaseTransform__pknEType__;
   3.992 +text: .text%__1cMPhaseIterGVNJtransform6MpnENode__2_;
   3.993 +text: .text%__1cHTypeIntFwiden6kMpknEType__3_;
   3.994 +text: .text%__1cSsafePoint_pollNodeIpipeline6kM_pknIPipeline__;
   3.995 +text: .text%__1cJloadSNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
   3.996 +text: .text%__1cKarrayKlassLobject_size6kMi_i_;
   3.997 +text: .text%__1cKMemBarNodeEhash6kM_I_;
   3.998 +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
   3.999 +text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o;
  3.1000 +text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
  3.1001 +text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_;
  3.1002 +text: .text%__1cMURShiftLNodeGOpcode6kM_i_;
  3.1003 +text: .text%__1cIGraphKitUmake_exception_state6MpnENode__pnNSafePointNode__;
  3.1004 +text: .text%__1cLProfileDataOtranslate_from6Mp0_v_: ciMethodData.o;
  3.1005 +text: .text%__1cRsarI_reg_imm5NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1006 +text: .text%__1cLBuildCutout2t6MpnIGraphKit_pnENode_ff_v_;
  3.1007 +text: .text%__1cTCompareAndSwapLNodeGOpcode6kM_i_;
  3.1008 +text: .text%__1cQxorI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1009 +text: .text%__1cMMergeMemNodeLout_RegMask6kM_rknHRegMask__;
  3.1010 +text: .text%__1cLLShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1011 +text: .text%__1cNflagsRegLOperEtype6kM_pknEType__: ad_sparc.o;
  3.1012 +text: .text%__1cQsubI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1013 +text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__;
  3.1014 +text: .text%__1cRshrI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
  3.1015 +text: .text%__1cQCallLeafNoFPNodeGOpcode6kM_i_;
  3.1016 +text: .text%__1cMURShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1017 +text: .text%__1cFStateM_sub_Op_ConP6MpknENode__v_;
  3.1018 +text: .text%__1cIGraphKitMsaved_ex_oop6FpnNSafePointNode__pnENode__;
  3.1019 +text: .text%__1cISubINodeLbottom_type6kM_pknEType__: classes.o;
  3.1020 +text: .text%__1cPciInstanceKlassFsuper6M_p0_;
  3.1021 +text: .text%__1cIBoolNodeHsize_of6kM_I_;
  3.1022 +text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o;
  3.1023 +text: .text%__1cPcompP_iRegPNodeLout_RegMask6kM_rknHRegMask__;
  3.1024 +text: .text%__1cJloadPNodeOmemory_operand6kM_pknIMachOper__;
  3.1025 +text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__;
  3.1026 +text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__;
  3.1027 +text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o;
  3.1028 +text: .text%__1cOstackSlotLOperKin_RegMask6kMi_pknHRegMask__;
  3.1029 +text: .text%__1cKInlineTreeMok_to_inline6MpnIciMethod_pnIJVMState_rnNciCallProfile_pnMWarmCallInfo__8_;
  3.1030 +text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__;
  3.1031 +text: .text%__1cICodeHeapSallocated_capacity6kM_I_;
  3.1032 +text: .text%__1cICHeapObj2n6FI_pv_;
  3.1033 +text: .text%__1cQsubL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1034 +text: .text%__1cWCallLeafNoFPDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1035 +text: .text%__1cFTypeDEhash6kM_i_;
  3.1036 +text: .text%__1cKTypeRawPtrHget_con6kM_i_;
  3.1037 +text: .text%__1cJStartNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
  3.1038 +text: .text%jni_ExceptionOccurred: jni.o;
  3.1039 +text: .text%__1cKciTypeFlowLStateVectorStype_meet_internal6FpnGciType_3p0_3_;
  3.1040 +text: .text%__1cMloadConINodeLout_RegMask6kM_rknHRegMask__;
  3.1041 +text: .text%__1cGIfNodeHsize_of6kM_I_: classes.o;
  3.1042 +text: .text%__1cPconvL2I_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1043 +text: .text%__1cIimmLOperJconstantL6kM_x_: ad_sparc_clone.o;
  3.1044 +text: .text%__1cTStackWalkCompPolicyRcompilation_level6MnMmethodHandle_i_i_;
  3.1045 +text: .text%jni_GetByteArrayRegion: jni.o;
  3.1046 +text: .text%__1cIGraphKitTset_all_memory_call6MpnENode__v_;
  3.1047 +text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o;
  3.1048 +text: .text%__1cHCompileFstart6kM_pnJStartNode__;
  3.1049 +text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o;
  3.1050 +text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_;
  3.1051 +text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__;
  3.1052 +text: .text%__1cIParmNodeJideal_reg6kM_I_;
  3.1053 +text: .text%__1cQandL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1054 +text: .text%__1cIMachNodeRget_base_and_disp6kMrirpknHTypePtr__pknENode__;
  3.1055 +text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
  3.1056 +text: .text%__1cIregFOperKin_RegMask6kMi_pknHRegMask__;
  3.1057 +text: .text%__1cRbranchLoopEndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1058 +text: .text%__1cGRFrame2t6MnFframe_pnKJavaThread_kp0_v_;
  3.1059 +text: .text%jni_GetArrayLength: jni.o;
  3.1060 +text: .text%__1cPciInstanceKlassUget_canonical_holder6Mi_p0_;
  3.1061 +text: .text%__1cJloadLNodeIpipeline6kM_pknIPipeline__;
  3.1062 +text: .text%__1cOClearArrayNodeGOpcode6kM_i_;
  3.1063 +text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_;
  3.1064 +text: .text%__1cVCompressedWriteStreamEgrow6M_v_;
  3.1065 +text: .text%JVM_Write;
  3.1066 +text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_;
  3.1067 +text: .text%__1cIciMethod2t6MnMmethodHandle__v_;
  3.1068 +text: .text%__1cIHaltNodeJideal_reg6kM_I_: classes.o;
  3.1069 +text: .text%__1cWShouldNotReachHereNodeLout_RegMask6kM_rknHRegMask__;
  3.1070 +text: .text%__1cLOpaque1NodeGOpcode6kM_i_;
  3.1071 +text: .text%__1cSbranchCon_longNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1072 +text: .text%__1cKstoreCNodeIpipeline6kM_pknIPipeline__;
  3.1073 +text: .text%__1cHAddNodePadd_of_identity6kMpknEType_3_3_;
  3.1074 +text: .text%__1cUcompU_iReg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1075 +text: .text%JVM_RawMonitorEnter;
  3.1076 +text: .text%JVM_RawMonitorExit;
  3.1077 +text: .text%__1cOMachReturnNodeKin_RegMask6kMI_rknHRegMask__;
  3.1078 +text: .text%__1cMTypeKlassPtrKadd_offset6kMi_pknHTypePtr__;
  3.1079 +text: .text%__1cWShouldNotReachHereNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1080 +text: .text%__1cPcmpFastLockNodeIpipeline6kM_pknIPipeline__;
  3.1081 +text: .text%__1cETypeRget_typeflow_type6FpnGciType__pk0_;
  3.1082 +text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_;
  3.1083 +text: .text%__1cRcmpFastUnlockNodeLout_RegMask6kM_rknHRegMask__;
  3.1084 +text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o;
  3.1085 +text: .text%__1cURethrowExceptionNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1086 +text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_;
  3.1087 +text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o;
  3.1088 +text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.1089 +text: .text%__1cSandI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1090 +text: .text%__1cIAddINodeIadd_ring6kMpknEType_3_3_;
  3.1091 +text: .text%__1cLTypeInstPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__;
  3.1092 +text: .text%__1cMloadConLNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1093 +text: .text%__1cFParseFmerge6Mi_v_;
  3.1094 +text: .text%__1cNSafePointNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1095 +text: .text%__1cJTypeTupleFxdual6kM_pknEType__;
  3.1096 +text: .text%__1cNLoadKlassNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1097 +text: .text%__1cPorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1098 +text: .text%__1cLRethrowNodeGOpcode6kM_i_;
  3.1099 +text: .text%__1cJloadSNodeIpipeline6kM_pknIPipeline__;
  3.1100 +text: .text%__1cICodeHeapIcapacity6kM_I_;
  3.1101 +text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o;
  3.1102 +text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o;
  3.1103 +text: .text%__1cPcmpFastLockNodeErule6kM_I_: ad_sparc_misc.o;
  3.1104 +text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__;
  3.1105 +text: .text%__1cFArena2T6M_v_;
  3.1106 +text: .text%__1cKMemBarNodeFmatch6MpknIProjNode_pknHMatcher__pnENode__;
  3.1107 +text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o;
  3.1108 +text: .text%__1cHoopDescSslow_identity_hash6M_i_;
  3.1109 +text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_;
  3.1110 +text: .text%__1cLPCTableNodeEhash6kM_I_;
  3.1111 +text: .text%__1cHConNodeLout_RegMask6kM_rknHRegMask__: classes.o;
  3.1112 +text: .text%__1cXPhaseAggressiveCoalesceYinsert_copy_with_overlap6MpnFBlock_pnENode_II_v_;
  3.1113 +text: .text%__1cOloadConI13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.1114 +text: .text%__1cJloadBNodeLout_RegMask6kM_rknHRegMask__;
  3.1115 +text: .text%__1cMtlsLoadPNodeIpipeline6kM_pknIPipeline__;
  3.1116 +text: .text%__1cMPhaseChaitinNFind_compress6MI_I_;
  3.1117 +text: .text%__1cMindIndexOperKin_RegMask6kMi_pknHRegMask__;
  3.1118 +text: .text%__1cFStateN_sub_Op_LoadP6MpknENode__v_;
  3.1119 +text: .text%__1cFframeVinterpreter_frame_bci6kM_i_;
  3.1120 +text: .text%__1cNGCTaskManagerIget_task6MI_pnGGCTask__;
  3.1121 +text: .text%__1cLGCTaskQdDueueGremove6M_pnGGCTask__;
  3.1122 +text: .text%__1cLGCTaskQdDueueHenqueue6MpnGGCTask__v_;
  3.1123 +text: .text%__1cNGCTaskManagerPnote_completion6MI_v_;
  3.1124 +text: .text%__1cQandI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1125 +text: .text%__1cIGraphKitHjava_bc6kM_nJBytecodesECode__;
  3.1126 +text: .text%__1cIGraphKitNbuiltin_throw6MnODeoptimizationLDeoptReason_pnENode__v_;
  3.1127 +text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__;
  3.1128 +text: .text%__1cRinterpretedVFrameGmethod6kM_pnNmethodOopDesc__;
  3.1129 +text: .text%jni_GetSuperclass: jni.o;
  3.1130 +text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
  3.1131 +text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_;
  3.1132 +text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_;
  3.1133 +text: .text%__1cIMulINodeGOpcode6kM_i_;
  3.1134 +text: .text%__1cRcompL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1135 +text: .text%__1cNloadKlassNodeErule6kM_I_: ad_sparc_misc.o;
  3.1136 +text: .text%__1cJloadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1137 +text: .text%__1cGGCTask2t6M_v_;
  3.1138 +text: .text%__1cJloadSNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1139 +text: .text%__1cIJumpDataKcell_count6M_i_: ciMethodData.o;
  3.1140 +text: .text%__1cObranchConPNodeLout_RegMask6kM_rknHRegMask__;
  3.1141 +text: .text%__1cITypeFuncFxdual6kM_pknEType__;
  3.1142 +text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_;
  3.1143 +text: .text%__1cFStateM_sub_Op_CmpI6MpknENode__v_;
  3.1144 +text: .text%__1cJcmpOpOperFccode6kM_i_: ad_sparc_clone.o;
  3.1145 +text: .text%__1cGciType2t6MnLKlassHandle__v_;
  3.1146 +text: .text%__1cHciKlass2t6MnLKlassHandle__v_;
  3.1147 +text: .text%__1cMindirectOperKin_RegMask6kMi_pknHRegMask__;
  3.1148 +text: .text%__1cSPSPromotionManagerbBgc_thread_promotion_manager6Fi_p0_;
  3.1149 +text: .text%__1cQxorI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1150 +text: .text%__1cJloadLNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1151 +text: .text%__1cIregFOperEtype6kM_pknEType__: ad_sparc.o;
  3.1152 +text: .text%__1cKcmpOpPOperFccode6kM_i_: ad_sparc_clone.o;
  3.1153 +text: .text%__1cNloadKlassNodeLout_RegMask6kM_rknHRegMask__;
  3.1154 +text: .text%__1cHPhiNodeMslice_memory6kMpknHTypePtr__p0_;
  3.1155 +text: .text%__1cPCheckCastPPNodeJideal_reg6kM_I_: connode.o;
  3.1156 +text: .text%__1cObranchConPNodeJlabel_set6MrnFLabel_I_v_;
  3.1157 +text: .text%__1cObranchConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1158 +text: .text%__1cICHeapObj2k6Fpv_v_;
  3.1159 +text: .text%__1cSaddL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1160 +text: .text%__1cRmethodDataOopDescJbci_to_dp6Mi_pC_;
  3.1161 +text: .text%__1cMloadConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1162 +text: .text%__1cRInvocationCounterJset_carry6M_v_;
  3.1163 +text: .text%__1cFArena2t6M_v_;
  3.1164 +text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_;
  3.1165 +text: .text%__1cRInterpreterOopMap2T6M_v_;
  3.1166 +text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_;
  3.1167 +text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_;
  3.1168 +text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_;
  3.1169 +text: .text%__1cRInterpreterOopMap2t6M_v_;
  3.1170 +text: .text%__1cISubINodeDsub6kMpknEType_3_3_;
  3.1171 +text: .text%__1cFParseOreturn_current6MpnENode__v_;
  3.1172 +text: .text%__1cRsarI_reg_imm5NodeIpipeline6kM_pknIPipeline__;
  3.1173 +text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_;
  3.1174 +text: .text%__1cMloadConLNodeIpipeline6kM_pknIPipeline__;
  3.1175 +text: .text%__1cJStoreNodeSIdeal_masked_input6MpnIPhaseGVN_I_pnENode__;
  3.1176 +text: .text%jni_GetPrimitiveArrayCritical: jni.o;
  3.1177 +text: .text%jni_ReleasePrimitiveArrayCritical: jni.o;
  3.1178 +text: .text%__1cPconvI2L_regNodeIpipeline6kM_pknIPipeline__;
  3.1179 +text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_;
  3.1180 +text: .text%__1cSmembar_releaseNodeIpipeline6kM_pknIPipeline__;
  3.1181 +text: .text%__1cJimmU5OperIconstant6kM_i_: ad_sparc_clone.o;
  3.1182 +text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_;
  3.1183 +text: .text%__1cLOpaque1NodeEhash6kM_I_;
  3.1184 +text: .text%__1cJStoreNodeZIdeal_sign_extended_input6MpnIPhaseGVN_i_pnENode__;
  3.1185 +text: .text%__1cSbranchCon_longNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1186 +text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_;
  3.1187 +text: .text%__1cSmembar_releaseNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1188 +text: .text%__1cNbranchConNodeLout_RegMask6kM_rknHRegMask__;
  3.1189 +text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_;
  3.1190 +text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__;
  3.1191 +text: .text%__1cZCallDynamicJavaDirectNodeIpipeline6kM_pknIPipeline__;
  3.1192 +text: .text%__1cJMultiNodeLout_RegMask6kM_rknHRegMask__;
  3.1193 +text: .text%__1cKStoreLNodeGOpcode6kM_i_;
  3.1194 +text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
  3.1195 +text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
  3.1196 +text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_;
  3.1197 +text: .text%__1cNloadKlassNodeIpipeline6kM_pknIPipeline__;
  3.1198 +text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_;
  3.1199 +text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_;
  3.1200 +text: .text%__1cRsarI_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
  3.1201 +text: .text%__1cRbranchLoopEndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1202 +text: .text%__1cQmulL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1203 +text: .text%__1cLstoreP0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1204 +text: .text%__1cLRethrowNodeKmatch_edge6kMI_I_;
  3.1205 +text: .text%__1cFTypeFEhash6kM_i_;
  3.1206 +text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_;
  3.1207 +text: .text%__1cFStateM_sub_Op_AddI6MpknENode__v_;
  3.1208 +text: .text%__1cOParseGeneratorIgenerate6MpnIJVMState__2_;
  3.1209 +text: .text%__1cFParseQcreate_entry_map6M_pnNSafePointNode__;
  3.1210 +text: .text%__1cFArenaEused6kM_I_;
  3.1211 +text: .text%__1cFParseLbuild_exits6M_v_;
  3.1212 +text: .text%__1cFParseIdo_exits6M_v_;
  3.1213 +text: .text%__1cFParse2t6MpnIJVMState_pnIciMethod_f_v_;
  3.1214 +text: .text%__1cIBoolNodeDcmp6kMrknENode__I_;
  3.1215 +text: .text%__1cFParsePdo_method_entry6M_v_;
  3.1216 +text: .text%__1cNCallGeneratorKfor_inline6FpnIciMethod_f_p0_;
  3.1217 +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_;
  3.1218 +text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_;
  3.1219 +text: .text%__1cQconstMethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1220 +text: .text%__1cRciVirtualCallDataOtranslate_from6MpnLProfileData__v_;
  3.1221 +text: .text%jni_IsSameObject: jni.o;
  3.1222 +text: .text%__1cMloadConINodeIpipeline6kM_pknIPipeline__;
  3.1223 +text: .text%__1cNbranchConNodeJlabel_set6MrnFLabel_I_v_;
  3.1224 +text: .text%__1cNbranchConNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1225 +text: .text%__1cQandL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1226 +text: .text%__1cLmethodKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1227 +text: .text%__1cLsymbolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1228 +text: .text%__1cIciObjectFklass6M_pnHciKlass__;
  3.1229 +text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__;
  3.1230 +text: .text%__1cPThreadLocalNodeGOpcode6kM_i_;
  3.1231 +text: .text%__1cZPhaseConservativeCoalesceKupdate_ifg6MIIpnIIndexSet_2_v_;
  3.1232 +text: .text%__1cZPhaseConservativeCoalesceMunion_helper6MpnENode_2II222pnFBlock_I_v_;
  3.1233 +text: .text%__1cOMethodLivenessKBasicBlockJstore_one6Mi_v_;
  3.1234 +text: .text%__1cIIndexSetEswap6Mp0_v_;
  3.1235 +text: .text%__1cHTypeAryEmake6FpknEType_pknHTypeInt__pk0_;
  3.1236 +text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_;
  3.1237 +text: .text%__1cKTypeAryPtrFxdual6kM_pknEType__;
  3.1238 +text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_;
  3.1239 +text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o;
  3.1240 +text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_;
  3.1241 +text: .text%__1cKstoreBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1242 +text: .text%__1cKCastPPNodeQIdeal_DU_postCCP6MpnIPhaseCCP__pnENode__;
  3.1243 +text: .text%__1cKstorePNodeOmemory_operand6kM_pknIMachOper__;
  3.1244 +text: .text%__1cOPhaseIdealLoopOsplit_thru_phi6MpnENode_2i_2_;
  3.1245 +text: .text%__1cENodeGOpcode6kM_i_;
  3.1246 +text: .text%__1cRshrP_reg_imm5NodeIpipeline6kM_pknIPipeline__;
  3.1247 +text: .text%__1cQandI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1248 +text: .text%__1cIciMethodbBinterpreter_call_site_count6Mi_i_;
  3.1249 +text: .text%__1cGBitMapIset_from6M0_v_;
  3.1250 +text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__;
  3.1251 +text: .text%__1cTconstantPoolOopDescbDresolve_string_constants_impl6FnSconstantPoolHandle_pnGThread__v_;
  3.1252 +text: .text%__1cHSubNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1253 +text: .text%__1cFChunk2n6FII_pv_;
  3.1254 +text: .text%__1cTCallDynamicJavaNodeGOpcode6kM_i_;
  3.1255 +text: .text%__1cKstoreBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1256 +text: .text%__1cILoadNodeDcmp6kMrknENode__I_;
  3.1257 +text: .text%__1cIciObject2t6M_v_;
  3.1258 +text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_;
  3.1259 +text: .text%__1cRcompL_reg_conNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1260 +text: .text%__1cHCompileXin_preserve_stack_slots6M_I_;
  3.1261 +text: .text%__1cPciObjectFactoryUget_empty_methodData6M_pnMciMethodData__;
  3.1262 +text: .text%__1cMciMethodData2t6M_v_;
  3.1263 +text: .text%__1cHOrINodeLbottom_type6kM_pknEType__: classes.o;
  3.1264 +text: .text%__1cFframeLreal_sender6kMpnLRegisterMap__0_;
  3.1265 +text: .text%__1cGRFrameGcaller6M_p0_;
  3.1266 +text: .text%__1cPCheckCastPPNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1267 +text: .text%__1cRshrP_reg_imm5NodeErule6kM_I_: ad_sparc_misc.o;
  3.1268 +text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
  3.1269 +text: .text%__1cXmembar_release_lockNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1270 +text: .text%__1cJloadINodeOmemory_operand6kM_pknIMachOper__;
  3.1271 +text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o;
  3.1272 +text: .text%__1cMloadConFNodeErule6kM_I_: ad_sparc_misc.o;
  3.1273 +text: .text%__1cIMulINodeLbottom_type6kM_pknEType__: classes.o;
  3.1274 +text: .text%__1cMCreateExNodeJideal_reg6kM_I_: classes.o;
  3.1275 +text: .text%__1cQaddL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1276 +text: .text%__1cMCreateExNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1277 +text: .text%__1cISubINodeGadd_id6kM_pknEType__: classes.o;
  3.1278 +text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_;
  3.1279 +text: .text%__1cQdivD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1280 +text: .text%__1cNCatchProjNodeDcmp6kMrknENode__I_;
  3.1281 +text: .text%__1cKTypeOopPtrEhash6kM_i_;
  3.1282 +text: .text%__1cIMinINodeGOpcode6kM_i_;
  3.1283 +text: .text%__1cMURShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1284 +text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_;
  3.1285 +text: .text%__1cKTypeRawPtrFxmeet6kMpknEType__3_;
  3.1286 +text: .text%JVM_GetMethodIxModifiers;
  3.1287 +text: .text%__1cIMulLNodeLbottom_type6kM_pknEType__: classes.o;
  3.1288 +text: .text%__1cPconvI2L_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1289 +text: .text%__1cLLShiftINodeJideal_reg6kM_I_: classes.o;
  3.1290 +text: .text%__1cTCreateExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1291 +text: .text%JVM_IsInterface;
  3.1292 +text: .text%__1cPorI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1293 +text: .text%__1cIDivINodeGOpcode6kM_i_;
  3.1294 +text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_;
  3.1295 +text: .text%__1cICodeHeapIallocate6MI_pv_;
  3.1296 +text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__;
  3.1297 +text: .text%__1cLOpaque1NodeLbottom_type6kM_pknEType__: connode.o;
  3.1298 +text: .text%__1cNloadRangeNodeOmemory_operand6kM_pknIMachOper__;
  3.1299 +text: .text%__1cLRShiftLNodeGOpcode6kM_i_;
  3.1300 +text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__;
  3.1301 +text: .text%__1cSCountedLoopEndNodeKstride_con6kM_i_;
  3.1302 +text: .text%__1cUPipeline_Use_Element2t6M_v_: output.o;
  3.1303 +text: .text%__1cRshrL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1304 +text: .text%__1cHCompileSregister_intrinsic6MpnNCallGenerator__v_;
  3.1305 +text: .text%__1cNSCMemProjNodeGOpcode6kM_i_;
  3.1306 +text: .text%__1cNimmP_pollOperEtype6kM_pknEType__: ad_sparc_clone.o;
  3.1307 +text: .text%__1cRloadConP_pollNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1308 +text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_;
  3.1309 +text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_;
  3.1310 +text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_;
  3.1311 +text: .text%__1cPciInstanceKlassLfind_method6MpnIciSymbol_2_pnIciMethod__;
  3.1312 +text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_;
  3.1313 +text: .text%__1cFciEnvRfind_system_klass6MpnIciSymbol__pnHciKlass__;
  3.1314 +text: .text%__1cLRegisterMapIpd_clear6M_v_;
  3.1315 +text: .text%__1cHUNICODEHas_utf86FpHi_pc_;
  3.1316 +text: .text%__1cLstoreP0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1317 +text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
  3.1318 +text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
  3.1319 +text: .text%__1cIGraphKitYcombine_exception_states6MpnNSafePointNode_2_v_;
  3.1320 +text: .text%__1cQmulL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1321 +text: .text%__1cRshrP_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
  3.1322 +text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__;
  3.1323 +text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_;
  3.1324 +text: .text%__1cKstoreLNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1325 +text: .text%__1cRbranchLoopEndNodeIpipeline6kM_pknIPipeline__;
  3.1326 +text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_;
  3.1327 +text: .text%__1cSconvI2D_helperNodeErule6kM_I_: ad_sparc_misc.o;
  3.1328 +text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_;
  3.1329 +text: .text%__1cUcompI_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1330 +text: .text%__1cOMacroAssemblerNverify_thread6M_v_;
  3.1331 +text: .text%__1cIGraphKitZset_results_for_java_call6MpnMCallJavaNode__pnENode__;
  3.1332 +text: .text%__1cSbranchCon_longNodeIpipeline6kM_pknIPipeline__;
  3.1333 +text: .text%__1cHnmethodVcleanup_inline_caches6M_v_;
  3.1334 +text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_;
  3.1335 +text: .text%__1cIAddLNodeLbottom_type6kM_pknEType__: classes.o;
  3.1336 +text: .text%__1cFStateO_sub_Op_StoreI6MpknENode__v_;
  3.1337 +text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_;
  3.1338 +text: .text%__1cHciField2t6MpnPfieldDescriptor__v_;
  3.1339 +text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_;
  3.1340 +text: .text%__1cMloadConLNodeLout_RegMask6kM_rknHRegMask__;
  3.1341 +text: .text%__1cYcompareAndSwapL_boolNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1342 +text: .text%__1cFKlassMnext_sibling6kM_p0_;
  3.1343 +text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__;
  3.1344 +text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_;
  3.1345 +text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__;
  3.1346 +text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
  3.1347 +text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
  3.1348 +text: .text%__1cJimmU5OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.1349 +text: .text%__1cLBlock_ArrayEgrow6MI_v_;
  3.1350 +text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_;
  3.1351 +text: .text%__1cKcmpOpPOperGnegate6M_v_: ad_sparc_clone.o;
  3.1352 +text: .text%__1cObranchConPNodeGnegate6M_v_: ad_sparc_misc.o;
  3.1353 +text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_;
  3.1354 +text: .text%__1cLBoxLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1355 +text: .text%__1cPcmpFastLockNodeLout_RegMask6kM_rknHRegMask__;
  3.1356 +text: .text%__1cHRetNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1357 +text: .text%__1cPconvL2I_regNodeIpipeline6kM_pknIPipeline__;
  3.1358 +text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__;
  3.1359 +text: .text%__1cbDcatch_cleanup_find_cloned_def6FpnFBlock_pnENode_1rnLBlock_Array_i_3_: lcm.o;
  3.1360 +text: .text%__1cQxorI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1361 +text: .text%__1cKstoreLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1362 +text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__;
  3.1363 +text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__;
  3.1364 +text: .text%__1cOClearArrayNodeKmatch_edge6kMI_I_;
  3.1365 +text: .text%__1cPconvL2I_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1366 +text: .text%__1cJloadSNodeLout_RegMask6kM_rknHRegMask__;
  3.1367 +text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_;
  3.1368 +text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o;
  3.1369 +text: .text%__1cFStateP_sub_Op_LShiftI6MpknENode__v_;
  3.1370 +text: .text%__1cQandL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1371 +text: .text%__1cPSignatureStreamRas_symbol_or_null6M_pnNsymbolOopDesc__;
  3.1372 +text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__;
  3.1373 +text: .text%__1cIGraphKitbBset_arguments_for_java_call6MpnMCallJavaNode__v_;
  3.1374 +text: .text%__1cIGraphKitJpush_node6MnJBasicType_pnENode__v_: callGenerator.o;
  3.1375 +text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o;
  3.1376 +text: .text%__1cJcmpOpOperGnegate6M_v_: ad_sparc_clone.o;
  3.1377 +text: .text%__1cMloadConPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1378 +text: .text%jni_SetObjectArrayElement: jni.o;
  3.1379 +text: .text%__1cSandI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.1380 +text: .text%__1cPThreadLocalNodeJideal_reg6kM_I_: classes.o;
  3.1381 +text: .text%__1cNSafePointNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1382 +text: .text%__1cObranchConUNodeLout_RegMask6kM_rknHRegMask__;
  3.1383 +text: .text%__1cRshlL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
  3.1384 +text: .text%__1cQandI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1385 +text: .text%__1cSandI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.1386 +text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_;
  3.1387 +text: .text%__1cZresource_reallocate_bytes6FpcII_0_;
  3.1388 +text: .text%__1cLConvL2INodeLbottom_type6kM_pknEType__: classes.o;
  3.1389 +text: .text%__1cOAbstractICacheQinvalidate_range6FpCi_v_;
  3.1390 +text: .text%__1cKstorePNodeLout_RegMask6kM_rknHRegMask__;
  3.1391 +text: .text%__1cIMaxINodeGOpcode6kM_i_;
  3.1392 +text: .text%__1cTDirectCallGeneratorIgenerate6MpnIJVMState__2_;
  3.1393 +text: .text%__1cNCallGeneratorPfor_direct_call6FpnIciMethod__p0_;
  3.1394 +text: .text%__1cMWarmCallInfoLalways_cold6F_p0_;
  3.1395 +text: .text%__1cIimmDOperJconstantD6kM_d_: ad_sparc_clone.o;
  3.1396 +text: .text%__1cIPhaseIFGEinit6MI_v_;
  3.1397 +text: .text%__1cJPhaseLiveHcompute6MI_v_;
  3.1398 +text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__;
  3.1399 +text: .text%__1cSaddI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1400 +text: .text%__1cJloadLNodeLout_RegMask6kM_rknHRegMask__;
  3.1401 +text: .text%__1cFTypeDEmake6Fd_pk0_;
  3.1402 +text: .text%__1cPThreadRootsTaskEname6M_pc_: psTasks.o;
  3.1403 +text: .text%__1cPThreadRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.1404 +text: .text%__1cRshlI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1405 +text: .text%__1cQaddL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1406 +text: .text%__1cMloadConDNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1407 +text: .text%__1cFStateN_sub_Op_LoadI6MpknENode__v_;
  3.1408 +text: .text%__1cIMachOperEtype6kM_pknEType__;
  3.1409 +text: .text%JVM_GetCPClassNameUTF;
  3.1410 +text: .text%__1cKBufferBlobGcreate6Fpkci_p0_;
  3.1411 +text: .text%__1cKcmpOpUOperFccode6kM_i_: ad_sparc_clone.o;
  3.1412 +text: .text%__1cObranchConUNodeJlabel_set6MrnFLabel_I_v_;
  3.1413 +text: .text%__1cObranchConUNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1414 +text: .text%jni_GetStringLength: jni.o;
  3.1415 +text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
  3.1416 +text: .text%__1cLConvI2LNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1417 +text: .text%__1cJloadPNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.1418 +text: .text%__1cMoutputStream2t6Mi_v_;
  3.1419 +text: .text%__1cMstringStreamJas_string6M_pc_;
  3.1420 +text: .text%__1cMstringStream2T6M_v_;
  3.1421 +text: .text%__1cMstringStream2t6MI_v_;
  3.1422 +text: .text%__1cIGraphKitMreset_memory6M_pnENode__;
  3.1423 +text: .text%__1cZCallDynamicJavaDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1424 +text: .text%__1cKstorePNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1425 +text: .text%__1cENodeMsetup_is_top6M_v_;
  3.1426 +text: .text%__1cIGotoNodeGOpcode6kM_i_;
  3.1427 +text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_;
  3.1428 +text: .text%__1cNbranchConNodeGnegate6M_v_: ad_sparc_misc.o;
  3.1429 +text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_;
  3.1430 +text: .text%__1cKcmpOpPOperFequal6kM_i_: ad_sparc_clone.o;
  3.1431 +text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_;
  3.1432 +text: .text%__1cKReturnNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1433 +text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_;
  3.1434 +text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_;
  3.1435 +text: .text%__1cJloadINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1436 +text: .text%__1cSmembar_releaseNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1437 +text: .text%__1cSaddL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.1438 +text: .text%__1cLRShiftINodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1439 +text: .text%__1cEDict2t6MpFpkv2_ipF2_i_v_;
  3.1440 +text: .text%__1cEDict2T6M_v_;
  3.1441 +text: .text%__1cKBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
  3.1442 +text: .text%__1cLOopRecorder2t6MpnFArena__v_;
  3.1443 +text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__;
  3.1444 +text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
  3.1445 +text: .text%__1cIModINodeGOpcode6kM_i_;
  3.1446 +text: .text%__1cRInterpretedRFrame2t6MnFframe_pnKJavaThread_nMmethodHandle__v_;
  3.1447 +text: .text%__1cKJavaThreadQlast_java_vframe6MpnLRegisterMap__pnKjavaVFrame__;
  3.1448 +text: .text%__1cTStackWalkCompPolicyVfindTopInlinableFrame6MpnNGrowableArray4CpnGRFrame____2_;
  3.1449 +text: .text%__1cTStackWalkCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_;
  3.1450 +text: .text%__1cISubLNodeGOpcode6kM_i_;
  3.1451 +text: .text%__1cKciTypeFlow2t6MpnFciEnv_pnIciMethod_i_v_;
  3.1452 +text: .text%__1cKciTypeFlowPget_start_state6M_pkn0ALStateVector__;
  3.1453 +text: .text%__1cKciTypeFlowHdo_flow6M_v_;
  3.1454 +text: .text%__1cKciTypeFlowKflow_types6M_v_;
  3.1455 +text: .text%__1cKciTypeFlowKmap_blocks6M_v_;
  3.1456 +text: .text%__1cMloadConPNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.1457 +text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_;
  3.1458 +text: .text%__1cIciMethodJload_code6M_v_;
  3.1459 +text: .text%__1cMciMethodDataJload_data6M_v_;
  3.1460 +text: .text%__1cIGraphKitTuse_exception_state6MpnNSafePointNode__pnENode__;
  3.1461 +text: .text%__1cOcompU_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1462 +text: .text%__1cIGraphKitGmemory6MI_pnENode__;
  3.1463 +text: .text%__1cIHaltNodeEhash6kM_I_: classes.o;
  3.1464 +text: .text%__1cFKlassQup_cast_abstract6M_p0_;
  3.1465 +text: .text%__1cKReturnNodeEhash6kM_I_: classes.o;
  3.1466 +text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_;
  3.1467 +text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__;
  3.1468 +text: .text%__1cIAndINodeGadd_id6kM_pknEType__: classes.o;
  3.1469 +text: .text%__1cMciMethodData2t6MnQmethodDataHandle__v_;
  3.1470 +text: .text%__1cIAndINodeImul_ring6kMpknEType_3_3_;
  3.1471 +text: .text%__1cLOpaque2NodeGOpcode6kM_i_;
  3.1472 +text: .text%__1cOClearArrayNodeLbottom_type6kM_pknEType__: classes.o;
  3.1473 +text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_;
  3.1474 +text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_;
  3.1475 +text: .text%__1cOPhaseIdealLoopPis_counted_loop6MpnENode_pnNIdealLoopTree__2_;
  3.1476 +text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o;
  3.1477 +text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_;
  3.1478 +text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_;
  3.1479 +text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__;
  3.1480 +text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_;
  3.1481 +text: .text%jni_GetStringUTFLength: jni.o;
  3.1482 +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_;
  3.1483 +text: .text%jni_GetStringUTFRegion: jni.o;
  3.1484 +text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_;
  3.1485 +text: .text%__1cHUNICODELutf8_length6FpHi_i_;
  3.1486 +text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_;
  3.1487 +text: .text%__1cKstoreBNodeIpipeline6kM_pknIPipeline__;
  3.1488 +text: .text%__1cKstoreINodeOmemory_operand6kM_pknIMachOper__;
  3.1489 +text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__;
  3.1490 +text: .text%__1cRsarI_reg_imm5NodeLout_RegMask6kM_rknHRegMask__;
  3.1491 +text: .text%__1cUcompU_iReg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.1492 +text: .text%__1cQmulL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1493 +text: .text%__1cHAddNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1494 +text: .text%__1cUcompU_iReg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.1495 +text: .text%__1cKPerfStringKset_string6Mpkc_v_;
  3.1496 +text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_;
  3.1497 +text: .text%JVM_InternString;
  3.1498 +text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_;
  3.1499 +text: .text%__1cCosGrandom6F_l_;
  3.1500 +text: .text%__1cKimmP13OperIconstant6kM_i_: ad_sparc_clone.o;
  3.1501 +text: .text%__1cVcompP_iRegP_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1502 +text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__;
  3.1503 +text: .text%__1cRcompL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1504 +text: .text%__1cRMachNullCheckNodeLout_RegMask6kM_rknHRegMask__: machnode.o;
  3.1505 +text: .text%__1cSTailCalljmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
  3.1506 +text: .text%__1cIGraphKitPpush_pair_local6Mi_v_: parse2.o;
  3.1507 +text: .text%__1cICodeHeapKdeallocate6Mpv_v_;
  3.1508 +text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_;
  3.1509 +text: .text%__1cKTypeRawPtrEmake6FpC_pk0_;
  3.1510 +text: .text%jni_SetIntField: jni.o;
  3.1511 +text: .text%__1cNIdealLoopTreeMcounted_loop6MpnOPhaseIdealLoop__v_;
  3.1512 +text: .text%__1cKBufferBlobEfree6Fp0_v_;
  3.1513 +text: .text%__1cPconvL2I_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1514 +text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__;
  3.1515 +text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_;
  3.1516 +text: .text%__1cVshrL_reg_imm6_L2INodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1517 +text: .text%__1cZCallDynamicJavaDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1518 +text: .text%__1cIGraphKitTcreate_and_xform_if6MpnENode_2ff_pnGIfNode__: graphKit.o;
  3.1519 +text: .text%__1cWImplicitExceptionTableGappend6MII_v_;
  3.1520 +text: .text%__1cRMachNullCheckNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1521 +text: .text%__1cLProfileDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_: ciMethodData.o;
  3.1522 +text: .text%__1cQxorI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1523 +text: .text%__1cNIdealLoopTreeVadjust_loop_exit_prob6MpnOPhaseIdealLoop__v_;
  3.1524 +text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_;
  3.1525 +text: .text%__1cSandI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.1526 +text: .text%__1cIPhaseIFGISquareUp6M_v_;
  3.1527 +text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_;
  3.1528 +text: .text%__1cKCodeBuffer2T6M_v_;
  3.1529 +text: .text%__1cQPSGenerationPoolQget_memory_usage6M_nLMemoryUsage__;
  3.1530 +text: .text%__1cLOpaque1NodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1531 +text: .text%__1cMURShiftINodeJideal_reg6kM_I_: classes.o;
  3.1532 +text: .text%__1cRcompL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1533 +text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_;
  3.1534 +text: .text%__1cFKlassWappend_to_sibling_list6M_v_;
  3.1535 +text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__;
  3.1536 +text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_;
  3.1537 +text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_;
  3.1538 +text: .text%__1cMTypeKlassPtrFxdual6kM_pknEType__;
  3.1539 +text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_;
  3.1540 +text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
  3.1541 +text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_;
  3.1542 +text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_;
  3.1543 +text: .text%__1cNinstanceKlassQinit_implementor6M_v_;
  3.1544 +text: .text%__1cPClassFileStream2t6MpCipc_v_;
  3.1545 +text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_;
  3.1546 +text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
  3.1547 +text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
  3.1548 +text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_;
  3.1549 +text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__;
  3.1550 +text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__;
  3.1551 +text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_;
  3.1552 +text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__;
  3.1553 +text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_;
  3.1554 +text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_;
  3.1555 +text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_;
  3.1556 +text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_;
  3.1557 +text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_;
  3.1558 +text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_;
  3.1559 +text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
  3.1560 +text: .text%__1cKcmpOpPOperJnot_equal6kM_i_: ad_sparc_clone.o;
  3.1561 +text: .text%__1cMPhaseIterGVNIoptimize6M_v_;
  3.1562 +text: .text%__1cOPhaseTransform2t6MnFPhaseLPhaseNumber__v_;
  3.1563 +text: .text%__1cISubINodeJideal_reg6kM_I_: classes.o;
  3.1564 +text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o;
  3.1565 +text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o;
  3.1566 +text: .text%__1cHMemNodeHsize_of6kM_I_;
  3.1567 +text: .text%__1cFVTuneQstart_class_load6F_v_;
  3.1568 +text: .text%__1cSThreadProfilerMark2T6M_v_;
  3.1569 +text: .text%__1cFVTuneOend_class_load6F_v_;
  3.1570 +text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__;
  3.1571 +text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o;
  3.1572 +text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_;
  3.1573 +text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_;
  3.1574 +text: .text%__1cKklassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1575 +text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
  3.1576 +text: .text%__1cQmodI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1577 +text: .text%__1cLRShiftINodeLbottom_type6kM_pknEType__: classes.o;
  3.1578 +text: .text%__1cKCMoveINodeGOpcode6kM_i_;
  3.1579 +text: .text%__1cLLShiftLNodeGOpcode6kM_i_;
  3.1580 +text: .text%__1cYcompareAndSwapL_boolNodeErule6kM_I_: ad_sparc_misc.o;
  3.1581 +text: .text%__1cSinstanceKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1582 +text: .text%__1cNinstanceKlassScopy_static_fields6MpnSPSPromotionManager__v_;
  3.1583 +text: .text%__1cMtlsLoadPNodeLout_RegMask6kM_rknHRegMask__;
  3.1584 +text: .text%__1cFStateQ_sub_Op_URShiftI6MpknENode__v_;
  3.1585 +text: .text%__1cKcmpOpUOperGnegate6M_v_: ad_sparc_clone.o;
  3.1586 +text: .text%__1cObranchConUNodeGnegate6M_v_: ad_sparc_misc.o;
  3.1587 +text: .text%__1cQaddP_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1588 +text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_;
  3.1589 +text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_;
  3.1590 +text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_;
  3.1591 +text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_;
  3.1592 +text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_;
  3.1593 +text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_;
  3.1594 +text: .text%JVM_GetMethodIxSignatureUTF;
  3.1595 +text: .text%JVM_GetMethodIxMaxStack;
  3.1596 +text: .text%JVM_GetMethodIxArgsSize;
  3.1597 +text: .text%JVM_GetMethodIxByteCodeLength;
  3.1598 +text: .text%JVM_GetMethodIxExceptionIndexes;
  3.1599 +text: .text%JVM_GetMethodIxByteCode;
  3.1600 +text: .text%JVM_GetMethodIxExceptionsCount;
  3.1601 +text: .text%__1cLstoreP0NodeIpipeline6kM_pknIPipeline__;
  3.1602 +text: .text%__1cHCmpNodeGadd_id6kM_pknEType__: classes.o;
  3.1603 +text: .text%__1cMPhaseChaitinSbuild_ifg_physical6MpnMResourceArea__I_;
  3.1604 +text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
  3.1605 +text: .text%__1cQmulD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1606 +text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__;
  3.1607 +text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__;
  3.1608 +text: .text%__1cNPhaseCoalescePcoalesce_driver6M_v_;
  3.1609 +text: .text%__1cLBuildCutout2T6M_v_;
  3.1610 +text: .text%__1cNloadConL0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1611 +text: .text%__1cJloadFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1612 +text: .text%__1cNloadConP0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.1613 +text: .text%__1cJimmP0OperEtype6kM_pknEType__: ad_sparc_clone.o;
  3.1614 +text: .text%__1cLstoreI0NodeLout_RegMask6kM_rknHRegMask__;
  3.1615 +text: .text%__1cPCheckCastPPNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1616 +text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_;
  3.1617 +text: .text%__1cHNTarjanICOMPRESS6M_v_;
  3.1618 +text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_;
  3.1619 +text: .text%__1cQsubL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1620 +text: .text%__1cOcompI_iRegNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1621 +text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_;
  3.1622 +text: .text%__1cQandI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1623 +text: .text%__1cIXorINodeLbottom_type6kM_pknEType__: classes.o;
  3.1624 +text: .text%__1cRmethodDataOopDescLbci_to_data6Mi_pnLProfileData__;
  3.1625 +text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_;
  3.1626 +text: .text%__1cMnegF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1627 +text: .text%__1cLstoreI0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1628 +text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_;
  3.1629 +text: .text%__1cSaddL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.1630 +text: .text%__1cQshrL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1631 +text: .text%__1cKstoreLNodeIpipeline6kM_pknIPipeline__;
  3.1632 +text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_;
  3.1633 +text: .text%__1cILoopNodeHsize_of6kM_I_: classes.o;
  3.1634 +text: .text%__1cHMatcherLfind_shared6MpnENode__v_;
  3.1635 +text: .text%__1cJStartNodeHsize_of6kM_I_;
  3.1636 +text: .text%__1cHMatcherFxform6MpnENode_i_2_;
  3.1637 +text: .text%__1cEDict2t6MpFpkv2_ipF2_ipnFArena_i_v_;
  3.1638 +text: .text%__1cRInterpretedRFrameKtop_vframe6kM_pnKjavaVFrame__: rframe.o;
  3.1639 +text: .text%__1cQmodI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1640 +text: .text%__1cRinterpretedVFrameDbci6kM_i_;
  3.1641 +text: .text%__1cIAndINodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1642 +text: .text%__1cIAndINodeGmul_id6kM_pknEType__: classes.o;
  3.1643 +text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_;
  3.1644 +text: .text%__1cNloadRangeNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1645 +text: .text%__1cRcompL_reg_conNodeErule6kM_I_: ad_sparc_misc.o;
  3.1646 +text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
  3.1647 +text: .text%__1cMnegF_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1648 +text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_;
  3.1649 +text: .text%__1cTStackWalkCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_;
  3.1650 +text: .text%__1cRCompilationPolicybJreset_counter_for_back_branch_event6MnMmethodHandle__v_;
  3.1651 +text: .text%__1cOMethodLivenessQcompute_liveness6M_v_;
  3.1652 +text: .text%__1cOMethodLiveness2t6MpnFArena_pnIciMethod__v_;
  3.1653 +text: .text%__1cOMethodLivenessNinit_gen_kill6M_v_;
  3.1654 +text: .text%__1cOMethodLivenessSpropagate_liveness6M_v_;
  3.1655 +text: .text%__1cOMethodLivenessRinit_basic_blocks6M_v_;
  3.1656 +text: .text%__1cIGraphKitHopt_iff6MpnENode_2_2_;
  3.1657 +text: .text%__1cLRShiftINodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1658 +text: .text%__1cJTimeStampGupdate6M_v_;
  3.1659 +text: .text%__1cRmethodDataOopDescKmileage_of6FpnNmethodOopDesc__i_;
  3.1660 +text: .text%__1cWconstantPoolCacheKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1661 +text: .text%__1cMloadConDNodeIpipeline6kM_pknIPipeline__;
  3.1662 +text: .text%__1cFParseQarray_addressing6MnJBasicType_ippknEType__pnENode__;
  3.1663 +text: .text%__1cNloadConP0NodeLout_RegMask6kM_rknHRegMask__;
  3.1664 +text: .text%__1cQaddL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1665 +text: .text%__1cPCountedLoopNodeHsize_of6kM_I_: classes.o;
  3.1666 +text: .text%__1cIProjNodeJideal_reg6kM_I_;
  3.1667 +text: .text%__1cQaddI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1668 +text: .text%__1cQcmovI_reg_ltNodeIpipeline6kM_pknIPipeline__;
  3.1669 +text: .text%__1cRsubI_zero_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1670 +text: .text%__1cJcmpOpOperFequal6kM_i_: ad_sparc_clone.o;
  3.1671 +text: .text%__1cHCompilebAvarargs_C_out_slots_killed6kM_I_;
  3.1672 +text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o;
  3.1673 +text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_;
  3.1674 +text: .text%__1cOPhaseIdealLoopQset_subtree_ctrl6MpnENode__v_;
  3.1675 +text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
  3.1676 +text: .text%__1cNflagsRegLOperKin_RegMask6kMi_pknHRegMask__;
  3.1677 +text: .text%__1cYciExceptionHandlerStreamPcount_remaining6M_i_;
  3.1678 +text: .text%__1cFParseXcatch_inline_exceptions6MpnNSafePointNode__v_;
  3.1679 +text: .text%__1cRconstantPoolKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1680 +text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
  3.1681 +text: .text%__1cKcmpOpUOperNgreater_equal6kM_i_: ad_sparc_clone.o;
  3.1682 +text: .text%JVM_GetFieldIxModifiers;
  3.1683 +text: .text%__1cRScavengeRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.1684 +text: .text%__1cRScavengeRootsTaskEname6M_pc_: psTasks.o;
  3.1685 +text: .text%JVM_IsConstructorIx;
  3.1686 +text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_;
  3.1687 +text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_;
  3.1688 +text: .text%__1cSaddP_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1689 +text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__;
  3.1690 +text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_;
  3.1691 +text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_;
  3.1692 +text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__;
  3.1693 +text: .text%__1cKg1RegIOperKin_RegMask6kMi_pknHRegMask__;
  3.1694 +text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_;
  3.1695 +text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o;
  3.1696 +text: .text%__1cIAndINodeKmul_opcode6kM_i_: classes.o;
  3.1697 +text: .text%__1cIAndINodeKadd_opcode6kM_i_: classes.o;
  3.1698 +text: .text%__1cTMachCallRuntimeNodePret_addr_offset6M_i_;
  3.1699 +text: .text%__1cLConvL2INodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1700 +text: .text%__1cKo0RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.1701 +text: .text%__1cIregDOperKin_RegMask6kMi_pknHRegMask__;
  3.1702 +text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_;
  3.1703 +text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__;
  3.1704 +text: .text%__1cPBytecode_invokeFindex6kM_i_;
  3.1705 +text: .text%__1cLRethrowNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1706 +text: .text%__1cSPSKeepAliveClosureGdo_oop6MppnHoopDesc__v_: psScavenge.o;
  3.1707 +text: .text%__1cFParseFBlockRsuccessor_for_bci6Mi_p1_;
  3.1708 +text: .text%__1cVPreserveExceptionMark2T6M_v_;
  3.1709 +text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_;
  3.1710 +text: .text%__1cHRetNodeIpipeline6kM_pknIPipeline__;
  3.1711 +text: .text%__1cIRootNodeFValue6kMpnOPhaseTransform__pknEType__: classes.o;
  3.1712 +text: .text%__1cMoutputStreamFprint6MpkcE_v_;
  3.1713 +text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_;
  3.1714 +text: .text%__1cHCompileQsync_stack_slots6kM_i_;
  3.1715 +text: .text%__1cHMulNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1716 +text: .text%__1cJLoadFNodeGOpcode6kM_i_;
  3.1717 +text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o;
  3.1718 +text: .text%__1cHPhiNodeDcmp6kMrknENode__I_;
  3.1719 +text: .text%__1cHOrINodeGadd_id6kM_pknEType__: classes.o;
  3.1720 +text: .text%__1cSTailCalljmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1721 +text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_;
  3.1722 +text: .text%__1cKstoreINodeLout_RegMask6kM_rknHRegMask__;
  3.1723 +text: .text%__1cRloadConP_pollNodeIpipeline6kM_pknIPipeline__;
  3.1724 +text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_;
  3.1725 +text: .text%__1cLMachNopNodeMideal_Opcode6kM_i_: ad_sparc.o;
  3.1726 +text: .text%__1cLMachNopNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1727 +text: .text%__1cOPhaseIdealLoopNreorg_offsets6MpnNIdealLoopTree__v_;
  3.1728 +text: .text%__1cRshrL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
  3.1729 +text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_;
  3.1730 +text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_;
  3.1731 +text: .text%__1cPcompP_iRegPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1732 +text: .text%__1cSxorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1733 +text: .text%__1cOPhaseIdealLoopRsplit_thru_region6MpnENode_2_2_;
  3.1734 +text: .text%__1cIAndLNodeGadd_id6kM_pknEType__: classes.o;
  3.1735 +text: .text%__1cbAPSEvacuateFollowersClosureHdo_void6M_v_: psScavenge.o;
  3.1736 +text: .text%jni_ExceptionCheck: jni.o;
  3.1737 +text: .text%__1cIAndLNodeImul_ring6kMpknEType_3_3_;
  3.1738 +text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__;
  3.1739 +text: .text%__1cOPhaseIdealLoopMdominated_by6MpnENode_2_v_;
  3.1740 +text: .text%__1cQshlI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1741 +text: .text%__1cFParseNthrow_to_exit6MpnNSafePointNode__v_;
  3.1742 +text: .text%__1cQinstanceRefKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.1743 +text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_;
  3.1744 +text: .text%__1cJVectorSetGslamin6Mrk0_v_;
  3.1745 +text: .text%JVM_Clone;
  3.1746 +text: .text%__1cRAbstractAssemblerFflush6M_v_;
  3.1747 +text: .text%__1cITypeLongFxdual6kM_pknEType__;
  3.1748 +text: .text%__1cIJumpDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
  3.1749 +text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_;
  3.1750 +text: .text%__1cOstackSlotLOperEtype6kM_pknEType__: ad_sparc.o;
  3.1751 +text: .text%__1cURethrowExceptionNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1752 +text: .text%__1cRshrL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
  3.1753 +text: .text%__1cLOpaque2NodeEhash6kM_I_;
  3.1754 +text: .text%__1cJloadFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1755 +text: .text%__1cUcompU_iReg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.1756 +text: .text%__1cKstoreINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1757 +text: .text%__1cUEdenMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__;
  3.1758 +text: .text%__1cYSurvivorMutableSpacePoolQget_memory_usage6M_nLMemoryUsage__;
  3.1759 +text: .text%__1cLOptoRuntimeJstub_name6FpC_pkc_;
  3.1760 +text: .text%__1cHOrINodeJideal_reg6kM_I_: classes.o;
  3.1761 +text: .text%__1cICmpLNodeDsub6kMpknEType_3_3_;
  3.1762 +text: .text%__1cHPhiNodeKmake_blank6FpnENode_2_p0_;
  3.1763 +text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o;
  3.1764 +text: .text%__1cIMulINodeGadd_id6kM_pknEType__: classes.o;
  3.1765 +text: .text%__1cOMachEpilogNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1766 +text: .text%__1cFStateM_sub_Op_SubI6MpknENode__v_;
  3.1767 +text: .text%__1cFframeRretrieve_receiver6MpnLRegisterMap__pnHoopDesc__;
  3.1768 +text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__;
  3.1769 +text: .text%__1cNloadKlassNodeOmemory_operand6kM_pknIMachOper__;
  3.1770 +text: .text%__1cMTailCallNodeKmatch_edge6kMI_I_;
  3.1771 +text: .text%jni_NewObject: jni.o;
  3.1772 +text: .text%__1cIPhaseIFGYCompute_Effective_Degree6M_v_;
  3.1773 +text: .text%__1cHMemNodeIadr_type6kM_pknHTypePtr__;
  3.1774 +text: .text%__1cXmembar_release_lockNodeIadr_type6kM_pknHTypePtr__;
  3.1775 +text: .text%__1cJNode_ListEyank6MpnENode__v_;
  3.1776 +text: .text%__1cMPhaseChaitinISimplify6M_v_;
  3.1777 +text: .text%__1cNIdealLoopTreeIset_nest6MI_i_;
  3.1778 +text: .text%__1cSCallLeafDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.1779 +text: .text%__1cIMulLNodeImul_ring6kMpknEType_3_3_;
  3.1780 +text: .text%__1cMStartOSRNodeGOpcode6kM_i_;
  3.1781 +text: .text%__1cSCallLeafDirectNodeLout_RegMask6kM_rknHRegMask__;
  3.1782 +text: .text%__1cIMulLNodeGadd_id6kM_pknEType__: classes.o;
  3.1783 +text: .text%__1cLcmpD_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1784 +text: .text%__1cJcmpOpOperEless6kM_i_: ad_sparc_clone.o;
  3.1785 +text: .text%__1cKciTypeFlowPflow_exceptions6MpnNGrowableArray4Cpn0AFBlock___pnNGrowableArray4CpnPciInstanceKlass___pn0ALStateVector__v_;
  3.1786 +text: .text%__1cKType_ArrayEgrow6MI_v_;
  3.1787 +text: .text%__1cNloadConP0NodeIpipeline6kM_pknIPipeline__;
  3.1788 +text: .text%__1cXmembar_release_lockNodeLout_RegMask6kM_rknHRegMask__;
  3.1789 +text: .text%__1cPconvF2D_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1790 +text: .text%__1cRshrL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
  3.1791 +text: .text%__1cMURShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.1792 +text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
  3.1793 +text: .text%__1cVshrL_reg_imm6_L2INodeIpipeline6kM_pknIPipeline__;
  3.1794 +text: .text%__1cSMemBarVolatileNodeGOpcode6kM_i_;
  3.1795 +text: .text%__1cLstoreB0NodeOmemory_operand6kM_pknIMachOper__;
  3.1796 +text: .text%__1cRshrI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1797 +text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_;
  3.1798 +text: .text%__1cRcmpFastUnlockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.1799 +text: .text%__1cNSafePointNodeLpop_monitor6M_v_;
  3.1800 +text: .text%__1cMPhaseChaitinVfind_base_for_derived6MppnENode_2rI_2_;
  3.1801 +text: .text%__1cLOptoRuntimebAcomplete_monitor_exit_Type6F_pknITypeFunc__;
  3.1802 +text: .text%__1cOstackSlotIOperEtype6kM_pknEType__: ad_sparc.o;
  3.1803 +text: .text%__1cIGraphKitNshared_unlock6MpnENode_2_v_;
  3.1804 +text: .text%__1cFStateT_sub_Op_CheckCastPP6MpknENode__v_;
  3.1805 +text: .text%__1cQsubI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1806 +text: .text%__1cFKlassDLCA6Mp0_1_;
  3.1807 +text: .text%__1cKTypeRawPtrEmake6FnHTypePtrDPTR__pk0_;
  3.1808 +text: .text%__1cHciKlassVleast_common_ancestor6Mp0_1_;
  3.1809 +text: .text%__1cOPhaseIdealLoopPbuild_loop_tree6M_v_;
  3.1810 +text: .text%__1cRcompL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.1811 +text: .text%__1cRshlL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1812 +text: .text%__1cRloadConP_pollNodeLout_RegMask6kM_rknHRegMask__;
  3.1813 +text: .text%__1cQshlL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1814 +text: .text%__1cMindirectOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.1815 +text: .text%__1cMindirectOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.1816 +text: .text%__1cMindirectOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.1817 +text: .text%__1cNSafePointNodeMpush_monitor6MpknMFastLockNode__v_;
  3.1818 +text: .text%__1cSCallLeafDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1819 +text: .text%__1cSCallLeafDirectNodeKmethod_set6Mi_v_;
  3.1820 +text: .text%__1cIDivINodeLbottom_type6kM_pknEType__: classes.o;
  3.1821 +text: .text%__1cJLoadBNodeJideal_reg6kM_I_: classes.o;
  3.1822 +text: .text%__1cJloadBNodeOmemory_operand6kM_pknIMachOper__;
  3.1823 +text: .text%__1cPCountedLoopNodeJinit_trip6kM_pnENode__: cfgnode.o;
  3.1824 +text: .text%__1cRcompL_reg_conNodeIpipeline6kM_pknIPipeline__;
  3.1825 +text: .text%__1cPcheckCastPPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1826 +text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_;
  3.1827 +text: .text%__1cJCMoveNodeLis_cmove_id6FpnOPhaseTransform_pnENode_44pnIBoolNode__4_;
  3.1828 +text: .text%__1cKTypeAryPtrQcast_to_ptr_type6kMnHTypePtrDPTR__pknEType__;
  3.1829 +text: .text%__1cOPhaseIdealLoopKDominators6M_v_;
  3.1830 +text: .text%__1cOPhaseIdealLoopPbuild_loop_late6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_;
  3.1831 +text: .text%__1cOPhaseIdealLoopQbuild_loop_early6MrnJVectorSet_rnJNode_List_rnKNode_Stack_pk0_v_;
  3.1832 +text: .text%jni_NewGlobalRef: jni.o;
  3.1833 +text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_;
  3.1834 +text: .text%__1cIAndINodeJideal_reg6kM_I_: classes.o;
  3.1835 +text: .text%__1cYcompareAndSwapL_boolNodeLout_RegMask6kM_rknHRegMask__;
  3.1836 +text: .text%__1cMPhaseChaitinFSplit6MI_I_;
  3.1837 +text: .text%__1cMPhaseChaitinHcompact6M_v_;
  3.1838 +text: .text%__1cZPhaseConservativeCoalesce2t6MrnMPhaseChaitin__v_;
  3.1839 +text: .text%__1cMPhaseChaitinZcompress_uf_map_for_nodes6M_v_;
  3.1840 +text: .text%__1cZPhaseConservativeCoalesceGverify6M_v_;
  3.1841 +text: .text%__1cRcmpFastUnlockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1842 +text: .text%__1cQshlI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1843 +text: .text%__1cXmembar_release_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1844 +text: .text%__1cKPSYoungGenNused_in_bytes6kM_I_;
  3.1845 +text: .text%__1cOMachEpilogNodeIpipeline6kM_pknIPipeline__;
  3.1846 +text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_;
  3.1847 +text: .text%__1cJloadFNodeIpipeline6kM_pknIPipeline__;
  3.1848 +text: .text%__1cIRootNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
  3.1849 +text: .text%__1cJLoadLNodeJideal_reg6kM_I_: classes.o;
  3.1850 +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_;
  3.1851 +text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_;
  3.1852 +text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_;
  3.1853 +text: .text%JVM_FillInStackTrace;
  3.1854 +text: .text%__1cKJavaThreadGactive6F_p0_;
  3.1855 +text: .text%__1cKstoreFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.1856 +text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_;
  3.1857 +text: .text%__1cMVirtualSpaceNreserved_size6kM_I_;
  3.1858 +text: .text%__1cICodeHeapMmax_capacity6kM_I_;
  3.1859 +text: .text%__1cRsubI_zero_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.1860 +text: .text%__1cHTypePtrFxmeet6kMpknEType__3_;
  3.1861 +text: .text%__1cNflagsRegFOperEtype6kM_pknEType__: ad_sparc.o;
  3.1862 +text: .text%__1cIMinINodeLbottom_type6kM_pknEType__: classes.o;
  3.1863 +text: .text%__1cFParseWensure_phis_everywhere6M_v_;
  3.1864 +text: .text%__1cLRethrowNodeEhash6kM_I_: classes.o;
  3.1865 +text: .text%__1cIDivLNodeGOpcode6kM_i_;
  3.1866 +text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_;
  3.1867 +text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_;
  3.1868 +text: .text%__1cNmethodOopDescVclear_native_function6M_v_;
  3.1869 +text: .text%__1cOloadConL13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1870 +text: .text%__1cQsubL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.1871 +text: .text%jio_snprintf;
  3.1872 +text: .text%__1cMloadConINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1873 +text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
  3.1874 +text: .text%__1cSmulI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1875 +text: .text%jni_NewLocalRef: jni.o;
  3.1876 +text: .text%__1cIMulDNodeGOpcode6kM_i_;
  3.1877 +text: .text%__1cLStrCompNodeGOpcode6kM_i_;
  3.1878 +text: .text%__1cQcmovI_reg_gtNodeIpipeline6kM_pknIPipeline__;
  3.1879 +text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_;
  3.1880 +text: .text%__1cKStoreFNodeGOpcode6kM_i_;
  3.1881 +text: .text%__1cLConvD2INodeGOpcode6kM_i_;
  3.1882 +text: .text%__1cIAddLNodeGadd_id6kM_pknEType__: classes.o;
  3.1883 +text: .text%__1cMURShiftLNodeLbottom_type6kM_pknEType__: classes.o;
  3.1884 +text: .text%__1cKReturnNodeJideal_reg6kM_I_: classes.o;
  3.1885 +text: .text%jni_DeleteGlobalRef: jni.o;
  3.1886 +text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_;
  3.1887 +text: .text%__1cVPatchingRelocIteratorHprepass6M_v_;
  3.1888 +text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_;
  3.1889 +text: .text%__1cIAndLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1890 +text: .text%__1cIAndLNodeGmul_id6kM_pknEType__: classes.o;
  3.1891 +text: .text%__1cJOopMapSet2t6M_v_;
  3.1892 +text: .text%__1cNSCMemProjNodeLbottom_type6kM_pknEType__: classes.o;
  3.1893 +text: .text%JVM_GetCPMethodModifiers;
  3.1894 +text: .text%jni_GetObjectArrayElement: jni.o;
  3.1895 +text: .text%__1cFParseKarray_load6MnJBasicType__v_;
  3.1896 +text: .text%jni_SetLongField: jni.o;
  3.1897 +text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_;
  3.1898 +text: .text%__1cJOopMapSetHcopy_to6MpC_v_;
  3.1899 +text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_;
  3.1900 +text: .text%__1cJOopMapSetJheap_size6kM_i_;
  3.1901 +text: .text%__1cNSafePointNodeKgrow_stack6MpnIJVMState_I_v_;
  3.1902 +text: .text%__1cIJVMState2t6Mi_v_;
  3.1903 +text: .text%__1cIAndLNodeKadd_opcode6kM_i_: classes.o;
  3.1904 +text: .text%__1cIAndLNodeKmul_opcode6kM_i_: classes.o;
  3.1905 +text: .text%__1cJLoadSNodeJideal_reg6kM_I_: classes.o;
  3.1906 +text: .text%__1cMMachProjNodeHsize_of6kM_I_: classes.o;
  3.1907 +text: .text%__1cOPhaseIdealLoopUsplit_if_with_blocks6MrnJVectorSet_rnKNode_Stack__v_;
  3.1908 +text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_;
  3.1909 +text: .text%__1cLOopRecorderIoop_size6M_i_;
  3.1910 +text: .text%__1cYDebugInformationRecorderJdata_size6M_i_;
  3.1911 +text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_;
  3.1912 +text: .text%__1cOPhaseIdealLoopOset_early_ctrl6MpnENode__v_;
  3.1913 +text: .text%__1cHnmethodKtotal_size6kM_i_;
  3.1914 +text: .text%__1cbFunnecessary_membar_volatileNodeIpipeline6kM_pknIPipeline__;
  3.1915 +text: .text%__1cMloadConLNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.1916 +text: .text%__1cFParseNadd_safepoint6M_v_;
  3.1917 +text: .text%__1cOPhaseTransform2t6Mp0nFPhaseLPhaseNumber__v_;
  3.1918 +text: .text%__1cLPhaseValues2t6Mp0_v_;
  3.1919 +text: .text%__1cQcmovI_reg_ltNodeErule6kM_I_: ad_sparc_misc.o;
  3.1920 +text: .text%__1cXPhaseAggressiveCoalesceGverify6M_v_: coalesce.o;
  3.1921 +text: .text%__1cHCompilebBregister_library_intrinsics6M_v_;
  3.1922 +text: .text%__1cXPhaseAggressiveCoalesceNinsert_copies6MrnHMatcher__v_;
  3.1923 +text: .text%__1cNPhaseRegAlloc2t6MIrnIPhaseCFG_rnHMatcher_pF_v_v_;
  3.1924 +text: .text%__1cIPhaseCFGJbuild_cfg6M_I_;
  3.1925 +text: .text%__1cHCompileEInit6Mi_v_;
  3.1926 +text: .text%__1cVExceptionHandlerTable2t6Mi_v_;
  3.1927 +text: .text%__1cMPhaseChaitin2t6MIrnIPhaseCFG_rnHMatcher__v_;
  3.1928 +text: .text%__1cMPhaseChaitinRRegister_Allocate6M_v_;
  3.1929 +text: .text%__1cHCompileTset_cached_top_node6MpnENode__v_;
  3.1930 +text: .text%__1cHMatcherZnumber_of_saved_registers6F_i_;
  3.1931 +text: .text%__1cNPhaseRegAllocTpd_preallocate_hook6M_v_;
  3.1932 +text: .text%__1cLBlock_Array2t6MpnFArena__v_: block.o;
  3.1933 +text: .text%__1cMPhaseChaitinMreset_uf_map6MI_v_;
  3.1934 +text: .text%__1cMPhaseChaitinRbuild_ifg_virtual6M_v_;
  3.1935 +text: .text%__1cIPhaseCFGQGlobalCodeMotion6MrnHMatcher_IrnJNode_List__v_;
  3.1936 +text: .text%__1cHMatcherTFixup_Save_On_Entry6M_v_;
  3.1937 +text: .text%__1cHMatcherPinit_spill_mask6MpnENode__v_;
  3.1938 +text: .text%__1cHCompileICode_Gen6M_v_;
  3.1939 +text: .text%__1cFArena2t6MI_v_;
  3.1940 +text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_;
  3.1941 +text: .text%__1cHMatcherVinit_first_stack_mask6M_v_;
  3.1942 +text: .text%__1cFArenaNmove_contents6Mp0_1_;
  3.1943 +text: .text%__1cFArenaRdestruct_contents6M_v_;
  3.1944 +text: .text%__1cIPhaseIFG2t6MpnFArena__v_;
  3.1945 +text: .text%__1cFDictIFreset6MpknEDict__v_;
  3.1946 +text: .text%__1cHMatcherFmatch6M_v_;
  3.1947 +text: .text%__1cHMatcher2t6MrnJNode_List__v_;
  3.1948 +text: .text%__1cIPhaseCFGVschedule_pinned_nodes6MrnJVectorSet__v_;
  3.1949 +text: .text%__1cETypeKInitialize6FpnHCompile__v_;
  3.1950 +text: .text%__1cIPhaseCFGYEstimate_Block_Frequency6M_v_;
  3.1951 +text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_;
  3.1952 +text: .text%__1cOCompileWrapper2t6MpnHCompile__v_;
  3.1953 +text: .text%__1cIPhaseCFGKDominators6M_v_;
  3.1954 +text: .text%__1cIPhaseCFG2t6MpnFArena_pnIRootNode_rnHMatcher__v_;
  3.1955 +text: .text%__1cJPhaseLive2t6MrknIPhaseCFG_rnILRG_List_pnFArena__v_;
  3.1956 +text: .text%__1cHCompileYinit_scratch_buffer_blob6M_v_;
  3.1957 +text: .text%__1cOMachPrologNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1958 +text: .text%__1cHCompileTFillExceptionTables6MIpI1pnFLabel__v_;
  3.1959 +text: .text%__1cMPhaseChaitinbApost_allocate_copy_removal6M_v_;
  3.1960 +text: .text%__1cHCompileGOutput6M_v_;
  3.1961 +text: .text%__1cWImplicitExceptionTableIset_size6MI_v_;
  3.1962 +text: .text%__1cHCompileMBuildOopMaps6M_v_;
  3.1963 +text: .text%__1cLdo_liveness6FpnNPhaseRegAlloc_pnIPhaseCFG_pnKBlock_List_ipnFArena_pnEDict__v_: buildOopMap.o;
  3.1964 +text: .text%__1cMPhaseChaitinMfixup_spills6M_v_;
  3.1965 +text: .text%__1cNPhaseRegAllocPalloc_node_regs6Mi_v_;
  3.1966 +text: .text%__1cHCompileLFill_buffer6M_v_;
  3.1967 +text: .text%__1cVCallRuntimeDirectNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1968 +text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o;
  3.1969 +text: .text%__1cENodeHrm_prec6MI_v_;
  3.1970 +text: .text%__1cHRetNodeLout_RegMask6kM_rknHRegMask__;
  3.1971 +text: .text%__1cKstoreFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1972 +text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_;
  3.1973 +text: .text%JVM_DoPrivileged;
  3.1974 +text: .text%__1cRsubI_zero_regNodeIpipeline6kM_pknIPipeline__;
  3.1975 +text: .text%__1cHRetNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1976 +text: .text%__1cIConDNodeGOpcode6kM_i_;
  3.1977 +text: .text%__1cObranchConFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.1978 +text: .text%__1cTresource_free_bytes6FpcI_v_;
  3.1979 +text: .text%__1cNmethodOopDescbDbuild_interpreter_method_data6FnMmethodHandle_pnGThread__v_;
  3.1980 +text: .text%__1cRcompL_reg_conNodeLout_RegMask6kM_rknHRegMask__;
  3.1981 +text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_;
  3.1982 +text: .text%__1cPconvL2I_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.1983 +text: .text%__1cFciEnvKcompile_id6M_I_;
  3.1984 +text: .text%__1cPmethodDataKlassIallocate6MnMmethodHandle_pnGThread__pnRmethodDataOopDesc__;
  3.1985 +text: .text%__1cKoopFactoryOnew_methodData6FnMmethodHandle_pnGThread__pnRmethodDataOopDesc__;
  3.1986 +text: .text%__1cIAndLNodeJideal_reg6kM_I_: classes.o;
  3.1987 +text: .text%__1cKCodeBuffer2t6MpCi_v_;
  3.1988 +text: .text%__1cVshrL_reg_imm6_L2INodeErule6kM_I_: ad_sparc_misc.o;
  3.1989 +text: .text%__1cLConvL2INodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.1990 +text: .text%__1cIciMethodRinstructions_size6M_i_;
  3.1991 +text: .text%__1cSmulI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.1992 +text: .text%__1cCosXthread_local_storage_at6Fi_pv_;
  3.1993 +text: .text%__1cMindIndexOperNconstant_disp6kM_i_: ad_sparc.o;
  3.1994 +text: .text%__1cMindIndexOperOindex_position6kM_i_: ad_sparc.o;
  3.1995 +text: .text%__1cMindIndexOperFscale6kM_i_: ad_sparc.o;
  3.1996 +text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: assembler_sparc.o;
  3.1997 +text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_;
  3.1998 +text: .text%__1cMindIndexOperNbase_position6kM_i_: ad_sparc.o;
  3.1999 +text: .text%__1cNloadKlassNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2000 +text: .text%__1cFStateR_sub_Op_LoadKlass6MpknENode__v_;
  3.2001 +text: .text%__1cGTarjanICOMPRESS6M_v_;
  3.2002 +text: .text%__1cKstoreCNodeOmemory_operand6kM_pknIMachOper__;
  3.2003 +text: .text%__1cICmpDNodeGOpcode6kM_i_;
  3.2004 +text: .text%__1cNloadConL0NodeErule6kM_I_: ad_sparc_misc.o;
  3.2005 +text: .text%__1cIMulLNodeGmul_id6kM_pknEType__: classes.o;
  3.2006 +text: .text%__1cOPhaseIdealLoopOplace_near_use6kMpnENode__2_;
  3.2007 +text: .text%__1cVCallRuntimeDirectNodeIpipeline6kM_pknIPipeline__;
  3.2008 +text: .text%__1cLstoreB0NodeLout_RegMask6kM_rknHRegMask__;
  3.2009 +text: .text%__1cSInterpreterRuntimeOprofile_method6FpnKJavaThread_pC_i_;
  3.2010 +text: .text%__1cMURShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2011 +text: .text%__1cJloadPNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2012 +text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_;
  3.2013 +text: .text%__1cLRShiftINodeJideal_reg6kM_I_: classes.o;
  3.2014 +text: .text%__1cIMachNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2015 +text: .text%__1cLOpaque2NodeLbottom_type6kM_pknEType__: connode.o;
  3.2016 +text: .text%__1cSconvI2D_helperNodeIpipeline6kM_pknIPipeline__;
  3.2017 +text: .text%__1cUPSGenerationCountersKupdate_all6M_v_: psGenerationCounters.o;
  3.2018 +text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o;
  3.2019 +text: .text%__1cKTypeOopPtrSmake_from_constant6FpnIciObject__pk0_;
  3.2020 +text: .text%__1cQregP_to_stkPNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2021 +text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_;
  3.2022 +text: .text%__1cJTimeStampSticks_since_update6kM_x_;
  3.2023 +text: .text%__1cQmodI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2024 +text: .text%__1cIMulINodeImul_ring6kMpknEType_3_3_;
  3.2025 +text: .text%__1cURethrowExceptionNodeIpipeline6kM_pknIPipeline__;
  3.2026 +text: .text%__1cIAddLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2027 +text: .text%__1cQcmovI_reg_ltNodeLout_RegMask6kM_rknHRegMask__;
  3.2028 +text: .text%__1cLstoreB0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2029 +text: .text%__1cSaddI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2030 +text: .text%__1cIModINodeLbottom_type6kM_pknEType__: classes.o;
  3.2031 +text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_;
  3.2032 +text: .text%__1cJcmpOpOperHgreater6kM_i_: ad_sparc_clone.o;
  3.2033 +text: .text%__1cJimmL0OperJconstantL6kM_x_: ad_sparc_clone.o;
  3.2034 +text: .text%__1cJimmI0OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2035 +text: .text%__1cFStateM_sub_Op_ConL6MpknENode__v_;
  3.2036 +text: .text%__1cOloadConL13NodeErule6kM_I_: ad_sparc_misc.o;
  3.2037 +text: .text%__1cNObjectMonitorHis_busy6kM_i_;
  3.2038 +text: .text%JVM_GetClassNameUTF;
  3.2039 +text: .text%__1cKloadUBNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2040 +text: .text%__1cIXorINodeGadd_id6kM_pknEType__: classes.o;
  3.2041 +text: .text%__1cFStateM_sub_Op_AndI6MpknENode__v_;
  3.2042 +text: .text%__1cVshrL_reg_imm6_L2INodeLout_RegMask6kM_rknHRegMask__;
  3.2043 +text: .text%__1cKcmpOpFOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2044 +text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.2045 +text: .text%__1cTmembar_volatileNodeIpipeline6kM_pknIPipeline__;
  3.2046 +text: .text%__1cJloadFNodeLout_RegMask6kM_rknHRegMask__;
  3.2047 +text: .text%__1cFStateL_sub_Op_OrI6MpknENode__v_;
  3.2048 +text: .text%__1cJCmpL3NodeGOpcode6kM_i_;
  3.2049 +text: .text%JVM_FindLoadedClass;
  3.2050 +text: .text%__1cIMulLNodeKadd_opcode6kM_i_: classes.o;
  3.2051 +text: .text%__1cIMulLNodeKmul_opcode6kM_i_: classes.o;
  3.2052 +text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_;
  3.2053 +text: .text%__1cIConFNodeGOpcode6kM_i_;
  3.2054 +text: .text%__1cSmembar_acquireNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2055 +text: .text%__1cQmulD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2056 +text: .text%__1cIModLNodeGOpcode6kM_i_;
  3.2057 +text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_;
  3.2058 +text: .text%__1cQLibraryIntrinsicIgenerate6MpnIJVMState__2_;
  3.2059 +text: .text%__1cLRShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2060 +text: .text%__1cKTypeRawPtrFxdual6kM_pknEType__;
  3.2061 +text: .text%__1cNloadConL0NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2062 +text: .text%__1cFTypeFEmake6Ff_pk0_;
  3.2063 +text: .text%__1cIimmFOperJconstantF6kM_f_: ad_sparc_clone.o;
  3.2064 +text: .text%__1cEUTF8Ounicode_length6Fpkc_i_;
  3.2065 +text: .text%__1cCosRcurrent_thread_id6F_i_;
  3.2066 +text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_;
  3.2067 +text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_;
  3.2068 +text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_;
  3.2069 +text: .text%__1cSmembar_acquireNodeIadr_type6kM_pknHTypePtr__;
  3.2070 +text: .text%__1cPorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2071 +text: .text%__1cIPhaseCFGOinsert_goto_at6MII_v_;
  3.2072 +text: .text%__1cITypeLongFwiden6kMpknEType__3_;
  3.2073 +text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__;
  3.2074 +text: .text%__1cPCallRuntimeNodeGOpcode6kM_i_;
  3.2075 +text: .text%__1cJcmpOpOperNgreater_equal6kM_i_: ad_sparc_clone.o;
  3.2076 +text: .text%__1cMindIndexOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.2077 +text: .text%__1cMindIndexOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.2078 +text: .text%__1cMindIndexOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.2079 +text: .text%JVM_FindClassFromClass;
  3.2080 +text: .text%__1cRshrP_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2081 +text: .text%__1cObranchConFNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2082 +text: .text%__1cQshrI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2083 +text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_;
  3.2084 +text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_;
  3.2085 +text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_;
  3.2086 +text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_;
  3.2087 +text: .text%JVM_NewInstanceFromConstructor;
  3.2088 +text: .text%__1cFParseFBlockMadd_new_path6M_i_;
  3.2089 +text: .text%__1cIimmPOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2090 +text: .text%__1cQsubL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2091 +text: .text%__1cJloadBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2092 +text: .text%__1cLConvF2DNodeGOpcode6kM_i_;
  3.2093 +text: .text%__1cLConvI2DNodeGOpcode6kM_i_;
  3.2094 +text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__;
  3.2095 +text: .text%__1cMloadConFNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2096 +text: .text%__1cKcmpOpPOperNgreater_equal6kM_i_: ad_sparc_clone.o;
  3.2097 +text: .text%__1cLRShiftLNodeLbottom_type6kM_pknEType__: classes.o;
  3.2098 +text: .text%__1cKimmL13OperJconstantL6kM_x_: ad_sparc_clone.o;
  3.2099 +text: .text%__1cSTailCalljmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2100 +text: .text%__1cKstoreLNodeOmemory_operand6kM_pknIMachOper__;
  3.2101 +text: .text%__1cGIfNodeMdominated_by6MpnENode_pnMPhaseIterGVN__v_;
  3.2102 +text: .text%__1cOcompiledVFrame2t6MpknFframe_pknLRegisterMap_pnKJavaThread_pnJScopeDesc__v_;
  3.2103 +text: .text%__1cJScopeDesc2t6MpknHnmethod_i_v_;
  3.2104 +text: .text%__1cQshlI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2105 +text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_;
  3.2106 +text: .text%__1cbFunnecessary_membar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2107 +text: .text%__1cULinearLeastSquareFitGupdate6Mdd_v_;
  3.2108 +text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__;
  3.2109 +text: .text%__1cKstoreCNodeLout_RegMask6kM_rknHRegMask__;
  3.2110 +text: .text%__1cKstoreCNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2111 +text: .text%__1cJcmpOpOperKless_equal6kM_i_: ad_sparc_clone.o;
  3.2112 +text: .text%__1cXmembar_acquire_lockNodeLout_RegMask6kM_rknHRegMask__;
  3.2113 +text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__;
  3.2114 +text: .text%__1cMloadConLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2115 +text: .text%__1cIMaxINodeLbottom_type6kM_pknEType__: classes.o;
  3.2116 +text: .text%__1cMloadConDNodeLout_RegMask6kM_rknHRegMask__;
  3.2117 +text: .text%__1cMindirectOperNconstant_disp6kM_i_: ad_sparc.o;
  3.2118 +text: .text%__1cMindirectOperNbase_position6kM_i_: ad_sparc.o;
  3.2119 +text: .text%__1cIAddLNodeJideal_reg6kM_I_: classes.o;
  3.2120 +text: .text%__1cMindirectOperFscale6kM_i_: ad_sparc.o;
  3.2121 +text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
  3.2122 +text: .text%__1cSsubL_reg_reg_2NodeErule6kM_I_: ad_sparc_misc.o;
  3.2123 +text: .text%jni_NewString: jni.o;
  3.2124 +text: .text%__1cLConvL2INodeJideal_reg6kM_I_: classes.o;
  3.2125 +text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__;
  3.2126 +text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__;
  3.2127 +text: .text%__1cOcompiledVFrameEcode6kM_pnHnmethod__;
  3.2128 +text: .text%__1cIGraphKitMnext_monitor6M_i_;
  3.2129 +text: .text%__1cLBoxLockNode2t6Mi_v_;
  3.2130 +text: .text%__1cPconvF2D_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2131 +text: .text%__1cLOptoRuntimebBcomplete_monitor_enter_Type6F_pknITypeFunc__;
  3.2132 +text: .text%__1cIGraphKitLshared_lock6MpnENode__pnMFastLockNode__;
  3.2133 +text: .text%__1cPcmpFastLockNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2134 +text: .text%__1cNloadConP0NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2135 +text: .text%__1cRorI_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2136 +text: .text%__1cKcmpOpUOperEless6kM_i_: ad_sparc_clone.o;
  3.2137 +text: .text%__1cQaddF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2138 +text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_;
  3.2139 +text: .text%lwp_mutex_init: os_solaris.o;
  3.2140 +text: .text%__1cRsubI_zero_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2141 +text: .text%__1cFframeLnmethods_do6M_v_;
  3.2142 +text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__;
  3.2143 +text: .text%__1cQnotemp_iRegIOperEtype6kM_pknEType__: ad_sparc.o;
  3.2144 +text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__;
  3.2145 +text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o;
  3.2146 +text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o;
  3.2147 +text: .text%__1cCosPhint_no_preempt6F_v_;
  3.2148 +text: .text%__1cOcmovII_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2149 +text: .text%__1cIMulLNodeJideal_reg6kM_I_: classes.o;
  3.2150 +text: .text%__1cIMulINodeGmul_id6kM_pknEType__: classes.o;
  3.2151 +text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_;
  3.2152 +text: .text%__1cRsarL_reg_imm6NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2153 +text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__;
  3.2154 +text: .text%__1cSstring_compareNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2155 +text: .text%__1cFciEnv2T6M_v_;
  3.2156 +text: .text%__1cIGraphKitNgen_checkcast6MpnENode_2p2_2_;
  3.2157 +text: .text%__1cMMergeMemNodeIadr_type6kM_pknHTypePtr__: memnode.o;
  3.2158 +text: .text%__1cJcmpOpOperJnot_equal6kM_i_: ad_sparc_clone.o;
  3.2159 +text: .text%__1cGvframeDtop6kM_p0_;
  3.2160 +text: .text%__1cOCompiledRFrameEinit6M_v_;
  3.2161 +text: .text%__1cXmembar_acquire_lockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2162 +text: .text%__1cJloadSNodeOmemory_operand6kM_pknIMachOper__;
  3.2163 +text: .text%__1cVCallRuntimeDirectNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2164 +text: .text%__1cPcmpFastLockNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2165 +text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_;
  3.2166 +text: .text%__1cIXorINodeJideal_reg6kM_I_: classes.o;
  3.2167 +text: .text%__1cIGraphKitRgen_subtype_check6MpnENode_2_2_;
  3.2168 +text: .text%__1cOMacroAssemblerLsave_thread6MkpnMRegisterImpl__v_;
  3.2169 +text: .text%__1cOcmovII_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2170 +text: .text%__1cMloadConINodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2171 +text: .text%__1cRshlL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
  3.2172 +text: .text%__1cFParseGdo_new6M_v_;
  3.2173 +text: .text%__1cIimmIOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2174 +text: .text%__1cQmodI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2175 +text: .text%__1cLConvI2LNodeJideal_reg6kM_I_: classes.o;
  3.2176 +text: .text%jni_GetObjectClass: jni.o;
  3.2177 +text: .text%__1cSxorI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.2178 +text: .text%__1cOMacroAssemblerFalign6Mi_v_;
  3.2179 +text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_;
  3.2180 +text: .text%__1cKManagementJtimestamp6F_x_;
  3.2181 +text: .text%__1cIPSOldGenPupdate_counters6M_v_;
  3.2182 +text: .text%__1cQshrI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2183 +text: .text%__1cFForteNregister_stub6FpkcpC3_v_;
  3.2184 +text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_;
  3.2185 +text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
  3.2186 +text: .text%__1cTloadL_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2187 +text: .text%__1cJloadLNodeOmemory_operand6kM_pknIMachOper__;
  3.2188 +text: .text%__1cOMacroAssemblerVreset_last_Java_frame6M_v_;
  3.2189 +text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_2_v_;
  3.2190 +text: .text%__1cSstring_compareNodeIpipeline6kM_pknIPipeline__;
  3.2191 +text: .text%__1cOstackSlotIOperKin_RegMask6kMi_pknHRegMask__;
  3.2192 +text: .text%__1cQregF_to_stkINodeIpipeline6kM_pknIPipeline__;
  3.2193 +text: .text%__1cINodeHash2t6MpnFArena_I_v_;
  3.2194 +text: .text%__1cPconvI2L_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2195 +text: .text%__1cOPhaseTransform2t6MpnFArena_nFPhaseLPhaseNumber__v_;
  3.2196 +text: .text%__1cLPhaseValues2t6MpnFArena_I_v_;
  3.2197 +text: .text%__1cJStubQdDueueGcommit6Mi_v_;
  3.2198 +text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__;
  3.2199 +text: .text%__1cOcmovII_regNodeIpipeline6kM_pknIPipeline__;
  3.2200 +text: .text%__1cKstoreFNodeIpipeline6kM_pknIPipeline__;
  3.2201 +text: .text%__1cOMacroAssemblerKsave_frame6Mi_v_;
  3.2202 +text: .text%__1cSmulI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.2203 +text: .text%__1cLstoreC0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2204 +text: .text%__1cOPhaseIdealLoopVclone_up_backedge_goo6MpnENode_22_2_;
  3.2205 +text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_;
  3.2206 +text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_;
  3.2207 +text: .text%JVM_FindClassFromClassLoader;
  3.2208 +text: .text%JVM_FindClassFromBootLoader;
  3.2209 +text: .text%signalHandler;
  3.2210 +text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o;
  3.2211 +text: .text%JVM_handle_solaris_signal;
  3.2212 +text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__;
  3.2213 +text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o;
  3.2214 +text: .text%__1cFStateM_sub_Op_AndL6MpknENode__v_;
  3.2215 +text: .text%__1cKConv2BNodeGOpcode6kM_i_;
  3.2216 +text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_;
  3.2217 +text: .text%JVM_IHashCode;
  3.2218 +text: .text%__1cSconvI2D_helperNodeLout_RegMask6kM_rknHRegMask__;
  3.2219 +text: .text%__1cJStartNodeJideal_reg6kM_I_: callnode.o;
  3.2220 +text: .text%__1cOMacroAssemblerbBcheck_and_forward_exception6MpnMRegisterImpl__v_;
  3.2221 +text: .text%__1cQcmovI_reg_ltNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.2222 +text: .text%__1cQandL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2223 +text: .text%__1cLPhaseValuesKis_IterGVN6M_pnMPhaseIterGVN__: phaseX.o;
  3.2224 +text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
  3.2225 +text: .text%__1cKC2CompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_;
  3.2226 +text: .text%JVM_GetClassLoader;
  3.2227 +text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_;
  3.2228 +text: .text%__1cCosRelapsed_frequency6F_x_;
  3.2229 +text: .text%__1cFStateP_sub_Op_ConvL2I6MpknENode__v_;
  3.2230 +text: .text%__1cOPhaseIdealLoopLdo_split_if6MpnENode__v_;
  3.2231 +text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_;
  3.2232 +text: .text%__1cKScheduling2t6MpnFArena_rnHCompile__v_;
  3.2233 +text: .text%__1cKSchedulingMDoScheduling6M_v_;
  3.2234 +text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_;
  3.2235 +text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_;
  3.2236 +text: .text%__1cSCardTableExtensionbAscavenge_contents_parallel6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager_I_v_;
  3.2237 +text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o;
  3.2238 +text: .text%__1cFframeMpd_gc_epilog6M_v_;
  3.2239 +text: .text%__1cMelapsedTimerHseconds6kM_d_;
  3.2240 +text: .text%__1cJStealTaskEname6M_pc_: psTasks.o;
  3.2241 +text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o;
  3.2242 +text: .text%__1cFframeLgc_epilogue6M_v_;
  3.2243 +text: .text%__1cFframeLgc_prologue6M_v_;
  3.2244 +text: .text%__1cTOldToYoungRootsTaskEname6M_pc_: psTasks.o;
  3.2245 +text: .text%__1cJStealTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.2246 +text: .text%__1cTOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.2247 +text: .text%__1cNGCTaskManagerMnote_release6MI_v_;
  3.2248 +text: .text%__1cMciMethodDataStrap_recompiled_at6MpnLProfileData__i_;
  3.2249 +text: .text%__1cJloadLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2250 +text: .text%__1cSmembar_acquireNodeLout_RegMask6kM_rknHRegMask__;
  3.2251 +text: .text%__1cSmembar_acquireNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2252 +text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_;
  3.2253 +text: .text%__1cVExceptionHandlerTableHcopy_to6MpnHnmethod__v_;
  3.2254 +text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_;
  3.2255 +text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_;
  3.2256 +text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_;
  3.2257 +text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_;
  3.2258 +text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_;
  3.2259 +text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_;
  3.2260 +text: .text%__1cIciMethodRbuild_method_data6M_v_;
  3.2261 +text: .text%__1cHCompileIOptimize6M_v_;
  3.2262 +text: .text%__1cHCompileLFinish_Warm6M_v_;
  3.2263 +text: .text%__1cbAfinal_graph_reshaping_walk6FrnKNode_Stack_pnENode_rnUFinal_Reshape_Counts__v_: compile.o;
  3.2264 +text: .text%__1cHCompileLInline_Warm6M_i_;
  3.2265 +text: .text%__1cSPhaseRemoveUseless2t6MpnIPhaseGVN_pnQUnique_Node_List__v_;
  3.2266 +text: .text%__1cJStartNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2267 +text: .text%__1cKInlineTreeWbuild_inline_tree_root6F_p0_;
  3.2268 +text: .text%__1cHCompileRbuild_start_state6MpnJStartNode_pknITypeFunc__pnIJVMState__;
  3.2269 +text: .text%__1cIPhaseCCPHanalyze6M_v_;
  3.2270 +text: .text%__1cIPhaseCCPMdo_transform6M_v_;
  3.2271 +text: .text%__1cIPhaseCCPJtransform6MpnENode__2_;
  3.2272 +text: .text%__1cIPhaseCCP2t6MpnMPhaseIterGVN__v_;
  3.2273 +text: .text%__1cHCompileVidentify_useful_nodes6MrnQUnique_Node_List__v_;
  3.2274 +text: .text%__1cHCompileUremove_useless_nodes6MrnQUnique_Node_List__v_;
  3.2275 +text: .text%__1cQUnique_Node_ListUremove_useless_nodes6MrnJVectorSet__v_;
  3.2276 +text: .text%__1cMPhaseIterGVN2t6MpnIPhaseGVN__v_;
  3.2277 +text: .text%__1cMPhaseIterGVN2t6Mp0_v_;
  3.2278 +text: .text%__1cQmulI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2279 +text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_;
  3.2280 +text: .text%__1cHCompileNreturn_values6MpnIJVMState__v_;
  3.2281 +text: .text%__1cOcmovII_immNodeIpipeline6kM_pknIPipeline__;
  3.2282 +text: .text%__1cOMachEpilogNodeQsafepoint_offset6kM_i_;
  3.2283 +text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_;
  3.2284 +text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_;
  3.2285 +text: .text%__1cIModINodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2286 +text: .text%__1cFStateP_sub_Op_RShiftI6MpknENode__v_;
  3.2287 +text: .text%__1cRsarI_reg_imm5NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2288 +text: .text%lwp_cond_init: os_solaris.o;
  3.2289 +text: .text%__1cTmembar_volatileNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2290 +text: .text%__1cNloadConL0NodeLout_RegMask6kM_rknHRegMask__;
  3.2291 +text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o;
  3.2292 +text: .text%__1cXmembar_acquire_lockNodeIadr_type6kM_pknHTypePtr__;
  3.2293 +text: .text%__1cKPSYoungGenRcapacity_in_bytes6kM_I_;
  3.2294 +text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o;
  3.2295 +text: .text%__1cOloadConI13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2296 +text: .text%__1cJloadSNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2297 +text: .text%__1cIAddFNodeGOpcode6kM_i_;
  3.2298 +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_;
  3.2299 +text: .text%__1cFStateO_sub_Op_Binary6MpknENode__v_;
  3.2300 +text: .text%__1cKBinaryNodeGOpcode6kM_i_;
  3.2301 +text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o;
  3.2302 +text: .text%__1cLBoxLockNodeDcmp6kMrknENode__I_;
  3.2303 +text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_;
  3.2304 +text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_;
  3.2305 +text: .text%__1cRNativeMovConstRegIset_data6Mi_v_;
  3.2306 +text: .text%__1cFParsebLincrement_and_test_invocation_counter6Mi_v_;
  3.2307 +text: .text%__1cSsafePoint_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2308 +text: .text%__1cMTailCallNodeGOpcode6kM_i_;
  3.2309 +text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_;
  3.2310 +text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_;
  3.2311 +text: .text%__1cSsafePoint_pollNodeLout_RegMask6kM_rknHRegMask__;
  3.2312 +text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_;
  3.2313 +text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_;
  3.2314 +text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_;
  3.2315 +text: .text%__1cIMulFNodeGOpcode6kM_i_;
  3.2316 +text: .text%__1cISubLNodeLbottom_type6kM_pknEType__: classes.o;
  3.2317 +text: .text%__1cQmulD_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2318 +text: .text%__1cNSCMemProjNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2319 +text: .text%__1cSThreadLocalStorageGthread6F_pnGThread__: assembler_sparc.o;
  3.2320 +text: .text%jni_SetByteArrayRegion: jni.o;
  3.2321 +text: .text%__1cQregI_to_stkINodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2322 +text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__;
  3.2323 +text: .text%__1cSdivL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2324 +text: .text%__1cFStateM_sub_Op_XorI6MpknENode__v_;
  3.2325 +text: .text%__1cHTypePtrEmake6FnETypeFTYPES_n0ADPTR_i_pk0_;
  3.2326 +text: .text%__1cCosLelapsedTime6F_d_;
  3.2327 +text: .text%__1cKScopeValueJread_from6FpnTDebugInfoReadStream__p0_;
  3.2328 +text: .text%__1cKPerfMemoryMmark_updated6F_v_;
  3.2329 +text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__;
  3.2330 +text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_;
  3.2331 +text: .text%__1cKPerfMemoryFalloc6FI_pc_;
  3.2332 +text: .text%__1cLStrCompNodeKmatch_edge6kMI_I_;
  3.2333 +text: .text%__1cQmulL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2334 +text: .text%__1cILocation2t6MpnTDebugInfoReadStream__v_;
  3.2335 +text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__;
  3.2336 +text: .text%jni_ReleaseStringUTFChars;
  3.2337 +text: .text%jni_GetStringUTFChars: jni.o;
  3.2338 +text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__;
  3.2339 +text: .text%__1cFParseLarray_store6MnJBasicType__v_;
  3.2340 +text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_;
  3.2341 +text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_;
  3.2342 +text: .text%JVM_IsInterrupted;
  3.2343 +text: .text%__1cLLShiftLNodeLbottom_type6kM_pknEType__: classes.o;
  3.2344 +text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o;
  3.2345 +text: .text%JVM_FindLibraryEntry;
  3.2346 +text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_;
  3.2347 +text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_;
  3.2348 +text: .text%__1cRshlL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
  3.2349 +text: .text%__1cQshlL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2350 +text: .text%__1cPconvF2D_regNodeIpipeline6kM_pknIPipeline__;
  3.2351 +text: .text%__1cLRShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2352 +text: .text%__1cSstring_compareNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2353 +text: .text%__1cOMacroAssemblerEstop6Mpkc_v_;
  3.2354 +text: .text%__1cObranchConFNodeIpipeline6kM_pknIPipeline__;
  3.2355 +text: .text%__1cKloadUBNodeIpipeline6kM_pknIPipeline__;
  3.2356 +text: .text%__1cQaddP_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2357 +text: .text%__1cLcmpD_ccNodeErule6kM_I_: ad_sparc_misc.o;
  3.2358 +text: .text%__1cTloadL_unalignedNodeIpipeline6kM_pknIPipeline__;
  3.2359 +text: .text%__1cLLShiftLNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2360 +text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_;
  3.2361 +text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o;
  3.2362 +text: .text%__1cRbranchLoopEndNodeLout_RegMask6kM_rknHRegMask__;
  3.2363 +text: .text%__1cQaddF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2364 +text: .text%__1cKcmpOpUOperHgreater6kM_i_: ad_sparc_clone.o;
  3.2365 +text: .text%__1cUParallelScavengeHeapEused6kM_I_;
  3.2366 +text: .text%__1cIDivINodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2367 +text: .text%__1cQmulF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2368 +text: .text%__1cQxorI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2369 +text: .text%__1cWCallLeafNoFPDirectNodeLout_RegMask6kM_rknHRegMask__;
  3.2370 +text: .text%__1cLcmpD_ccNodeIpipeline6kM_pknIPipeline__;
  3.2371 +text: .text%__1cWCallLeafNoFPDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2372 +text: .text%__1cJloadINodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2373 +text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_;
  3.2374 +text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_;
  3.2375 +text: .text%__1cIMinINodeGadd_id6kM_pknEType__: classes.o;
  3.2376 +text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__;
  3.2377 +text: .text%__1cOClearArrayNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2378 +text: .text%__1cRbranchLoopEndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2379 +text: .text%__1cRbranchLoopEndNodeJlabel_set6MrnFLabel_I_v_;
  3.2380 +text: .text%__1cLMachUEPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2381 +text: .text%__1cCosTnative_java_library6F_pv_;
  3.2382 +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
  3.2383 +text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_;
  3.2384 +text: .text%__1cSxorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.2385 +text: .text%__1cMPhaseChaitinGSelect6M_I_;
  3.2386 +text: .text%__1cFParseSjump_switch_ranges6MpnENode_pnLSwitchRange_4i_v_;
  3.2387 +text: .text%__1cSbranchCon_longNodeJlabel_set6MrnFLabel_I_v_;
  3.2388 +text: .text%__1cSbranchCon_longNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2389 +text: .text%__1cSbranchCon_longNodeLout_RegMask6kM_rknHRegMask__;
  3.2390 +text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_;
  3.2391 +text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_;
  3.2392 +text: .text%__1cLstoreP0NodeOmemory_operand6kM_pknIMachOper__;
  3.2393 +text: .text%__1cFParseTprofile_switch_case6Mi_v_;
  3.2394 +text: .text%__1cSandI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2395 +text: .text%__1cIimmLOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2396 +text: .text%__1cFParseOmerge_new_path6Mi_v_;
  3.2397 +text: .text%__1cQregP_to_stkPNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2398 +text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_;
  3.2399 +text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__;
  3.2400 +text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__;
  3.2401 +text: .text%__1cSmembar_releaseNodeLout_RegMask6kM_rknHRegMask__;
  3.2402 +text: .text%jni_NewByteArray: jni.o;
  3.2403 +text: .text%__1cQdivL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2404 +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_;
  3.2405 +text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_;
  3.2406 +text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_;
  3.2407 +text: .text%__1cFParseLdo_newarray6MnJBasicType__v_;
  3.2408 +text: .text%__1cPmethodDataKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.2409 +text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__;
  3.2410 +text: .text%__1cSconvI2D_helperNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2411 +text: .text%__1cLstoreP0NodeLout_RegMask6kM_rknHRegMask__;
  3.2412 +text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_;
  3.2413 +text: .text%__1cSmembar_releaseNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2414 +text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_;
  3.2415 +text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__;
  3.2416 +text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_;
  3.2417 +text: .text%__1cSTailCalljmpIndNodeIpipeline6kM_pknIPipeline__;
  3.2418 +text: .text%__1cQcmovI_reg_gtNodeErule6kM_I_: ad_sparc_misc.o;
  3.2419 +text: .text%JVM_GetMethodIxExceptionTableEntry;
  3.2420 +text: .text%__1cIDivINodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2421 +text: .text%__1cLstoreP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2422 +text: .text%__1cQstkI_to_regFNodeErule6kM_I_: ad_sparc_misc.o;
  3.2423 +text: .text%__1cLRethrowNodeJideal_reg6kM_I_: classes.o;
  3.2424 +text: .text%__1cKloadUBNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2425 +text: .text%__1cHCompileSrethrow_exceptions6MpnIJVMState__v_;
  3.2426 +text: .text%__1cURethrowExceptionNodeLout_RegMask6kM_rknHRegMask__;
  3.2427 +text: .text%__1cLRethrowNode2t6MpnENode_22222_v_;
  3.2428 +text: .text%__1cTLoadL_unalignedNodeGOpcode6kM_i_;
  3.2429 +text: .text%__1cSmulI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.2430 +text: .text%__1cZInterpreterMacroAssemblerZget_2_byte_integer_at_bcp6MipnMRegisterImpl_2n0ALsignedOrNot_n0AKsetCCOrNot__v_;
  3.2431 +text: .text%__1cQcmovI_reg_gtNodeLout_RegMask6kM_rknHRegMask__;
  3.2432 +text: .text%__1cURethrowExceptionNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2433 +text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_;
  3.2434 +text: .text%__1cISubLNodeDsub6kMpknEType_3_3_;
  3.2435 +text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_;
  3.2436 +text: .text%__1cJLoadINodeMstore_Opcode6kM_i_: classes.o;
  3.2437 +text: .text%__1cQandI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2438 +text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
  3.2439 +text: .text%__1cQmulI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2440 +text: .text%__1cFParsePmerge_exception6Mi_v_;
  3.2441 +text: .text%__1cLStrCompNodeLbottom_type6kM_pknEType__: classes.o;
  3.2442 +text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o;
  3.2443 +text: .text%__1cNloadConP0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2444 +text: .text%jni_ReleaseStringCritical: jni.o;
  3.2445 +text: .text%__1cJCMoveNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2446 +text: .text%jni_GetStringCritical: jni.o;
  3.2447 +text: .text%__1cHciKlassSsuper_check_offset6M_I_;
  3.2448 +text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o;
  3.2449 +text: .text%__1cWCallLeafNoFPDirectNodeKmethod_set6Mi_v_;
  3.2450 +text: .text%__1cWCallLeafNoFPDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2451 +text: .text%__1cIDivLNodeLbottom_type6kM_pknEType__: classes.o;
  3.2452 +text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o;
  3.2453 +text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o;
  3.2454 +text: .text%__1cMloadConFNodeIpipeline6kM_pknIPipeline__;
  3.2455 +text: .text%__1cMadjust_check6FpnENode_11iipnMPhaseIterGVN__v_: ifnode.o;
  3.2456 +text: .text%__1cJScopeDescGsender6kM_p0_;
  3.2457 +text: .text%__1cSxorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.2458 +text: .text%__1cOcompiledVFrameGsender6kM_pnGvframe__;
  3.2459 +text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_;
  3.2460 +text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_;
  3.2461 +text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_;
  3.2462 +text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_;
  3.2463 +text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_;
  3.2464 +text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_;
  3.2465 +text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_;
  3.2466 +text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_;
  3.2467 +text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_;
  3.2468 +text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_;
  3.2469 +text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_;
  3.2470 +text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_;
  3.2471 +text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_;
  3.2472 +text: .text%__1cOPhaseIdealLoopOadd_constraint6MiipnENode_22p23_v_;
  3.2473 +text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_;
  3.2474 +text: .text%__1cKstoreBNodeOmemory_operand6kM_pknIMachOper__;
  3.2475 +text: .text%__1cSaddL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2476 +text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_;
  3.2477 +text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o;
  3.2478 +text: .text%__1cLstoreI0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2479 +text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_;
  3.2480 +text: .text%__1cRtestI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2481 +text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__;
  3.2482 +text: .text%__1cFParseMdo_checkcast6M_v_;
  3.2483 +text: .text%__1cOCompiledRFrameKtop_method6kM_nMmethodHandle__: rframe.o;
  3.2484 +text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__;
  3.2485 +text: .text%__1cRtestI_reg_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2486 +text: .text%__1cOcmovIL_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2487 +text: .text%__1cJimmU6OperIconstant6kM_i_: ad_sparc_clone.o;
  3.2488 +text: .text%__1cHRegMask2t6M_v_: matcher.o;
  3.2489 +text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_;
  3.2490 +text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_;
  3.2491 +text: .text%__1cSmulL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.2492 +text: .text%__1cPstoreI_FregNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2493 +text: .text%__1cLcmpD_ccNodeLout_RegMask6kM_rknHRegMask__;
  3.2494 +text: .text%__1cXMachCallDynamicJavaNodePret_addr_offset6M_i_;
  3.2495 +text: .text%__1cNflagsRegFOperKin_RegMask6kMi_pknHRegMask__;
  3.2496 +text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
  3.2497 +text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_;
  3.2498 +text: .text%__1cIModLNodeLbottom_type6kM_pknEType__: classes.o;
  3.2499 +text: .text%__1cMloadConFNodeLout_RegMask6kM_rknHRegMask__;
  3.2500 +text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_;
  3.2501 +text: .text%__1cbAPSGCAdaptivePolicyCountersbBupdate_counters_from_policy6M_v_;
  3.2502 +text: .text%__1cXTraceMemoryManagerStats2T6M_v_;
  3.2503 +text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_;
  3.2504 +text: .text%__1cQLRUMaxHeapPolicy2t6M_v_;
  3.2505 +text: .text%__1cUParallelScavengeHeapQresize_all_tlabs6M_v_;
  3.2506 +text: .text%__1cUParallelScavengeHeapPupdate_counters6M_v_;
  3.2507 +text: .text%__1cUParallelScavengeHeapbFaccumulate_statistics_all_tlabs6M_v_;
  3.2508 +text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_;
  3.2509 +text: .text%__1cTDerivedPointerTablePupdate_pointers6F_v_;
  3.2510 +text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_;
  3.2511 +text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_;
  3.2512 +text: .text%__1cMTypeKlassPtrFxmeet6kMpknEType__3_;
  3.2513 +text: .text%__1cKPSYoungGenPupdate_counters6M_v_;
  3.2514 +text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_;
  3.2515 +text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_;
  3.2516 +text: .text%__1cPGCMemoryManagerIgc_begin6M_v_;
  3.2517 +text: .text%__1cPGCMemoryManagerGgc_end6M_v_;
  3.2518 +text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_;
  3.2519 +text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_;
  3.2520 +text: .text%__1cbAPSGCAdaptivePolicyCountersPupdate_counters6M_v_;
  3.2521 +text: .text%__1cTDerivedPointerTableFclear6F_v_;
  3.2522 +text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_;
  3.2523 +text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_;
  3.2524 +text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_;
  3.2525 +text: .text%__1cMCounterDecayFdecay6F_v_;
  3.2526 +text: .text%__1cCosbCmake_polling_page_unreadable6F_v_;
  3.2527 +text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_;
  3.2528 +text: .text%__1cLConvI2FNodeGOpcode6kM_i_;
  3.2529 +text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_;
  3.2530 +text: .text%__1cQaddF_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2531 +text: .text%__1cUSafepointSynchronizeFbegin6F_v_;
  3.2532 +text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__;
  3.2533 +text: .text%__1cONMethodSweeperFsweep6F_v_;
  3.2534 +text: .text%__1cCosbAmake_polling_page_readable6F_v_;
  3.2535 +text: .text%__1cUSafepointSynchronizeDend6F_v_;
  3.2536 +text: .text%__1cOcmovII_immNodeErule6kM_I_: ad_sparc_misc.o;
  3.2537 +text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_;
  3.2538 +text: .text%__1cKimmU13OperIconstant6kM_i_: ad_sparc_clone.o;
  3.2539 +text: .text%__1cQshlL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2540 +text: .text%__1cUcompU_iReg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2541 +text: .text%JVM_GetCallerClass;
  3.2542 +text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o;
  3.2543 +text: .text%__1cOcmovPP_regNodeIpipeline6kM_pknIPipeline__;
  3.2544 +text: .text%__1cKstoreBNodeLout_RegMask6kM_rknHRegMask__;
  3.2545 +text: .text%__1cSobjArrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.2546 +text: .text%__1cLstoreC0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2547 +text: .text%__1cTloadL_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2548 +text: .text%__1cICmpFNodeGOpcode6kM_i_;
  3.2549 +text: .text%__1cOstackSlotPOperKin_RegMask6kMi_pknHRegMask__;
  3.2550 +text: .text%__1cQregF_to_stkINodeLout_RegMask6kM_rknHRegMask__;
  3.2551 +text: .text%__1cJLoadDNodeGOpcode6kM_i_;
  3.2552 +text: .text%__1cQmulD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2553 +text: .text%jni_IsAssignableFrom: jni.o;
  3.2554 +text: .text%jni_GetFieldID: jni.o;
  3.2555 +text: .text%__1cJLoadPNodeMstore_Opcode6kM_i_: classes.o;
  3.2556 +text: .text%__1cLstoreB0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2557 +text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_;
  3.2558 +text: .text%__1cHTypeAryFxdual6kM_pknEType__;
  3.2559 +text: .text%__1cMtlsLoadPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2560 +text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_;
  3.2561 +text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__;
  3.2562 +text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__;
  3.2563 +text: .text%__1cMVM_OperationIevaluate6M_v_;
  3.2564 +text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_;
  3.2565 +text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__;
  3.2566 +text: .text%__1cMnegD_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2567 +text: .text%__1cQcmovI_reg_gtNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.2568 +text: .text%__1cGGCTask2t6Mn0AEKindEkind__v_;
  3.2569 +text: .text%__1cNGCTaskManagerVrelease_all_resources6M_v_;
  3.2570 +text: .text%__1cLGCTaskQdDueueHenqueue6Mp0_v_;
  3.2571 +text: .text%__1cSCardTableExtensionRscavenge_contents6MpnQObjectStartArray_pnMMutableSpace_pnIHeapWord_pnSPSPromotionManager__v_;
  3.2572 +text: .text%__1cUWaitForBarrierGCTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.2573 +text: .text%__1cNGCTaskManagerIadd_list6MpnLGCTaskQdDueue__v_;
  3.2574 +text: .text%__1cHThreadsZcreate_thread_roots_tasks6FpnLGCTaskQdDueue__v_;
  3.2575 +text: .text%__1cUWaitForBarrierGCTaskGcreate6F_p0_;
  3.2576 +text: .text%__1cUWaitForBarrierGCTaskIdestruct6M_v_;
  3.2577 +text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
  3.2578 +text: .text%__1cSPSPromotionManagerNpost_scavenge6F_v_;
  3.2579 +text: .text%__1cNBarrierGCTaskOdo_it_internal6MpnNGCTaskManager_I_v_;
  3.2580 +text: .text%__1cNJvmtiGCMarker2T6M_v_;
  3.2581 +text: .text%__1cUWaitForBarrierGCTaskHdestroy6Fp0_v_;
  3.2582 +text: .text%__1cLGCTaskQdDueueGcreate6F_p0_;
  3.2583 +text: .text%__1cSPSPromotionManagerMpre_scavenge6F_v_;
  3.2584 +text: .text%__1cZSerialOldToYoungRootsTaskFdo_it6MpnNGCTaskManager_I_v_;
  3.2585 +text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_;
  3.2586 +text: .text%__1cZSerialOldToYoungRootsTaskEname6M_pc_: psTasks.o;
  3.2587 +text: .text%__1cKPSYoungGenLswap_spaces6M_v_;
  3.2588 +text: .text%__1cUParallelScavengeHeapQresize_young_gen6MII_v_;
  3.2589 +text: .text%__1cKPSYoungGenGresize6MII_v_;
  3.2590 +text: .text%__1cKPSYoungGenNresize_spaces6MII_v_;
  3.2591 +text: .text%__1cSPSPromotionManagerbBvm_thread_promotion_manager6F_p0_;
  3.2592 +text: .text%__1cUWaitForBarrierGCTaskIwait_for6M_v_;
  3.2593 +text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_;
  3.2594 +text: .text%__1cNMonitorSupplyHreserve6F_pnHMonitor__;
  3.2595 +text: .text%__1cNMonitorSupplyHrelease6FpnHMonitor__v_;
  3.2596 +text: .text%__1cUWaitForBarrierGCTaskEname6M_pc_: gcTaskManager.o;
  3.2597 +text: .text%__1cTmembar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2598 +text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__;
  3.2599 +text: .text%__1cTloadL_unalignedNodeErule6kM_I_: ad_sparc_misc.o;
  3.2600 +text: .text%__1cOcmovII_immNodeLout_RegMask6kM_rknHRegMask__;
  3.2601 +text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o;
  3.2602 +text: .text%__1cMURShiftLNodeJideal_reg6kM_I_: classes.o;
  3.2603 +text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_;
  3.2604 +text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
  3.2605 +text: .text%__1cKstoreBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2606 +text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_;
  3.2607 +text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_;
  3.2608 +text: .text%__1cFframeZinterpreter_frame_set_mdp6MpC_v_;
  3.2609 +text: .text%__1cZInterpreterMacroAssemblerIpush_ptr6MpnMRegisterImpl__v_;
  3.2610 +text: .text%__1cISubLNodeGadd_id6kM_pknEType__: classes.o;
  3.2611 +text: .text%__1cIciMethodRinterpreter_entry6M_pC_;
  3.2612 +text: .text%__1cQmulF_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2613 +text: .text%__1cPconvF2D_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2614 +text: .text%__1cRcompL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2615 +text: .text%__1cJloadBNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2616 +text: .text%jni_SetBooleanField: jni.o;
  3.2617 +text: .text%__1cKimmL13OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2618 +text: .text%__1cLcmpF_ccNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2619 +text: .text%__1cLRuntimeStubbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o;
  3.2620 +text: .text%__1cRorI_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.2621 +text: .text%__1cRsarL_reg_imm6NodeIpipeline6kM_pknIPipeline__;
  3.2622 +text: .text%__1cQmulI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2623 +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_;
  3.2624 +text: .text%__1cZInterpreterMacroAssemblerGpush_i6MpnMRegisterImpl__v_;
  3.2625 +text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_;
  3.2626 +text: .text%__1cLLShiftLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2627 +text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o;
  3.2628 +text: .text%__1cJloadSNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2629 +text: .text%__1cRloadConP_pollNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2630 +text: .text%__1cNObjectMonitorHRecycle6M_v_;
  3.2631 +text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__;
  3.2632 +text: .text%__1cMloadConLNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2633 +text: .text%__1cJloadDNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2634 +text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__;
  3.2635 +text: .text%__1cQstkI_to_regFNodeIpipeline6kM_pknIPipeline__;
  3.2636 +text: .text%__1cQregP_to_stkPNodeIpipeline6kM_pknIPipeline__;
  3.2637 +text: .text%__1cZInterpreterMacroAssemblerFpop_i6MpnMRegisterImpl__v_;
  3.2638 +text: .text%__1cIMaxINodeGadd_id6kM_pknEType__: classes.o;
  3.2639 +text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__;
  3.2640 +text: .text%__1cYcompareAndSwapL_boolNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2641 +text: .text%__1cNSCMemProjNodeJideal_reg6kM_I_: classes.o;
  3.2642 +text: .text%__1cYcompareAndSwapL_boolNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2643 +text: .text%__1cIProjNodeLout_RegMask6kM_rknHRegMask__;
  3.2644 +text: .text%__1cIPSOldGenMmax_gen_size6M_I_: psOldGen.o;
  3.2645 +text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_;
  3.2646 +text: .text%__1cSdivL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.2647 +text: .text%__1cbDVM_ParallelGCFailedAllocationEdoit6M_v_;
  3.2648 +text: .text%__1cQaddL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2649 +text: .text%__1cPadd_derived_oop6FppnHoopDesc_2_v_: oopMap.o;
  3.2650 +text: .text%__1cMregD_lowOperKin_RegMask6kMi_pknHRegMask__;
  3.2651 +text: .text%__1cHOrINodeIadd_ring6kMpknEType_3_3_;
  3.2652 +text: .text%__1cOMethodLivenessKBasicBlockFsplit6Mi_p1_;
  3.2653 +text: .text%__1cOcmovII_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2654 +text: .text%__1cENodeEgetd6kM_d_;
  3.2655 +text: .text%__1cOcmovIL_immNodeIpipeline6kM_pknIPipeline__;
  3.2656 +text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_;
  3.2657 +text: .text%__1cZInterpreterMacroAssemblerGpush_f6MpnRFloatRegisterImpl__v_;
  3.2658 +text: .text%__1cIciObject2t6MpnHciKlass__v_;
  3.2659 +text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__;
  3.2660 +text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_;
  3.2661 +text: .text%__1cPstoreI_FregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2662 +text: .text%__1cKCMovePNodeGOpcode6kM_i_;
  3.2663 +text: .text%__1cLstoreC0NodeIpipeline6kM_pknIPipeline__;
  3.2664 +text: .text%JVM_MonitorWait;
  3.2665 +text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_;
  3.2666 +text: .text%__1cIAddLNodeIadd_ring6kMpknEType_3_3_;
  3.2667 +text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_;
  3.2668 +text: .text%__1cGciType2t6MpnHciKlass__v_;
  3.2669 +text: .text%__1cQshlI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2670 +text: .text%__1cQdivD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2671 +text: .text%__1cFParseSjump_if_false_fork6MpnGIfNode_ii_v_;
  3.2672 +text: .text%__1cNloadConL0NodeIpipeline6kM_pknIPipeline__;
  3.2673 +text: .text%__1cRshrL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2674 +text: .text%__1cUciInstanceKlassKlassEmake6F_p0_;
  3.2675 +text: .text%__1cENode2t6Mp0111111_v_;
  3.2676 +text: .text%__1cIDivLNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2677 +text: .text%__1cZInterpreterMacroAssemblerGpush_d6MpnRFloatRegisterImpl__v_;
  3.2678 +text: .text%__1cFParseRarray_store_check6M_v_;
  3.2679 +text: .text%__1cQsubF_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2680 +text: .text%__1cIciSymbolHbyte_at6Mi_i_;
  3.2681 +text: .text%__1cKCompiledICSset_ic_destination6MpC_v_;
  3.2682 +text: .text%__1cQaddD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2683 +text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__;
  3.2684 +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__;
  3.2685 +text: .text%__1cQset_lwp_priority6Fiii_i_;
  3.2686 +text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__;
  3.2687 +text: .text%jni_NewStringUTF: jni.o;
  3.2688 +text: .text%__1cZInterpreterMacroAssemblerGpush_l6MpnMRegisterImpl__v_;
  3.2689 +text: .text%__1cQsubI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2690 +text: .text%__1cZInterpreterMacroAssemblerXget_constant_pool_cache6MpnMRegisterImpl__v_;
  3.2691 +text: .text%__1cSbranchCon_longNodeGnegate6M_v_: ad_sparc_misc.o;
  3.2692 +text: .text%__1cKcmpOpUOperKless_equal6kM_i_: ad_sparc_clone.o;
  3.2693 +text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__;
  3.2694 +text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__;
  3.2695 +text: .text%__1cUParallelScavengeHeapIcapacity6kM_I_;
  3.2696 +text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__;
  3.2697 +text: .text%__1cSsubL_reg_reg_2NodeIpipeline6kM_pknIPipeline__;
  3.2698 +text: .text%JVM_DefineClassWithSource;
  3.2699 +text: .text%__1cLstoreF0NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2700 +text: .text%JVM_SetClassSigners;
  3.2701 +text: .text%__1cKCompiledICMset_to_clean6M_v_;
  3.2702 +text: .text%__1cSandL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2703 +text: .text%__1cRbranchLoopEndNodeGnegate6M_v_: ad_sparc_misc.o;
  3.2704 +text: .text%__1cLRShiftLNodeJideal_reg6kM_I_: classes.o;
  3.2705 +text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o;
  3.2706 +text: .text%__1cFParseWcheck_interpreter_type6MpnENode_pknEType_rpnNSafePointNode__2_;
  3.2707 +text: .text%__1cOcmovPP_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2708 +text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_;
  3.2709 +text: .text%get_thread;
  3.2710 +text: .text%__1cKstoreCNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2711 +text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__;
  3.2712 +text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_;
  3.2713 +text: .text%jni_CallIntMethod: jni.o;
  3.2714 +text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_;
  3.2715 +text: .text%__1cKloadUBNodeErule6kM_I_: ad_sparc_misc.o;
  3.2716 +text: .text%__1cSconvD2I_helperNodeIpipeline6kM_pknIPipeline__;
  3.2717 +text: .text%__1cIMulDNodeLbottom_type6kM_pknEType__: classes.o;
  3.2718 +text: .text%__1cSaddP_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2719 +text: .text%__1cIAddDNodeGOpcode6kM_i_;
  3.2720 +text: .text%__1cOloadI_fregNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2721 +text: .text%__1cOloadI_fregNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2722 +text: .text%__1cCosJyield_all6Fi_v_;
  3.2723 +text: .text%__1cKstoreLNodeLout_RegMask6kM_rknHRegMask__;
  3.2724 +text: .text%__1cKstoreLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2725 +text: .text%__1cPstoreI_FregNodeIpipeline6kM_pknIPipeline__;
  3.2726 +text: .text%JVM_GetClassMethodsCount;
  3.2727 +text: .text%__1cKstoreINodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2728 +text: .text%JVM_GetClassFieldsCount;
  3.2729 +text: .text%__1cLconvI2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2730 +text: .text%JVM_GetClassCPEntriesCount;
  3.2731 +text: .text%JVM_GetClassCPTypes;
  3.2732 +text: .text%__1cQmulI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2733 +text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_;
  3.2734 +text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__;
  3.2735 +text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_;
  3.2736 +text: .text%__1cIAddFNodeLbottom_type6kM_pknEType__: classes.o;
  3.2737 +text: .text%__1cQregI_to_stkINodeErule6kM_I_: ad_sparc_misc.o;
  3.2738 +text: .text%__1cQmulF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2739 +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_;
  3.2740 +text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_;
  3.2741 +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_;
  3.2742 +text: .text%JVM_IsPrimitiveClass;
  3.2743 +text: .text%__1cJimmU6OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2744 +text: .text%__1cOPhaseIdealLoopUpeeled_dom_test_elim6MpnNIdealLoopTree_rnJNode_List__v_;
  3.2745 +text: .text%__1cIDivDNodeGOpcode6kM_i_;
  3.2746 +text: .text%__1cQsubI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2747 +text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__;
  3.2748 +text: .text%__1cIModLNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2749 +text: .text%jni_FindClass: jni.o;
  3.2750 +text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_;
  3.2751 +text: .text%__1cOMacroAssemblerOstore_argument6MpnMRegisterImpl_rnIArgument__v_: interpreterRT_sparc.o;
  3.2752 +text: .text%__1cFParseHdo_irem6M_v_;
  3.2753 +text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_;
  3.2754 +text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_;
  3.2755 +text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_;
  3.2756 +text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_;
  3.2757 +text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_;
  3.2758 +text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_;
  3.2759 +text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__;
  3.2760 +text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__;
  3.2761 +text: .text%__1cOcmovII_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2762 +text: .text%__1cSdivL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.2763 +text: .text%__1cTloadL_unalignedNodeLout_RegMask6kM_rknHRegMask__;
  3.2764 +text: .text%__1cNSharedRuntimeDd2l6Fd_x_;
  3.2765 +text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__;
  3.2766 +text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_;
  3.2767 +text: .text%__1cFStateP_sub_Op_RShiftL6MpknENode__v_;
  3.2768 +text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o;
  3.2769 +text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o;
  3.2770 +text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_;
  3.2771 +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
  3.2772 +text: .text%__1cQregI_to_stkINodeIpipeline6kM_pknIPipeline__;
  3.2773 +text: .text%__1cRorI_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.2774 +text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__;
  3.2775 +text: .text%__1cMregD_lowOperEtype6kM_pknEType__: ad_sparc.o;
  3.2776 +text: .text%__1cLConvD2INodeLbottom_type6kM_pknEType__: classes.o;
  3.2777 +text: .text%__1cSconvI2F_helperNodeIpipeline6kM_pknIPipeline__;
  3.2778 +text: .text%__1cHMonitor2T6M_v_;
  3.2779 +text: .text%__1cFTypeDFxmeet6kMpknEType__3_;
  3.2780 +text: .text%__1cFMutex2T6M_v_;
  3.2781 +text: .text%lwp_cond_destroy: os_solaris.o;
  3.2782 +text: .text%lwp_mutex_destroy: os_solaris.o;
  3.2783 +text: .text%__1cQdivI_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2784 +text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_;
  3.2785 +text: .text%__1cQregP_to_stkPNodeLout_RegMask6kM_rknHRegMask__;
  3.2786 +text: .text%__1cQstkI_to_regFNodeLout_RegMask6kM_rknHRegMask__;
  3.2787 +text: .text%__1cQregI_to_stkINodeLout_RegMask6kM_rknHRegMask__;
  3.2788 +text: .text%__1cQRelocationHolderEplus6kMi_0_;
  3.2789 +text: .text%__1cUPSMarkSweepDecoratorPadjust_pointers6M_v_;
  3.2790 +text: .text%__1cUPSMarkSweepDecoratorKprecompact6M_v_;
  3.2791 +text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_;
  3.2792 +text: .text%__1cHCompileQgrow_alias_types6M_v_;
  3.2793 +text: .text%__1cISubLNodeJideal_reg6kM_I_: classes.o;
  3.2794 +text: .text%__1cOcmovII_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.2795 +text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_;
  3.2796 +text: .text%__1cKloadUBNodeLout_RegMask6kM_rknHRegMask__;
  3.2797 +text: .text%__1cTloadD_unalignedNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2798 +text: .text%__1cJLoadFNodeJideal_reg6kM_I_: classes.o;
  3.2799 +text: .text%__1cOloadConL13NodeLout_RegMask6kM_rknHRegMask__;
  3.2800 +text: .text%__1cRcompL_reg_conNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2801 +text: .text%__1cQaddF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2802 +text: .text%__1cICmpDNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2803 +text: .text%__1cKsplit_once6FpnMPhaseIterGVN_pnENode_333_v_: cfgnode.o;
  3.2804 +text: .text%__1cLLShiftLNodeJideal_reg6kM_I_: classes.o;
  3.2805 +text: .text%__1cJloadFNodeOmemory_operand6kM_pknIMachOper__;
  3.2806 +text: .text%__1cJCMoveNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2807 +text: .text%__1cOPhaseIdealLoopOdo_range_check6MpnNIdealLoopTree_rnJNode_List__v_;
  3.2808 +text: .text%__1cSconvD2I_helperNodeLout_RegMask6kM_rknHRegMask__;
  3.2809 +text: .text%__1cIGraphKitPdstore_rounding6MpnENode__2_;
  3.2810 +text: .text%__1cJloadINodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2811 +text: .text%__1cSdivL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.2812 +text: .text%__1cRloadConP_pollNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2813 +text: .text%__1cIModINodeJideal_reg6kM_I_: classes.o;
  3.2814 +text: .text%__1cZCallDynamicJavaDirectNodeKmethod_set6Mi_v_;
  3.2815 +text: .text%__1cZCallDynamicJavaDirectNodeLout_RegMask6kM_rknHRegMask__;
  3.2816 +text: .text%__1cSconvD2I_helperNodeErule6kM_I_: ad_sparc_misc.o;
  3.2817 +text: .text%__1cZCallDynamicJavaDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2818 +text: .text%__1cUmulL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__;
  3.2819 +text: .text%__1cQdivL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2820 +text: .text%__1cUdivL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o;
  3.2821 +text: .text%__1cUmulL_reg_imm13_1NodeErule6kM_I_: ad_sparc_misc.o;
  3.2822 +text: .text%__1cUVirtualCallGeneratorIgenerate6MpnIJVMState__2_;
  3.2823 +text: .text%__1cNObjectMonitor2t6M_v_;
  3.2824 +text: .text%__1cIMulINodeKadd_opcode6kM_i_: classes.o;
  3.2825 +text: .text%__1cIMulINodeKmul_opcode6kM_i_: classes.o;
  3.2826 +text: .text%__1cQdivD_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2827 +text: .text%__1cJCmpD3NodeGOpcode6kM_i_;
  3.2828 +text: .text%__1cJloadDNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2829 +text: .text%__1cIMinINodeJideal_reg6kM_I_: classes.o;
  3.2830 +text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__;
  3.2831 +text: .text%__1cQmulF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2832 +text: .text%JVM_MonitorNotify;
  3.2833 +text: .text%__1cFBlockNset_next_call6MpnENode_rnJVectorSet_rnLBlock_Array__v_;
  3.2834 +text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_;
  3.2835 +text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_sparc.o;
  3.2836 +text: .text%__1cKstoreFNodeOmemory_operand6kM_pknIMachOper__;
  3.2837 +text: .text%__1cSstring_compareNodeErule6kM_I_: ad_sparc_misc.o;
  3.2838 +text: .text%__1cRtestI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2839 +text: .text%__1cVshrL_reg_imm6_L2INodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2840 +text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_;
  3.2841 +text: .text%__1cOloadConL13NodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.2842 +text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_;
  3.2843 +text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_;
  3.2844 +text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__;
  3.2845 +text: .text%__1cINegDNodeGOpcode6kM_i_;
  3.2846 +text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_;
  3.2847 +text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_;
  3.2848 +text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_;
  3.2849 +text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_;
  3.2850 +text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_;
  3.2851 +text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_;
  3.2852 +text: .text%__1cOimmI_32_63OperIconstant6kM_i_: ad_sparc_clone.o;
  3.2853 +text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_;
  3.2854 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2855 +text: .text%__1cSsubL_reg_reg_2NodeLout_RegMask6kM_rknHRegMask__;
  3.2856 +text: .text%__1cOcmovII_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.2857 +text: .text%__1cOstackSlotPOperEtype6kM_pknEType__: ad_sparc.o;
  3.2858 +text: .text%jni_GetMethodID: jni.o;
  3.2859 +text: .text%__1cQshlL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2860 +text: .text%__1cIMulINodeJideal_reg6kM_I_: classes.o;
  3.2861 +text: .text%__1cNminI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2862 +text: .text%__1cRshlI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2863 +text: .text%__1cOloadConL13NodeIpipeline6kM_pknIPipeline__;
  3.2864 +text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_;
  3.2865 +text: .text%__1cOMacroAssemblerDjmp6MpnMRegisterImpl_ipkci_v_;
  3.2866 +text: .text%__1cIDivLNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.2867 +text: .text%JVM_GetClassDeclaredConstructors;
  3.2868 +text: .text%__1cUdivL_reg_imm13_1NodeIpipeline6kM_pknIPipeline__;
  3.2869 +text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_;
  3.2870 +text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_;
  3.2871 +text: .text%__1cUmulL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__;
  3.2872 +text: .text%__1cQsubD_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2873 +text: .text%__1cUdivL_reg_imm13_1NodeLout_RegMask6kM_rknHRegMask__;
  3.2874 +text: .text%__1cQregP_to_stkPNodeErule6kM_I_: ad_sparc_misc.o;
  3.2875 +text: .text%__1cSconvI2F_helperNodeErule6kM_I_: ad_sparc_misc.o;
  3.2876 +text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_;
  3.2877 +text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_;
  3.2878 +text: .text%__1cOcmovIF_regNodeIpipeline6kM_pknIPipeline__;
  3.2879 +text: .text%__1cQcmovI_reg_ltNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2880 +text: .text%__1cNloadConL0NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.2881 +text: .text%__1cKo1RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.2882 +text: .text%__1cSsubL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
  3.2883 +text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_;
  3.2884 +text: .text%__1cQshrL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2885 +text: .text%__1cRsarL_reg_imm6NodeErule6kM_I_: ad_sparc_misc.o;
  3.2886 +text: .text%__1cJloadFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2887 +text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__;
  3.2888 +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRT_sparc.o;
  3.2889 +text: .text%__1cRorI_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.2890 +text: .text%__1cQshrL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2891 +text: .text%__1cQshrI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2892 +text: .text%__1cOimmI_32_63OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2893 +text: .text%__1cOloadI_fregNodeIpipeline6kM_pknIPipeline__;
  3.2894 +text: .text%__1cLConvI2DNodeLbottom_type6kM_pknEType__: classes.o;
  3.2895 +text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_;
  3.2896 +text: .text%__1cQdivL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2897 +text: .text%__1cKCompiledICKcached_oop6kM_pnHoopDesc__;
  3.2898 +text: .text%__1cISubFNodeGOpcode6kM_i_;
  3.2899 +text: .text%JVM_IsThreadAlive;
  3.2900 +text: .text%__1cXPartialSubtypeCheckNodeGOpcode6kM_i_;
  3.2901 +text: .text%__1cLconvI2BNodeIpipeline6kM_pknIPipeline__;
  3.2902 +text: .text%__1cOcmovIF_immNodeIpipeline6kM_pknIPipeline__;
  3.2903 +text: .text%__1cRsarL_reg_imm6NodeLout_RegMask6kM_rknHRegMask__;
  3.2904 +text: .text%__1cQaddI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2905 +text: .text%__1cRtestI_reg_immNodeErule6kM_I_: ad_sparc_misc.o;
  3.2906 +text: .text%__1cRtestI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2907 +text: .text%__1cRsubI_zero_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2908 +text: .text%__1cSmulL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
  3.2909 +text: .text%__1cQaddD_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2910 +text: .text%__1cOcmovPI_regNodeIpipeline6kM_pknIPipeline__;
  3.2911 +text: .text%__1cKConv2BNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2912 +text: .text%__1cSstring_compareNodeLout_RegMask6kM_rknHRegMask__;
  3.2913 +text: .text%__1cQregL_to_stkLNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2914 +text: .text%__1cQjava_lang_SystemTout_offset_in_bytes6F_i_;
  3.2915 +text: .text%__1cQjava_lang_SystemSin_offset_in_bytes6F_i_;
  3.2916 +text: .text%__1cWPredictedCallGeneratorIgenerate6MpnIJVMState__2_;
  3.2917 +text: .text%__1cSconvI2F_helperNodeLout_RegMask6kM_rknHRegMask__;
  3.2918 +text: .text%__1cNCallGeneratorRfor_uncommon_trap6FpnIciMethod_nODeoptimizationLDeoptReason_n0CLDeoptAction__p0_;
  3.2919 +text: .text%__1cOcmovPP_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.2920 +text: .text%__1cZUncommonTrapCallGeneratorIgenerate6MpnIJVMState__2_;
  3.2921 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeIpipeline6kM_pknIPipeline__;
  3.2922 +text: .text%__1cIMulFNodeLbottom_type6kM_pknEType__: classes.o;
  3.2923 +text: .text%__1cGThread2t6M_v_;
  3.2924 +text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_;
  3.2925 +text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_;
  3.2926 +text: .text%__1cFTypeFFxmeet6kMpknEType__3_;
  3.2927 +text: .text%__1cCosScurrent_stack_size6F_I_;
  3.2928 +text: .text%__1cIOSThreadNpd_initialize6M_v_;
  3.2929 +text: .text%__1cCosScurrent_stack_base6F_pC_;
  3.2930 +text: .text%__1cIOSThread2t6MpFpv_i1_v_;
  3.2931 +text: .text%__1cIMulDNodeImul_ring6kMpknEType_3_3_;
  3.2932 +text: .text%__1cCosRinitialize_thread6F_v_;
  3.2933 +text: .text%__1cSdivL_reg_reg_1NodeIpipeline6kM_pknIPipeline__;
  3.2934 +text: .text%__1cCosPpd_start_thread6FpnGThread__v_;
  3.2935 +text: .text%__1cLConvI2FNodeLbottom_type6kM_pknEType__: classes.o;
  3.2936 +text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__;
  3.2937 +text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_;
  3.2938 +text: .text%jni_NewObjectArray: jni.o;
  3.2939 +text: .text%__1cSsubL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
  3.2940 +text: .text%__1cOcmovIF_immNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2941 +text: .text%JVM_SetThreadPriority;
  3.2942 +text: .text%__1cCosMstart_thread6FpnGThread__v_;
  3.2943 +text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_;
  3.2944 +text: .text%JVM_GetStackAccessControlContext;
  3.2945 +text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_;
  3.2946 +text: .text%__1cFStateM_sub_Op_ModI6MpknENode__v_;
  3.2947 +text: .text%JVM_Read;
  3.2948 +text: .text%__1cOcmovPI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2949 +text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_;
  3.2950 +text: .text%__1cFStateM_sub_Op_SubL6MpknENode__v_;
  3.2951 +text: .text%__1cKCompiledICMstub_address6kM_pC_;
  3.2952 +text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__;
  3.2953 +text: .text%__1cQsubL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2954 +text: .text%__1cQmodI_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.2955 +text: .text%__1cISubDNodeGOpcode6kM_i_;
  3.2956 +text: .text%__1cQmodI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.2957 +text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__;
  3.2958 +text: .text%__1cRsarI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2959 +text: .text%__1cIGraphKitJpush_pair6MpnENode__v_: callGenerator.o;
  3.2960 +text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__;
  3.2961 +text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_;
  3.2962 +text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_;
  3.2963 +text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__;
  3.2964 +text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_;
  3.2965 +text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_;
  3.2966 +text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_;
  3.2967 +text: .text%jni_GetStaticFieldID: jni.o;
  3.2968 +text: .text%__1cNloadKlassNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.2969 +text: .text%__1cLstoreF0NodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.2970 +text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_;
  3.2971 +text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__;
  3.2972 +text: .text%__1cFStateO_sub_Op_CMoveI6MpknENode__v_;
  3.2973 +text: .text%__1cENodeEgetf6kM_f_;
  3.2974 +text: .text%JVM_DesiredAssertionStatus;
  3.2975 +text: .text%__1cKJavaThreadKinitialize6M_v_;
  3.2976 +text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_;
  3.2977 +text: .text%__1cLConvL2DNodeGOpcode6kM_i_;
  3.2978 +text: .text%__1cQThreadStatistics2t6M_v_;
  3.2979 +text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_;
  3.2980 +text: .text%__1cQshrL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.2981 +text: .text%__1cQsubD_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.2982 +text: .text%__1cGThreadFstart6Fp0_v_;
  3.2983 +text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_: interp_masm_sparc.o;
  3.2984 +text: .text%__1cPconvI2D_memNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.2985 +text: .text%jni_GetFloatArrayRegion: jni.o;
  3.2986 +text: .text%__1cJMarkSweepMfollow_stack6F_v_;
  3.2987 +text: .text%__1cNimmP_pollOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.2988 +text: .text%__1cRtestI_reg_immNodeIpipeline6kM_pknIPipeline__;
  3.2989 +text: .text%__1cJMemRegionMintersection6kMk0_0_;
  3.2990 +text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_;
  3.2991 +text: .text%__1cKJavaThreadDrun6M_v_;
  3.2992 +text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.2993 +text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__;
  3.2994 +text: .text%JVM_IsArrayClass;
  3.2995 +text: .text%jni_CallStaticVoidMethod: jni.o;
  3.2996 +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__;
  3.2997 +text: .text%__1cLConvF2DNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.2998 +text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_;
  3.2999 +text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_;
  3.3000 +text: .text%__1cKstoreBNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3001 +text: .text%__1cFKlassNexternal_name6kM_pkc_;
  3.3002 +text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_;
  3.3003 +text: .text%__1cKstoreLNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3004 +text: .text%__1cOGenerateOopMapKinterp_all6M_v_;
  3.3005 +text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_;
  3.3006 +text: .text%__1cTloadD_unalignedNodeIpipeline6kM_pknIPipeline__;
  3.3007 +text: .text%JVM_GetClassName;
  3.3008 +text: .text%__1cOloadI_fregNodeErule6kM_I_: ad_sparc_misc.o;
  3.3009 +text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_;
  3.3010 +text: .text%__1cOGenerateOopMapKinit_state6M_v_;
  3.3011 +text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_;
  3.3012 +text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_;
  3.3013 +text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_;
  3.3014 +text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__;
  3.3015 +text: .text%__1cOcmovIF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3016 +text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__;
  3.3017 +text: .text%__1cLConvD2INodeJideal_reg6kM_I_: classes.o;
  3.3018 +text: .text%__1cOcmovIL_immNodeLout_RegMask6kM_rknHRegMask__;
  3.3019 +text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_sparc.o;
  3.3020 +text: .text%__1cINodeHashEgrow6M_v_;
  3.3021 +text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_;
  3.3022 +text: .text%__1cOcmovPP_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.3023 +text: .text%__1cMloadConDNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3024 +text: .text%__1cIMaxINodeIadd_ring6kMpknEType_3_3_;
  3.3025 +text: .text%__1cJloadSNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3026 +text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_;
  3.3027 +text: .text%__1cLConvF2DNodeLbottom_type6kM_pknEType__: classes.o;
  3.3028 +text: .text%JVM_Open;
  3.3029 +text: .text%__1cRInvocationCounterFreset6M_v_;
  3.3030 +text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_;
  3.3031 +text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_;
  3.3032 +text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_;
  3.3033 +text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_;
  3.3034 +text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_;
  3.3035 +text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_;
  3.3036 +text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_;
  3.3037 +text: .text%__1cSmulL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
  3.3038 +text: .text%__1cSdivL_reg_reg_1NodeErule6kM_I_: ad_sparc_misc.o;
  3.3039 +text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_;
  3.3040 +text: .text%JVM_StartThread;
  3.3041 +text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o;
  3.3042 +text: .text%jni_GetStaticObjectField: jni.o;
  3.3043 +text: .text%__1cJArrayDataKcell_count6M_i_: ciMethodData.o;
  3.3044 +text: .text%__1cIGraphKitSprecision_rounding6MpnENode__2_;
  3.3045 +text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_;
  3.3046 +text: .text%__1cIGraphKitRcreate_and_map_if6MpnENode_2ff_pnGIfNode__: generateOptoStub.o;
  3.3047 +text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__;
  3.3048 +text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_;
  3.3049 +text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
  3.3050 +text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_;
  3.3051 +text: .text%__1cTLoadD_unalignedNodeGOpcode6kM_i_;
  3.3052 +text: .text%__1cQshrI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3053 +text: .text%JVM_FreeMemory;
  3.3054 +text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_;
  3.3055 +text: .text%JVM_TotalMemory;
  3.3056 +text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_;
  3.3057 +text: .text%__1cMloadConDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3058 +text: .text%__1cQdivL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3059 +text: .text%__1cOcmovIL_immNodeErule6kM_I_: ad_sparc_misc.o;
  3.3060 +text: .text%__1cPconvI2D_memNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.3061 +text: .text%__1cSandL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.3062 +text: .text%__1cHThreadsGremove6FpnKJavaThread__v_;
  3.3063 +text: .text%__1cIOSThread2T6M_v_;
  3.3064 +text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_;
  3.3065 +text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_;
  3.3066 +text: .text%__1cQandI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3067 +text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_;
  3.3068 +text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_;
  3.3069 +text: .text%__1cLConvD2INodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3070 +text: .text%__1cIOSThreadKpd_destroy6M_v_;
  3.3071 +text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_;
  3.3072 +text: .text%__1cKJavaThread2T6M_v_;
  3.3073 +text: .text%__1cGThread2T5B6M_v_;
  3.3074 +text: .text%__1cCosLfree_thread6FpnIOSThread__v_;
  3.3075 +text: .text%__1cFStateM_sub_Op_MulI6MpknENode__v_;
  3.3076 +text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_;
  3.3077 +text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o;
  3.3078 +text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_;
  3.3079 +text: .text%__1cSTailCalljmpIndNodeLout_RegMask6kM_rknHRegMask__;
  3.3080 +text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_;
  3.3081 +text: .text%__1cSTailCalljmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3082 +text: .text%__1cQsubF_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.3083 +text: .text%__1cRNativeMovConstRegEdata6kM_i_;
  3.3084 +text: .text%__1cbFunnecessary_membar_volatileNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.3085 +text: .text%__1cLcmpF_ccNodeIpipeline6kM_pknIPipeline__;
  3.3086 +text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_;
  3.3087 +text: .text%jni_CallObjectMethod: jni.o;
  3.3088 +text: .text%__1cQaddD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3089 +text: .text%__1cPconvD2F_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3090 +text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__;
  3.3091 +text: .text%__1cQdivD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3092 +text: .text%__1cZInterpreterMacroAssemblerbEset_method_data_pointer_offset6MpnMRegisterImpl__v_;
  3.3093 +text: .text%__1cIMaxINodeJideal_reg6kM_I_: classes.o;
  3.3094 +text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o;
  3.3095 +text: .text%__1cHTypeInt2t6Miii_v_;
  3.3096 +text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_;
  3.3097 +text: .text%__1cOcmovIL_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3098 +text: .text%__1cKConv2BNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3099 +text: .text%__1cSandL_reg_imm13NodeErule6kM_I_: ad_sparc_misc.o;
  3.3100 +text: .text%__1cNloadRangeNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3101 +text: .text%__1cRshlI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3102 +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_;
  3.3103 +text: .text%__1cQregL_to_stkLNodeErule6kM_I_: ad_sparc_misc.o;
  3.3104 +text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_;
  3.3105 +text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRT_sparc.o;
  3.3106 +text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_;
  3.3107 +text: .text%__1cLOptoRuntimeYcurrent_time_millis_Type6F_pknITypeFunc__;
  3.3108 +text: .text%__1cHTypePtrFxdual6kM_pknEType__;
  3.3109 +text: .text%__1cOstackSlotIOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3110 +text: .text%__1cOstackSlotIOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3111 +text: .text%JVM_MonitorNotifyAll;
  3.3112 +text: .text%__1cJloadDNodeLout_RegMask6kM_rknHRegMask__;
  3.3113 +text: .text%__1cOstackSlotIOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3114 +text: .text%__1cKCMoveLNodeGOpcode6kM_i_;
  3.3115 +text: .text%__1cRshlL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3116 +text: .text%__1cMnegD_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3117 +text: .text%__1cODeoptimizationVtrap_state_has_reason6Fii_i_;
  3.3118 +text: .text%__1cTloadD_unalignedNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.3119 +text: .text%__1cJloadDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3120 +text: .text%__1cNiRegIsafeOperKin_RegMask6kMi_pknHRegMask__;
  3.3121 +text: .text%__1cNloadConP0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3122 +text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o;
  3.3123 +text: .text%__1cIAddDNodeLbottom_type6kM_pknEType__: classes.o;
  3.3124 +text: .text%__1cMnegD_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3125 +text: .text%__1cSandL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.3126 +text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o;
  3.3127 +text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
  3.3128 +text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o;
  3.3129 +text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_;
  3.3130 +text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o;
  3.3131 +text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o;
  3.3132 +text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o;
  3.3133 +text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o;
  3.3134 +text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o;
  3.3135 +text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o;
  3.3136 +text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o;
  3.3137 +text: .text%__1cSsubL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
  3.3138 +text: .text%__1cSmulL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
  3.3139 +text: .text%__1cSdivL_reg_reg_1NodeLout_RegMask6kM_rknHRegMask__;
  3.3140 +text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o;
  3.3141 +text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o;
  3.3142 +text: .text%__1cOloadI_fregNodeLout_RegMask6kM_rknHRegMask__;
  3.3143 +text: .text%__1cRtestI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3144 +text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_;
  3.3145 +text: .text%__1cJLoadSNodeMstore_Opcode6kM_i_: classes.o;
  3.3146 +text: .text%__1cLstoreF0NodeIpipeline6kM_pknIPipeline__;
  3.3147 +text: .text%__1cIMinINodeIadd_ring6kMpknEType_3_3_;
  3.3148 +text: .text%JVM_GetInheritedAccessControlContext;
  3.3149 +text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__;
  3.3150 +text: .text%__1cNmaxI_eRegNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3151 +text: .text%JVM_NativePath;
  3.3152 +text: .text%__1cOMacroAssemblerNflush_windows6M_v_;
  3.3153 +text: .text%__1cSsubD_regD_regDNodeIpipeline6kM_pknIPipeline__;
  3.3154 +text: .text%__1cVCallRuntimeDirectNodeLout_RegMask6kM_rknHRegMask__;
  3.3155 +text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_;
  3.3156 +text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_;
  3.3157 +text: .text%__1cHCompileKinit_start6MpnJStartNode__v_;
  3.3158 +text: .text%__1cKg3RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.3159 +text: .text%__1cVinline_cache_regPOperKin_RegMask6kMi_pknHRegMask__;
  3.3160 +text: .text%__1cKstorePNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3161 +text: .text%__1cQObjectStartArrayFreset6M_v_;
  3.3162 +text: .text%__1cPconvI2D_memNodeLout_RegMask6kM_rknHRegMask__;
  3.3163 +text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_;
  3.3164 +text: .text%__1cQaddD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3165 +text: .text%__1cLConvF2INodeGOpcode6kM_i_;
  3.3166 +text: .text%__1cVCallRuntimeDirectNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3167 +text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_;
  3.3168 +text: .text%__1cIPSOldGenPadjust_pointers6M_v_;
  3.3169 +text: .text%__1cVCallRuntimeDirectNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3170 +text: .text%__1cOcmovPI_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3171 +text: .text%__1cIPSOldGenHcompact6M_v_;
  3.3172 +text: .text%__1cMtlsLoadPNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3173 +text: .text%__1cLcmpF_ccNodeErule6kM_I_: ad_sparc_misc.o;
  3.3174 +text: .text%__1cVCallRuntimeDirectNodeKmethod_set6Mi_v_;
  3.3175 +text: .text%__1cKimmI11OperIconstant6kM_i_: ad_sparc_clone.o;
  3.3176 +text: .text%__1cQcmovI_reg_gtNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3177 +text: .text%__1cLstoreP0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3178 +text: .text%__1cOcmovIF_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3179 +text: .text%__1cOcmovLL_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3180 +text: .text%jni_GetStaticMethodID: jni.o;
  3.3181 +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MipnMRegisterImpl__v_;
  3.3182 +text: .text%__1cRtestI_reg_immNodeLout_RegMask6kM_rknHRegMask__;
  3.3183 +text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_;
  3.3184 +text: .text%__1cPconvF2D_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3185 +text: .text%__1cOPhaseIdealLoopKdo_peeling6MpnNIdealLoopTree_rnJNode_List__v_;
  3.3186 +text: .text%__1cOcmovLL_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3187 +text: .text%jint_cmp: parse2.o;
  3.3188 +text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__;
  3.3189 +text: .text%__1cNloadConL0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3190 +text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_;
  3.3191 +text: .text%__1cVMoveL2D_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3192 +text: .text%__1cIMulDNodeGmul_id6kM_pknEType__: classes.o;
  3.3193 +text: .text%__1cIGraphKitTdprecision_rounding6MpnENode__2_;
  3.3194 +text: .text%__1cOcmovLL_regNodeIpipeline6kM_pknIPipeline__;
  3.3195 +text: .text%__1cLConvD2FNodeGOpcode6kM_i_;
  3.3196 +text: .text%__1cIMulFNodeImul_ring6kMpknEType_3_3_;
  3.3197 +text: .text%__1cWloadConI_x41f00000NodeIpipeline6kM_pknIPipeline__;
  3.3198 +text: .text%__1cKcmpOpFOperFccode6kM_i_: ad_sparc_clone.o;
  3.3199 +text: .text%__1cLstoreC0NodeOmemory_operand6kM_pknIMachOper__;
  3.3200 +text: .text%__1cQregL_to_stkLNodeLout_RegMask6kM_rknHRegMask__;
  3.3201 +text: .text%__1cZregDHi_regDLo_to_regDNodeIpipeline6kM_pknIPipeline__;
  3.3202 +text: .text%__1cOcmovIF_immNodeErule6kM_I_: ad_sparc_misc.o;
  3.3203 +text: .text%__1cOcmovDF_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3204 +text: .text%__1cQaddL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3205 +text: .text%__1cZregDHi_regDLo_to_regDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3206 +text: .text%JVM_Close;
  3.3207 +text: .text%__1cSmulD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3208 +text: .text%__1cQsubL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3209 +text: .text%__1cIMulDNodeJideal_reg6kM_I_: classes.o;
  3.3210 +text: .text%__1cKstoreFNodeLout_RegMask6kM_rknHRegMask__;
  3.3211 +text: .text%__1cSsubD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3212 +text: .text%__1cSaddD_regD_regDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3213 +text: .text%__1cSaddP_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3214 +text: .text%__1cXconvI2D_regDHi_regDNodeErule6kM_I_: ad_sparc_misc.o;
  3.3215 +text: .text%__1cKstoreFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3216 +text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__;
  3.3217 +text: .text%__1cOMacroAssemblerNget_vm_result6MpnMRegisterImpl__v_;
  3.3218 +text: .text%__1cQsubF_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3219 +text: .text%__1cZInterpreterMacroAssemblerbIcompute_extra_locals_size_in_bytes6MpnMRegisterImpl_22_v_;
  3.3220 +text: .text%__1cLcmpF_ccNodeLout_RegMask6kM_rknHRegMask__;
  3.3221 +text: .text%__1cPMultiBranchDataScompute_cell_count6FpnOBytecodeStream__i_;
  3.3222 +text: .text%__1cPorI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3223 +text: .text%__1cSxorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3224 +text: .text%__1cPconvI2D_memNodeErule6kM_I_: ad_sparc_misc.o;
  3.3225 +text: .text%__1cQdivI_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3226 +text: .text%__1cLconvI2BNodeErule6kM_I_: ad_sparc_misc.o;
  3.3227 +text: .text%__1cISubFNodeLbottom_type6kM_pknEType__: classes.o;
  3.3228 +text: .text%__1cWloadConI_x43300000NodeErule6kM_I_: ad_sparc_misc.o;
  3.3229 +text: .text%__1cWloadConI_x41f00000NodeErule6kM_I_: ad_sparc_misc.o;
  3.3230 +text: .text%__1cSmulI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3231 +text: .text%__1cOtailjmpIndNodeNis_block_proj6kM_pknENode__: ad_sparc_misc.o;
  3.3232 +text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_;
  3.3233 +text: .text%__1cbFunnecessary_membar_volatileNodeLout_RegMask6kM_rknHRegMask__;
  3.3234 +text: .text%__1cJSubFPNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3235 +text: .text%__1cFParseNdo_instanceof6M_v_;
  3.3236 +text: .text%__1cLconvI2BNodeLout_RegMask6kM_rknHRegMask__;
  3.3237 +text: .text%__1cIGraphKitOgen_instanceof6MpnENode_2_2_;
  3.3238 +text: .text%__1cbFunnecessary_membar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3239 +text: .text%__1cRshrL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3240 +text: .text%__1cJloadBNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3241 +text: .text%__1cQdivI_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.3242 +text: .text%__1cIDivLNodeJideal_reg6kM_I_: classes.o;
  3.3243 +text: .text%__1cLConvI2DNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3244 +text: .text%__1cSmulD_regD_regDNodeIpipeline6kM_pknIPipeline__;
  3.3245 +text: .text%__1cOstackSlotLOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3246 +text: .text%__1cKConv2BNodeLbottom_type6kM_pknEType__: classes.o;
  3.3247 +text: .text%__1cQshlI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3248 +text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_;
  3.3249 +text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_;
  3.3250 +text: .text%__1cJloadDNodeIpipeline6kM_pknIPipeline__;
  3.3251 +text: .text%__1cOcmovPP_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3252 +text: .text%__1cQsubF_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3253 +text: .text%jni_NewObjectV: jni.o;
  3.3254 +text: .text%__1cOcmovLI_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3255 +text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__;
  3.3256 +text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_;
  3.3257 +text: .text%__1cXPartialSubtypeCheckNodeLbottom_type6kM_pknEType__: classes.o;
  3.3258 +text: .text%jni_EnsureLocalCapacity;
  3.3259 +text: .text%__1cLstoreI0NodeOmemory_operand6kM_pknIMachOper__;
  3.3260 +text: .text%__1cIAddFNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
  3.3261 +text: .text%__1cLConvD2INodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3262 +text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__;
  3.3263 +text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_;
  3.3264 +text: .text%__1cIDivDNodeLbottom_type6kM_pknEType__: classes.o;
  3.3265 +text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__;
  3.3266 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeErule6kM_I_: ad_sparc_misc.o;
  3.3267 +text: .text%__1cSaddD_regD_regDNodeIpipeline6kM_pknIPipeline__;
  3.3268 +text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_;
  3.3269 +text: .text%__1cQsubD_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3270 +text: .text%__1cOcmovPP_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3271 +text: .text%__1cNSafePointNodeQpeek_monitor_obj6kM_pnENode__;
  3.3272 +text: .text%__1cJloadFNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3273 +text: .text%__1cSaddI_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3274 +text: .text%__1cFParsePdo_monitor_exit6M_v_;
  3.3275 +text: .text%__1cObranchConFNodeLout_RegMask6kM_rknHRegMask__;
  3.3276 +text: .text%__1cObranchConFNodeJlabel_set6MrnFLabel_I_v_;
  3.3277 +text: .text%__1cSconvF2I_helperNodeIpipeline6kM_pknIPipeline__;
  3.3278 +text: .text%__1cSmembar_releaseNodeIadr_type6kM_pknHTypePtr__;
  3.3279 +text: .text%__1cObranchConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3280 +text: .text%__1cLcmpD_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3281 +text: .text%__1cJloadLNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3282 +text: .text%__1cISubDNodeLbottom_type6kM_pknEType__: classes.o;
  3.3283 +text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_;
  3.3284 +text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_;
  3.3285 +text: .text%__1cNSafePointNodeQpeek_monitor_box6kM_pnENode__;
  3.3286 +text: .text%__1cFTypeFFxdual6kM_pknEType__;
  3.3287 +text: .text%__1cICmpFNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3288 +text: .text%__1cKVtableStubRpd_code_alignment6F_i_;
  3.3289 +text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
  3.3290 +text: .text%__1cKloadUBNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3291 +text: .text%__1cTloadL_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3292 +text: .text%__1cINegDNodeLbottom_type6kM_pknEType__: classes.o;
  3.3293 +text: .text%__1cLConvI2FNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3294 +text: .text%__1cOcmovLL_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3295 +text: .text%__1cRorI_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3296 +text: .text%__1cTloadL_unalignedNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3297 +text: .text%__1cTloadL_unalignedNodeOmemory_operand6kM_pknIMachOper__;
  3.3298 +text: .text%__1cKloadUBNodeOmemory_operand6kM_pknIMachOper__;
  3.3299 +text: .text%__1cXconvI2D_regDHi_regDNodeIpipeline6kM_pknIPipeline__;
  3.3300 +text: .text%__1cZInterpreterMacroAssemblerbFtest_invocation_counter_for_mdp6MpnMRegisterImpl_22rnFLabel__v_;
  3.3301 +text: .text%__1cXconvI2D_regDHi_regDNodeLout_RegMask6kM_rknHRegMask__;
  3.3302 +text: .text%__1cSvframeArrayElementHfill_in6MpnOcompiledVFrame__v_;
  3.3303 +text: .text%__1cFTypeDFxdual6kM_pknEType__;
  3.3304 +text: .text%__1cSaddD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
  3.3305 +text: .text%__1cZInterpreterMacroAssemblerbAincrement_backedge_counter6MpnMRegisterImpl_2_v_;
  3.3306 +text: .text%__1cZInterpreterMacroAssemblerbBtest_backedge_count_for_osr6MpnMRegisterImpl_22_v_;
  3.3307 +text: .text%__1cSmulL_reg_imm13NodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3308 +text: .text%__1cOcmovPI_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3309 +text: .text%__1cKEntryPoint2t6M_v_;
  3.3310 +text: .text%__1cTloadD_unalignedNodeLout_RegMask6kM_rknHRegMask__;
  3.3311 +text: .text%__1cZregDHi_regDLo_to_regDNodeLout_RegMask6kM_rknHRegMask__;
  3.3312 +text: .text%__1cOcompiledVFrameImonitors6kM_pnNGrowableArray4CpnLMonitorInfo____;
  3.3313 +text: .text%__1cOcompiledVFrameLexpressions6kM_pnUStackValueCollection__;
  3.3314 +text: .text%__1cHciKlassOsuper_of_depth6MI_p0_;
  3.3315 +text: .text%__1cOcompiledVFrameGlocals6kM_pnUStackValueCollection__;
  3.3316 +text: .text%__1cOcompiledVFrameGmethod6kM_pnNmethodOopDesc__;
  3.3317 +text: .text%__1cJimmP0OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3318 +text: .text%__1cOcompiledVFrameHraw_bci6kM_i_;
  3.3319 +text: .text%__1cQshrI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3320 +text: .text%__1cWloadConI_x43300000NodeLout_RegMask6kM_rknHRegMask__;
  3.3321 +text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_;
  3.3322 +text: .text%__1cMTailJumpNodeKmatch_edge6kMI_I_;
  3.3323 +text: .text%__1cWloadConI_x41f00000NodeLout_RegMask6kM_rknHRegMask__;
  3.3324 +text: .text%__1cODeoptimizationbJupdate_method_data_from_interpreter6FnQmethodDataHandle_ii_v_;
  3.3325 +text: .text%__1cIimmDOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3326 +text: .text%__1cFframeZinterpreter_frame_set_mdx6Mi_v_;
  3.3327 +text: .text%__1cOstackSlotLOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3328 +text: .text%__1cOstackSlotLOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3329 +text: .text%__1cTloadD_unalignedNodeErule6kM_I_: ad_sparc_misc.o;
  3.3330 +text: .text%__1cIModLNodeJideal_reg6kM_I_: classes.o;
  3.3331 +text: .text%__1cOtailjmpIndNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3332 +text: .text%__1cSmulD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
  3.3333 +text: .text%__1cINegFNodeGOpcode6kM_i_;
  3.3334 +text: .text%__1cSsubD_regD_regDNodeLout_RegMask6kM_rknHRegMask__;
  3.3335 +text: .text%__1cJScopeDescImonitors6M_pnNGrowableArray4CpnMMonitorValue____;
  3.3336 +text: .text%__1cJScopeDescLexpressions6M_pnNGrowableArray4CpnKScopeValue____;
  3.3337 +text: .text%__1cJScopeDescGlocals6M_pnNGrowableArray4CpnKScopeValue____;
  3.3338 +text: .text%JVM_GetComponentType;
  3.3339 +text: .text%__1cQdivI_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3340 +text: .text%Unsafe_DefineClass1;
  3.3341 +text: .text%__1cOcmovII_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3342 +text: .text%__1cLvframeArrayPunpack_to_stack6MrnFframe_i_v_;
  3.3343 +text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_;
  3.3344 +text: .text%__1cLConvF2DNodeJideal_reg6kM_I_: classes.o;
  3.3345 +text: .text%__1cSvframeArrayElementDbci6kM_i_;
  3.3346 +text: .text%__1cVMoveF2I_stack_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3347 +text: .text%JVM_GetCPFieldModifiers;
  3.3348 +text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_;
  3.3349 +text: .text%__1cNnmethodLocker2t6MpC_v_;
  3.3350 +text: .text%__1cNSharedRuntimebJcontinuation_for_implicit_exception6FpnKJavaThread_pCn0AVImplicitExceptionKind__3_;
  3.3351 +text: .text%__1cODeoptimizationNuncommon_trap6FpnKJavaThread_i_pn0ALUnrollBlock__;
  3.3352 +text: .text%__1cODeoptimizationTuncommon_trap_inner6FpnKJavaThread_i_v_;
  3.3353 +text: .text%__1cODeoptimizationNunpack_frames6FpnKJavaThread_i_nJBasicType__;
  3.3354 +text: .text%__1cODeoptimizationYfetch_unroll_info_helper6FpnKJavaThread__pn0ALUnrollBlock__;
  3.3355 +text: .text%__1cZInterpreterMacroAssemblerXindex_check_without_pop6MpnMRegisterImpl_2i22_v_;
  3.3356 +text: .text%__1cRSignatureIteratorKparse_type6M_i_;
  3.3357 +text: .text%__1cPconvD2F_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3358 +text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__;
  3.3359 +text: .text%__1cODeoptimizationRlast_frame_adjust6Fii_i_;
  3.3360 +text: .text%__1cQsubD_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3361 +text: .text%JVM_DefineClass;
  3.3362 +text: .text%JVM_InvokeMethod;
  3.3363 +text: .text%__1cOcmovPP_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3364 +text: .text%jni_NewDirectByteBuffer;
  3.3365 +text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o;
  3.3366 +text: .text%jni_AllocObject: jni.o;
  3.3367 +text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_;
  3.3368 +text: .text%__1cTmembar_volatileNodeLout_RegMask6kM_rknHRegMask__;
  3.3369 +text: .text%__1cMnegD_regNodeIpipeline6kM_pknIPipeline__;
  3.3370 +text: .text%Unsafe_AllocateInstance;
  3.3371 +text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o;
  3.3372 +text: .text%__1cQstkI_to_regINodeIpipeline6kM_pknIPipeline__;
  3.3373 +text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_;
  3.3374 +text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_;
  3.3375 +text: .text%__1cYinternal_word_RelocationGtarget6M_pC_;
  3.3376 +text: .text%__1cJStubQdDueueKremove_all6M_v_;
  3.3377 +text: .text%__1cMloadConFNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3378 +text: .text%__1cPconvI2D_memNodeIpipeline6kM_pknIPipeline__;
  3.3379 +text: .text%__1cPorL_reg_regNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3380 +text: .text%__1cZInterpreterMacroAssemblerLindex_check6MpnMRegisterImpl_2i22_v_;
  3.3381 +text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
  3.3382 +text: .text%__1cSaddL_reg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3383 +text: .text%__1cOcmovPI_regNodeLbottom_type6kM_pknEType__: ad_sparc_misc.o;
  3.3384 +text: .text%__1cKstfSSFNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3385 +text: .text%__1cMloadConFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3386 +text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_;
  3.3387 +text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_;
  3.3388 +text: .text%__1cTmembar_volatileNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3389 +text: .text%__1cPconvI2L_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3390 +text: .text%__1cOcmovII_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3391 +text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_;
  3.3392 +text: .text%__1cJCmpF3NodeGOpcode6kM_i_;
  3.3393 +text: .text%__1cLMoveL2DNodeGOpcode6kM_i_;
  3.3394 +text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_;
  3.3395 +text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__;
  3.3396 +text: .text%__1cOcmovII_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3397 +text: .text%__1cIAddFNodeJideal_reg6kM_I_: classes.o;
  3.3398 +text: .text%JVM_NewArray;
  3.3399 +text: .text%__1cHOrLNodeGOpcode6kM_i_;
  3.3400 +text: .text%__1cLStrCompNodeJideal_reg6kM_I_: classes.o;
  3.3401 +text: .text%__1cLOopMapCache2t6M_v_;
  3.3402 +text: .text%__1cNTemplateTableHconvert6F_v_;
  3.3403 +text: .text%__1cOcmovDF_regNodeIpipeline6kM_pknIPipeline__;
  3.3404 +text: .text%__1cZInterpreterMacroAssemblerFpop_l6MpnMRegisterImpl__v_;
  3.3405 +text: .text%__1cOcmovLI_regNodeIpipeline6kM_pknIPipeline__;
  3.3406 +text: .text%__1cSMachBreakpointNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3407 +text: .text%__1cSInterpreterRuntimeQcreate_exception6FpnKJavaThread_pc3_v_;
  3.3408 +text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o;
  3.3409 +text: .text%__1cKPSYoungGenKprecompact6M_v_;
  3.3410 +text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_;
  3.3411 +text: .text%__1cSconvD2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3412 +text: .text%__1cMnegF_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3413 +text: .text%__1cHThreadsLgc_prologue6F_v_;
  3.3414 +text: .text%__1cHThreadsLgc_epilogue6F_v_;
  3.3415 +text: .text%__1cPconvI2L_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3416 +text: .text%__1cPconvD2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3417 +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_;
  3.3418 +text: .text%__1cUParallelScavengeHeapHcollect6MnHGCCauseFCause__v_;
  3.3419 +text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_;
  3.3420 +text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_;
  3.3421 +text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_;
  3.3422 +text: .text%__1cLconvP2BNodeMideal_Opcode6kM_i_: ad_sparc_misc.o;
  3.3423 +text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_;
  3.3424 +text: .text%__1cIciMethodVget_osr_flow_analysis6Mi_pnKciTypeFlow__;
  3.3425 +text: .text%__1cLMoveF2INodeGOpcode6kM_i_;
  3.3426 +text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_;
  3.3427 +text: .text%__1cMStartOSRNodeKosr_domain6F_pknJTypeTuple__;
  3.3428 +text: .text%__1cVVM_ParallelGCSystemGCEdoit6M_v_;
  3.3429 +text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_;
  3.3430 +text: .text%__1cOMacroAssemblerPbreakpoint_trap6M_v_;
  3.3431 +text: .text%__1cJBasicLockHmove_to6MpnHoopDesc_p0_v_;
  3.3432 +text: .text%__1cJMarkSweepNrestore_marks6F_v_;
  3.3433 +text: .text%__1cJMarkSweepMadjust_marks6F_v_;
  3.3434 +text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_;
  3.3435 +text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_;
  3.3436 +text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_;
  3.3437 +text: .text%__1cMStubCodeMark2T6M_v_;
  3.3438 +text: .text%__1cNCallGeneratorHfor_osr6FpnIciMethod_i_p0_;
  3.3439 +text: .text%__1cLClassLoaderSget_system_package6FpkcpnGThread__pnHoopDesc__;
  3.3440 +text: .text%__1cJPSPermGenKprecompact6M_v_;
  3.3441 +text: .text%JVM_GC;
  3.3442 +text: .text%__1cIPSOldGenKprecompact6M_v_;
  3.3443 +text: .text%__1cUPSMarkSweepDecoratorbIset_destination_decorator_perm_gen6F_v_;
  3.3444 +text: .text%__1cUPSMarkSweepDecoratorbHset_destination_decorator_tenured6F_v_;
  3.3445 +text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_;
  3.3446 +text: .text%__1cQmulL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3447 +text: .text%__1cUPSAdaptiveSizePolicyUmajor_collection_end6MInHGCCauseFCause__v_;
  3.3448 +text: .text%__1cUPSAdaptiveSizePolicyWmajor_collection_begin6M_v_;
  3.3449 +text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_;
  3.3450 +text: .text%__1cJPSPermGenQcompute_new_size6MI_v_;
  3.3451 +text: .text%__1cKPSYoungGenHcompact6M_v_;
  3.3452 +text: .text%JVM_GetSystemPackage;
  3.3453 +text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_;
  3.3454 +text: .text%__1cKPSYoungGenPadjust_pointers6M_v_;
  3.3455 +text: .text%__1cQUncommonTrapBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.3456 +text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.3457 +text: .text%__1cNExceptionBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
  3.3458 +text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_;
  3.3459 +text: .text%__1cJCodeCacheLgc_prologue6F_v_;
  3.3460 +text: .text%__1cJCodeCacheLgc_epilogue6F_v_;
  3.3461 +text: .text%__1cIXorINodeIadd_ring6kMpknEType_3_3_;
  3.3462 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeLout_RegMask6kM_rknHRegMask__;
  3.3463 +text: .text%__1cQregL_to_stkLNodeIpipeline6kM_pknIPipeline__;
  3.3464 +text: .text%__1cKcmpOpFOperKless_equal6kM_i_: ad_sparc_clone.o;
  3.3465 +text: .text%__1cOcmovPI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3466 +text: .text%__1cSmulL_reg_imm13NodeLout_RegMask6kM_rknHRegMask__;
  3.3467 +text: .text%__1cOcmovIF_immNodeLout_RegMask6kM_rknHRegMask__;
  3.3468 +text: .text%__1cKCMoveDNodeGOpcode6kM_i_;
  3.3469 +text: .text%__1cJLoadDNodeJideal_reg6kM_I_: classes.o;
  3.3470 +text: .text%__1cIMulFNodeGmul_id6kM_pknEType__: classes.o;
  3.3471 +text: .text%__1cNStubGeneratorLstub_prolog6MpnMStubCodeDesc__v_: stubGenerator_sparc.o;
  3.3472 +text: .text%__1cQaddL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3473 +text: .text%jni_GetStringRegion: jni.o;
  3.3474 +text: .text%JVM_RawMonitorCreate;
  3.3475 +text: .text%__1cJloadLNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3476 +text: .text%__1cIMulFNodeJideal_reg6kM_I_: classes.o;
  3.3477 +text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_;
  3.3478 +text: .text%__1cOstackSlotPOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3479 +text: .text%__1cOstackSlotPOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3480 +text: .text%__1cOstackSlotPOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3481 +text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_;
  3.3482 +text: .text%JVM_Sleep;
  3.3483 +text: .text%__1cLConvL2DNodeLbottom_type6kM_pknEType__: classes.o;
  3.3484 +text: .text%__1cQstkI_to_regFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3485 +text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o;
  3.3486 +text: .text%__1cRorI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3487 +text: .text%Unsafe_CompareAndSwapInt;
  3.3488 +text: .text%JVM_Lseek;
  3.3489 +text: .text%__1cNloadRangeNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3490 +text: .text%__1cPconvD2F_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3491 +text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o;
  3.3492 +text: .text%__1cPconvF2D_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3493 +text: .text%__1cQmulI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3494 +text: .text%__1cQmulF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3495 +text: .text%__1cMnegF_regNodeIpipeline6kM_pknIPipeline__;
  3.3496 +text: .text%__1cSconvF2I_helperNodeErule6kM_I_: ad_sparc_misc.o;
  3.3497 +text: .text%__1cQmulD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3498 +text: .text%__1cOcmovLI_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3499 +text: .text%__1cPMultiBranchDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
  3.3500 +text: .text%__1cQregP_to_stkPNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3501 +text: .text%__1cZInterpreterMacroAssemblerQtest_mdp_data_at6MipnMRegisterImpl_rnFLabel_2_v_;
  3.3502 +text: .text%__1cQstkI_to_regINodeErule6kM_I_: ad_sparc_misc.o;
  3.3503 +text: .text%__1cOcmovLI_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3504 +text: .text%__1cGciType2t6MnJBasicType__v_;
  3.3505 +text: .text%__1cJLoadBNodeMstore_Opcode6kM_i_: classes.o;
  3.3506 +text: .text%__1cQaddF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3507 +text: .text%__1cETypeEmake6Fn0AFTYPES__pk0_;
  3.3508 +text: .text%__1cSconvF2I_helperNodeLout_RegMask6kM_rknHRegMask__;
  3.3509 +text: .text%__1cRsarL_reg_imm6NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3510 +text: .text%__1cSstring_compareNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3511 +text: .text%jni_GetEnv;
  3.3512 +text: .text%__1cJloadDNodeOmemory_operand6kM_pknIMachOper__;
  3.3513 +text: .text%__1cQstkI_to_regINodeLout_RegMask6kM_rknHRegMask__;
  3.3514 +text: .text%__1cSstring_compareNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3515 +text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_sparc.o;
  3.3516 +text: .text%Unsafe_GetNativeByte;
  3.3517 +text: .text%JVM_NanoTime;
  3.3518 +text: .text%__1cCosNjavaTimeNanos6F_x_;
  3.3519 +text: .text%__1cOMacroAssemblerOrestore_thread6MkpnMRegisterImpl__v_;
  3.3520 +text: .text%__1cVcompiledICHolderKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.3521 +text: .text%__1cQandL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3522 +text: .text%__1cIimmFOperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3523 +text: .text%__1cHThreadsLnmethods_do6F_v_;
  3.3524 +text: .text%__1cKcmpOpFOperGnegate6M_v_: ad_sparc_clone.o;
  3.3525 +text: .text%__1cICodeBlobFflush6M_v_;
  3.3526 +text: .text%__1cFParseMdo_anewarray6M_v_;
  3.3527 +text: .text%__1cSdivL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3528 +text: .text%jni_CallVoidMethod: jni.o;
  3.3529 +text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__;
  3.3530 +text: .text%__1cObranchConFNodeGnegate6M_v_: ad_sparc_misc.o;
  3.3531 +text: .text%__1cFParseOdo_tableswitch6M_v_;
  3.3532 +text: .text%__1cOcmovIF_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3533 +text: .text%__1cLConvI2FNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3534 +text: .text%__1cSaddL_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3535 +text: .text%__1cLstoreC0NodeLout_RegMask6kM_rknHRegMask__;
  3.3536 +text: .text%Unsafe_GetNativeFloat;
  3.3537 +text: .text%__1cOstackSlotFOperEtype6kM_pknEType__: ad_sparc.o;
  3.3538 +text: .text%__1cHnmethodFflush6M_v_;
  3.3539 +text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_;
  3.3540 +text: .text%__1cKo2RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.3541 +text: .text%__1cQregI_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3542 +text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_;
  3.3543 +text: .text%__1cParrayKlassKlassRoop_copy_contents6MpnSPSPromotionManager_pnHoopDesc__v_;
  3.3544 +text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_;
  3.3545 +text: .text%__1cWloadConI_x43300000NodeIpipeline6kM_pknIPipeline__;
  3.3546 +text: .text%__1cFParseQdo_monitor_enter6M_v_;
  3.3547 +text: .text%__1cPorL_reg_regNodeIpipeline6kM_pknIPipeline__;
  3.3548 +text: .text%__1cLstoreC0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3549 +text: .text%JVM_FindPrimitiveClass;
  3.3550 +text: .text%__1cVMoveL2D_stack_regNodeIpipeline6kM_pknIPipeline__;
  3.3551 +text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_;
  3.3552 +text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_;
  3.3553 +text: .text%__1cSmodL_reg_imm13NodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3554 +text: .text%__1cRshrI_reg_imm5NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3555 +text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
  3.3556 +text: .text%__1cSsubL_reg_reg_2NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3557 +text: .text%__1cUmulL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3558 +text: .text%__1cIDivDNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3559 +text: .text%__1cPconvI2F_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3560 +text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
  3.3561 +text: .text%__1cOstackSlotFOperKin_RegMask6kMi_pknHRegMask__;
  3.3562 +text: .text%__1cUdivL_reg_imm13_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3563 +text: .text%__1cRSignatureIteratorHiterate6M_v_;
  3.3564 +text: .text%__1cOcmovLL_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3565 +text: .text%__1cJname2type6Fpkc_nJBasicType__;
  3.3566 +text: .text%__1cSmulL_reg_imm13NodeIpipeline6kM_pknIPipeline__;
  3.3567 +text: .text%__1cPBytecode_invokeLresult_type6kMpnGThread__nJBasicType__;
  3.3568 +text: .text%__1cOloadConL13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3569 +text: .text%__1cKcmpOpFOperHgreater6kM_i_: ad_sparc_clone.o;
  3.3570 +text: .text%__1cIDivDNodeJideal_reg6kM_I_: classes.o;
  3.3571 +text: .text%__1cOMacroAssemblerKget_thread6M_v_;
  3.3572 +text: .text%__1cOcmovDF_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3573 +text: .text%__1cOcmovIF_immNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3574 +text: .text%__1cSconvI2F_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3575 +text: .text%__1cKVtableStub2n6FIi_pv_;
  3.3576 +text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_;
  3.3577 +text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o;
  3.3578 +text: .text%__1cPconvD2F_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3579 +text: .text%__1cIciMethodMnative_entry6M_pC_;
  3.3580 +text: .text%__1cVMoveF2I_stack_regNodeIpipeline6kM_pknIPipeline__;
  3.3581 +text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__;
  3.3582 +text: .text%__1cPorL_reg_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3583 +text: .text%__1cPconvD2F_regNodeIpipeline6kM_pknIPipeline__;
  3.3584 +text: .text%__1cIciSymbolHas_utf86M_pkc_;
  3.3585 +text: .text%__1cQandI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3586 +text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_;
  3.3587 +text: .text%__1cMnegD_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3588 +text: .text%__1cFStateO_sub_Op_CMoveP6MpknENode__v_;
  3.3589 +text: .text%__1cQmulD_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3590 +text: .text%__1cOMacroAssemblerZtotal_frame_size_in_bytes6Mi_i_;
  3.3591 +text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_;
  3.3592 +text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_;
  3.3593 +text: .text%Unsafe_StaticFieldOffset;
  3.3594 +text: .text%__1cQmulI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3595 +text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_;
  3.3596 +text: .text%__1cQaddI_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3597 +text: .text%__1cOcmovLI_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3598 +text: .text%JVM_GetClassContext;
  3.3599 +text: .text%Unsafe_StaticFieldBaseFromField;
  3.3600 +text: .text%Unsafe_EnsureClassInitialized;
  3.3601 +text: .text%__1cOcmovIF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3602 +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pCi_v_;
  3.3603 +text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_;
  3.3604 +text: .text%Unsafe_GetObjectVolatile;
  3.3605 +text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_;
  3.3606 +text: .text%__1cKstoreFNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3607 +text: .text%__1cVMoveL2D_stack_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3608 +text: .text%__1cJLoadLNodeMstore_Opcode6kM_i_: classes.o;
  3.3609 +text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__;
  3.3610 +text: .text%__1cOloadConL13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3611 +text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_;
  3.3612 +text: .text%__1cLstoreF0NodeOmemory_operand6kM_pknIMachOper__;
  3.3613 +text: .text%__1cPconvI2D_memNodeOmemory_operand6kM_pknIMachOper__;
  3.3614 +text: .text%__1cETypeFxdual6kM_pk0_;
  3.3615 +text: .text%__1cJOopMapSetQsingular_oop_map6M_pnGOopMap__;
  3.3616 +text: .text%__1cKimmU13OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3617 +text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
  3.3618 +text: .text%__1cZInterpreterMacroAssemblerbCincrement_invocation_counter6MpnMRegisterImpl_2_v_;
  3.3619 +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_int6MpnMRegisterImpl_2_v_;
  3.3620 +text: .text%__1cZInterpreterMacroAssemblerWempty_expression_stack6M_v_;
  3.3621 +text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_;
  3.3622 +text: .text%__1cOcmovIL_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3623 +text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_;
  3.3624 +text: .text%__1cOtailjmpIndNodePoper_input_base6kM_I_: ad_sparc_misc.o;
  3.3625 +text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_;
  3.3626 +text: .text%__1cCosEstat6FpkcpnEstat__i_;
  3.3627 +text: .text%__1cQregF_to_stkINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3628 +text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o;
  3.3629 +text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o;
  3.3630 +text: .text%__1cMMonitorChunk2t6Mi_v_;
  3.3631 +text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__;
  3.3632 +text: .text%__1cOPhaseIdealLoopJclone_iff6MpnHPhiNode_pnNIdealLoopTree__pnIBoolNode__;
  3.3633 +text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o;
  3.3634 +text: .text%__1cMMonitorValue2t6MpnTDebugInfoReadStream__v_;
  3.3635 +text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_;
  3.3636 +text: .text%__1cPorL_reg_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3637 +text: .text%__1cLOptoRuntimeMrethrow_Type6F_pknITypeFunc__;
  3.3638 +text: .text%jni_SetStaticObjectField: jni.o;
  3.3639 +text: .text%jni_RegisterNatives: jni.o;
  3.3640 +text: .text%__1cFframebLprevious_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_;
  3.3641 +text: .text%__1cQshlL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3642 +text: .text%JVM_GetClassDeclaredFields;
  3.3643 +text: .text%__1cCosMuser_handler6F_pv_;
  3.3644 +text: .text%JVM_IsSameClassPackage;
  3.3645 +text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_;
  3.3646 +text: .text%__1cKJavaThreadRadd_monitor_chunk6MpnMMonitorChunk__v_;
  3.3647 +text: .text%__1cKJavaThreadUremove_monitor_chunk6MpnMMonitorChunk__v_;
  3.3648 +text: .text%__1cVMoveL2D_stack_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3649 +text: .text%__1cNTemplateTableGiconst6Fi_v_;
  3.3650 +text: .text%__1cLConvF2INodeLbottom_type6kM_pknEType__: classes.o;
  3.3651 +text: .text%JVM_LoadLibrary;
  3.3652 +text: .text%JVM_IsSupportedJNIVersion;
  3.3653 +text: .text%Unsafe_ObjectFieldOffset;
  3.3654 +text: .text%__1cZInterpreterMacroAssemblerYtest_method_data_pointer6MrnFLabel__v_;
  3.3655 +text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_;
  3.3656 +text: .text%__1cZInterpreterMacroAssemblerSget_cpool_and_tags6MpnMRegisterImpl_2_v_;
  3.3657 +text: .text%__1cIAddDNodeIIdentity6MpnOPhaseTransform__pnENode__: classes.o;
  3.3658 +text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_;
  3.3659 +text: .text%__1cNTemplateTableH_return6FnITosState__v_;
  3.3660 +text: .text%__1cHOrLNodeLbottom_type6kM_pknEType__: classes.o;
  3.3661 +text: .text%__1cKimmP13OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3662 +text: .text%__1cLConvD2FNodeLbottom_type6kM_pknEType__: classes.o;
  3.3663 +text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_;
  3.3664 +text: .text%__1cHnmethodbJcontinuation_for_implicit_exception6MpC_1_;
  3.3665 +text: .text%__1cNSharedRuntimeEdrem6Fdd_d_;
  3.3666 +text: .text%__1cPstoreI_FregNodeOmemory_operand6kM_pknIMachOper__;
  3.3667 +text: .text%__1cTloadD_unalignedNodeOmemory_operand6kM_pknIMachOper__;
  3.3668 +text: .text%__1cOloadI_fregNodeOmemory_operand6kM_pknIMachOper__;
  3.3669 +text: .text%__1cLconvP2BNodeIpipeline6kM_pknIPipeline__;
  3.3670 +text: .text%__1cCosZvm_allocation_granularity6F_i_;
  3.3671 +text: .text%__1cMTailJumpNodeGOpcode6kM_i_;
  3.3672 +text: .text%__1cTloadD_unalignedNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3673 +text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_;
  3.3674 +text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o;
  3.3675 +text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_;
  3.3676 +text: .text%__1cNRegisterSaverWrestore_live_registers6FpnOMacroAssembler__v_;
  3.3677 +text: .text%__1cLTypeInstPtrOxmeet_unloaded6kMpk0_2_;
  3.3678 +text: .text%__1cRtestI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3679 +text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_;
  3.3680 +text: .text%__1cWImplicitExceptionTable2t6MpknHnmethod__v_;
  3.3681 +text: .text%__1cWImplicitExceptionTableCat6kMI_I_;
  3.3682 +text: .text%__1cFParseVcatch_call_exceptions6MrnYciExceptionHandlerStream__v_;
  3.3683 +text: .text%jni_GetJavaVM;
  3.3684 +text: .text%__1cOcmovDF_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3685 +text: .text%jni_MonitorEnter: jni.o;
  3.3686 +text: .text%jni_MonitorExit: jni.o;
  3.3687 +text: .text%__1cLConvL2DNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3688 +text: .text%__1cULinearLeastSquareFit2t6MI_v_;
  3.3689 +text: .text%__1cQdivL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3690 +text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__;
  3.3691 +text: .text%__1cNReservedSpace2t6MI_v_;
  3.3692 +text: .text%__1cSCardTableExtensionVresize_covered_region6MnJMemRegion__v_;
  3.3693 +text: .text%__1cOloadI_fregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3694 +text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_;
  3.3695 +text: .text%__1cIAddDNodeJideal_reg6kM_I_: classes.o;
  3.3696 +text: .text%__1cJloadFNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3697 +text: .text%__1cKConv2BNodeJideal_reg6kM_I_: classes.o;
  3.3698 +text: .text%__1cLConvI2DNodeJideal_reg6kM_I_: classes.o;
  3.3699 +text: .text%__1cSconvD2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3700 +text: .text%jni_Throw: jni.o;
  3.3701 +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_;
  3.3702 +text: .text%__1cLMoveL2DNodeLbottom_type6kM_pknEType__: classes.o;
  3.3703 +text: .text%__1cIDivINodeJideal_reg6kM_I_: classes.o;
  3.3704 +text: .text%__1cISubDNodeGadd_id6kM_pknEType__: classes.o;
  3.3705 +text: .text%__1cPstoreI_FregNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3706 +text: .text%__1cINegFNodeLbottom_type6kM_pknEType__: classes.o;
  3.3707 +text: .text%__1cOLibraryCallKitXgenerate_current_thread6MrpnENode__2_;
  3.3708 +text: .text%__1cOMacroAssemblerEfneg6MnRFloatRegisterImplFWidth_p13_v_;
  3.3709 +text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRT_sparc.o;
  3.3710 +text: .text%__1cRtestI_reg_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3711 +text: .text%__1cNSpaceCounters2t6MpkciIpnMMutableSpace_pnSGenerationCounters__v_;
  3.3712 +text: .text%__1cLcmpF_ccNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3713 +text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_;
  3.3714 +text: .text%jni_SetObjectField: jni.o;
  3.3715 +text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__;
  3.3716 +text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_;
  3.3717 +text: .text%bootstrap_flush_windows;
  3.3718 +text: .text%__1cSdivL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3719 +text: .text%__1cZInterpreterMacroAssemblerbCverify_oop_or_return_address6MpnMRegisterImpl_2_v_;
  3.3720 +text: .text%__1cFStateO_sub_Op_Conv2B6MpknENode__v_;
  3.3721 +text: .text%__1cNRegisterSaverTsave_live_registers6FpnOMacroAssembler_ipi_pnGOopMap__;
  3.3722 +text: .text%__1cSmulL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3723 +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_;
  3.3724 +text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_;
  3.3725 +text: .text%__1cSsubL_reg_reg_1NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3726 +text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_double6M_v_;
  3.3727 +text: .text%__1cQmulD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3728 +text: .text%Unsafe_AllocateMemory;
  3.3729 +text: .text%__1cSandL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3730 +text: .text%JVM_GetLastErrorString;
  3.3731 +text: .text%__1cQmodL_reg_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3732 +text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_;
  3.3733 +text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_;
  3.3734 +text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_;
  3.3735 +text: .text%__1cPstoreI_FregNodeLout_RegMask6kM_rknHRegMask__;
  3.3736 +text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_;
  3.3737 +text: .text%__1cSandI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3738 +text: .text%__1cMnegD_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3739 +text: .text%__1cNciMethodKlassEmake6F_p0_;
  3.3740 +text: .text%__1cNTemplateTableGlstore6Fi_v_;
  3.3741 +text: .text%__1cLConvF2INodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3742 +text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_;
  3.3743 +text: .text%__1cRcompL_reg_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3744 +text: .text%__1cLconvI2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3745 +text: .text%__1cLConvD2FNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3746 +text: .text%__1cSconvD2I_helperNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3747 +text: .text%__1cRsubI_zero_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3748 +text: .text%__1cKstfSSFNodeIpipeline6kM_pknIPipeline__;
  3.3749 +text: .text%__1cOClassPathEntry2t6M_v_;
  3.3750 +text: .text%__1cZInterpreterMacroAssemblerQaccess_local_ptr6MpnMRegisterImpl_2_v_;
  3.3751 +text: .text%__1cNTemplateTableGistore6Fi_v_;
  3.3752 +text: .text%__1cIRetTableUfind_jsrs_for_target6Mi_pnNRetTableEntry__;
  3.3753 +text: .text%__1cPconvL2I_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3754 +text: .text%__1cUcompI_iReg_imm13NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3755 +text: .text%__1cRsarI_reg_imm5NodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3756 +text: .text%__1cNTemplateTableGastore6Fi_v_;
  3.3757 +text: .text%__1cIRetTableHadd_jsr6Mii_v_;
  3.3758 +text: .text%__1cMnegF_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3759 +text: .text%__1cQregF_to_stkINodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3760 +text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o;
  3.3761 +text: .text%__1cNTemplateTableGdstore6Fi_v_;
  3.3762 +text: .text%__1cNTemplateTableGfstore6Fi_v_;
  3.3763 +text: .text%jni_CallStaticObjectMethod: jni.o;
  3.3764 +text: .text%__1cOcmovLL_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3765 +text: .text%__1cLconvI2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3766 +text: .text%__1cODeoptimizationLUnrollBlockOsize_of_frames6kM_i_;
  3.3767 +text: .text%__1cCosGsignal6Fipv_1_;
  3.3768 +text: .text%__1cQaddD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3769 +text: .text%__1cISubDNodeJideal_reg6kM_I_: classes.o;
  3.3770 +text: .text%__1cISubFNodeGadd_id6kM_pknEType__: classes.o;
  3.3771 +text: .text%__1cISubFNodeJideal_reg6kM_I_: classes.o;
  3.3772 +text: .text%__1cNTemplateTableFlload6Fi_v_;
  3.3773 +text: .text%__1cNTemplateTableFiload6Fi_v_;
  3.3774 +text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_;
  3.3775 +text: .text%__1cLconvP2BNodeLout_RegMask6kM_rknHRegMask__;
  3.3776 +text: .text%__1cVMoveF2I_stack_regNodeErule6kM_I_: ad_sparc_misc.o;
  3.3777 +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC22_v_;
  3.3778 +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC2_v_;
  3.3779 +text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_;
  3.3780 +text: .text%__1cOGenerateOopMapTret_jump_targets_do6MpnOBytecodeStream_pFp0ipi_vi4_v_;
  3.3781 +text: .text%__1cPconvI2D_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3782 +text: .text%Unsafe_SetMemory;
  3.3783 +text: .text%__1cKstfSSFNodeErule6kM_I_: ad_sparc_misc.o;
  3.3784 +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_x6MnJAssemblerJCondition_pCpnMRegisterImpl__v_;
  3.3785 +text: .text%__1cVMoveF2I_stack_regNodeLout_RegMask6kM_rknHRegMask__;
  3.3786 +text: .text%__1cHTypePtrKadd_offset6kMi_pk0_;
  3.3787 +text: .text%__1cOcmovLI_regNodeHsize_of6kM_I_: ad_sparc_misc.o;
  3.3788 +text: .text%__1cNloadConL0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3789 +text: .text%__1cOcmovPI_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3790 +text: .text%__1cOcmovDF_regNodeHtwo_adr6kM_I_: ad_sparc_misc.o;
  3.3791 +text: .text%__1cQsubF_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3792 +text: .text%__1cFParseRjump_if_true_fork6MpnGIfNode_ii_v_;
  3.3793 +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_icc6MnJAssemblerJCondition_pCpnMRegisterImpl__v_;
  3.3794 +text: .text%__1cNTemplateTableFfload6Fi_v_;
  3.3795 +text: .text%__1cFParsePdo_lookupswitch6M_v_;
  3.3796 +text: .text%__1cNTemplateTableFdload6Fi_v_;
  3.3797 +text: .text%__1cKstfSSFNodeLout_RegMask6kM_rknHRegMask__;
  3.3798 +text: .text%__1cINegDNodeJideal_reg6kM_I_: classes.o;
  3.3799 +text: .text%__1cNTemplateTableFaload6Fi_v_;
  3.3800 +text: .text%__1cRMachSpillCopyNodeHsize_of6kM_I_: ad_sparc.o;
  3.3801 +text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_;
  3.3802 +text: .text%__1cOGenerateOopMapRdo_multianewarray6Mii_v_;
  3.3803 +text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_;
  3.3804 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3805 +text: .text%jni_CallStaticObjectMethodV: jni.o;
  3.3806 +text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_;
  3.3807 +text: .text%__1cJMemRegionFminus6kMk0_0_;
  3.3808 +text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__;
  3.3809 +text: .text%__1cSInterpreterRuntimebKthrow_ArrayIndexOutOfBoundsException6FpnKJavaThread_pci_v_;
  3.3810 +text: .text%__1cNMemoryManager2t6M_v_;
  3.3811 +text: .text%__1cFStatebB_sub_Op_PartialSubtypeCheck6MpknENode__v_;
  3.3812 +text: .text%__1cFStateM_sub_Op_DivI6MpknENode__v_;
  3.3813 +text: .text%__1cUPSGenerationCounters2t6MpkciipnOPSVirtualSpace__v_;
  3.3814 +text: .text%__1cCosFyield6F_v_;
  3.3815 +text: .text%__1cQsubD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3816 +text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRT_sparc.o;
  3.3817 +text: .text%__1cIDivDNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3818 +text: .text%__1cFParseRdo_multianewarray6M_v_;
  3.3819 +text: .text%__1cLOptoRuntimeTmultianewarray_Type6Fi_pknITypeFunc__;
  3.3820 +text: .text%__1cZInterpreterMacroAssemblerRget_constant_pool6MpnMRegisterImpl__v_;
  3.3821 +text: .text%__1cXPartialSubtypeCheckNodeJideal_reg6kM_I_: classes.o;
  3.3822 +text: .text%__1cOcmovIF_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3823 +text: .text%__1cLMoveF2INodeLbottom_type6kM_pknEType__: classes.o;
  3.3824 +text: .text%__1cSconvI2D_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3825 +text: .text%__1cLstoreF0NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3826 +text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl_2_v_;
  3.3827 +text: .text%__1cPstoreI_FregNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3828 +text: .text%__1cOcmovLL_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3829 +text: .text%__1cZInterpreterMacroAssemblerUupdate_mdp_by_offset6MpnMRegisterImpl_i2_v_;
  3.3830 +text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
  3.3831 +text: .text%__1cMciArrayKlassRbase_element_type6M_pnGciType__;
  3.3832 +text: .text%JVM_GetInterfaceVersion;
  3.3833 +text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_2222rnFLabel__v_;
  3.3834 +text: .text%__1cbFpartialSubtypeCheck_vs_zeroNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3835 +text: .text%__1cNTemplateTableGfconst6Fi_v_;
  3.3836 +text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_;
  3.3837 +text: .text%__1cOcmovPI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3838 +text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_;
  3.3839 +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_;
  3.3840 +text: .text%JVM_RegisterSignal;
  3.3841 +text: .text%JVM_FindSignal;
  3.3842 +text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o;
  3.3843 +text: .text%jio_vsnprintf;
  3.3844 +text: .text%__1cQshrL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3845 +text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_222_v_;
  3.3846 +text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_;
  3.3847 +text: .text%__1cOPSVirtualSpace2t6MnNReservedSpace_I_v_;
  3.3848 +text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o;
  3.3849 +text: .text%__1cNIdealLoopTreeUmerge_many_backedges6MpnOPhaseIdealLoop__v_;
  3.3850 +text: .text%__1cODeoptimizationLUnrollBlock2T6M_v_;
  3.3851 +text: .text%jni_GetDoubleArrayRegion: jni.o;
  3.3852 +text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
  3.3853 +text: .text%__1cLconvP2BNodeErule6kM_I_: ad_sparc_misc.o;
  3.3854 +text: .text%__1cKfix_parent6FpnNIdealLoopTree_1_v_: loopnode.o;
  3.3855 +text: .text%JVM_Available;
  3.3856 +text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_;
  3.3857 +text: .text%__1cQshlL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3858 +text: .text%__1cZInterpreterMacroAssemblerQtop_most_monitor6M_nHAddress__;
  3.3859 +text: .text%__1cLstoreF0NodeLout_RegMask6kM_rknHRegMask__;
  3.3860 +text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_;
  3.3861 +text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_;
  3.3862 +text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_;
  3.3863 +text: .text%__1cSconvF2I_helperNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3864 +text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_;
  3.3865 +text: .text%__1cbAconvL2D_reg_slow_fxtofNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3866 +text: .text%__1cOstackSlotFOperEdisp6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3867 +text: .text%__1cOstackSlotFOperEbase6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3868 +text: .text%__1cOstackSlotFOperFindex6kMpnNPhaseRegAlloc_pknENode_i_i_: ad_sparc.o;
  3.3869 +text: .text%__1cPconvF2I_regNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.3870 +text: .text%__1cNTemplateTableGlconst6Fi_v_;
  3.3871 +text: .text%__1cLstoreC0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3872 +text: .text%__1cMPeriodicTaskGenroll6M_v_;
  3.3873 +text: .text%__1cMPeriodicTask2t6MI_v_;
  3.3874 +text: .text%__1cNTemplateTableHcastore6F_v_;
  3.3875 +text: .text%Unsafe_CompareAndSwapObject;
  3.3876 +text: .text%__1cLNamedThread2t6M_v_;
  3.3877 +text: .text%__1cLNamedThreadIset_name6MpkcE_v_;
  3.3878 +text: .text%__1cJloadDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3879 +text: .text%__1cQdivD_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3880 +text: .text%__1cWloadConI_x43300000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3881 +text: .text%__1cNTemplateTableKinitialize6F_v_;
  3.3882 +text: .text%__1cKcmpOpFOperJnot_equal6kM_i_: ad_sparc_clone.o;
  3.3883 +text: .text%__1cPconvD2F_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3884 +text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_;
  3.3885 +text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_;
  3.3886 +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_;
  3.3887 +text: .text%__1cNTemplateTableGdconst6Fi_v_;
  3.3888 +text: .text%__1cSconvF2I_helperNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3889 +text: .text%__1cOcmovIF_immNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3890 +text: .text%__1cOcmovIF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3891 +text: .text%__1cJimmL0OperJnum_edges6kM_I_: ad_sparc_clone.o;
  3.3892 +text: .text%__1cSaddD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3893 +text: .text%__1cSsubD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3894 +text: .text%__1cQregF_to_stkINodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3895 +text: .text%__1cNTemplateTableTinvokevfinal_helper6FpnMRegisterImpl_2_v_;
  3.3896 +text: .text%__1cSmulD_regD_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3897 +text: .text%__1cNTemplateTableUgenerate_vtable_call6FpnMRegisterImpl_22_v_;
  3.3898 +text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_;
  3.3899 +text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_;
  3.3900 +text: .text%__1cNVM_DeoptimizeEdoit6M_v_;
  3.3901 +text: .text%__1cMnegF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3902 +text: .text%__1cQsubL_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3903 +text: .text%__1cMVirtualSpace2t6M_v_;
  3.3904 +text: .text%__1cWloadConI_x41f00000NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3905 +text: .text%__1cQdivI_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3906 +text: .text%__1cZregDHi_regDLo_to_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3907 +text: .text%__1cXconvI2D_regDHi_regDNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3908 +text: .text%__1cKloadUBNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3909 +text: .text%__1cNTemplateTableEidiv6F_v_;
  3.3910 +text: .text%__1cQstkI_to_regINodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3911 +text: .text%__1cLMoveL2DNodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3912 +text: .text%__1cLConvD2FNodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3913 +text: .text%__1cLConvF2INodeIIdentity6MpnOPhaseTransform__pnENode__;
  3.3914 +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_;
  3.3915 +text: .text%__1cLMoveF2INodeFValue6kMpnOPhaseTransform__pknEType__;
  3.3916 +text: .text%__1cLOptoRuntimeIl2f_Type6F_pknITypeFunc__;
  3.3917 +text: .text%__1cOMacroAssemblerUcalc_mem_param_words6MpnMRegisterImpl_2_v_;
  3.3918 +text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MnITosState_pnMRegisterImpl_3_v_;
  3.3919 +text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_2_v_;
  3.3920 +text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_;
  3.3921 +text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_;
  3.3922 +text: .text%__1cZInterpreterMacroAssemblerSupdate_mdp_for_ret6MnITosState_pnMRegisterImpl__v_;
  3.3923 +text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_;
  3.3924 +text: .text%__1cUInterpreterGeneratorLlock_method6M_v_;
  3.3925 +text: .text%__1cZInterpreterMacroAssemblerOthrow_if_not_26MpCpnMRegisterImpl_rnFLabel__v_;
  3.3926 +text: .text%__1cZInterpreterMacroAssemblerQthrow_if_not_1_x6MnJAssemblerJCondition_rnFLabel__v_;
  3.3927 +text: .text%__1cZInterpreterMacroAssemblerZget_4_byte_integer_at_bcp6MipnMRegisterImpl_2n0AKsetCCOrNot__v_;
  3.3928 +text: .text%__1cCosHrealloc6FpvI_1_;
  3.3929 +text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_;
  3.3930 +text: .text%__1cFStateO_sub_Op_CMoveL6MpknENode__v_;
  3.3931 +text: .text%__1cZInterpreterMacroAssemblerRaccess_local_long6MpnMRegisterImpl_2_v_;
  3.3932 +text: .text%__1cIPSOldGenPinitialize_work6Mpkci_v_;
  3.3933 +text: .text%__1cCosIjvm_path6Fpci_v_;
  3.3934 +text: .text%__1cCosNsigexitnum_pd6F_i_;
  3.3935 +text: .text%__1cCosScurrent_process_id6F_i_;
  3.3936 +text: .text%__1cINegFNodeJideal_reg6kM_I_: classes.o;
  3.3937 +text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_;
  3.3938 +text: .text%__1cLConvL2DNodeJideal_reg6kM_I_: classes.o;
  3.3939 +text: .text%__1cLConvF2INodeJideal_reg6kM_I_: classes.o;
  3.3940 +text: .text%__1cLConvD2FNodeJideal_reg6kM_I_: classes.o;
  3.3941 +text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__;
  3.3942 +text: .text%__1cZInterpreterMacroAssemblerSaccess_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
  3.3943 +text: .text%__1cZInterpreterMacroAssemblerTaccess_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
  3.3944 +text: .text%__1cZInterpreterMacroAssemblerPstore_local_int6MpnMRegisterImpl_2_v_;
  3.3945 +text: .text%__1cZInterpreterMacroAssemblerQstore_local_long6MpnMRegisterImpl_2_v_;
  3.3946 +text: .text%__1cZInterpreterMacroAssemblerRstore_local_float6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
  3.3947 +text: .text%__1cZInterpreterMacroAssemblerSstore_local_double6MpnMRegisterImpl_pnRFloatRegisterImpl__v_;
  3.3948 +text: .text%__1cCosWactive_processor_count6F_i_;
  3.3949 +text: .text%__1cTAbstractInterpreterKinitialize6F_v_;
  3.3950 +text: .text%jni_NewWeakGlobalRef: jni.o;
  3.3951 +text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o;
  3.3952 +text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o;
  3.3953 +text: .text%__1cOGenerateOopMapTadd_to_ref_init_set6Mi_v_;
  3.3954 +text: .text%__1cUGcThreadCountClosureJdo_thread6MpnGThread__v_;
  3.3955 +text: .text%__1cNinstanceKlassSremove_osr_nmethod6MpnHnmethod__v_;
  3.3956 +text: .text%__1cOPSVirtualSpace2t6M_v_;
  3.3957 +text: .text%jni_IsInstanceOf: jni.o;
  3.3958 +text: .text%__1cMGCTaskThreadDrun6M_v_;
  3.3959 +text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_;
  3.3960 +text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o;
  3.3961 +text: .text%jni_CallStaticVoidMethodV: jni.o;
  3.3962 +text: .text%jni_CallStaticBooleanMethod: jni.o;
  3.3963 +text: .text%__1cMGCTaskThread2t6MpnNGCTaskManager_II_v_;
  3.3964 +text: .text%__1cOtailjmpIndNodeIpipeline6kM_pknIPipeline__;
  3.3965 +text: .text%__1cMGCTaskThreadFstart6M_v_;
  3.3966 +text: .text%__1cQObjectStartArrayKinitialize6MnJMemRegion__v_;
  3.3967 +text: .text%__1cQObjectStartArraySset_covered_region6MnJMemRegion__v_;
  3.3968 +text: .text%__1cZInterpreterMacroAssemblerbAdispatch_next_noverify_oop6MnITosState_i_v_;
  3.3969 +text: .text%__1cRCollectorCounters2t6Mpkci_v_;
  3.3970 +text: .text%__1cFParseDl2f6M_v_;
  3.3971 +text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_;
  3.3972 +text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_;
  3.3973 +text: .text%__1cPGCMemoryManager2t6M_v_;
  3.3974 +text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o;
  3.3975 +text: .text%__1cSInterpreterRuntimeWcreate_klass_exception6FpnKJavaThread_pcpnHoopDesc__v_;
  3.3976 +text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o;
  3.3977 +text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_;
  3.3978 +text: .text%__1cSInterpreterRuntimeSupdate_mdp_for_ret6FpnKJavaThread_i_v_;
  3.3979 +text: .text%__1cPorL_reg_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3980 +text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_;
  3.3981 +text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRT_sparc.o;
  3.3982 +text: .text%__1cCosHSolarisQsignal_sets_init6F_v_;
  3.3983 +text: .text%__1cCosbDallocate_thread_local_storage6F_i_;
  3.3984 +text: .text%__1cUInterpreterGeneratorVrestore_native_result6M_v_;
  3.3985 +text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_;
  3.3986 +text: .text%__1cLconvP2BNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3987 +text: .text%__1cVshrL_reg_imm6_L2INodeEsize6kMpnNPhaseRegAlloc__I_;
  3.3988 +text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
  3.3989 +text: .text%__1cCosGstrdup6Fpkc_pc_;
  3.3990 +text: .text%__1cCosLinit_random6Fl_v_;
  3.3991 +text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_;
  3.3992 +text: .text%__1cCosXterminate_signal_thread6F_v_;
  3.3993 +text: .text%__1cCosLsignal_init6F_v_;
  3.3994 +text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o;
  3.3995 +text: .text%__1cOtailjmpIndNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.3996 +text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_;
  3.3997 +text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_;
  3.3998 +text: .text%__1cCosbDinit_system_properties_values6F_v_;
  3.3999 +text: .text%__1cCosPphysical_memory6F_X_;
  3.4000 +text: .text%__1cHvm_exit6Fi_v_;
  3.4001 +text: .text%__1cLbefore_exit6FpnKJavaThread__v_;
  3.4002 +text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_;
  3.4003 +text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
  3.4004 +text: .text%__1cVMoveF2I_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4005 +text: .text%__1cVMoveL2D_stack_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4006 +text: .text%__1cWinvocationCounter_init6F_v_;
  3.4007 +text: .text%__1cKTypeOopPtrEmake6FnHTypePtrDPTR_i_pk0_;
  3.4008 +text: .text%__1cKTypeOopPtrFxdual6kM_pknEType__;
  3.4009 +text: .text%__1cFParseMjump_if_join6MpnENode_2_2_;
  3.4010 +text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4011 +text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o;
  3.4012 +text: .text%__1cLconvP2BNodeGExpand6MpnFState_rnJNode_List__pnIMachNode__;
  3.4013 +text: .text%__1cETypeRInitialize_shared6FpnHCompile__v_;
  3.4014 +text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_;
  3.4015 +text: .text%__1cVInterfaceSupport_init6F_v_;
  3.4016 +text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpnMRegisterImpl_pC2_v_;
  3.4017 +text: .text%__1cPGenerationSizerQinitialize_flags6M_v_: parallelScavengeHeap.o;
  3.4018 +text: .text%__1cZInterpreterMacroAssemblerPdispatch_normal6MnITosState__v_;
  3.4019 +text: .text%__1cJTimeStampMmilliseconds6kM_x_;
  3.4020 +text: .text%__1cDhpiZinitialize_socket_library6F_i_;
  3.4021 +text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_;
  3.4022 +text: .text%__1cWInlineCacheBuffer_init6F_v_;
  3.4023 +text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_;
  3.4024 +text: .text%__1cPGlobalTLABStats2t6M_v_;
  3.4025 +text: .text%__1cLicache_init6F_v_;
  3.4026 +text: .text%__1cSThreadLocalStorageEinit6F_v_;
  3.4027 +text: .text%__1cNThreadServiceEinit6F_v_;
  3.4028 +text: .text%__1cTICacheStubGeneratorVgenerate_icache_flush6MppFpCii_i_v_;
  3.4029 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: indexSet.o;
  3.4030 +text: .text%__1cPvm_init_globals6F_v_;
  3.4031 +text: .text%__1cMinit_globals6F_i_;
  3.4032 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_expand.o;
  3.4033 +text: .text%__1cMexit_globals6F_v_;
  3.4034 +text: .text%__1cSset_init_completed6F_v_;
  3.4035 +text: .text%__1cNinstanceKlassZrelease_C_heap_structures6M_v_;
  3.4036 +text: .text%__1cJTimeStampJupdate_to6Mx_v_;
  3.4037 +text: .text%__1cUParallelScavengeHeapItop_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o;
  3.4038 +text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_;
  3.4039 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interp_masm_sparc.o;
  3.4040 +text: .text%__1cQinterpreter_init6F_v_;
  3.4041 +text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_;
  3.4042 +text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o;
  3.4043 +text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o;
  3.4044 +text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_;
  3.4045 +text: .text%__1cCosLsignal_wait6F_i_;
  3.4046 +text: .text%__1cCosNsignal_notify6Fi_v_;
  3.4047 +text: .text%__1cCosOsignal_init_pd6F_v_;
  3.4048 +text: .text%__1cCosHSolarisPinit_signal_mem6F_v_;
  3.4049 +text: .text%__1cCosSget_temp_directory6F_pkc_;
  3.4050 +text: .text%__1cCosHSolarisOlibthread_init6F_v_;
  3.4051 +text: .text%__1cUParallelScavengeHeapIend_addr6kM_ppnIHeapWord__: parallelScavengeHeap.o;
  3.4052 +text: .text%__1cUParallelScavengeHeapEheap6F_p0_;
  3.4053 +text: .text%__1cUParallelScavengeHeapNgc_threads_do6kMpnNThreadClosure__v_;
  3.4054 +text: .text%__1cUParallelScavengeHeapYpermanent_object_iterate6MpnNObjectClosure__v_;
  3.4055 +text: .text%__1cKcmpOpFOperNgreater_equal6kM_i_: ad_sparc_clone.o;
  3.4056 +text: .text%__1cUParallelScavengeHeapMmax_capacity6kM_I_;
  3.4057 +text: .text%__1cUParallelScavengeHeapPpost_initialize6M_v_;
  3.4058 +text: .text%__1cUParallelScavengeHeapKinitialize6M_i_;
  3.4059 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o;
  3.4060 +text: .text%__1cZInterpreterMacroAssemblerbFset_method_data_pointer_for_bcp6M_v_;
  3.4061 +text: .text%__SLIP.DELETER__C: ostream.o;
  3.4062 +text: .text%__1cMostream_exit6F_v_;
  3.4063 +text: .text%__1cQostream_init_log6F_v_;
  3.4064 +text: .text%__1cMostream_init6F_v_;
  3.4065 +text: .text%__1cCosXnon_memory_address_word6F_pc_;
  3.4066 +text: .text%__1cCosGinit_26F_i_;
  3.4067 +text: .text%__1cCosEinit6F_v_;
  3.4068 +text: .text%__1cCosHSolarisUsynchronization_init6F_v_;
  3.4069 +text: .text%__1cVjni_GetLongField_addr6F_pC_;
  3.4070 +text: .text%__1cNIdealLoopTreeQsplit_outer_loop6MpnOPhaseIdealLoop__v_;
  3.4071 +text: .text%__1cRLowMemoryDetectorKinitialize6F_v_;
  3.4072 +text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_;
  3.4073 +text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_;
  3.4074 +text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_;
  3.4075 +text: .text%__1cTloadL_unalignedNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4076 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: machnode.o;
  3.4077 +text: .text%__1cPmanagement_init6F_v_;
  3.4078 +text: .text%__1cOvmStructs_init6F_v_;
  3.4079 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o;
  3.4080 +text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_;
  3.4081 +text: .text%__1cKManagementKinitialize6FpnGThread__v_;
  3.4082 +text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_;
  3.4083 +text: .text%__1cIVMThreadGcreate6F_v_;
  3.4084 +text: .text%__1cIVMThreadDrun6M_v_;
  3.4085 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o;
  3.4086 +text: .text%__1cLJvmtiExportNpost_vm_start6F_v_;
  3.4087 +text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_;
  3.4088 +text: .text%__1cLJvmtiExportNpost_vm_death6F_v_;
  3.4089 +text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_;
  3.4090 +text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_;
  3.4091 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o;
  3.4092 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o;
  3.4093 +text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4094 +text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o;
  3.4095 +text: .text%__1cVLoaderConstraintTable2t6Mi_v_;
  3.4096 +text: .text%__1cQregL_to_stkLNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4097 +text: .text%__1cHRetDataPpost_initialize6MpnOBytecodeStream_pnRmethodDataOopDesc__v_;
  3.4098 +text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_;
  3.4099 +text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_;
  3.4100 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o;
  3.4101 +text: .text%__1cPVM_Version_init6F_v_;
  3.4102 +text: .text%__1cKVM_VersionKinitialize6F_v_;
  3.4103 +text: .text%__1cHRetDataJfixup_ret6MinQmethodDataHandle__pC_;
  3.4104 +text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4105 +text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o;
  3.4106 +text: .text%__1cQvtableStubs_init6F_v_;
  3.4107 +text: .text%__1cKi0RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.4108 +text: .text%__1cKg1RegPOperKin_RegMask6kMi_pknHRegMask__;
  3.4109 +text: .text%__1cFVTuneEexit6F_v_;
  3.4110 +text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o;
  3.4111 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodLiveness.o;
  3.4112 +text: .text%__1cMMutableSpaceOobject_iterate6MpnNObjectClosure__v_;
  3.4113 +text: .text%__1cKvtune_init6F_v_;
  3.4114 +text: .text%__1cKmutex_init6F_v_;
  3.4115 +text: .text%__1cQaccessFlags_init6F_v_;
  3.4116 +text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpnMRegisterImpl_pC222_v_;
  3.4117 +text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_;
  3.4118 +text: .text%__1cOmarksweep_init6F_v_;
  3.4119 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o;
  3.4120 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: matcher.o;
  3.4121 +text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_;
  3.4122 +text: .text%__1cNMemoryManagerbDget_psScavenge_memory_manager6F_pnPGCMemoryManager__;
  3.4123 +text: .text%__1cNMemoryManagerbEget_psMarkSweep_memory_manager6F_pnPGCMemoryManager__;
  3.4124 +text: .text%__1cHVM_ExitEdoit6M_v_;
  3.4125 +text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_;
  3.4126 +text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_;
  3.4127 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o;
  3.4128 +text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4129 +text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o;
  3.4130 +text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o;
  3.4131 +text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_;
  3.4132 +text: .text%__1cLstoreF0NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4133 +text: .text%JNI_CreateJavaVM;
  3.4134 +text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_;
  3.4135 +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_;
  3.4136 +text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4137 +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_;
  3.4138 +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_;
  3.4139 +text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_;
  3.4140 +text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_;
  3.4141 +text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o;
  3.4142 +text: .text%__1cIUniversePcheck_alignment6FIIpkc_v_;
  3.4143 +text: .text%__1cIUniverseHgenesis6FpnGThread__v_;
  3.4144 +text: .text%__1cVquicken_jni_functions6F_v_;
  3.4145 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
  3.4146 +text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_;
  3.4147 +text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_;
  3.4148 +text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_;
  3.4149 +text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_;
  3.4150 +text: .text%__1cQdivD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4151 +text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_;
  3.4152 +text: .text%__1cQjavaClasses_init6F_v_;
  3.4153 +text: .text%jni_ToReflectedMethod: jni.o;
  3.4154 +text: .text%__1cQsubD_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4155 +text: .text%__1cYjni_GetBooleanField_addr6F_pC_;
  3.4156 +text: .text%__1cVjni_GetByteField_addr6F_pC_;
  3.4157 +text: .text%__1cQaddF_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4158 +text: .text%__1cVjni_GetCharField_addr6F_pC_;
  3.4159 +text: .text%__1cWjni_GetShortField_addr6F_pC_;
  3.4160 +text: .text%__1cUjni_GetIntField_addr6F_pC_;
  3.4161 +text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_;
  3.4162 +text: .text%__1cWjni_GetFloatField_addr6F_pC_;
  3.4163 +text: .text%__1cRsarL_reg_imm6NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4164 +text: .text%__1cXjni_GetDoubleField_addr6F_pC_;
  3.4165 +text: .text%__1cQshlI_reg_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4166 +text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_;
  3.4167 +text: .text%JVM_InitializeSocketLibrary;
  3.4168 +text: .text%JVM_RegisterUnsafeMethods;
  3.4169 +text: .text%__1cOcmovLI_regNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4170 +text: .text%__1cOcmovLI_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4171 +text: .text%__1cOcmovDF_regNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4172 +text: .text%JVM_Socket;
  3.4173 +text: .text%__1cbEinitialize_converter_functions6F_v_;
  3.4174 +text: .text%JVM_SupportsCX8;
  3.4175 +text: .text%__1cOcmovIF_immNodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4176 +text: .text%__1cUJvmtiEventControllerIvm_start6F_v_;
  3.4177 +text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
  3.4178 +text: .text%__1cUJvmtiEventControllerIvm_death6F_v_;
  3.4179 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o;
  3.4180 +text: .text%__1cKstfSSFNodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4181 +text: .text%__1cLJvmtiExportRenter_start_phase6F_v_;
  3.4182 +text: .text%__1cLJvmtiExportQenter_live_phase6F_v_;
  3.4183 +text: .text%__1cSmulL_reg_imm13NodeEemit6kMrnKCodeBuffer_pnNPhaseRegAlloc__v_;
  3.4184 +text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_;
  3.4185 +text: .text%__1cSmulI_reg_imm13NodeEsize6kMpnNPhaseRegAlloc__I_;
  3.4186 +text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_;
  3.4187 +text: .text%__1cNuniverse_init6F_i_;
  3.4188 +text: .text%__1cOuniverse2_init6F_v_;
  3.4189 +text: .text%__1cQjni_handles_init6F_v_;
  3.4190 +text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o;
  3.4191 +text: .text%Unsafe_SetNativeLong;
  3.4192 +text: .text%JVM_InitProperties;
  3.4193 +text: .text%JVM_Halt;
  3.4194 +text: .text%Unsafe_FreeMemory;
  3.4195 +text: .text%Unsafe_PageSize;
  3.4196 +text: .text%JVM_MaxMemory;
  3.4197 +text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__;
  3.4198 +text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4199 +text: .text%JVM_GetClassDeclaredMethods;
  3.4200 +text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__;
  3.4201 +text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_;
  3.4202 +text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_;
  3.4203 +text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_;
  3.4204 +text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
  3.4205 +text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_;
  3.4206 +text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_;
  3.4207 +text: .text%__1cLClassLoaderQload_zip_library6F_v_;
  3.4208 +text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_;
  3.4209 +text: .text%__1cLClassLoaderKinitialize6F_v_;
  3.4210 +text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_;
  3.4211 +text: .text%__1cMPeriodicTask2T5B6M_v_;
  3.4212 +text: .text%__1cQclassLoader_init6F_v_;
  3.4213 +text: .text%__1cMPeriodicTaskJdisenroll6M_v_;
  3.4214 +text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o;
  3.4215 +text: .text%__1cTClassLoadingServiceEinit6F_v_;
  3.4216 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: regmask.o;
  3.4217 +text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
  3.4218 +text: .text%__1cVRegistersForDebuggingRrestore_registers6FpnOMacroAssembler_pnMRegisterImpl__v_: assembler_sparc.o;
  3.4219 +text: .text%__1cVRegistersForDebuggingOsave_registers6FpnOMacroAssembler__v_: assembler_sparc.o;
  3.4220 +text: .text%__1cJBytecodesKinitialize6F_v_;
  3.4221 +text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_;
  3.4222 +text: .text%__1cObytecodes_init6F_v_;
  3.4223 +text: .text%__1cLOptoRuntimeIgenerate6FpnFciEnv__v_;
  3.4224 +text: .text%__1cJBytecodesNpd_initialize6F_v_;
  3.4225 +text: .text%__1cHCompileRpd_compiler2_init6F_v_;
  3.4226 +text: .text%__1cKC2CompilerKinitialize6M_v_;
  3.4227 +text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_;
  3.4228 +text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_;
  3.4229 +text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_;
  3.4230 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o;
  3.4231 +text: .text%__1cMciKlassKlassEmake6F_p0_;
  3.4232 +text: .text%__1cIciMethodMvtable_index6M_i_;
  3.4233 +text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_;
  3.4234 +text: .text%__1cJLoadFNodeMstore_Opcode6kM_i_: classes.o;
  3.4235 +text: .text%__1cNTemplateTableGsipush6F_v_;
  3.4236 +text: .text%__1cQUncommonTrapBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
  3.4237 +text: .text%__1cNTemplateTableGldc2_w6F_v_;
  3.4238 +text: .text%__1cNExceptionBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
  3.4239 +text: .text%__1cNTemplateTableFiload6F_v_;
  3.4240 +text: .text%__1cNTemplateTableLfast_iload26F_v_;
  3.4241 +text: .text%__1cNTemplateTableKfast_iload6F_v_;
  3.4242 +text: .text%__1cNTemplateTableFlload6F_v_;
  3.4243 +text: .text%__1cNTemplateTableFfload6F_v_;
  3.4244 +text: .text%__1cNTemplateTableFdload6F_v_;
  3.4245 +text: .text%__1cNTemplateTableFaload6F_v_;
  3.4246 +text: .text%__1cNTemplateTableKwide_iload6F_v_;
  3.4247 +text: .text%__1cNTemplateTableKwide_lload6F_v_;
  3.4248 +text: .text%__1cNTemplateTableKwide_fload6F_v_;
  3.4249 +text: .text%__1cNTemplateTableKwide_dload6F_v_;
  3.4250 +text: .text%__1cNTemplateTableKwide_aload6F_v_;
  3.4251 +text: .text%__1cNTemplateTableGiaload6F_v_;
  3.4252 +text: .text%__1cNTemplateTableGlaload6F_v_;
  3.4253 +text: .text%__1cNTemplateTableGfaload6F_v_;
  3.4254 +text: .text%__1cNTemplateTableGdaload6F_v_;
  3.4255 +text: .text%__1cNTemplateTableGbipush6F_v_;
  3.4256 +text: .text%__1cLMoveF2INodeJideal_reg6kM_I_: classes.o;
  3.4257 +text: .text%__1cLMoveL2DNodeJideal_reg6kM_I_: classes.o;
  3.4258 +text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_;
  3.4259 +text: .text%__1cHOrLNodeGadd_id6kM_pknEType__: classes.o;
  3.4260 +text: .text%__1cHOrLNodeJideal_reg6kM_I_: classes.o;
  3.4261 +text: .text%__1cNTemplateTableF_goto6F_v_;
  3.4262 +text: .text%__1cNTemplateTableGgoto_w6F_v_;
  3.4263 +text: .text%__1cNTemplateTableFjsr_w6F_v_;
  3.4264 +text: .text%__1cNTemplateTableDjsr6F_v_;
  3.4265 +text: .text%__1cXreferenceProcessor_init6F_v_;
  3.4266 +text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_;
  3.4267 +text: .text%__1cStemplateTable_init6F_v_;
  3.4268 +text: .text%__1cNTemplateTableNpd_initialize6F_v_;
  3.4269 +text: .text%__1cNTemplateTableDnop6F_v_;
  3.4270 +text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_;
  3.4271 +text: .text%__1cNTemplateTableLaconst_null6F_v_;
  3.4272 +text: .text%__1cKPSYoungGenbCreset_survivors_after_shrink6M_v_;
  3.4273 +text: .text%__1cKPSYoungGenQlimit_gen_shrink6MI_I_;
  3.4274 +text: .text%__1cKPSYoungGenRavailable_to_live6M_I_;
  3.4275 +text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_;
  3.4276 +text: .text%__1cLOptoRuntimeUmultianewarray2_Type6F_pknITypeFunc__;
  3.4277 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: ad_sparc_pipeline.o;
  3.4278 +text: .text%__1cUAdjoiningGenerations2t6MnNReservedSpace_IIIIIII_v_;
  3.4279 +text: .text%__1cWAdjoiningVirtualSpaces2t6MnNReservedSpace_III_v_;
  3.4280 +text: .text%__1cOchunkpool_init6F_v_;
  3.4281 +text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_;
  3.4282 +text: .text%__1cJArgumentsWinit_system_properties6F_v_;
  3.4283 +text: .text%__1cMSysClassPathPexpand_endorsed6M_v_;
  3.4284 +text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_;
  3.4285 +text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_;
  3.4286 +text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_;
  3.4287 +text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_;
  3.4288 +text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_;
  3.4289 +text: .text%__1cLStatSamplerGengage6F_v_;
  3.4290 +text: .text%__1cNStubGeneratorbNgenerate_flush_callers_register_windows6M_pC_: stubGenerator_sparc.o;
  3.4291 +text: .text%__1cSstubRoutines_init16F_v_;
  3.4292 +text: .text%__1cSstubRoutines_init26F_v_;
  3.4293 +text: .text%__1cNStubGeneratorbIgenerate_handler_for_unsafe_access6M_pC_: stubGenerator_sparc.o;
  3.4294 +text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_sparc.o;
  3.4295 +text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_sparc.o;
  3.4296 +text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_sparc.o;
  3.4297 +text: .text%__1cNStubGeneratorSgenerate_test_stop6M_pC_: stubGenerator_sparc.o;
  3.4298 +text: .text%__1cNStubGeneratorbEgenerate_partial_subtype_check6M_pC_: stubGenerator_sparc.o;
  3.4299 +text: .text%__1cISubFNodeDsub6kMpknEType_3_3_;
  3.4300 +text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_;
  3.4301 +text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_;
  3.4302 +text: .text%__1cLStatSamplerHdestroy6F_v_;
  3.4303 +text: .text%__1cLStatSamplerJdisengage6F_v_;
  3.4304 +text: .text%__1cNRegisterSaverYrestore_result_registers6FpnOMacroAssembler__v_;
  3.4305 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
  3.4306 +text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
  3.4307 +text: .text%__1cOMacroAssemblerNset_vm_result6MpnMRegisterImpl__v_;
  3.4308 +text: .text%__1cORuntimeServiceEinit6F_v_;
  3.4309 +text: .text%__1cOMacroAssemblerVverify_oop_subroutine6M_v_;
  3.4310 +text: .text%__1cOMacroAssemblerPstop_subroutine6M_v_;
  3.4311 +text: .text%__1cOMacroAssemblerElcmp6MpnMRegisterImpl_2222_v_;
  3.4312 +text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_;
  3.4313 +text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_22222_v_;
  3.4314 +text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_22222_v_;
  3.4315 +text: .text%__1cOMacroAssemblerFlushr6MpnMRegisterImpl_22222_v_;
  3.4316 +text: .text%__1cLOptoRuntimeUmultianewarray5_Type6F_pknITypeFunc__;
  3.4317 +text: .text%__1cLOptoRuntimeUmultianewarray4_Type6F_pknITypeFunc__;
  3.4318 +text: .text%__1cLOptoRuntimeUmultianewarray3_Type6F_pknITypeFunc__;
  3.4319 +text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o;
  3.4320 +text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_;
  3.4321 +text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_;
  3.4322 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o;
  3.4323 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o;
  3.4324 +text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4325 +text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o;
  3.4326 +text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4327 +text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o;
  3.4328 +text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpnMRegisterImpl_pCi_v_;
  3.4329 +text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_;
  3.4330 +text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o;
  3.4331 +text: .text%__1cNTemplateTableGaaload6F_v_;
  3.4332 +text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4333 +text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o;
  3.4334 +text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o;
  3.4335 +text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_;
  3.4336 +text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4337 +text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o;
  3.4338 +text: .text%__1cQPlaceholderTable2t6Mi_v_;
  3.4339 +text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o;
  3.4340 +text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o;
  3.4341 +text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o;
  3.4342 +text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o;
  3.4343 +text: .text%__1cbAPSGCAdaptivePolicyCounters2t6MpkciipnUPSAdaptiveSizePolicy__v_;
  3.4344 +text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_;
  3.4345 +text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o;
  3.4346 +text: .text%__1cNTemplateTableNinvokespecial6Fi_v_;
  3.4347 +text: .text%__1cNTemplateTableMinvokestatic6Fi_v_;
  3.4348 +text: .text%__1cNTemplateTablebDinvokeinterface_object_method6FpnMRegisterImpl_222_v_;
  3.4349 +text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_;
  3.4350 +text: .text%__1cNTemplateTableE_new6F_v_;
  3.4351 +text: .text%__1cNTemplateTableInewarray6F_v_;
  3.4352 +text: .text%__1cNTemplateTableJanewarray6F_v_;
  3.4353 +text: .text%__1cNTemplateTableLarraylength6F_v_;
  3.4354 +text: .text%__1cNTemplateTableJcheckcast6F_v_;
  3.4355 +text: .text%__1cNTemplateTableKinstanceof6F_v_;
  3.4356 +text: .text%__1cNTemplateTableL_breakpoint6F_v_;
  3.4357 +text: .text%__1cNTemplateTableGathrow6F_v_;
  3.4358 +text: .text%__1cNTemplateTableMmonitorenter6F_v_;
  3.4359 +text: .text%__1cNTemplateTableLmonitorexit6F_v_;
  3.4360 +text: .text%__1cNTemplateTableEwide6F_v_;
  3.4361 +text: .text%__1cNTemplateTableOmultianewarray6F_v_;
  3.4362 +text: .text%__1cTcompilerOracle_init6F_v_;
  3.4363 +text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4364 +text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__;
  3.4365 +text: .text%__1cZCompiledArgumentOopFinderRhandle_oop_offset6M_v_: frame.o;
  3.4366 +text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_;
  3.4367 +text: .text%__1cHGCStats2t6M_v_;
  3.4368 +text: .text%__1cNGCTaskManager2t6MI_v_;
  3.4369 +text: .text%__1cNGCTaskManagerKinitialize6M_v_;
  3.4370 +text: .text%__1cNGCTaskManagerKthreads_do6MpnNThreadClosure__v_;
  3.4371 +text: .text%__1cPPerfDataManagerHdestroy6F_v_;
  3.4372 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
  3.4373 +text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_;
  3.4374 +text: .text%__1cWResolveOopMapConflictsUdo_potential_rewrite6MpnGThread__nMmethodHandle__;
  3.4375 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o;
  3.4376 +text: .text%__1cOThreadCriticalKinitialize6F_v_;
  3.4377 +text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_;
  3.4378 +text: .text%__1cICodeHeap2t6M_v_;
  3.4379 +text: .text%__1cDhpiKinitialize6F_i_;
  3.4380 +text: .text%__1cMPerfDataList2T6M_v_;
  3.4381 +text: .text%__1cNWatcherThreadDrun6M_v_;
  3.4382 +text: .text%__1cNWatcherThreadEstop6F_v_;
  3.4383 +text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o;
  3.4384 +text: .text%__1cFStateO_sub_Op_CMoveD6MpknENode__v_;
  3.4385 +text: .text%__1cFStateP_sub_Op_MoveF2I6MpknENode__v_;
  3.4386 +text: .text%__1cKDictionary2t6Mi_v_;
  3.4387 +text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_;
  3.4388 +text: .text%__1cNeventlog_init6F_v_;
  3.4389 +text: .text%__1cScheck_ThreadShadow6F_v_;
  3.4390 +text: .text%__1cOtailjmpIndNodeLout_RegMask6kM_rknHRegMask__;
  3.4391 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o;
  3.4392 +text: .text%__1cFframeVinterpreter_frame_mdp6kM_pC_;
  3.4393 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: phase.o;
  3.4394 +text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_;
  3.4395 +text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_;
  3.4396 +text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o;
  3.4397 +text: .text%__1cPperfMemory_exit6F_v_;
  3.4398 +text: .text%__1cPperfMemory_init6F_v_;
  3.4399 +text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_;
  3.4400 +text: .text%__1cNTemplateTableHfastore6F_v_;
  3.4401 +text: .text%__1cNTemplateTableHdastore6F_v_;
  3.4402 +text: .text%__1cNTemplateTableHaastore6F_v_;
  3.4403 +text: .text%__1cNTemplateTableHbastore6F_v_;
  3.4404 +text: .text%__1cNTemplateTableHsastore6F_v_;
  3.4405 +text: .text%__1cOcodeCache_init6F_v_;
  3.4406 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o;
  3.4407 +text: .text%__1cNTemplateTableDpop6F_v_;
  3.4408 +text: .text%__1cNTemplateTableEpop26F_v_;
  3.4409 +text: .text%__1cNTemplateTableDdup6F_v_;
  3.4410 +text: .text%__1cNTemplateTableGdup_x16F_v_;
  3.4411 +text: .text%__1cNTemplateTableGdup_x26F_v_;
  3.4412 +text: .text%__1cNTemplateTableEdup26F_v_;
  3.4413 +text: .text%__1cNTemplateTableHdup2_x16F_v_;
  3.4414 +text: .text%__1cNTemplateTableHdup2_x26F_v_;
  3.4415 +text: .text%__1cNTemplateTableEswap6F_v_;
  3.4416 +text: .text%__1cNCollectedHeap2t6M_v_;
  3.4417 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o;
  3.4418 +text: .text%__1cNTemplateTableEirem6F_v_;
  3.4419 +text: .text%__1cNTemplateTableElmul6F_v_;
  3.4420 +text: .text%__1cNTemplateTableHlastore6F_v_;
  3.4421 +text: .text%__1cNTemplateTableGbaload6F_v_;
  3.4422 +text: .text%__1cNTemplateTableGcaload6F_v_;
  3.4423 +text: .text%__1cNTemplateTableMfast_icaload6F_v_;
  3.4424 +text: .text%__1cNTemplateTableGsaload6F_v_;
  3.4425 +text: .text%__1cKPSYoungGenPinitialize_work6M_v_;
  3.4426 +text: .text%__1cKPSYoungGenKinitialize6MnNReservedSpace_I_v_;
  3.4427 +text: .text%__1cKPSYoungGenYinitialize_virtual_space6MnNReservedSpace_I_v_;
  3.4428 +text: .text%__1cKPSYoungGen2t6MIII_v_;
  3.4429 +text: .text%__1cNTemplateTableHaload_06F_v_;
  3.4430 +text: .text%__1cNTemplateTableGistore6F_v_;
  3.4431 +text: .text%__1cNTemplateTableGlstore6F_v_;
  3.4432 +text: .text%__1cNTemplateTableGfstore6F_v_;
  3.4433 +text: .text%__1cNTemplateTableGdstore6F_v_;
  3.4434 +text: .text%__1cNTemplateTableGastore6F_v_;
  3.4435 +text: .text%__1cNTemplateTableLwide_istore6F_v_;
  3.4436 +text: .text%__1cNTemplateTableLwide_lstore6F_v_;
  3.4437 +text: .text%__1cNTemplateTableLwide_fstore6F_v_;
  3.4438 +text: .text%__1cNTemplateTableLwide_dstore6F_v_;
  3.4439 +text: .text%__1cNTemplateTableLwide_astore6F_v_;
  3.4440 +text: .text%__1cNTemplateTableHiastore6F_v_;
  3.4441 +text: .text%__1cNTemplateTableEldiv6F_v_;
  3.4442 +text: .text%__1cNTemplateTableLtableswitch6F_v_;
  3.4443 +text: .text%__1cNTemplateTableMlookupswitch6F_v_;
  3.4444 +text: .text%__1cNTemplateTableRfast_linearswitch6F_v_;
  3.4445 +text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_;
  3.4446 +text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_;
  3.4447 +text: .text%__1cJPSPermGen2t6MnNReservedSpace_IIIIpkci_v_;
  3.4448 +text: .text%__1cNCompileBrokerQset_should_block6F_v_;
  3.4449 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o;
  3.4450 +text: .text%__1cNTemplateTableIgetfield6Fi_v_;
  3.4451 +text: .text%__1cNTemplateTableJgetstatic6Fi_v_;
  3.4452 +text: .text%__1cIPSOldGenKinitialize6MnNReservedSpace_Ipkci_v_;
  3.4453 +text: .text%__1cIPSOldGen2t6MIIIpkci_v_;
  3.4454 +text: .text%__1cIPSOldGen2t6MnNReservedSpace_IIIIpkci_v_;
  3.4455 +text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
  3.4456 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o;
  3.4457 +text: .text%__1cNTemplateTableIputfield6Fi_v_;
  3.4458 +text: .text%__1cNTemplateTableJputstatic6Fi_v_;
  3.4459 +text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o;
  3.4460 +text: .text%__1cLPSMarkSweepKinitialize6F_v_;
  3.4461 +text: .text%__1cNTemplateTableIwide_ret6F_v_;
  3.4462 +text: .text%__1cNTemplateTableElrem6F_v_;
  3.4463 +text: .text%__1cNTemplateTableElshl6F_v_;
  3.4464 +text: .text%__1cNTemplateTableElshr6F_v_;
  3.4465 +text: .text%__1cNTemplateTableFlushr6F_v_;
  3.4466 +text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_;
  3.4467 +text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_;
  3.4468 +text: .text%__1cNTemplateTableEineg6F_v_;
  3.4469 +text: .text%__1cNTemplateTableElneg6F_v_;
  3.4470 +text: .text%__1cNTemplateTableEfneg6F_v_;
  3.4471 +text: .text%__1cNTemplateTableEdneg6F_v_;
  3.4472 +text: .text%__1cNTemplateTableEiinc6F_v_;
  3.4473 +text: .text%__1cNTemplateTableJwide_iinc6F_v_;
  3.4474 +text: .text%__1cKPSScavengeKinitialize6F_v_;
  3.4475 +text: .text%__1cNTemplateTableElcmp6F_v_;
  3.4476 +text: .text%__1cWcompilationPolicy_init6F_v_;
  3.4477 +text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_;
  3.4478 +text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o;
  3.4479 +text: .text%__1cSPSPromotionManagerKinitialize6F_v_;
  3.4480 +text: .text%__1cNTemplateTableDret6F_v_;
     4.1 --- a/make/windows/build.make	Tue Sep 21 06:58:44 2010 -0700
     4.2 +++ b/make/windows/build.make	Wed Sep 22 12:54:51 2010 -0400
     4.3 @@ -1,5 +1,5 @@
     4.4  #
     4.5 -# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
     4.6 +# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
     4.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4.8  #
     4.9  # This code is free software; you can redistribute it and/or modify it
    4.10 @@ -19,7 +19,7 @@
    4.11  # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    4.12  # or visit www.oracle.com if you need additional information or have any
    4.13  # questions.
    4.14 -#  
    4.15 +#
    4.16  #
    4.17  
    4.18  # Note: this makefile is invoked both from build.bat and from the J2SE
    4.19 @@ -72,13 +72,11 @@
    4.20  !endif
    4.21  !endif
    4.22  
    4.23 -!if "$(BUILDARCH)" != "amd64"
    4.24  !if "$(BUILDARCH)" != "ia64"
    4.25  !ifndef CC_INTERP
    4.26  FORCE_TIERED=1
    4.27  !endif
    4.28  !endif
    4.29 -!endif
    4.30  
    4.31  !if "$(BUILDARCH)" == "amd64"
    4.32  Platform_arch=x86
    4.33 @@ -135,7 +133,7 @@
    4.34  # We can have update versions like "01a", but Windows requires
    4.35  # we use only integers in the file version field.  So:
    4.36  # JDK_UPDATE_VER = JDK_UPDATE_VERSION * 10 + EXCEPTION_VERSION
    4.37 -# 
    4.38 +#
    4.39  JDK_UPDATE_VER=0
    4.40  JDK_BUILD_NUMBER=0
    4.41  
    4.42 @@ -148,7 +146,7 @@
    4.43  #       1.6.0_01a-b02 will be 6.0.11.2
    4.44  #
    4.45  # JDK_* variables are defined in make/hotspot_version or on command line
    4.46 -# 
    4.47 +#
    4.48  JDK_VER=$(JDK_MINOR_VER),$(JDK_MICRO_VER),$(JDK_UPDATE_VER),$(JDK_BUILD_NUMBER)
    4.49  JDK_DOTVER=$(JDK_MINOR_VER).$(JDK_MICRO_VER).$(JDK_UPDATE_VER).$(JDK_BUILD_NUMBER)
    4.50  !if "$(JRE_RELEASE_VERSION)" == ""
    4.51 @@ -162,7 +160,7 @@
    4.52  
    4.53  # Hotspot Express VM FileVersion:
    4.54  # 10.0-b<yz> will have DLL version 10.0.0.yz (need 4 numbers).
    4.55 -# 
    4.56 +#
    4.57  # HS_* variables are defined in make/hotspot_version
    4.58  #
    4.59  HS_VER=$(HS_MAJOR_VER),$(HS_MINOR_VER),0,$(HS_BUILD_NUMBER)
    4.60 @@ -182,7 +180,7 @@
    4.61  
    4.62  
    4.63  # We don't support SA on ia64, and we can't
    4.64 -# build it if we are using a version of Vis Studio 
    4.65 +# build it if we are using a version of Vis Studio
    4.66  # older than .Net 2003.
    4.67  # SA_INCLUDE and SA_LIB are hold-overs from a previous
    4.68  # implementation in which we could build SA using
     5.1 --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
     5.2 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
     5.3 @@ -57,13 +57,12 @@
     5.4  #endif
     5.5  }
     5.6  
     5.7 -#ifdef TIERED
     5.8  
     5.9  void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
    5.10    __ bind(_entry);
    5.11    __ set(_bci, G4);
    5.12    __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
    5.13 -  __ delayed()->nop();
    5.14 +  __ delayed()->mov_or_nop(_method->as_register(), G5);
    5.15    ce->add_call_info_here(_info);
    5.16    ce->verify_oop_map(_info);
    5.17  
    5.18 @@ -71,7 +70,6 @@
    5.19    __ delayed()->nop();
    5.20  }
    5.21  
    5.22 -#endif // TIERED
    5.23  
    5.24  void DivByZeroStub::emit_code(LIR_Assembler* ce) {
    5.25    if (_offset != -1) {
     6.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
     6.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
     6.3 @@ -73,6 +73,7 @@
     6.4  // some useful constant RInfo's:
     6.5  LIR_Opr FrameMap::in_long_opr;
     6.6  LIR_Opr FrameMap::out_long_opr;
     6.7 +LIR_Opr FrameMap::g1_long_single_opr;
     6.8  
     6.9  LIR_Opr FrameMap::F0_opr;
    6.10  LIR_Opr FrameMap::F0_double_opr;
    6.11 @@ -238,6 +239,7 @@
    6.12  
    6.13    in_long_opr    = as_long_opr(I0);
    6.14    out_long_opr   = as_long_opr(O0);
    6.15 +  g1_long_single_opr    = as_long_single_opr(G1);
    6.16  
    6.17    G0_opr = as_opr(G0);
    6.18    G1_opr = as_opr(G1);
     7.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
     7.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
     7.3 @@ -103,6 +103,7 @@
     7.4  
     7.5    static LIR_Opr in_long_opr;
     7.6    static LIR_Opr out_long_opr;
     7.7 +  static LIR_Opr g1_long_single_opr;
     7.8  
     7.9    static LIR_Opr F0_opr;
    7.10    static LIR_Opr F0_double_opr;
    7.11 @@ -113,18 +114,25 @@
    7.12   private:
    7.13    static FloatRegister  _fpu_regs [nof_fpu_regs];
    7.14  
    7.15 +  static LIR_Opr as_long_single_opr(Register r) {
    7.16 +    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
    7.17 +  }
    7.18 +  static LIR_Opr as_long_pair_opr(Register r) {
    7.19 +    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
    7.20 +  }
    7.21 +
    7.22   public:
    7.23  
    7.24  #ifdef _LP64
    7.25    static LIR_Opr as_long_opr(Register r) {
    7.26 -    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
    7.27 +    return as_long_single_opr(r);
    7.28    }
    7.29    static LIR_Opr as_pointer_opr(Register r) {
    7.30 -    return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
    7.31 +    return as_long_single_opr(r);
    7.32    }
    7.33  #else
    7.34    static LIR_Opr as_long_opr(Register r) {
    7.35 -    return LIR_OprFact::double_cpu(cpu_reg2rnr(r->successor()), cpu_reg2rnr(r));
    7.36 +    return as_long_pair_opr(r);
    7.37    }
    7.38    static LIR_Opr as_pointer_opr(Register r) {
    7.39      return as_opr(r);
     8.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
     8.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
     8.3 @@ -1625,13 +1625,18 @@
     8.4  
     8.5  void LIR_Assembler::return_op(LIR_Opr result) {
     8.6    // the poll may need a register so just pick one that isn't the return register
     8.7 -#ifdef TIERED
     8.8 +#if defined(TIERED) && !defined(_LP64)
     8.9    if (result->type_field() == LIR_OprDesc::long_type) {
    8.10      // Must move the result to G1
    8.11      // Must leave proper result in O0,O1 and G1 (TIERED only)
    8.12      __ sllx(I0, 32, G1);          // Shift bits into high G1
    8.13      __ srl (I1, 0, I1);           // Zero extend O1 (harmless?)
    8.14      __ or3 (I1, G1, G1);          // OR 64 bits into G1
    8.15 +#ifdef ASSERT
    8.16 +    // mangle it so any problems will show up
    8.17 +    __ set(0xdeadbeef, I0);
    8.18 +    __ set(0xdeadbeef, I1);
    8.19 +#endif
    8.20    }
    8.21  #endif // TIERED
    8.22    __ set((intptr_t)os::get_polling_page(), L0);
    8.23 @@ -2424,6 +2429,192 @@
    8.24  }
    8.25  
    8.26  
    8.27 +void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
    8.28 +                                        ciMethodData *md, ciProfileData *data,
    8.29 +                                        Register recv, Register tmp1, Label* update_done) {
    8.30 +  uint i;
    8.31 +  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    8.32 +    Label next_test;
    8.33 +    // See if the receiver is receiver[n].
    8.34 +    Address receiver_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
    8.35 +                          mdo_offset_bias);
    8.36 +    __ ld_ptr(receiver_addr, tmp1);
    8.37 +    __ verify_oop(tmp1);
    8.38 +    __ cmp(recv, tmp1);
    8.39 +    __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
    8.40 +    __ delayed()->nop();
    8.41 +    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
    8.42 +                      mdo_offset_bias);
    8.43 +    __ ld_ptr(data_addr, tmp1);
    8.44 +    __ add(tmp1, DataLayout::counter_increment, tmp1);
    8.45 +    __ st_ptr(tmp1, data_addr);
    8.46 +    __ ba(false, *update_done);
    8.47 +    __ delayed()->nop();
    8.48 +    __ bind(next_test);
    8.49 +  }
    8.50 +
    8.51 +  // Didn't find receiver; find next empty slot and fill it in
    8.52 +  for (i = 0; i < VirtualCallData::row_limit(); i++) {
    8.53 +    Label next_test;
    8.54 +    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
    8.55 +                      mdo_offset_bias);
    8.56 +    load(recv_addr, tmp1, T_OBJECT);
    8.57 +    __ br_notnull(tmp1, false, Assembler::pt, next_test);
    8.58 +    __ delayed()->nop();
    8.59 +    __ st_ptr(recv, recv_addr);
    8.60 +    __ set(DataLayout::counter_increment, tmp1);
    8.61 +    __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)) -
    8.62 +              mdo_offset_bias);
    8.63 +    __ ba(false, *update_done);
    8.64 +    __ delayed()->nop();
    8.65 +    __ bind(next_test);
    8.66 +  }
    8.67 +}
    8.68 +
    8.69 +
    8.70 +void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
    8.71 +                                    ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
    8.72 +  md = method->method_data();
    8.73 +  if (md == NULL) {
    8.74 +    bailout("out of memory building methodDataOop");
    8.75 +    return;
    8.76 +  }
    8.77 +  data = md->bci_to_data(bci);
    8.78 +  assert(data != NULL,       "need data for checkcast");
    8.79 +  assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
    8.80 +  if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
    8.81 +    // The offset is large so bias the mdo by the base of the slot so
    8.82 +    // that the ld can use simm13s to reference the slots of the data
    8.83 +    mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
    8.84 +  }
    8.85 +}
    8.86 +
    8.87 +void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
    8.88 +  // we always need a stub for the failure case.
    8.89 +  CodeStub* stub = op->stub();
    8.90 +  Register obj = op->object()->as_register();
    8.91 +  Register k_RInfo = op->tmp1()->as_register();
    8.92 +  Register klass_RInfo = op->tmp2()->as_register();
    8.93 +  Register dst = op->result_opr()->as_register();
    8.94 +  Register Rtmp1 = op->tmp3()->as_register();
    8.95 +  ciKlass* k = op->klass();
    8.96 +
    8.97 +
    8.98 +  if (obj == k_RInfo) {
    8.99 +    k_RInfo = klass_RInfo;
   8.100 +    klass_RInfo = obj;
   8.101 +  }
   8.102 +
   8.103 +  ciMethodData* md;
   8.104 +  ciProfileData* data;
   8.105 +  int mdo_offset_bias = 0;
   8.106 +  if (op->should_profile()) {
   8.107 +    ciMethod* method = op->profiled_method();
   8.108 +    assert(method != NULL, "Should have method");
   8.109 +    setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
   8.110 +
   8.111 +    Label not_null;
   8.112 +    __ br_notnull(obj, false, Assembler::pn, not_null);
   8.113 +    __ delayed()->nop();
   8.114 +    Register mdo      = k_RInfo;
   8.115 +    Register data_val = Rtmp1;
   8.116 +    jobject2reg(md->constant_encoding(), mdo);
   8.117 +    if (mdo_offset_bias > 0) {
   8.118 +      __ set(mdo_offset_bias, data_val);
   8.119 +      __ add(mdo, data_val, mdo);
   8.120 +    }
   8.121 +    Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
   8.122 +    __ ldub(flags_addr, data_val);
   8.123 +    __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
   8.124 +    __ stb(data_val, flags_addr);
   8.125 +    __ ba(false, *obj_is_null);
   8.126 +    __ delayed()->nop();
   8.127 +    __ bind(not_null);
   8.128 +  } else {
   8.129 +    __ br_null(obj, false, Assembler::pn, *obj_is_null);
   8.130 +    __ delayed()->nop();
   8.131 +  }
   8.132 +
   8.133 +  Label profile_cast_failure, profile_cast_success;
   8.134 +  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
   8.135 +  Label *success_target = op->should_profile() ? &profile_cast_success : success;
   8.136 +
   8.137 +  // patching may screw with our temporaries on sparc,
   8.138 +  // so let's do it before loading the class
   8.139 +  if (k->is_loaded()) {
   8.140 +    jobject2reg(k->constant_encoding(), k_RInfo);
   8.141 +  } else {
   8.142 +    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
   8.143 +  }
   8.144 +  assert(obj != k_RInfo, "must be different");
   8.145 +
   8.146 +  // get object class
   8.147 +  // not a safepoint as obj null check happens earlier
   8.148 +  load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
   8.149 +  if (op->fast_check()) {
   8.150 +    assert_different_registers(klass_RInfo, k_RInfo);
   8.151 +    __ cmp(k_RInfo, klass_RInfo);
   8.152 +    __ brx(Assembler::notEqual, false, Assembler::pt, *failure_target);
   8.153 +    __ delayed()->nop();
   8.154 +  } else {
   8.155 +    bool need_slow_path = true;
   8.156 +    if (k->is_loaded()) {
   8.157 +      if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
   8.158 +        need_slow_path = false;
   8.159 +      // perform the fast part of the checking logic
   8.160 +      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
   8.161 +                                       (need_slow_path ? success_target : NULL),
   8.162 +                                       failure_target, NULL,
   8.163 +                                       RegisterOrConstant(k->super_check_offset()));
   8.164 +    } else {
   8.165 +      // perform the fast part of the checking logic
   8.166 +      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target,
   8.167 +                                       failure_target, NULL);
   8.168 +    }
   8.169 +    if (need_slow_path) {
   8.170 +      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
   8.171 +      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
   8.172 +      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
   8.173 +      __ delayed()->nop();
   8.174 +      __ cmp(G3, 0);
   8.175 +      __ br(Assembler::equal, false, Assembler::pn, *failure_target);
   8.176 +      __ delayed()->nop();
   8.177 +      // Fall through to success case
   8.178 +    }
   8.179 +  }
   8.180 +
   8.181 +  if (op->should_profile()) {
   8.182 +    Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
   8.183 +    assert_different_registers(obj, mdo, recv, tmp1);
   8.184 +    __ bind(profile_cast_success);
   8.185 +    jobject2reg(md->constant_encoding(), mdo);
   8.186 +    if (mdo_offset_bias > 0) {
   8.187 +      __ set(mdo_offset_bias, tmp1);
   8.188 +      __ add(mdo, tmp1, mdo);
   8.189 +    }
   8.190 +    load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
   8.191 +    type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
   8.192 +    // Jump over the failure case
   8.193 +    __ ba(false, *success);
   8.194 +    __ delayed()->nop();
   8.195 +    // Cast failure case
   8.196 +    __ bind(profile_cast_failure);
   8.197 +    jobject2reg(md->constant_encoding(), mdo);
   8.198 +    if (mdo_offset_bias > 0) {
   8.199 +      __ set(mdo_offset_bias, tmp1);
   8.200 +      __ add(mdo, tmp1, mdo);
   8.201 +    }
   8.202 +    Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
   8.203 +    __ ld_ptr(data_addr, tmp1);
   8.204 +    __ sub(tmp1, DataLayout::counter_increment, tmp1);
   8.205 +    __ st_ptr(tmp1, data_addr);
   8.206 +    __ ba(false, *failure);
   8.207 +    __ delayed()->nop();
   8.208 +  }
   8.209 +  __ ba(false, *success);
   8.210 +  __ delayed()->nop();
   8.211 +}
   8.212 +
   8.213  void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
   8.214    LIR_Code code = op->code();
   8.215    if (code == lir_store_check) {
   8.216 @@ -2434,193 +2625,106 @@
   8.217      Register Rtmp1 = op->tmp3()->as_register();
   8.218  
   8.219      __ verify_oop(value);
   8.220 -
   8.221      CodeStub* stub = op->stub();
   8.222 -    Label done;
   8.223 -    __ cmp(value, 0);
   8.224 -    __ br(Assembler::equal, false, Assembler::pn, done);
   8.225 -    __ delayed()->nop();
   8.226 +    // check if it needs to be profiled
   8.227 +    ciMethodData* md;
   8.228 +    ciProfileData* data;
   8.229 +    int mdo_offset_bias = 0;
   8.230 +    if (op->should_profile()) {
   8.231 +      ciMethod* method = op->profiled_method();
   8.232 +      assert(method != NULL, "Should have method");
   8.233 +      setup_md_access(method, op->profiled_bci(), md, data, mdo_offset_bias);
   8.234 +    }
   8.235 +    Label profile_cast_success, profile_cast_failure, done;
   8.236 +    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
   8.237 +    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
   8.238 +
   8.239 +    if (op->should_profile()) {
   8.240 +      Label not_null;
   8.241 +      __ br_notnull(value, false, Assembler::pn, not_null);
   8.242 +      __ delayed()->nop();
   8.243 +      Register mdo      = k_RInfo;
   8.244 +      Register data_val = Rtmp1;
   8.245 +      jobject2reg(md->constant_encoding(), mdo);
   8.246 +      if (mdo_offset_bias > 0) {
   8.247 +        __ set(mdo_offset_bias, data_val);
   8.248 +        __ add(mdo, data_val, mdo);
   8.249 +      }
   8.250 +      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
   8.251 +      __ ldub(flags_addr, data_val);
   8.252 +      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
   8.253 +      __ stb(data_val, flags_addr);
   8.254 +      __ ba(false, done);
   8.255 +      __ delayed()->nop();
   8.256 +      __ bind(not_null);
   8.257 +    } else {
   8.258 +      __ br_null(value, false, Assembler::pn, done);
   8.259 +      __ delayed()->nop();
   8.260 +    }
   8.261      load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
   8.262      load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
   8.263  
   8.264      // get instance klass
   8.265      load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
   8.266      // perform the fast part of the checking logic
   8.267 -    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, &done, stub->entry(), NULL);
   8.268 +    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
   8.269  
   8.270      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
   8.271      assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
   8.272      __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
   8.273      __ delayed()->nop();
   8.274      __ cmp(G3, 0);
   8.275 -    __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
   8.276 +    __ br(Assembler::equal, false, Assembler::pn, *failure_target);
   8.277      __ delayed()->nop();
   8.278 +    // fall through to the success case
   8.279 +
   8.280 +    if (op->should_profile()) {
   8.281 +      Register mdo  = klass_RInfo, recv = k_RInfo, tmp1 = Rtmp1;
   8.282 +      assert_different_registers(value, mdo, recv, tmp1);
   8.283 +      __ bind(profile_cast_success);
   8.284 +      jobject2reg(md->constant_encoding(), mdo);
   8.285 +      if (mdo_offset_bias > 0) {
   8.286 +        __ set(mdo_offset_bias, tmp1);
   8.287 +        __ add(mdo, tmp1, mdo);
   8.288 +      }
   8.289 +      load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
   8.290 +      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
   8.291 +      __ ba(false, done);
   8.292 +      __ delayed()->nop();
   8.293 +      // Cast failure case
   8.294 +      __ bind(profile_cast_failure);
   8.295 +      jobject2reg(md->constant_encoding(), mdo);
   8.296 +      if (mdo_offset_bias > 0) {
   8.297 +        __ set(mdo_offset_bias, tmp1);
   8.298 +        __ add(mdo, tmp1, mdo);
   8.299 +      }
   8.300 +      Address data_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()) - mdo_offset_bias);
   8.301 +      __ ld_ptr(data_addr, tmp1);
   8.302 +      __ sub(tmp1, DataLayout::counter_increment, tmp1);
   8.303 +      __ st_ptr(tmp1, data_addr);
   8.304 +      __ ba(false, *stub->entry());
   8.305 +      __ delayed()->nop();
   8.306 +    }
   8.307      __ bind(done);
   8.308 -  } else if (op->code() == lir_checkcast) {
   8.309 -    // we always need a stub for the failure case.
   8.310 -    CodeStub* stub = op->stub();
   8.311 +  } else if (code == lir_checkcast) {
   8.312      Register obj = op->object()->as_register();
   8.313 -    Register k_RInfo = op->tmp1()->as_register();
   8.314 -    Register klass_RInfo = op->tmp2()->as_register();
   8.315      Register dst = op->result_opr()->as_register();
   8.316 -    Register Rtmp1 = op->tmp3()->as_register();
   8.317 -    ciKlass* k = op->klass();
   8.318 -
   8.319 -    if (obj == k_RInfo) {
   8.320 -      k_RInfo = klass_RInfo;
   8.321 -      klass_RInfo = obj;
   8.322 -    }
   8.323 -    if (op->profiled_method() != NULL) {
   8.324 -      ciMethod* method = op->profiled_method();
   8.325 -      int bci          = op->profiled_bci();
   8.326 -
   8.327 -      // We need two temporaries to perform this operation on SPARC,
   8.328 -      // so to keep things simple we perform a redundant test here
   8.329 -      Label profile_done;
   8.330 -      __ cmp(obj, 0);
   8.331 -      __ br(Assembler::notEqual, false, Assembler::pn, profile_done);
   8.332 -      __ delayed()->nop();
   8.333 -      // Object is null; update methodDataOop
   8.334 -      ciMethodData* md = method->method_data();
   8.335 -      if (md == NULL) {
   8.336 -        bailout("out of memory building methodDataOop");
   8.337 -        return;
   8.338 -      }
   8.339 -      ciProfileData* data = md->bci_to_data(bci);
   8.340 -      assert(data != NULL,       "need data for checkcast");
   8.341 -      assert(data->is_BitData(), "need BitData for checkcast");
   8.342 -      Register mdo      = k_RInfo;
   8.343 -      Register data_val = Rtmp1;
   8.344 -      jobject2reg(md->constant_encoding(), mdo);
   8.345 -
   8.346 -      int mdo_offset_bias = 0;
   8.347 -      if (!Assembler::is_simm13(md->byte_offset_of_slot(data, DataLayout::header_offset()) + data->size_in_bytes())) {
   8.348 -        // The offset is large so bias the mdo by the base of the slot so
   8.349 -        // that the ld can use simm13s to reference the slots of the data
   8.350 -        mdo_offset_bias = md->byte_offset_of_slot(data, DataLayout::header_offset());
   8.351 -        __ set(mdo_offset_bias, data_val);
   8.352 -        __ add(mdo, data_val, mdo);
   8.353 -      }
   8.354 -
   8.355 -
   8.356 -      Address flags_addr(mdo, md->byte_offset_of_slot(data, DataLayout::flags_offset()) - mdo_offset_bias);
   8.357 -      __ ldub(flags_addr, data_val);
   8.358 -      __ or3(data_val, BitData::null_seen_byte_constant(), data_val);
   8.359 -      __ stb(data_val, flags_addr);
   8.360 -      __ bind(profile_done);
   8.361 -    }
   8.362 -
   8.363 -    Label done;
   8.364 -    // patching may screw with our temporaries on sparc,
   8.365 -    // so let's do it before loading the class
   8.366 -    if (k->is_loaded()) {
   8.367 -      jobject2reg(k->constant_encoding(), k_RInfo);
   8.368 -    } else {
   8.369 -      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
   8.370 -    }
   8.371 -    assert(obj != k_RInfo, "must be different");
   8.372 -    __ cmp(obj, 0);
   8.373 -    __ br(Assembler::equal, false, Assembler::pn, done);
   8.374 -    __ delayed()->nop();
   8.375 -
   8.376 -    // get object class
   8.377 -    // not a safepoint as obj null check happens earlier
   8.378 -    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
   8.379 -    if (op->fast_check()) {
   8.380 -      assert_different_registers(klass_RInfo, k_RInfo);
   8.381 -      __ cmp(k_RInfo, klass_RInfo);
   8.382 -      __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
   8.383 -      __ delayed()->nop();
   8.384 -      __ bind(done);
   8.385 -    } else {
   8.386 -      bool need_slow_path = true;
   8.387 -      if (k->is_loaded()) {
   8.388 -        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
   8.389 -          need_slow_path = false;
   8.390 -        // perform the fast part of the checking logic
   8.391 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, noreg,
   8.392 -                                         (need_slow_path ? &done : NULL),
   8.393 -                                         stub->entry(), NULL,
   8.394 -                                         RegisterOrConstant(k->super_check_offset()));
   8.395 -      } else {
   8.396 -        // perform the fast part of the checking logic
   8.397 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7,
   8.398 -                                         &done, stub->entry(), NULL);
   8.399 -      }
   8.400 -      if (need_slow_path) {
   8.401 -        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
   8.402 -        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
   8.403 -        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
   8.404 -        __ delayed()->nop();
   8.405 -        __ cmp(G3, 0);
   8.406 -        __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
   8.407 -        __ delayed()->nop();
   8.408 -      }
   8.409 -      __ bind(done);
   8.410 -    }
   8.411 +    Label success;
   8.412 +    emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
   8.413 +    __ bind(success);
   8.414      __ mov(obj, dst);
   8.415    } else if (code == lir_instanceof) {
   8.416      Register obj = op->object()->as_register();
   8.417 -    Register k_RInfo = op->tmp1()->as_register();
   8.418 -    Register klass_RInfo = op->tmp2()->as_register();
   8.419      Register dst = op->result_opr()->as_register();
   8.420 -    Register Rtmp1 = op->tmp3()->as_register();
   8.421 -    ciKlass* k = op->klass();
   8.422 -
   8.423 -    Label done;
   8.424 -    if (obj == k_RInfo) {
   8.425 -      k_RInfo = klass_RInfo;
   8.426 -      klass_RInfo = obj;
   8.427 -    }
   8.428 -    // patching may screw with our temporaries on sparc,
   8.429 -    // so let's do it before loading the class
   8.430 -    if (k->is_loaded()) {
   8.431 -      jobject2reg(k->constant_encoding(), k_RInfo);
   8.432 -    } else {
   8.433 -      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
   8.434 -    }
   8.435 -    assert(obj != k_RInfo, "must be different");
   8.436 -    __ cmp(obj, 0);
   8.437 -    __ br(Assembler::equal, true, Assembler::pn, done);
   8.438 -    __ delayed()->set(0, dst);
   8.439 -
   8.440 -    // get object class
   8.441 -    // not a safepoint as obj null check happens earlier
   8.442 -    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
   8.443 -    if (op->fast_check()) {
   8.444 -      __ cmp(k_RInfo, klass_RInfo);
   8.445 -      __ br(Assembler::equal, true, Assembler::pt, done);
   8.446 -      __ delayed()->set(1, dst);
   8.447 -      __ set(0, dst);
   8.448 -      __ bind(done);
   8.449 -    } else {
   8.450 -      bool need_slow_path = true;
   8.451 -      if (k->is_loaded()) {
   8.452 -        if (k->super_check_offset() != sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes())
   8.453 -          need_slow_path = false;
   8.454 -        // perform the fast part of the checking logic
   8.455 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, noreg,
   8.456 -                                         (need_slow_path ? &done : NULL),
   8.457 -                                         (need_slow_path ? &done : NULL), NULL,
   8.458 -                                         RegisterOrConstant(k->super_check_offset()),
   8.459 -                                         dst);
   8.460 -      } else {
   8.461 -        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
   8.462 -        // perform the fast part of the checking logic
   8.463 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, O7, dst,
   8.464 -                                         &done, &done, NULL,
   8.465 -                                         RegisterOrConstant(-1),
   8.466 -                                         dst);
   8.467 -      }
   8.468 -      if (need_slow_path) {
   8.469 -        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
   8.470 -        assert(klass_RInfo == G3 && k_RInfo == G1, "incorrect call setup");
   8.471 -        __ call(Runtime1::entry_for(Runtime1::slow_subtype_check_id), relocInfo::runtime_call_type);
   8.472 -        __ delayed()->nop();
   8.473 -        __ mov(G3, dst);
   8.474 -      }
   8.475 -      __ bind(done);
   8.476 -    }
   8.477 +    Label success, failure, done;
   8.478 +    emit_typecheck_helper(op, &success, &failure, &failure);
   8.479 +    __ bind(failure);
   8.480 +    __ set(0, dst);
   8.481 +    __ ba(false, done);
   8.482 +    __ delayed()->nop();
   8.483 +    __ bind(success);
   8.484 +    __ set(1, dst);
   8.485 +    __ bind(done);
   8.486    } else {
   8.487      ShouldNotReachHere();
   8.488    }
   8.489 @@ -2776,9 +2880,14 @@
   8.490    ciProfileData* data = md->bci_to_data(bci);
   8.491    assert(data->is_CounterData(), "need CounterData for calls");
   8.492    assert(op->mdo()->is_single_cpu(),  "mdo must be allocated");
   8.493 +  Register mdo  = op->mdo()->as_register();
   8.494 +#ifdef _LP64
   8.495 +  assert(op->tmp1()->is_double_cpu(), "tmp1 must be allocated");
   8.496 +  Register tmp1 = op->tmp1()->as_register_lo();
   8.497 +#else
   8.498    assert(op->tmp1()->is_single_cpu(), "tmp1 must be allocated");
   8.499 -  Register mdo  = op->mdo()->as_register();
   8.500    Register tmp1 = op->tmp1()->as_register();
   8.501 +#endif
   8.502    jobject2reg(md->constant_encoding(), mdo);
   8.503    int mdo_offset_bias = 0;
   8.504    if (!Assembler::is_simm13(md->byte_offset_of_slot(data, CounterData::count_offset()) +
   8.505 @@ -2795,13 +2904,13 @@
   8.506    // Perform additional virtual call profiling for invokevirtual and
   8.507    // invokeinterface bytecodes
   8.508    if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
   8.509 -      Tier1ProfileVirtualCalls) {
   8.510 +      C1ProfileVirtualCalls) {
   8.511      assert(op->recv()->is_single_cpu(), "recv must be allocated");
   8.512      Register recv = op->recv()->as_register();
   8.513      assert_different_registers(mdo, tmp1, recv);
   8.514      assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
   8.515      ciKlass* known_klass = op->known_holder();
   8.516 -    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
   8.517 +    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
   8.518        // We know the type that will be seen at this call site; we can
   8.519        // statically update the methodDataOop rather than needing to do
   8.520        // dynamic tests on the receiver type
   8.521 @@ -2816,9 +2925,9 @@
   8.522            Address data_addr(mdo, md->byte_offset_of_slot(data,
   8.523                                                           VirtualCallData::receiver_count_offset(i)) -
   8.524                              mdo_offset_bias);
   8.525 -          __ lduw(data_addr, tmp1);
   8.526 +          __ ld_ptr(data_addr, tmp1);
   8.527            __ add(tmp1, DataLayout::counter_increment, tmp1);
   8.528 -          __ stw(tmp1, data_addr);
   8.529 +          __ st_ptr(tmp1, data_addr);
   8.530            return;
   8.531          }
   8.532        }
   8.533 @@ -2837,70 +2946,32 @@
   8.534            __ st_ptr(tmp1, recv_addr);
   8.535            Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
   8.536                              mdo_offset_bias);
   8.537 -          __ lduw(data_addr, tmp1);
   8.538 +          __ ld_ptr(data_addr, tmp1);
   8.539            __ add(tmp1, DataLayout::counter_increment, tmp1);
   8.540 -          __ stw(tmp1, data_addr);
   8.541 +          __ st_ptr(tmp1, data_addr);
   8.542            return;
   8.543          }
   8.544        }
   8.545      } else {
   8.546        load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
   8.547        Label update_done;
   8.548 -      uint i;
   8.549 -      for (i = 0; i < VirtualCallData::row_limit(); i++) {
   8.550 -        Label next_test;
   8.551 -        // See if the receiver is receiver[n].
   8.552 -        Address receiver_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
   8.553 -                              mdo_offset_bias);
   8.554 -        __ ld_ptr(receiver_addr, tmp1);
   8.555 -        __ verify_oop(tmp1);
   8.556 -        __ cmp(recv, tmp1);
   8.557 -        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
   8.558 -        __ delayed()->nop();
   8.559 -        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
   8.560 -                          mdo_offset_bias);
   8.561 -        __ lduw(data_addr, tmp1);
   8.562 -        __ add(tmp1, DataLayout::counter_increment, tmp1);
   8.563 -        __ stw(tmp1, data_addr);
   8.564 -        __ br(Assembler::always, false, Assembler::pt, update_done);
   8.565 -        __ delayed()->nop();
   8.566 -        __ bind(next_test);
   8.567 -      }
   8.568 -
   8.569 -      // Didn't find receiver; find next empty slot and fill it in
   8.570 -      for (i = 0; i < VirtualCallData::row_limit(); i++) {
   8.571 -        Label next_test;
   8.572 -        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)) -
   8.573 -                          mdo_offset_bias);
   8.574 -        load(recv_addr, tmp1, T_OBJECT);
   8.575 -        __ tst(tmp1);
   8.576 -        __ brx(Assembler::notEqual, false, Assembler::pt, next_test);
   8.577 -        __ delayed()->nop();
   8.578 -        __ st_ptr(recv, recv_addr);
   8.579 -        __ set(DataLayout::counter_increment, tmp1);
   8.580 -        __ st_ptr(tmp1, mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)) -
   8.581 -                  mdo_offset_bias);
   8.582 -        __ br(Assembler::always, false, Assembler::pt, update_done);
   8.583 -        __ delayed()->nop();
   8.584 -        __ bind(next_test);
   8.585 -      }
   8.586 +      type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
   8.587        // Receiver did not match any saved receiver and there is no empty row for it.
   8.588        // Increment total counter to indicate polymorphic case.
   8.589 -      __ lduw(counter_addr, tmp1);
   8.590 +      __ ld_ptr(counter_addr, tmp1);
   8.591        __ add(tmp1, DataLayout::counter_increment, tmp1);
   8.592 -      __ stw(tmp1, counter_addr);
   8.593 +      __ st_ptr(tmp1, counter_addr);
   8.594  
   8.595        __ bind(update_done);
   8.596      }
   8.597    } else {
   8.598      // Static call
   8.599 -    __ lduw(counter_addr, tmp1);
   8.600 +    __ ld_ptr(counter_addr, tmp1);
   8.601      __ add(tmp1, DataLayout::counter_increment, tmp1);
   8.602 -    __ stw(tmp1, counter_addr);
   8.603 +    __ st_ptr(tmp1, counter_addr);
   8.604    }
   8.605  }
   8.606  
   8.607 -
   8.608  void LIR_Assembler::align_backward_branch_target() {
   8.609    __ align(OptoLoopAlignment);
   8.610  }
   8.611 @@ -3093,31 +3164,36 @@
   8.612    // no-op on TSO
   8.613  }
   8.614  
   8.615 -// Macro to Pack two sequential registers containing 32 bit values
   8.616 +// Pack two sequential registers containing 32 bit values
   8.617  // into a single 64 bit register.
   8.618 -// rs and rs->successor() are packed into rd
   8.619 -// rd and rs may be the same register.
   8.620 -// Note: rs and rs->successor() are destroyed.
   8.621 -void LIR_Assembler::pack64( Register rs, Register rd ) {
   8.622 +// src and src->successor() are packed into dst
   8.623 +// src and dst may be the same register.
   8.624 +// Note: src is destroyed
   8.625 +void LIR_Assembler::pack64(LIR_Opr src, LIR_Opr dst) {
   8.626 +  Register rs = src->as_register();
   8.627 +  Register rd = dst->as_register_lo();
   8.628    __ sllx(rs, 32, rs);
   8.629    __ srl(rs->successor(), 0, rs->successor());
   8.630    __ or3(rs, rs->successor(), rd);
   8.631  }
   8.632  
   8.633 -// Macro to unpack a 64 bit value in a register into
   8.634 +// Unpack a 64 bit value in a register into
   8.635  // two sequential registers.
   8.636 -// rd is unpacked into rd and rd->successor()
   8.637 -void LIR_Assembler::unpack64( Register rd ) {
   8.638 -  __ mov(rd, rd->successor());
   8.639 -  __ srax(rd, 32, rd);
   8.640 -  __ sra(rd->successor(), 0, rd->successor());
   8.641 +// src is unpacked into dst and dst->successor()
   8.642 +void LIR_Assembler::unpack64(LIR_Opr src, LIR_Opr dst) {
   8.643 +  Register rs = src->as_register_lo();
   8.644 +  Register rd = dst->as_register_hi();
   8.645 +  assert_different_registers(rs, rd, rd->successor());
   8.646 +  __ srlx(rs, 32, rd);
   8.647 +  __ srl (rs,  0, rd->successor());
   8.648  }
   8.649  
   8.650  
   8.651  void LIR_Assembler::leal(LIR_Opr addr_opr, LIR_Opr dest) {
   8.652    LIR_Address* addr = addr_opr->as_address_ptr();
   8.653    assert(addr->index()->is_illegal() && addr->scale() == LIR_Address::times_1 && Assembler::is_simm13(addr->disp()), "can't handle complex addresses yet");
   8.654 -  __ add(addr->base()->as_register(), addr->disp(), dest->as_register());
   8.655 +
   8.656 +  __ add(addr->base()->as_pointer_register(), addr->disp(), dest->as_pointer_register());
   8.657  }
   8.658  
   8.659  
   8.660 @@ -3188,11 +3264,36 @@
   8.661              tty->cr();
   8.662            }
   8.663  #endif
   8.664 -          continue;
   8.665 +        } else {
   8.666 +          LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
   8.667 +          inst->insert_before(i + 1, delay_op);
   8.668 +          i++;
   8.669          }
   8.670  
   8.671 -        LIR_Op* delay_op = new LIR_OpDelay(new LIR_Op0(lir_nop), op->as_OpJavaCall()->info());
   8.672 -        inst->insert_before(i + 1, delay_op);
   8.673 +#if defined(TIERED) && !defined(_LP64)
   8.674 +        // fixup the return value from G1 to O0/O1 for long returns.
   8.675 +        // It's done here instead of in LIRGenerator because there's
   8.676 +        // such a mismatch between the single reg and double reg
   8.677 +        // calling convention.
   8.678 +        LIR_OpJavaCall* callop = op->as_OpJavaCall();
   8.679 +        if (callop->result_opr() == FrameMap::out_long_opr) {
   8.680 +          LIR_OpJavaCall* call;
   8.681 +          LIR_OprList* arguments = new LIR_OprList(callop->arguments()->length());
   8.682 +          for (int a = 0; a < arguments->length(); a++) {
   8.683 +            arguments[a] = callop->arguments()[a];
   8.684 +          }
   8.685 +          if (op->code() == lir_virtual_call) {
   8.686 +            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
   8.687 +                                      callop->vtable_offset(), arguments, callop->info());
   8.688 +          } else {
   8.689 +            call = new LIR_OpJavaCall(op->code(), callop->method(), callop->receiver(), FrameMap::g1_long_single_opr,
   8.690 +                                      callop->addr(), arguments, callop->info());
   8.691 +          }
   8.692 +          inst->at_put(i - 1, call);
   8.693 +          inst->insert_before(i + 1, new LIR_Op1(lir_unpack64, FrameMap::g1_long_single_opr, callop->result_opr(),
   8.694 +                                                 T_LONG, lir_patch_none, NULL));
   8.695 +        }
   8.696 +#endif
   8.697          break;
   8.698        }
   8.699      }
     9.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
     9.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
     9.3 @@ -1,5 +1,5 @@
     9.4  /*
     9.5 - * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
     9.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
     9.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.8   *
     9.9   * This code is free software; you can redistribute it and/or modify it
    9.10 @@ -71,9 +71,16 @@
    9.11  
    9.12    static bool is_single_instruction(LIR_Op* op);
    9.13  
    9.14 +  // Record the type of the receiver in ReceiverTypeData
    9.15 +  void type_profile_helper(Register mdo, int mdo_offset_bias,
    9.16 +                           ciMethodData *md, ciProfileData *data,
    9.17 +                           Register recv, Register tmp1, Label* update_done);
    9.18 +  // Setup pointers to MDO, MDO slot, also compute offset bias to access the slot.
    9.19 +  void setup_md_access(ciMethod* method, int bci,
    9.20 +                       ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);
    9.21   public:
    9.22 -  void pack64( Register rs, Register rd );
    9.23 -  void unpack64( Register rd );
    9.24 +  void   pack64(LIR_Opr src, LIR_Opr dst);
    9.25 +  void unpack64(LIR_Opr src, LIR_Opr dst);
    9.26  
    9.27  enum {
    9.28  #ifdef _LP64
    10.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    10.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    10.3 @@ -1,5 +1,5 @@
    10.4  /*
    10.5 - * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
    10.6 + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    10.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8   *
    10.9   * This code is free software; you can redistribute it and/or modify it
   10.10 @@ -227,29 +227,37 @@
   10.11    }
   10.12  }
   10.13  
   10.14 +LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
   10.15 +  LIR_Opr r;
   10.16 +  if (type == T_LONG) {
   10.17 +    r = LIR_OprFact::longConst(x);
   10.18 +  } else if (type == T_INT) {
   10.19 +    r = LIR_OprFact::intConst(x);
   10.20 +  } else {
   10.21 +    ShouldNotReachHere();
   10.22 +  }
   10.23 +  if (!Assembler::is_simm13(x)) {
   10.24 +    LIR_Opr tmp = new_register(type);
   10.25 +    __ move(r, tmp);
   10.26 +    return tmp;
   10.27 +  }
   10.28 +  return r;
   10.29 +}
   10.30  
   10.31 -void LIRGenerator::increment_counter(address counter, int step) {
   10.32 +void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   10.33    LIR_Opr pointer = new_pointer_register();
   10.34    __ move(LIR_OprFact::intptrConst(counter), pointer);
   10.35 -  LIR_Address* addr = new LIR_Address(pointer, T_INT);
   10.36 +  LIR_Address* addr = new LIR_Address(pointer, type);
   10.37    increment_counter(addr, step);
   10.38  }
   10.39  
   10.40  void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
   10.41 -  LIR_Opr temp = new_register(T_INT);
   10.42 +  LIR_Opr temp = new_register(addr->type());
   10.43    __ move(addr, temp);
   10.44 -  LIR_Opr c = LIR_OprFact::intConst(step);
   10.45 -  if (Assembler::is_simm13(step)) {
   10.46 -    __ add(temp, c, temp);
   10.47 -  } else {
   10.48 -    LIR_Opr temp2 = new_register(T_INT);
   10.49 -    __ move(c, temp2);
   10.50 -    __ add(temp, temp2, temp);
   10.51 -  }
   10.52 +  __ add(temp, load_immediate(step, addr->type()), temp);
   10.53    __ move(temp, addr);
   10.54  }
   10.55  
   10.56 -
   10.57  void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   10.58    LIR_Opr o7opr = FrameMap::O7_opr;
   10.59    __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
   10.60 @@ -611,7 +619,6 @@
   10.61    left.load_item();
   10.62    right.load_item();
   10.63    LIR_Opr reg = rlock_result(x);
   10.64 -
   10.65    if (x->x()->type()->is_float_kind()) {
   10.66      Bytecodes::Code code = x->op();
   10.67      __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
   10.68 @@ -1040,7 +1047,9 @@
   10.69    LIR_Opr tmp1 = FrameMap::G1_oop_opr;
   10.70    LIR_Opr tmp2 = FrameMap::G3_oop_opr;
   10.71    LIR_Opr tmp3 = FrameMap::G4_oop_opr;
   10.72 -  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,  x->direct_compare(), patching_info);
   10.73 +  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
   10.74 +                x->direct_compare(), patching_info,
   10.75 +                x->profiled_method(), x->profiled_bci());
   10.76  }
   10.77  
   10.78  
   10.79 @@ -1089,12 +1098,12 @@
   10.80    // add safepoint before generating condition code so it can be recomputed
   10.81    if (x->is_safepoint()) {
   10.82      // increment backedge counter if needed
   10.83 -    increment_backedge_counter(state_for(x, x->state_before()));
   10.84 -
   10.85 +    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
   10.86      __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
   10.87    }
   10.88  
   10.89    __ cmp(lir_cond(cond), left, right);
   10.90 +  // Generate branch profiling. Profiling code doesn't kill flags.
   10.91    profile_branch(x, cond);
   10.92    move_to_phi(x->state());
   10.93    if (x->x()->type()->is_float_kind()) {
    11.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    11.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    11.3 @@ -465,12 +465,10 @@
    11.4  
    11.5        break;
    11.6  
    11.7 -#ifdef TIERED
    11.8      case counter_overflow_id:
    11.9 -        // G4 contains bci
   11.10 -      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4);
   11.11 +        // G4 contains bci, G5 contains method
   11.12 +      oop_maps = generate_stub_call(sasm, noreg, CAST_FROM_FN_PTR(address, counter_overflow), G4, G5);
   11.13        break;
   11.14 -#endif // TIERED
   11.15  
   11.16      case new_type_array_id:
   11.17      case new_object_array_id:
    12.1 --- a/src/cpu/sparc/vm/c1_globals_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
    12.2 +++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -34,14 +34,7 @@
   12.11  define_pd_global(bool, UseOnStackReplacement,        true );
   12.12  define_pd_global(bool, TieredCompilation,            false);
   12.13  define_pd_global(intx, CompileThreshold,             1000 ); // Design center runs on 1.3.1
   12.14 -define_pd_global(intx, Tier2CompileThreshold,        1500 );
   12.15 -define_pd_global(intx, Tier3CompileThreshold,        2000 );
   12.16 -define_pd_global(intx, Tier4CompileThreshold,        2500 );
   12.17 -
   12.18  define_pd_global(intx, BackEdgeThreshold,            100000);
   12.19 -define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
   12.20 -define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
   12.21 -define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
   12.22  
   12.23  define_pd_global(intx, OnStackReplacePercentage,     1400 );
   12.24  define_pd_global(bool, UseTLAB,                      true );
    13.1 --- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
    13.2 +++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
    13.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -37,21 +37,8 @@
   13.11  define_pd_global(bool, ProfileInterpreter,           true);
   13.12  #endif // CC_INTERP
   13.13  define_pd_global(bool, TieredCompilation,            false);
   13.14 -#ifdef TIERED
   13.15 -define_pd_global(intx, CompileThreshold,             1000);
   13.16 -define_pd_global(intx, BackEdgeThreshold,            14000);
   13.17 -#else
   13.18  define_pd_global(intx, CompileThreshold,             10000);
   13.19  define_pd_global(intx, BackEdgeThreshold,            140000);
   13.20 -#endif // TIERED
   13.21 -
   13.22 -define_pd_global(intx, Tier2CompileThreshold,        10000); // unused level
   13.23 -define_pd_global(intx, Tier3CompileThreshold,        10000);
   13.24 -define_pd_global(intx, Tier4CompileThreshold,        40000);
   13.25 -
   13.26 -define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
   13.27 -define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
   13.28 -define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
   13.29  
   13.30  define_pd_global(intx, OnStackReplacePercentage,     140);
   13.31  define_pd_global(intx, ConditionalMoveLimit,         4);
    14.1 --- a/src/cpu/sparc/vm/frame_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
    14.2 +++ b/src/cpu/sparc/vm/frame_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -263,8 +263,7 @@
   14.11    };
   14.12  
   14.13   private:
   14.14 -
   14.15 -  constantPoolCacheOop* frame::interpreter_frame_cpoolcache_addr() const;
   14.16 +  constantPoolCacheOop* interpreter_frame_cpoolcache_addr() const;
   14.17  
   14.18  #ifndef CC_INTERP
   14.19  
    15.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    15.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    15.3 @@ -2431,3 +2431,20 @@
    15.4    }
    15.5  #endif // CC_INTERP
    15.6  }
    15.7 +
    15.8 +// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
    15.9 +void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
   15.10 +                                                        int increment, int mask,
   15.11 +                                                        Register scratch1, Register scratch2,
   15.12 +                                                        Condition cond, Label *where) {
   15.13 +  ld(counter_addr, scratch1);
   15.14 +  add(scratch1, increment, scratch1);
   15.15 +  if (is_simm13(mask)) {
   15.16 +    andcc(scratch1, mask, G0);
   15.17 +  } else {
   15.18 +    set(mask, scratch2);
   15.19 +    andcc(scratch1, scratch2,  G0);
   15.20 +  }
   15.21 +  br(cond, false, Assembler::pn, *where);
   15.22 +  delayed()->st(scratch1, counter_addr);
   15.23 +}
    16.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
    16.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
    16.3 @@ -278,6 +278,10 @@
    16.4    void increment_mdp_data_at(Register reg, int constant,
    16.5                               Register bumped_count, Register scratch2,
    16.6                               bool decrement = false);
    16.7 +  void increment_mask_and_jump(Address counter_addr,
    16.8 +                               int increment, int mask,
    16.9 +                               Register scratch1, Register scratch2,
   16.10 +                               Condition cond, Label *where);
   16.11    void set_mdp_flag_at(int flag_constant, Register scratch);
   16.12    void test_mdp_data_at(int offset, Register value, Label& not_equal_continue,
   16.13                          Register scratch);
   16.14 @@ -321,4 +325,5 @@
   16.15  
   16.16    void save_return_value(TosState state, bool is_native_call);
   16.17    void restore_return_value(TosState state, bool is_native_call);
   16.18 +
   16.19  };
    17.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    17.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    17.3 @@ -3331,10 +3331,8 @@
    17.4    __ stf(FloatRegisterImpl::D, Freturn0, saved_Freturn0_addr);
    17.5  #if !defined(_LP64)
    17.6  #if defined(COMPILER2)
    17.7 -  if (!TieredCompilation) {
    17.8 -    // 32-bit 1-register longs return longs in G1
    17.9 -    __ stx(Greturn1, saved_Greturn1_addr);
   17.10 -  }
   17.11 +  // 32-bit 1-register longs return longs in G1
   17.12 +  __ stx(Greturn1, saved_Greturn1_addr);
   17.13  #endif
   17.14    __ set_last_Java_frame(SP, noreg);
   17.15    __ call_VM_leaf(L7_thread_cache, CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames), G2_thread, G4deopt_mode);
   17.16 @@ -3347,24 +3345,15 @@
   17.17    __ reset_last_Java_frame();
   17.18    __ ldf(FloatRegisterImpl::D, saved_Freturn0_addr, Freturn0);
   17.19  
   17.20 -  // In tiered we never use C2 to compile methods returning longs so
   17.21 -  // the result is where we expect it already.
   17.22 -
   17.23  #if !defined(_LP64) && defined(COMPILER2)
   17.24    // In 32 bit, C2 returns longs in G1 so restore the saved G1 into
   17.25 -  // I0/I1 if the return value is long.  In the tiered world there is
   17.26 -  // a mismatch between how C1 and C2 return longs compiles and so
   17.27 -  // currently compilation of methods which return longs is disabled
   17.28 -  // for C2 and so is this code.  Eventually C1 and C2 will do the
   17.29 -  // same thing for longs in the tiered world.
   17.30 -  if (!TieredCompilation) {
   17.31 -    Label not_long;
   17.32 -    __ cmp(O0,T_LONG);
   17.33 -    __ br(Assembler::notEqual, false, Assembler::pt, not_long);
   17.34 -    __ delayed()->nop();
   17.35 -    __ ldd(saved_Greturn1_addr,I0);
   17.36 -    __ bind(not_long);
   17.37 -  }
   17.38 +  // I0/I1 if the return value is long.
   17.39 +  Label not_long;
   17.40 +  __ cmp(O0,T_LONG);
   17.41 +  __ br(Assembler::notEqual, false, Assembler::pt, not_long);
   17.42 +  __ delayed()->nop();
   17.43 +  __ ldd(saved_Greturn1_addr,I0);
   17.44 +  __ bind(not_long);
   17.45  #endif
   17.46    __ ret();
   17.47    __ delayed()->restore();
    18.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    18.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    18.3 @@ -1609,7 +1609,7 @@
    18.4      assert_clean_int(count, O3);     // Make sure 'count' is clean int.
    18.5  
    18.6      Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
    18.7 -    Label L_fill_2_bytes, L_fill_4_bytes, L_fill_32_bytes;
    18.8 +    Label L_fill_2_bytes, L_fill_elements, L_fill_32_bytes;
    18.9  
   18.10      int shift = -1;
   18.11      switch (t) {
   18.12 @@ -1635,8 +1635,8 @@
   18.13      }
   18.14      if (t == T_SHORT) {
   18.15        // Zero extend value
   18.16 -      __ sethi(0xffff0000, O3);
   18.17 -      __ andn(value, O3, value);
   18.18 +      __ sllx(value, 48, value);
   18.19 +      __ srlx(value, 48, value);
   18.20      }
   18.21      if (t == T_BYTE || t == T_SHORT) {
   18.22        __ sllx(value, 16, O3);
   18.23 @@ -1644,8 +1644,8 @@
   18.24      }
   18.25  
   18.26      __ cmp(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
   18.27 -    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_4_bytes); // use unsigned cmp
   18.28 -    __ delayed()->andcc(count, 1<<shift, G0);
   18.29 +    __ brx(Assembler::lessUnsigned, false, Assembler::pn, L_fill_elements); // use unsigned cmp
   18.30 +    __ delayed()->andcc(count, 1, G0);
   18.31  
   18.32      if (!aligned && (t == T_BYTE || t == T_SHORT)) {
   18.33        // align source address at 4 bytes address boundary
   18.34 @@ -1683,12 +1683,6 @@
   18.35      }
   18.36  #endif
   18.37  
   18.38 -    Label L_check_fill_8_bytes;
   18.39 -    // Fill 32-byte chunks
   18.40 -    __ subcc(count, 8 << shift, count);
   18.41 -    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
   18.42 -    __ delayed()->nop();
   18.43 -
   18.44      if (t == T_INT) {
   18.45        // Zero extend value
   18.46        __ srl(value, 0, value);
   18.47 @@ -1698,7 +1692,13 @@
   18.48        __ or3(value, O3, value);
   18.49      }
   18.50  
   18.51 -    Label L_fill_32_bytes_loop;
   18.52 +    Label L_check_fill_8_bytes;
   18.53 +    // Fill 32-byte chunks
   18.54 +    __ subcc(count, 8 << shift, count);
   18.55 +    __ brx(Assembler::less, false, Assembler::pt, L_check_fill_8_bytes);
   18.56 +    __ delayed()->nop();
   18.57 +
   18.58 +    Label L_fill_32_bytes_loop, L_fill_4_bytes;
   18.59      __ align(16);
   18.60      __ BIND(L_fill_32_bytes_loop);
   18.61  
   18.62 @@ -1730,6 +1730,9 @@
   18.63  
   18.64      // fill trailing 4 bytes
   18.65      __ andcc(count, 1<<shift, G0);  // in delay slot of branches
   18.66 +    if (t == T_INT) {
   18.67 +      __ BIND(L_fill_elements);
   18.68 +    }
   18.69      __ BIND(L_fill_4_bytes);
   18.70      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2_bytes);
   18.71      if (t == T_BYTE || t == T_SHORT) {
   18.72 @@ -1762,7 +1765,48 @@
   18.73      }
   18.74      __ BIND(L_exit);
   18.75      __ retl();
   18.76 -    __ delayed()->mov(G0, O0); // return 0
   18.77 +    __ delayed()->nop();
   18.78 +
   18.79 +    // Handle copies less than 8 bytes.  Int is handled elsewhere.
   18.80 +    if (t == T_BYTE) {
   18.81 +      __ BIND(L_fill_elements);
   18.82 +      Label L_fill_2, L_fill_4;
   18.83 +      // in delay slot __ andcc(count, 1, G0);
   18.84 +      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
   18.85 +      __ delayed()->andcc(count, 2, G0);
   18.86 +      __ stb(value, to, 0);
   18.87 +      __ inc(to, 1);
   18.88 +      __ BIND(L_fill_2);
   18.89 +      __ brx(Assembler::zero, false, Assembler::pt, L_fill_4);
   18.90 +      __ delayed()->andcc(count, 4, G0);
   18.91 +      __ stb(value, to, 0);
   18.92 +      __ stb(value, to, 1);
   18.93 +      __ inc(to, 2);
   18.94 +      __ BIND(L_fill_4);
   18.95 +      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
   18.96 +      __ delayed()->nop();
   18.97 +      __ stb(value, to, 0);
   18.98 +      __ stb(value, to, 1);
   18.99 +      __ stb(value, to, 2);
  18.100 +      __ retl();
  18.101 +      __ delayed()->stb(value, to, 3);
  18.102 +    }
  18.103 +
  18.104 +    if (t == T_SHORT) {
  18.105 +      Label L_fill_2;
  18.106 +      __ BIND(L_fill_elements);
  18.107 +      // in delay slot __ andcc(count, 1, G0);
  18.108 +      __ brx(Assembler::zero, false, Assembler::pt, L_fill_2);
  18.109 +      __ delayed()->andcc(count, 2, G0);
  18.110 +      __ sth(value, to, 0);
  18.111 +      __ inc(to, 2);
  18.112 +      __ BIND(L_fill_2);
  18.113 +      __ brx(Assembler::zero, false, Assembler::pt, L_exit);
  18.114 +      __ delayed()->nop();
  18.115 +      __ sth(value, to, 0);
  18.116 +      __ retl();
  18.117 +      __ delayed()->sth(value, to, 2);
  18.118 +    }
  18.119      return start;
  18.120    }
  18.121  
    19.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Tue Sep 21 06:58:44 2010 -0700
    19.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Wed Sep 22 12:54:51 2010 -0400
    19.3 @@ -43,7 +43,7 @@
    19.4  
    19.5  // MethodHandles adapters
    19.6  enum method_handles_platform_dependent_constants {
    19.7 -  method_handles_adapters_code_size = 6000
    19.8 +  method_handles_adapters_code_size = 12000
    19.9  };
   19.10  
   19.11  class Sparc {
    20.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    20.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    20.3 @@ -294,35 +294,65 @@
    20.4  // ??: invocation counter
    20.5  //
    20.6  void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
    20.7 -  // Update standard invocation counters
    20.8 -  __ increment_invocation_counter(O0, G3_scratch);
    20.9 -  if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
   20.10 -    Address interpreter_invocation_counter(Lmethod, methodOopDesc::interpreter_invocation_counter_offset());
   20.11 -    __ ld(interpreter_invocation_counter, G3_scratch);
   20.12 -    __ inc(G3_scratch);
   20.13 -    __ st(G3_scratch, interpreter_invocation_counter);
   20.14 +  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
   20.15 +  if (TieredCompilation) {
   20.16 +    const int increment = InvocationCounter::count_increment;
   20.17 +    const int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
   20.18 +    Label no_mdo, done;
   20.19 +    if (ProfileInterpreter) {
   20.20 +      // If no method data exists, go to profile_continue.
   20.21 +      __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
   20.22 +      __ br_null(G4_scratch, false, Assembler::pn, no_mdo);
   20.23 +      __ delayed()->nop();
   20.24 +      // Increment counter
   20.25 +      Address mdo_invocation_counter(G4_scratch,
   20.26 +                                     in_bytes(methodDataOopDesc::invocation_counter_offset()) +
   20.27 +                                     in_bytes(InvocationCounter::counter_offset()));
   20.28 +      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
   20.29 +                                 G3_scratch, Lscratch,
   20.30 +                                 Assembler::zero, overflow);
   20.31 +      __ ba(false, done);
   20.32 +      __ delayed()->nop();
   20.33 +    }
   20.34 +
   20.35 +    // Increment counter in methodOop
   20.36 +    __ bind(no_mdo);
   20.37 +    Address invocation_counter(Lmethod,
   20.38 +                               in_bytes(methodOopDesc::invocation_counter_offset()) +
   20.39 +                               in_bytes(InvocationCounter::counter_offset()));
   20.40 +    __ increment_mask_and_jump(invocation_counter, increment, mask,
   20.41 +                               G3_scratch, Lscratch,
   20.42 +                               Assembler::zero, overflow);
   20.43 +    __ bind(done);
   20.44 +  } else {
   20.45 +    // Update standard invocation counters
   20.46 +    __ increment_invocation_counter(O0, G3_scratch);
   20.47 +    if (ProfileInterpreter) {  // %%% Merge this into methodDataOop
   20.48 +      Address interpreter_invocation_counter(Lmethod,in_bytes(methodOopDesc::interpreter_invocation_counter_offset()));
   20.49 +      __ ld(interpreter_invocation_counter, G3_scratch);
   20.50 +      __ inc(G3_scratch);
   20.51 +      __ st(G3_scratch, interpreter_invocation_counter);
   20.52 +    }
   20.53 +
   20.54 +    if (ProfileInterpreter && profile_method != NULL) {
   20.55 +      // Test to see if we should create a method data oop
   20.56 +      AddressLiteral profile_limit((address)&InvocationCounter::InterpreterProfileLimit);
   20.57 +      __ load_contents(profile_limit, G3_scratch);
   20.58 +      __ cmp(O0, G3_scratch);
   20.59 +      __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
   20.60 +      __ delayed()->nop();
   20.61 +
   20.62 +      // if no method data exists, go to profile_method
   20.63 +      __ test_method_data_pointer(*profile_method);
   20.64 +    }
   20.65 +
   20.66 +    AddressLiteral invocation_limit((address)&InvocationCounter::InterpreterInvocationLimit);
   20.67 +    __ load_contents(invocation_limit, G3_scratch);
   20.68 +    __ cmp(O0, G3_scratch);
   20.69 +    __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
   20.70 +    __ delayed()->nop();
   20.71    }
   20.72  
   20.73 -  if (ProfileInterpreter && profile_method != NULL) {
   20.74 -    // Test to see if we should create a method data oop
   20.75 -    AddressLiteral profile_limit(&InvocationCounter::InterpreterProfileLimit);
   20.76 -    __ sethi(profile_limit, G3_scratch);
   20.77 -    __ ld(G3_scratch, profile_limit.low10(), G3_scratch);
   20.78 -    __ cmp(O0, G3_scratch);
   20.79 -    __ br(Assembler::lessUnsigned, false, Assembler::pn, *profile_method_continue);
   20.80 -    __ delayed()->nop();
   20.81 -
   20.82 -    // if no method data exists, go to profile_method
   20.83 -    __ test_method_data_pointer(*profile_method);
   20.84 -  }
   20.85 -
   20.86 -  AddressLiteral invocation_limit(&InvocationCounter::InterpreterInvocationLimit);
   20.87 -  __ sethi(invocation_limit, G3_scratch);
   20.88 -  __ ld(G3_scratch, invocation_limit.low10(), G3_scratch);
   20.89 -  __ cmp(O0, G3_scratch);
   20.90 -  __ br(Assembler::greaterEqualUnsigned, false, Assembler::pn, *overflow);
   20.91 -  __ delayed()->nop();
   20.92 -
   20.93  }
   20.94  
   20.95  // Allocate monitor and lock method (asm interpreter)
    21.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Sep 21 06:58:44 2010 -0700
    21.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Wed Sep 22 12:54:51 2010 -0400
    21.3 @@ -1580,6 +1580,7 @@
    21.4    const Register O0_cur_bcp = O0;
    21.5    __ mov( Lbcp, O0_cur_bcp );
    21.6  
    21.7 +
    21.8    bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
    21.9    if ( increment_invocation_counter_for_backward_branches ) {
   21.10      Label Lforward;
   21.11 @@ -1588,17 +1589,84 @@
   21.12      // Bump bytecode pointer by displacement (take the branch)
   21.13      __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr
   21.14  
   21.15 -    // Update Backedge branch separately from invocations
   21.16 -    const Register G4_invoke_ctr = G4;
   21.17 -    __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
   21.18 -    if (ProfileInterpreter) {
   21.19 -      __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
   21.20 -      if (UseOnStackReplacement) {
   21.21 -        __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
   21.22 +    if (TieredCompilation) {
   21.23 +      Label Lno_mdo, Loverflow;
   21.24 +      int increment = InvocationCounter::count_increment;
   21.25 +      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
   21.26 +      if (ProfileInterpreter) {
   21.27 +        // If no method data exists, go to profile_continue.
   21.28 +        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
   21.29 +        __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
   21.30 +        __ delayed()->nop();
   21.31 +
   21.32 +        // Increment backedge counter in the MDO
   21.33 +        Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
   21.34 +                                                 in_bytes(InvocationCounter::counter_offset()));
   21.35 +        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
   21.36 +                                   Assembler::notZero, &Lforward);
   21.37 +        __ ba(false, Loverflow);
   21.38 +        __ delayed()->nop();
   21.39        }
   21.40 +
   21.41 +      // If there's no MDO, increment counter in methodOop
   21.42 +      __ bind(Lno_mdo);
   21.43 +      Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
   21.44 +                                        in_bytes(InvocationCounter::counter_offset()));
   21.45 +      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
   21.46 +                                 Assembler::notZero, &Lforward);
   21.47 +      __ bind(Loverflow);
   21.48 +
   21.49 +      // notify point for loop, pass branch bytecode
   21.50 +      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
   21.51 +
   21.52 +      // Was an OSR adapter generated?
   21.53 +      // O0 = osr nmethod
   21.54 +      __ br_null(O0, false, Assembler::pn, Lforward);
   21.55 +      __ delayed()->nop();
   21.56 +
   21.57 +      // Has the nmethod been invalidated already?
   21.58 +      __ ld(O0, nmethod::entry_bci_offset(), O2);
   21.59 +      __ cmp(O2, InvalidOSREntryBci);
   21.60 +      __ br(Assembler::equal, false, Assembler::pn, Lforward);
   21.61 +      __ delayed()->nop();
   21.62 +
   21.63 +      // migrate the interpreter frame off of the stack
   21.64 +
   21.65 +      __ mov(G2_thread, L7);
   21.66 +      // save nmethod
   21.67 +      __ mov(O0, L6);
   21.68 +      __ set_last_Java_frame(SP, noreg);
   21.69 +      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
   21.70 +      __ reset_last_Java_frame();
   21.71 +      __ mov(L7, G2_thread);
   21.72 +
   21.73 +      // move OSR nmethod to I1
   21.74 +      __ mov(L6, I1);
   21.75 +
   21.76 +      // OSR buffer to I0
   21.77 +      __ mov(O0, I0);
   21.78 +
   21.79 +      // remove the interpreter frame
   21.80 +      __ restore(I5_savedSP, 0, SP);
   21.81 +
   21.82 +      // Jump to the osr code.
   21.83 +      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
   21.84 +      __ jmp(O2, G0);
   21.85 +      __ delayed()->nop();
   21.86 +
   21.87      } else {
   21.88 -      if (UseOnStackReplacement) {
   21.89 -        __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
   21.90 +      // Update Backedge branch separately from invocations
   21.91 +      const Register G4_invoke_ctr = G4;
   21.92 +      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
   21.93 +      if (ProfileInterpreter) {
   21.94 +        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
   21.95 +        if (UseOnStackReplacement) {
   21.96 +          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
   21.97 +        }
   21.98 +      } else {
   21.99 +        if (UseOnStackReplacement) {
  21.100 +          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
  21.101 +        }
  21.102        }
  21.103      }
  21.104  
    22.1 --- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    22.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    22.3 @@ -4993,19 +4993,22 @@
    22.4        ttyLocker ttyl;
    22.5        tty->print_cr("eip = 0x%08x", eip);
    22.6  #ifndef PRODUCT
    22.7 -      tty->cr();
    22.8 -      findpc(eip);
    22.9 -      tty->cr();
   22.10 +      if ((WizardMode || Verbose) && PrintMiscellaneous) {
   22.11 +        tty->cr();
   22.12 +        findpc(eip);
   22.13 +        tty->cr();
   22.14 +      }
   22.15  #endif
   22.16 -      tty->print_cr("rax, = 0x%08x", rax);
   22.17 -      tty->print_cr("rbx, = 0x%08x", rbx);
   22.18 +      tty->print_cr("rax = 0x%08x", rax);
   22.19 +      tty->print_cr("rbx = 0x%08x", rbx);
   22.20        tty->print_cr("rcx = 0x%08x", rcx);
   22.21        tty->print_cr("rdx = 0x%08x", rdx);
   22.22        tty->print_cr("rdi = 0x%08x", rdi);
   22.23        tty->print_cr("rsi = 0x%08x", rsi);
   22.24 -      tty->print_cr("rbp, = 0x%08x", rbp);
   22.25 +      tty->print_cr("rbp = 0x%08x", rbp);
   22.26        tty->print_cr("rsp = 0x%08x", rsp);
   22.27        BREAKPOINT;
   22.28 +      assert(false, "start up GDB");
   22.29      }
   22.30    } else {
   22.31      ttyLocker ttyl;
   22.32 @@ -7677,11 +7680,19 @@
   22.33    movptr(tmp, ExternalAddress((address) delayed_value_addr));
   22.34  
   22.35  #ifdef ASSERT
   22.36 -  Label L;
   22.37 -  testptr(tmp, tmp);
   22.38 -  jccb(Assembler::notZero, L);
   22.39 -  hlt();
   22.40 -  bind(L);
   22.41 +  { Label L;
   22.42 +    testptr(tmp, tmp);
   22.43 +    if (WizardMode) {
   22.44 +      jcc(Assembler::notZero, L);
   22.45 +      char* buf = new char[40];
   22.46 +      sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
   22.47 +      stop(buf);
   22.48 +    } else {
   22.49 +      jccb(Assembler::notZero, L);
   22.50 +      hlt();
   22.51 +    }
   22.52 +    bind(L);
   22.53 +  }
   22.54  #endif
   22.55  
   22.56    if (offset != 0)
    23.1 --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    23.2 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    23.3 @@ -68,19 +68,15 @@
    23.4    __ jmp(_continuation);
    23.5  }
    23.6  
    23.7 -#ifdef TIERED
    23.8  void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
    23.9    __ bind(_entry);
   23.10 +  ce->store_parameter(_method->as_register(), 1);
   23.11    ce->store_parameter(_bci, 0);
   23.12    __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::counter_overflow_id)));
   23.13    ce->add_call_info_here(_info);
   23.14    ce->verify_oop_map(_info);
   23.15 -
   23.16    __ jmp(_continuation);
   23.17  }
   23.18 -#endif // TIERED
   23.19 -
   23.20 -
   23.21  
   23.22  RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
   23.23                                 bool throw_index_out_of_bounds_exception)
    24.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    24.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    24.3 @@ -1613,6 +1613,189 @@
    24.4    __ bind(*op->stub()->continuation());
    24.5  }
    24.6  
    24.7 +void LIR_Assembler::type_profile_helper(Register mdo,
    24.8 +                                        ciMethodData *md, ciProfileData *data,
    24.9 +                                        Register recv, Label* update_done) {
   24.10 +  uint i;
   24.11 +  for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
   24.12 +    Label next_test;
   24.13 +    // See if the receiver is receiver[n].
   24.14 +    __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i))));
   24.15 +    __ jccb(Assembler::notEqual, next_test);
   24.16 +    Address data_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i)));
   24.17 +    __ addptr(data_addr, DataLayout::counter_increment);
   24.18 +    __ jmp(*update_done);
   24.19 +    __ bind(next_test);
   24.20 +  }
   24.21 +
   24.22 +  // Didn't find receiver; find next empty slot and fill it in
   24.23 +  for (i = 0; i < ReceiverTypeData::row_limit(); i++) {
   24.24 +    Label next_test;
   24.25 +    Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)));
   24.26 +    __ cmpptr(recv_addr, (intptr_t)NULL_WORD);
   24.27 +    __ jccb(Assembler::notEqual, next_test);
   24.28 +    __ movptr(recv_addr, recv);
   24.29 +    __ movptr(Address(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_count_offset(i))), DataLayout::counter_increment);
   24.30 +    __ jmp(*update_done);
   24.31 +    __ bind(next_test);
   24.32 +  }
   24.33 +}
   24.34 +
   24.35 +void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null) {
   24.36 +  // we always need a stub for the failure case.
   24.37 +  CodeStub* stub = op->stub();
   24.38 +  Register obj = op->object()->as_register();
   24.39 +  Register k_RInfo = op->tmp1()->as_register();
   24.40 +  Register klass_RInfo = op->tmp2()->as_register();
   24.41 +  Register dst = op->result_opr()->as_register();
   24.42 +  ciKlass* k = op->klass();
   24.43 +  Register Rtmp1 = noreg;
   24.44 +
   24.45 +  // check if it needs to be profiled
   24.46 +  ciMethodData* md;
   24.47 +  ciProfileData* data;
   24.48 +
   24.49 +  if (op->should_profile()) {
   24.50 +    ciMethod* method = op->profiled_method();
   24.51 +    assert(method != NULL, "Should have method");
   24.52 +    int bci = op->profiled_bci();
   24.53 +    md = method->method_data();
   24.54 +    if (md == NULL) {
   24.55 +      bailout("out of memory building methodDataOop");
   24.56 +      return;
   24.57 +    }
   24.58 +    data = md->bci_to_data(bci);
   24.59 +    assert(data != NULL,                "need data for type check");
   24.60 +    assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
   24.61 +  }
   24.62 +  Label profile_cast_success, profile_cast_failure;
   24.63 +  Label *success_target = op->should_profile() ? &profile_cast_success : success;
   24.64 +  Label *failure_target = op->should_profile() ? &profile_cast_failure : failure;
   24.65 +
   24.66 +  if (obj == k_RInfo) {
   24.67 +    k_RInfo = dst;
   24.68 +  } else if (obj == klass_RInfo) {
   24.69 +    klass_RInfo = dst;
   24.70 +  }
   24.71 +  if (k->is_loaded()) {
   24.72 +    select_different_registers(obj, dst, k_RInfo, klass_RInfo);
   24.73 +  } else {
   24.74 +    Rtmp1 = op->tmp3()->as_register();
   24.75 +    select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
   24.76 +  }
   24.77 +
   24.78 +  assert_different_registers(obj, k_RInfo, klass_RInfo);
   24.79 +  if (!k->is_loaded()) {
   24.80 +    jobject2reg_with_patching(k_RInfo, op->info_for_patch());
   24.81 +  } else {
   24.82 +#ifdef _LP64
   24.83 +    __ movoop(k_RInfo, k->constant_encoding());
   24.84 +#endif // _LP64
   24.85 +  }
   24.86 +  assert(obj != k_RInfo, "must be different");
   24.87 +
   24.88 +  __ cmpptr(obj, (int32_t)NULL_WORD);
   24.89 +  if (op->should_profile()) {
   24.90 +    Label not_null;
   24.91 +    __ jccb(Assembler::notEqual, not_null);
   24.92 +    // Object is null; update MDO and exit
   24.93 +    Register mdo  = klass_RInfo;
   24.94 +    __ movoop(mdo, md->constant_encoding());
   24.95 +    Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
   24.96 +    int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
   24.97 +    __ orl(data_addr, header_bits);
   24.98 +    __ jmp(*obj_is_null);
   24.99 +    __ bind(not_null);
  24.100 +  } else {
  24.101 +    __ jcc(Assembler::equal, *obj_is_null);
  24.102 +  }
  24.103 +  __ verify_oop(obj);
  24.104 +
  24.105 +  if (op->fast_check()) {
  24.106 +    // get object class
  24.107 +    // not a safepoint as obj null check happens earlier
  24.108 +    if (k->is_loaded()) {
  24.109 +#ifdef _LP64
  24.110 +      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.111 +#else
  24.112 +      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
  24.113 +#endif // _LP64
  24.114 +    } else {
  24.115 +      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.116 +    }
  24.117 +    __ jcc(Assembler::notEqual, *failure_target);
  24.118 +    // successful cast, fall through to profile or jump
  24.119 +  } else {
  24.120 +    // get object class
  24.121 +    // not a safepoint as obj null check happens earlier
  24.122 +    __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.123 +    if (k->is_loaded()) {
  24.124 +      // See if we get an immediate positive hit
  24.125 +#ifdef _LP64
  24.126 +      __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
  24.127 +#else
  24.128 +      __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
  24.129 +#endif // _LP64
  24.130 +      if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
  24.131 +        __ jcc(Assembler::notEqual, *failure_target);
  24.132 +        // successful cast, fall through to profile or jump
  24.133 +      } else {
  24.134 +        // See if we get an immediate positive hit
  24.135 +        __ jcc(Assembler::equal, *success_target);
  24.136 +        // check for self
  24.137 +#ifdef _LP64
  24.138 +        __ cmpptr(klass_RInfo, k_RInfo);
  24.139 +#else
  24.140 +        __ cmpoop(klass_RInfo, k->constant_encoding());
  24.141 +#endif // _LP64
  24.142 +        __ jcc(Assembler::equal, *success_target);
  24.143 +
  24.144 +        __ push(klass_RInfo);
  24.145 +#ifdef _LP64
  24.146 +        __ push(k_RInfo);
  24.147 +#else
  24.148 +        __ pushoop(k->constant_encoding());
  24.149 +#endif // _LP64
  24.150 +        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.151 +        __ pop(klass_RInfo);
  24.152 +        __ pop(klass_RInfo);
  24.153 +        // result is a boolean
  24.154 +        __ cmpl(klass_RInfo, 0);
  24.155 +        __ jcc(Assembler::equal, *failure_target);
  24.156 +        // successful cast, fall through to profile or jump
  24.157 +      }
  24.158 +    } else {
  24.159 +      // perform the fast part of the checking logic
  24.160 +      __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
  24.161 +      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  24.162 +      __ push(klass_RInfo);
  24.163 +      __ push(k_RInfo);
  24.164 +      __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.165 +      __ pop(klass_RInfo);
  24.166 +      __ pop(k_RInfo);
  24.167 +      // result is a boolean
  24.168 +      __ cmpl(k_RInfo, 0);
  24.169 +      __ jcc(Assembler::equal, *failure_target);
  24.170 +      // successful cast, fall through to profile or jump
  24.171 +    }
  24.172 +  }
  24.173 +  if (op->should_profile()) {
  24.174 +    Register mdo  = klass_RInfo, recv = k_RInfo;
  24.175 +    __ bind(profile_cast_success);
  24.176 +    __ movoop(mdo, md->constant_encoding());
  24.177 +    __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.178 +    Label update_done;
  24.179 +    type_profile_helper(mdo, md, data, recv, success);
  24.180 +    __ jmp(*success);
  24.181 +
  24.182 +    __ bind(profile_cast_failure);
  24.183 +    __ movoop(mdo, md->constant_encoding());
  24.184 +    Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  24.185 +    __ subptr(counter_addr, DataLayout::counter_increment);
  24.186 +    __ jmp(*failure);
  24.187 +  }
  24.188 +  __ jmp(*success);
  24.189 +}
  24.190  
  24.191  
  24.192  void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
  24.193 @@ -1625,9 +1808,44 @@
  24.194      Register Rtmp1 = op->tmp3()->as_register();
  24.195  
  24.196      CodeStub* stub = op->stub();
  24.197 -    Label done;
  24.198 +
  24.199 +    // check if it needs to be profiled
  24.200 +    ciMethodData* md;
  24.201 +    ciProfileData* data;
  24.202 +
  24.203 +    if (op->should_profile()) {
  24.204 +      ciMethod* method = op->profiled_method();
  24.205 +      assert(method != NULL, "Should have method");
  24.206 +      int bci = op->profiled_bci();
  24.207 +      md = method->method_data();
  24.208 +      if (md == NULL) {
  24.209 +        bailout("out of memory building methodDataOop");
  24.210 +        return;
  24.211 +      }
  24.212 +      data = md->bci_to_data(bci);
  24.213 +      assert(data != NULL,                "need data for type check");
  24.214 +      assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
  24.215 +    }
  24.216 +    Label profile_cast_success, profile_cast_failure, done;
  24.217 +    Label *success_target = op->should_profile() ? &profile_cast_success : &done;
  24.218 +    Label *failure_target = op->should_profile() ? &profile_cast_failure : stub->entry();
  24.219 +
  24.220      __ cmpptr(value, (int32_t)NULL_WORD);
  24.221 -    __ jcc(Assembler::equal, done);
  24.222 +    if (op->should_profile()) {
  24.223 +      Label not_null;
  24.224 +      __ jccb(Assembler::notEqual, not_null);
  24.225 +      // Object is null; update MDO and exit
  24.226 +      Register mdo  = klass_RInfo;
  24.227 +      __ movoop(mdo, md->constant_encoding());
  24.228 +      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
  24.229 +      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
  24.230 +      __ orl(data_addr, header_bits);
  24.231 +      __ jmp(done);
  24.232 +      __ bind(not_null);
  24.233 +    } else {
  24.234 +      __ jcc(Assembler::equal, done);
  24.235 +    }
  24.236 +
  24.237      add_debug_info_for_null_check_here(op->info_for_exception());
  24.238      __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
  24.239      __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
  24.240 @@ -1635,7 +1853,7 @@
  24.241      // get instance klass
  24.242      __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
  24.243      // perform the fast part of the checking logic
  24.244 -    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
  24.245 +    __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
  24.246      // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  24.247      __ push(klass_RInfo);
  24.248      __ push(k_RInfo);
  24.249 @@ -1644,229 +1862,52 @@
  24.250      __ pop(k_RInfo);
  24.251      // result is a boolean
  24.252      __ cmpl(k_RInfo, 0);
  24.253 -    __ jcc(Assembler::equal, *stub->entry());
  24.254 +    __ jcc(Assembler::equal, *failure_target);
  24.255 +    // fall through to the success case
  24.256 +
  24.257 +    if (op->should_profile()) {
  24.258 +      Register mdo  = klass_RInfo, recv = k_RInfo;
  24.259 +      __ bind(profile_cast_success);
  24.260 +      __ movoop(mdo, md->constant_encoding());
  24.261 +      __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
  24.262 +      Label update_done;
  24.263 +      type_profile_helper(mdo, md, data, recv, &done);
  24.264 +      __ jmpb(done);
  24.265 +
  24.266 +      __ bind(profile_cast_failure);
  24.267 +      __ movoop(mdo, md->constant_encoding());
  24.268 +      Address counter_addr(mdo, md->byte_offset_of_slot(data, CounterData::count_offset()));
  24.269 +      __ subptr(counter_addr, DataLayout::counter_increment);
  24.270 +      __ jmp(*stub->entry());
  24.271 +    }
  24.272 +
  24.273      __ bind(done);
  24.274 -  } else if (op->code() == lir_checkcast) {
  24.275 -    // we always need a stub for the failure case.
  24.276 -    CodeStub* stub = op->stub();
  24.277 -    Register obj = op->object()->as_register();
  24.278 -    Register k_RInfo = op->tmp1()->as_register();
  24.279 -    Register klass_RInfo = op->tmp2()->as_register();
  24.280 -    Register dst = op->result_opr()->as_register();
  24.281 -    ciKlass* k = op->klass();
  24.282 -    Register Rtmp1 = noreg;
  24.283 -
  24.284 -    Label done;
  24.285 -    if (obj == k_RInfo) {
  24.286 -      k_RInfo = dst;
  24.287 -    } else if (obj == klass_RInfo) {
  24.288 -      klass_RInfo = dst;
  24.289 -    }
  24.290 -    if (k->is_loaded()) {
  24.291 -      select_different_registers(obj, dst, k_RInfo, klass_RInfo);
  24.292 -    } else {
  24.293 -      Rtmp1 = op->tmp3()->as_register();
  24.294 -      select_different_registers(obj, dst, k_RInfo, klass_RInfo, Rtmp1);
  24.295 -    }
  24.296 -
  24.297 -    assert_different_registers(obj, k_RInfo, klass_RInfo);
  24.298 -    if (!k->is_loaded()) {
  24.299 -      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  24.300 -    } else {
  24.301 -#ifdef _LP64
  24.302 -      __ movoop(k_RInfo, k->constant_encoding());
  24.303 -#else
  24.304 -      k_RInfo = noreg;
  24.305 -#endif // _LP64
  24.306 -    }
  24.307 -    assert(obj != k_RInfo, "must be different");
  24.308 -    __ cmpptr(obj, (int32_t)NULL_WORD);
  24.309 -    if (op->profiled_method() != NULL) {
  24.310 -      ciMethod* method = op->profiled_method();
  24.311 -      int bci          = op->profiled_bci();
  24.312 -
  24.313 -      Label profile_done;
  24.314 -      __ jcc(Assembler::notEqual, profile_done);
  24.315 -      // Object is null; update methodDataOop
  24.316 -      ciMethodData* md = method->method_data();
  24.317 -      if (md == NULL) {
  24.318 -        bailout("out of memory building methodDataOop");
  24.319 -        return;
  24.320 +  } else
  24.321 +    if (code == lir_checkcast) {
  24.322 +      Register obj = op->object()->as_register();
  24.323 +      Register dst = op->result_opr()->as_register();
  24.324 +      Label success;
  24.325 +      emit_typecheck_helper(op, &success, op->stub()->entry(), &success);
  24.326 +      __ bind(success);
  24.327 +      if (dst != obj) {
  24.328 +        __ mov(dst, obj);
  24.329        }
  24.330 -      ciProfileData* data = md->bci_to_data(bci);
  24.331 -      assert(data != NULL,       "need data for checkcast");
  24.332 -      assert(data->is_BitData(), "need BitData for checkcast");
  24.333 -      Register mdo  = klass_RInfo;
  24.334 -      __ movoop(mdo, md->constant_encoding());
  24.335 -      Address data_addr(mdo, md->byte_offset_of_slot(data, DataLayout::header_offset()));
  24.336 -      int header_bits = DataLayout::flag_mask_to_header_mask(BitData::null_seen_byte_constant());
  24.337 -      __ orl(data_addr, header_bits);
  24.338 -      __ jmp(done);
  24.339 -      __ bind(profile_done);
  24.340 -    } else {
  24.341 -      __ jcc(Assembler::equal, done);
  24.342 -    }
  24.343 -    __ verify_oop(obj);
  24.344 -
  24.345 -    if (op->fast_check()) {
  24.346 -      // get object classo
  24.347 -      // not a safepoint as obj null check happens earlier
  24.348 -      if (k->is_loaded()) {
  24.349 -#ifdef _LP64
  24.350 -        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.351 -#else
  24.352 -        __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
  24.353 -#endif // _LP64
  24.354 -      } else {
  24.355 -        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.356 -
  24.357 -      }
  24.358 -      __ jcc(Assembler::notEqual, *stub->entry());
  24.359 -      __ bind(done);
  24.360 -    } else {
  24.361 -      // get object class
  24.362 -      // not a safepoint as obj null check happens earlier
  24.363 -      __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.364 -      if (k->is_loaded()) {
  24.365 -        // See if we get an immediate positive hit
  24.366 -#ifdef _LP64
  24.367 -        __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
  24.368 -#else
  24.369 -        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
  24.370 -#endif // _LP64
  24.371 -        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
  24.372 -          __ jcc(Assembler::notEqual, *stub->entry());
  24.373 -        } else {
  24.374 -          // See if we get an immediate positive hit
  24.375 -          __ jcc(Assembler::equal, done);
  24.376 -          // check for self
  24.377 -#ifdef _LP64
  24.378 -          __ cmpptr(klass_RInfo, k_RInfo);
  24.379 -#else
  24.380 -          __ cmpoop(klass_RInfo, k->constant_encoding());
  24.381 -#endif // _LP64
  24.382 -          __ jcc(Assembler::equal, done);
  24.383 -
  24.384 -          __ push(klass_RInfo);
  24.385 -#ifdef _LP64
  24.386 -          __ push(k_RInfo);
  24.387 -#else
  24.388 -          __ pushoop(k->constant_encoding());
  24.389 -#endif // _LP64
  24.390 -          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.391 -          __ pop(klass_RInfo);
  24.392 -          __ pop(klass_RInfo);
  24.393 -          // result is a boolean
  24.394 -          __ cmpl(klass_RInfo, 0);
  24.395 -          __ jcc(Assembler::equal, *stub->entry());
  24.396 -        }
  24.397 +    } else
  24.398 +      if (code == lir_instanceof) {
  24.399 +        Register obj = op->object()->as_register();
  24.400 +        Register dst = op->result_opr()->as_register();
  24.401 +        Label success, failure, done;
  24.402 +        emit_typecheck_helper(op, &success, &failure, &failure);
  24.403 +        __ bind(failure);
  24.404 +        __ xorptr(dst, dst);
  24.405 +        __ jmpb(done);
  24.406 +        __ bind(success);
  24.407 +        __ movptr(dst, 1);
  24.408          __ bind(done);
  24.409        } else {
  24.410 -        // perform the fast part of the checking logic
  24.411 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, &done, stub->entry(), NULL);
  24.412 -        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  24.413 -        __ push(klass_RInfo);
  24.414 -        __ push(k_RInfo);
  24.415 -        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.416 -        __ pop(klass_RInfo);
  24.417 -        __ pop(k_RInfo);
  24.418 -        // result is a boolean
  24.419 -        __ cmpl(k_RInfo, 0);
  24.420 -        __ jcc(Assembler::equal, *stub->entry());
  24.421 -        __ bind(done);
  24.422 +        ShouldNotReachHere();
  24.423        }
  24.424  
  24.425 -    }
  24.426 -    if (dst != obj) {
  24.427 -      __ mov(dst, obj);
  24.428 -    }
  24.429 -  } else if (code == lir_instanceof) {
  24.430 -    Register obj = op->object()->as_register();
  24.431 -    Register k_RInfo = op->tmp1()->as_register();
  24.432 -    Register klass_RInfo = op->tmp2()->as_register();
  24.433 -    Register dst = op->result_opr()->as_register();
  24.434 -    ciKlass* k = op->klass();
  24.435 -
  24.436 -    Label done;
  24.437 -    Label zero;
  24.438 -    Label one;
  24.439 -    if (obj == k_RInfo) {
  24.440 -      k_RInfo = klass_RInfo;
  24.441 -      klass_RInfo = obj;
  24.442 -    }
  24.443 -    // patching may screw with our temporaries on sparc,
  24.444 -    // so let's do it before loading the class
  24.445 -    if (!k->is_loaded()) {
  24.446 -      jobject2reg_with_patching(k_RInfo, op->info_for_patch());
  24.447 -    } else {
  24.448 -      LP64_ONLY(__ movoop(k_RInfo, k->constant_encoding()));
  24.449 -    }
  24.450 -    assert(obj != k_RInfo, "must be different");
  24.451 -
  24.452 -    __ verify_oop(obj);
  24.453 -    if (op->fast_check()) {
  24.454 -      __ cmpptr(obj, (int32_t)NULL_WORD);
  24.455 -      __ jcc(Assembler::equal, zero);
  24.456 -      // get object class
  24.457 -      // not a safepoint as obj null check happens earlier
  24.458 -      if (LP64_ONLY(false &&) k->is_loaded()) {
  24.459 -        NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()));
  24.460 -        k_RInfo = noreg;
  24.461 -      } else {
  24.462 -        __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.463 -
  24.464 -      }
  24.465 -      __ jcc(Assembler::equal, one);
  24.466 -    } else {
  24.467 -      // get object class
  24.468 -      // not a safepoint as obj null check happens earlier
  24.469 -      __ cmpptr(obj, (int32_t)NULL_WORD);
  24.470 -      __ jcc(Assembler::equal, zero);
  24.471 -      __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
  24.472 -
  24.473 -#ifndef _LP64
  24.474 -      if (k->is_loaded()) {
  24.475 -        // See if we get an immediate positive hit
  24.476 -        __ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->constant_encoding());
  24.477 -        __ jcc(Assembler::equal, one);
  24.478 -        if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() == k->super_check_offset()) {
  24.479 -          // check for self
  24.480 -          __ cmpoop(klass_RInfo, k->constant_encoding());
  24.481 -          __ jcc(Assembler::equal, one);
  24.482 -          __ push(klass_RInfo);
  24.483 -          __ pushoop(k->constant_encoding());
  24.484 -          __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.485 -          __ pop(klass_RInfo);
  24.486 -          __ pop(dst);
  24.487 -          __ jmp(done);
  24.488 -        }
  24.489 -      }
  24.490 -        else // next block is unconditional if LP64:
  24.491 -#endif // LP64
  24.492 -      {
  24.493 -        assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
  24.494 -
  24.495 -        // perform the fast part of the checking logic
  24.496 -        __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, dst, &one, &zero, NULL);
  24.497 -        // call out-of-line instance of __ check_klass_subtype_slow_path(...):
  24.498 -        __ push(klass_RInfo);
  24.499 -        __ push(k_RInfo);
  24.500 -        __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
  24.501 -        __ pop(klass_RInfo);
  24.502 -        __ pop(dst);
  24.503 -        __ jmp(done);
  24.504 -      }
  24.505 -    }
  24.506 -    __ bind(zero);
  24.507 -    __ xorptr(dst, dst);
  24.508 -    __ jmp(done);
  24.509 -    __ bind(one);
  24.510 -    __ movptr(dst, 1);
  24.511 -    __ bind(done);
  24.512 -  } else {
  24.513 -    ShouldNotReachHere();
  24.514 -  }
  24.515 -
  24.516  }
  24.517  
  24.518  
  24.519 @@ -1922,7 +1963,6 @@
  24.520    }
  24.521  }
  24.522  
  24.523 -
  24.524  void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, LIR_Opr result) {
  24.525    Assembler::Condition acond, ncond;
  24.526    switch (condition) {
  24.527 @@ -2014,11 +2054,11 @@
  24.528        jint c = right->as_constant_ptr()->as_jint();
  24.529        switch (code) {
  24.530          case lir_add: {
  24.531 -          __ increment(lreg, c);
  24.532 +          __ incrementl(lreg, c);
  24.533            break;
  24.534          }
  24.535          case lir_sub: {
  24.536 -          __ decrement(lreg, c);
  24.537 +          __ decrementl(lreg, c);
  24.538            break;
  24.539          }
  24.540          default: ShouldNotReachHere();
  24.541 @@ -3253,13 +3293,13 @@
  24.542    // Perform additional virtual call profiling for invokevirtual and
  24.543    // invokeinterface bytecodes
  24.544    if ((bc == Bytecodes::_invokevirtual || bc == Bytecodes::_invokeinterface) &&
  24.545 -      Tier1ProfileVirtualCalls) {
  24.546 +      C1ProfileVirtualCalls) {
  24.547      assert(op->recv()->is_single_cpu(), "recv must be allocated");
  24.548      Register recv = op->recv()->as_register();
  24.549      assert_different_registers(mdo, recv);
  24.550      assert(data->is_VirtualCallData(), "need VirtualCallData for virtual calls");
  24.551      ciKlass* known_klass = op->known_holder();
  24.552 -    if (Tier1OptimizeVirtualCallProfiling && known_klass != NULL) {
  24.553 +    if (C1OptimizeVirtualCallProfiling && known_klass != NULL) {
  24.554        // We know the type that will be seen at this call site; we can
  24.555        // statically update the methodDataOop rather than needing to do
  24.556        // dynamic tests on the receiver type
  24.557 @@ -3272,7 +3312,7 @@
  24.558          ciKlass* receiver = vc_data->receiver(i);
  24.559          if (known_klass->equals(receiver)) {
  24.560            Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
  24.561 -          __ addl(data_addr, DataLayout::counter_increment);
  24.562 +          __ addptr(data_addr, DataLayout::counter_increment);
  24.563            return;
  24.564          }
  24.565        }
  24.566 @@ -3288,49 +3328,26 @@
  24.567            Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
  24.568            __ movoop(recv_addr, known_klass->constant_encoding());
  24.569            Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
  24.570 -          __ addl(data_addr, DataLayout::counter_increment);
  24.571 +          __ addptr(data_addr, DataLayout::counter_increment);
  24.572            return;
  24.573          }
  24.574        }
  24.575      } else {
  24.576        __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
  24.577        Label update_done;
  24.578 -      uint i;
  24.579 -      for (i = 0; i < VirtualCallData::row_limit(); i++) {
  24.580 -        Label next_test;
  24.581 -        // See if the receiver is receiver[n].
  24.582 -        __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
  24.583 -        __ jcc(Assembler::notEqual, next_test);
  24.584 -        Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
  24.585 -        __ addl(data_addr, DataLayout::counter_increment);
  24.586 -        __ jmp(update_done);
  24.587 -        __ bind(next_test);
  24.588 -      }
  24.589 -
  24.590 -      // Didn't find receiver; find next empty slot and fill it in
  24.591 -      for (i = 0; i < VirtualCallData::row_limit(); i++) {
  24.592 -        Label next_test;
  24.593 -        Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
  24.594 -        __ cmpptr(recv_addr, (int32_t)NULL_WORD);
  24.595 -        __ jcc(Assembler::notEqual, next_test);
  24.596 -        __ movptr(recv_addr, recv);
  24.597 -        __ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
  24.598 -        __ jmp(update_done);
  24.599 -        __ bind(next_test);
  24.600 -      }
  24.601 +      type_profile_helper(mdo, md, data, recv, &update_done);
  24.602        // Receiver did not match any saved receiver and there is no empty row for it.
  24.603        // Increment total counter to indicate polymorphic case.
  24.604 -      __ addl(counter_addr, DataLayout::counter_increment);
  24.605 +      __ addptr(counter_addr, DataLayout::counter_increment);
  24.606  
  24.607        __ bind(update_done);
  24.608      }
  24.609    } else {
  24.610      // Static call
  24.611 -    __ addl(counter_addr, DataLayout::counter_increment);
  24.612 +    __ addptr(counter_addr, DataLayout::counter_increment);
  24.613    }
  24.614  }
  24.615  
  24.616 -
  24.617  void LIR_Assembler::emit_delay(LIR_OpDelay*) {
  24.618    Unimplemented();
  24.619  }
    25.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Tue Sep 21 06:58:44 2010 -0700
    25.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	Wed Sep 22 12:54:51 2010 -0400
    25.3 @@ -42,7 +42,10 @@
    25.4    // method.
    25.5    Address as_Address(LIR_Address* addr, Register tmp);
    25.6  
    25.7 -
    25.8 +  // Record the type of the receiver in ReceiverTypeData
    25.9 +  void type_profile_helper(Register mdo,
   25.10 +                           ciMethodData *md, ciProfileData *data,
   25.11 +                           Register recv, Label* update_done);
   25.12  public:
   25.13  
   25.14    void store_parameter(Register r, int offset_from_esp_in_words);
    26.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    26.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
    26.6 + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -182,10 +182,22 @@
   26.11  }
   26.12  
   26.13  
   26.14 -void LIRGenerator::increment_counter(address counter, int step) {
   26.15 +LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
   26.16 +  LIR_Opr r;
   26.17 +  if (type == T_LONG) {
   26.18 +    r = LIR_OprFact::longConst(x);
   26.19 +  } else if (type == T_INT) {
   26.20 +    r = LIR_OprFact::intConst(x);
   26.21 +  } else {
   26.22 +    ShouldNotReachHere();
   26.23 +  }
   26.24 +  return r;
   26.25 +}
   26.26 +
   26.27 +void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
   26.28    LIR_Opr pointer = new_pointer_register();
   26.29    __ move(LIR_OprFact::intptrConst(counter), pointer);
   26.30 -  LIR_Address* addr = new LIR_Address(pointer, T_INT);
   26.31 +  LIR_Address* addr = new LIR_Address(pointer, type);
   26.32    increment_counter(addr, step);
   26.33  }
   26.34  
   26.35 @@ -194,7 +206,6 @@
   26.36    __ add((LIR_Opr)addr, LIR_OprFact::intConst(step), (LIR_Opr)addr);
   26.37  }
   26.38  
   26.39 -
   26.40  void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
   26.41    __ cmp_mem_int(condition, base, disp, c, info);
   26.42  }
   26.43 @@ -1145,10 +1156,10 @@
   26.44      patching_info = state_for(x, x->state_before());
   26.45    }
   26.46    obj.load_item();
   26.47 -  LIR_Opr tmp = new_register(objectType);
   26.48    __ instanceof(reg, obj.result(), x->klass(),
   26.49 -                tmp, new_register(objectType), LIR_OprFact::illegalOpr,
   26.50 -                x->direct_compare(), patching_info);
   26.51 +                new_register(objectType), new_register(objectType),
   26.52 +                !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
   26.53 +                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
   26.54  }
   26.55  
   26.56  
   26.57 @@ -1188,8 +1199,7 @@
   26.58    // add safepoint before generating condition code so it can be recomputed
   26.59    if (x->is_safepoint()) {
   26.60      // increment backedge counter if needed
   26.61 -    increment_backedge_counter(state_for(x, x->state_before()));
   26.62 -
   26.63 +    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
   26.64      __ safepoint(LIR_OprFact::illegalOpr, state_for(x, x->state_before()));
   26.65    }
   26.66    set_no_result(x);
   26.67 @@ -1197,6 +1207,7 @@
   26.68    LIR_Opr left = xin->result();
   26.69    LIR_Opr right = yin->result();
   26.70    __ cmp(lir_cond(cond), left, right);
   26.71 +  // Generate branch profiling. Profiling code doesn't kill flags.
   26.72    profile_branch(x, cond);
   26.73    move_to_phi(x->state());
   26.74    if (x->x()->type()->is_float_kind()) {
    27.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    27.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    27.3 @@ -1068,15 +1068,16 @@
    27.4  
    27.5        break;
    27.6  
    27.7 -#ifdef TIERED
    27.8      case counter_overflow_id:
    27.9        {
   27.10 -        Register bci = rax;
   27.11 +        Register bci = rax, method = rbx;
   27.12          __ enter();
   27.13 -        OopMap* map = save_live_registers(sasm, 2);
   27.14 +        OopMap* map = save_live_registers(sasm, 3);
   27.15          // Retrieve bci
   27.16          __ movl(bci, Address(rbp, 2*BytesPerWord));
   27.17 -        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci);
   27.18 +        // And a pointer to the methodOop
   27.19 +        __ movptr(method, Address(rbp, 3*BytesPerWord));
   27.20 +        int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, counter_overflow), bci, method);
   27.21          oop_maps = new OopMapSet();
   27.22          oop_maps->add_gc_map(call_offset, map);
   27.23          restore_live_registers(sasm);
   27.24 @@ -1084,7 +1085,6 @@
   27.25          __ ret(0);
   27.26        }
   27.27        break;
   27.28 -#endif // TIERED
   27.29  
   27.30      case new_type_array_id:
   27.31      case new_object_array_id:
    28.1 --- a/src/cpu/x86/vm/c1_globals_x86.hpp	Tue Sep 21 06:58:44 2010 -0700
    28.2 +++ b/src/cpu/x86/vm/c1_globals_x86.hpp	Wed Sep 22 12:54:51 2010 -0400
    28.3 @@ -1,5 +1,5 @@
    28.4  /*
    28.5 - * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
    28.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    28.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8   *
    28.9   * This code is free software; you can redistribute it and/or modify it
   28.10 @@ -35,14 +35,7 @@
   28.11  define_pd_global(bool, UseOnStackReplacement,        true );
   28.12  define_pd_global(bool, TieredCompilation,            false);
   28.13  define_pd_global(intx, CompileThreshold,             1500 );
   28.14 -define_pd_global(intx, Tier2CompileThreshold,        1500 );
   28.15 -define_pd_global(intx, Tier3CompileThreshold,        2500 );
   28.16 -define_pd_global(intx, Tier4CompileThreshold,        4500 );
   28.17 -
   28.18  define_pd_global(intx, BackEdgeThreshold,            100000);
   28.19 -define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
   28.20 -define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
   28.21 -define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
   28.22  
   28.23  define_pd_global(intx, OnStackReplacePercentage,     933  );
   28.24  define_pd_global(intx, FreqInlineSize,               325  );
    29.1 --- a/src/cpu/x86/vm/c2_globals_x86.hpp	Tue Sep 21 06:58:44 2010 -0700
    29.2 +++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Wed Sep 22 12:54:51 2010 -0400
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright (c) 2000, 2007, Oracle and/or its affiliates. All rights reserved.
    29.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -39,19 +39,8 @@
   29.11  define_pd_global(bool, ProfileInterpreter,           true);
   29.12  #endif // CC_INTERP
   29.13  define_pd_global(bool, TieredCompilation,            false);
   29.14 -#ifdef TIERED
   29.15 -define_pd_global(intx, CompileThreshold,             1000);
   29.16 -#else
   29.17  define_pd_global(intx, CompileThreshold,             10000);
   29.18 -#endif // TIERED
   29.19 -define_pd_global(intx, Tier2CompileThreshold,        10000);
   29.20 -define_pd_global(intx, Tier3CompileThreshold,        20000);
   29.21 -define_pd_global(intx, Tier4CompileThreshold,        40000);
   29.22 -
   29.23  define_pd_global(intx, BackEdgeThreshold,            100000);
   29.24 -define_pd_global(intx, Tier2BackEdgeThreshold,       100000);
   29.25 -define_pd_global(intx, Tier3BackEdgeThreshold,       100000);
   29.26 -define_pd_global(intx, Tier4BackEdgeThreshold,       100000);
   29.27  
   29.28  define_pd_global(intx, OnStackReplacePercentage,     140);
   29.29  define_pd_global(intx, ConditionalMoveLimit,         3);
    30.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp	Tue Sep 21 06:58:44 2010 -0700
    30.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp	Wed Sep 22 12:54:51 2010 -0400
    30.3 @@ -1397,3 +1397,17 @@
    30.4      NOT_CC_INTERP(pop(state));
    30.5    }
    30.6  }
    30.7 +
    30.8 +// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
    30.9 +void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
   30.10 +                                                        int increment, int mask,
   30.11 +                                                        Register scratch, bool preloaded,
   30.12 +                                                        Condition cond, Label* where) {
   30.13 +  if (!preloaded) {
   30.14 +    movl(scratch, counter_addr);
   30.15 +  }
   30.16 +  incrementl(scratch, increment);
   30.17 +  movl(counter_addr, scratch);
   30.18 +  andl(scratch, mask);
   30.19 +  jcc(cond, *where);
   30.20 +}
    31.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.hpp	Tue Sep 21 06:58:44 2010 -0700
    31.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp	Wed Sep 22 12:54:51 2010 -0400
    31.3 @@ -185,6 +185,10 @@
    31.4                               bool decrement = false);
    31.5    void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
    31.6                               bool decrement = false);
    31.7 +  void increment_mask_and_jump(Address counter_addr,
    31.8 +                               int increment, int mask,
    31.9 +                               Register scratch, bool preloaded,
   31.10 +                               Condition cond, Label* where);
   31.11    void set_mdp_flag_at(Register mdp_in, int flag_constant);
   31.12    void test_mdp_data_at(Register mdp_in, int offset, Register value,
   31.13                          Register test_value_out,
    32.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp	Tue Sep 21 06:58:44 2010 -0700
    32.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp	Wed Sep 22 12:54:51 2010 -0400
    32.3 @@ -1480,3 +1480,17 @@
    32.4      NOT_CC_INTERP(pop(state));
    32.5    }
    32.6  }
    32.7 +
    32.8 +// Jump if ((*counter_addr += increment) & mask) satisfies the condition.
    32.9 +void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
   32.10 +                                                        int increment, int mask,
   32.11 +                                                        Register scratch, bool preloaded,
   32.12 +                                                        Condition cond, Label* where) {
   32.13 +  if (!preloaded) {
   32.14 +    movl(scratch, counter_addr);
   32.15 +  }
   32.16 +  incrementl(scratch, increment);
   32.17 +  movl(counter_addr, scratch);
   32.18 +  andl(scratch, mask);
   32.19 +  jcc(cond, *where);
   32.20 +}
    33.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.hpp	Tue Sep 21 06:58:44 2010 -0700
    33.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp	Wed Sep 22 12:54:51 2010 -0400
    33.3 @@ -194,6 +194,10 @@
    33.4                               bool decrement = false);
    33.5    void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
    33.6                               bool decrement = false);
    33.7 +  void increment_mask_and_jump(Address counter_addr,
    33.8 +                               int increment, int mask,
    33.9 +                               Register scratch, bool preloaded,
   33.10 +                               Condition cond, Label* where);
   33.11    void set_mdp_flag_at(Register mdp_in, int flag_constant);
   33.12    void test_mdp_data_at(Register mdp_in, int offset, Register value,
   33.13                          Register test_value_out,
    34.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Sep 21 06:58:44 2010 -0700
    34.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Wed Sep 22 12:54:51 2010 -0400
    34.3 @@ -27,6 +27,14 @@
    34.4  
    34.5  #define __ _masm->
    34.6  
    34.7 +#ifdef PRODUCT
    34.8 +#define BLOCK_COMMENT(str) /* nothing */
    34.9 +#else
   34.10 +#define BLOCK_COMMENT(str) __ block_comment(str)
   34.11 +#endif
   34.12 +
   34.13 +#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
   34.14 +
   34.15  address MethodHandleEntry::start_compiled_entry(MacroAssembler* _masm,
   34.16                                                  address interpreted_entry) {
   34.17    // Just before the actual machine code entry point, allocate space
   34.18 @@ -64,6 +72,7 @@
   34.19                             const char* error_message) {
   34.20    // Verify that argslot lies within (rsp, rbp].
   34.21    Label L_ok, L_bad;
   34.22 +  BLOCK_COMMENT("{ verify_argslot");
   34.23    __ cmpptr(argslot_reg, rbp);
   34.24    __ jccb(Assembler::above, L_bad);
   34.25    __ cmpptr(rsp, argslot_reg);
   34.26 @@ -71,6 +80,7 @@
   34.27    __ bind(L_bad);
   34.28    __ stop(error_message);
   34.29    __ bind(L_ok);
   34.30 +  BLOCK_COMMENT("} verify_argslot");
   34.31  }
   34.32  #endif
   34.33  
   34.34 @@ -80,16 +90,21 @@
   34.35    // rbx: methodOop
   34.36    // rcx: receiver method handle (must load from sp[MethodTypeForm.vmslots])
   34.37    // rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
   34.38 -  // rdx: garbage temp, blown away
   34.39 +  // rdx, rdi: garbage temp, blown away
   34.40  
   34.41    Register rbx_method = rbx;
   34.42    Register rcx_recv   = rcx;
   34.43    Register rax_mtype  = rax;
   34.44    Register rdx_temp   = rdx;
   34.45 +  Register rdi_temp   = rdi;
   34.46  
   34.47    // emit WrongMethodType path first, to enable jccb back-branch from main path
   34.48    Label wrong_method_type;
   34.49    __ bind(wrong_method_type);
   34.50 +  Label invoke_generic_slow_path;
   34.51 +  assert(methodOopDesc::intrinsic_id_size_in_bytes() == sizeof(u1), "");;
   34.52 +  __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeExact);
   34.53 +  __ jcc(Assembler::notEqual, invoke_generic_slow_path);
   34.54    __ push(rax_mtype);       // required mtype
   34.55    __ push(rcx_recv);        // bad mh (1st stacked argument)
   34.56    __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
   34.57 @@ -106,17 +121,68 @@
   34.58        tem = rax_mtype;          // in case there is another indirection
   34.59      }
   34.60    }
   34.61 -  Register rbx_temp = rbx_method; // done with incoming methodOop
   34.62  
   34.63    // given the MethodType, find out where the MH argument is buried
   34.64    __ movptr(rdx_temp, Address(rax_mtype,
   34.65 -                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rbx_temp)));
   34.66 -  __ movl(rdx_temp, Address(rdx_temp,
   34.67 -                            __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rbx_temp)));
   34.68 -  __ movptr(rcx_recv, __ argument_address(rdx_temp));
   34.69 +                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
   34.70 +  Register rdx_vmslots = rdx_temp;
   34.71 +  __ movl(rdx_vmslots, Address(rdx_temp,
   34.72 +                               __ delayed_value(java_dyn_MethodTypeForm::vmslots_offset_in_bytes, rdi_temp)));
   34.73 +  __ movptr(rcx_recv, __ argument_address(rdx_vmslots));
   34.74  
   34.75 -  __ check_method_handle_type(rax_mtype, rcx_recv, rdx_temp, wrong_method_type);
   34.76 -  __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
   34.77 +  trace_method_handle(_masm, "invokeExact");
   34.78 +
   34.79 +  __ check_method_handle_type(rax_mtype, rcx_recv, rdi_temp, wrong_method_type);
   34.80 +  __ jump_to_method_handle_entry(rcx_recv, rdi_temp);
   34.81 +
   34.82 +  // for invokeGeneric (only), apply argument and result conversions on the fly
   34.83 +  __ bind(invoke_generic_slow_path);
   34.84 +#ifdef ASSERT
   34.85 +  { Label L;
   34.86 +    __ cmpb(Address(rbx_method, methodOopDesc::intrinsic_id_offset_in_bytes()), (int) vmIntrinsics::_invokeGeneric);
   34.87 +    __ jcc(Assembler::equal, L);
   34.88 +    __ stop("bad methodOop::intrinsic_id");
   34.89 +    __ bind(L);
   34.90 +  }
   34.91 +#endif //ASSERT
   34.92 +  Register rbx_temp = rbx_method;  // don't need it now
   34.93 +
   34.94 +  // make room on the stack for another pointer:
   34.95 +  Register rcx_argslot = rcx_recv;
   34.96 +  __ lea(rcx_argslot, __ argument_address(rdx_vmslots, 1));
   34.97 +  insert_arg_slots(_masm, 2 * stack_move_unit(), _INSERT_REF_MASK,
   34.98 +                   rcx_argslot, rbx_temp, rdx_temp);
   34.99 +
  34.100 +  // load up an adapter from the calling type (Java weaves this)
  34.101 +  __ movptr(rdx_temp, Address(rax_mtype,
  34.102 +                              __ delayed_value(java_dyn_MethodType::form_offset_in_bytes, rdi_temp)));
  34.103 +  Register rdx_adapter = rdx_temp;
  34.104 +  // movptr(rdx_adapter, Address(rdx_temp, java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes()));
  34.105 +  // deal with old JDK versions:
  34.106 +  __ lea(rdi_temp, Address(rdx_temp,
  34.107 +                           __ delayed_value(java_dyn_MethodTypeForm::genericInvoker_offset_in_bytes, rdi_temp)));
  34.108 +  __ cmpptr(rdi_temp, rdx_temp);
  34.109 +  Label sorry_no_invoke_generic;
  34.110 +  __ jccb(Assembler::below, sorry_no_invoke_generic);
  34.111 +
  34.112 +  __ movptr(rdx_adapter, Address(rdi_temp, 0));
  34.113 +  __ testptr(rdx_adapter, rdx_adapter);
  34.114 +  __ jccb(Assembler::zero, sorry_no_invoke_generic);
  34.115 +  __ movptr(Address(rcx_argslot, 1 * Interpreter::stackElementSize), rdx_adapter);
  34.116 +  // As a trusted first argument, pass the type being called, so the adapter knows
  34.117 +  // the actual types of the arguments and return values.
  34.118 +  // (Generic invokers are shared among form-families of method-type.)
  34.119 +  __ movptr(Address(rcx_argslot, 0 * Interpreter::stackElementSize), rax_mtype);
  34.120 +  // FIXME: assert that rdx_adapter is of the right method-type.
  34.121 +  __ mov(rcx, rdx_adapter);
  34.122 +  trace_method_handle(_masm, "invokeGeneric");
  34.123 +  __ jump_to_method_handle_entry(rcx, rdi_temp);
  34.124 +
  34.125 +  __ bind(sorry_no_invoke_generic); // no invokeGeneric implementation available!
  34.126 +  __ movptr(rcx_recv, Address(rcx_argslot, -1 * Interpreter::stackElementSize));  // recover original MH
  34.127 +  __ push(rax_mtype);       // required mtype
  34.128 +  __ push(rcx_recv);        // bad mh (1st stacked argument)
  34.129 +  __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
  34.130  
  34.131    return entry_point;
  34.132  }
  34.133 @@ -164,11 +230,12 @@
  34.134    //   for (rdx = rsp + size; rdx < argslot; rdx++)
  34.135    //     rdx[-size] = rdx[0]
  34.136    //   argslot -= size;
  34.137 +  BLOCK_COMMENT("insert_arg_slots {");
  34.138    __ mov(rdx_temp, rsp);                        // source pointer for copy
  34.139    __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  34.140    {
  34.141      Label loop;
  34.142 -    __ bind(loop);
  34.143 +    __ BIND(loop);
  34.144      // pull one word down each time through the loop
  34.145      __ movptr(rbx_temp, Address(rdx_temp, 0));
  34.146      __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
  34.147 @@ -179,6 +246,7 @@
  34.148  
  34.149    // Now move the argslot down, to point to the opened-up space.
  34.150    __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
  34.151 +  BLOCK_COMMENT("} insert_arg_slots");
  34.152  }
  34.153  
  34.154  // Helper to remove argument slots from the stack.
  34.155 @@ -218,6 +286,7 @@
  34.156    }
  34.157  #endif
  34.158  
  34.159 +  BLOCK_COMMENT("remove_arg_slots {");
  34.160    // Pull up everything shallower than rax_argslot.
  34.161    // Then remove the excess space on the stack.
  34.162    // The stacked return address gets pulled up with everything else.
  34.163 @@ -229,7 +298,7 @@
  34.164    __ lea(rdx_temp, Address(rax_argslot, -wordSize)); // source pointer for copy
  34.165    {
  34.166      Label loop;
  34.167 -    __ bind(loop);
  34.168 +    __ BIND(loop);
  34.169      // pull one word up each time through the loop
  34.170      __ movptr(rbx_temp, Address(rdx_temp, 0));
  34.171      __ movptr(Address(rdx_temp, arg_slots, Address::times_ptr), rbx_temp);
  34.172 @@ -242,12 +311,14 @@
  34.173    __ lea(rsp, Address(rsp, arg_slots, Address::times_ptr));
  34.174    // And adjust the argslot address to point at the deletion point.
  34.175    __ lea(rax_argslot, Address(rax_argslot, arg_slots, Address::times_ptr));
  34.176 +  BLOCK_COMMENT("} remove_arg_slots");
  34.177  }
  34.178  
  34.179  #ifndef PRODUCT
  34.180  extern "C" void print_method_handle(oop mh);
  34.181  void trace_method_handle_stub(const char* adaptername,
  34.182                                oop mh,
  34.183 +                              intptr_t* saved_regs,
  34.184                                intptr_t* entry_sp,
  34.185                                intptr_t* saved_sp,
  34.186                                intptr_t* saved_bp) {
  34.187 @@ -256,9 +327,47 @@
  34.188    intptr_t* base_sp = (intptr_t*) saved_bp[frame::interpreter_frame_monitor_block_top_offset];
  34.189    printf("MH %s mh="INTPTR_FORMAT" sp=("INTPTR_FORMAT"+"INTX_FORMAT") stack_size="INTX_FORMAT" bp="INTPTR_FORMAT"\n",
  34.190           adaptername, (intptr_t)mh, (intptr_t)entry_sp, (intptr_t)(saved_sp - entry_sp), (intptr_t)(base_sp - last_sp), (intptr_t)saved_bp);
  34.191 -  if (last_sp != saved_sp)
  34.192 +  if (last_sp != saved_sp && last_sp != NULL)
  34.193      printf("*** last_sp="INTPTR_FORMAT"\n", (intptr_t)last_sp);
  34.194 -  if (Verbose)  print_method_handle(mh);
  34.195 +  if (Verbose) {
  34.196 +    printf(" reg dump: ");
  34.197 +    int saved_regs_count = (entry_sp-1) - saved_regs;
  34.198 +    // 32 bit: rdi rsi rbp rsp; rbx rdx rcx (*) rax
  34.199 +    int i;
  34.200 +    for (i = 0; i <= saved_regs_count; i++) {
  34.201 +      if (i > 0 && i % 4 == 0 && i != saved_regs_count)
  34.202 +        printf("\n   + dump: ");
  34.203 +      printf(" %d: "INTPTR_FORMAT, i, saved_regs[i]);
  34.204 +    }
  34.205 +    printf("\n");
  34.206 +    int stack_dump_count = 16;
  34.207 +    if (stack_dump_count < (int)(saved_bp + 2 - saved_sp))
  34.208 +      stack_dump_count = (int)(saved_bp + 2 - saved_sp);
  34.209 +    if (stack_dump_count > 64)  stack_dump_count = 48;
  34.210 +    for (i = 0; i < stack_dump_count; i += 4) {
  34.211 +      printf(" dump at SP[%d] "INTPTR_FORMAT": "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT" "INTPTR_FORMAT"\n",
  34.212 +             i, &entry_sp[i+0], entry_sp[i+0], entry_sp[i+1], entry_sp[i+2], entry_sp[i+3]);
  34.213 +    }
  34.214 +    print_method_handle(mh);
  34.215 +  }
  34.216 +}
  34.217 +void MethodHandles::trace_method_handle(MacroAssembler* _masm, const char* adaptername) {
  34.218 +  if (!TraceMethodHandles)  return;
  34.219 +  BLOCK_COMMENT("trace_method_handle {");
  34.220 +  __ push(rax);
  34.221 +  __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
  34.222 +  __ pusha();
  34.223 +  // arguments:
  34.224 +  __ push(rbp);               // interpreter frame pointer
  34.225 +  __ push(rsi);               // saved_sp
  34.226 +  __ push(rax);               // entry_sp
  34.227 +  __ push(rcx);               // mh
  34.228 +  __ push(rcx);
  34.229 +  __ movptr(Address(rsp, 0), (intptr_t) adaptername);
  34.230 +  __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
  34.231 +  __ popa();
  34.232 +  __ pop(rax);
  34.233 +  BLOCK_COMMENT("} trace_method_handle");
  34.234  }
  34.235  #endif //PRODUCT
  34.236  
  34.237 @@ -324,21 +433,9 @@
  34.238    address interp_entry = __ pc();
  34.239    if (UseCompressedOops)  __ unimplemented("UseCompressedOops");
  34.240  
  34.241 -#ifndef PRODUCT
  34.242 -  if (TraceMethodHandles) {
  34.243 -    __ push(rax); __ push(rbx); __ push(rcx); __ push(rdx); __ push(rsi); __ push(rdi);
  34.244 -    __ lea(rax, Address(rsp, wordSize*6)); // entry_sp
  34.245 -    // arguments:
  34.246 -    __ push(rbp);               // interpreter frame pointer
  34.247 -    __ push(rsi);               // saved_sp
  34.248 -    __ push(rax);               // entry_sp
  34.249 -    __ push(rcx);               // mh
  34.250 -    __ push(rcx);
  34.251 -    __ movptr(Address(rsp, 0), (intptr_t)entry_name(ek));
  34.252 -    __ call_VM_leaf(CAST_FROM_FN_PTR(address, trace_method_handle_stub), 5);
  34.253 -    __ pop(rdi); __ pop(rsi); __ pop(rdx); __ pop(rcx); __ pop(rbx); __ pop(rax);
  34.254 -  }
  34.255 -#endif //PRODUCT
  34.256 +  trace_method_handle(_masm, entry_name(ek));
  34.257 +
  34.258 +  BLOCK_COMMENT(entry_name(ek));
  34.259  
  34.260    switch ((int) ek) {
  34.261    case _raise_exception:
    35.1 --- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Tue Sep 21 06:58:44 2010 -0700
    35.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp	Wed Sep 22 12:54:51 2010 -0400
    35.3 @@ -33,7 +33,7 @@
    35.4  
    35.5  // MethodHandles adapters
    35.6  enum method_handles_platform_dependent_constants {
    35.7 -  method_handles_adapters_code_size = 5000
    35.8 +  method_handles_adapters_code_size = 10000
    35.9  };
   35.10  
   35.11  class x86 {
    36.1 --- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Tue Sep 21 06:58:44 2010 -0700
    36.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp	Wed Sep 22 12:54:51 2010 -0400
    36.3 @@ -35,7 +35,7 @@
    36.4  
    36.5  // MethodHandles adapters
    36.6  enum method_handles_platform_dependent_constants {
    36.7 -  method_handles_adapters_code_size = 13000
    36.8 +  method_handles_adapters_code_size = 26000
    36.9  };
   36.10  
   36.11  class x86 {
    37.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Sep 21 06:58:44 2010 -0700
    37.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Wed Sep 22 12:54:51 2010 -0400
    37.3 @@ -359,40 +359,62 @@
    37.4  // rcx: invocation counter
    37.5  //
    37.6  void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
    37.7 +  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
    37.8 +                                        in_bytes(InvocationCounter::counter_offset()));
    37.9 +  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
   37.10 +  if (TieredCompilation) {
   37.11 +    int increment = InvocationCounter::count_increment;
   37.12 +    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
   37.13 +    Label no_mdo, done;
   37.14 +    if (ProfileInterpreter) {
   37.15 +      // Are we profiling?
   37.16 +      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
   37.17 +      __ testptr(rax, rax);
   37.18 +      __ jccb(Assembler::zero, no_mdo);
   37.19 +      // Increment counter in the MDO
   37.20 +      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
   37.21 +                                                in_bytes(InvocationCounter::counter_offset()));
   37.22 +      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
   37.23 +      __ jmpb(done);
   37.24 +    }
   37.25 +    __ bind(no_mdo);
   37.26 +    // Increment counter in methodOop (we don't need to load it, it's in rcx).
   37.27 +    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
   37.28 +    __ bind(done);
   37.29 +  } else {
   37.30 +    const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() +
   37.31 +                                          InvocationCounter::counter_offset());
   37.32  
   37.33 -  const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
   37.34 -  const Address backedge_counter  (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
   37.35 +    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
   37.36 +      __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
   37.37 +    }
   37.38 +    // Update standard invocation counters
   37.39 +    __ movl(rax, backedge_counter);               // load backedge counter
   37.40  
   37.41 -  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
   37.42 -    __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
   37.43 +    __ incrementl(rcx, InvocationCounter::count_increment);
   37.44 +    __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
   37.45 +
   37.46 +    __ movl(invocation_counter, rcx);             // save invocation count
   37.47 +    __ addl(rcx, rax);                            // add both counters
   37.48 +
   37.49 +    // profile_method is non-null only for interpreted method so
   37.50 +    // profile_method != NULL == !native_call
   37.51 +    // BytecodeInterpreter only calls for native so code is elided.
   37.52 +
   37.53 +    if (ProfileInterpreter && profile_method != NULL) {
   37.54 +      // Test to see if we should create a method data oop
   37.55 +      __ cmp32(rcx,
   37.56 +               ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
   37.57 +      __ jcc(Assembler::less, *profile_method_continue);
   37.58 +
   37.59 +      // if no method data exists, go to profile_method
   37.60 +      __ test_method_data_pointer(rax, *profile_method);
   37.61 +    }
   37.62 +
   37.63 +    __ cmp32(rcx,
   37.64 +             ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
   37.65 +    __ jcc(Assembler::aboveEqual, *overflow);
   37.66    }
   37.67 -  // Update standard invocation counters
   37.68 -  __ movl(rax, backedge_counter);               // load backedge counter
   37.69 -
   37.70 -  __ incrementl(rcx, InvocationCounter::count_increment);
   37.71 -  __ andl(rax, InvocationCounter::count_mask_value);  // mask out the status bits
   37.72 -
   37.73 -  __ movl(invocation_counter, rcx);             // save invocation count
   37.74 -  __ addl(rcx, rax);                            // add both counters
   37.75 -
   37.76 -  // profile_method is non-null only for interpreted method so
   37.77 -  // profile_method != NULL == !native_call
   37.78 -  // BytecodeInterpreter only calls for native so code is elided.
   37.79 -
   37.80 -  if (ProfileInterpreter && profile_method != NULL) {
   37.81 -    // Test to see if we should create a method data oop
   37.82 -    __ cmp32(rcx,
   37.83 -             ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
   37.84 -    __ jcc(Assembler::less, *profile_method_continue);
   37.85 -
   37.86 -    // if no method data exists, go to profile_method
   37.87 -    __ test_method_data_pointer(rax, *profile_method);
   37.88 -  }
   37.89 -
   37.90 -  __ cmp32(rcx,
   37.91 -           ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
   37.92 -  __ jcc(Assembler::aboveEqual, *overflow);
   37.93 -
   37.94  }
   37.95  
   37.96  void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
    38.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Sep 21 06:58:44 2010 -0700
    38.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Wed Sep 22 12:54:51 2010 -0400
    38.3 @@ -310,42 +310,61 @@
    38.4          Label* overflow,
    38.5          Label* profile_method,
    38.6          Label* profile_method_continue) {
    38.7 +  const Address invocation_counter(rbx, in_bytes(methodOopDesc::invocation_counter_offset()) +
    38.8 +                                        in_bytes(InvocationCounter::counter_offset()));
    38.9 +  // Note: In tiered we increment either counters in methodOop or in MDO depending if we're profiling or not.
   38.10 +  if (TieredCompilation) {
   38.11 +    int increment = InvocationCounter::count_increment;
   38.12 +    int mask = ((1 << Tier0InvokeNotifyFreqLog)  - 1) << InvocationCounter::count_shift;
   38.13 +    Label no_mdo, done;
   38.14 +    if (ProfileInterpreter) {
   38.15 +      // Are we profiling?
   38.16 +      __ movptr(rax, Address(rbx, methodOopDesc::method_data_offset()));
   38.17 +      __ testptr(rax, rax);
   38.18 +      __ jccb(Assembler::zero, no_mdo);
   38.19 +      // Increment counter in the MDO
   38.20 +      const Address mdo_invocation_counter(rax, in_bytes(methodDataOopDesc::invocation_counter_offset()) +
   38.21 +                                                in_bytes(InvocationCounter::counter_offset()));
   38.22 +      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
   38.23 +      __ jmpb(done);
   38.24 +    }
   38.25 +    __ bind(no_mdo);
   38.26 +    // Increment counter in methodOop (we don't need to load it, it's in ecx).
   38.27 +    __ increment_mask_and_jump(invocation_counter, increment, mask, rcx, true, Assembler::zero, overflow);
   38.28 +    __ bind(done);
   38.29 +  } else {
   38.30 +    const Address backedge_counter(rbx,
   38.31 +                                   methodOopDesc::backedge_counter_offset() +
   38.32 +                                   InvocationCounter::counter_offset());
   38.33  
   38.34 -  const Address invocation_counter(rbx,
   38.35 -                                   methodOopDesc::invocation_counter_offset() +
   38.36 -                                   InvocationCounter::counter_offset());
   38.37 -  const Address backedge_counter(rbx,
   38.38 -                                 methodOopDesc::backedge_counter_offset() +
   38.39 -                                 InvocationCounter::counter_offset());
   38.40 +    if (ProfileInterpreter) { // %%% Merge this into methodDataOop
   38.41 +      __ incrementl(Address(rbx,
   38.42 +                            methodOopDesc::interpreter_invocation_counter_offset()));
   38.43 +    }
   38.44 +    // Update standard invocation counters
   38.45 +    __ movl(rax, backedge_counter);   // load backedge counter
   38.46  
   38.47 -  if (ProfileInterpreter) { // %%% Merge this into methodDataOop
   38.48 -    __ incrementl(Address(rbx,
   38.49 -                    methodOopDesc::interpreter_invocation_counter_offset()));
   38.50 +    __ incrementl(rcx, InvocationCounter::count_increment);
   38.51 +    __ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
   38.52 +
   38.53 +    __ movl(invocation_counter, rcx); // save invocation count
   38.54 +    __ addl(rcx, rax);                // add both counters
   38.55 +
   38.56 +    // profile_method is non-null only for interpreted method so
   38.57 +    // profile_method != NULL == !native_call
   38.58 +
   38.59 +    if (ProfileInterpreter && profile_method != NULL) {
   38.60 +      // Test to see if we should create a method data oop
   38.61 +      __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
   38.62 +      __ jcc(Assembler::less, *profile_method_continue);
   38.63 +
   38.64 +      // if no method data exists, go to profile_method
   38.65 +      __ test_method_data_pointer(rax, *profile_method);
   38.66 +    }
   38.67 +
   38.68 +    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
   38.69 +    __ jcc(Assembler::aboveEqual, *overflow);
   38.70    }
   38.71 -  // Update standard invocation counters
   38.72 -  __ movl(rax, backedge_counter); // load backedge counter
   38.73 -
   38.74 -  __ incrementl(rcx, InvocationCounter::count_increment);
   38.75 -  __ andl(rax, InvocationCounter::count_mask_value); // mask out the
   38.76 -                                                     // status bits
   38.77 -
   38.78 -  __ movl(invocation_counter, rcx); // save invocation count
   38.79 -  __ addl(rcx, rax); // add both counters
   38.80 -
   38.81 -  // profile_method is non-null only for interpreted method so
   38.82 -  // profile_method != NULL == !native_call
   38.83 -
   38.84 -  if (ProfileInterpreter && profile_method != NULL) {
   38.85 -    // Test to see if we should create a method data oop
   38.86 -    __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit));
   38.87 -    __ jcc(Assembler::less, *profile_method_continue);
   38.88 -
   38.89 -    // if no method data exists, go to profile_method
   38.90 -    __ test_method_data_pointer(rax, *profile_method);
   38.91 -  }
   38.92 -
   38.93 -  __ cmp32(rcx, ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit));
   38.94 -  __ jcc(Assembler::aboveEqual, *overflow);
   38.95  }
   38.96  
   38.97  void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
    39.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp	Tue Sep 21 06:58:44 2010 -0700
    39.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp	Wed Sep 22 12:54:51 2010 -0400
    39.3 @@ -1558,47 +1558,68 @@
    39.4      __ testl(rdx, rdx);             // check if forward or backward branch
    39.5      __ jcc(Assembler::positive, dispatch); // count only if backward branch
    39.6  
    39.7 -    // increment counter
    39.8 -    __ movl(rax, Address(rcx, be_offset));        // load backedge counter
    39.9 -    __ incrementl(rax, InvocationCounter::count_increment); // increment counter
   39.10 -    __ movl(Address(rcx, be_offset), rax);        // store counter
   39.11 -
   39.12 -    __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
   39.13 -    __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
   39.14 -    __ addl(rax, Address(rcx, be_offset));        // add both counters
   39.15 -
   39.16 -    if (ProfileInterpreter) {
   39.17 -      // Test to see if we should create a method data oop
   39.18 -      __ cmp32(rax,
   39.19 -               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
   39.20 -      __ jcc(Assembler::less, dispatch);
   39.21 -
   39.22 -      // if no method data exists, go to profile method
   39.23 -      __ test_method_data_pointer(rax, profile_method);
   39.24 -
   39.25 -      if (UseOnStackReplacement) {
   39.26 -        // check for overflow against rbx, which is the MDO taken count
   39.27 -        __ cmp32(rbx,
   39.28 -                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   39.29 -        __ jcc(Assembler::below, dispatch);
   39.30 -
   39.31 -        // When ProfileInterpreter is on, the backedge_count comes from the
   39.32 -        // methodDataOop, which value does not get reset on the call to
   39.33 -        // frequency_counter_overflow().  To avoid excessive calls to the overflow
   39.34 -        // routine while the method is being compiled, add a second test to make
   39.35 -        // sure the overflow function is called only once every overflow_frequency.
   39.36 -        const int overflow_frequency = 1024;
   39.37 -        __ andptr(rbx, overflow_frequency-1);
   39.38 -        __ jcc(Assembler::zero, backedge_counter_overflow);
   39.39 -
   39.40 +    if (TieredCompilation) {
   39.41 +      Label no_mdo;
   39.42 +      int increment = InvocationCounter::count_increment;
   39.43 +      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
   39.44 +      if (ProfileInterpreter) {
   39.45 +        // Are we profiling?
   39.46 +        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
   39.47 +        __ testptr(rbx, rbx);
   39.48 +        __ jccb(Assembler::zero, no_mdo);
   39.49 +        // Increment the MDO backedge counter
   39.50 +        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
   39.51 +                                                in_bytes(InvocationCounter::counter_offset()));
   39.52 +        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
   39.53 +                                   rax, false, Assembler::zero, &backedge_counter_overflow);
   39.54 +        __ jmp(dispatch);
   39.55        }
   39.56 +      __ bind(no_mdo);
   39.57 +      // Increment backedge counter in methodOop
   39.58 +      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
   39.59 +                                 rax, false, Assembler::zero, &backedge_counter_overflow);
   39.60      } else {
   39.61 -      if (UseOnStackReplacement) {
   39.62 -        // check for overflow against rax, which is the sum of the counters
   39.63 +      // increment counter
   39.64 +      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
   39.65 +      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
   39.66 +      __ movl(Address(rcx, be_offset), rax);        // store counter
   39.67 +
   39.68 +      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
   39.69 +      __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
   39.70 +      __ addl(rax, Address(rcx, be_offset));        // add both counters
   39.71 +
   39.72 +      if (ProfileInterpreter) {
   39.73 +        // Test to see if we should create a method data oop
   39.74          __ cmp32(rax,
   39.75 -                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   39.76 -        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
   39.77 -
   39.78 +                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
   39.79 +        __ jcc(Assembler::less, dispatch);
   39.80 +
   39.81 +        // if no method data exists, go to profile method
   39.82 +        __ test_method_data_pointer(rax, profile_method);
   39.83 +
   39.84 +        if (UseOnStackReplacement) {
   39.85 +          // check for overflow against rbx, which is the MDO taken count
   39.86 +          __ cmp32(rbx,
   39.87 +                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   39.88 +          __ jcc(Assembler::below, dispatch);
   39.89 +
   39.90 +          // When ProfileInterpreter is on, the backedge_count comes from the
   39.91 +          // methodDataOop, which value does not get reset on the call to
   39.92 +          // frequency_counter_overflow().  To avoid excessive calls to the overflow
   39.93 +          // routine while the method is being compiled, add a second test to make
   39.94 +          // sure the overflow function is called only once every overflow_frequency.
   39.95 +          const int overflow_frequency = 1024;
   39.96 +          __ andptr(rbx, overflow_frequency-1);
   39.97 +          __ jcc(Assembler::zero, backedge_counter_overflow);
   39.98 +        }
   39.99 +      } else {
  39.100 +        if (UseOnStackReplacement) {
  39.101 +          // check for overflow against rax, which is the sum of the counters
  39.102 +          __ cmp32(rax,
  39.103 +                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
  39.104 +          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
  39.105 +
  39.106 +        }
  39.107        }
  39.108      }
  39.109      __ bind(dispatch);
    40.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp	Tue Sep 21 06:58:44 2010 -0700
    40.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp	Wed Sep 22 12:54:51 2010 -0400
    40.3 @@ -1583,51 +1583,71 @@
    40.4      // r14: locals pointer
    40.5      __ testl(rdx, rdx);             // check if forward or backward branch
    40.6      __ jcc(Assembler::positive, dispatch); // count only if backward branch
    40.7 -
    40.8 -    // increment counter
    40.9 -    __ movl(rax, Address(rcx, be_offset));        // load backedge counter
   40.10 -    __ incrementl(rax, InvocationCounter::count_increment); // increment
   40.11 -                                                            // counter
   40.12 -    __ movl(Address(rcx, be_offset), rax);        // store counter
   40.13 -
   40.14 -    __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
   40.15 -    __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
   40.16 -    __ addl(rax, Address(rcx, be_offset));        // add both counters
   40.17 -
   40.18 -    if (ProfileInterpreter) {
   40.19 -      // Test to see if we should create a method data oop
   40.20 -      __ cmp32(rax,
   40.21 -               ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
   40.22 -      __ jcc(Assembler::less, dispatch);
   40.23 -
   40.24 -      // if no method data exists, go to profile method
   40.25 -      __ test_method_data_pointer(rax, profile_method);
   40.26 -
   40.27 -      if (UseOnStackReplacement) {
   40.28 -        // check for overflow against ebx which is the MDO taken count
   40.29 -        __ cmp32(rbx,
   40.30 -                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   40.31 -        __ jcc(Assembler::below, dispatch);
   40.32 -
   40.33 -        // When ProfileInterpreter is on, the backedge_count comes
   40.34 -        // from the methodDataOop, which value does not get reset on
   40.35 -        // the call to frequency_counter_overflow().  To avoid
   40.36 -        // excessive calls to the overflow routine while the method is
   40.37 -        // being compiled, add a second test to make sure the overflow
   40.38 -        // function is called only once every overflow_frequency.
   40.39 -        const int overflow_frequency = 1024;
   40.40 -        __ andl(rbx, overflow_frequency - 1);
   40.41 -        __ jcc(Assembler::zero, backedge_counter_overflow);
   40.42 -
   40.43 +    if (TieredCompilation) {
   40.44 +      Label no_mdo;
   40.45 +      int increment = InvocationCounter::count_increment;
   40.46 +      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
   40.47 +      if (ProfileInterpreter) {
   40.48 +        // Are we profiling?
   40.49 +        __ movptr(rbx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
   40.50 +        __ testptr(rbx, rbx);
   40.51 +        __ jccb(Assembler::zero, no_mdo);
   40.52 +        // Increment the MDO backedge counter
   40.53 +        const Address mdo_backedge_counter(rbx, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
   40.54 +                                           in_bytes(InvocationCounter::counter_offset()));
   40.55 +        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
   40.56 +                                   rax, false, Assembler::zero, &backedge_counter_overflow);
   40.57 +        __ jmp(dispatch);
   40.58        }
   40.59 +      __ bind(no_mdo);
   40.60 +      // Increment backedge counter in methodOop
   40.61 +      __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
   40.62 +                                 rax, false, Assembler::zero, &backedge_counter_overflow);
   40.63      } else {
   40.64 -      if (UseOnStackReplacement) {
   40.65 -        // check for overflow against eax, which is the sum of the
   40.66 -        // counters
   40.67 +      // increment counter
   40.68 +      __ movl(rax, Address(rcx, be_offset));        // load backedge counter
   40.69 +      __ incrementl(rax, InvocationCounter::count_increment); // increment counter
   40.70 +      __ movl(Address(rcx, be_offset), rax);        // store counter
   40.71 +
   40.72 +      __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
   40.73 +      __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
   40.74 +      __ addl(rax, Address(rcx, be_offset));        // add both counters
   40.75 +
   40.76 +      if (ProfileInterpreter) {
   40.77 +        // Test to see if we should create a method data oop
   40.78          __ cmp32(rax,
   40.79 -                 ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   40.80 -        __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
   40.81 -
   40.82 +                 ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
   40.83 +        __ jcc(Assembler::less, dispatch);
   40.84 +
   40.85 +        // if no method data exists, go to profile method
   40.86 +        __ test_method_data_pointer(rax, profile_method);
   40.87 +
   40.88 +        if (UseOnStackReplacement) {
   40.89 +          // check for overflow against ebx which is the MDO taken count
   40.90 +          __ cmp32(rbx,
   40.91 +                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
   40.92 +          __ jcc(Assembler::below, dispatch);
   40.93 +
   40.94 +          // When ProfileInterpreter is on, the backedge_count comes
   40.95 +          // from the methodDataOop, which value does not get reset on
   40.96 +          // the call to frequency_counter_overflow().  To avoid
   40.97 +          // excessive calls to the overflow routine while the method is
   40.98 +          // being compiled, add a second test to make sure the overflow
   40.99 +          // function is called only once every overflow_frequency.
  40.100 +          const int overflow_frequency = 1024;
  40.101 +          __ andl(rbx, overflow_frequency - 1);
  40.102 +          __ jcc(Assembler::zero, backedge_counter_overflow);
  40.103 +
  40.104 +        }
  40.105 +      } else {
  40.106 +        if (UseOnStackReplacement) {
  40.107 +          // check for overflow against eax, which is the sum of the
  40.108 +          // counters
  40.109 +          __ cmp32(rax,
  40.110 +                   ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
  40.111 +          __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
  40.112 +
  40.113 +        }
  40.114        }
  40.115      }
  40.116      __ bind(dispatch);
  40.117 @@ -2912,7 +2932,8 @@
  40.118  void TemplateTable::invokevirtual_helper(Register index,
  40.119                                           Register recv,
  40.120                                           Register flags) {
  40.121 -  // Uses temporary registers rax, rdx  assert_different_registers(index, recv, rax, rdx);
  40.122 +  // Uses temporary registers rax, rdx
  40.123 +  assert_different_registers(index, recv, rax, rdx);
  40.124  
  40.125    // Test for an invoke of a final method
  40.126    Label notFinal;
    41.1 --- a/src/cpu/x86/vm/vm_version_x86.hpp	Tue Sep 21 06:58:44 2010 -0700
    41.2 +++ b/src/cpu/x86/vm/vm_version_x86.hpp	Wed Sep 22 12:54:51 2010 -0400
    41.3 @@ -296,14 +296,14 @@
    41.4        result |= CPU_CX8;
    41.5      if (_cpuid_info.std_cpuid1_edx.bits.cmov != 0)
    41.6        result |= CPU_CMOV;
    41.7 -    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || is_amd() &&
    41.8 -        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0)
    41.9 +    if (_cpuid_info.std_cpuid1_edx.bits.fxsr != 0 || (is_amd() &&
   41.10 +        _cpuid_info.ext_cpuid1_edx.bits.fxsr != 0))
   41.11        result |= CPU_FXSR;
   41.12      // HT flag is set for multi-core processors also.
   41.13      if (threads_per_core() > 1)
   41.14        result |= CPU_HT;
   41.15 -    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || is_amd() &&
   41.16 -        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0)
   41.17 +    if (_cpuid_info.std_cpuid1_edx.bits.mmx != 0 || (is_amd() &&
   41.18 +        _cpuid_info.ext_cpuid1_edx.bits.mmx != 0))
   41.19        result |= CPU_MMX;
   41.20      if (_cpuid_info.std_cpuid1_edx.bits.sse != 0)
   41.21        result |= CPU_SSE;
    42.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Tue Sep 21 06:58:44 2010 -0700
    42.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Wed Sep 22 12:54:51 2010 -0400
    42.3 @@ -1,5 +1,5 @@
    42.4  /*
    42.5 - * Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved.
    42.6 + * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    42.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    42.8   *
    42.9   * This code is free software; you can redistribute it and/or modify it
   42.10 @@ -209,7 +209,7 @@
   42.11             (UseCompressedOops ? 16 : 0);  // 1 leaq can be 3 bytes + 1 long
   42.12    } else {
   42.13      // Itable stub size
   42.14 -    return (DebugVtables ? 512 : 72) + (CountCompiledCalls ? 13 : 0) +
   42.15 +    return (DebugVtables ? 512 : 74) + (CountCompiledCalls ? 13 : 0) +
   42.16             (UseCompressedOops ? 32 : 0);  // 2 leaqs
   42.17    }
   42.18    // In order to tune these parameters, run the JVM with VM options
    43.1 --- a/src/share/vm/c1/c1_Canonicalizer.cpp	Tue Sep 21 06:58:44 2010 -0700
    43.2 +++ b/src/share/vm/c1/c1_Canonicalizer.cpp	Wed Sep 22 12:54:51 2010 -0400
    43.3 @@ -652,10 +652,20 @@
    43.4          else if (lss_sux == gtr_sux) { cond = If::neq; tsux = lss_sux; fsux = eql_sux; }
    43.5          else if (eql_sux == gtr_sux) { cond = If::geq; tsux = eql_sux; fsux = lss_sux; }
    43.6          else                         { ShouldNotReachHere();                           }
    43.7 -           If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
    43.8 +        If* canon = new If(cmp->x(), cond, nan_sux == tsux, cmp->y(), tsux, fsux, cmp->state_before(), x->is_safepoint());
    43.9          if (cmp->x() == cmp->y()) {
   43.10            do_If(canon);
   43.11          } else {
   43.12 +          if (compilation()->profile_branches()) {
   43.13 +            // TODO: If profiling, leave floating point comparisons unoptimized.
   43.14 +            // We currently do not support profiling of the unordered case.
   43.15 +            switch(cmp->op()) {
   43.16 +              case Bytecodes::_fcmpl: case Bytecodes::_fcmpg:
   43.17 +              case Bytecodes::_dcmpl: case Bytecodes::_dcmpg:
   43.18 +                set_canonical(x);
   43.19 +                return;
   43.20 +            }
   43.21 +          }
   43.22            set_canonical(canon);
   43.23            set_bci(cmp->bci());
   43.24          }
   43.25 @@ -663,6 +673,8 @@
   43.26      } else if (l->as_InstanceOf() != NULL) {
   43.27        // NOTE: Code permanently disabled for now since it leaves the old InstanceOf
   43.28        //       instruction in the graph (it is pinned). Need to fix this at some point.
   43.29 +      //       It should also be left in the graph when generating a profiled method version or Goto
   43.30 +      //       has to know that it was an InstanceOf.
   43.31        return;
   43.32        // pattern: If ((obj instanceof klass) cond rc) => simplify to: IfInstanceOf or: Goto
   43.33        InstanceOf* inst = l->as_InstanceOf();
   43.34 @@ -881,4 +893,5 @@
   43.35  void Canonicalizer::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
   43.36  void Canonicalizer::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
   43.37  void Canonicalizer::do_ProfileCall(ProfileCall* x) {}
   43.38 -void Canonicalizer::do_ProfileCounter(ProfileCounter* x) {}
   43.39 +void Canonicalizer::do_ProfileInvoke(ProfileInvoke* x) {}
   43.40 +
    44.1 --- a/src/share/vm/c1/c1_Canonicalizer.hpp	Tue Sep 21 06:58:44 2010 -0700
    44.2 +++ b/src/share/vm/c1/c1_Canonicalizer.hpp	Wed Sep 22 12:54:51 2010 -0400
    44.3 @@ -1,5 +1,5 @@
    44.4  /*
    44.5 - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
    44.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    44.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.8   *
    44.9   * This code is free software; you can redistribute it and/or modify it
   44.10 @@ -24,9 +24,11 @@
   44.11  
   44.12  class Canonicalizer: InstructionVisitor {
   44.13   private:
   44.14 +  Compilation *_compilation;
   44.15    Instruction* _canonical;
   44.16    int _bci;
   44.17  
   44.18 +  Compilation *compilation()                     { return _compilation; }
   44.19    void set_canonical(Value x);
   44.20    void set_bci(int bci)                          { _bci = bci; }
   44.21    void set_constant(jint x)                      { set_canonical(new Constant(new IntConstant(x))); }
   44.22 @@ -43,7 +45,9 @@
   44.23                          int* scale);
   44.24  
   44.25   public:
   44.26 -  Canonicalizer(Value x, int bci)                { _canonical = x; _bci = bci; if (CanonicalizeNodes) x->visit(this); }
   44.27 +  Canonicalizer(Compilation* c, Value x, int bci) : _compilation(c), _canonical(x), _bci(bci) {
   44.28 +    if (CanonicalizeNodes) x->visit(this);
   44.29 +  }
   44.30    Value canonical() const                        { return _canonical; }
   44.31    int bci() const                                { return _bci; }
   44.32  
   44.33 @@ -92,5 +96,5 @@
   44.34    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   44.35    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   44.36    virtual void do_ProfileCall    (ProfileCall*     x);
   44.37 -  virtual void do_ProfileCounter (ProfileCounter*  x);
   44.38 +  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   44.39  };
    45.1 --- a/src/share/vm/c1/c1_CodeStubs.hpp	Tue Sep 21 06:58:44 2010 -0700
    45.2 +++ b/src/share/vm/c1/c1_CodeStubs.hpp	Wed Sep 22 12:54:51 2010 -0400
    45.3 @@ -80,20 +80,21 @@
    45.4    }
    45.5  };
    45.6  
    45.7 -#ifdef TIERED
    45.8  class CounterOverflowStub: public CodeStub {
    45.9   private:
   45.10    CodeEmitInfo* _info;
   45.11    int           _bci;
   45.12 +  LIR_Opr       _method;
   45.13  
   45.14  public:
   45.15 -  CounterOverflowStub(CodeEmitInfo* info, int bci) : _info(info), _bci(bci) {
   45.16 +  CounterOverflowStub(CodeEmitInfo* info, int bci, LIR_Opr method) :  _info(info), _bci(bci), _method(method) {
   45.17    }
   45.18  
   45.19    virtual void emit_code(LIR_Assembler* e);
   45.20  
   45.21    virtual void visit(LIR_OpVisitState* visitor) {
   45.22      visitor->do_slow_case(_info);
   45.23 +    visitor->do_input(_method);
   45.24    }
   45.25  
   45.26  #ifndef PRODUCT
   45.27 @@ -101,7 +102,6 @@
   45.28  #endif // PRODUCT
   45.29  
   45.30  };
   45.31 -#endif // TIERED
   45.32  
   45.33  class ConversionStub: public CodeStub {
   45.34   private:
    46.1 --- a/src/share/vm/c1/c1_Compilation.cpp	Tue Sep 21 06:58:44 2010 -0700
    46.2 +++ b/src/share/vm/c1/c1_Compilation.cpp	Wed Sep 22 12:54:51 2010 -0400
    46.3 @@ -290,9 +290,13 @@
    46.4  
    46.5    CHECK_BAILOUT_(no_frame_size);
    46.6  
    46.7 +  if (is_profiling()) {
    46.8 +    method()->build_method_data();
    46.9 +  }
   46.10 +
   46.11    {
   46.12      PhaseTraceTime timeit(_t_buildIR);
   46.13 -  build_hir();
   46.14 +    build_hir();
   46.15    }
   46.16    if (BailoutAfterHIR) {
   46.17      BAILOUT_("Bailing out because of -XX:+BailoutAfterHIR", no_frame_size);
   46.18 @@ -447,6 +451,7 @@
   46.19  , _masm(NULL)
   46.20  , _has_exception_handlers(false)
   46.21  , _has_fpu_code(true)   // pessimistic assumption
   46.22 +, _would_profile(false)
   46.23  , _has_unsafe_access(false)
   46.24  , _has_method_handle_invokes(false)
   46.25  , _bailout_msg(NULL)
   46.26 @@ -461,12 +466,16 @@
   46.27  #endif // PRODUCT
   46.28  {
   46.29    PhaseTraceTime timeit(_t_compile);
   46.30 -
   46.31    _arena = Thread::current()->resource_area();
   46.32    _env->set_compiler_data(this);
   46.33    _exception_info_list = new ExceptionInfoList();
   46.34    _implicit_exception_table.set_size(0);
   46.35    compile_method();
   46.36 +  if (is_profiling() && _would_profile) {
   46.37 +    ciMethodData *md = method->method_data();
   46.38 +    assert (md != NULL, "Should have MDO");
   46.39 +    md->set_would_profile(_would_profile);
   46.40 +  }
   46.41  }
   46.42  
   46.43  Compilation::~Compilation() {
    47.1 --- a/src/share/vm/c1/c1_Compilation.hpp	Tue Sep 21 06:58:44 2010 -0700
    47.2 +++ b/src/share/vm/c1/c1_Compilation.hpp	Wed Sep 22 12:54:51 2010 -0400
    47.3 @@ -69,6 +69,7 @@
    47.4    bool               _has_exception_handlers;
    47.5    bool               _has_fpu_code;
    47.6    bool               _has_unsafe_access;
    47.7 +  bool               _would_profile;
    47.8    bool               _has_method_handle_invokes;  // True if this method has MethodHandle invokes.
    47.9    const char*        _bailout_msg;
   47.10    ExceptionInfoList* _exception_info_list;
   47.11 @@ -143,6 +144,7 @@
   47.12    void set_has_exception_handlers(bool f)        { _has_exception_handlers = f; }
   47.13    void set_has_fpu_code(bool f)                  { _has_fpu_code = f; }
   47.14    void set_has_unsafe_access(bool f)             { _has_unsafe_access = f; }
   47.15 +  void set_would_profile(bool f)                 { _would_profile = f; }
   47.16    // Add a set of exception handlers covering the given PC offset
   47.17    void add_exception_handlers_for_pco(int pco, XHandlers* exception_handlers);
   47.18    // Statistics gathering
   47.19 @@ -202,6 +204,30 @@
   47.20    void compile_only_this_scope(outputStream* st, IRScope* scope);
   47.21    void exclude_this_method();
   47.22  #endif // PRODUCT
   47.23 +
   47.24 +  bool is_profiling() {
   47.25 +    return env()->comp_level() == CompLevel_full_profile ||
   47.26 +           env()->comp_level() == CompLevel_limited_profile;
   47.27 +  }
   47.28 +  bool count_invocations() { return is_profiling(); }
   47.29 +  bool count_backedges()   { return is_profiling(); }
   47.30 +
   47.31 +  // Helpers for generation of profile information
   47.32 +  bool profile_branches() {
   47.33 +    return env()->comp_level() == CompLevel_full_profile &&
   47.34 +      C1UpdateMethodData && C1ProfileBranches;
   47.35 +  }
   47.36 +  bool profile_calls() {
   47.37 +    return env()->comp_level() == CompLevel_full_profile &&
   47.38 +      C1UpdateMethodData && C1ProfileCalls;
   47.39 +  }
   47.40 +  bool profile_inlined_calls() {
   47.41 +    return profile_calls() && C1ProfileInlinedCalls;
   47.42 +  }
   47.43 +  bool profile_checkcasts() {
   47.44 +    return env()->comp_level() == CompLevel_full_profile &&
   47.45 +      C1UpdateMethodData && C1ProfileCheckcasts;
   47.46 +  }
   47.47  };
   47.48  
   47.49  
    48.1 --- a/src/share/vm/c1/c1_Compiler.hpp	Tue Sep 21 06:58:44 2010 -0700
    48.2 +++ b/src/share/vm/c1/c1_Compiler.hpp	Wed Sep 22 12:54:51 2010 -0400
    48.3 @@ -39,9 +39,7 @@
    48.4    // Name of this compiler
    48.5    virtual const char* name()                     { return "C1"; }
    48.6  
    48.7 -#ifdef TIERED
    48.8 -  virtual bool is_c1() { return true; };
    48.9 -#endif // TIERED
   48.10 +  virtual bool is_c1()                           { return true; };
   48.11  
   48.12    BufferBlob* build_buffer_blob();
   48.13  
    49.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Sep 21 06:58:44 2010 -0700
    49.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Wed Sep 22 12:54:51 2010 -0400
    49.3 @@ -967,6 +967,17 @@
    49.4    StoreIndexed* result = new StoreIndexed(array, index, length, type, value, lock_stack());
    49.5    append(result);
    49.6    _memory->store_value(value);
    49.7 +
    49.8 +  if (type == T_OBJECT && is_profiling()) {
    49.9 +    // Note that we'd collect profile data in this method if we wanted it.
   49.10 +    compilation()->set_would_profile(true);
   49.11 +
   49.12 +    if (profile_checkcasts()) {
   49.13 +      result->set_profiled_method(method());
   49.14 +      result->set_profiled_bci(bci());
   49.15 +      result->set_should_profile(true);
   49.16 +    }
   49.17 +  }
   49.18  }
   49.19  
   49.20  
   49.21 @@ -1144,8 +1155,16 @@
   49.22  
   49.23  
   49.24  void GraphBuilder::_goto(int from_bci, int to_bci) {
   49.25 -  profile_bci(from_bci);
   49.26 -  append(new Goto(block_at(to_bci), to_bci <= from_bci));
   49.27 +  Goto *x = new Goto(block_at(to_bci), to_bci <= from_bci);
   49.28 +  if (is_profiling()) {
   49.29 +    compilation()->set_would_profile(true);
   49.30 +  }
   49.31 +  if (profile_branches()) {
   49.32 +    x->set_profiled_method(method());
   49.33 +    x->set_profiled_bci(bci());
   49.34 +    x->set_should_profile(true);
   49.35 +  }
   49.36 +  append(x);
   49.37  }
   49.38  
   49.39  
   49.40 @@ -1153,11 +1172,45 @@
   49.41    BlockBegin* tsux = block_at(stream()->get_dest());
   49.42    BlockBegin* fsux = block_at(stream()->next_bci());
   49.43    bool is_bb = tsux->bci() < stream()->cur_bci() || fsux->bci() < stream()->cur_bci();
   49.44 -  If* if_node = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb))->as_If();
   49.45 -  if (profile_branches() && (if_node != NULL)) {
   49.46 -    if_node->set_profiled_method(method());
   49.47 -    if_node->set_profiled_bci(bci());
   49.48 -    if_node->set_should_profile(true);
   49.49 +  Instruction *i = append(new If(x, cond, false, y, tsux, fsux, is_bb ? state_before : NULL, is_bb));
   49.50 +
   49.51 +  if (is_profiling()) {
   49.52 +    If* if_node = i->as_If();
   49.53 +    if (if_node != NULL) {
   49.54 +      // Note that we'd collect profile data in this method if we wanted it.
   49.55 +      compilation()->set_would_profile(true);
   49.56 +      // At level 2 we need the proper bci to count backedges
   49.57 +      if_node->set_profiled_bci(bci());
   49.58 +      if (profile_branches()) {
   49.59 +        // Successors can be rotated by the canonicalizer, check for this case.
   49.60 +        if_node->set_profiled_method(method());
   49.61 +        if_node->set_should_profile(true);
   49.62 +        if (if_node->tsux() == fsux) {
   49.63 +          if_node->set_swapped(true);
   49.64 +        }
   49.65 +      }
   49.66 +      return;
   49.67 +    }
   49.68 +
   49.69 +    // Check if this If was reduced to Goto.
   49.70 +    Goto *goto_node = i->as_Goto();
   49.71 +    if (goto_node != NULL) {
   49.72 +      compilation()->set_would_profile(true);
   49.73 +      if (profile_branches()) {
   49.74 +        goto_node->set_profiled_method(method());
   49.75 +        goto_node->set_profiled_bci(bci());
   49.76 +        goto_node->set_should_profile(true);
   49.77 +        // Find out which successor is used.
   49.78 +        if (goto_node->default_sux() == tsux) {
   49.79 +          goto_node->set_direction(Goto::taken);
   49.80 +        } else if (goto_node->default_sux() == fsux) {
   49.81 +          goto_node->set_direction(Goto::not_taken);
   49.82 +        } else {
   49.83 +          ShouldNotReachHere();
   49.84 +        }
   49.85 +      }
   49.86 +      return;
   49.87 +    }
   49.88    }
   49.89  }
   49.90  
   49.91 @@ -1698,8 +1751,7 @@
   49.92  
   49.93    if (recv != NULL &&
   49.94        (code == Bytecodes::_invokespecial ||
   49.95 -       !is_loaded || target->is_final() ||
   49.96 -       profile_calls())) {
   49.97 +       !is_loaded || target->is_final())) {
   49.98      // invokespecial always needs a NULL check.  invokevirtual where
   49.99      // the target is final or where it's not known that whether the
  49.100      // target is final requires a NULL check.  Otherwise normal
  49.101 @@ -1709,15 +1761,23 @@
  49.102      null_check(recv);
  49.103    }
  49.104  
  49.105 -  if (profile_calls()) {
  49.106 -    assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
  49.107 -    ciKlass* target_klass = NULL;
  49.108 -    if (cha_monomorphic_target != NULL) {
  49.109 -      target_klass = cha_monomorphic_target->holder();
  49.110 -    } else if (exact_target != NULL) {
  49.111 -      target_klass = exact_target->holder();
  49.112 +  if (is_profiling()) {
  49.113 +    if (recv != NULL && profile_calls()) {
  49.114 +      null_check(recv);
  49.115      }
  49.116 -    profile_call(recv, target_klass);
  49.117 +    // Note that we'd collect profile data in this method if we wanted it.
  49.118 +    compilation()->set_would_profile(true);
  49.119 +
  49.120 +    if (profile_calls()) {
  49.121 +      assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
  49.122 +      ciKlass* target_klass = NULL;
  49.123 +      if (cha_monomorphic_target != NULL) {
  49.124 +        target_klass = cha_monomorphic_target->holder();
  49.125 +      } else if (exact_target != NULL) {
  49.126 +        target_klass = exact_target->holder();
  49.127 +      }
  49.128 +      profile_call(recv, target_klass);
  49.129 +    }
  49.130    }
  49.131  
  49.132    Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
  49.133 @@ -1782,10 +1842,16 @@
  49.134    CheckCast* c = new CheckCast(klass, apop(), state_before);
  49.135    apush(append_split(c));
  49.136    c->set_direct_compare(direct_compare(klass));
  49.137 -  if (profile_checkcasts()) {
  49.138 -    c->set_profiled_method(method());
  49.139 -    c->set_profiled_bci(bci());
  49.140 -    c->set_should_profile(true);
  49.141 +
  49.142 +  if (is_profiling()) {
  49.143 +    // Note that we'd collect profile data in this method if we wanted it.
  49.144 +    compilation()->set_would_profile(true);
  49.145 +
  49.146 +    if (profile_checkcasts()) {
  49.147 +      c->set_profiled_method(method());
  49.148 +      c->set_profiled_bci(bci());
  49.149 +      c->set_should_profile(true);
  49.150 +    }
  49.151    }
  49.152  }
  49.153  
  49.154 @@ -1797,6 +1863,17 @@
  49.155    InstanceOf* i = new InstanceOf(klass, apop(), state_before);
  49.156    ipush(append_split(i));
  49.157    i->set_direct_compare(direct_compare(klass));
  49.158 +
  49.159 +  if (is_profiling()) {
  49.160 +    // Note that we'd collect profile data in this method if we wanted it.
  49.161 +    compilation()->set_would_profile(true);
  49.162 +
  49.163 +    if (profile_checkcasts()) {
  49.164 +      i->set_profiled_method(method());
  49.165 +      i->set_profiled_bci(bci());
  49.166 +      i->set_should_profile(true);
  49.167 +    }
  49.168 +  }
  49.169  }
  49.170  
  49.171  
  49.172 @@ -1868,7 +1945,7 @@
  49.173  
  49.174  
  49.175  Instruction* GraphBuilder::append_with_bci(Instruction* instr, int bci) {
  49.176 -  Canonicalizer canon(instr, bci);
  49.177 +  Canonicalizer canon(compilation(), instr, bci);
  49.178    Instruction* i1 = canon.canonical();
  49.179    if (i1->bci() != -99) {
  49.180      // Canonicalizer returned an instruction which was already
  49.181 @@ -2651,18 +2728,6 @@
  49.182    h->set_depth_first_number(0);
  49.183  
  49.184    Value l = h;
  49.185 -  if (profile_branches()) {
  49.186 -    // Increment the invocation count on entry to the method.  We
  49.187 -    // can't use profile_invocation here because append isn't setup to
  49.188 -    // work properly at this point.  The instruction have to be
  49.189 -    // appended to the instruction stream by hand.
  49.190 -    Value m = new Constant(new ObjectConstant(compilation()->method()));
  49.191 -    h->set_next(m, 0);
  49.192 -    Value p = new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1);
  49.193 -    m->set_next(p, 0);
  49.194 -    l = p;
  49.195 -  }
  49.196 -
  49.197    BlockEnd* g = new Goto(entry, false);
  49.198    l->set_next(g, entry->bci());
  49.199    h->set_end(g);
  49.200 @@ -2688,10 +2753,10 @@
  49.201    // also necessary when profiling so that there's a single block that
  49.202    // can increment the interpreter_invocation_count.
  49.203    BlockBegin* new_header_block;
  49.204 -  if (std_entry->number_of_preds() == 0 && !profile_branches()) {
  49.205 +  if (std_entry->number_of_preds() > 0 || count_invocations() || count_backedges()) {
  49.206 +    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  49.207 +  } else {
  49.208      new_header_block = std_entry;
  49.209 -  } else {
  49.210 -    new_header_block = header_block(std_entry, BlockBegin::std_entry_flag, state);
  49.211    }
  49.212  
  49.213    // setup start block (root for the IR graph)
  49.214 @@ -3115,16 +3180,21 @@
  49.215  
  49.216    Values* args = state()->pop_arguments(callee->arg_size());
  49.217    ValueStack* locks = lock_stack();
  49.218 -  if (profile_calls()) {
  49.219 +
  49.220 +  if (is_profiling()) {
  49.221      // Don't profile in the special case where the root method
  49.222      // is the intrinsic
  49.223      if (callee != method()) {
  49.224 -      Value recv = NULL;
  49.225 -      if (has_receiver) {
  49.226 -        recv = args->at(0);
  49.227 -        null_check(recv);
  49.228 +      // Note that we'd collect profile data in this method if we wanted it.
  49.229 +      compilation()->set_would_profile(true);
  49.230 +      if (profile_calls()) {
  49.231 +        Value recv = NULL;
  49.232 +        if (has_receiver) {
  49.233 +          recv = args->at(0);
  49.234 +          null_check(recv);
  49.235 +        }
  49.236 +        profile_call(recv, NULL);
  49.237        }
  49.238 -      profile_call(recv, NULL);
  49.239      }
  49.240    }
  49.241  
  49.242 @@ -3296,7 +3366,9 @@
  49.243  
  49.244  bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
  49.245    assert(!callee->is_native(), "callee must not be native");
  49.246 -
  49.247 +  if (count_backedges() && callee->has_loops()) {
  49.248 +    INLINE_BAILOUT("too complex for tiered");
  49.249 +  }
  49.250    // first perform tests of things it's not possible to inline
  49.251    if (callee->has_exception_handlers() &&
  49.252        !InlineMethodsWithExceptionHandlers) INLINE_BAILOUT("callee has exception handlers");
  49.253 @@ -3365,12 +3437,19 @@
  49.254      null_check(recv);
  49.255    }
  49.256  
  49.257 -  if (profile_inlined_calls()) {
  49.258 -    profile_call(recv, holder_known ? callee->holder() : NULL);
  49.259 +  if (is_profiling()) {
  49.260 +    // Note that we'd collect profile data in this method if we wanted it.
  49.261 +    // this may be redundant here...
  49.262 +    compilation()->set_would_profile(true);
  49.263 +
  49.264 +    if (profile_calls()) {
  49.265 +      profile_call(recv, holder_known ? callee->holder() : NULL);
  49.266 +    }
  49.267 +    if (profile_inlined_calls()) {
  49.268 +      profile_invocation(callee, state(), 0);
  49.269 +    }
  49.270    }
  49.271  
  49.272 -  profile_invocation(callee);
  49.273 -
  49.274    // Introduce a new callee continuation point - if the callee has
  49.275    // more than one return instruction or the return does not allow
  49.276    // fall-through of control flow, all return instructions of the
  49.277 @@ -3755,30 +3834,10 @@
  49.278  }
  49.279  #endif // PRODUCT
  49.280  
  49.281 -
  49.282  void GraphBuilder::profile_call(Value recv, ciKlass* known_holder) {
  49.283    append(new ProfileCall(method(), bci(), recv, known_holder));
  49.284  }
  49.285  
  49.286 -
  49.287 -void GraphBuilder::profile_invocation(ciMethod* callee) {
  49.288 -  if (profile_calls()) {
  49.289 -    // increment the interpreter_invocation_count for the inlinee
  49.290 -    Value m = append(new Constant(new ObjectConstant(callee)));
  49.291 -    append(new ProfileCounter(m, methodOopDesc::interpreter_invocation_counter_offset_in_bytes(), 1));
  49.292 -  }
  49.293 +void GraphBuilder::profile_invocation(ciMethod* callee, ValueStack* state, int bci) {
  49.294 +  append(new ProfileInvoke(callee, state, bci));
  49.295  }
  49.296 -
  49.297 -
  49.298 -void GraphBuilder::profile_bci(int bci) {
  49.299 -  if (profile_branches()) {
  49.300 -    ciMethodData* md = method()->method_data();
  49.301 -    if (md == NULL) {
  49.302 -      BAILOUT("out of memory building methodDataOop");
  49.303 -    }
  49.304 -    ciProfileData* data = md->bci_to_data(bci);
  49.305 -    assert(data != NULL && data->is_JumpData(), "need JumpData for goto");
  49.306 -    Value mdo = append(new Constant(new ObjectConstant(md)));
  49.307 -    append(new ProfileCounter(mdo, md->byte_offset_of_slot(data, JumpData::taken_offset()), 1));
  49.308 -  }
  49.309 -}
    50.1 --- a/src/share/vm/c1/c1_GraphBuilder.hpp	Tue Sep 21 06:58:44 2010 -0700
    50.2 +++ b/src/share/vm/c1/c1_GraphBuilder.hpp	Wed Sep 22 12:54:51 2010 -0400
    50.3 @@ -1,5 +1,5 @@
    50.4  /*
    50.5 - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    50.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    50.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    50.8   *
    50.9   * This code is free software; you can redistribute it and/or modify it
   50.10 @@ -342,27 +342,17 @@
   50.11  
   50.12    NOT_PRODUCT(void print_inline_result(ciMethod* callee, bool res);)
   50.13  
   50.14 -  // methodDataOop profiling helpers
   50.15    void profile_call(Value recv, ciKlass* predicted_holder);
   50.16 -  void profile_invocation(ciMethod* method);
   50.17 -  void profile_bci(int bci);
   50.18 +  void profile_invocation(ciMethod* inlinee, ValueStack* state, int bci);
   50.19  
   50.20 -  // Helpers for generation of profile information
   50.21 -  bool profile_branches() {
   50.22 -    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
   50.23 -      Tier1UpdateMethodData && Tier1ProfileBranches;
   50.24 -  }
   50.25 -  bool profile_calls() {
   50.26 -    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
   50.27 -      Tier1UpdateMethodData && Tier1ProfileCalls;
   50.28 -  }
   50.29 -  bool profile_inlined_calls() {
   50.30 -    return profile_calls() && Tier1ProfileInlinedCalls;
   50.31 -  }
   50.32 -  bool profile_checkcasts() {
   50.33 -    return _compilation->env()->comp_level() == CompLevel_fast_compile &&
   50.34 -      Tier1UpdateMethodData && Tier1ProfileCheckcasts;
   50.35 -  }
   50.36 +  // Shortcuts to profiling control.
   50.37 +  bool is_profiling()          { return _compilation->is_profiling();          }
   50.38 +  bool count_invocations()     { return _compilation->count_invocations();     }
   50.39 +  bool count_backedges()       { return _compilation->count_backedges();       }
   50.40 +  bool profile_branches()      { return _compilation->profile_branches();      }
   50.41 +  bool profile_calls()         { return _compilation->profile_calls();         }
   50.42 +  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
   50.43 +  bool profile_checkcasts()    { return _compilation->profile_checkcasts();    }
   50.44  
   50.45   public:
   50.46    NOT_PRODUCT(void print_stats();)
    51.1 --- a/src/share/vm/c1/c1_IR.cpp	Tue Sep 21 06:58:44 2010 -0700
    51.2 +++ b/src/share/vm/c1/c1_IR.cpp	Wed Sep 22 12:54:51 2010 -0400
    51.3 @@ -296,19 +296,21 @@
    51.4  
    51.5  void IR::optimize() {
    51.6    Optimizer opt(this);
    51.7 -  if (DoCEE) {
    51.8 -    opt.eliminate_conditional_expressions();
    51.9 +  if (!compilation()->profile_branches()) {
   51.10 +    if (DoCEE) {
   51.11 +      opt.eliminate_conditional_expressions();
   51.12  #ifndef PRODUCT
   51.13 -    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
   51.14 -    if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
   51.15 +      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after CEE"); print(true); }
   51.16 +      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after CEE"); print(false); }
   51.17  #endif
   51.18 -  }
   51.19 -  if (EliminateBlocks) {
   51.20 -    opt.eliminate_blocks();
   51.21 +    }
   51.22 +    if (EliminateBlocks) {
   51.23 +      opt.eliminate_blocks();
   51.24  #ifndef PRODUCT
   51.25 -    if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
   51.26 -    if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
   51.27 +      if (PrintCFG || PrintCFG1) { tty->print_cr("CFG after block elimination"); print(true); }
   51.28 +      if (PrintIR  || PrintIR1 ) { tty->print_cr("IR after block elimination"); print(false); }
   51.29  #endif
   51.30 +    }
   51.31    }
   51.32    if (EliminateNullChecks) {
   51.33      opt.eliminate_null_checks();
   51.34 @@ -484,6 +486,8 @@
   51.35    BitMap2D   _loop_map;            // two-dimensional bit set: a bit is set if a block is contained in a loop
   51.36    BlockList  _work_list;           // temporary list (used in mark_loops and compute_order)
   51.37  
   51.38 +  Compilation* _compilation;
   51.39 +
   51.40    // accessors for _visited_blocks and _active_blocks
   51.41    void init_visited()                     { _active_blocks.clear(); _visited_blocks.clear(); }
   51.42    bool is_visited(BlockBegin* b) const    { return _visited_blocks.at(b->block_id()); }
   51.43 @@ -526,8 +530,9 @@
   51.44    NOT_PRODUCT(void print_blocks();)
   51.45    DEBUG_ONLY(void verify();)
   51.46  
   51.47 +  Compilation* compilation() const { return _compilation; }
   51.48   public:
   51.49 -  ComputeLinearScanOrder(BlockBegin* start_block);
   51.50 +  ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block);
   51.51  
   51.52    // accessors for final result
   51.53    BlockList* linear_scan_order() const    { return _linear_scan_order; }
   51.54 @@ -535,7 +540,7 @@
   51.55  };
   51.56  
   51.57  
   51.58 -ComputeLinearScanOrder::ComputeLinearScanOrder(BlockBegin* start_block) :
   51.59 +ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start_block) :
   51.60    _max_block_id(BlockBegin::number_of_blocks()),
   51.61    _num_blocks(0),
   51.62    _num_loops(0),
   51.63 @@ -547,13 +552,18 @@
   51.64    _loop_end_blocks(8),
   51.65    _work_list(8),
   51.66    _linear_scan_order(NULL), // initialized later with correct size
   51.67 -  _loop_map(0, 0)           // initialized later with correct size
   51.68 +  _loop_map(0, 0),          // initialized later with correct size
   51.69 +  _compilation(c)
   51.70  {
   51.71    TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
   51.72  
   51.73    init_visited();
   51.74    count_edges(start_block, NULL);
   51.75  
   51.76 +  if (compilation()->is_profiling()) {
   51.77 +    compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
   51.78 +  }
   51.79 +
   51.80    if (_num_loops > 0) {
   51.81      mark_loops();
   51.82      clear_non_natural_loops(start_block);
   51.83 @@ -1130,7 +1140,7 @@
   51.84  void IR::compute_code() {
   51.85    assert(is_valid(), "IR must be valid");
   51.86  
   51.87 -  ComputeLinearScanOrder compute_order(start());
   51.88 +  ComputeLinearScanOrder compute_order(compilation(), start());
   51.89    _num_loops = compute_order.num_loops();
   51.90    _code = compute_order.linear_scan_order();
   51.91  }
    52.1 --- a/src/share/vm/c1/c1_Instruction.cpp	Tue Sep 21 06:58:44 2010 -0700
    52.2 +++ b/src/share/vm/c1/c1_Instruction.cpp	Wed Sep 22 12:54:51 2010 -0400
    52.3 @@ -740,9 +740,9 @@
    52.4  
    52.5  
    52.6  #ifndef PRODUCT
    52.7 -  #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
    52.8 +   #define TRACE_PHI(code) if (PrintPhiFunctions) { code; }
    52.9  #else
   52.10 -  #define TRACE_PHI(coce)
   52.11 +   #define TRACE_PHI(coce)
   52.12  #endif
   52.13  
   52.14  
   52.15 @@ -1011,3 +1011,7 @@
   52.16  void Throw::state_values_do(ValueVisitor* f) {
   52.17    BlockEnd::state_values_do(f);
   52.18  }
   52.19 +
   52.20 +void ProfileInvoke::state_values_do(ValueVisitor* f) {
   52.21 +  if (state() != NULL) state()->values_do(f);
   52.22 +}
    53.1 --- a/src/share/vm/c1/c1_Instruction.hpp	Tue Sep 21 06:58:44 2010 -0700
    53.2 +++ b/src/share/vm/c1/c1_Instruction.hpp	Wed Sep 22 12:54:51 2010 -0400
    53.3 @@ -98,7 +98,7 @@
    53.4  class         UnsafePrefetchRead;
    53.5  class         UnsafePrefetchWrite;
    53.6  class   ProfileCall;
    53.7 -class   ProfileCounter;
    53.8 +class   ProfileInvoke;
    53.9  
   53.10  // A Value is a reference to the instruction creating the value
   53.11  typedef Instruction* Value;
   53.12 @@ -195,7 +195,7 @@
   53.13    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) = 0;
   53.14    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) = 0;
   53.15    virtual void do_ProfileCall    (ProfileCall*     x) = 0;
   53.16 -  virtual void do_ProfileCounter (ProfileCounter*  x) = 0;
   53.17 +  virtual void do_ProfileInvoke  (ProfileInvoke*   x) = 0;
   53.18  };
   53.19  
   53.20  
   53.21 @@ -906,11 +906,13 @@
   53.22   private:
   53.23    Value       _value;
   53.24  
   53.25 +  ciMethod* _profiled_method;
   53.26 +  int       _profiled_bci;
   53.27   public:
   53.28    // creation
   53.29    StoreIndexed(Value array, Value index, Value length, BasicType elt_type, Value value, ValueStack* lock_stack)
   53.30    : AccessIndexed(array, index, length, elt_type, lock_stack)
   53.31 -  , _value(value)
   53.32 +  , _value(value), _profiled_method(NULL), _profiled_bci(0)
   53.33    {
   53.34      set_flag(NeedsWriteBarrierFlag, (as_ValueType(elt_type)->is_object()));
   53.35      set_flag(NeedsStoreCheckFlag, (as_ValueType(elt_type)->is_object()));
   53.36 @@ -923,7 +925,13 @@
   53.37    IRScope* scope() const;                        // the state's scope
   53.38    bool needs_write_barrier() const               { return check_flag(NeedsWriteBarrierFlag); }
   53.39    bool needs_store_check() const                 { return check_flag(NeedsStoreCheckFlag); }
   53.40 -
   53.41 +  // Helpers for methodDataOop profiling
   53.42 +  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
   53.43 +  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
   53.44 +  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
   53.45 +  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
   53.46 +  ciMethod* profiled_method() const                  { return _profiled_method;     }
   53.47 +  int       profiled_bci() const                     { return _profiled_bci;        }
   53.48    // generic
   53.49    virtual void input_values_do(ValueVisitor* f)   { AccessIndexed::input_values_do(f); f->visit(&_value); }
   53.50  };
   53.51 @@ -1297,9 +1305,14 @@
   53.52    Value       _obj;
   53.53    ValueStack* _state_before;
   53.54  
   53.55 +  ciMethod* _profiled_method;
   53.56 +  int       _profiled_bci;
   53.57 +
   53.58   public:
   53.59    // creation
   53.60 -  TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before) : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before) {
   53.61 +  TypeCheck(ciKlass* klass, Value obj, ValueType* type, ValueStack* state_before)
   53.62 +  : StateSplit(type), _klass(klass), _obj(obj), _state_before(state_before),
   53.63 +    _profiled_method(NULL), _profiled_bci(0) {
   53.64      ASSERT_VALUES
   53.65      set_direct_compare(false);
   53.66    }
   53.67 @@ -1318,20 +1331,22 @@
   53.68    virtual bool can_trap() const                  { return true; }
   53.69    virtual void input_values_do(ValueVisitor* f)   { StateSplit::input_values_do(f); f->visit(&_obj); }
   53.70    virtual void other_values_do(ValueVisitor* f);
   53.71 +
   53.72 +  // Helpers for methodDataOop profiling
   53.73 +  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
   53.74 +  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
   53.75 +  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
   53.76 +  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
   53.77 +  ciMethod* profiled_method() const                  { return _profiled_method;     }
   53.78 +  int       profiled_bci() const                     { return _profiled_bci;        }
   53.79  };
   53.80  
   53.81  
   53.82  LEAF(CheckCast, TypeCheck)
   53.83 - private:
   53.84 -  ciMethod* _profiled_method;
   53.85 -  int       _profiled_bci;
   53.86 -
   53.87   public:
   53.88    // creation
   53.89    CheckCast(ciKlass* klass, Value obj, ValueStack* state_before)
   53.90 -  : TypeCheck(klass, obj, objectType, state_before)
   53.91 -  , _profiled_method(NULL)
   53.92 -  , _profiled_bci(0) {}
   53.93 +  : TypeCheck(klass, obj, objectType, state_before) {}
   53.94  
   53.95    void set_incompatible_class_change_check() {
   53.96      set_flag(ThrowIncompatibleClassChangeErrorFlag, true);
   53.97 @@ -1340,17 +1355,8 @@
   53.98      return check_flag(ThrowIncompatibleClassChangeErrorFlag);
   53.99    }
  53.100  
  53.101 -  // Helpers for methodDataOop profiling
  53.102 -  void set_should_profile(bool value)                { set_flag(ProfileMDOFlag, value); }
  53.103 -  void set_profiled_method(ciMethod* method)         { _profiled_method = method;   }
  53.104 -  void set_profiled_bci(int bci)                     { _profiled_bci = bci;         }
  53.105 -  bool      should_profile() const                   { return check_flag(ProfileMDOFlag); }
  53.106 -  ciMethod* profiled_method() const                  { return _profiled_method;     }
  53.107 -  int       profiled_bci() const                     { return _profiled_bci;        }
  53.108 -
  53.109    ciType* declared_type() const;
  53.110    ciType* exact_type() const;
  53.111 -
  53.112  };
  53.113  
  53.114  
  53.115 @@ -1734,19 +1740,44 @@
  53.116  
  53.117  LEAF(Goto, BlockEnd)
  53.118   public:
  53.119 +  enum Direction {
  53.120 +    none,            // Just a regular goto
  53.121 +    taken, not_taken // Goto produced from If
  53.122 +  };
  53.123 + private:
  53.124 +  ciMethod*   _profiled_method;
  53.125 +  int         _profiled_bci;
  53.126 +  Direction   _direction;
  53.127 + public:
  53.128    // creation
  53.129 -  Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false) : BlockEnd(illegalType, state_before, is_safepoint) {
  53.130 +  Goto(BlockBegin* sux, ValueStack* state_before, bool is_safepoint = false)
  53.131 +    : BlockEnd(illegalType, state_before, is_safepoint)
  53.132 +    , _direction(none)
  53.133 +    , _profiled_method(NULL)
  53.134 +    , _profiled_bci(0) {
  53.135      BlockList* s = new BlockList(1);
  53.136      s->append(sux);
  53.137      set_sux(s);
  53.138    }
  53.139  
  53.140 -  Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint) {
  53.141 +  Goto(BlockBegin* sux, bool is_safepoint) : BlockEnd(illegalType, NULL, is_safepoint)
  53.142 +                                           , _direction(none)
  53.143 +                                           , _profiled_method(NULL)
  53.144 +                                           , _profiled_bci(0) {
  53.145      BlockList* s = new BlockList(1);
  53.146      s->append(sux);
  53.147      set_sux(s);
  53.148    }
  53.149  
  53.150 +  bool should_profile() const                    { return check_flag(ProfileMDOFlag); }
  53.151 +  ciMethod* profiled_method() const              { return _profiled_method; } // set only for profiled branches
  53.152 +  int profiled_bci() const                       { return _profiled_bci; }
  53.153 +  Direction direction() const                    { return _direction; }
  53.154 +
  53.155 +  void set_should_profile(bool value)            { set_flag(ProfileMDOFlag, value); }
  53.156 +  void set_profiled_method(ciMethod* method)     { _profiled_method = method; }
  53.157 +  void set_profiled_bci(int bci)                 { _profiled_bci = bci; }
  53.158 +  void set_direction(Direction d)                { _direction = d; }
  53.159  };
  53.160  
  53.161  
  53.162 @@ -1757,6 +1788,8 @@
  53.163    Value       _y;
  53.164    ciMethod*   _profiled_method;
  53.165    int         _profiled_bci; // Canonicalizer may alter bci of If node
  53.166 +  bool        _swapped;      // Is the order reversed with respect to the original If in the
  53.167 +                             // bytecode stream?
  53.168   public:
  53.169    // creation
  53.170    // unordered_is_true is valid for float/double compares only
  53.171 @@ -1767,6 +1800,7 @@
  53.172    , _y(y)
  53.173    , _profiled_method(NULL)
  53.174    , _profiled_bci(0)
  53.175 +  , _swapped(false)
  53.176    {
  53.177      ASSERT_VALUES
  53.178      set_flag(UnorderedIsTrueFlag, unordered_is_true);
  53.179 @@ -1788,7 +1822,8 @@
  53.180    BlockBegin* usux() const                       { return sux_for(unordered_is_true()); }
  53.181    bool should_profile() const                    { return check_flag(ProfileMDOFlag); }
  53.182    ciMethod* profiled_method() const              { return _profiled_method; } // set only for profiled branches
  53.183 -  int profiled_bci() const                       { return _profiled_bci; }    // set only for profiled branches
  53.184 +  int profiled_bci() const                       { return _profiled_bci; }    // set for profiled branches and tiered
  53.185 +  bool is_swapped() const                        { return _swapped; }
  53.186  
  53.187    // manipulation
  53.188    void swap_operands() {
  53.189 @@ -1807,7 +1842,7 @@
  53.190    void set_should_profile(bool value)             { set_flag(ProfileMDOFlag, value); }
  53.191    void set_profiled_method(ciMethod* method)      { _profiled_method = method; }
  53.192    void set_profiled_bci(int bci)                  { _profiled_bci = bci;       }
  53.193 -
  53.194 +  void set_swapped(bool value)                    { _swapped = value;         }
  53.195    // generic
  53.196    virtual void input_values_do(ValueVisitor* f)   { BlockEnd::input_values_do(f); f->visit(&_x); f->visit(&_y); }
  53.197  };
  53.198 @@ -2235,7 +2270,6 @@
  53.199    }
  53.200  };
  53.201  
  53.202 -
  53.203  LEAF(ProfileCall, Instruction)
  53.204   private:
  53.205    ciMethod* _method;
  53.206 @@ -2263,35 +2297,32 @@
  53.207    virtual void input_values_do(ValueVisitor* f)   { if (_recv != NULL) f->visit(&_recv); }
  53.208  };
  53.209  
  53.210 +// Use to trip invocation counter of an inlined method
  53.211  
  53.212 -//
  53.213 -// Simple node representing a counter update generally used for updating MDOs
  53.214 -//
  53.215 -LEAF(ProfileCounter, Instruction)
  53.216 +LEAF(ProfileInvoke, Instruction)
  53.217   private:
  53.218 -  Value     _mdo;
  53.219 -  int       _offset;
  53.220 -  int       _increment;
  53.221 +  ciMethod*   _inlinee;
  53.222 +  ValueStack* _state;
  53.223 +  int         _bci_of_invoke;
  53.224  
  53.225   public:
  53.226 -  ProfileCounter(Value mdo, int offset, int increment = 1)
  53.227 +  ProfileInvoke(ciMethod* inlinee,  ValueStack* state, int bci)
  53.228      : Instruction(voidType)
  53.229 -    , _mdo(mdo)
  53.230 -    , _offset(offset)
  53.231 -    , _increment(increment)
  53.232 +    , _inlinee(inlinee)
  53.233 +    , _bci_of_invoke(bci)
  53.234 +    , _state(state)
  53.235    {
  53.236 -    // The ProfileCounter has side-effects and must occur precisely where located
  53.237 +    // The ProfileInvoke has side-effects and must occur precisely where located QQQ???
  53.238      pin();
  53.239    }
  53.240  
  53.241 -  Value mdo()      { return _mdo; }
  53.242 -  int offset()     { return _offset; }
  53.243 -  int increment()  { return _increment; }
  53.244 -
  53.245 -  virtual void input_values_do(ValueVisitor* f)   { f->visit(&_mdo); }
  53.246 +  ciMethod* inlinee()      { return _inlinee; }
  53.247 +  ValueStack* state()      { return _state; }
  53.248 +  int bci_of_invoke()      { return _bci_of_invoke; }
  53.249 +  virtual void input_values_do(ValueVisitor*)   {}
  53.250 +  virtual void state_values_do(ValueVisitor*);
  53.251  };
  53.252  
  53.253 -
  53.254  class BlockPair: public CompilationResourceObj {
  53.255   private:
  53.256    BlockBegin* _from;
    54.1 --- a/src/share/vm/c1/c1_InstructionPrinter.cpp	Tue Sep 21 06:58:44 2010 -0700
    54.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.cpp	Wed Sep 22 12:54:51 2010 -0400
    54.3 @@ -1,5 +1,5 @@
    54.4  /*
    54.5 - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
    54.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    54.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    54.8   *
    54.9   * This code is free software; you can redistribute it and/or modify it
   54.10 @@ -819,7 +819,6 @@
   54.11    output()->put(')');
   54.12  }
   54.13  
   54.14 -
   54.15  void InstructionPrinter::do_ProfileCall(ProfileCall* x) {
   54.16    output()->print("profile ");
   54.17    print_value(x->recv());
   54.18 @@ -831,20 +830,11 @@
   54.19    output()->put(')');
   54.20  }
   54.21  
   54.22 +void InstructionPrinter::do_ProfileInvoke(ProfileInvoke* x) {
   54.23 +  output()->print("profile_invoke ");
   54.24 +  output()->print(" %s.%s", x->inlinee()->holder()->name()->as_utf8(), x->inlinee()->name()->as_utf8());
   54.25 +  output()->put(')');
   54.26  
   54.27 -void InstructionPrinter::do_ProfileCounter(ProfileCounter* x) {
   54.28 -
   54.29 -  ObjectConstant* oc = x->mdo()->type()->as_ObjectConstant();
   54.30 -  if (oc != NULL && oc->value()->is_method() &&
   54.31 -      x->offset() == methodOopDesc::interpreter_invocation_counter_offset_in_bytes()) {
   54.32 -    print_value(x->mdo());
   54.33 -    output()->print(".interpreter_invocation_count += %d", x->increment());
   54.34 -  } else {
   54.35 -    output()->print("counter [");
   54.36 -    print_value(x->mdo());
   54.37 -    output()->print(" + %d] += %d", x->offset(), x->increment());
   54.38 -  }
   54.39  }
   54.40  
   54.41 -
   54.42  #endif // PRODUCT
    55.1 --- a/src/share/vm/c1/c1_InstructionPrinter.hpp	Tue Sep 21 06:58:44 2010 -0700
    55.2 +++ b/src/share/vm/c1/c1_InstructionPrinter.hpp	Wed Sep 22 12:54:51 2010 -0400
    55.3 @@ -1,5 +1,5 @@
    55.4  /*
    55.5 - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
    55.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    55.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    55.8   *
    55.9   * This code is free software; you can redistribute it and/or modify it
   55.10 @@ -123,6 +123,6 @@
   55.11    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   55.12    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   55.13    virtual void do_ProfileCall    (ProfileCall*     x);
   55.14 -  virtual void do_ProfileCounter (ProfileCounter*  x);
   55.15 +  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   55.16  };
   55.17  #endif // PRODUCT
    56.1 --- a/src/share/vm/c1/c1_LIR.cpp	Tue Sep 21 06:58:44 2010 -0700
    56.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Wed Sep 22 12:54:51 2010 -0400
    56.3 @@ -345,9 +345,8 @@
    56.4  LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
    56.5                                   LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3,
    56.6                                   bool fast_check, CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch,
    56.7 -                                 CodeStub* stub,
    56.8 -                                 ciMethod* profiled_method,
    56.9 -                                 int profiled_bci)
   56.10 +                                 CodeStub* stub)
   56.11 +
   56.12    : LIR_Op(code, result, NULL)
   56.13    , _object(object)
   56.14    , _array(LIR_OprFact::illegalOpr)
   56.15 @@ -359,8 +358,10 @@
   56.16    , _stub(stub)
   56.17    , _info_for_patch(info_for_patch)
   56.18    , _info_for_exception(info_for_exception)
   56.19 -  , _profiled_method(profiled_method)
   56.20 -  , _profiled_bci(profiled_bci) {
   56.21 +  , _profiled_method(NULL)
   56.22 +  , _profiled_bci(-1)
   56.23 +  , _should_profile(false)
   56.24 +{
   56.25    if (code == lir_checkcast) {
   56.26      assert(info_for_exception != NULL, "checkcast throws exceptions");
   56.27    } else if (code == lir_instanceof) {
   56.28 @@ -372,7 +373,7 @@
   56.29  
   56.30  
   56.31  
   56.32 -LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception, ciMethod* profiled_method, int profiled_bci)
   56.33 +LIR_OpTypeCheck::LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception)
   56.34    : LIR_Op(code, LIR_OprFact::illegalOpr, NULL)
   56.35    , _object(object)
   56.36    , _array(array)
   56.37 @@ -384,8 +385,10 @@
   56.38    , _stub(NULL)
   56.39    , _info_for_patch(NULL)
   56.40    , _info_for_exception(info_for_exception)
   56.41 -  , _profiled_method(profiled_method)
   56.42 -  , _profiled_bci(profiled_bci) {
   56.43 +  , _profiled_method(NULL)
   56.44 +  , _profiled_bci(-1)
   56.45 +  , _should_profile(false)
   56.46 +{
   56.47    if (code == lir_store_check) {
   56.48      _stub = new ArrayStoreExceptionStub(info_for_exception);
   56.49      assert(info_for_exception != NULL, "store_check throws exceptions");
   56.50 @@ -495,6 +498,8 @@
   56.51      case lir_monaddr:        // input and result always valid, info always invalid
   56.52      case lir_null_check:     // input and info always valid, result always invalid
   56.53      case lir_move:           // input and result always valid, may have info
   56.54 +    case lir_pack64:         // input and result always valid
   56.55 +    case lir_unpack64:       // input and result always valid
   56.56      case lir_prefetchr:      // input always valid, result and info always invalid
   56.57      case lir_prefetchw:      // input always valid, result and info always invalid
   56.58      {
   56.59 @@ -903,7 +908,6 @@
   56.60        assert(opProfileCall->_tmp1->is_valid(), "used");  do_temp(opProfileCall->_tmp1);
   56.61        break;
   56.62      }
   56.63 -
   56.64    default:
   56.65      ShouldNotReachHere();
   56.66    }
   56.67 @@ -1041,12 +1045,10 @@
   56.68    masm->emit_delay(this);
   56.69  }
   56.70  
   56.71 -
   56.72  void LIR_OpProfileCall::emit_code(LIR_Assembler* masm) {
   56.73    masm->emit_profile_call(this);
   56.74  }
   56.75  
   56.76 -
   56.77  // LIR_List
   56.78  LIR_List::LIR_List(Compilation* compilation, BlockBegin* block)
   56.79    : _operations(8)
   56.80 @@ -1364,19 +1366,29 @@
   56.81                            LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
   56.82                            CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
   56.83                            ciMethod* profiled_method, int profiled_bci) {
   56.84 -  append(new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
   56.85 -                             tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub,
   56.86 -                             profiled_method, profiled_bci));
   56.87 +  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_checkcast, result, object, klass,
   56.88 +                                           tmp1, tmp2, tmp3, fast_check, info_for_exception, info_for_patch, stub);
   56.89 +  if (profiled_method != NULL) {
   56.90 +    c->set_profiled_method(profiled_method);
   56.91 +    c->set_profiled_bci(profiled_bci);
   56.92 +    c->set_should_profile(true);
   56.93 +  }
   56.94 +  append(c);
   56.95  }
   56.96  
   56.97 -
   56.98 -void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch) {
   56.99 -  append(new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL, NULL, 0));
  56.100 +void LIR_List::instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci) {
  56.101 +  LIR_OpTypeCheck* c = new LIR_OpTypeCheck(lir_instanceof, result, object, klass, tmp1, tmp2, tmp3, fast_check, NULL, info_for_patch, NULL);
  56.102 +  if (profiled_method != NULL) {
  56.103 +    c->set_profiled_method(profiled_method);
  56.104 +    c->set_profiled_bci(profiled_bci);
  56.105 +    c->set_should_profile(true);
  56.106 +  }
  56.107 +  append(c);
  56.108  }
  56.109  
  56.110  
  56.111  void LIR_List::store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception) {
  56.112 -  append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception, NULL, 0));
  56.113 +  append(new LIR_OpTypeCheck(lir_store_check, object, array, tmp1, tmp2, tmp3, info_for_exception));
  56.114  }
  56.115  
  56.116  
  56.117 @@ -1611,6 +1623,8 @@
  56.118       case lir_convert:               s = "convert";       break;
  56.119       case lir_alloc_object:          s = "alloc_obj";     break;
  56.120       case lir_monaddr:               s = "mon_addr";      break;
  56.121 +     case lir_pack64:                s = "pack64";        break;
  56.122 +     case lir_unpack64:              s = "unpack64";      break;
  56.123       // LIR_Op2
  56.124       case lir_cmp:                   s = "cmp";           break;
  56.125       case lir_cmp_l2i:               s = "cmp_l2i";       break;
  56.126 @@ -1664,7 +1678,6 @@
  56.127       case lir_cas_int:               s = "cas_int";      break;
  56.128       // LIR_OpProfileCall
  56.129       case lir_profile_call:          s = "profile_call";  break;
  56.130 -
  56.131       case lir_none:                  ShouldNotReachHere();break;
  56.132      default:                         s = "illegal_op";    break;
  56.133    }
  56.134 @@ -1922,7 +1935,6 @@
  56.135    tmp1()->print(out);          out->print(" ");
  56.136  }
  56.137  
  56.138 -
  56.139  #endif // PRODUCT
  56.140  
  56.141  // Implementation of LIR_InsertionBuffer
    57.1 --- a/src/share/vm/c1/c1_LIR.hpp	Tue Sep 21 06:58:44 2010 -0700
    57.2 +++ b/src/share/vm/c1/c1_LIR.hpp	Wed Sep 22 12:54:51 2010 -0400
    57.3 @@ -849,6 +849,8 @@
    57.4        , lir_monaddr
    57.5        , lir_roundfp
    57.6        , lir_safepoint
    57.7 +      , lir_pack64
    57.8 +      , lir_unpack64
    57.9        , lir_unwind
   57.10    , end_op1
   57.11    , begin_op2
   57.12 @@ -1464,18 +1466,16 @@
   57.13    CodeEmitInfo* _info_for_patch;
   57.14    CodeEmitInfo* _info_for_exception;
   57.15    CodeStub*     _stub;
   57.16 -  // Helpers for Tier1UpdateMethodData
   57.17    ciMethod*     _profiled_method;
   57.18    int           _profiled_bci;
   57.19 +  bool          _should_profile;
   57.20  
   57.21  public:
   57.22    LIR_OpTypeCheck(LIR_Code code, LIR_Opr result, LIR_Opr object, ciKlass* klass,
   57.23                    LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
   57.24 -                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
   57.25 -                  ciMethod* profiled_method, int profiled_bci);
   57.26 +                  CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub);
   57.27    LIR_OpTypeCheck(LIR_Code code, LIR_Opr object, LIR_Opr array,
   57.28 -                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception,
   57.29 -                  ciMethod* profiled_method, int profiled_bci);
   57.30 +                  LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
   57.31  
   57.32    LIR_Opr object() const                         { return _object;         }
   57.33    LIR_Opr array() const                          { assert(code() == lir_store_check, "not valid"); return _array;         }
   57.34 @@ -1489,8 +1489,12 @@
   57.35    CodeStub* stub() const                         { return _stub;           }
   57.36  
   57.37    // methodDataOop profiling
   57.38 -  ciMethod* profiled_method()                    { return _profiled_method; }
   57.39 -  int       profiled_bci()                       { return _profiled_bci; }
   57.40 +  void set_profiled_method(ciMethod *method)     { _profiled_method = method; }
   57.41 +  void set_profiled_bci(int bci)                 { _profiled_bci = bci;       }
   57.42 +  void set_should_profile(bool b)                { _should_profile = b;       }
   57.43 +  ciMethod* profiled_method() const              { return _profiled_method;   }
   57.44 +  int       profiled_bci() const                 { return _profiled_bci;      }
   57.45 +  bool      should_profile() const               { return _should_profile;    }
   57.46  
   57.47    virtual void emit_code(LIR_Assembler* masm);
   57.48    virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
   57.49 @@ -1771,7 +1775,6 @@
   57.50    virtual void print_instr(outputStream* out) const PRODUCT_RETURN;
   57.51  };
   57.52  
   57.53 -
   57.54  class LIR_InsertionBuffer;
   57.55  
   57.56  //--------------------------------LIR_List---------------------------------------------------
   57.57 @@ -1835,6 +1838,7 @@
   57.58    //---------- mutators ---------------
   57.59    void insert_before(int i, LIR_List* op_list)   { _operations.insert_before(i, op_list->instructions_list()); }
   57.60    void insert_before(int i, LIR_Op* op)          { _operations.insert_before(i, op); }
   57.61 +  void remove_at(int i)                          { _operations.remove_at(i); }
   57.62  
   57.63    //---------- printing -------------
   57.64    void print_instructions() PRODUCT_RETURN;
   57.65 @@ -1908,6 +1912,9 @@
   57.66    void logical_or  (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_or,   left, right, dst)); }
   57.67    void logical_xor (LIR_Opr left, LIR_Opr right, LIR_Opr dst) { append(new LIR_Op2(lir_logic_xor,  left, right, dst)); }
   57.68  
   57.69 +  void   pack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_pack64,   src, dst, T_LONG, lir_patch_none, NULL)); }
   57.70 +  void unpack64(LIR_Opr src, LIR_Opr dst) { append(new LIR_Op1(lir_unpack64, src, dst, T_LONG, lir_patch_none, NULL)); }
   57.71 +
   57.72    void null_check(LIR_Opr opr, CodeEmitInfo* info)         { append(new LIR_Op1(lir_null_check, opr, info)); }
   57.73    void throw_exception(LIR_Opr exceptionPC, LIR_Opr exceptionOop, CodeEmitInfo* info) {
   57.74      append(new LIR_Op2(lir_throw, exceptionPC, exceptionOop, LIR_OprFact::illegalOpr, info));
   57.75 @@ -2034,15 +2041,17 @@
   57.76  
   57.77    void fpop_raw()                                { append(new LIR_Op0(lir_fpop_raw)); }
   57.78  
   57.79 +  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
   57.80 +  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
   57.81 +
   57.82    void checkcast (LIR_Opr result, LIR_Opr object, ciKlass* klass,
   57.83                    LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check,
   57.84                    CodeEmitInfo* info_for_exception, CodeEmitInfo* info_for_patch, CodeStub* stub,
   57.85                    ciMethod* profiled_method, int profiled_bci);
   57.86 -  void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch);
   57.87 -  void store_check(LIR_Opr object, LIR_Opr array, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, CodeEmitInfo* info_for_exception);
   57.88 -
   57.89    // methodDataOop profiling
   57.90 -  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) { append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass)); }
   57.91 +  void profile_call(ciMethod* method, int bci, LIR_Opr mdo, LIR_Opr recv, LIR_Opr t1, ciKlass* cha_klass) {
   57.92 +    append(new LIR_OpProfileCall(lir_profile_call, method, bci, mdo, recv, t1, cha_klass));
   57.93 +  }
   57.94  };
   57.95  
   57.96  void print_LIR(BlockList* blocks);
    58.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp	Tue Sep 21 06:58:44 2010 -0700
    58.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp	Wed Sep 22 12:54:51 2010 -0400
    58.3 @@ -548,6 +548,16 @@
    58.4        monitor_address(op->in_opr()->as_constant_ptr()->as_jint(), op->result_opr());
    58.5        break;
    58.6  
    58.7 +#ifdef SPARC
    58.8 +    case lir_pack64:
    58.9 +      pack64(op->in_opr(), op->result_opr());
   58.10 +      break;
   58.11 +
   58.12 +    case lir_unpack64:
   58.13 +      unpack64(op->in_opr(), op->result_opr());
   58.14 +      break;
   58.15 +#endif
   58.16 +
   58.17      case lir_unwind:
   58.18        unwind_op(op->in_opr());
   58.19        break;
    59.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp	Tue Sep 21 06:58:44 2010 -0700
    59.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp	Wed Sep 22 12:54:51 2010 -0400
    59.3 @@ -187,6 +187,7 @@
    59.4    void emit_alloc_obj(LIR_OpAllocObj* op);
    59.5    void emit_alloc_array(LIR_OpAllocArray* op);
    59.6    void emit_opTypeCheck(LIR_OpTypeCheck* op);
    59.7 +  void emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, Label* failure, Label* obj_is_null);
    59.8    void emit_compare_and_swap(LIR_OpCompareAndSwap* op);
    59.9    void emit_lock(LIR_OpLock* op);
   59.10    void emit_call(LIR_OpJavaCall* op);
    60.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Sep 21 06:58:44 2010 -0700
    60.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Wed Sep 22 12:54:51 2010 -0400
    60.3 @@ -480,16 +480,6 @@
    60.4  }
    60.5  
    60.6  
    60.7 -// increment a counter returning the incremented value
    60.8 -LIR_Opr LIRGenerator::increment_and_return_counter(LIR_Opr base, int offset, int increment) {
    60.9 -  LIR_Address* counter = new LIR_Address(base, offset, T_INT);
   60.10 -  LIR_Opr result = new_register(T_INT);
   60.11 -  __ load(counter, result);
   60.12 -  __ add(result, LIR_OprFact::intConst(increment), result);
   60.13 -  __ store(result, counter);
   60.14 -  return result;
   60.15 -}
   60.16 -
   60.17  
   60.18  void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
   60.19    LIR_Opr result_op = result;
   60.20 @@ -821,7 +811,6 @@
   60.21    return tmp;
   60.22  }
   60.23  
   60.24 -
   60.25  void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
   60.26    if (if_instr->should_profile()) {
   60.27      ciMethod* method = if_instr->profiled_method();
   60.28 @@ -836,24 +825,32 @@
   60.29      assert(data->is_BranchData(), "need BranchData for two-way branches");
   60.30      int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
   60.31      int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
   60.32 +    if (if_instr->is_swapped()) {
   60.33 +      int t = taken_count_offset;
   60.34 +      taken_count_offset = not_taken_count_offset;
   60.35 +      not_taken_count_offset = t;
   60.36 +    }
   60.37 +
   60.38      LIR_Opr md_reg = new_register(T_OBJECT);
   60.39 -    __ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
   60.40 -    LIR_Opr data_offset_reg = new_register(T_INT);
   60.41 +    __ oop2reg(md->constant_encoding(), md_reg);
   60.42 +
   60.43 +    LIR_Opr data_offset_reg = new_pointer_register();
   60.44      __ cmove(lir_cond(cond),
   60.45 -             LIR_OprFact::intConst(taken_count_offset),
   60.46 -             LIR_OprFact::intConst(not_taken_count_offset),
   60.47 +             LIR_OprFact::intptrConst(taken_count_offset),
   60.48 +             LIR_OprFact::intptrConst(not_taken_count_offset),
   60.49               data_offset_reg);
   60.50 -    LIR_Opr data_reg = new_register(T_INT);
   60.51 -    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
   60.52 +
   60.53 +    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
   60.54 +    LIR_Opr data_reg = new_pointer_register();
   60.55 +    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
   60.56      __ move(LIR_OprFact::address(data_addr), data_reg);
   60.57 +    // Use leal instead of add to avoid destroying condition codes on x86
   60.58      LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
   60.59 -    // Use leal instead of add to avoid destroying condition codes on x86
   60.60      __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
   60.61      __ move(data_reg, LIR_OprFact::address(data_addr));
   60.62    }
   60.63  }
   60.64  
   60.65 -
   60.66  // Phi technique:
   60.67  // This is about passing live values from one basic block to the other.
   60.68  // In code generated with Java it is rather rare that more than one
   60.69 @@ -1305,8 +1302,6 @@
   60.70    LIR_Opr flag_val = new_register(T_INT);
   60.71    __ load(mark_active_flag_addr, flag_val);
   60.72  
   60.73 -  LabelObj* start_store = new LabelObj();
   60.74 -
   60.75    LIR_PatchCode pre_val_patch_code =
   60.76      patch ? lir_patch_normal : lir_patch_none;
   60.77  
   60.78 @@ -1757,7 +1752,7 @@
   60.79  
   60.80  #ifndef PRODUCT
   60.81    if (PrintC1Statistics) {
   60.82 -    increment_counter(Runtime1::throw_count_address());
   60.83 +    increment_counter(Runtime1::throw_count_address(), T_INT);
   60.84    }
   60.85  #endif
   60.86  
   60.87 @@ -2191,12 +2186,41 @@
   60.88      ValueStack* state = x->state_before() ? x->state_before() : x->state();
   60.89  
   60.90      // increment backedge counter if needed
   60.91 -    increment_backedge_counter(state_for(x, state));
   60.92 -
   60.93 +    CodeEmitInfo* info = state_for(x, state);
   60.94 +    increment_backedge_counter(info, info->bci());
   60.95      CodeEmitInfo* safepoint_info = state_for(x, state);
   60.96      __ safepoint(safepoint_poll_register(), safepoint_info);
   60.97    }
   60.98  
   60.99 +  // Gotos can be folded Ifs, handle this case.
  60.100 +  if (x->should_profile()) {
  60.101 +    ciMethod* method = x->profiled_method();
  60.102 +    assert(method != NULL, "method should be set if branch is profiled");
  60.103 +    ciMethodData* md = method->method_data();
  60.104 +    if (md == NULL) {
  60.105 +      bailout("out of memory building methodDataOop");
  60.106 +      return;
  60.107 +    }
  60.108 +    ciProfileData* data = md->bci_to_data(x->profiled_bci());
  60.109 +    assert(data != NULL, "must have profiling data");
  60.110 +    int offset;
  60.111 +    if (x->direction() == Goto::taken) {
  60.112 +      assert(data->is_BranchData(), "need BranchData for two-way branches");
  60.113 +      offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
  60.114 +    } else if (x->direction() == Goto::not_taken) {
  60.115 +      assert(data->is_BranchData(), "need BranchData for two-way branches");
  60.116 +      offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
  60.117 +    } else {
  60.118 +      assert(data->is_JumpData(), "need JumpData for branches");
  60.119 +      offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
  60.120 +    }
  60.121 +    LIR_Opr md_reg = new_register(T_OBJECT);
  60.122 +    __ oop2reg(md->constant_encoding(), md_reg);
  60.123 +
  60.124 +    increment_counter(new LIR_Address(md_reg, offset,
  60.125 +                                      NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
  60.126 +  }
  60.127 +
  60.128    // emit phi-instruction move after safepoint since this simplifies
  60.129    // describing the state as the safepoint.
  60.130    move_to_phi(x->state());
  60.131 @@ -2279,7 +2303,10 @@
  60.132    }
  60.133  
  60.134    // increment invocation counters if needed
  60.135 -  increment_invocation_counter(new CodeEmitInfo(0, scope()->start()->state(), NULL));
  60.136 +  if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
  60.137 +    CodeEmitInfo* info = new CodeEmitInfo(InvocationEntryBci, scope()->start()->state(), NULL);
  60.138 +    increment_invocation_counter(info);
  60.139 +  }
  60.140  
  60.141    // all blocks with a successor must end with an unconditional jump
  60.142    // to the successor even if they are consecutive
  60.143 @@ -2613,12 +2640,12 @@
  60.144    }
  60.145  }
  60.146  
  60.147 -
  60.148  void LIRGenerator::do_ProfileCall(ProfileCall* x) {
  60.149    // Need recv in a temporary register so it interferes with the other temporaries
  60.150    LIR_Opr recv = LIR_OprFact::illegalOpr;
  60.151    LIR_Opr mdo = new_register(T_OBJECT);
  60.152 -  LIR_Opr tmp = new_register(T_INT);
  60.153 +  // tmp is used to hold the counters on SPARC
  60.154 +  LIR_Opr tmp = new_pointer_register();
  60.155    if (x->recv() != NULL) {
  60.156      LIRItem value(x->recv(), this);
  60.157      value.load_item();
  60.158 @@ -2628,14 +2655,69 @@
  60.159    __ profile_call(x->method(), x->bci_of_invoke(), mdo, recv, tmp, x->known_holder());
  60.160  }
  60.161  
  60.162 -
  60.163 -void LIRGenerator::do_ProfileCounter(ProfileCounter* x) {
  60.164 -  LIRItem mdo(x->mdo(), this);
  60.165 -  mdo.load_item();
  60.166 -
  60.167 -  increment_counter(new LIR_Address(mdo.result(), x->offset(), T_INT), x->increment());
  60.168 +void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
  60.169 +  // We can safely ignore accessors here, since c2 will inline them anyway,
  60.170 +  // accessors are also always mature.
  60.171 +  if (!x->inlinee()->is_accessor()) {
  60.172 +    CodeEmitInfo* info = state_for(x, x->state(), true);
  60.173 +    // Increment invocation counter, don't notify the runtime, because we don't inline loops,
  60.174 +    increment_event_counter_impl(info, x->inlinee(), 0, InvocationEntryBci, false, false);
  60.175 +  }
  60.176  }
  60.177  
  60.178 +void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
  60.179 +  int freq_log;
  60.180 +  int level = compilation()->env()->comp_level();
  60.181 +  if (level == CompLevel_limited_profile) {
  60.182 +    freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
  60.183 +  } else if (level == CompLevel_full_profile) {
  60.184 +    freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
  60.185 +  } else {
  60.186 +    ShouldNotReachHere();
  60.187 +  }
  60.188 +  // Increment the appropriate invocation/backedge counter and notify the runtime.
  60.189 +  increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
  60.190 +}
  60.191 +
  60.192 +void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
  60.193 +                                                ciMethod *method, int frequency,
  60.194 +                                                int bci, bool backedge, bool notify) {
  60.195 +  assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0");
  60.196 +  int level = _compilation->env()->comp_level();
  60.197 +  assert(level > CompLevel_simple, "Shouldn't be here");
  60.198 +
  60.199 +  int offset = -1;
  60.200 +  LIR_Opr counter_holder = new_register(T_OBJECT);
  60.201 +  LIR_Opr meth;
  60.202 +  if (level == CompLevel_limited_profile) {
  60.203 +    offset = in_bytes(backedge ? methodOopDesc::backedge_counter_offset() :
  60.204 +                                 methodOopDesc::invocation_counter_offset());
  60.205 +    __ oop2reg(method->constant_encoding(), counter_holder);
  60.206 +    meth = counter_holder;
  60.207 +  } else if (level == CompLevel_full_profile) {
  60.208 +    offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
  60.209 +                                 methodDataOopDesc::invocation_counter_offset());
  60.210 +    __ oop2reg(method->method_data()->constant_encoding(), counter_holder);
  60.211 +    meth = new_register(T_OBJECT);
  60.212 +    __ oop2reg(method->constant_encoding(), meth);
  60.213 +  } else {
  60.214 +    ShouldNotReachHere();
  60.215 +  }
  60.216 +  LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
  60.217 +  LIR_Opr result = new_register(T_INT);
  60.218 +  __ load(counter, result);
  60.219 +  __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
  60.220 +  __ store(result, counter);
  60.221 +  if (notify) {
  60.222 +    LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
  60.223 +    __ logical_and(result, mask, result);
  60.224 +    __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
  60.225 +    // The bci for info can point to cmp for if's we want the if bci
  60.226 +    CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
  60.227 +    __ branch(lir_cond_equal, T_INT, overflow);
  60.228 +    __ branch_destination(overflow->continuation());
  60.229 +  }
  60.230 +}
  60.231  
  60.232  LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
  60.233    LIRItemList args(1);
  60.234 @@ -2748,28 +2830,3 @@
  60.235    return result;
  60.236  }
  60.237  
  60.238 -
  60.239 -
  60.240 -void LIRGenerator::increment_invocation_counter(CodeEmitInfo* info, bool backedge) {
  60.241 -#ifdef TIERED
  60.242 -  if (_compilation->env()->comp_level() == CompLevel_fast_compile &&
  60.243 -      (method()->code_size() >= Tier1BytecodeLimit || backedge)) {
  60.244 -    int limit = InvocationCounter::Tier1InvocationLimit;
  60.245 -    int offset = in_bytes(methodOopDesc::invocation_counter_offset() +
  60.246 -                          InvocationCounter::counter_offset());
  60.247 -    if (backedge) {
  60.248 -      limit = InvocationCounter::Tier1BackEdgeLimit;
  60.249 -      offset = in_bytes(methodOopDesc::backedge_counter_offset() +
  60.250 -                        InvocationCounter::counter_offset());
  60.251 -    }
  60.252 -
  60.253 -    LIR_Opr meth = new_register(T_OBJECT);
  60.254 -    __ oop2reg(method()->constant_encoding(), meth);
  60.255 -    LIR_Opr result = increment_and_return_counter(meth, offset, InvocationCounter::count_increment);
  60.256 -    __ cmp(lir_cond_aboveEqual, result, LIR_OprFact::intConst(limit));
  60.257 -    CodeStub* overflow = new CounterOverflowStub(info, info->bci());
  60.258 -    __ branch(lir_cond_aboveEqual, T_INT, overflow);
  60.259 -    __ branch_destination(overflow->continuation());
  60.260 -  }
  60.261 -#endif
  60.262 -}
    61.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp	Tue Sep 21 06:58:44 2010 -0700
    61.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp	Wed Sep 22 12:54:51 2010 -0400
    61.3 @@ -1,5 +1,5 @@
    61.4  /*
    61.5 - * Copyright (c) 2005, 2006, Oracle and/or its affiliates. All rights reserved.
    61.6 + * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
    61.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    61.8   *
    61.9   * This code is free software; you can redistribute it and/or modify it
   61.10 @@ -196,6 +196,9 @@
   61.11    LIR_Opr load_constant(Constant* x);
   61.12    LIR_Opr load_constant(LIR_Const* constant);
   61.13  
   61.14 +  // Given an immediate value, return an operand usable in logical ops.
   61.15 +  LIR_Opr load_immediate(int x, BasicType type);
   61.16 +
   61.17    void  set_result(Value x, LIR_Opr opr)           {
   61.18      assert(opr->is_valid(), "must set to valid value");
   61.19      assert(x->operand()->is_illegal(), "operand should never change");
   61.20 @@ -213,8 +216,6 @@
   61.21    LIR_Opr round_item(LIR_Opr opr);
   61.22    LIR_Opr force_to_spill(LIR_Opr value, BasicType t);
   61.23  
   61.24 -  void  profile_branch(If* if_instr, If::Condition cond);
   61.25 -
   61.26    PhiResolverState& resolver_state() { return _resolver_state; }
   61.27  
   61.28    void  move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val);
   61.29 @@ -285,12 +286,9 @@
   61.30  
   61.31    void arithmetic_call_op (Bytecodes::Code code, LIR_Opr result, LIR_OprList* args);
   61.32  
   61.33 -  void increment_counter(address counter, int step = 1);
   61.34 +  void increment_counter(address counter, BasicType type, int step = 1);
   61.35    void increment_counter(LIR_Address* addr, int step = 1);
   61.36  
   61.37 -  // increment a counter returning the incremented value
   61.38 -  LIR_Opr increment_and_return_counter(LIR_Opr base, int offset, int increment);
   61.39 -
   61.40    // is_strictfp is only needed for mul and div (and only generates different code on i486)
   61.41    void arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp, CodeEmitInfo* info = NULL);
   61.42    // machine dependent.  returns true if it emitted code for the multiply
   61.43 @@ -347,9 +345,21 @@
   61.44    bool can_store_as_constant(Value i, BasicType type) const;
   61.45  
   61.46    LIR_Opr safepoint_poll_register();
   61.47 -  void increment_invocation_counter(CodeEmitInfo* info, bool backedge = false);
   61.48 -  void increment_backedge_counter(CodeEmitInfo* info) {
   61.49 -    increment_invocation_counter(info, true);
   61.50 +
   61.51 +  void profile_branch(If* if_instr, If::Condition cond);
   61.52 +  void increment_event_counter_impl(CodeEmitInfo* info,
   61.53 +                                    ciMethod *method, int frequency,
   61.54 +                                    int bci, bool backedge, bool notify);
   61.55 +  void increment_event_counter(CodeEmitInfo* info, int bci, bool backedge);
   61.56 +  void increment_invocation_counter(CodeEmitInfo *info) {
   61.57 +    if (compilation()->count_invocations()) {
   61.58 +      increment_event_counter(info, InvocationEntryBci, false);
   61.59 +    }
   61.60 +  }
   61.61 +  void increment_backedge_counter(CodeEmitInfo* info, int bci) {
   61.62 +    if (compilation()->count_backedges()) {
   61.63 +      increment_event_counter(info, bci, true);
   61.64 +    }
   61.65    }
   61.66  
   61.67    CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false);
   61.68 @@ -503,7 +513,7 @@
   61.69    virtual void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   61.70    virtual void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   61.71    virtual void do_ProfileCall    (ProfileCall*     x);
   61.72 -  virtual void do_ProfileCounter (ProfileCounter*  x);
   61.73 +  virtual void do_ProfileInvoke  (ProfileInvoke*   x);
   61.74  };
   61.75  
   61.76  
    62.1 --- a/src/share/vm/c1/c1_Optimizer.cpp	Tue Sep 21 06:58:44 2010 -0700
    62.2 +++ b/src/share/vm/c1/c1_Optimizer.cpp	Wed Sep 22 12:54:51 2010 -0400
    62.3 @@ -1,5 +1,5 @@
    62.4  /*
    62.5 - * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved.
    62.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    62.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    62.8   *
    62.9   * This code is free software; you can redistribute it and/or modify it
   62.10 @@ -430,7 +430,7 @@
   62.11    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x);
   62.12    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x);
   62.13    void do_ProfileCall    (ProfileCall*     x);
   62.14 -  void do_ProfileCounter (ProfileCounter*  x);
   62.15 +  void do_ProfileInvoke  (ProfileInvoke*   x);
   62.16  };
   62.17  
   62.18  
   62.19 @@ -598,7 +598,7 @@
   62.20  void NullCheckVisitor::do_UnsafePrefetchRead (UnsafePrefetchRead*  x) {}
   62.21  void NullCheckVisitor::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {}
   62.22  void NullCheckVisitor::do_ProfileCall    (ProfileCall*     x) { nce()->clear_last_explicit_null_check(); }
   62.23 -void NullCheckVisitor::do_ProfileCounter (ProfileCounter*  x) {}
   62.24 +void NullCheckVisitor::do_ProfileInvoke  (ProfileInvoke*   x) {}
   62.25  
   62.26  
   62.27  void NullCheckEliminator::visit(Value* p) {
    63.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Tue Sep 21 06:58:44 2010 -0700
    63.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Wed Sep 22 12:54:51 2010 -0400
    63.3 @@ -140,9 +140,7 @@
    63.4      case slow_subtype_check_id:
    63.5      case fpu2long_stub_id:
    63.6      case unwind_exception_id:
    63.7 -#ifndef TIERED
    63.8 -    case counter_overflow_id: // Not generated outside the tiered world
    63.9 -#endif
   63.10 +    case counter_overflow_id:
   63.11  #if defined(SPARC) || defined(PPC)
   63.12      case handle_exception_nofpu_id:  // Unused on sparc
   63.13  #endif
   63.14 @@ -322,31 +320,60 @@
   63.15    }
   63.16  JRT_END
   63.17  
   63.18 -#ifdef TIERED
   63.19 -JRT_ENTRY(void, Runtime1::counter_overflow(JavaThread* thread, int bci))
   63.20 -  RegisterMap map(thread, false);
   63.21 -  frame fr =  thread->last_frame().sender(&map);
   63.22 +// This is a helper to allow us to safepoint but allow the outer entry
   63.23 +// to be safepoint free if we need to do an osr
   63.24 +static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, methodOopDesc* m) {
   63.25 +  nmethod* osr_nm = NULL;
   63.26 +  methodHandle method(THREAD, m);
   63.27 +
   63.28 +  RegisterMap map(THREAD, false);
   63.29 +  frame fr =  THREAD->last_frame().sender(&map);
   63.30    nmethod* nm = (nmethod*) fr.cb();
   63.31 -  assert(nm!= NULL && nm->is_nmethod(), "what?");
   63.32 -  methodHandle method(thread, nm->method());
   63.33 -  if (bci == 0) {
   63.34 -    // invocation counter overflow
   63.35 -    if (!Tier1CountOnly) {
   63.36 -      CompilationPolicy::policy()->method_invocation_event(method, CHECK);
   63.37 -    } else {
   63.38 -      method()->invocation_counter()->reset();
   63.39 +  assert(nm!= NULL && nm->is_nmethod(), "Sanity check");
   63.40 +  methodHandle enclosing_method(THREAD, nm->method());
   63.41 +
   63.42 +  CompLevel level = (CompLevel)nm->comp_level();
   63.43 +  int bci = InvocationEntryBci;
   63.44 +  if (branch_bci != InvocationEntryBci) {
   63.45 +    // Compute desination bci
   63.46 +    address pc = method()->code_base() + branch_bci;
   63.47 +    Bytecodes::Code branch = Bytecodes::code_at(pc, method());
   63.48 +    int offset = 0;
   63.49 +    switch (branch) {
   63.50 +      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
   63.51 +      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
   63.52 +      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
   63.53 +      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
   63.54 +      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
   63.55 +      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
   63.56 +      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
   63.57 +        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
   63.58 +        break;
   63.59 +      case Bytecodes::_goto_w:
   63.60 +        offset = Bytes::get_Java_u4(pc + 1);
   63.61 +        break;
   63.62 +      default: ;
   63.63      }
   63.64 -  } else {
   63.65 -    if (!Tier1CountOnly) {
   63.66 -      // Twe have a bci but not the destination bci and besides a backedge
   63.67 -      // event is more for OSR which we don't want here.
   63.68 -      CompilationPolicy::policy()->method_invocation_event(method, CHECK);
   63.69 -    } else {
   63.70 -      method()->backedge_counter()->reset();
   63.71 +    bci = branch_bci + offset;
   63.72 +  }
   63.73 +
   63.74 +  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, THREAD);
   63.75 +  return osr_nm;
   63.76 +}
   63.77 +
   63.78 +JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, methodOopDesc* method))
   63.79 +  nmethod* osr_nm;
   63.80 +  JRT_BLOCK
   63.81 +    osr_nm = counter_overflow_helper(thread, bci, method);
   63.82 +    if (osr_nm != NULL) {
   63.83 +      RegisterMap map(thread, false);
   63.84 +      frame fr =  thread->last_frame().sender(&map);
   63.85 +      VM_DeoptimizeFrame deopt(thread, fr.id());
   63.86 +      VMThread::execute(&deopt);
   63.87      }
   63.88 -  }
   63.89 +  JRT_BLOCK_END
   63.90 +  return NULL;
   63.91  JRT_END
   63.92 -#endif // TIERED
   63.93  
   63.94  extern void vm_exit(int code);
   63.95  
   63.96 @@ -898,7 +925,7 @@
   63.97              NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
   63.98  
   63.99              assert(n_copy->data() == 0 ||
  63.100 -                   n_copy->data() == (int)Universe::non_oop_word(),
  63.101 +                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
  63.102                     "illegal init value");
  63.103              assert(load_klass() != NULL, "klass not set");
  63.104              n_copy->set_data((intx) (load_klass()));
    64.1 --- a/src/share/vm/c1/c1_Runtime1.hpp	Tue Sep 21 06:58:44 2010 -0700
    64.2 +++ b/src/share/vm/c1/c1_Runtime1.hpp	Wed Sep 22 12:54:51 2010 -0400
    64.3 @@ -123,9 +123,7 @@
    64.4    static void new_object_array(JavaThread* thread, klassOopDesc* klass, jint length);
    64.5    static void new_multi_array (JavaThread* thread, klassOopDesc* klass, int rank, jint* dims);
    64.6  
    64.7 -#ifdef TIERED
    64.8 -  static void counter_overflow(JavaThread* thread, int bci);
    64.9 -#endif // TIERED
   64.10 +  static address counter_overflow(JavaThread* thread, int bci, methodOopDesc* method);
   64.11  
   64.12    static void unimplemented_entry   (JavaThread* thread, StubID id);
   64.13  
    65.1 --- a/src/share/vm/c1/c1_ValueMap.hpp	Tue Sep 21 06:58:44 2010 -0700
    65.2 +++ b/src/share/vm/c1/c1_ValueMap.hpp	Wed Sep 22 12:54:51 2010 -0400
    65.3 @@ -1,5 +1,5 @@
    65.4  /*
    65.5 - * Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    65.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    65.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.8   *
    65.9   * This code is free software; you can redistribute it and/or modify it
   65.10 @@ -185,11 +185,11 @@
   65.11    void do_ExceptionObject(ExceptionObject* x) { /* nothing to do */ }
   65.12    void do_RoundFP        (RoundFP*         x) { /* nothing to do */ }
   65.13    void do_UnsafeGetRaw   (UnsafeGetRaw*    x) { /* nothing to do */ }
   65.14 +  void do_ProfileInvoke  (ProfileInvoke*   x) { /* nothing to do */ };
   65.15    void do_UnsafeGetObject(UnsafeGetObject* x) { /* nothing to do */ }
   65.16    void do_UnsafePrefetchRead (UnsafePrefetchRead*  x) { /* nothing to do */ }
   65.17    void do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) { /* nothing to do */ }
   65.18    void do_ProfileCall    (ProfileCall*     x) { /* nothing to do */ }
   65.19 -  void do_ProfileCounter (ProfileCounter*  x) { /* nothing to do */ }
   65.20  };
   65.21  
   65.22  
    66.1 --- a/src/share/vm/c1/c1_globals.hpp	Tue Sep 21 06:58:44 2010 -0700
    66.2 +++ b/src/share/vm/c1/c1_globals.hpp	Wed Sep 22 12:54:51 2010 -0400
    66.3 @@ -25,12 +25,6 @@
    66.4  //
    66.5  // Defines all global flags used by the client compiler.
    66.6  //
    66.7 -#ifndef TIERED
    66.8 -  #define NOT_TIERED(x) x
    66.9 -#else
   66.10 -  #define NOT_TIERED(x)
   66.11 -#endif
   66.12 -
   66.13  #define C1_FLAGS(develop, develop_pd, product, product_pd, notproduct)      \
   66.14                                                                              \
   66.15    /* Printing */                                                            \
   66.16 @@ -55,7 +49,7 @@
   66.17    notproduct(bool, PrintIRDuringConstruction, false,                        \
   66.18            "Print IR as it's being constructed (helpful for debugging frontend)")\
   66.19                                                                              \
   66.20 -  notproduct(bool, PrintPhiFunctions, false,                                   \
   66.21 +  notproduct(bool, PrintPhiFunctions, false,                                \
   66.22            "Print phi functions when they are created and simplified")       \
   66.23                                                                              \
   66.24    notproduct(bool, PrintIR, false,                                          \
   66.25 @@ -279,41 +273,29 @@
   66.26    product_pd(intx, SafepointPollOffset,                                     \
   66.27            "Offset added to polling address (Intel only)")                   \
   66.28                                                                              \
   66.29 -  product(bool, UseNewFeature1, false,                                      \
   66.30 -          "Enable new feature for testing.  This is a dummy flag.")         \
   66.31 -                                                                            \
   66.32 -  product(bool, UseNewFeature2, false,                                      \
   66.33 -          "Enable new feature for testing.  This is a dummy flag.")         \
   66.34 -                                                                            \
   66.35 -  product(bool, UseNewFeature3, false,                                      \
   66.36 -          "Enable new feature for testing.  This is a dummy flag.")         \
   66.37 -                                                                            \
   66.38 -  product(bool, UseNewFeature4, false,                                      \
   66.39 -          "Enable new feature for testing.  This is a dummy flag.")         \
   66.40 -                                                                            \
   66.41    develop(bool, ComputeExactFPURegisterUsage, true,                         \
   66.42            "Compute additional live set for fpu registers to simplify fpu stack merge (Intel only)") \
   66.43                                                                              \
   66.44 -  product(bool, Tier1ProfileCalls, true,                                    \
   66.45 +  product(bool, C1ProfileCalls, true,                                       \
   66.46            "Profile calls when generating code for updating MDOs")           \
   66.47                                                                              \
   66.48 -  product(bool, Tier1ProfileVirtualCalls, true,                             \
   66.49 +  product(bool, C1ProfileVirtualCalls, true,                                \
   66.50            "Profile virtual calls when generating code for updating MDOs")   \
   66.51                                                                              \
   66.52 -  product(bool, Tier1ProfileInlinedCalls, true,                             \
   66.53 +  product(bool, C1ProfileInlinedCalls, true,                                \
   66.54            "Profile inlined calls when generating code for updating MDOs")   \
   66.55                                                                              \
   66.56 -  product(bool, Tier1ProfileBranches, true,                                 \
   66.57 +  product(bool, C1ProfileBranches, true,                                    \
   66.58            "Profile branches when generating code for updating MDOs")        \
   66.59                                                                              \
   66.60 -  product(bool, Tier1ProfileCheckcasts, true,                               \
   66.61 +  product(bool, C1ProfileCheckcasts, true,                                  \
   66.62            "Profile checkcasts when generating code for updating MDOs")      \
   66.63                                                                              \
   66.64 -  product(bool, Tier1OptimizeVirtualCallProfiling, true,                    \
   66.65 -          "Use CHA and exact type results at call sites when updating MDOs") \
   66.66 +  product(bool, C1OptimizeVirtualCallProfiling, true,                       \
   66.67 +          "Use CHA and exact type results at call sites when updating MDOs")\
   66.68                                                                              \
   66.69 -  develop(bool, Tier1CountOnly, false,                                      \
   66.70 -          "Don't schedule tier 2 compiles. Enter VM only")                  \
   66.71 +  product(bool, C1UpdateMethodData, trueInTiered,                           \
   66.72 +          "Update methodDataOops in Tier1-generated code")                  \
   66.73                                                                              \
   66.74    develop(bool, PrintCFGToFile, false,                                      \
   66.75            "print control flow graph to a separate file during compilation") \
    67.1 --- a/src/share/vm/ci/ciEnv.cpp	Tue Sep 21 06:58:44 2010 -0700
    67.2 +++ b/src/share/vm/ci/ciEnv.cpp	Wed Sep 22 12:54:51 2010 -0400
    67.3 @@ -956,18 +956,18 @@
    67.4        if (task() != NULL)  task()->set_code(nm);
    67.5  
    67.6        if (entry_bci == InvocationEntryBci) {
    67.7 -#ifdef TIERED
    67.8 -        // If there is an old version we're done with it
    67.9 -        nmethod* old = method->code();
   67.10 -        if (TraceMethodReplacement && old != NULL) {
   67.11 -          ResourceMark rm;
   67.12 -          char *method_name = method->name_and_sig_as_C_string();
   67.13 -          tty->print_cr("Replacing method %s", method_name);
   67.14 +        if (TieredCompilation) {
   67.15 +          // If there is an old version we're done with it
   67.16 +          nmethod* old = method->code();
   67.17 +          if (TraceMethodReplacement && old != NULL) {
   67.18 +            ResourceMark rm;
   67.19 +            char *method_name = method->name_and_sig_as_C_string();
   67.20 +            tty->print_cr("Replacing method %s", method_name);
   67.21 +          }
   67.22 +          if (old != NULL ) {
   67.23 +            old->make_not_entrant();
   67.24 +          }
   67.25          }
   67.26 -        if (old != NULL ) {
   67.27 -          old->make_not_entrant();
   67.28 -        }
   67.29 -#endif // TIERED
   67.30          if (TraceNMethodInstalls ) {
   67.31            ResourceMark rm;
   67.32            char *method_name = method->name_and_sig_as_C_string();
   67.33 @@ -1011,7 +1011,7 @@
   67.34  // ------------------------------------------------------------------
   67.35  // ciEnv::comp_level
   67.36  int ciEnv::comp_level() {
   67.37 -  if (task() == NULL)  return CompLevel_full_optimization;
   67.38 +  if (task() == NULL)  return CompLevel_highest_tier;
   67.39    return task()->comp_level();
   67.40  }
   67.41  
    68.1 --- a/src/share/vm/ci/ciMethod.cpp	Tue Sep 21 06:58:44 2010 -0700
    68.2 +++ b/src/share/vm/ci/ciMethod.cpp	Wed Sep 22 12:54:51 2010 -0400
    68.3 @@ -49,7 +49,8 @@
    68.4    _handler_count      = h_m()->exception_table()->length() / 4;
    68.5    _uses_monitors      = h_m()->access_flags().has_monitor_bytecodes();
    68.6    _balanced_monitors  = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
    68.7 -  _is_compilable      = !h_m()->is_not_compilable();
    68.8 +  _is_c1_compilable   = !h_m()->is_not_c1_compilable();
    68.9 +  _is_c2_compilable   = !h_m()->is_not_c2_compilable();
   68.10    // Lazy fields, filled in on demand.  Require allocation.
   68.11    _code               = NULL;
   68.12    _exception_handlers = NULL;
   68.13 @@ -61,11 +62,12 @@
   68.14  #endif // COMPILER2 || SHARK
   68.15  
   68.16    ciEnv *env = CURRENT_ENV;
   68.17 -  if (env->jvmti_can_hotswap_or_post_breakpoint() && _is_compilable) {
   68.18 +  if (env->jvmti_can_hotswap_or_post_breakpoint() && can_be_compiled()) {
   68.19      // 6328518 check hotswap conditions under the right lock.
   68.20      MutexLocker locker(Compile_lock);
   68.21      if (Dependencies::check_evol_method(h_m()) != NULL) {
   68.22 -      _is_compilable = false;
   68.23 +      _is_c1_compilable = false;
   68.24 +      _is_c2_compilable = false;
   68.25      }
   68.26    } else {
   68.27      CHECK_UNHANDLED_OOPS_ONLY(Thread::current()->clear_unhandled_oops());
   68.28 @@ -93,7 +95,7 @@
   68.29    _signature = new (env->arena()) ciSignature(_holder, sig_symbol);
   68.30    _method_data = NULL;
   68.31    // Take a snapshot of these values, so they will be commensurate with the MDO.
   68.32 -  if (ProfileInterpreter) {
   68.33 +  if (ProfileInterpreter || TieredCompilation) {
   68.34      int invcnt = h_m()->interpreter_invocation_count();
   68.35      // if the value overflowed report it as max int
   68.36      _interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
   68.37 @@ -437,11 +439,26 @@
   68.38          // In addition, virtual call sites have receiver type information
   68.39          int receivers_count_total = 0;
   68.40          int morphism = 0;
   68.41 +        // Precompute morphism for the possible fixup
   68.42          for (uint i = 0; i < call->row_limit(); i++) {
   68.43            ciKlass* receiver = call->receiver(i);
   68.44            if (receiver == NULL)  continue;
   68.45 -          morphism += 1;
   68.46 -          int rcount = call->receiver_count(i);
   68.47 +          morphism++;
   68.48 +        }
   68.49 +        int epsilon = 0;
   68.50 +        if (TieredCompilation && ProfileInterpreter) {
   68.51 +          // Interpreter and C1 treat final and special invokes differently.
   68.52 +          // C1 will record a type, whereas the interpreter will just
   68.53 +          // increment the count. Detect this case.
   68.54 +          if (morphism == 1 && count > 0) {
   68.55 +            epsilon = count;
   68.56 +            count = 0;
   68.57 +          }
   68.58 +        }
   68.59 +        for (uint i = 0; i < call->row_limit(); i++) {
   68.60 +          ciKlass* receiver = call->receiver(i);
   68.61 +          if (receiver == NULL)  continue;
   68.62 +          int rcount = call->receiver_count(i) + epsilon;
   68.63            if (rcount == 0) rcount = 1; // Should be valid value
   68.64            receivers_count_total += rcount;
   68.65            // Add the receiver to result data.
   68.66 @@ -687,10 +704,17 @@
   68.67  // invocation counts in methods.
   68.68  int ciMethod::scale_count(int count, float prof_factor) {
   68.69    if (count > 0 && method_data() != NULL) {
   68.70 -    int current_mileage = method_data()->current_mileage();
   68.71 -    int creation_mileage = method_data()->creation_mileage();
   68.72 -    int counter_life = current_mileage - creation_mileage;
   68.73 +    int counter_life;
   68.74      int method_life = interpreter_invocation_count();
   68.75 +    if (TieredCompilation) {
   68.76 +      // In tiered the MDO's life is measured directly, so just use the snapshotted counters
   68.77 +      counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());
   68.78 +    } else {
   68.79 +      int current_mileage = method_data()->current_mileage();
   68.80 +      int creation_mileage = method_data()->creation_mileage();
   68.81 +      counter_life = current_mileage - creation_mileage;
   68.82 +    }
   68.83 +
   68.84      // counter_life due to backedge_counter could be > method_life
   68.85      if (counter_life > method_life)
   68.86        counter_life = method_life;
   68.87 @@ -778,7 +802,8 @@
   68.88    Thread* my_thread = JavaThread::current();
   68.89    methodHandle h_m(my_thread, get_methodOop());
   68.90  
   68.91 -  if (Tier1UpdateMethodData && is_tier1_compile(env->comp_level())) {
   68.92 +  // Create an MDO for the inlinee
   68.93 +  if (TieredCompilation && is_c1_compile(env->comp_level())) {
   68.94      build_method_data(h_m);
   68.95    }
   68.96  
   68.97 @@ -885,7 +910,11 @@
   68.98  // Have previous compilations of this method succeeded?
   68.99  bool ciMethod::can_be_compiled() {
  68.100    check_is_loaded();
  68.101 -  return _is_compilable;
  68.102 +  ciEnv* env = CURRENT_ENV;
  68.103 +  if (is_c1_compile(env->comp_level())) {
  68.104 +    return _is_c1_compilable;
  68.105 +  }
  68.106 +  return _is_c2_compilable;
  68.107  }
  68.108  
  68.109  // ------------------------------------------------------------------
  68.110 @@ -895,8 +924,13 @@
  68.111  void ciMethod::set_not_compilable() {
  68.112    check_is_loaded();
  68.113    VM_ENTRY_MARK;
  68.114 -  _is_compilable = false;
  68.115 -  get_methodOop()->set_not_compilable();
  68.116 +  ciEnv* env = CURRENT_ENV;
  68.117 +  if (is_c1_compile(env->comp_level())) {
  68.118 +    _is_c1_compilable = false;
  68.119 +  } else {
  68.120 +    _is_c2_compilable = false;
  68.121 +  }
  68.122 +  get_methodOop()->set_not_compilable(env->comp_level());
  68.123  }
  68.124  
  68.125  // ------------------------------------------------------------------
  68.126 @@ -910,7 +944,8 @@
  68.127  bool ciMethod::can_be_osr_compiled(int entry_bci) {
  68.128    check_is_loaded();
  68.129    VM_ENTRY_MARK;
  68.130 -  return !get_methodOop()->access_flags().is_not_osr_compilable();
  68.131 +  ciEnv* env = CURRENT_ENV;
  68.132 +  return !get_methodOop()->is_not_osr_compilable(env->comp_level());
  68.133  }
  68.134  
  68.135  // ------------------------------------------------------------------
  68.136 @@ -920,6 +955,14 @@
  68.137    return get_methodOop()->code() != NULL;
  68.138  }
  68.139  
  68.140 +int ciMethod::comp_level() {
  68.141 +  check_is_loaded();
  68.142 +  VM_ENTRY_MARK;
  68.143 +  nmethod* nm = get_methodOop()->code();
  68.144 +  if (nm != NULL) return nm->comp_level();
  68.145 +  return 0;
  68.146 +}
  68.147 +
  68.148  // ------------------------------------------------------------------
  68.149  // ciMethod::instructions_size
  68.150  //
  68.151 @@ -928,18 +971,13 @@
  68.152  // junk like exception handler, stubs, and constant table, which are
  68.153  // not highly relevant to an inlined method.  So we use the more
  68.154  // specific accessor nmethod::insts_size.
  68.155 -int ciMethod::instructions_size() {
  68.156 +int ciMethod::instructions_size(int comp_level) {
  68.157    GUARDED_VM_ENTRY(
  68.158      nmethod* code = get_methodOop()->code();
  68.159 -    // if there's no compiled code or the code was produced by the
  68.160 -    // tier1 profiler return 0 for the code size.  This should
  68.161 -    // probably be based on the compilation level of the nmethod but
  68.162 -    // that currently isn't properly recorded.
  68.163 -    if (code == NULL ||
  68.164 -        (TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) {
  68.165 -      return 0;
  68.166 +    if (code != NULL && (comp_level == CompLevel_any || comp_level == code->comp_level())) {
  68.167 +      return code->code_end() - code->verified_entry_point();
  68.168      }
  68.169 -    return code->insts_end() - code->verified_entry_point();
  68.170 +    return 0;
  68.171    )
  68.172  }
  68.173  
    69.1 --- a/src/share/vm/ci/ciMethod.hpp	Tue Sep 21 06:58:44 2010 -0700
    69.2 +++ b/src/share/vm/ci/ciMethod.hpp	Wed Sep 22 12:54:51 2010 -0400
    69.3 @@ -61,7 +61,8 @@
    69.4  
    69.5    bool _uses_monitors;
    69.6    bool _balanced_monitors;
    69.7 -  bool _is_compilable;
    69.8 +  bool _is_c1_compilable;
    69.9 +  bool _is_c2_compilable;
   69.10    bool _can_be_statically_bound;
   69.11  
   69.12    // Lazy fields, filled in on demand
   69.13 @@ -127,6 +128,8 @@
   69.14    int interpreter_invocation_count() const       { check_is_loaded(); return _interpreter_invocation_count; }
   69.15    int interpreter_throwout_count() const         { check_is_loaded(); return _interpreter_throwout_count; }
   69.16  
   69.17 +  int comp_level();
   69.18 +
   69.19    Bytecodes::Code java_code_at_bci(int bci) {
   69.20      address bcp = code() + bci;
   69.21      return Bytecodes::java_code_at(bcp);
   69.22 @@ -209,7 +212,7 @@
   69.23    bool can_be_osr_compiled(int entry_bci);
   69.24    void set_not_compilable();
   69.25    bool has_compiled_code();
   69.26 -  int  instructions_size();
   69.27 +  int  instructions_size(int comp_level = CompLevel_any);
   69.28    void log_nmethod_identity(xmlStream* log);
   69.29    bool is_not_reached(int bci);
   69.30    bool was_executed_more_than(int times);
    70.1 --- a/src/share/vm/ci/ciMethodData.cpp	Tue Sep 21 06:58:44 2010 -0700
    70.2 +++ b/src/share/vm/ci/ciMethodData.cpp	Wed Sep 22 12:54:51 2010 -0400
    70.3 @@ -1,5 +1,5 @@
    70.4  /*
    70.5 - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
    70.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    70.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.8   *
    70.9   * This code is free software; you can redistribute it and/or modify it
   70.10 @@ -37,6 +37,8 @@
   70.11    _data_size = 0;
   70.12    _extra_data_size = 0;
   70.13    _current_mileage = 0;
   70.14 +  _invocation_counter = 0;
   70.15 +  _backedge_counter = 0;
   70.16    _state = empty_state;
   70.17    _saw_free_extra_data = false;
   70.18    // Set an initial hint. Don't use set_hint_di() because
   70.19 @@ -56,6 +58,8 @@
   70.20    _data_size = 0;
   70.21    _extra_data_size = 0;
   70.22    _current_mileage = 0;
   70.23 +  _invocation_counter = 0;
   70.24 +  _backedge_counter = 0;
   70.25    _state = empty_state;
   70.26    _saw_free_extra_data = false;
   70.27    // Set an initial hint. Don't use set_hint_di() because
   70.28 @@ -99,6 +103,8 @@
   70.29    }
   70.30    // Note:  Extra data are all BitData, and do not need translation.
   70.31    _current_mileage = methodDataOopDesc::mileage_of(mdo->method());
   70.32 +  _invocation_counter = mdo->invocation_count();
   70.33 +  _backedge_counter = mdo->backedge_count();
   70.34    _state = mdo->is_mature()? mature_state: immature_state;
   70.35  
   70.36    _eflags = mdo->eflags();
   70.37 @@ -253,6 +259,23 @@
   70.38    }
   70.39  }
   70.40  
   70.41 +void ciMethodData::set_compilation_stats(short loops, short blocks) {
   70.42 +  VM_ENTRY_MARK;
   70.43 +  methodDataOop mdo = get_methodDataOop();
   70.44 +  if (mdo != NULL) {
   70.45 +    mdo->set_num_loops(loops);
   70.46 +    mdo->set_num_blocks(blocks);
   70.47 +  }
   70.48 +}
   70.49 +
   70.50 +void ciMethodData::set_would_profile(bool p) {
   70.51 +  VM_ENTRY_MARK;
   70.52 +  methodDataOop mdo = get_methodDataOop();
   70.53 +  if (mdo != NULL) {
   70.54 +    mdo->set_would_profile(p);
   70.55 +  }
   70.56 +}
   70.57 +
   70.58  bool ciMethodData::has_escape_info() {
   70.59    return eflag_set(methodDataOopDesc::estimated);
   70.60  }
    71.1 --- a/src/share/vm/ci/ciMethodData.hpp	Tue Sep 21 06:58:44 2010 -0700
    71.2 +++ b/src/share/vm/ci/ciMethodData.hpp	Wed Sep 22 12:54:51 2010 -0400
    71.3 @@ -1,5 +1,5 @@
    71.4  /*
    71.5 - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
    71.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    71.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.8   *
    71.9   * This code is free software; you can redistribute it and/or modify it
   71.10 @@ -162,6 +162,12 @@
   71.11    // Maturity of the oop when the snapshot is taken.
   71.12    int _current_mileage;
   71.13  
   71.14 +  // These counters hold the age of MDO in tiered. In tiered we can have the same method
   71.15 +  // running at different compilation levels concurrently. So, in order to precisely measure
   71.16 +  // its maturity we need separate counters.
   71.17 +  int _invocation_counter;
   71.18 +  int _backedge_counter;
   71.19 +
   71.20    // Coherent snapshot of original header.
   71.21    methodDataOopDesc _orig;
   71.22  
   71.23 @@ -223,6 +229,16 @@
   71.24    int creation_mileage() { return _orig.creation_mileage(); }
   71.25    int current_mileage()  { return _current_mileage; }
   71.26  
   71.27 +  int invocation_count() { return _invocation_counter; }
   71.28 +  int backedge_count()   { return _backedge_counter;   }
   71.29 +  // Transfer information about the method to methodDataOop.
   71.30 +  // would_profile means we would like to profile this method,
   71.31 +  // meaning it's not trivial.
   71.32 +  void set_would_profile(bool p);
   71.33 +  // Also set the numer of loops and blocks in the method.
   71.34 +  // Again, this is used to determine if a method is trivial.
   71.35 +  void set_compilation_stats(short loops, short blocks);
   71.36 +
   71.37    void load_data();
   71.38  
   71.39    // Convert a dp (data pointer) to a di (data index).
    72.1 --- a/src/share/vm/classfile/classLoader.cpp	Tue Sep 21 06:58:44 2010 -0700
    72.2 +++ b/src/share/vm/classfile/classLoader.cpp	Wed Sep 22 12:54:51 2010 -0400
    72.3 @@ -1292,7 +1292,7 @@
    72.4            // Iterate over all methods in class
    72.5            for (int n = 0; n < k->methods()->length(); n++) {
    72.6              methodHandle m (THREAD, methodOop(k->methods()->obj_at(n)));
    72.7 -            if (CompilationPolicy::canBeCompiled(m)) {
    72.8 +            if (CompilationPolicy::can_be_compiled(m)) {
    72.9  
   72.10                if (++_codecache_sweep_counter == CompileTheWorldSafepointInterval) {
   72.11                  // Give sweeper a chance to keep up with CTW
   72.12 @@ -1301,7 +1301,7 @@
   72.13                  _codecache_sweep_counter = 0;
   72.14                }
   72.15                // Force compilation
   72.16 -              CompileBroker::compile_method(m, InvocationEntryBci,
   72.17 +              CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_initial_compile,
   72.18                                              methodHandle(), 0, "CTW", THREAD);
   72.19                if (HAS_PENDING_EXCEPTION) {
   72.20                  CLEAR_PENDING_EXCEPTION;
   72.21 @@ -1315,7 +1315,7 @@
   72.22                    nm->make_not_entrant();
   72.23                    m->clear_code();
   72.24                  }
   72.25 -                CompileBroker::compile_method(m, InvocationEntryBci,
   72.26 +                CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_full_optimization,
   72.27                                                methodHandle(), 0, "CTW", THREAD);
   72.28                  if (HAS_PENDING_EXCEPTION) {
   72.29                    CLEAR_PENDING_EXCEPTION;
    73.1 --- a/src/share/vm/classfile/javaClasses.cpp	Tue Sep 21 06:58:44 2010 -0700
    73.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Wed Sep 22 12:54:51 2010 -0400
    73.3 @@ -2424,12 +2424,15 @@
    73.4  
    73.5  int java_dyn_MethodTypeForm::_vmslots_offset;
    73.6  int java_dyn_MethodTypeForm::_erasedType_offset;
    73.7 +int java_dyn_MethodTypeForm::_genericInvoker_offset;
    73.8  
    73.9  void java_dyn_MethodTypeForm::compute_offsets() {
   73.10    klassOop k = SystemDictionary::MethodTypeForm_klass();
   73.11    if (k != NULL) {
   73.12      compute_optional_offset(_vmslots_offset,    k, vmSymbols::vmslots_name(),    vmSymbols::int_signature(), true);
   73.13      compute_optional_offset(_erasedType_offset, k, vmSymbols::erasedType_name(), vmSymbols::java_dyn_MethodType_signature(), true);
   73.14 +    compute_optional_offset(_genericInvoker_offset, k, vmSymbols::genericInvoker_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
   73.15 +    if (_genericInvoker_offset == 0)  _genericInvoker_offset = -1;  // set to explicit "empty" value
   73.16    }
   73.17  }
   73.18  
   73.19 @@ -2443,6 +2446,11 @@
   73.20    return mtform->obj_field(_erasedType_offset);
   73.21  }
   73.22  
   73.23 +oop java_dyn_MethodTypeForm::genericInvoker(oop mtform) {
   73.24 +  assert(mtform->klass() == SystemDictionary::MethodTypeForm_klass(), "MTForm only");
   73.25 +  return mtform->obj_field(_genericInvoker_offset);
   73.26 +}
   73.27 +
   73.28  
   73.29  // Support for java_dyn_CallSite
   73.30  
    74.1 --- a/src/share/vm/classfile/javaClasses.hpp	Tue Sep 21 06:58:44 2010 -0700
    74.2 +++ b/src/share/vm/classfile/javaClasses.hpp	Wed Sep 22 12:54:51 2010 -0400
    74.3 @@ -1048,6 +1048,7 @@
    74.4   private:
    74.5    static int _vmslots_offset;           // number of argument slots needed
    74.6    static int _erasedType_offset;        // erasedType = canonical MethodType
    74.7 +  static int _genericInvoker_offset;    // genericInvoker = adapter for invokeGeneric
    74.8  
    74.9    static void compute_offsets();
   74.10  
   74.11 @@ -1055,10 +1056,12 @@
   74.12    // Accessors
   74.13    static int            vmslots(oop mtform);
   74.14    static oop            erasedType(oop mtform);
   74.15 +  static oop            genericInvoker(oop mtform);
   74.16  
   74.17    // Accessors for code generation:
   74.18    static int vmslots_offset_in_bytes()          { return _vmslots_offset; }
   74.19    static int erasedType_offset_in_bytes()       { return _erasedType_offset; }
   74.20 +  static int genericInvoker_offset_in_bytes()   { return _genericInvoker_offset; }
   74.21  };
   74.22  
   74.23  
    75.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Tue Sep 21 06:58:44 2010 -0700
    75.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Wed Sep 22 12:54:51 2010 -0400
    75.3 @@ -2361,8 +2361,11 @@
    75.4      // Must create lots of stuff here, but outside of the SystemDictionary lock.
    75.5      if (THREAD->is_Compiler_thread())
    75.6        return NULL;              // do not attempt from within compiler
    75.7 +    bool for_invokeGeneric = (name_id == vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name));
    75.8      bool found_on_bcp = false;
    75.9 -    Handle mt = find_method_handle_type(signature(), accessing_klass, found_on_bcp, CHECK_NULL);
   75.10 +    Handle mt = find_method_handle_type(signature(), accessing_klass,
   75.11 +                                        for_invokeGeneric,
   75.12 +                                        found_on_bcp, CHECK_NULL);
   75.13      KlassHandle  mh_klass = SystemDictionaryHandles::MethodHandle_klass();
   75.14      methodHandle m = methodOopDesc::make_invoke_method(mh_klass, name, signature,
   75.15                                                         mt, CHECK_NULL);
   75.16 @@ -2393,6 +2396,7 @@
   75.17  // consistent with this loader.
   75.18  Handle SystemDictionary::find_method_handle_type(symbolHandle signature,
   75.19                                                   KlassHandle accessing_klass,
   75.20 +                                                 bool for_invokeGeneric,
   75.21                                                   bool& return_bcp_flag,
   75.22                                                   TRAPS) {
   75.23    Handle class_loader, protection_domain;
   75.24 @@ -2448,10 +2452,26 @@
   75.25                           vmSymbols::findMethodHandleType_name(),
   75.26                           vmSymbols::findMethodHandleType_signature(),
   75.27                           &args, CHECK_(empty));
   75.28 +  Handle method_type(THREAD, (oop) result.get_jobject());
   75.29 +
   75.30 +  if (for_invokeGeneric) {
   75.31 +    // call sun.dyn.MethodHandleNatives::notifyGenericMethodType(MethodType) -> void
   75.32 +    JavaCallArguments args(Handle(THREAD, method_type()));
   75.33 +    JavaValue no_result(T_VOID);
   75.34 +    JavaCalls::call_static(&no_result,
   75.35 +                           SystemDictionary::MethodHandleNatives_klass(),
   75.36 +                           vmSymbols::notifyGenericMethodType_name(),
   75.37 +                           vmSymbols::notifyGenericMethodType_signature(),
   75.38 +                           &args, THREAD);
   75.39 +    if (HAS_PENDING_EXCEPTION) {
   75.40 +      // If the notification fails, just kill it.
   75.41 +      CLEAR_PENDING_EXCEPTION;
   75.42 +    }
   75.43 +  }
   75.44  
   75.45    // report back to the caller with the MethodType and the "on_bcp" flag
   75.46    return_bcp_flag = is_on_bcp;
   75.47 -  return Handle(THREAD, (oop) result.get_jobject());
   75.48 +  return method_type;
   75.49  }
   75.50  
   75.51  // Ask Java code to find or construct a method handle constant.
   75.52 @@ -2466,7 +2486,7 @@
   75.53    Handle type;
   75.54    if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') {
   75.55      bool ignore_is_on_bcp = false;
   75.56 -    type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty));
   75.57 +    type = find_method_handle_type(signature, caller, false, ignore_is_on_bcp, CHECK_(empty));
   75.58    } else {
   75.59      SignatureStream ss(signature(), false);
   75.60      if (!ss.is_done()) {
    76.1 --- a/src/share/vm/classfile/systemDictionary.hpp	Tue Sep 21 06:58:44 2010 -0700
    76.2 +++ b/src/share/vm/classfile/systemDictionary.hpp	Wed Sep 22 12:54:51 2010 -0400
    76.3 @@ -471,6 +471,7 @@
    76.4    // ask Java to compute a java.dyn.MethodType object for a given signature
    76.5    static Handle    find_method_handle_type(symbolHandle signature,
    76.6                                             KlassHandle accessing_klass,
    76.7 +                                           bool for_invokeGeneric,
    76.8                                             bool& return_bcp_flag,
    76.9                                             TRAPS);
   76.10    // ask Java to compute a java.dyn.MethodHandle object for a given CP entry
    77.1 --- a/src/share/vm/classfile/vmSymbols.hpp	Tue Sep 21 06:58:44 2010 -0700
    77.2 +++ b/src/share/vm/classfile/vmSymbols.hpp	Wed Sep 22 12:54:51 2010 -0400
    77.3 @@ -246,6 +246,8 @@
    77.4    /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */            \
    77.5    template(findMethodHandleType_name,                 "findMethodHandleType")                     \
    77.6    template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \
    77.7 +  template(notifyGenericMethodType_name,              "notifyGenericMethodType")                  \
    77.8 +  template(notifyGenericMethodType_signature,         "(Ljava/dyn/MethodType;)V")                 \
    77.9    template(linkMethodHandleConstant_name,             "linkMethodHandleConstant")                 \
   77.10    template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \
   77.11    template(makeDynamicCallSite_name,                  "makeDynamicCallSite")                      \
   77.12 @@ -345,6 +347,7 @@
   77.13    template(ptypes_name,                               "ptypes")                                   \
   77.14    template(form_name,                                 "form")                                     \
   77.15    template(erasedType_name,                           "erasedType")                               \
   77.16 +  template(genericInvoker_name,                       "genericInvoker")                           \
   77.17    template(append_name,                               "append")                                   \
   77.18                                                                                                    \
   77.19    /* non-intrinsic name/signature pairs: */                                                       \
    78.1 --- a/src/share/vm/code/nmethod.cpp	Tue Sep 21 06:58:44 2010 -0700
    78.2 +++ b/src/share/vm/code/nmethod.cpp	Wed Sep 22 12:54:51 2010 -0400
    78.3 @@ -867,9 +867,9 @@
    78.4    if (compiler() != NULL) {
    78.5      log->print(" compiler='%s'", compiler()->name());
    78.6    }
    78.7 -#ifdef TIERED
    78.8 -  log->print(" level='%d'", comp_level());
    78.9 -#endif // TIERED
   78.10 +  if (TieredCompilation) {
   78.11 +    log->print(" level='%d'", comp_level());
   78.12 +  }
   78.13  }
   78.14  
   78.15  
   78.16 @@ -908,35 +908,73 @@
   78.17  #undef LOG_OFFSET
   78.18  
   78.19  
   78.20 +void nmethod::print_compilation(outputStream *st, const char *method_name, const char *title,
   78.21 +                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level) {
   78.22 +  bool is_synchronized = false, has_xhandler = false, is_native = false;
   78.23 +  int code_size = -1;
   78.24 +  if (method != NULL) {
   78.25 +    is_synchronized = method->is_synchronized();
   78.26 +    has_xhandler    = method->has_exception_handler();
   78.27 +    is_native       = method->is_native();
   78.28 +    code_size       = method->code_size();
   78.29 +  }
   78.30 +  // print compilation number
   78.31 +  st->print("%7d %3d", (int)tty->time_stamp().milliseconds(), compile_id);
   78.32 +
   78.33 +  // print method attributes
   78.34 +  const bool is_osr = bci != InvocationEntryBci;
   78.35 +  const char blocking_char  = is_blocking     ? 'b' : ' ';
   78.36 +  const char compile_type   = is_osr          ? '%' : ' ';
   78.37 +  const char sync_char      = is_synchronized ? 's' : ' ';
   78.38 +  const char exception_char = has_xhandler    ? '!' : ' ';
   78.39 +  const char native_char    = is_native       ? 'n' : ' ';
   78.40 +  st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, native_char);
   78.41 +  if (TieredCompilation) {
   78.42 +    st->print("%d ", comp_level);
   78.43 +  }
   78.44 +
   78.45 +  // print optional title
   78.46 +  bool do_nl = false;
   78.47 +  if (title != NULL) {
   78.48 +    int tlen = (int) strlen(title);
   78.49 +    bool do_nl = false;
   78.50 +    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
   78.51 +    st->print("%.*s", tlen, title);
   78.52 +  } else {
   78.53 +    do_nl = true;
   78.54 +  }
   78.55 +
   78.56 +  // print method name string if given
   78.57 +  if (method_name != NULL) {
   78.58 +    st->print(method_name);
   78.59 +  } else {
   78.60 +    // otherwise as the method to print itself
   78.61 +    if (method != NULL && !Universe::heap()->is_gc_active()) {
   78.62 +      method->print_short_name(st);
   78.63 +    } else {
   78.64 +      st->print("(method)");
   78.65 +    }
   78.66 +  }
   78.67 +
   78.68 +  if (method != NULL) {
   78.69 +    // print osr_bci if any
   78.70 +    if (is_osr) st->print(" @ %d", bci);
   78.71 +    // print method size
   78.72 +    st->print(" (%d bytes)", code_size);
   78.73 +  }
   78.74 +  if (do_nl) st->cr();
   78.75 +}
   78.76 +
   78.77  // Print out more verbose output usually for a newly created nmethod.
   78.78  void nmethod::print_on(outputStream* st, const char* title) const {
   78.79    if (st != NULL) {
   78.80      ttyLocker ttyl;
   78.81 -    // Print a little tag line that looks like +PrintCompilation output:
   78.82 -    int tlen = (int) strlen(title);
   78.83 -    bool do_nl = false;
   78.84 -    if (tlen > 0 && title[tlen-1] == '\n') { tlen--; do_nl = true; }
   78.85 -    st->print("%3d%c  %.*s",
   78.86 -              compile_id(),
   78.87 -              is_osr_method() ? '%' :
   78.88 -              method() != NULL &&
   78.89 -              is_native_method() ? 'n' : ' ',
   78.90 -              tlen, title);
   78.91 -#ifdef TIERED
   78.92 -    st->print(" (%d) ", comp_level());
   78.93 -#endif // TIERED
   78.94 +    print_compilation(st, /*method_name*/NULL, title,
   78.95 +                      method(), /*is_blocking*/false,
   78.96 +                      compile_id(),
   78.97 +                      is_osr_method() ? osr_entry_bci() : InvocationEntryBci,
   78.98 +                      comp_level());
   78.99      if (WizardMode) st->print(" (" INTPTR_FORMAT ")", this);
  78.100 -    if (Universe::heap()->is_gc_active() && method() != NULL) {
  78.101 -      st->print("(method)");
  78.102 -    } else if (method() != NULL) {
  78.103 -        method()->print_short_name(st);
  78.104 -      if (is_osr_method())
  78.105 -        st->print(" @ %d", osr_entry_bci());
  78.106 -      if (method()->code_size() > 0)
  78.107 -        st->print(" (%d bytes)", method()->code_size());
  78.108 -    }
  78.109 -
  78.110 -    if (do_nl)  st->cr();
  78.111    }
  78.112  }
  78.113  
  78.114 @@ -1137,6 +1175,7 @@
  78.115  }
  78.116  
  78.117  void nmethod::inc_decompile_count() {
  78.118 +  if (!is_compiled_by_c2()) return;
  78.119    // Could be gated by ProfileTraps, but do not bother...
  78.120    methodOop m = method();
  78.121    if (m == NULL)  return;
    79.1 --- a/src/share/vm/code/nmethod.hpp	Tue Sep 21 06:58:44 2010 -0700
    79.2 +++ b/src/share/vm/code/nmethod.hpp	Wed Sep 22 12:54:51 2010 -0400
    79.3 @@ -599,6 +599,10 @@
    79.4    void verify_scopes();
    79.5    void verify_interrupt_point(address interrupt_point);
    79.6  
    79.7 +  // print compilation helper
    79.8 +  static void print_compilation(outputStream *st, const char *method_name, const char *title,
    79.9 +                                methodOop method, bool is_blocking, int compile_id, int bci, int comp_level);
   79.10 +
   79.11    // printing support
   79.12    void print()                          const;
   79.13    void print_code();
    80.1 --- a/src/share/vm/compiler/compileBroker.cpp	Tue Sep 21 06:58:44 2010 -0700
    80.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Wed Sep 22 12:54:51 2010 -0400
    80.3 @@ -123,20 +123,12 @@
    80.4  int CompileBroker::_sum_nmethod_size             = 0;
    80.5  int CompileBroker::_sum_nmethod_code_size        = 0;
    80.6  
    80.7 -CompileQueue* CompileBroker::_method_queue   = NULL;
    80.8 +CompileQueue* CompileBroker::_c2_method_queue   = NULL;
    80.9 +CompileQueue* CompileBroker::_c1_method_queue   = NULL;
   80.10  CompileTask*  CompileBroker::_task_free_list = NULL;
   80.11  
   80.12  GrowableArray<CompilerThread*>* CompileBroker::_method_threads = NULL;
   80.13  
   80.14 -// CompileTaskWrapper
   80.15 -//
   80.16 -// Assign this task to the current thread.  Deallocate the task
   80.17 -// when the compilation is complete.
   80.18 -class CompileTaskWrapper : StackObj {
   80.19 -public:
   80.20 -  CompileTaskWrapper(CompileTask* task);
   80.21 -  ~CompileTaskWrapper();
   80.22 -};
   80.23  
   80.24  CompileTaskWrapper::CompileTaskWrapper(CompileTask* task) {
   80.25    CompilerThread* thread = CompilerThread::current();
   80.26 @@ -246,6 +238,12 @@
   80.27               bool_to_str(_is_complete), bool_to_str(_is_success));
   80.28  }
   80.29  
   80.30 +
   80.31 +void CompileTask::print_compilation(outputStream *st, methodOop method, char* method_name) {
   80.32 +  nmethod::print_compilation(st, method_name,/*title*/ NULL, method,
   80.33 +                             is_blocking(), compile_id(), osr_bci(), comp_level());
   80.34 +}
   80.35 +
   80.36  // ------------------------------------------------------------------
   80.37  // CompileTask::print_line_on_error
   80.38  //
   80.39 @@ -258,32 +256,13 @@
   80.40  //
   80.41  void CompileTask::print_line_on_error(outputStream* st, char* buf, int buflen) {
   80.42    methodOop method = (methodOop)JNIHandles::resolve(_method);
   80.43 -
   80.44    // print compiler name
   80.45    st->print("%s:", CompileBroker::compiler(comp_level())->name());
   80.46 -
   80.47 -  // print compilation number
   80.48 -  st->print("%3d", compile_id());
   80.49 -
   80.50 -  // print method attributes
   80.51 -  const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
   80.52 -  { const char blocking_char  = is_blocking()                      ? 'b' : ' ';
   80.53 -    const char compile_type   = is_osr                             ? '%' : ' ';
   80.54 -    const char sync_char      = method->is_synchronized()          ? 's' : ' ';
   80.55 -    const char exception_char = method->has_exception_handler()    ? '!' : ' ';
   80.56 -    const char tier_char      =
   80.57 -      is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
   80.58 -    st->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
   80.59 +  char* method_name = NULL;
   80.60 +  if (method != NULL) {
   80.61 +    method_name = method->name_and_sig_as_C_string(buf, buflen);
   80.62    }
   80.63 -
   80.64 -  // Use buf to get method name and signature
   80.65 -  if (method != NULL) st->print("%s", method->name_and_sig_as_C_string(buf, buflen));
   80.66 -
   80.67 -  // print osr_bci if any
   80.68 -  if (is_osr) st->print(" @ %d", osr_bci());
   80.69 -
   80.70 -  // print method size
   80.71 -  st->print_cr(" (%d bytes)", method->code_size());
   80.72 +  print_compilation(st, method, method_name);
   80.73  }
   80.74  
   80.75  // ------------------------------------------------------------------
   80.76 @@ -298,29 +277,7 @@
   80.77  
   80.78    // print compiler name if requested
   80.79    if (CIPrintCompilerName) tty->print("%s:", CompileBroker::compiler(comp_level())->name());
   80.80 -
   80.81 -  // print compilation number
   80.82 -  tty->print("%3d", compile_id());
   80.83 -
   80.84 -  // print method attributes
   80.85 -  const bool is_osr = osr_bci() != CompileBroker::standard_entry_bci;
   80.86 -  { const char blocking_char  = is_blocking()                      ? 'b' : ' ';
   80.87 -    const char compile_type   = is_osr                             ? '%' : ' ';
   80.88 -    const char sync_char      = method->is_synchronized()          ? 's' : ' ';
   80.89 -    const char exception_char = method->has_exception_handler()    ? '!' : ' ';
   80.90 -    const char tier_char      =
   80.91 -      is_highest_tier_compile(comp_level())                        ? ' ' : ('0' + comp_level());
   80.92 -    tty->print("%c%c%c%c%c ", compile_type, sync_char, exception_char, blocking_char, tier_char);
   80.93 -  }
   80.94 -
   80.95 -  // print method name
   80.96 -  method->print_short_name(tty);
   80.97 -
   80.98 -  // print osr_bci if any
   80.99 -  if (is_osr) tty->print(" @ %d", osr_bci());
  80.100 -
  80.101 -  // print method size
  80.102 -  tty->print_cr(" (%d bytes)", method->code_size());
  80.103 +  print_compilation(tty, method(), NULL);
  80.104  }
  80.105  
  80.106  
  80.107 @@ -427,6 +384,7 @@
  80.108    assert(lock()->owned_by_self(), "must own lock");
  80.109  
  80.110    task->set_next(NULL);
  80.111 +  task->set_prev(NULL);
  80.112  
  80.113    if (_last == NULL) {
  80.114      // The compile queue is empty.
  80.115 @@ -437,8 +395,10 @@
  80.116      // Append the task to the queue.
  80.117      assert(_last->next() == NULL, "not last");
  80.118      _last->set_next(task);
  80.119 +    task->set_prev(_last);
  80.120      _last = task;
  80.121    }
  80.122 +  ++_size;
  80.123  
  80.124    // Mark the method as being in the compile queue.
  80.125    ((methodOop)JNIHandles::resolve(task->method_handle()))->set_queued_for_compilation();
  80.126 @@ -452,10 +412,9 @@
  80.127    }
  80.128  
  80.129    // Notify CompilerThreads that a task is available.
  80.130 -  lock()->notify();
  80.131 +  lock()->notify_all();
  80.132  }
  80.133  
  80.134 -
  80.135  // ------------------------------------------------------------------
  80.136  // CompileQueue::get
  80.137  //
  80.138 @@ -464,7 +423,6 @@
  80.139    NMethodSweeper::possibly_sweep();
  80.140  
  80.141    MutexLocker locker(lock());
  80.142 -
  80.143    // Wait for an available CompileTask.
  80.144    while (_first == NULL) {
  80.145      // There is no work to be done right now.  Wait.
  80.146 @@ -481,20 +439,32 @@
  80.147        lock()->wait();
  80.148      }
  80.149    }
  80.150 +  CompileTask* task = CompilationPolicy::policy()->select_task(this);
  80.151 +  remove(task);
  80.152 +  return task;
  80.153 +}
  80.154  
  80.155 -  CompileTask* task = _first;
  80.156 -
  80.157 -  // Update queue first and last
  80.158 -  _first =_first->next();
  80.159 -  if (_first == NULL) {
  80.160 -    _last = NULL;
  80.161 +void CompileQueue::remove(CompileTask* task)
  80.162 +{
  80.163 +   assert(lock()->owned_by_self(), "must own lock");
  80.164 +  if (task->prev() != NULL) {
  80.165 +    task->prev()->set_next(task->next());
  80.166 +  } else {
  80.167 +    // max is the first element
  80.168 +    assert(task == _first, "Sanity");
  80.169 +    _first = task->next();
  80.170    }
  80.171  
  80.172 -  return task;
  80.173 -
  80.174 +  if (task->next() != NULL) {
  80.175 +    task->next()->set_prev(task->prev());
  80.176 +  } else {
  80.177 +    // max is the last element
  80.178 +    assert(task == _last, "Sanity");
  80.179 +    _last = task->prev();
  80.180 +  }
  80.181 +  --_size;
  80.182  }
  80.183  
  80.184 -
  80.185  // ------------------------------------------------------------------
  80.186  // CompileQueue::print
  80.187  void CompileQueue::print() {
  80.188 @@ -545,7 +515,6 @@
  80.189    }
  80.190  }
  80.191  
  80.192 -
  80.193  // ------------------------------------------------------------------
  80.194  // CompileBroker::compilation_init
  80.195  //
  80.196 @@ -554,18 +523,18 @@
  80.197    _last_method_compiled[0] = '\0';
  80.198  
  80.199    // Set the interface to the current compiler(s).
  80.200 +  int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  80.201 +  int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  80.202  #ifdef COMPILER1
  80.203 -  _compilers[0] = new Compiler();
  80.204 -#ifndef COMPILER2
  80.205 -  _compilers[1] = _compilers[0];
  80.206 -#endif
  80.207 +  if (c1_count > 0) {
  80.208 +    _compilers[0] = new Compiler();
  80.209 +  }
  80.210  #endif // COMPILER1
  80.211  
  80.212  #ifdef COMPILER2
  80.213 -  _compilers[1] = new C2Compiler();
  80.214 -#ifndef COMPILER1
  80.215 -  _compilers[0] = _compilers[1];
  80.216 -#endif
  80.217 +  if (c2_count > 0) {
  80.218 +    _compilers[1] = new C2Compiler();
  80.219 +  }
  80.220  #endif // COMPILER2
  80.221  
  80.222  #ifdef SHARK
  80.223 @@ -580,9 +549,7 @@
  80.224    _task_free_list = NULL;
  80.225  
  80.226    // Start the CompilerThreads
  80.227 -  init_compiler_threads(compiler_count());
  80.228 -
  80.229 -
  80.230 +  init_compiler_threads(c1_count, c2_count);
  80.231    // totalTime performance counter is always created as it is required
  80.232    // by the implementation of java.lang.management.CompilationMBean.
  80.233    {
  80.234 @@ -770,23 +737,38 @@
  80.235  // CompileBroker::init_compiler_threads
  80.236  //
  80.237  // Initialize the compilation queue
  80.238 -void CompileBroker::init_compiler_threads(int compiler_count) {
  80.239 +void CompileBroker::init_compiler_threads(int c1_compiler_count, int c2_compiler_count) {
  80.240    EXCEPTION_MARK;
  80.241 +  assert(c2_compiler_count > 0 || c1_compiler_count > 0, "No compilers?");
  80.242 +  if (c2_compiler_count > 0) {
  80.243 +    _c2_method_queue  = new CompileQueue("C2MethodQueue",  MethodCompileQueue_lock);
  80.244 +  }
  80.245 +  if (c1_compiler_count > 0) {
  80.246 +    _c1_method_queue  = new CompileQueue("C1MethodQueue",  MethodCompileQueue_lock);
  80.247 +  }
  80.248  
  80.249 -  _method_queue  = new CompileQueue("MethodQueue",  MethodCompileQueue_lock);
  80.250 +  int compiler_count = c1_compiler_count + c2_compiler_count;
  80.251 +
  80.252    _method_threads =
  80.253      new (ResourceObj::C_HEAP) GrowableArray<CompilerThread*>(compiler_count, true);
  80.254  
  80.255    char name_buffer[256];
  80.256 -  int i;
  80.257 -  for (i = 0; i < compiler_count; i++) {
  80.258 +  for (int i = 0; i < c2_compiler_count; i++) {
  80.259      // Create a name for our thread.
  80.260 -    sprintf(name_buffer, "CompilerThread%d", i);
  80.261 +    sprintf(name_buffer, "C2 CompilerThread%d", i);
  80.262      CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  80.263 -
  80.264 -    CompilerThread* new_thread = make_compiler_thread(name_buffer, _method_queue, counters, CHECK);
  80.265 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c2_method_queue, counters, CHECK);
  80.266      _method_threads->append(new_thread);
  80.267    }
  80.268 +
  80.269 +  for (int i = c2_compiler_count; i < compiler_count; i++) {
  80.270 +    // Create a name for our thread.
  80.271 +    sprintf(name_buffer, "C1 CompilerThread%d", i);
  80.272 +    CompilerCounters* counters = new CompilerCounters("compilerThread", i, CHECK);
  80.273 +    CompilerThread* new_thread = make_compiler_thread(name_buffer, _c1_method_queue, counters, CHECK);
  80.274 +    _method_threads->append(new_thread);
  80.275 +  }
  80.276 +
  80.277    if (UsePerfData) {
  80.278      PerfDataManager::create_constant(SUN_CI, "threads", PerfData::U_Bytes,
  80.279                                       compiler_count, CHECK);
  80.280 @@ -796,7 +778,9 @@
  80.281  // ------------------------------------------------------------------
  80.282  // CompileBroker::is_idle
  80.283  bool CompileBroker::is_idle() {
  80.284 -  if (!_method_queue->is_empty()) {
  80.285 +  if (_c2_method_queue != NULL && !_c2_method_queue->is_empty()) {
  80.286 +    return false;
  80.287 +  } else if (_c1_method_queue != NULL && !_c1_method_queue->is_empty()) {
  80.288      return false;
  80.289    } else {
  80.290      int num_threads = _method_threads->length();
  80.291 @@ -859,6 +843,7 @@
  80.292      return;
  80.293    }
  80.294  
  80.295 +
  80.296    // If this method is already in the compile queue, then
  80.297    // we do not block the current thread.
  80.298    if (compilation_is_in_queue(method, osr_bci)) {
  80.299 @@ -876,10 +861,11 @@
  80.300    // Outputs from the following MutexLocker block:
  80.301    CompileTask* task     = NULL;
  80.302    bool         blocking = false;
  80.303 +  CompileQueue* queue  = compile_queue(comp_level);
  80.304  
  80.305    // Acquire our lock.
  80.306    {
  80.307 -    MutexLocker locker(_method_queue->lock(), THREAD);
  80.308 +    MutexLocker locker(queue->lock(), THREAD);
  80.309  
  80.310      // Make sure the method has not slipped into the queues since
  80.311      // last we checked; note that those checks were "fast bail-outs".
  80.312 @@ -945,7 +931,7 @@
  80.313      // and in that case it's best to protect both the testing (here) of
  80.314      // these bits, and their updating (here and elsewhere) under a
  80.315      // common lock.
  80.316 -    task = create_compile_task(_method_queue,
  80.317 +    task = create_compile_task(queue,
  80.318                                 compile_id, method,
  80.319                                 osr_bci, comp_level,
  80.320                                 hot_method, hot_count, comment,
  80.321 @@ -959,6 +945,7 @@
  80.322  
  80.323  
  80.324  nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
  80.325 +                                       int comp_level,
  80.326                                         methodHandle hot_method, int hot_count,
  80.327                                         const char* comment, TRAPS) {
  80.328    // make sure arguments make sense
  80.329 @@ -967,26 +954,9 @@
  80.330    assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
  80.331    assert(!instanceKlass::cast(method->method_holder())->is_not_initialized(), "method holder must be initialized");
  80.332  
  80.333 -  int comp_level = CompilationPolicy::policy()->compilation_level(method, osr_bci);
  80.334 -
  80.335 -#ifdef TIERED
  80.336 -  if (TieredCompilation && StressTieredRuntime) {
  80.337 -    static int flipper = 0;
  80.338 -    if (is_even(flipper++)) {
  80.339 -      comp_level = CompLevel_fast_compile;
  80.340 -    } else {
  80.341 -      comp_level = CompLevel_full_optimization;
  80.342 -    }
  80.343 +  if (!TieredCompilation) {
  80.344 +    comp_level = CompLevel_highest_tier;
  80.345    }
  80.346 -#ifdef SPARC
  80.347 -  // QQQ FIX ME
  80.348 -  // C2 only returns long results in G1 and c1 doesn't understand so disallow c2
  80.349 -  // compiles of long results
  80.350 -  if (TieredCompilation && method()->result_type() == T_LONG) {
  80.351 -    comp_level = CompLevel_fast_compile;
  80.352 -  }
  80.353 -#endif // SPARC
  80.354 -#endif // TIERED
  80.355  
  80.356    // return quickly if possible
  80.357  
  80.358 @@ -1000,12 +970,10 @@
  80.359    if (osr_bci == InvocationEntryBci) {
  80.360      // standard compilation
  80.361      nmethod* method_code = method->code();
  80.362 -    if (method_code != NULL
  80.363 -#ifdef TIERED
  80.364 -       && ( method_code->is_compiled_by_c2() || comp_level == CompLevel_fast_compile )
  80.365 -#endif // TIERED
  80.366 -      ) {
  80.367 -      return method_code;
  80.368 +    if (method_code != NULL) {
  80.369 +      if (compilation_is_complete(method, osr_bci, comp_level)) {
  80.370 +        return method_code;
  80.371 +      }
  80.372      }
  80.373      if (method->is_not_compilable(comp_level)) return NULL;
  80.374  
  80.375 @@ -1021,10 +989,11 @@
  80.376      // osr compilation
  80.377  #ifndef TIERED
  80.378      // seems like an assert of dubious value
  80.379 -    assert(comp_level == CompLevel_full_optimization,
  80.380 +    assert(comp_level == CompLevel_highest_tier,
  80.381             "all OSR compiles are assumed to be at a single compilation lavel");
  80.382  #endif // TIERED
  80.383 -    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci);
  80.384 +    // We accept a higher level osr method
  80.385 +    nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
  80.386      if (nm != NULL) return nm;
  80.387      if (method->is_not_osr_compilable()) return NULL;
  80.388    }
  80.389 @@ -1071,8 +1040,7 @@
  80.390    // If the compiler is shut off due to code cache flushing or otherwise,
  80.391    // fail out now so blocking compiles dont hang the java thread
  80.392    if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
  80.393 -    method->invocation_counter()->decay();
  80.394 -    method->backedge_counter()->decay();
  80.395 +    CompilationPolicy::policy()->delay_compilation(method());
  80.396      return NULL;
  80.397    }
  80.398  
  80.399 @@ -1088,7 +1056,8 @@
  80.400    }
  80.401  
  80.402    // return requested nmethod
  80.403 -  return osr_bci  == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci);
  80.404 +  // We accept a higher level osr method
  80.405 +  return osr_bci  == InvocationEntryBci ? method->code() : method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
  80.406  }
  80.407  
  80.408  
  80.409 @@ -1104,7 +1073,7 @@
  80.410      if (method->is_not_osr_compilable()) {
  80.411        return true;
  80.412      } else {
  80.413 -      nmethod* result = method->lookup_osr_nmethod_for(osr_bci);
  80.414 +      nmethod* result = method->lookup_osr_nmethod_for(osr_bci, comp_level, true);
  80.415        return (result != NULL);
  80.416      }
  80.417    } else {
  80.418 @@ -1113,15 +1082,7 @@
  80.419      } else {
  80.420        nmethod* result = method->code();
  80.421        if (result == NULL) return false;
  80.422 -#ifdef TIERED
  80.423 -      if (comp_level == CompLevel_fast_compile) {
  80.424 -        // At worst the code is from c1
  80.425 -        return true;
  80.426 -      }
  80.427 -      // comp level must be full opt
  80.428 -      return result->is_compiled_by_c2();
  80.429 -#endif // TIERED
  80.430 -      return true;
  80.431 +      return comp_level == result->comp_level();
  80.432      }
  80.433    }
  80.434  }
  80.435 @@ -1139,11 +1100,10 @@
  80.436  // versa).  This can be remedied by a full queue search to disambiguate
  80.437  // cases.  If it is deemed profitible, this may be done.
  80.438  bool CompileBroker::compilation_is_in_queue(methodHandle method,
  80.439 -                                          int          osr_bci) {
  80.440 +                                            int          osr_bci) {
  80.441    return method->queued_for_compilation();
  80.442  }
  80.443  
  80.444 -
  80.445  // ------------------------------------------------------------------
  80.446  // CompileBroker::compilation_is_prohibited
  80.447  //
  80.448 @@ -1151,11 +1111,9 @@
  80.449  bool CompileBroker::compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level) {
  80.450    bool is_native = method->is_native();
  80.451    // Some compilers may not support the compilation of natives.
  80.452 -  // QQQ this needs some work ought to only record not compilable at
  80.453 -  // the specified level
  80.454    if (is_native &&
  80.455        (!CICompileNatives || !compiler(comp_level)->supports_native())) {
  80.456 -    method->set_not_compilable_quietly();
  80.457 +    method->set_not_compilable_quietly(comp_level);
  80.458      return true;
  80.459    }
  80.460  
  80.461 @@ -1194,7 +1152,7 @@
  80.462  // compilations may be numbered separately from regular compilations
  80.463  // if certain debugging flags are used.
  80.464  uint CompileBroker::assign_compile_id(methodHandle method, int osr_bci) {
  80.465 -  assert(_method_queue->lock()->owner() == JavaThread::current(),
  80.466 +  assert(MethodCompileQueue_lock->owner() == Thread::current(),
  80.467           "must hold the compilation queue lock");
  80.468    bool is_osr = (osr_bci != standard_entry_bci);
  80.469    assert(!method->is_native(), "no longer compile natives");
  80.470 @@ -1643,7 +1601,6 @@
  80.471  #endif
  80.472  }
  80.473  
  80.474 -
  80.475  // ------------------------------------------------------------------
  80.476  // CompileBroker::handle_full_code_cache
  80.477  //
  80.478 @@ -1883,12 +1840,12 @@
  80.479                  CompileBroker::_t_standard_compilation.seconds() / CompileBroker::_total_standard_compile_count);
  80.480    tty->print_cr("    On stack replacement   : %6.3f s, Average : %2.3f", CompileBroker::_t_osr_compilation.seconds(), CompileBroker::_t_osr_compilation.seconds() / CompileBroker::_total_osr_compile_count);
  80.481  
  80.482 -  if (compiler(CompLevel_fast_compile)) {
  80.483 -    compiler(CompLevel_fast_compile)->print_timers();
  80.484 -    if (compiler(CompLevel_fast_compile) != compiler(CompLevel_highest_tier))
  80.485 -      compiler(CompLevel_highest_tier)->print_timers();
  80.486 +  if (compiler(CompLevel_simple) != NULL) {
  80.487 +    compiler(CompLevel_simple)->print_timers();
  80.488    }
  80.489 -
  80.490 +  if (compiler(CompLevel_full_optimization) != NULL) {
  80.491 +    compiler(CompLevel_full_optimization)->print_timers();
  80.492 +  }
  80.493    tty->cr();
  80.494    int tcb = CompileBroker::_sum_osr_bytes_compiled + CompileBroker::_sum_standard_bytes_compiled;
  80.495    tty->print_cr("  Total compiled bytecodes : %6d bytes", tcb);
    81.1 --- a/src/share/vm/compiler/compileBroker.hpp	Tue Sep 21 06:58:44 2010 -0700
    81.2 +++ b/src/share/vm/compiler/compileBroker.hpp	Wed Sep 22 12:54:51 2010 -0400
    81.3 @@ -1,5 +1,5 @@
    81.4  /*
    81.5 - * Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved.
    81.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    81.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    81.8   *
    81.9   * This code is free software; you can redistribute it and/or modify it
   81.10 @@ -41,7 +41,7 @@
   81.11    int          _comp_level;
   81.12    int          _num_inlined_bytecodes;
   81.13    nmethodLocker* _code_handle;  // holder of eventual result
   81.14 -  CompileTask* _next;
   81.15 +  CompileTask* _next, *_prev;
   81.16  
   81.17    // Fields used for logging why the compilation was initiated:
   81.18    jlong        _time_queued;  // in units of os::elapsed_counter()
   81.19 @@ -49,6 +49,7 @@
   81.20    int          _hot_count;    // information about its invocation counter
   81.21    const char*  _comment;      // more info about the task
   81.22  
   81.23 +  void print_compilation(outputStream *st, methodOop method, char* method_name);
   81.24   public:
   81.25    CompileTask() {
   81.26      _lock = new Monitor(Mutex::nonleaf+2, "CompileTaskLock");
   81.27 @@ -85,15 +86,17 @@
   81.28  
   81.29    CompileTask* next() const                      { return _next; }
   81.30    void         set_next(CompileTask* next)       { _next = next; }
   81.31 +  CompileTask* prev() const                      { return _prev; }
   81.32 +  void         set_prev(CompileTask* prev)       { _prev = prev; }
   81.33  
   81.34    void         print();
   81.35    void         print_line();
   81.36 +
   81.37    void         print_line_on_error(outputStream* st, char* buf, int buflen);
   81.38    void         log_task(xmlStream* log);
   81.39    void         log_task_queued();
   81.40    void         log_task_start(CompileLog* log);
   81.41    void         log_task_done(CompileLog* log);
   81.42 -
   81.43  };
   81.44  
   81.45  // CompilerCounters
   81.46 @@ -141,7 +144,6 @@
   81.47      PerfCounter* compile_counter()           { return _perf_compiles; }
   81.48  };
   81.49  
   81.50 -
   81.51  // CompileQueue
   81.52  //
   81.53  // A list of CompileTasks.
   81.54 @@ -153,26 +155,42 @@
   81.55    CompileTask* _first;
   81.56    CompileTask* _last;
   81.57  
   81.58 +  int _size;
   81.59   public:
   81.60    CompileQueue(const char* name, Monitor* lock) {
   81.61      _name = name;
   81.62      _lock = lock;
   81.63      _first = NULL;
   81.64      _last = NULL;
   81.65 +    _size = 0;
   81.66    }
   81.67  
   81.68    const char*  name() const                      { return _name; }
   81.69    Monitor*     lock() const                      { return _lock; }
   81.70  
   81.71    void         add(CompileTask* task);
   81.72 +  void         remove(CompileTask* task);
   81.73 +  CompileTask* first()                           { return _first; }
   81.74 +  CompileTask* last()                            { return _last;  }
   81.75  
   81.76    CompileTask* get();
   81.77  
   81.78    bool         is_empty() const                  { return _first == NULL; }
   81.79 +  int          size()     const                  { return _size;          }
   81.80  
   81.81    void         print();
   81.82  };
   81.83  
   81.84 +// CompileTaskWrapper
   81.85 +//
   81.86 +// Assign this task to the current thread.  Deallocate the task
   81.87 +// when the compilation is complete.
   81.88 +class CompileTaskWrapper : StackObj {
   81.89 +public:
   81.90 +  CompileTaskWrapper(CompileTask* task);
   81.91 +  ~CompileTaskWrapper();
   81.92 +};
   81.93 +
   81.94  
   81.95  // Compilation
   81.96  //
   81.97 @@ -208,7 +226,8 @@
   81.98    static int  _last_compile_level;
   81.99    static char _last_method_compiled[name_buffer_length];
  81.100  
  81.101 -  static CompileQueue* _method_queue;
  81.102 +  static CompileQueue* _c2_method_queue;
  81.103 +  static CompileQueue* _c1_method_queue;
  81.104    static CompileTask* _task_free_list;
  81.105  
  81.106    static GrowableArray<CompilerThread*>* _method_threads;
  81.107 @@ -256,19 +275,9 @@
  81.108    static int _sum_nmethod_size;
  81.109    static int _sum_nmethod_code_size;
  81.110  
  81.111 -  static int compiler_count() {
  81.112 -    return CICompilerCountPerCPU
  81.113 -      // Example: if CICompilerCountPerCPU is true, then we get
  81.114 -      // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
  81.115 -      // May help big-app startup time.
  81.116 -      ? (MAX2(log2_intptr(os::active_processor_count())-1,1))
  81.117 -      : CICompilerCount;
  81.118 -  }
  81.119 -
  81.120    static CompilerThread* make_compiler_thread(const char* name, CompileQueue* queue, CompilerCounters* counters, TRAPS);
  81.121 -  static void init_compiler_threads(int compiler_count);
  81.122 +  static void init_compiler_threads(int c1_compiler_count, int c2_compiler_count);
  81.123    static bool compilation_is_complete  (methodHandle method, int osr_bci, int comp_level);
  81.124 -  static bool compilation_is_in_queue  (methodHandle method, int osr_bci);
  81.125    static bool compilation_is_prohibited(methodHandle method, int osr_bci, int comp_level);
  81.126    static uint assign_compile_id        (methodHandle method, int osr_bci);
  81.127    static bool is_compile_blocking      (methodHandle method, int osr_bci);
  81.128 @@ -301,23 +310,35 @@
  81.129                                    int hot_count,
  81.130                                    const char* comment,
  81.131                                    TRAPS);
  81.132 -
  81.133 +  static CompileQueue* compile_queue(int comp_level) {
  81.134 +    if (is_c2_compile(comp_level)) return _c2_method_queue;
  81.135 +    if (is_c1_compile(comp_level)) return _c1_method_queue;
  81.136 +    return NULL;
  81.137 +  }
  81.138   public:
  81.139    enum {
  81.140      // The entry bci used for non-OSR compilations.
  81.141      standard_entry_bci = InvocationEntryBci
  81.142    };
  81.143  
  81.144 -  static AbstractCompiler* compiler(int level ) {
  81.145 -    if (level == CompLevel_fast_compile) return _compilers[0];
  81.146 -    assert(level == CompLevel_highest_tier, "what level?");
  81.147 -    return _compilers[1];
  81.148 +  static AbstractCompiler* compiler(int comp_level) {
  81.149 +    if (is_c2_compile(comp_level)) return _compilers[1]; // C2
  81.150 +    if (is_c1_compile(comp_level)) return _compilers[0]; // C1
  81.151 +    return NULL;
  81.152    }
  81.153  
  81.154 +  static bool compilation_is_in_queue(methodHandle method, int osr_bci);
  81.155 +  static int queue_size(int comp_level) {
  81.156 +    CompileQueue *q = compile_queue(comp_level);
  81.157 +    return q != NULL ? q->size() : 0;
  81.158 +  }
  81.159    static void compilation_init();
  81.160    static void init_compiler_thread_log();
  81.161 -  static nmethod* compile_method(methodHandle method, int osr_bci,
  81.162 -                                 methodHandle hot_method, int hot_count,
  81.163 +  static nmethod* compile_method(methodHandle method,
  81.164 +                                 int osr_bci,
  81.165 +                                 int comp_level,
  81.166 +                                 methodHandle hot_method,
  81.167 +                                 int hot_count,
  81.168                                   const char* comment, TRAPS);
  81.169  
  81.170    static void compiler_thread_loop();
    82.1 --- a/src/share/vm/includeDB_compiler1	Tue Sep 21 06:58:44 2010 -0700
    82.2 +++ b/src/share/vm/includeDB_compiler1	Wed Sep 22 12:54:51 2010 -0400
    82.3 @@ -19,7 +19,6 @@
    82.4  // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    82.5  // or visit www.oracle.com if you need additional information or have any
    82.6  // questions.
    82.7 -//  
    82.8  //
    82.9  
   82.10  // NOTE: DO NOT CHANGE THIS COPYRIGHT TO NEW STYLE - IT WILL BREAK makeDeps!
    83.1 --- a/src/share/vm/includeDB_compiler2	Tue Sep 21 06:58:44 2010 -0700
    83.2 +++ b/src/share/vm/includeDB_compiler2	Wed Sep 22 12:54:51 2010 -0400
    83.3 @@ -1,5 +1,5 @@
    83.4  //
    83.5 -// Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
    83.6 +// Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    83.7  // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    83.8  //
    83.9  // This code is free software; you can redistribute it and/or modify it
    84.1 --- a/src/share/vm/includeDB_core	Tue Sep 21 06:58:44 2010 -0700
    84.2 +++ b/src/share/vm/includeDB_core	Wed Sep 22 12:54:51 2010 -0400
    84.3 @@ -1081,6 +1081,8 @@
    84.4  compilationPolicy.cpp                   nmethod.hpp
    84.5  compilationPolicy.cpp                   oop.inline.hpp
    84.6  compilationPolicy.cpp                   rframe.hpp
    84.7 +compilationPolicy.cpp			scopeDesc.hpp
    84.8 +compilationPolicy.cpp                   simpleThresholdPolicy.hpp
    84.9  compilationPolicy.cpp                   stubRoutines.hpp
   84.10  compilationPolicy.cpp                   thread.hpp
   84.11  compilationPolicy.cpp                   timer.hpp
   84.12 @@ -1451,6 +1453,7 @@
   84.13  deoptimization.cpp                      allocation.inline.hpp
   84.14  deoptimization.cpp                      biasedLocking.hpp
   84.15  deoptimization.cpp                      bytecode.hpp
   84.16 +deoptimization.cpp			compilationPolicy.hpp
   84.17  deoptimization.cpp                      debugInfoRec.hpp
   84.18  deoptimization.cpp                      deoptimization.hpp
   84.19  deoptimization.cpp                      events.hpp
   84.20 @@ -2172,6 +2175,7 @@
   84.21  
   84.22  interpreterRuntime.cpp                  biasedLocking.hpp
   84.23  interpreterRuntime.cpp                  collectedHeap.hpp
   84.24 +interpreterRuntime.cpp                  compileBroker.hpp
   84.25  interpreterRuntime.cpp                  compilationPolicy.hpp
   84.26  interpreterRuntime.cpp                  constantPoolOop.hpp
   84.27  interpreterRuntime.cpp                  cpCacheOop.hpp
   84.28 @@ -2829,6 +2833,7 @@
   84.29  
   84.30  methodDataOop.cpp                       bytecode.hpp
   84.31  methodDataOop.cpp                       bytecodeStream.hpp
   84.32 +methodDataOop.cpp                       compilationPolicy.hpp
   84.33  methodDataOop.cpp                       deoptimization.hpp
   84.34  methodDataOop.cpp                       handles.inline.hpp
   84.35  methodDataOop.cpp                       linkResolver.hpp
   84.36 @@ -2841,6 +2846,7 @@
   84.37  methodDataOop.hpp                       oop.hpp
   84.38  methodDataOop.hpp                       orderAccess.hpp
   84.39  methodDataOop.hpp                       universe.hpp
   84.40 +methodDataOop.hpp                       methodOop.hpp
   84.41  
   84.42  methodHandleWalk.hpp                    methodHandles.hpp
   84.43  
   84.44 @@ -2906,6 +2912,7 @@
   84.45  methodOop.cpp                           bytecodeTracer.hpp
   84.46  methodOop.cpp                           bytecodes.hpp
   84.47  methodOop.cpp                           collectedHeap.inline.hpp
   84.48 +methodOop.cpp				compilationPolicy.hpp
   84.49  methodOop.cpp                           debugInfoRec.hpp
   84.50  methodOop.cpp                           frame.inline.hpp
   84.51  methodOop.cpp                           gcLocker.hpp
   84.52 @@ -3655,6 +3662,7 @@
   84.53  
   84.54  safepoint.cpp                           codeCache.hpp
   84.55  safepoint.cpp                           collectedHeap.hpp
   84.56 +safepoint.cpp                           compilationPolicy.hpp
   84.57  safepoint.cpp                           deoptimization.hpp
   84.58  safepoint.cpp                           events.hpp
   84.59  safepoint.cpp                           frame.inline.hpp
   84.60 @@ -3799,6 +3807,17 @@
   84.61  signature.hpp                           methodOop.hpp
   84.62  signature.hpp                           top.hpp
   84.63  
   84.64 +simpleThresholdPolicy.cpp               arguments.hpp
   84.65 +simpleThresholdPolicy.cpp               compileBroker.hpp
   84.66 +simpleThresholdPolicy.cpp               resourceArea.hpp
   84.67 +simpleThresholdPolicy.cpp               simpleThresholdPolicy.hpp
   84.68 +simpleThresholdPolicy.cpp               simpleThresholdPolicy.inline.hpp
   84.69 +
   84.70 +simpleThresholdPolicy.hpp               compilationPolicy.hpp
   84.71 +simpleThresholdPolicy.hpp               globalDefinitions.hpp
   84.72 +simpleThresholdPolicy.hpp               methodDataOop.hpp
   84.73 +simpleThresholdPolicy.hpp               nmethod.hpp
   84.74 +
   84.75  sizes.cpp                               sizes.hpp
   84.76  
   84.77  sizes.hpp                               allocation.hpp
   84.78 @@ -3977,6 +3996,7 @@
   84.79  
   84.80  sweeper.cpp                             atomic.hpp
   84.81  sweeper.cpp                             codeCache.hpp
   84.82 +sweeper.cpp				compilationPolicy.hpp
   84.83  sweeper.cpp                             compileBroker.hpp
   84.84  sweeper.cpp                             events.hpp
   84.85  sweeper.cpp                             methodOop.hpp
    85.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp	Tue Sep 21 06:58:44 2010 -0700
    85.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Wed Sep 22 12:54:51 2010 -0400
    85.3 @@ -200,6 +200,7 @@
    85.4  void InterpreterRuntime::note_trap(JavaThread* thread, int reason, TRAPS) {
    85.5    assert(ProfileTraps, "call me only if profiling");
    85.6    methodHandle trap_method(thread, method(thread));
    85.7 +
    85.8    if (trap_method.not_null()) {
    85.9      methodDataHandle trap_mdo(thread, trap_method->method_data());
   85.10      if (trap_mdo.is_null()) {
   85.11 @@ -777,43 +778,6 @@
   85.12  // Miscellaneous
   85.13  
   85.14  
   85.15 -#ifndef PRODUCT
   85.16 -static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
   85.17 -  if (TraceInvocationCounterOverflow) {
   85.18 -    InvocationCounter* ic = m->invocation_counter();
   85.19 -    InvocationCounter* bc = m->backedge_counter();
   85.20 -    ResourceMark rm;
   85.21 -    const char* msg =
   85.22 -      branch_bcp == NULL
   85.23 -      ? "comp-policy cntr ovfl @ %d in entry of "
   85.24 -      : "comp-policy cntr ovfl @ %d in loop of ";
   85.25 -    tty->print(msg, bci);
   85.26 -    m->print_value();
   85.27 -    tty->cr();
   85.28 -    ic->print();
   85.29 -    bc->print();
   85.30 -    if (ProfileInterpreter) {
   85.31 -      if (branch_bcp != NULL) {
   85.32 -        methodDataOop mdo = m->method_data();
   85.33 -        if (mdo != NULL) {
   85.34 -          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
   85.35 -          tty->print_cr("back branch count = %d", count);
   85.36 -        }
   85.37 -      }
   85.38 -    }
   85.39 -  }
   85.40 -}
   85.41 -
   85.42 -static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
   85.43 -  if (TraceOnStackReplacement) {
   85.44 -    ResourceMark rm;
   85.45 -    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
   85.46 -    method->print_short_name(tty);
   85.47 -    tty->print_cr(" at bci %d", bci);
   85.48 -  }
   85.49 -}
   85.50 -#endif // !PRODUCT
   85.51 -
   85.52  nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
   85.53    nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
   85.54    assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
   85.55 @@ -826,7 +790,7 @@
   85.56      frame fr = thread->last_frame();
   85.57      methodOop method =  fr.interpreter_frame_method();
   85.58      int bci = method->bci_from(fr.interpreter_frame_bcp());
   85.59 -    nm = method->lookup_osr_nmethod_for(bci);
   85.60 +    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   85.61    }
   85.62    return nm;
   85.63  }
   85.64 @@ -840,74 +804,32 @@
   85.65    frame fr = thread->last_frame();
   85.66    assert(fr.is_interpreted_frame(), "must come from interpreter");
   85.67    methodHandle method(thread, fr.interpreter_frame_method());
   85.68 -  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
   85.69 -  const int bci = method->bci_from(fr.interpreter_frame_bcp());
   85.70 -  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
   85.71 +  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
   85.72 +  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
   85.73  
   85.74 -  if (JvmtiExport::can_post_interpreter_events()) {
   85.75 -    if (thread->is_interp_only_mode()) {
   85.76 -      // If certain JVMTI events (e.g. frame pop event) are requested then the
   85.77 -      // thread is forced to remain in interpreted code. This is
   85.78 -      // implemented partly by a check in the run_compiled_code
   85.79 -      // section of the interpreter whether we should skip running
   85.80 -      // compiled code, and partly by skipping OSR compiles for
   85.81 -      // interpreted-only threads.
   85.82 -      if (branch_bcp != NULL) {
   85.83 -        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
   85.84 -        return NULL;
   85.85 +  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
   85.86 +
   85.87 +  if (osr_nm != NULL) {
   85.88 +    // We may need to do on-stack replacement which requires that no
   85.89 +    // monitors in the activation are biased because their
   85.90 +    // BasicObjectLocks will need to migrate during OSR. Force
   85.91 +    // unbiasing of all monitors in the activation now (even though
   85.92 +    // the OSR nmethod might be invalidated) because we don't have a
   85.93 +    // safepoint opportunity later once the migration begins.
   85.94 +    if (UseBiasedLocking) {
   85.95 +      ResourceMark rm;
   85.96 +      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
   85.97 +      for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
   85.98 +           kptr < fr.interpreter_frame_monitor_begin();
   85.99 +           kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
  85.100 +        if( kptr->obj() != NULL ) {
  85.101 +          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
  85.102 +        }
  85.103        }
  85.104 +      BiasedLocking::revoke(objects_to_revoke);
  85.105      }
  85.106    }
  85.107 -
  85.108 -  if (branch_bcp == NULL) {
  85.109 -    // when code cache is full, compilation gets switched off, UseCompiler
  85.110 -    // is set to false
  85.111 -    if (!method->has_compiled_code() && UseCompiler) {
  85.112 -      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
  85.113 -    } else {
  85.114 -      // Force counter overflow on method entry, even if no compilation
  85.115 -      // happened.  (The method_invocation_event call does this also.)
  85.116 -      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
  85.117 -    }
  85.118 -    // compilation at an invocation overflow no longer goes and retries test for
  85.119 -    // compiled method. We always run the loser of the race as interpreted.
  85.120 -    // so return NULL
  85.121 -    return NULL;
  85.122 -  } else {
  85.123 -    // counter overflow in a loop => try to do on-stack-replacement
  85.124 -    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
  85.125 -    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
  85.126 -    // when code cache is full, we should not compile any more...
  85.127 -    if (osr_nm == NULL && UseCompiler) {
  85.128 -      const int branch_bci = method->bci_from(branch_bcp);
  85.129 -      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
  85.130 -      osr_nm = method->lookup_osr_nmethod_for(bci);
  85.131 -    }
  85.132 -    if (osr_nm == NULL) {
  85.133 -      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
  85.134 -      return NULL;
  85.135 -    } else {
  85.136 -      // We may need to do on-stack replacement which requires that no
  85.137 -      // monitors in the activation are biased because their
  85.138 -      // BasicObjectLocks will need to migrate during OSR. Force
  85.139 -      // unbiasing of all monitors in the activation now (even though
  85.140 -      // the OSR nmethod might be invalidated) because we don't have a
  85.141 -      // safepoint opportunity later once the migration begins.
  85.142 -      if (UseBiasedLocking) {
  85.143 -        ResourceMark rm;
  85.144 -        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  85.145 -        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
  85.146 -             kptr < fr.interpreter_frame_monitor_begin();
  85.147 -             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
  85.148 -          if( kptr->obj() != NULL ) {
  85.149 -            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
  85.150 -          }
  85.151 -        }
  85.152 -        BiasedLocking::revoke(objects_to_revoke);
  85.153 -      }
  85.154 -      return osr_nm;
  85.155 -    }
  85.156 -  }
  85.157 +  return osr_nm;
  85.158  IRT_END
  85.159  
  85.160  IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))
    86.1 --- a/src/share/vm/interpreter/invocationCounter.cpp	Tue Sep 21 06:58:44 2010 -0700
    86.2 +++ b/src/share/vm/interpreter/invocationCounter.cpp	Wed Sep 22 12:54:51 2010 -0400
    86.3 @@ -1,5 +1,5 @@
    86.4  /*
    86.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    86.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    86.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    86.8   *
    86.9   * This code is free software; you can redistribute it and/or modify it
   86.10 @@ -40,8 +40,7 @@
   86.11  }
   86.12  
   86.13  void InvocationCounter::set_carry() {
   86.14 -  _counter |= carry_mask;
   86.15 -
   86.16 +  set_carry_flag();
   86.17    // The carry bit now indicates that this counter had achieved a very
   86.18    // large value.  Now reduce the value, so that the method can be
   86.19    // executed many more times before re-entering the VM.
   86.20 @@ -52,7 +51,6 @@
   86.21    if (old_count != new_count)  set(state(), new_count);
   86.22  }
   86.23  
   86.24 -
   86.25  void InvocationCounter::set_state(State state) {
   86.26    assert(0 <= state && state < number_of_states, "illegal state");
   86.27    int init = _init[state];
   86.28 @@ -82,11 +80,6 @@
   86.29  int                       InvocationCounter::InterpreterBackwardBranchLimit;
   86.30  int                       InvocationCounter::InterpreterProfileLimit;
   86.31  
   86.32 -// Tier1 limits
   86.33 -int                       InvocationCounter::Tier1InvocationLimit;
   86.34 -int                       InvocationCounter::Tier1BackEdgeLimit;
   86.35 -
   86.36 -
   86.37  
   86.38  const char* InvocationCounter::state_as_string(State state) {
   86.39    switch (state) {
   86.40 @@ -146,8 +139,6 @@
   86.41  
   86.42    InterpreterInvocationLimit = CompileThreshold << number_of_noncount_bits;
   86.43    InterpreterProfileLimit = ((CompileThreshold * InterpreterProfilePercentage) / 100)<< number_of_noncount_bits;
   86.44 -  Tier1InvocationLimit = Tier2CompileThreshold << number_of_noncount_bits;
   86.45 -  Tier1BackEdgeLimit   = Tier2BackEdgeThreshold << number_of_noncount_bits;
   86.46  
   86.47    // When methodData is collected, the backward branch limit is compared against a
   86.48    // methodData counter, rather than an InvocationCounter.  In the former case, we
    87.1 --- a/src/share/vm/interpreter/invocationCounter.hpp	Tue Sep 21 06:58:44 2010 -0700
    87.2 +++ b/src/share/vm/interpreter/invocationCounter.hpp	Wed Sep 22 12:54:51 2010 -0400
    87.3 @@ -1,5 +1,5 @@
    87.4  /*
    87.5 - * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved.
    87.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    87.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    87.8   *
    87.9   * This code is free software; you can redistribute it and/or modify it
   87.10 @@ -43,7 +43,6 @@
   87.11      number_of_count_bits = BitsPerInt - number_of_noncount_bits,
   87.12      state_limit          = nth_bit(number_of_state_bits),
   87.13      count_grain          = nth_bit(number_of_state_bits + number_of_carry_bits),
   87.14 -    count_limit          = nth_bit(number_of_count_bits - 1),
   87.15      carry_mask           = right_n_bits(number_of_carry_bits) << number_of_state_bits,
   87.16      state_mask           = right_n_bits(number_of_state_bits),
   87.17      status_mask          = right_n_bits(number_of_state_bits + number_of_carry_bits),
   87.18 @@ -52,18 +51,16 @@
   87.19  
   87.20   public:
   87.21    static int InterpreterInvocationLimit;        // CompileThreshold scaled for interpreter use
   87.22 -  static int Tier1InvocationLimit;              // CompileThreshold scaled for tier1 use
   87.23 -  static int Tier1BackEdgeLimit;                // BackEdgeThreshold scaled for tier1 use
   87.24 -
   87.25    static int InterpreterBackwardBranchLimit;    // A separate threshold for on stack replacement
   87.26 -
   87.27    static int InterpreterProfileLimit;           // Profiling threshold scaled for interpreter use
   87.28  
   87.29    typedef address (*Action)(methodHandle method, TRAPS);
   87.30  
   87.31    enum PublicConstants {
   87.32      count_increment      = count_grain,          // use this value to increment the 32bit _counter word
   87.33 -    count_mask_value     = count_mask            // use this value to mask the backedge counter
   87.34 +    count_mask_value     = count_mask,           // use this value to mask the backedge counter
   87.35 +    count_shift          = number_of_noncount_bits,
   87.36 +    count_limit          = nth_bit(number_of_count_bits - 1)
   87.37    };
   87.38  
   87.39    enum State {
   87.40 @@ -79,6 +76,7 @@
   87.41    inline void set(State state, int count);       // sets state and counter
   87.42    inline void decay();                           // decay counter (divide by two)
   87.43    void set_carry();                              // set the sticky carry bit
   87.44 +  void set_carry_flag()                          {  _counter |= carry_mask; }
   87.45  
   87.46    // Accessors
   87.47    State  state() const                           { return (State)(_counter & state_mask); }
   87.48 @@ -135,3 +133,4 @@
   87.49    if (c > 0 && new_count == 0) new_count = 1;
   87.50    set(state(), new_count);
   87.51  }
   87.52 +
    88.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Tue Sep 21 06:58:44 2010 -0700
    88.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Wed Sep 22 12:54:51 2010 -0400
    88.3 @@ -1,5 +1,5 @@
    88.4  /*
    88.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    88.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    88.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    88.8   *
    88.9   * This code is free software; you can redistribute it and/or modify it
   88.10 @@ -83,12 +83,12 @@
   88.11    _resolved_method = resolved_method;
   88.12    _selected_method = selected_method;
   88.13    _vtable_index    = vtable_index;
   88.14 -  if (CompilationPolicy::mustBeCompiled(selected_method)) {
   88.15 +  if (CompilationPolicy::must_be_compiled(selected_method)) {
   88.16      // This path is unusual, mostly used by the '-Xcomp' stress test mode.
   88.17  
   88.18 -    // Note: with several active threads, the mustBeCompiled may be true
   88.19 -    //       while canBeCompiled is false; remove assert
   88.20 -    // assert(CompilationPolicy::canBeCompiled(selected_method), "cannot compile");
   88.21 +    // Note: with several active threads, the must_be_compiled may be true
   88.22 +    //       while can_be_compiled is false; remove assert
   88.23 +    // assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
   88.24      if (THREAD->is_Compiler_thread()) {
   88.25        // don't force compilation, resolve was on behalf of compiler
   88.26        return;
   88.27 @@ -104,7 +104,8 @@
   88.28        return;
   88.29      }
   88.30      CompileBroker::compile_method(selected_method, InvocationEntryBci,
   88.31 -                                  methodHandle(), 0, "mustBeCompiled", CHECK);
   88.32 +                                  CompLevel_initial_compile,
   88.33 +                                  methodHandle(), 0, "must_be_compiled", CHECK);
   88.34    }
   88.35  }
   88.36  
    89.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Tue Sep 21 06:58:44 2010 -0700
    89.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Wed Sep 22 12:54:51 2010 -0400
    89.3 @@ -32,7 +32,11 @@
    89.4      MaxPermSize = PermSize;
    89.5    }
    89.6    PermSize = MAX2(min_alignment(), align_size_down_(PermSize, min_alignment()));
    89.7 -  MaxPermSize = align_size_up(MaxPermSize, max_alignment());
    89.8 +  // Don't increase Perm size limit above specified.
    89.9 +  MaxPermSize = align_size_down(MaxPermSize, max_alignment());
   89.10 +  if (PermSize > MaxPermSize) {
   89.11 +    PermSize = MaxPermSize;
   89.12 +  }
   89.13  
   89.14    MinPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MinPermHeapExpansion, min_alignment()));
   89.15    MaxPermHeapExpansion = MAX2(min_alignment(), align_size_down_(MaxPermHeapExpansion, min_alignment()));
    90.1 --- a/src/share/vm/memory/referenceProcessor.hpp	Tue Sep 21 06:58:44 2010 -0700
    90.2 +++ b/src/share/vm/memory/referenceProcessor.hpp	Wed Sep 22 12:54:51 2010 -0400
    90.3 @@ -1,5 +1,5 @@
    90.4  /*
    90.5 - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved.
    90.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
    90.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    90.8   *
    90.9   * This code is free software; you can redistribute it and/or modify it
   90.10 @@ -346,7 +346,8 @@
   90.11    bool _was_discovering_refs;
   90.12   public:
   90.13    NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
   90.14 -    if (_was_discovering_refs = _rp->discovery_enabled()) {
   90.15 +    _was_discovering_refs = _rp->discovery_enabled();
   90.16 +    if (_was_discovering_refs) {
   90.17        _rp->disable_discovery();
   90.18      }
   90.19    }
    91.1 --- a/src/share/vm/oops/constantPoolOop.cpp	Tue Sep 21 06:58:44 2010 -0700
    91.2 +++ b/src/share/vm/oops/constantPoolOop.cpp	Wed Sep 22 12:54:51 2010 -0400
    91.3 @@ -466,6 +466,7 @@
    91.4        bool ignore_is_on_bcp = false;
    91.5        Handle value = SystemDictionary::find_method_handle_type(signature,
    91.6                                                                 klass,
    91.7 +                                                               false,
    91.8                                                                 ignore_is_on_bcp,
    91.9                                                                 CHECK_NULL);
   91.10        result_oop = value();
    92.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Sep 21 06:58:44 2010 -0700
    92.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Wed Sep 22 12:54:51 2010 -0400
    92.3 @@ -1,5 +1,5 @@
    92.4  /*
    92.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    92.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    92.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    92.8   *
    92.9   * This code is free software; you can redistribute it and/or modify it
   92.10 @@ -2200,8 +2200,23 @@
   92.11    assert(n->is_osr_method(), "wrong kind of nmethod");
   92.12    n->set_osr_link(osr_nmethods_head());
   92.13    set_osr_nmethods_head(n);
   92.14 +  // Raise the highest osr level if necessary
   92.15 +  if (TieredCompilation) {
   92.16 +    methodOop m = n->method();
   92.17 +    m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
   92.18 +  }
   92.19    // Remember to unlock again
   92.20    OsrList_lock->unlock();
   92.21 +
   92.22 +  // Get rid of the osr methods for the same bci that have lower levels.
   92.23 +  if (TieredCompilation) {
   92.24 +    for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
   92.25 +      nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
   92.26 +      if (inv != NULL && inv->is_in_use()) {
   92.27 +        inv->make_not_entrant();
   92.28 +      }
   92.29 +    }
   92.30 +  }
   92.31  }
   92.32  
   92.33  
   92.34 @@ -2211,39 +2226,79 @@
   92.35    assert(n->is_osr_method(), "wrong kind of nmethod");
   92.36    nmethod* last = NULL;
   92.37    nmethod* cur  = osr_nmethods_head();
   92.38 +  int max_level = CompLevel_none;  // Find the max comp level excluding n
   92.39 +  methodOop m = n->method();
   92.40    // Search for match
   92.41    while(cur != NULL && cur != n) {
   92.42 +    if (TieredCompilation) {
   92.43 +      // Find max level before n
   92.44 +      max_level = MAX2(max_level, cur->comp_level());
   92.45 +    }
   92.46      last = cur;
   92.47      cur = cur->osr_link();
   92.48    }
   92.49 +  nmethod* next = NULL;
   92.50    if (cur == n) {
   92.51 +    next = cur->osr_link();
   92.52      if (last == NULL) {
   92.53        // Remove first element
   92.54 -      set_osr_nmethods_head(osr_nmethods_head()->osr_link());
   92.55 +      set_osr_nmethods_head(next);
   92.56      } else {
   92.57 -      last->set_osr_link(cur->osr_link());
   92.58 +      last->set_osr_link(next);
   92.59      }
   92.60    }
   92.61    n->set_osr_link(NULL);
   92.62 +  if (TieredCompilation) {
   92.63 +    cur = next;
   92.64 +    while (cur != NULL) {
   92.65 +      // Find max level after n
   92.66 +      max_level = MAX2(max_level, cur->comp_level());
   92.67 +      cur = cur->osr_link();
   92.68 +    }
   92.69 +    m->set_highest_osr_comp_level(max_level);
   92.70 +  }
   92.71    // Remember to unlock again
   92.72    OsrList_lock->unlock();
   92.73  }
   92.74  
   92.75 -nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci) const {
   92.76 +nmethod* instanceKlass::lookup_osr_nmethod(const methodOop m, int bci, int comp_level, bool match_level) const {
   92.77    // This is a short non-blocking critical region, so the no safepoint check is ok.
   92.78    OsrList_lock->lock_without_safepoint_check();
   92.79    nmethod* osr = osr_nmethods_head();
   92.80 +  nmethod* best = NULL;
   92.81    while (osr != NULL) {
   92.82      assert(osr->is_osr_method(), "wrong kind of nmethod found in chain");
   92.83 +    // There can be a time when a c1 osr method exists but we are waiting
   92.84 +    // for a c2 version. When c2 completes its osr nmethod we will trash
   92.85 +    // the c1 version and only be able to find the c2 version. However
   92.86 +    // while we overflow in the c1 code at back branches we don't want to
   92.87 +    // try and switch to the same code as we are already running
   92.88 +
   92.89      if (osr->method() == m &&
   92.90          (bci == InvocationEntryBci || osr->osr_entry_bci() == bci)) {
   92.91 -      // Found a match - return it.
   92.92 -      OsrList_lock->unlock();
   92.93 -      return osr;
   92.94 +      if (match_level) {
   92.95 +        if (osr->comp_level() == comp_level) {
   92.96 +          // Found a match - return it.
   92.97 +          OsrList_lock->unlock();
   92.98 +          return osr;
   92.99 +        }
  92.100 +      } else {
  92.101 +        if (best == NULL || (osr->comp_level() > best->comp_level())) {
  92.102 +          if (osr->comp_level() == CompLevel_highest_tier) {
  92.103 +            // Found the best possible - return it.
  92.104 +            OsrList_lock->unlock();
  92.105 +            return osr;
  92.106 +          }
  92.107 +          best = osr;
  92.108 +        }
  92.109 +      }
  92.110      }
  92.111      osr = osr->osr_link();
  92.112    }
  92.113    OsrList_lock->unlock();
  92.114 +  if (best != NULL && best->comp_level() >= comp_level && match_level == false) {
  92.115 +    return best;
  92.116 +  }
  92.117    return NULL;
  92.118  }
  92.119  
    93.1 --- a/src/share/vm/oops/instanceKlass.hpp	Tue Sep 21 06:58:44 2010 -0700
    93.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Wed Sep 22 12:54:51 2010 -0400
    93.3 @@ -1,5 +1,5 @@
    93.4  /*
    93.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    93.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    93.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    93.8   *
    93.9   * This code is free software; you can redistribute it and/or modify it
   93.10 @@ -588,7 +588,7 @@
   93.11    void set_osr_nmethods_head(nmethod* h)     { _osr_nmethods_head = h; };
   93.12    void add_osr_nmethod(nmethod* n);
   93.13    void remove_osr_nmethod(nmethod* n);
   93.14 -  nmethod* lookup_osr_nmethod(const methodOop m, int bci) const;
   93.15 +  nmethod* lookup_osr_nmethod(const methodOop m, int bci, int level, bool match_level) const;
   93.16  
   93.17    // Breakpoint support (see methods on methodOop for details)
   93.18    BreakpointInfo* breakpoints() const       { return _breakpoints; };
    94.1 --- a/src/share/vm/oops/methodDataOop.cpp	Tue Sep 21 06:58:44 2010 -0700
    94.2 +++ b/src/share/vm/oops/methodDataOop.cpp	Wed Sep 22 12:54:51 2010 -0400
    94.3 @@ -1,5 +1,5 @@
    94.4  /*
    94.5 - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
    94.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
    94.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    94.8   *
    94.9   * This code is free software; you can redistribute it and/or modify it
   94.10 @@ -283,11 +283,17 @@
   94.11      if (receiver(row) != NULL)  entries++;
   94.12    }
   94.13    st->print_cr("count(%u) entries(%u)", count(), entries);
   94.14 +  int total = count();
   94.15 +  for (row = 0; row < row_limit(); row++) {
   94.16 +    if (receiver(row) != NULL) {
   94.17 +      total += receiver_count(row);
   94.18 +    }
   94.19 +  }
   94.20    for (row = 0; row < row_limit(); row++) {
   94.21      if (receiver(row) != NULL) {
   94.22        tab(st);
   94.23        receiver(row)->print_value_on(st);
   94.24 -      st->print_cr("(%u)", receiver_count(row));
   94.25 +      st->print_cr("(%u %4.2f)", receiver_count(row), (float) receiver_count(row) / (float) total);
   94.26      }
   94.27    }
   94.28  }
   94.29 @@ -743,9 +749,18 @@
   94.30  // Initialize the methodDataOop corresponding to a given method.
   94.31  void methodDataOopDesc::initialize(methodHandle method) {
   94.32    ResourceMark rm;
   94.33 -
   94.34    // Set the method back-pointer.
   94.35    _method = method();
   94.36 +
   94.37 +  if (TieredCompilation) {
   94.38 +    _invocation_counter.init();
   94.39 +    _backedge_counter.init();
   94.40 +    _num_loops = 0;
   94.41 +    _num_blocks = 0;
   94.42 +    _highest_comp_level = 0;
   94.43 +    _highest_osr_comp_level = 0;
   94.44 +    _would_profile = false;
   94.45 +  }
   94.46    set_creation_mileage(mileage_of(method()));
   94.47  
   94.48    // Initialize flags and trap history.
   94.49 @@ -798,32 +813,25 @@
   94.50  // Get a measure of how much mileage the method has on it.
   94.51  int methodDataOopDesc::mileage_of(methodOop method) {
   94.52    int mileage = 0;
   94.53 -  int iic = method->interpreter_invocation_count();
   94.54 -  if (mileage < iic)  mileage = iic;
   94.55 -
   94.56 -  InvocationCounter* ic = method->invocation_counter();
   94.57 -  InvocationCounter* bc = method->backedge_counter();
   94.58 -
   94.59 -  int icval = ic->count();
   94.60 -  if (ic->carry()) icval += CompileThreshold;
   94.61 -  if (mileage < icval)  mileage = icval;
   94.62 -  int bcval = bc->count();
   94.63 -  if (bc->carry()) bcval += CompileThreshold;
   94.64 -  if (mileage < bcval)  mileage = bcval;
   94.65 +  if (TieredCompilation) {
   94.66 +    mileage = MAX2(method->invocation_count(), method->backedge_count());
   94.67 +  } else {
   94.68 +    int iic = method->interpreter_invocation_count();
   94.69 +    if (mileage < iic)  mileage = iic;
   94.70 +    InvocationCounter* ic = method->invocation_counter();
   94.71 +    InvocationCounter* bc = method->backedge_counter();
   94.72 +    int icval = ic->count();
   94.73 +    if (ic->carry()) icval += CompileThreshold;
   94.74 +    if (mileage < icval)  mileage = icval;
   94.75 +    int bcval = bc->count();
   94.76 +    if (bc->carry()) bcval += CompileThreshold;
   94.77 +    if (mileage < bcval)  mileage = bcval;
   94.78 +  }
   94.79    return mileage;
   94.80  }
   94.81  
   94.82  bool methodDataOopDesc::is_mature() const {
   94.83 -  uint current = mileage_of(_method);
   94.84 -  uint initial = creation_mileage();
   94.85 -  if (current < initial)
   94.86 -    return true;  // some sort of overflow
   94.87 -  uint target;
   94.88 -  if (ProfileMaturityPercentage <= 0)
   94.89 -    target = (uint) -ProfileMaturityPercentage;  // absolute value
   94.90 -  else
   94.91 -    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
   94.92 -  return (current >= initial + target);
   94.93 +  return CompilationPolicy::policy()->is_mature(_method);
   94.94  }
   94.95  
   94.96  // Translate a bci to its corresponding data index (di).
    95.1 --- a/src/share/vm/oops/methodDataOop.hpp	Tue Sep 21 06:58:44 2010 -0700
    95.2 +++ b/src/share/vm/oops/methodDataOop.hpp	Wed Sep 22 12:54:51 2010 -0400
    95.3 @@ -1206,7 +1206,25 @@
    95.4    intx              _arg_stack;       // bit set of stack-allocatable arguments
    95.5    intx              _arg_returned;    // bit set of returned arguments
    95.6  
    95.7 -  int _creation_mileage;            // method mileage at MDO creation
    95.8 +  int _creation_mileage;              // method mileage at MDO creation
    95.9 +
   95.10 +  // How many invocations has this MDO seen?
   95.11 +  // These counters are used to determine the exact age of MDO.
   95.12 +  // We need those because in tiered a method can be concurrently
   95.13 +  // executed at different levels.
   95.14 +  InvocationCounter _invocation_counter;
   95.15 +  // Same for backedges.
   95.16 +  InvocationCounter _backedge_counter;
   95.17 +  // Number of loops and blocks is computed when compiling the first
   95.18 +  // time with C1. It is used to determine if method is trivial.
   95.19 +  short             _num_loops;
   95.20 +  short             _num_blocks;
   95.21 +  // Highest compile level this method has ever seen.
   95.22 +  u1                _highest_comp_level;
   95.23 +  // Same for OSR level
   95.24 +  u1                _highest_osr_comp_level;
   95.25 +  // Does this method contain anything worth profiling?
   95.26 +  bool              _would_profile;
   95.27  
   95.28    // Size of _data array in bytes.  (Excludes header and extra_data fields.)
   95.29    int _data_size;
   95.30 @@ -1292,6 +1310,36 @@
   95.31  
   95.32    int      creation_mileage() const  { return _creation_mileage; }
   95.33    void set_creation_mileage(int x)   { _creation_mileage = x; }
   95.34 +
   95.35 +  int invocation_count() {
   95.36 +    if (invocation_counter()->carry()) {
   95.37 +      return InvocationCounter::count_limit;
   95.38 +    }
   95.39 +    return invocation_counter()->count();
   95.40 +  }
   95.41 +  int backedge_count() {
   95.42 +    if (backedge_counter()->carry()) {
   95.43 +      return InvocationCounter::count_limit;
   95.44 +    }
   95.45 +    return backedge_counter()->count();
   95.46 +  }
   95.47 +
   95.48 +  InvocationCounter* invocation_counter()     { return &_invocation_counter; }
   95.49 +  InvocationCounter* backedge_counter()       { return &_backedge_counter;   }
   95.50 +
   95.51 +  void set_would_profile(bool p)              { _would_profile = p;    }
   95.52 +  bool would_profile() const                  { return _would_profile; }
   95.53 +
   95.54 +  int highest_comp_level()                    { return _highest_comp_level;      }
   95.55 +  void set_highest_comp_level(int level)      { _highest_comp_level = level;     }
   95.56 +  int highest_osr_comp_level()                { return _highest_osr_comp_level;  }
   95.57 +  void set_highest_osr_comp_level(int level)  { _highest_osr_comp_level = level; }
   95.58 +
   95.59 +  int num_loops() const                       { return _num_loops;  }
   95.60 +  void set_num_loops(int n)                   { _num_loops = n;     }
   95.61 +  int num_blocks() const                      { return _num_blocks; }
   95.62 +  void set_num_blocks(int n)                  { _num_blocks = n;    }
   95.63 +
   95.64    bool is_mature() const;  // consult mileage and ProfileMaturityPercentage
   95.65    static int mileage_of(methodOop m);
   95.66  
   95.67 @@ -1413,7 +1461,7 @@
   95.68    void inc_decompile_count() {
   95.69      _nof_decompiles += 1;
   95.70      if (decompile_count() > (uint)PerMethodRecompilationCutoff) {
   95.71 -      method()->set_not_compilable();
   95.72 +      method()->set_not_compilable(CompLevel_full_optimization);
   95.73      }
   95.74    }
   95.75  
   95.76 @@ -1422,6 +1470,13 @@
   95.77      return byte_offset_of(methodDataOopDesc, _data[0]);
   95.78    }
   95.79  
   95.80 +  static ByteSize invocation_counter_offset() {
   95.81 +    return byte_offset_of(methodDataOopDesc, _invocation_counter);
   95.82 +  }
   95.83 +  static ByteSize backedge_counter_offset() {
   95.84 +    return byte_offset_of(methodDataOopDesc, _backedge_counter);
   95.85 +  }
   95.86 +
   95.87    // GC support
   95.88    oop* adr_method() const { return (oop*)&_method; }
   95.89    bool object_is_parsable() const { return _size != 0; }
    96.1 --- a/src/share/vm/oops/methodKlass.cpp	Tue Sep 21 06:58:44 2010 -0700
    96.2 +++ b/src/share/vm/oops/methodKlass.cpp	Wed Sep 22 12:54:51 2010 -0400
    96.3 @@ -1,5 +1,5 @@
    96.4  /*
    96.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
    96.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    96.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    96.8   *
    96.9   * This code is free software; you can redistribute it and/or modify it
   96.10 @@ -75,7 +75,6 @@
   96.11  
   96.12    // Fix and bury in methodOop
   96.13    m->set_interpreter_entry(NULL); // sets i2i entry and from_int
   96.14 -  m->set_highest_tier_compile(CompLevel_none);
   96.15    m->set_adapter_entry(NULL);
   96.16    m->clear_code(); // from_c/from_i get set to c2i/i2i
   96.17  
   96.18 @@ -89,6 +88,7 @@
   96.19    m->invocation_counter()->init();
   96.20    m->backedge_counter()->init();
   96.21    m->clear_number_of_breakpoints();
   96.22 +
   96.23    assert(m->is_parsable(), "must be parsable here.");
   96.24    assert(m->size() == size, "wrong size for object");
   96.25    // We should not publish an uprasable object's reference
   96.26 @@ -246,8 +246,8 @@
   96.27    st->print_cr(" - method size:       %d",   m->method_size());
   96.28    if (m->intrinsic_id() != vmIntrinsics::_none)
   96.29      st->print_cr(" - intrinsic id:      %d %s", m->intrinsic_id(), vmIntrinsics::name_at(m->intrinsic_id()));
   96.30 -  if (m->highest_tier_compile() != CompLevel_none)
   96.31 -    st->print_cr(" - highest tier:      %d", m->highest_tier_compile());
   96.32 +  if (m->highest_comp_level() != CompLevel_none)
   96.33 +    st->print_cr(" - highest level:     %d", m->highest_comp_level());
   96.34    st->print_cr(" - vtable index:      %d",   m->_vtable_index);
   96.35    st->print_cr(" - i2i entry:         " INTPTR_FORMAT, m->interpreter_entry());
   96.36    st->print_cr(" - adapter:           " INTPTR_FORMAT, m->adapter());
    97.1 --- a/src/share/vm/oops/methodOop.cpp	Tue Sep 21 06:58:44 2010 -0700
    97.2 +++ b/src/share/vm/oops/methodOop.cpp	Wed Sep 22 12:54:51 2010 -0400
    97.3 @@ -233,7 +233,7 @@
    97.4  }
    97.5  
    97.6  
    97.7 -bool methodOopDesc::was_executed_more_than(int n) const {
    97.8 +bool methodOopDesc::was_executed_more_than(int n) {
    97.9    // Invocation counter is reset when the methodOop is compiled.
   97.10    // If the method has compiled code we therefore assume it has
   97.11    // be excuted more than n times.
   97.12 @@ -241,7 +241,8 @@
   97.13      // interpreter doesn't bump invocation counter of trivial methods
   97.14      // compiler does not bump invocation counter of compiled methods
   97.15      return true;
   97.16 -  } else if (_invocation_counter.carry()) {
   97.17 +  }
   97.18 +  else if (_invocation_counter.carry() || (method_data() != NULL && method_data()->invocation_counter()->carry())) {
   97.19      // The carry bit is set when the counter overflows and causes
   97.20      // a compilation to occur.  We don't know how many times
   97.21      // the counter has been reset, so we simply assume it has
   97.22 @@ -253,7 +254,7 @@
   97.23  }
   97.24  
   97.25  #ifndef PRODUCT
   97.26 -void methodOopDesc::print_invocation_count() const {
   97.27 +void methodOopDesc::print_invocation_count() {
   97.28    if (is_static()) tty->print("static ");
   97.29    if (is_final()) tty->print("final ");
   97.30    if (is_synchronized()) tty->print("synchronized ");
   97.31 @@ -574,16 +575,19 @@
   97.32      // compilers must recognize this method specially, or not at all
   97.33      return true;
   97.34    }
   97.35 -
   97.36 -#ifdef COMPILER2
   97.37 -  if (is_tier1_compile(comp_level)) {
   97.38 -    if (is_not_tier1_compilable()) {
   97.39 -      return true;
   97.40 -    }
   97.41 +  if (number_of_breakpoints() > 0) {
   97.42 +    return true;
   97.43    }
   97.44 -#endif // COMPILER2
   97.45 -  return (_invocation_counter.state() == InvocationCounter::wait_for_nothing)
   97.46 -          || (number_of_breakpoints() > 0);
   97.47 +  if (comp_level == CompLevel_any) {
   97.48 +    return is_not_c1_compilable() || is_not_c2_compilable();
   97.49 +  }
   97.50 +  if (is_c1_compile(comp_level)) {
   97.51 +    return is_not_c1_compilable();
   97.52 +  }
   97.53 +  if (is_c2_compile(comp_level)) {
   97.54 +    return is_not_c2_compilable();
   97.55 +  }
   97.56 +  return false;
   97.57  }
   97.58  
   97.59  // call this when compiler finds that this method is not compilable
   97.60 @@ -604,15 +608,18 @@
   97.61      xtty->stamp();
   97.62      xtty->end_elem();
   97.63    }
   97.64 -#ifdef COMPILER2
   97.65 -  if (is_tier1_compile(comp_level)) {
   97.66 -    set_not_tier1_compilable();
   97.67 -    return;
   97.68 +  if (comp_level == CompLevel_all) {
   97.69 +    set_not_c1_compilable();
   97.70 +    set_not_c2_compilable();
   97.71 +  } else {
   97.72 +    if (is_c1_compile(comp_level)) {
   97.73 +      set_not_c1_compilable();
   97.74 +    } else
   97.75 +      if (is_c2_compile(comp_level)) {
   97.76 +        set_not_c2_compilable();
   97.77 +      }
   97.78    }
   97.79 -#endif /* COMPILER2 */
   97.80 -  assert(comp_level == CompLevel_highest_tier, "unexpected compilation level");
   97.81 -  invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
   97.82 -  backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
   97.83 +  CompilationPolicy::policy()->disable_compilation(this);
   97.84  }
   97.85  
   97.86  // Revert to using the interpreter and clear out the nmethod
   97.87 @@ -649,7 +656,6 @@
   97.88    set_method_data(NULL);
   97.89    set_interpreter_throwout_count(0);
   97.90    set_interpreter_invocation_count(0);
   97.91 -  _highest_tier_compile = CompLevel_none;
   97.92  }
   97.93  
   97.94  // Called when the method_holder is getting linked. Setup entrypoints so the method
   97.95 @@ -746,8 +752,8 @@
   97.96    int comp_level = code->comp_level();
   97.97    // In theory there could be a race here. In practice it is unlikely
   97.98    // and not worth worrying about.
   97.99 -  if (comp_level > mh->highest_tier_compile()) {
  97.100 -    mh->set_highest_tier_compile(comp_level);
  97.101 +  if (comp_level > mh->highest_comp_level()) {
  97.102 +    mh->set_highest_comp_level(comp_level);
  97.103    }
  97.104  
  97.105    OrderAccess::storestore();
  97.106 @@ -813,11 +819,13 @@
  97.107  
  97.108  bool methodOopDesc::is_method_handle_invoke_name(vmSymbols::SID name_sid) {
  97.109    switch (name_sid) {
  97.110 -  case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):  // FIXME: remove this transitional form
  97.111    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
  97.112    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
  97.113      return true;
  97.114    }
  97.115 +  if (AllowTransitionalJSR292
  97.116 +      && name_sid == vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name))
  97.117 +    return true;
  97.118    return false;
  97.119  }
  97.120  
  97.121 @@ -905,12 +913,16 @@
  97.122    m->set_signature_index(_imcp_invoke_signature);
  97.123    assert(is_method_handle_invoke_name(m->name()), "");
  97.124    assert(m->signature() == signature(), "");
  97.125 +  assert(m->is_method_handle_invoke(), "");
  97.126  #ifdef CC_INTERP
  97.127    ResultTypeFinder rtf(signature());
  97.128    m->set_result_index(rtf.type());
  97.129  #endif
  97.130    m->compute_size_of_parameters(THREAD);
  97.131    m->set_exception_table(Universe::the_empty_int_array());
  97.132 +  m->init_intrinsic_id();
  97.133 +  assert(m->intrinsic_id() == vmIntrinsics::_invokeExact ||
  97.134 +         m->intrinsic_id() == vmIntrinsics::_invokeGeneric, "must be an invoker");
  97.135  
  97.136    // Finally, set up its entry points.
  97.137    assert(m->method_handle_type() == method_type(), "");
  97.138 @@ -1023,6 +1035,7 @@
  97.139    assert(_intrinsic_id == vmIntrinsics::_none, "do this just once");
  97.140    const uintptr_t max_id_uint = right_n_bits((int)(sizeof(_intrinsic_id) * BitsPerByte));
  97.141    assert((uintptr_t)vmIntrinsics::ID_LIMIT <= max_id_uint, "else fix size");
  97.142 +  assert(intrinsic_id_size_in_bytes() == sizeof(_intrinsic_id), "");
  97.143  
  97.144    // the klass name is well-known:
  97.145    vmSymbols::SID klass_id = klass_id_for_intrinsics(method_holder());
  97.146 @@ -1030,9 +1043,10 @@
  97.147  
  97.148    // ditto for method and signature:
  97.149    vmSymbols::SID  name_id = vmSymbols::find_sid(name());
  97.150 -  if (name_id  == vmSymbols::NO_SID)  return;
  97.151 +  if (name_id == vmSymbols::NO_SID)  return;
  97.152    vmSymbols::SID   sig_id = vmSymbols::find_sid(signature());
  97.153 -  if (sig_id   == vmSymbols::NO_SID)  return;
  97.154 +  if (klass_id != vmSymbols::VM_SYMBOL_ENUM_NAME(java_dyn_MethodHandle)
  97.155 +      && sig_id == vmSymbols::NO_SID)  return;
  97.156    jshort flags = access_flags().as_short();
  97.157  
  97.158    vmIntrinsics::ID id = vmIntrinsics::find_id(klass_id, name_id, sig_id, flags);
  97.159 @@ -1061,10 +1075,13 @@
  97.160      if (is_static() || !is_native())  break;
  97.161      switch (name_id) {
  97.162      case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeGeneric_name):
  97.163 -      id = vmIntrinsics::_invokeGeneric; break;
  97.164 -    default:
  97.165 -      if (is_method_handle_invoke_name(name()))
  97.166 -        id = vmIntrinsics::_invokeExact;
  97.167 +      id = vmIntrinsics::_invokeGeneric;
  97.168 +      break;
  97.169 +    case vmSymbols::VM_SYMBOL_ENUM_NAME(invokeExact_name):
  97.170 +      id = vmIntrinsics::_invokeExact;
  97.171 +      break;
  97.172 +    case vmSymbols::VM_SYMBOL_ENUM_NAME(invoke_name):
  97.173 +      if (AllowTransitionalJSR292)  id = vmIntrinsics::_invokeExact;
  97.174        break;
  97.175      }
  97.176      break;
  97.177 @@ -1442,6 +1459,64 @@
  97.178  }
  97.179  
  97.180  
  97.181 +int methodOopDesc::invocation_count() {
  97.182 +  if (TieredCompilation) {
  97.183 +    const methodDataOop mdo = method_data();
  97.184 +    if (invocation_counter()->carry() || ((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
  97.185 +      return InvocationCounter::count_limit;
  97.186 +    } else {
  97.187 +      return invocation_counter()->count() + ((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
  97.188 +    }
  97.189 +  } else {
  97.190 +    return invocation_counter()->count();
  97.191 +  }
  97.192 +}
  97.193 +
  97.194 +int methodOopDesc::backedge_count() {
  97.195 +  if (TieredCompilation) {
  97.196 +    const methodDataOop mdo = method_data();
  97.197 +    if (backedge_counter()->carry() || ((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
  97.198 +      return InvocationCounter::count_limit;
  97.199 +    } else {
  97.200 +      return backedge_counter()->count() + ((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
  97.201 +    }
  97.202 +  } else {
  97.203 +    return backedge_counter()->count();
  97.204 +  }
  97.205 +}
  97.206 +
  97.207 +int methodOopDesc::highest_comp_level() const {
  97.208 +  methodDataOop mdo = method_data();
  97.209 +  if (mdo != NULL) {
  97.210 +    return mdo->highest_comp_level();
  97.211 +  } else {
  97.212 +    return CompLevel_none;
  97.213 +  }
  97.214 +}
  97.215 +
  97.216 +int methodOopDesc::highest_osr_comp_level() const {
  97.217 +  methodDataOop mdo = method_data();
  97.218 +  if (mdo != NULL) {
  97.219 +    return mdo->highest_osr_comp_level();
  97.220 +  } else {
  97.221 +    return CompLevel_none;
  97.222 +  }
  97.223 +}
  97.224 +
  97.225 +void methodOopDesc::set_highest_comp_level(int level) {
  97.226 +  methodDataOop mdo = method_data();
  97.227 +  if (mdo != NULL) {
  97.228 +    mdo->set_highest_comp_level(level);
  97.229 +  }
  97.230 +}
  97.231 +
  97.232 +void methodOopDesc::set_highest_osr_comp_level(int level) {
  97.233 +  methodDataOop mdo = method_data();
  97.234 +  if (mdo != NULL) {
  97.235 +    mdo->set_highest_osr_comp_level(level);
  97.236 +  }
  97.237 +}
  97.238 +
  97.239  BreakpointInfo::BreakpointInfo(methodOop m, int bci) {
  97.240    _bci = bci;
  97.241    _name_index = m->name_index();
    98.1 --- a/src/share/vm/oops/methodOop.hpp	Tue Sep 21 06:58:44 2010 -0700
    98.2 +++ b/src/share/vm/oops/methodOop.hpp	Wed Sep 22 12:54:51 2010 -0400
    98.3 @@ -62,9 +62,9 @@
    98.4  // | method_size             | max_stack                  |
    98.5  // | max_locals              | size_of_parameters         |
    98.6  // |------------------------------------------------------|
    98.7 -// | intrinsic_id, highest_tier  |       (unused)         |
    98.8 +// | intrinsic_id, (unused)  |  throwout_count            |
    98.9  // |------------------------------------------------------|
   98.10 -// | throwout_count          | num_breakpoints            |
   98.11 +// | num_breakpoints         |  (unused)                  |
   98.12  // |------------------------------------------------------|
   98.13  // | invocation_counter                                   |
   98.14  // | backedge_counter                                     |
   98.15 @@ -83,7 +83,6 @@
   98.16  class CheckedExceptionElement;
   98.17  class LocalVariableTableElement;
   98.18  class AdapterHandlerEntry;
   98.19 -
   98.20  class methodDataOopDesc;
   98.21  
   98.22  class methodOopDesc : public oopDesc {
   98.23 @@ -93,7 +92,7 @@
   98.24    constMethodOop    _constMethod;                // Method read-only data.
   98.25    constantPoolOop   _constants;                  // Constant pool
   98.26    methodDataOop     _method_data;
   98.27 -  int               _interpreter_invocation_count; // Count of times invoked
   98.28 +  int               _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
   98.29    AccessFlags       _access_flags;               // Access flags
   98.30    int               _vtable_index;               // vtable index of this method (see VtableIndexFlag)
   98.31                                                   // note: can have vtables with >2**16 elements (because of inheritance)
   98.32 @@ -105,11 +104,11 @@
   98.33    u2                _max_locals;                 // Number of local variables used by this method
   98.34    u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
   98.35    u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
   98.36 -  u1                _highest_tier_compile;       // Highest compile level this method has ever seen.
   98.37    u2                _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
   98.38    u2                _number_of_breakpoints;      // fullspeed debugging support
   98.39    InvocationCounter _invocation_counter;         // Incremented before each activation of the method - used to trigger frequency-based optimizations
   98.40    InvocationCounter _backedge_counter;           // Incremented before each backedge taken - used to trigger frequencey-based optimizations
   98.41 +
   98.42  #ifndef PRODUCT
   98.43    int               _compiled_invocation_count;  // Number of nmethod invocations so far (for perf. debugging)
   98.44  #endif
   98.45 @@ -221,8 +220,11 @@
   98.46    // max locals
   98.47    int  max_locals() const                        { return _max_locals; }
   98.48    void set_max_locals(int size)                  { _max_locals = size; }
   98.49 -  int highest_tier_compile()                     { return _highest_tier_compile;}
   98.50 -  void set_highest_tier_compile(int level)      { _highest_tier_compile = level;}
   98.51 +
   98.52 +  int highest_comp_level() const;
   98.53 +  void set_highest_comp_level(int level);
   98.54 +  int highest_osr_comp_level() const;
   98.55 +  void set_highest_osr_comp_level(int level);
   98.56  
   98.57    // Count of times method was exited via exception while interpreting
   98.58    void interpreter_throwout_increment() {
   98.59 @@ -276,21 +278,29 @@
   98.60    }
   98.61  
   98.62    // invocation counter
   98.63 -  InvocationCounter* invocation_counter()        { return &_invocation_counter; }
   98.64 -  InvocationCounter* backedge_counter()          { return &_backedge_counter; }
   98.65 -  int invocation_count() const                   { return _invocation_counter.count(); }
   98.66 -  int backedge_count() const                     { return _backedge_counter.count(); }
   98.67 -  bool was_executed_more_than(int n) const;
   98.68 -  bool was_never_executed() const                { return !was_executed_more_than(0); }
   98.69 +  InvocationCounter* invocation_counter() { return &_invocation_counter; }
   98.70 +  InvocationCounter* backedge_counter()   { return &_backedge_counter; }
   98.71 +
   98.72 +  int invocation_count();
   98.73 +  int backedge_count();
   98.74 +
   98.75 +  bool was_executed_more_than(int n);
   98.76 +  bool was_never_executed()                      { return !was_executed_more_than(0); }
   98.77  
   98.78    static void build_interpreter_method_data(methodHandle method, TRAPS);
   98.79  
   98.80 -  int interpreter_invocation_count() const       { return _interpreter_invocation_count; }
   98.81 +  int interpreter_invocation_count() {
   98.82 +    if (TieredCompilation) return invocation_count();
   98.83 +    else return _interpreter_invocation_count;
   98.84 +  }
   98.85    void set_interpreter_invocation_count(int count) { _interpreter_invocation_count = count; }
   98.86 -  int increment_interpreter_invocation_count() { return ++_interpreter_invocation_count; }
   98.87 +  int increment_interpreter_invocation_count() {
   98.88 +    if (TieredCompilation) ShouldNotReachHere();
   98.89 +    return ++_interpreter_invocation_count;
   98.90 +  }
   98.91  
   98.92  #ifndef PRODUCT
   98.93 -  int  compiled_invocation_count() const         { return _compiled_invocation_count; }
   98.94 +  int  compiled_invocation_count() const         { return _compiled_invocation_count;  }
   98.95    void set_compiled_invocation_count(int count)  { _compiled_invocation_count = count; }
   98.96  #endif // not PRODUCT
   98.97  
   98.98 @@ -361,7 +371,7 @@
   98.99  
  98.100  #ifndef PRODUCT
  98.101    // operations on invocation counter
  98.102 -  void print_invocation_count() const;
  98.103 +  void print_invocation_count();
  98.104  #endif
  98.105  
  98.106    // byte codes
  98.107 @@ -506,6 +516,8 @@
  98.108    static int method_data_offset_in_bytes()       { return offset_of(methodOopDesc, _method_data); }
  98.109    static int interpreter_invocation_counter_offset_in_bytes()
  98.110                                                   { return offset_of(methodOopDesc, _interpreter_invocation_count); }
  98.111 +  static int intrinsic_id_offset_in_bytes()      { return offset_of(methodOopDesc, _intrinsic_id); }
  98.112 +  static int intrinsic_id_size_in_bytes()        { return sizeof(u1); }
  98.113  
  98.114    // Static methods that are used to implement member methods where an exposed this pointer
  98.115    // is needed due to possible GCs
  98.116 @@ -587,8 +599,13 @@
  98.117    static vmSymbols::SID klass_id_for_intrinsics(klassOop holder);
  98.118  
  98.119    // On-stack replacement support
  98.120 -  bool has_osr_nmethod()                         { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci) != NULL; }
  98.121 -  nmethod* lookup_osr_nmethod_for(int bci)       { return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci); }
  98.122 +  bool has_osr_nmethod(int level, bool match_level) {
  98.123 +   return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, InvocationEntryBci, level, match_level) != NULL;
  98.124 +  }
  98.125 +
  98.126 +  nmethod* lookup_osr_nmethod_for(int bci, int level, bool match_level) {
  98.127 +    return instanceKlass::cast(method_holder())->lookup_osr_nmethod(this, bci, level, match_level);
  98.128 +  }
  98.129  
  98.130    // Inline cache support
  98.131    void cleanup_inline_caches();
  98.132 @@ -600,22 +617,24 @@
  98.133    // Indicates whether compilation failed earlier for this method, or
  98.134    // whether it is not compilable for another reason like having a
  98.135    // breakpoint set in it.
  98.136 -  bool is_not_compilable(int comp_level = CompLevel_highest_tier) const;
  98.137 -  void set_not_compilable(int comp_level = CompLevel_highest_tier, bool report = true);
  98.138 -  void set_not_compilable_quietly(int comp_level = CompLevel_highest_tier) {
  98.139 +  bool is_not_compilable(int comp_level = CompLevel_any) const;
  98.140 +  void set_not_compilable(int comp_level = CompLevel_all, bool report = true);
  98.141 +  void set_not_compilable_quietly(int comp_level = CompLevel_all) {
  98.142      set_not_compilable(comp_level, false);
  98.143    }
  98.144 -
  98.145 -  bool is_not_osr_compilable() const             { return is_not_compilable() || access_flags().is_not_osr_compilable(); }
  98.146 -  void set_not_osr_compilable()                  { _access_flags.set_not_osr_compilable(); }
  98.147 -
  98.148 -  bool is_not_tier1_compilable() const           { return access_flags().is_not_tier1_compilable(); }
  98.149 -  void set_not_tier1_compilable()                { _access_flags.set_not_tier1_compilable(); }
  98.150 +  bool is_not_osr_compilable(int comp_level = CompLevel_any) const {
  98.151 +    return is_not_compilable(comp_level) || access_flags().is_not_osr_compilable();
  98.152 +  }
  98.153 +  void set_not_osr_compilable()               { _access_flags.set_not_osr_compilable();       }
  98.154 +  bool is_not_c1_compilable() const           { return access_flags().is_not_c1_compilable(); }
  98.155 +  void set_not_c1_compilable()                { _access_flags.set_not_c1_compilable();        }
  98.156 +  bool is_not_c2_compilable() const           { return access_flags().is_not_c2_compilable(); }
  98.157 +  void set_not_c2_compilable()                { _access_flags.set_not_c2_compilable();        }
  98.158  
  98.159    // Background compilation support
  98.160 -  bool queued_for_compilation() const            { return access_flags().queued_for_compilation();    }
  98.161 -  void set_queued_for_compilation()              { _access_flags.set_queued_for_compilation(); }
  98.162 -  void clear_queued_for_compilation()            { _access_flags.clear_queued_for_compilation(); }
  98.163 +  bool queued_for_compilation() const  { return access_flags().queued_for_compilation(); }
  98.164 +  void set_queued_for_compilation()    { _access_flags.set_queued_for_compilation();     }
  98.165 +  void clear_queued_for_compilation()  { _access_flags.clear_queued_for_compilation();   }
  98.166  
  98.167    static methodOop method_from_bcp(address bcp);
  98.168  
    99.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Tue Sep 21 06:58:44 2010 -0700
    99.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Wed Sep 22 12:54:51 2010 -0400
    99.3 @@ -140,7 +140,7 @@
    99.4    } else {
    99.5      // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
    99.6      if (callee_method->has_compiled_code() &&
    99.7 -        callee_method->instructions_size() > InlineSmallCode/4)
    99.8 +        callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode/4)
    99.9        return "already compiled into a medium method";
   99.10    }
   99.11    if (size > max_size) {
   99.12 @@ -180,7 +180,7 @@
   99.13        }
   99.14      }
   99.15  
   99.16 -    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode) {
   99.17 +    if (callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode) {
   99.18        wci_result->set_profit(wci_result->profit() * 0.1);
   99.19        // %%% adjust wci_result->size()?
   99.20      }
   99.21 @@ -206,7 +206,7 @@
   99.22  
   99.23    // Now perform checks which are heuristic
   99.24  
   99.25 -  if( callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode )
   99.26 +  if( callee_method->has_compiled_code() && callee_method->instructions_size(CompLevel_full_optimization) > InlineSmallCode )
   99.27      return "already compiled into a big method";
   99.28  
   99.29    // don't inline exception code unless the top method belongs to an
   100.1 --- a/src/share/vm/opto/compile.cpp	Tue Sep 21 06:58:44 2010 -0700
   100.2 +++ b/src/share/vm/opto/compile.cpp	Wed Sep 22 12:54:51 2010 -0400
   100.3 @@ -850,25 +850,13 @@
   100.4    set_decompile_count(0);
   100.5  
   100.6    set_do_freq_based_layout(BlockLayoutByFrequency || method_has_option("BlockLayoutByFrequency"));
   100.7 -  // Compilation level related initialization
   100.8 -  if (env()->comp_level() == CompLevel_fast_compile) {
   100.9 -    set_num_loop_opts(Tier1LoopOptsCount);
  100.10 -    set_do_inlining(Tier1Inline != 0);
  100.11 -    set_max_inline_size(Tier1MaxInlineSize);
  100.12 -    set_freq_inline_size(Tier1FreqInlineSize);
  100.13 -    set_do_scheduling(false);
  100.14 -    set_do_count_invocations(Tier1CountInvocations);
  100.15 -    set_do_method_data_update(Tier1UpdateMethodData);
  100.16 -  } else {
  100.17 -    assert(env()->comp_level() == CompLevel_full_optimization, "unknown comp level");
  100.18 -    set_num_loop_opts(LoopOptsCount);
  100.19 -    set_do_inlining(Inline);
  100.20 -    set_max_inline_size(MaxInlineSize);
  100.21 -    set_freq_inline_size(FreqInlineSize);
  100.22 -    set_do_scheduling(OptoScheduling);
  100.23 -    set_do_count_invocations(false);
  100.24 -    set_do_method_data_update(false);
  100.25 -  }
  100.26 +  set_num_loop_opts(LoopOptsCount);
  100.27 +  set_do_inlining(Inline);
  100.28 +  set_max_inline_size(MaxInlineSize);
  100.29 +  set_freq_inline_size(FreqInlineSize);
  100.30 +  set_do_scheduling(OptoScheduling);
  100.31 +  set_do_count_invocations(false);
  100.32 +  set_do_method_data_update(false);
  100.33  
  100.34    if (debug_info()->recording_non_safepoints()) {
  100.35      set_node_note_array(new(comp_arena()) GrowableArray<Node_Notes*>
   101.1 --- a/src/share/vm/opto/graphKit.cpp	Tue Sep 21 06:58:44 2010 -0700
   101.2 +++ b/src/share/vm/opto/graphKit.cpp	Wed Sep 22 12:54:51 2010 -0400
   101.3 @@ -1739,6 +1739,7 @@
   101.4    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
   101.5    C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
   101.6    C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
   101.7 +  Node* final_mem = final_state->in(TypeFunc::Memory);
   101.8  
   101.9    // Replace the result with the new result if it exists and is used
  101.10    if (callprojs.resproj != NULL && result != NULL) {
  101.11 @@ -1776,6 +1777,21 @@
  101.12    // Disconnect the call from the graph
  101.13    call->disconnect_inputs(NULL);
  101.14    C->gvn_replace_by(call, C->top());
  101.15 +
  101.16 +  // Clean up any MergeMems that feed other MergeMems since the
  101.17 +  // optimizer doesn't like that.
  101.18 +  if (final_mem->is_MergeMem()) {
  101.19 +    Node_List wl;
  101.20 +    for (SimpleDUIterator i(final_mem); i.has_next(); i.next()) {
  101.21 +      Node* m = i.get();
  101.22 +      if (m->is_MergeMem() && !wl.contains(m)) {
  101.23 +        wl.push(m);
  101.24 +      }
  101.25 +    }
  101.26 +    while (wl.size()  > 0) {
  101.27 +      _gvn.transform(wl.pop());
  101.28 +    }
  101.29 +  }
  101.30  }
  101.31  
  101.32  
   102.1 --- a/src/share/vm/opto/loopTransform.cpp	Tue Sep 21 06:58:44 2010 -0700
   102.2 +++ b/src/share/vm/opto/loopTransform.cpp	Wed Sep 22 12:54:51 2010 -0400
   102.3 @@ -2417,6 +2417,8 @@
   102.4        Node* value = n->in(MemNode::ValueIn);
   102.5        if (!lpt->is_invariant(value)) {
   102.6          msg  = "variant store value";
   102.7 +      } else if (!_igvn.type(n->in(MemNode::Address))->isa_aryptr()) {
   102.8 +        msg = "not array address";
   102.9        }
  102.10        store = n;
  102.11        store_value = value;
  102.12 @@ -2468,6 +2470,7 @@
  102.13    // head->phi * elsize + con.  head->phi might have a ConvI2L.
  102.14    Node* elements[4];
  102.15    Node* conv = NULL;
  102.16 +  bool found_index = false;
  102.17    int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
  102.18    for (int e = 0; e < count; e++) {
  102.19      Node* n = elements[e];
  102.20 @@ -2484,17 +2487,20 @@
  102.21        if (value != head->phi()) {
  102.22          msg = "unhandled shift in address";
  102.23        } else {
  102.24 +        found_index = true;
  102.25          shift = n;
  102.26          assert(type2aelembytes(store->as_Mem()->memory_type(), true) == 1 << shift->in(2)->get_int(), "scale should match");
  102.27        }
  102.28      } else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
  102.29        if (n->in(1) == head->phi()) {
  102.30 +        found_index = true;
  102.31          conv = n;
  102.32        } else {
  102.33          msg = "unhandled input to ConvI2L";
  102.34        }
  102.35      } else if (n == head->phi()) {
  102.36        // no shift, check below for allowed cases
  102.37 +      found_index = true;
  102.38      } else {
  102.39        msg = "unhandled node in address";
  102.40        msg_node = n;
  102.41 @@ -2506,6 +2512,10 @@
  102.42      msg_node = store;
  102.43    }
  102.44  
  102.45 +  if (!found_index) {
  102.46 +    msg = "missing use of index";
  102.47 +  }
  102.48 +
  102.49    // byte sized items won't have a shift
  102.50    if (msg == NULL && shift == NULL && t != T_BYTE && t != T_BOOLEAN) {
  102.51      msg = "can't find shift";
   103.1 --- a/src/share/vm/opto/type.hpp	Tue Sep 21 06:58:44 2010 -0700
   103.2 +++ b/src/share/vm/opto/type.hpp	Wed Sep 22 12:54:51 2010 -0400
   103.3 @@ -836,7 +836,7 @@
   103.4      if (k != NULL) {
   103.5        // Verify that specified klass and TypeAryPtr::klass() follow the same rules.
   103.6        ciKlass* ck = compute_klass(true);
   103.7 -      if (UseNewCode || k != ck) {
   103.8 +      if (k != ck) {
   103.9          this->dump(); tty->cr();
  103.10          tty->print(" k: ");
  103.11          k->print(); tty->cr();
   104.1 --- a/src/share/vm/prims/methodHandleWalk.cpp	Tue Sep 21 06:58:44 2010 -0700
   104.2 +++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed Sep 22 12:54:51 2010 -0400
   104.3 @@ -333,8 +333,7 @@
   104.4          ArgToken arglist[2];
   104.5          arglist[0] = arg;         // outgoing value
   104.6          arglist[1] = ArgToken();  // sentinel
   104.7 -        assert(false, "I think the argument count must be 1 instead of 0");
   104.8 -        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 0, &arglist[0], CHECK_(empty));
   104.9 +        arg = make_invoke(NULL, boxer, Bytecodes::_invokevirtual, false, 1, &arglist[0], CHECK_(empty));
  104.10          change_argument(src, arg_slot, T_OBJECT, arg);
  104.11          break;
  104.12        }
  104.13 @@ -979,7 +978,7 @@
  104.14  
  104.15    // Inline the method.
  104.16    InvocationCounter* ic = m->invocation_counter();
  104.17 -  ic->set_carry();
  104.18 +  ic->set_carry_flag();
  104.19  
  104.20    for (int i = 0; i < argc; i++) {
  104.21      ArgToken arg = argv[i];
  104.22 @@ -1209,7 +1208,7 @@
  104.23    // Set the carry bit of the invocation counter to force inlining of
  104.24    // the adapter.
  104.25    InvocationCounter* ic = m->invocation_counter();
  104.26 -  ic->set_carry();
  104.27 +  ic->set_carry_flag();
  104.28  
  104.29    // Rewrite the method and set up the constant pool cache.
  104.30    objArrayOop m_array = oopFactory::new_system_objArray(1, CHECK_(nullHandle));
  104.31 @@ -1398,7 +1397,9 @@
  104.32  
  104.33  extern "C"
  104.34  void print_method_handle(oop mh) {
  104.35 -  if (java_dyn_MethodHandle::is_instance(mh)) {
  104.36 +  if (!mh->is_oop()) {
  104.37 +    tty->print_cr("*** not a method handle: "INTPTR_FORMAT, (intptr_t)mh);
  104.38 +  } else if (java_dyn_MethodHandle::is_instance(mh)) {
  104.39      //MethodHandlePrinter::print(mh);
  104.40    } else {
  104.41      tty->print("*** not a method handle: ");
   105.1 --- a/src/share/vm/prims/methodHandles.hpp	Tue Sep 21 06:58:44 2010 -0700
   105.2 +++ b/src/share/vm/prims/methodHandles.hpp	Wed Sep 22 12:54:51 2010 -0400
   105.3 @@ -446,6 +446,8 @@
   105.4                                 RegisterOrConstant arg_slots,
   105.5                                 Register argslot_reg,
   105.6                                 Register temp_reg, Register temp2_reg, Register temp3_reg = noreg);
   105.7 +
   105.8 +  static void trace_method_handle(MacroAssembler* _masm, const char* adaptername) PRODUCT_RETURN;
   105.9  };
  105.10  
  105.11  
   106.1 --- a/src/share/vm/runtime/arguments.cpp	Tue Sep 21 06:58:44 2010 -0700
   106.2 +++ b/src/share/vm/runtime/arguments.cpp	Wed Sep 22 12:54:51 2010 -0400
   106.3 @@ -50,7 +50,6 @@
   106.4  bool   Arguments::_UseOnStackReplacement        = UseOnStackReplacement;
   106.5  bool   Arguments::_BackgroundCompilation        = BackgroundCompilation;
   106.6  bool   Arguments::_ClipInlining                 = ClipInlining;
   106.7 -intx   Arguments::_Tier2CompileThreshold        = Tier2CompileThreshold;
   106.8  
   106.9  char*  Arguments::SharedArchivePath             = NULL;
  106.10  
  106.11 @@ -913,7 +912,6 @@
  106.12    AlwaysCompileLoopMethods   = Arguments::_AlwaysCompileLoopMethods;
  106.13    UseOnStackReplacement      = Arguments::_UseOnStackReplacement;
  106.14    BackgroundCompilation      = Arguments::_BackgroundCompilation;
  106.15 -  Tier2CompileThreshold      = Arguments::_Tier2CompileThreshold;
  106.16  
  106.17    // Change from defaults based on mode
  106.18    switch (mode) {
  106.19 @@ -950,6 +948,31 @@
  106.20    }
  106.21  }
  106.22  
  106.23 +void Arguments::set_tiered_flags() {
  106.24 +  if (FLAG_IS_DEFAULT(CompilationPolicyChoice)) {
  106.25 +    FLAG_SET_DEFAULT(CompilationPolicyChoice, 2);
  106.26 +  }
  106.27 +
  106.28 +  if (CompilationPolicyChoice < 2) {
  106.29 +    vm_exit_during_initialization(
  106.30 +      "Incompatible compilation policy selected", NULL);
  106.31 +  }
  106.32 +
  106.33 +#ifdef _LP64
  106.34 +  if (FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops)) {
  106.35 +    UseCompressedOops = false;
  106.36 +  }
  106.37 +  if (UseCompressedOops) {
  106.38 +    vm_exit_during_initialization(
  106.39 +      "Tiered compilation is not supported with compressed oops yet", NULL);
  106.40 +  }
  106.41 +#endif
   106.42 +  // Increase the code cache size - tiered compiles a lot more.
  106.43 +  if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
  106.44 +    FLAG_SET_DEFAULT(ReservedCodeCacheSize, ReservedCodeCacheSize * 2);
  106.45 +  }
  106.46 +}
  106.47 +
  106.48  #ifndef KERNEL
  106.49  // If the user has chosen ParallelGCThreads > 0, we set UseParNewGC
  106.50  // if it's not explictly set or unset. If the user has chosen
  106.51 @@ -1250,7 +1273,8 @@
  106.52  }
  106.53  
  106.54  inline uintx max_heap_for_compressed_oops() {
  106.55 -  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
  106.56 +  // Heap should be above HeapBaseMinAddress to get zero based compressed oops.
  106.57 +  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size() - HeapBaseMinAddress);
  106.58    NOT_LP64(ShouldNotReachHere(); return 0);
  106.59  }
  106.60  
  106.61 @@ -1299,7 +1323,7 @@
  106.62    // Check that UseCompressedOops can be set with the max heap size allocated
  106.63    // by ergonomics.
  106.64    if (MaxHeapSize <= max_heap_for_compressed_oops()) {
  106.65 -#ifndef COMPILER1
  106.66 +#if !defined(COMPILER1) || defined(TIERED)
  106.67      if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
  106.68        FLAG_SET_ERGO(bool, UseCompressedOops, true);
  106.69      }
  106.70 @@ -1933,7 +1957,6 @@
  106.71    Arguments::_UseOnStackReplacement    = UseOnStackReplacement;
  106.72    Arguments::_ClipInlining             = ClipInlining;
  106.73    Arguments::_BackgroundCompilation    = BackgroundCompilation;
  106.74 -  Arguments::_Tier2CompileThreshold    = Tier2CompileThreshold;
  106.75  
  106.76    // Parse JAVA_TOOL_OPTIONS environment variable (if present)
  106.77    jint result = parse_java_tool_options_environment_variable(&scp, &scp_assembly_required);
  106.78 @@ -2651,23 +2674,6 @@
  106.79      set_mode_flags(_int);
  106.80    }
  106.81  
  106.82 -#ifdef TIERED
  106.83 -  // If we are using tiered compilation in the tiered vm then c1 will
  106.84 -  // do the profiling and we don't want to waste that time in the
  106.85 -  // interpreter.
  106.86 -  if (TieredCompilation) {
  106.87 -    ProfileInterpreter = false;
  106.88 -  } else {
  106.89 -    // Since we are running vanilla server we must adjust the compile threshold
  106.90 -    // unless the user has already adjusted it because the default threshold assumes
  106.91 -    // we will run tiered.
  106.92 -
  106.93 -    if (FLAG_IS_DEFAULT(CompileThreshold)) {
  106.94 -      CompileThreshold = Tier2CompileThreshold;
  106.95 -    }
  106.96 -  }
  106.97 -#endif // TIERED
  106.98 -
  106.99  #ifndef COMPILER2
 106.100    // Don't degrade server performance for footprint
 106.101    if (FLAG_IS_DEFAULT(UseLargePages) &&
 106.102 @@ -2682,7 +2688,6 @@
 106.103  
 106.104    // Tiered compilation is undefined with C1.
 106.105    TieredCompilation = false;
 106.106 -
 106.107  #else
 106.108    if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) {
 106.109      FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1);
 106.110 @@ -2946,7 +2951,7 @@
 106.111      PrintGC = true;
 106.112    }
 106.113  
 106.114 -#if defined(_LP64) && defined(COMPILER1)
 106.115 +#if defined(_LP64) && defined(COMPILER1) && !defined(TIERED)
 106.116    UseCompressedOops = false;
 106.117  #endif
 106.118  
 106.119 @@ -2977,6 +2982,16 @@
 106.120      return JNI_EINVAL;
 106.121    }
 106.122  
 106.123 +  if (TieredCompilation) {
 106.124 +    set_tiered_flags();
 106.125 +  } else {
 106.126 +    // Check if the policy is valid. Policies 0 and 1 are valid for non-tiered setup.
 106.127 +    if (CompilationPolicyChoice >= 2) {
 106.128 +      vm_exit_during_initialization(
 106.129 +        "Incompatible compilation policy selected", NULL);
 106.130 +    }
 106.131 +  }
 106.132 +
 106.133  #ifndef KERNEL
 106.134    if (UseConcMarkSweepGC) {
 106.135      // Set flags for CMS and ParNew.  Check UseConcMarkSweep first
   107.1 --- a/src/share/vm/runtime/arguments.hpp	Tue Sep 21 06:58:44 2010 -0700
   107.2 +++ b/src/share/vm/runtime/arguments.hpp	Wed Sep 22 12:54:51 2010 -0400
   107.3 @@ -1,5 +1,5 @@
   107.4  /*
   107.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
   107.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   107.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   107.8   *
   107.9   * This code is free software; you can redistribute it and/or modify it
  107.10 @@ -288,8 +288,9 @@
  107.11    static bool _BackgroundCompilation;
  107.12    static bool _ClipInlining;
  107.13    static bool _CIDynamicCompilePriority;
  107.14 -  static intx _Tier2CompileThreshold;
  107.15  
  107.16 +  // Tiered
  107.17 +  static void set_tiered_flags();
  107.18    // CMS/ParNew garbage collectors
  107.19    static void set_parnew_gc_flags();
  107.20    static void set_cms_and_parnew_gc_flags();
   108.1 --- a/src/share/vm/runtime/compilationPolicy.cpp	Tue Sep 21 06:58:44 2010 -0700
   108.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp	Wed Sep 22 12:54:51 2010 -0400
   108.3 @@ -45,10 +45,17 @@
   108.4      Unimplemented();
   108.5  #endif
   108.6      break;
   108.7 -
   108.8 +  case 2:
   108.9 +#ifdef TIERED
  108.10 +    CompilationPolicy::set_policy(new SimpleThresholdPolicy());
  108.11 +#else
  108.12 +    Unimplemented();
  108.13 +#endif
  108.14 +    break;
  108.15    default:
  108.16 -    fatal("CompilationPolicyChoice must be in the range: [0-1]");
  108.17 +    fatal("CompilationPolicyChoice must be in the range: [0-2]");
  108.18    }
  108.19 +  CompilationPolicy::policy()->initialize();
  108.20  }
  108.21  
  108.22  void CompilationPolicy::completed_vm_startup() {
  108.23 @@ -61,16 +68,16 @@
  108.24  // Returns true if m must be compiled before executing it
  108.25  // This is intended to force compiles for methods (usually for
  108.26  // debugging) that would otherwise be interpreted for some reason.
  108.27 -bool CompilationPolicy::mustBeCompiled(methodHandle m) {
  108.28 +bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) {
  108.29    if (m->has_compiled_code()) return false;       // already compiled
  108.30 -  if (!canBeCompiled(m))      return false;
  108.31 +  if (!can_be_compiled(m, comp_level)) return false;
  108.32  
  108.33    return !UseInterpreter ||                                              // must compile all methods
  108.34           (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods
  108.35  }
  108.36  
  108.37  // Returns true if m is allowed to be compiled
  108.38 -bool CompilationPolicy::canBeCompiled(methodHandle m) {
  108.39 +bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) {
  108.40    if (m->is_abstract()) return false;
  108.41    if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false;
  108.42  
  108.43 @@ -83,8 +90,16 @@
  108.44    if (!AbstractInterpreter::can_be_compiled(m)) {
  108.45      return false;
  108.46    }
  108.47 +  if (comp_level == CompLevel_all) {
  108.48 +    return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization);
  108.49 +  } else {
  108.50 +    return !m->is_not_compilable(comp_level);
  108.51 +  }
  108.52 +}
  108.53  
  108.54 -  return !m->is_not_compilable();
  108.55 +bool CompilationPolicy::is_compilation_enabled() {
  108.56 +  // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler
  108.57 +  return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs();
  108.58  }
  108.59  
  108.60  #ifndef PRODUCT
  108.61 @@ -94,7 +109,7 @@
  108.62    tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
  108.63  }
  108.64  
  108.65 -static void trace_osr_completion(nmethod* osr_nm) {
  108.66 +void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) {
  108.67    if (TraceOnStackReplacement) {
  108.68      if (osr_nm == NULL) tty->print_cr("compilation failed");
  108.69      else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm);
  108.70 @@ -102,7 +117,35 @@
  108.71  }
  108.72  #endif // !PRODUCT
  108.73  
  108.74 -void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) {
  108.75 +void NonTieredCompPolicy::initialize() {
  108.76 +  // Setup the compiler thread numbers
  108.77 +  if (CICompilerCountPerCPU) {
  108.78 +    // Example: if CICompilerCountPerCPU is true, then we get
  108.79 +    // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine.
  108.80 +    // May help big-app startup time.
  108.81 +    _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1);
  108.82 +  } else {
  108.83 +    _compiler_count = CICompilerCount;
  108.84 +  }
  108.85 +}
  108.86 +
  108.87 +int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
  108.88 +#ifdef COMPILER1
  108.89 +  if (is_c1_compile(comp_level)) {
  108.90 +    return _compiler_count;
  108.91 +  }
  108.92 +#endif
  108.93 +
  108.94 +#ifdef COMPILER2
  108.95 +  if (is_c2_compile(comp_level)) {
  108.96 +    return _compiler_count;
  108.97 +  }
  108.98 +#endif
  108.99 +
 108.100 +  return 0;
 108.101 +}
 108.102 +
 108.103 +void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) {
 108.104    // Make sure invocation and backedge counter doesn't overflow again right away
 108.105    // as would be the case for native methods.
 108.106  
 108.107 @@ -114,7 +157,7 @@
 108.108    assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed");
 108.109  }
 108.110  
 108.111 -void CompilationPolicy::reset_counter_for_back_branch_event(methodHandle m) {
 108.112 +void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) {
 108.113    // Delay next back-branch event but pump up invocation counter to triger
 108.114    // whole method compilation.
 108.115    InvocationCounter* i = m->invocation_counter();
 108.116 @@ -128,6 +171,185 @@
 108.117    b->set(b->state(), CompileThreshold / 2);
 108.118  }
 108.119  
 108.120 +//
 108.121 +// CounterDecay
 108.122 +//
  108.123 +// Iterates through invocation counters and decrements them. This
 108.124 +// is done at each safepoint.
 108.125 +//
 108.126 +class CounterDecay : public AllStatic {
 108.127 +  static jlong _last_timestamp;
 108.128 +  static void do_method(methodOop m) {
 108.129 +    m->invocation_counter()->decay();
 108.130 +  }
 108.131 +public:
 108.132 +  static void decay();
 108.133 +  static bool is_decay_needed() {
 108.134 +    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
 108.135 +  }
 108.136 +};
 108.137 +
 108.138 +jlong CounterDecay::_last_timestamp = 0;
 108.139 +
 108.140 +void CounterDecay::decay() {
 108.141 +  _last_timestamp = os::javaTimeMillis();
 108.142 +
 108.143 +  // This operation is going to be performed only at the end of a safepoint
 108.144 +  // and hence GC's will not be going on, all Java mutators are suspended
 108.145 +  // at this point and hence SystemDictionary_lock is also not needed.
 108.146 +  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
 108.147 +  int nclasses = SystemDictionary::number_of_classes();
 108.148 +  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
 108.149 +                                        CounterHalfLifeTime);
 108.150 +  for (int i = 0; i < classes_per_tick; i++) {
 108.151 +    klassOop k = SystemDictionary::try_get_next_class();
 108.152 +    if (k != NULL && k->klass_part()->oop_is_instance()) {
 108.153 +      instanceKlass::cast(k)->methods_do(do_method);
 108.154 +    }
 108.155 +  }
 108.156 +}
 108.157 +
 108.158 +// Called at the end of the safepoint
 108.159 +void NonTieredCompPolicy::do_safepoint_work() {
 108.160 +  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
 108.161 +    CounterDecay::decay();
 108.162 +  }
 108.163 +}
 108.164 +
 108.165 +void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
 108.166 +  ScopeDesc* sd = trap_scope;
 108.167 +  for (; !sd->is_top(); sd = sd->sender()) {
 108.168 +    // Reset ICs of inlined methods, since they can trigger compilations also.
 108.169 +    sd->method()->invocation_counter()->reset();
 108.170 +  }
 108.171 +  InvocationCounter* c = sd->method()->invocation_counter();
 108.172 +  if (is_osr) {
 108.173 +    // It was an OSR method, so bump the count higher.
 108.174 +    c->set(c->state(), CompileThreshold);
 108.175 +  } else {
 108.176 +    c->reset();
 108.177 +  }
 108.178 +  sd->method()->backedge_counter()->reset();
 108.179 +}
 108.180 +
 108.181 +// This method can be called by any component of the runtime to notify the policy
  108.182 +// that it's recommended to delay the compilation of this method.
 108.183 +void NonTieredCompPolicy::delay_compilation(methodOop method) {
 108.184 +  method->invocation_counter()->decay();
 108.185 +  method->backedge_counter()->decay();
 108.186 +}
 108.187 +
 108.188 +void NonTieredCompPolicy::disable_compilation(methodOop method) {
 108.189 +  method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing);
 108.190 +  method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing);
 108.191 +}
 108.192 +
 108.193 +CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
 108.194 +  return compile_queue->first();
 108.195 +}
 108.196 +
 108.197 +bool NonTieredCompPolicy::is_mature(methodOop method) {
 108.198 +  methodDataOop mdo = method->method_data();
 108.199 +  assert(mdo != NULL, "Should be");
 108.200 +  uint current = mdo->mileage_of(method);
 108.201 +  uint initial = mdo->creation_mileage();
 108.202 +  if (current < initial)
 108.203 +    return true;  // some sort of overflow
 108.204 +  uint target;
 108.205 +  if (ProfileMaturityPercentage <= 0)
 108.206 +    target = (uint) -ProfileMaturityPercentage;  // absolute value
 108.207 +  else
 108.208 +    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
 108.209 +  return (current >= initial + target);
 108.210 +}
 108.211 +
 108.212 +nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
 108.213 +  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
 108.214 +  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
 108.215 +  if (JvmtiExport::can_post_interpreter_events()) {
 108.216 +    assert(THREAD->is_Java_thread(), "Wrong type of thread");
 108.217 +    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
 108.218 +      // If certain JVMTI events (e.g. frame pop event) are requested then the
 108.219 +      // thread is forced to remain in interpreted code. This is
 108.220 +      // implemented partly by a check in the run_compiled_code
 108.221 +      // section of the interpreter whether we should skip running
 108.222 +      // compiled code, and partly by skipping OSR compiles for
 108.223 +      // interpreted-only threads.
 108.224 +      if (bci != InvocationEntryBci) {
 108.225 +        reset_counter_for_back_branch_event(method);
 108.226 +        return NULL;
 108.227 +      }
 108.228 +    }
 108.229 +  }
 108.230 +  if (bci == InvocationEntryBci) {
 108.231 +    // when code cache is full, compilation gets switched off, UseCompiler
 108.232 +    // is set to false
 108.233 +    if (!method->has_compiled_code() && UseCompiler) {
 108.234 +      method_invocation_event(method, CHECK_NULL);
 108.235 +    } else {
 108.236 +      // Force counter overflow on method entry, even if no compilation
 108.237 +      // happened.  (The method_invocation_event call does this also.)
 108.238 +      reset_counter_for_invocation_event(method);
 108.239 +    }
 108.240 +    // compilation at an invocation overflow no longer goes and retries test for
 108.241 +    // compiled method. We always run the loser of the race as interpreted.
 108.242 +    // so return NULL
 108.243 +    return NULL;
 108.244 +  } else {
 108.245 +    // counter overflow in a loop => try to do on-stack-replacement
 108.246 +    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
 108.247 +    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
 108.248 +    // when code cache is full, we should not compile any more...
 108.249 +    if (osr_nm == NULL && UseCompiler) {
 108.250 +      method_back_branch_event(method, bci, CHECK_NULL);
 108.251 +      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
 108.252 +    }
 108.253 +    if (osr_nm == NULL) {
 108.254 +      reset_counter_for_back_branch_event(method);
 108.255 +      return NULL;
 108.256 +    }
 108.257 +    return osr_nm;
 108.258 +  }
 108.259 +  return NULL;
 108.260 +}
 108.261 +
 108.262 +#ifndef PRODUCT
 108.263 +void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
 108.264 +  if (TraceInvocationCounterOverflow) {
 108.265 +    InvocationCounter* ic = m->invocation_counter();
 108.266 +    InvocationCounter* bc = m->backedge_counter();
 108.267 +    ResourceMark rm;
 108.268 +    const char* msg =
 108.269 +      bci == InvocationEntryBci
 108.270 +      ? "comp-policy cntr ovfl @ %d in entry of "
 108.271 +      : "comp-policy cntr ovfl @ %d in loop of ";
 108.272 +    tty->print(msg, bci);
 108.273 +    m->print_value();
 108.274 +    tty->cr();
 108.275 +    ic->print();
 108.276 +    bc->print();
 108.277 +    if (ProfileInterpreter) {
 108.278 +      if (bci != InvocationEntryBci) {
 108.279 +        methodDataOop mdo = m->method_data();
 108.280 +        if (mdo != NULL) {
 108.281 +          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
 108.282 +          tty->print_cr("back branch count = %d", count);
 108.283 +        }
 108.284 +      }
 108.285 +    }
 108.286 +  }
 108.287 +}
 108.288 +
 108.289 +void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) {
 108.290 +  if (TraceOnStackReplacement) {
 108.291 +    ResourceMark rm;
 108.292 +    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
 108.293 +    method->print_short_name(tty);
 108.294 +    tty->print_cr(" at bci %d", bci);
 108.295 +  }
 108.296 +}
 108.297 +#endif // !PRODUCT
 108.298 +
 108.299  // SimpleCompPolicy - compile current method
 108.300  
 108.301  void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) {
 108.302 @@ -137,59 +359,28 @@
 108.303    reset_counter_for_invocation_event(m);
 108.304    const char* comment = "count";
 108.305  
 108.306 -  if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
 108.307 +  if (is_compilation_enabled() && can_be_compiled(m)) {
 108.308      nmethod* nm = m->code();
 108.309      if (nm == NULL ) {
 108.310        const char* comment = "count";
 108.311 -      CompileBroker::compile_method(m, InvocationEntryBci,
 108.312 +      CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier,
 108.313                                      m, hot_count, comment, CHECK);
 108.314 -    } else {
 108.315 -#ifdef TIERED
 108.316 -
 108.317 -      if (nm->is_compiled_by_c1()) {
 108.318 -        const char* comment = "tier1 overflow";
 108.319 -        CompileBroker::compile_method(m, InvocationEntryBci,
 108.320 -                                      m, hot_count, comment, CHECK);
 108.321 -      }
 108.322 -#endif // TIERED
 108.323      }
 108.324    }
 108.325  }
 108.326  
 108.327 -void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
 108.328 +void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
 108.329    assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
 108.330  
 108.331    int hot_count = m->backedge_count();
 108.332    const char* comment = "backedge_count";
 108.333  
 108.334 -  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
 108.335 -    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 108.336 -
 108.337 -    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
 108.338 +  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
 108.339 +    CompileBroker::compile_method(m, bci, CompLevel_highest_tier,
 108.340 +                                  m, hot_count, comment, CHECK);
 108.341 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
 108.342    }
 108.343  }
 108.344 -
 108.345 -int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci)
 108.346 -{
 108.347 -#ifdef TIERED
 108.348 -  if (!TieredCompilation) {
 108.349 -    return CompLevel_highest_tier;
 108.350 -  }
 108.351 -  if (/* m()->tier1_compile_done() && */
 108.352 -     // QQQ HACK FIX ME set tier1_compile_done!!
 108.353 -      !m()->is_native()) {
 108.354 -    // Grab the nmethod so it doesn't go away while it's being queried
 108.355 -    nmethod* code = m()->code();
 108.356 -    if (code != NULL && code->is_compiled_by_c1()) {
 108.357 -      return CompLevel_highest_tier;
 108.358 -    }
 108.359 -  }
 108.360 -  return CompLevel_fast_compile;
 108.361 -#else
 108.362 -  return CompLevel_highest_tier;
 108.363 -#endif // TIERED
 108.364 -}
 108.365 -
 108.366  // StackWalkCompPolicy - walk up stack to find a suitable method to compile
 108.367  
 108.368  #ifdef COMPILER2
 108.369 @@ -204,7 +395,7 @@
 108.370    reset_counter_for_invocation_event(m);
 108.371    const char* comment = "count";
 108.372  
 108.373 -  if (m->code() == NULL && !delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) {
 108.374 +  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m)) {
 108.375      ResourceMark rm(THREAD);
 108.376      JavaThread *thread = (JavaThread*)THREAD;
 108.377      frame       fr     = thread->last_frame();
 108.378 @@ -224,10 +415,6 @@
 108.379      if (first->top_method()->code() != NULL) {
 108.380        // called obsolete method/nmethod -- no need to recompile
 108.381        if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code());
 108.382 -    } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) {
 108.383 -      // Tier1 compilation policy avaoids stack walking.
 108.384 -      CompileBroker::compile_method(m, InvocationEntryBci,
 108.385 -                                    m, hot_count, comment, CHECK);
 108.386      } else {
 108.387        if (TimeCompilationPolicy) accumulated_time()->start();
 108.388        GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
 108.389 @@ -236,53 +423,25 @@
 108.390        if (TimeCompilationPolicy) accumulated_time()->stop();
 108.391        assert(top != NULL, "findTopInlinableFrame returned null");
 108.392        if (TraceCompilationPolicy) top->print();
 108.393 -      CompileBroker::compile_method(top->top_method(), InvocationEntryBci,
 108.394 +      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier,
 108.395                                      m, hot_count, comment, CHECK);
 108.396      }
 108.397    }
 108.398  }
 108.399  
 108.400 -void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) {
 108.401 +void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) {
 108.402    assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now.");
 108.403  
 108.404    int hot_count = m->backedge_count();
 108.405    const char* comment = "backedge_count";
 108.406  
 108.407 -  if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) {
 108.408 -    CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK);
 108.409 +  if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) {
 108.410 +    CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK);
 108.411  
 108.412 -    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));)
 108.413 +    NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));)
 108.414    }
 108.415  }
 108.416  
 108.417 -int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci)
 108.418 -{
 108.419 -  int comp_level = CompLevel_full_optimization;
 108.420 -  if (TieredCompilation && osr_bci == InvocationEntryBci) {
 108.421 -    if (CompileTheWorld) {
 108.422 -      // Under CTW, the first compile is tier1, the second tier2
 108.423 -      if (m->highest_tier_compile() == CompLevel_none) {
 108.424 -        comp_level = CompLevel_fast_compile;
 108.425 -      }
 108.426 -    } else if (!m->has_osr_nmethod()) {
 108.427 -      // Before tier1 is done, use invocation_count + backedge_count to
 108.428 -      // compare against the threshold.  After that, the counters may/will
 108.429 -      // be reset, so rely on the straight interpreter_invocation_count.
 108.430 -      if (m->highest_tier_compile() == CompLevel_initial_compile) {
 108.431 -        if (m->interpreter_invocation_count() < Tier2CompileThreshold) {
 108.432 -          comp_level = CompLevel_fast_compile;
 108.433 -        }
 108.434 -      } else if (m->invocation_count() + m->backedge_count() <
 108.435 -                 Tier2CompileThreshold) {
 108.436 -        comp_level = CompLevel_fast_compile;
 108.437 -      }
 108.438 -    }
 108.439 -
 108.440 -  }
 108.441 -  return comp_level;
 108.442 -}
 108.443 -
 108.444 -
 108.445  RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
 108.446    // go up the stack until finding a frame that (probably) won't be inlined
 108.447    // into its caller
 108.448 @@ -372,7 +531,7 @@
 108.449  
 108.450      // If the caller method is too big or something then we do not want to
 108.451      // compile it just to inline a method
 108.452 -    if (!canBeCompiled(next_m)) {
 108.453 +    if (!can_be_compiled(next_m)) {
 108.454        msg = "caller cannot be compiled";
 108.455        break;
 108.456      }
   109.1 --- a/src/share/vm/runtime/compilationPolicy.hpp	Tue Sep 21 06:58:44 2010 -0700
   109.2 +++ b/src/share/vm/runtime/compilationPolicy.hpp	Wed Sep 22 12:54:51 2010 -0400
   109.3 @@ -1,5 +1,5 @@
   109.4  /*
   109.5 - * Copyright (c) 2000, 2006, Oracle and/or its affiliates. All rights reserved.
   109.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
   109.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   109.8   *
   109.9   * This code is free software; you can redistribute it and/or modify it
  109.10 @@ -25,53 +25,91 @@
  109.11  // The CompilationPolicy selects which method (if any) should be compiled.
  109.12  // It also decides which methods must always be compiled (i.e., are never
  109.13  // interpreted).
  109.14 +class CompileTask;
  109.15 +class CompileQueue;
  109.16  
  109.17  class CompilationPolicy : public CHeapObj {
  109.18 - private:
  109.19    static CompilationPolicy* _policy;
  109.20    // Accumulated time
  109.21    static elapsedTimer       _accumulated_time;
  109.22  
  109.23    static bool               _in_vm_startup;
  109.24 -
  109.25 - public:
  109.26 -  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
  109.27 -  virtual void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) = 0;
  109.28 -  virtual int compilation_level(methodHandle m, int branch_bci) = 0;
  109.29 -
  109.30 -  void reset_counter_for_invocation_event(methodHandle method);
  109.31 -  void reset_counter_for_back_branch_event(methodHandle method);
  109.32 -
  109.33 +public:
  109.34    static  void set_in_vm_startup(bool in_vm_startup) { _in_vm_startup = in_vm_startup; }
  109.35    static  void completed_vm_startup();
  109.36 -  static  bool delayCompilationDuringStartup() { return _in_vm_startup; }
  109.37 +  static  bool delay_compilation_during_startup()    { return _in_vm_startup; }
  109.38  
  109.39 -  static bool mustBeCompiled(methodHandle m);      // m must be compiled before executing it
  109.40 -  static bool canBeCompiled(methodHandle m);       // m is allowed to be compiled
  109.41 -
  109.42 +  // m must be compiled before executing it
  109.43 +  static bool must_be_compiled(methodHandle m, int comp_level = CompLevel_all);
  109.44 +  // m is allowed to be compiled
  109.45 +  static bool can_be_compiled(methodHandle m, int comp_level = CompLevel_all);
  109.46 +  static bool is_compilation_enabled();
  109.47    static void set_policy(CompilationPolicy* policy) { _policy = policy; }
  109.48 -  static CompilationPolicy* policy() { return _policy; }
  109.49 +  static CompilationPolicy* policy()                { return _policy; }
  109.50  
  109.51    // Profiling
  109.52    elapsedTimer* accumulated_time() { return &_accumulated_time; }
  109.53    void print_time() PRODUCT_RETURN;
  109.54 +  virtual int compiler_count(CompLevel comp_level) = 0;
  109.55 +  // main notification entry, return a pointer to an nmethod if the OSR is required,
  109.56 +  // returns NULL otherwise.
  109.57 +  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) = 0;
  109.58 +  // safepoint() is called at the end of the safepoint
  109.59 +  virtual void do_safepoint_work() = 0;
  109.60 +  // reprofile request
  109.61 +  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
  109.62 +  // delay_compilation(method) can be called by any component of the runtime to notify the policy
  109.63 +  // that it's recommended to delay the complation of this method.
  109.64 +  virtual void delay_compilation(methodOop method) = 0;
  109.65 +  // disable_compilation() is called whenever the runtime decides to disable compilation of the
  109.66 +  // specified method.
  109.67 +  virtual void disable_compilation(methodOop method) = 0;
  109.68 +  // Select task is called by CompileBroker. The queue is guaranteed to have at least one
  109.69 +  // element and is locked. The function should select one and return it.
  109.70 +  virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
  109.71 +  // Tell the runtime if we think a given method is adequately profiled.
  109.72 +  virtual bool is_mature(methodOop method) = 0;
  109.73 +  // Do policy initialization
  109.74 +  virtual void initialize() = 0;
  109.75  };
  109.76  
  109.77 -class SimpleCompPolicy : public CompilationPolicy {
  109.78 +// A base class for baseline policies.
  109.79 +class NonTieredCompPolicy : public CompilationPolicy {
  109.80 +  int _compiler_count;
  109.81 +protected:
  109.82 +  static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci);
  109.83 +  static void trace_osr_request(methodHandle method, nmethod* osr, int bci);
  109.84 +  static void trace_osr_completion(nmethod* osr_nm);
  109.85 +  void reset_counter_for_invocation_event(methodHandle method);
  109.86 +  void reset_counter_for_back_branch_event(methodHandle method);
  109.87 +public:
  109.88 +  NonTieredCompPolicy() : _compiler_count(0) { }
  109.89 +  virtual int compiler_count(CompLevel comp_level);
  109.90 +  virtual void do_safepoint_work();
  109.91 +  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
  109.92 +  virtual void delay_compilation(methodOop method);
  109.93 +  virtual void disable_compilation(methodOop method);
  109.94 +  virtual bool is_mature(methodOop method);
  109.95 +  virtual void initialize();
  109.96 +  virtual CompileTask* select_task(CompileQueue* compile_queue);
  109.97 +  virtual nmethod* event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS);
  109.98 +  virtual void method_invocation_event(methodHandle m, TRAPS) = 0;
  109.99 +  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS) = 0;
 109.100 +};
 109.101 +
 109.102 +class SimpleCompPolicy : public NonTieredCompPolicy {
 109.103   public:
 109.104 -  void method_invocation_event( methodHandle m, TRAPS);
 109.105 -  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
 109.106 -  int compilation_level(methodHandle m, int branch_bci);
 109.107 +  virtual void method_invocation_event(methodHandle m, TRAPS);
 109.108 +  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
 109.109  };
 109.110  
 109.111  // StackWalkCompPolicy - existing C2 policy
 109.112  
 109.113  #ifdef COMPILER2
 109.114 -class StackWalkCompPolicy : public CompilationPolicy {
 109.115 +class StackWalkCompPolicy : public NonTieredCompPolicy {
 109.116   public:
 109.117 -  void method_invocation_event(methodHandle m, TRAPS);
 109.118 -  void method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS);
 109.119 -  int compilation_level(methodHandle m, int branch_bci);
 109.120 +  virtual void method_invocation_event(methodHandle m, TRAPS);
 109.121 +  virtual void method_back_branch_event(methodHandle m, int bci, TRAPS);
 109.122  
 109.123   private:
 109.124    RFrame* findTopInlinableFrame(GrowableArray<RFrame*>* stack);
   110.1 --- a/src/share/vm/runtime/deoptimization.cpp	Tue Sep 21 06:58:44 2010 -0700
   110.2 +++ b/src/share/vm/runtime/deoptimization.cpp	Wed Sep 22 12:54:51 2010 -0400
   110.3 @@ -1301,7 +1301,7 @@
   110.4      bool update_trap_state = true;
   110.5      bool make_not_entrant = false;
   110.6      bool make_not_compilable = false;
   110.7 -    bool reset_counters = false;
   110.8 +    bool reprofile = false;
   110.9      switch (action) {
  110.10      case Action_none:
  110.11        // Keep the old code.
  110.12 @@ -1328,7 +1328,7 @@
  110.13        // had been traps taken from compiled code.  This will update
  110.14        // the MDO trap history so that the next compilation will
  110.15        // properly detect hot trap sites.
  110.16 -      reset_counters = true;
  110.17 +      reprofile = true;
  110.18        break;
  110.19      case Action_make_not_entrant:
  110.20        // Request immediate recompilation, and get rid of the old code.
  110.21 @@ -1422,7 +1422,7 @@
  110.22        // this trap point already, run the method in the interpreter
  110.23        // for a while to exercise it more thoroughly.
  110.24        if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
  110.25 -        reset_counters = true;
  110.26 +        reprofile = true;
  110.27        }
  110.28  
  110.29      }
  110.30 @@ -1452,24 +1452,21 @@
  110.31          if (trap_method() == nm->method()) {
  110.32            make_not_compilable = true;
  110.33          } else {
  110.34 -          trap_method->set_not_compilable();
  110.35 +          trap_method->set_not_compilable(CompLevel_full_optimization);
  110.36            // But give grace to the enclosing nm->method().
  110.37          }
  110.38        }
  110.39      }
  110.40  
  110.41 -    // Reset invocation counters
  110.42 -    if (reset_counters) {
  110.43 -      if (nm->is_osr_method())
  110.44 -        reset_invocation_counter(trap_scope, CompileThreshold);
  110.45 -      else
  110.46 -        reset_invocation_counter(trap_scope);
  110.47 +    // Reprofile
  110.48 +    if (reprofile) {
  110.49 +      CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
  110.50      }
  110.51  
  110.52      // Give up compiling
  110.53 -    if (make_not_compilable && !nm->method()->is_not_compilable()) {
  110.54 +    if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
  110.55        assert(make_not_entrant, "consistent");
  110.56 -      nm->method()->set_not_compilable();
  110.57 +      nm->method()->set_not_compilable(CompLevel_full_optimization);
  110.58      }
  110.59  
  110.60    } // Free marked resources
  110.61 @@ -1569,22 +1566,6 @@
  110.62                             ignore_maybe_prior_recompile);
  110.63  }
  110.64  
  110.65 -void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
  110.66 -  ScopeDesc* sd = trap_scope;
  110.67 -  for (; !sd->is_top(); sd = sd->sender()) {
  110.68 -    // Reset ICs of inlined methods, since they can trigger compilations also.
  110.69 -    sd->method()->invocation_counter()->reset();
  110.70 -  }
  110.71 -  InvocationCounter* c = sd->method()->invocation_counter();
  110.72 -  if (top_count != _no_count) {
  110.73 -    // It was an OSR method, so bump the count higher.
  110.74 -    c->set(c->state(), top_count);
  110.75 -  } else {
  110.76 -    c->reset();
  110.77 -  }
  110.78 -  sd->method()->backedge_counter()->reset();
  110.79 -}
  110.80 -
  110.81  Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
  110.82  
  110.83    // Still in Java no safepoints
   111.1 --- a/src/share/vm/runtime/deoptimization.hpp	Tue Sep 21 06:58:44 2010 -0700
   111.2 +++ b/src/share/vm/runtime/deoptimization.hpp	Wed Sep 22 12:54:51 2010 -0400
   111.3 @@ -311,12 +311,6 @@
   111.4    static void popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address);
   111.5  
   111.6   private:
   111.7 -  enum {
   111.8 -    _no_count = -1
   111.9 -  };
  111.10 -
  111.11 -  static void reset_invocation_counter(ScopeDesc* trap_scope, jint count = _no_count);
  111.12 -
  111.13    static methodDataOop get_method_data(JavaThread* thread, methodHandle m, bool create_if_missing);
  111.14    // Update the mdo's count and per-BCI reason bits, returning previous state:
  111.15    static ProfileData* query_update_method_data(methodDataHandle trap_mdo,
   112.1 --- a/src/share/vm/runtime/dtraceJSDT.cpp	Tue Sep 21 06:58:44 2010 -0700
   112.2 +++ b/src/share/vm/runtime/dtraceJSDT.cpp	Wed Sep 22 12:54:51 2010 -0400
   112.3 @@ -65,7 +65,7 @@
   112.4          THROW_MSG_0(vmSymbols::java_lang_RuntimeException(),
   112.5            "Unable to register DTrace probes (CodeCache: no room for DTrace nmethods).");
   112.6        }
   112.7 -      h_method()->set_not_compilable(CompLevel_highest_tier);
   112.8 +      h_method()->set_not_compilable();
   112.9        h_method()->set_code(h_method, nm);
  112.10        probes->nmethod_at_put(count++, nm);
  112.11      }
   113.1 --- a/src/share/vm/runtime/globals.hpp	Tue Sep 21 06:58:44 2010 -0700
   113.2 +++ b/src/share/vm/runtime/globals.hpp	Wed Sep 22 12:54:51 2010 -0400
   113.3 @@ -35,14 +35,7 @@
   113.4  define_pd_global(bool, TieredCompilation,            false);
   113.5  
   113.6  define_pd_global(intx, CompileThreshold,             0);
   113.7 -define_pd_global(intx, Tier2CompileThreshold,        0);
   113.8 -define_pd_global(intx, Tier3CompileThreshold,        0);
   113.9 -define_pd_global(intx, Tier4CompileThreshold,        0);
  113.10 -
  113.11  define_pd_global(intx, BackEdgeThreshold,            0);
  113.12 -define_pd_global(intx, Tier2BackEdgeThreshold,       0);
  113.13 -define_pd_global(intx, Tier3BackEdgeThreshold,       0);
  113.14 -define_pd_global(intx, Tier4BackEdgeThreshold,       0);
  113.15  
  113.16  define_pd_global(intx, OnStackReplacePercentage,     0);
  113.17  define_pd_global(bool, ResizeTLAB,                   false);
  113.18 @@ -1974,7 +1967,7 @@
  113.19    product(uintx, TenuredGenerationSizeSupplementDecay, 2,                   \
  113.20            "Decay factor to TenuredGenerationSizeIncrement")                 \
  113.21                                                                              \
  113.22 -  product(uintx, MaxGCPauseMillis, max_uintx,                               \
  113.23 +  product(uintx, MaxGCPauseMillis, max_uintx,                           \
  113.24            "Adaptive size policy maximum GC pause time goal in msec, "       \
  113.25            "or (G1 Only) the max. GC time per MMU time slice")               \
  113.26                                                                              \
  113.27 @@ -2369,9 +2362,6 @@
  113.28    develop(bool, EagerInitialization, false,                                 \
  113.29            "Eagerly initialize classes if possible")                         \
  113.30                                                                              \
  113.31 -  product(bool, Tier1UpdateMethodData, trueInTiered,                        \
  113.32 -          "Update methodDataOops in Tier1-generated code")                  \
  113.33 -                                                                            \
  113.34    develop(bool, TraceMethodReplacement, false,                              \
  113.35            "Print when methods are replaced do to recompilation")            \
  113.36                                                                              \
  113.37 @@ -2904,7 +2894,7 @@
  113.38            "if non-zero, start verifying C heap after Nth call to "          \
  113.39            "malloc/realloc/free")                                            \
  113.40                                                                              \
  113.41 -  product(intx, TypeProfileWidth,      2,                                   \
  113.42 +  product(intx, TypeProfileWidth,     2,                                   \
  113.43            "number of receiver types to record in call/cast profile")        \
  113.44                                                                              \
  113.45    develop(intx, BciProfileWidth,      2,                                    \
  113.46 @@ -3312,30 +3302,98 @@
  113.47    product_pd(intx, BackEdgeThreshold,                                       \
  113.48            "Interpreter Back edge threshold at which an OSR compilation is invoked")\
  113.49                                                                              \
  113.50 -  product(intx, Tier1BytecodeLimit,      10,                                \
  113.51 -          "Must have at least this many bytecodes before tier1"             \
  113.52 -          "invocation counters are used")                                   \
  113.53 -                                                                            \
  113.54 -  product_pd(intx, Tier2CompileThreshold,                                   \
  113.55 -          "threshold at which a tier 2 compilation is invoked")             \
  113.56 -                                                                            \
  113.57 -  product_pd(intx, Tier2BackEdgeThreshold,                                  \
  113.58 -          "Back edge threshold at which a tier 2 compilation is invoked")   \
  113.59 -                                                                            \
  113.60 -  product_pd(intx, Tier3CompileThreshold,                                   \
  113.61 -          "threshold at which a tier 3 compilation is invoked")             \
  113.62 -                                                                            \
  113.63 -  product_pd(intx, Tier3BackEdgeThreshold,                                  \
  113.64 -          "Back edge threshold at which a tier 3 compilation is invoked")   \
  113.65 -                                                                            \
  113.66 -  product_pd(intx, Tier4CompileThreshold,                                   \
  113.67 -          "threshold at which a tier 4 compilation is invoked")             \
  113.68 -                                                                            \
  113.69 -  product_pd(intx, Tier4BackEdgeThreshold,                                  \
  113.70 -          "Back edge threshold at which a tier 4 compilation is invoked")   \
  113.71 +  product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
  113.72 +          "Interpreter (tier 0) invocation notification frequency.")        \
  113.73 +                                                                            \
  113.74 +  product(intx, Tier2InvokeNotifyFreqLog, 11,                               \
  113.75 +          "C1 without MDO (tier 2) invocation notification frequency.")     \
  113.76 +                                                                            \
  113.77 +  product(intx, Tier3InvokeNotifyFreqLog, 10,                               \
  113.78 +          "C1 with MDO profiling (tier 3) invocation notification "         \
  113.79 +          "frequency.")                                                     \
  113.80 +                                                                            \
  113.81 +  product(intx, Tier0BackedgeNotifyFreqLog, 10,                             \
  113.82 +          "Interpreter (tier 0) invocation notification frequency.")        \
  113.83 +                                                                            \
  113.84 +  product(intx, Tier2BackedgeNotifyFreqLog, 14,                             \
  113.85 +          "C1 without MDO (tier 2) invocation notification frequency.")     \
  113.86 +                                                                            \
  113.87 +  product(intx, Tier3BackedgeNotifyFreqLog, 13,                             \
  113.88 +          "C1 with MDO profiling (tier 3) invocation notification "         \
  113.89 +          "frequency.")                                                     \
  113.90 +                                                                            \
  113.91 +  product(intx, Tier2CompileThreshold, 0,                                   \
  113.92 +          "threshold at which tier 2 compilation is invoked")               \
  113.93 +                                                                            \
  113.94 +  product(intx, Tier2BackEdgeThreshold, 0,                                  \
  113.95 +          "Back edge threshold at which tier 2 compilation is invoked")     \
  113.96 +                                                                            \
  113.97 +  product(intx, Tier3InvocationThreshold, 200,                              \
  113.98 +          "Compile if number of method invocations crosses this "           \
  113.99 +          "threshold")                                                      \
 113.100 +                                                                            \
 113.101 +  product(intx, Tier3MinInvocationThreshold, 100,                           \
 113.102 +          "Minimum invocation to compile at tier 3")                        \
 113.103 +                                                                            \
 113.104 +  product(intx, Tier3CompileThreshold, 2000,                                \
 113.105 +          "Threshold at which tier 3 compilation is invoked (invocation "   \
 113.106 +          "minimum must be satisfied.")                                     \
 113.107 +                                                                            \
 113.108 +  product(intx, Tier3BackEdgeThreshold,  7000,                              \
 113.109 +          "Back edge threshold at which tier 3 OSR compilation is invoked") \
 113.110 +                                                                            \
 113.111 +  product(intx, Tier4InvocationThreshold, 5000,                             \
 113.112 +          "Compile if number of method invocations crosses this "           \
 113.113 +          "threshold")                                                      \
 113.114 +                                                                            \
 113.115 +  product(intx, Tier4MinInvocationThreshold, 600,                           \
 113.116 +          "Minimum invocation to compile at tier 4")                        \
 113.117 +                                                                            \
 113.118 +  product(intx, Tier4CompileThreshold, 15000,                               \
 113.119 +          "Threshold at which tier 4 compilation is invoked (invocation "   \
 113.120 +          "minimum must be satisfied.")                                     \
 113.121 +                                                                            \
 113.122 +  product(intx, Tier4BackEdgeThreshold, 40000,                              \
 113.123 +          "Back edge threshold at which tier 4 OSR compilation is invoked") \
 113.124 +                                                                            \
 113.125 +  product(intx, Tier3DelayOn, 5,                                            \
 113.126 +          "If C2 queue size grows over this amount per compiler thread "    \
 113.127 +          "stop compiling at tier 3 and start compiling at tier 2")         \
 113.128 +                                                                            \
 113.129 +  product(intx, Tier3DelayOff, 2,                                           \
 113.130 +          "If C2 queue size is less than this amount per compiler thread "  \
 113.131 +          "allow methods compiled at tier 2 transition to tier 3")          \
 113.132 +                                                                            \
 113.133 +  product(intx, Tier3LoadFeedback, 5,                                       \
 113.134 +          "Tier 3 thresholds will increase twofold when C1 queue size "     \
 113.135 +          "reaches this amount per compiler thread")                        \
 113.136 +                                                                            \
 113.137 +  product(intx, Tier4LoadFeedback, 3,                                       \
 113.138 +          "Tier 4 thresholds will increase twofold when C2 queue size "     \
 113.139 +          "reaches this amount per compiler thread")                        \
 113.140 +                                                                            \
 113.141 +  product(intx, TieredCompileTaskTimeout, 50,                               \
 113.142 +          "Kill compile task if method was not used within "                \
 113.143 +          "given timeout in milliseconds")                                  \
 113.144 +                                                                            \
 113.145 +  product(intx, TieredStopAtLevel, 4,                                       \
 113.146 +          "Stop at given compilation level")                                \
 113.147 +                                                                            \
 113.148 +  product(intx, Tier0ProfilingStartPercentage, 200,                         \
 113.149 +          "Start profiling in interpreter if the counters exceed tier 3"    \
 113.150 +          "thresholds by the specified percentage")                         \
 113.151 +                                                                            \
 113.152 +  product(intx, TieredRateUpdateMinTime, 1,                                 \
 113.153 +          "Minimum rate sampling interval (in milliseconds)")               \
 113.154 +                                                                            \
 113.155 +  product(intx, TieredRateUpdateMaxTime, 25,                                \
 113.156 +          "Maximum rate sampling interval (in milliseconds)")               \
 113.157                                                                              \
 113.158    product_pd(bool, TieredCompilation,                                       \
 113.159 -          "Enable two-tier compilation")                                    \
 113.160 +          "Enable tiered compilation")                                      \
 113.161 +                                                                            \
 113.162 +  product(bool, PrintTieredEvents, false,                                   \
 113.163 +          "Print tiered events notifications")                              \
 113.164                                                                              \
 113.165    product(bool, StressTieredRuntime, false,                                 \
 113.166            "Alternate client and server compiler on compile requests")       \
   114.1 --- a/src/share/vm/runtime/java.cpp	Tue Sep 21 06:58:44 2010 -0700
   114.2 +++ b/src/share/vm/runtime/java.cpp	Wed Sep 22 12:54:51 2010 -0400
   114.3 @@ -198,7 +198,7 @@
   114.4    if (CountCompiledCalls) {
   114.5      print_method_invocation_histogram();
   114.6    }
   114.7 -  if (ProfileInterpreter || Tier1UpdateMethodData) {
   114.8 +  if (ProfileInterpreter || C1UpdateMethodData) {
   114.9      print_method_profiling_data();
  114.10    }
  114.11    if (TimeCompiler) {
   115.1 --- a/src/share/vm/runtime/javaCalls.cpp	Tue Sep 21 06:58:44 2010 -0700
   115.2 +++ b/src/share/vm/runtime/javaCalls.cpp	Wed Sep 22 12:54:51 2010 -0400
   115.3 @@ -329,9 +329,10 @@
   115.4  
   115.5  
   115.6    assert(!thread->is_Compiler_thread(), "cannot compile from the compiler");
   115.7 -  if (CompilationPolicy::mustBeCompiled(method)) {
   115.8 +  if (CompilationPolicy::must_be_compiled(method)) {
   115.9      CompileBroker::compile_method(method, InvocationEntryBci,
  115.10 -                                  methodHandle(), 0, "mustBeCompiled", CHECK);
  115.11 +                                  CompLevel_initial_compile,
  115.12 +                                  methodHandle(), 0, "must_be_compiled", CHECK);
  115.13    }
  115.14  
  115.15    // Since the call stub sets up like the interpreter we call the from_interpreted_entry
   116.1 --- a/src/share/vm/runtime/safepoint.cpp	Tue Sep 21 06:58:44 2010 -0700
   116.2 +++ b/src/share/vm/runtime/safepoint.cpp	Wed Sep 22 12:54:51 2010 -0400
   116.3 @@ -430,29 +430,7 @@
   116.4    return false;
   116.5  }
   116.6  
   116.7 -jlong CounterDecay::_last_timestamp = 0;
   116.8  
   116.9 -static void do_method(methodOop m) {
  116.10 -  m->invocation_counter()->decay();
  116.11 -}
  116.12 -
  116.13 -void CounterDecay::decay() {
  116.14 -  _last_timestamp = os::javaTimeMillis();
  116.15 -
  116.16 -  // This operation is going to be performed only at the end of a safepoint
  116.17 -  // and hence GC's will not be going on, all Java mutators are suspended
  116.18 -  // at this point and hence SystemDictionary_lock is also not needed.
  116.19 -  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
  116.20 -  int nclasses = SystemDictionary::number_of_classes();
  116.21 -  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
  116.22 -                                        CounterHalfLifeTime);
  116.23 -  for (int i = 0; i < classes_per_tick; i++) {
  116.24 -    klassOop k = SystemDictionary::try_get_next_class();
  116.25 -    if (k != NULL && k->klass_part()->oop_is_instance()) {
  116.26 -      instanceKlass::cast(k)->methods_do(do_method);
  116.27 -    }
  116.28 -  }
  116.29 -}
  116.30  
  116.31  // Various cleaning tasks that should be done periodically at safepoints
  116.32  void SafepointSynchronize::do_cleanup_tasks() {
  116.33 @@ -465,10 +443,9 @@
  116.34      TraceTime t2("updating inline caches", TraceSafepointCleanupTime);
  116.35      InlineCacheBuffer::update_inline_caches();
  116.36    }
  116.37 -
  116.38 -  if(UseCounterDecay && CounterDecay::is_decay_needed()) {
  116.39 -    TraceTime t3("decaying counter", TraceSafepointCleanupTime);
  116.40 -    CounterDecay::decay();
  116.41 +  {
  116.42 +    TraceTime t3("compilation policy safepoint handler", TraceSafepointCleanupTime);
  116.43 +    CompilationPolicy::policy()->do_safepoint_work();
  116.44    }
  116.45  
  116.46    TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
   117.1 --- a/src/share/vm/runtime/safepoint.hpp	Tue Sep 21 06:58:44 2010 -0700
   117.2 +++ b/src/share/vm/runtime/safepoint.hpp	Wed Sep 22 12:54:51 2010 -0400
   117.3 @@ -1,5 +1,5 @@
   117.4  /*
   117.5 - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
   117.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   117.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   117.8   *
   117.9   * This code is free software; you can redistribute it and/or modify it
  117.10 @@ -147,6 +147,9 @@
  117.11    static long last_non_safepoint_interval() {
  117.12      return os::javaTimeMillis() - _end_of_last_safepoint;
  117.13    }
  117.14 +  static long end_of_last_safepoint() {
  117.15 +    return _end_of_last_safepoint;
  117.16 +  }
  117.17    static bool is_cleanup_needed();
  117.18    static void do_cleanup_tasks();
  117.19  
  117.20 @@ -228,15 +231,4 @@
  117.21    }
  117.22  };
  117.23  
  117.24 -//
  117.25 -// CounterDecay
  117.26 -//
  117.27 -// Interates through invocation counters and decrements them. This
  117.28 -// is done at each safepoint.
  117.29 -//
  117.30 -class CounterDecay : public AllStatic {
  117.31 -  static jlong _last_timestamp;
  117.32 - public:
  117.33 -  static  void decay();
  117.34 -  static  bool is_decay_needed() { return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; }
  117.35 -};
  117.36 +
   118.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Sep 21 06:58:44 2010 -0700
   118.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Wed Sep 22 12:54:51 2010 -0400
   118.3 @@ -1633,8 +1633,13 @@
   118.4  char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
   118.5                                                          oopDesc* required,
   118.6                                                          oopDesc* actual) {
   118.7 +  if (TraceMethodHandles) {
   118.8 +    tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"",
   118.9 +                  thread, required, actual);
  118.10 +  }
  118.11    assert(EnableMethodHandles, "");
  118.12    oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
  118.13 +  char* message = NULL;
  118.14    if (singleKlass != NULL) {
  118.15      const char* objName = "argument or return value";
  118.16      if (actual != NULL) {
  118.17 @@ -1647,7 +1652,7 @@
  118.18      Klass* targetKlass = Klass::cast(required->is_klass()
  118.19                                       ? (klassOop)required
  118.20                                       : java_lang_Class::as_klassOop(required));
  118.21 -    return generate_class_cast_message(objName, targetKlass->external_name());
  118.22 +    message = generate_class_cast_message(objName, targetKlass->external_name());
  118.23    } else {
  118.24      // %%% need to get the MethodType string, without messing around too much
  118.25      // Get a signature from the invoke instruction
  118.26 @@ -1679,9 +1684,13 @@
  118.27        if (mhName[0] == '$')
  118.28          mhName = actual_method->signature()->as_C_string();
  118.29      }
  118.30 -    return generate_class_cast_message(mhName, targetType,
  118.31 -                                       " cannot be called as ");
  118.32 +    message = generate_class_cast_message(mhName, targetType,
  118.33 +                                          " cannot be called as ");
  118.34    }
  118.35 +  if (TraceMethodHandles) {
  118.36 +    tty->print_cr("WrongMethodType => message=%s", message);
  118.37 +  }
  118.38 +  return message;
  118.39  }
  118.40  
  118.41  oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
   119.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   119.2 +++ b/src/share/vm/runtime/simpleThresholdPolicy.cpp	Wed Sep 22 12:54:51 2010 -0400
   119.3 @@ -0,0 +1,377 @@
   119.4 +/*
   119.5 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   119.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   119.7 + *
   119.8 + * This code is free software; you can redistribute it and/or modify it
   119.9 + * under the terms of the GNU General Public License version 2 only, as
  119.10 + * published by the Free Software Foundation.
  119.11 + *
  119.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  119.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  119.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  119.15 + * version 2 for more details (a copy is included in the LICENSE file that
  119.16 + * accompanied this code).
  119.17 + *
  119.18 + * You should have received a copy of the GNU General Public License version
  119.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  119.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  119.21 + *
  119.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  119.23 + * or visit www.oracle.com if you need additional information or have any
  119.24 + * questions.
  119.25 + *
  119.26 + */
  119.27 +
  119.28 +# include "incls/_precompiled.incl"
  119.29 +# include "incls/_simpleThresholdPolicy.cpp.incl"
  119.30 +
  119.31 +// Print an event.
  119.32 +void SimpleThresholdPolicy::print_event(EventType type, methodHandle mh, methodHandle imh,
  119.33 +                                        int bci, CompLevel level) {
  119.34 +  bool inlinee_event = mh() != imh();
  119.35 +
  119.36 +  ttyLocker tty_lock;
  119.37 +  tty->print("%lf: [", os::elapsedTime());
  119.38 +
  119.39 +  int invocation_count = mh->invocation_count();
  119.40 +  int backedge_count = mh->backedge_count();
  119.41 +  switch(type) {
  119.42 +  case CALL:
  119.43 +    tty->print("call");
  119.44 +    break;
  119.45 +  case LOOP:
  119.46 +    tty->print("loop");
  119.47 +    break;
  119.48 +  case COMPILE:
  119.49 +    tty->print("compile");
  119.50 +  }
  119.51 +
  119.52 +  tty->print(" level: %d ", level);
  119.53 +
  119.54 +  ResourceMark rm;
  119.55 +  char *method_name = mh->name_and_sig_as_C_string();
  119.56 +  tty->print("[%s", method_name);
  119.57 +  // We can have an inlinee, although currently we don't generate any notifications for the inlined methods.
  119.58 +  if (inlinee_event) {
  119.59 +    char *inlinee_name = imh->name_and_sig_as_C_string();
  119.60 +    tty->print(" [%s]] ", inlinee_name);
  119.61 +  }
  119.62 +  else tty->print("] ");
  119.63 +  tty->print("@%d queues: %d,%d", bci, CompileBroker::queue_size(CompLevel_full_profile),
  119.64 +                                       CompileBroker::queue_size(CompLevel_full_optimization));
  119.65 +
  119.66 +  print_specific(type, mh, imh, bci, level);
  119.67 +
  119.68 +  if (type != COMPILE) {
  119.69 +    methodDataHandle mdh = mh->method_data();
  119.70 +    int mdo_invocations = 0, mdo_backedges = 0;
  119.71 +    if (mdh() != NULL) {
  119.72 +      mdo_invocations = mdh->invocation_count();
  119.73 +      mdo_backedges = mdh->backedge_count();
  119.74 +    }
  119.75 +    tty->print(" total: %d,%d mdo: %d,%d",
  119.76 +               invocation_count, backedge_count,
  119.77 +               mdo_invocations, mdo_backedges);
  119.78 +    tty->print(" max levels: %d,%d",
  119.79 +                mh->highest_comp_level(), mh->highest_osr_comp_level());
  119.80 +    if (inlinee_event) {
  119.81 +      tty->print(" inlinee max levels: %d,%d", imh->highest_comp_level(), imh->highest_osr_comp_level());
  119.82 +    }
  119.83 +    tty->print(" compilable: ");
  119.84 +    bool need_comma = false;
  119.85 +    if (!mh->is_not_compilable(CompLevel_full_profile)) {
  119.86 +      tty->print("c1");
  119.87 +      need_comma = true;
  119.88 +    }
  119.89 +    if (!mh->is_not_compilable(CompLevel_full_optimization)) {
  119.90 +      if (need_comma) tty->print(", ");
  119.91 +      tty->print("c2");
  119.92 +      need_comma = true;
  119.93 +    }
  119.94 +    if (!mh->is_not_osr_compilable()) {
  119.95 +      if (need_comma) tty->print(", ");
  119.96 +      tty->print("osr");
  119.97 +    }
  119.98 +    tty->print(" status:");
  119.99 +    if (mh->queued_for_compilation()) {
 119.100 +      tty->print(" in queue");
 119.101 +    } else tty->print(" idle");
 119.102 +  }
 119.103 +  tty->print_cr("]");
 119.104 +}
 119.105 +
 119.106 +void SimpleThresholdPolicy::initialize() {
 119.107 +  if (FLAG_IS_DEFAULT(CICompilerCount)) {
 119.108 +    FLAG_SET_DEFAULT(CICompilerCount, 3);
 119.109 +  }
 119.110 +  int count = CICompilerCount;
 119.111 +  if (CICompilerCountPerCPU) {
 119.112 +    count = MAX2(log2_intptr(os::active_processor_count()), 1) * 3 / 2;
 119.113 +  }
 119.114 +  set_c1_count(MAX2(count / 3, 1));
 119.115 +  set_c2_count(MAX2(count - count / 3, 1));
 119.116 +}
 119.117 +
 119.118 +void SimpleThresholdPolicy::set_carry_if_necessary(InvocationCounter *counter) {
 119.119 +  if (!counter->carry() && counter->count() > InvocationCounter::count_limit / 2) {
 119.120 +    counter->set_carry_flag();
 119.121 +  }
 119.122 +}
 119.123 +
 119.124 +// Set carry flags on the counters if necessary
 119.125 +void SimpleThresholdPolicy::handle_counter_overflow(methodOop method) {
 119.126 +  set_carry_if_necessary(method->invocation_counter());
 119.127 +  set_carry_if_necessary(method->backedge_counter());
 119.128 +  methodDataOop mdo = method->method_data();
 119.129 +  if (mdo != NULL) {
 119.130 +    set_carry_if_necessary(mdo->invocation_counter());
 119.131 +    set_carry_if_necessary(mdo->backedge_counter());
 119.132 +  }
 119.133 +}
 119.134 +
 119.135 +// Called with the queue locked and with at least one element
 119.136 +CompileTask* SimpleThresholdPolicy::select_task(CompileQueue* compile_queue) {
 119.137 +  return compile_queue->first();
 119.138 +}
 119.139 +
 119.140 +nmethod* SimpleThresholdPolicy::event(methodHandle method, methodHandle inlinee,
 119.141 +                                      int branch_bci, int bci, CompLevel comp_level, TRAPS) {
 119.142 +  if (comp_level == CompLevel_none &&
 119.143 +      JvmtiExport::can_post_interpreter_events()) {
 119.144 +    assert(THREAD->is_Java_thread(), "Should be java thread");
 119.145 +    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
 119.146 +      return NULL;
 119.147 +    }
 119.148 +  }
 119.149 +  nmethod *osr_nm = NULL;
 119.150 +
 119.151 +  handle_counter_overflow(method());
 119.152 +  if (method() != inlinee()) {
 119.153 +    handle_counter_overflow(inlinee());
 119.154 +  }
 119.155 +
 119.156 +  if (PrintTieredEvents) {
 119.157 +    print_event(bci == InvocationEntryBci ? CALL : LOOP, method, inlinee, bci, comp_level);
 119.158 +  }
 119.159 +
 119.160 +  if (bci == InvocationEntryBci) {
 119.161 +    method_invocation_event(method, inlinee, comp_level, THREAD);
 119.162 +  } else {
 119.163 +    method_back_branch_event(method, inlinee, bci, comp_level, THREAD);
 119.164 +    int highest_level = method->highest_osr_comp_level();
 119.165 +    if (highest_level > comp_level) {
 119.166 +      osr_nm = method->lookup_osr_nmethod_for(bci, highest_level, false);
 119.167 +    }
 119.168 +  }
 119.169 +  return osr_nm;
 119.170 +}
 119.171 +
 119.172 +// Check if the method can be compiled, change level if necessary
 119.173 +void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
 119.174 +  // Take the given ceiling into the account.
 119.175 +  // NOTE: You can set it to 1 to get a pure C1 version.
 119.176 +  if ((CompLevel)TieredStopAtLevel < level) {
 119.177 +    level = (CompLevel)TieredStopAtLevel;
 119.178 +  }
 119.179 +  if (level == CompLevel_none) {
 119.180 +    return;
 119.181 +  }
 119.182 +  // Check if the method can be compiled, if not - try different levels.
 119.183 +  if (!can_be_compiled(mh, level)) {
 119.184 +    if (level < CompLevel_full_optimization && can_be_compiled(mh, CompLevel_full_optimization)) {
 119.185 +      compile(mh, bci, CompLevel_full_optimization, THREAD);
 119.186 +    }
 119.187 +    if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
 119.188 +        compile(mh, bci, CompLevel_simple, THREAD);
 119.189 +    }
 119.190 +    return;
 119.191 +  }
 119.192 +  if (bci != InvocationEntryBci && mh->is_not_osr_compilable()) {
 119.193 +    return;
 119.194 +  }
 119.195 +  if (PrintTieredEvents) {
 119.196 +    print_event(COMPILE, mh, mh, bci, level);
 119.197 +  }
 119.198 +  if (!CompileBroker::compilation_is_in_queue(mh, bci)) {
 119.199 +    submit_compile(mh, bci, level, THREAD);
 119.200 +  }
 119.201 +}
 119.202 +
 119.203 +// Tell the broker to compile the method
 119.204 +void SimpleThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS) {
 119.205 +  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
 119.206 +  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", THREAD);
 119.207 +}
 119.208 +
 119.209 +// Call and loop predicates determine whether a transition to a higher
 119.210 +// compilation level should be performed (pointers to predicate functions
 119.211 +// are passed to common() transition function).
 119.212 +bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
 119.213 +  switch(cur_level) {
 119.214 +  case CompLevel_none:
 119.215 +  case CompLevel_limited_profile: {
 119.216 +    return loop_predicate_helper<CompLevel_none>(i, b, 1.0);
 119.217 +  }
 119.218 +  case CompLevel_full_profile: {
 119.219 +    return loop_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
 119.220 +  }
 119.221 +  default:
 119.222 +    return true;
 119.223 +  }
 119.224 +}
 119.225 +
 119.226 +bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
 119.227 +  switch(cur_level) {
 119.228 +  case CompLevel_none:
 119.229 +  case CompLevel_limited_profile: {
 119.230 +    return call_predicate_helper<CompLevel_none>(i, b, 1.0);
 119.231 +  }
 119.232 +  case CompLevel_full_profile: {
 119.233 +    return call_predicate_helper<CompLevel_full_profile>(i, b, 1.0);
 119.234 +  }
 119.235 +  default:
 119.236 +    return true;
 119.237 +  }
 119.238 +}
 119.239 +
 119.240 +// Determine is a method is mature.
 119.241 +bool SimpleThresholdPolicy::is_mature(methodOop method) {
 119.242 +  if (is_trivial(method)) return true;
 119.243 +  methodDataOop mdo = method->method_data();
 119.244 +  if (mdo != NULL) {
 119.245 +    int i = mdo->invocation_count();
 119.246 +    int b = mdo->backedge_count();
 119.247 +    double k = ProfileMaturityPercentage / 100.0;
 119.248 +    return call_predicate_helper<CompLevel_full_profile>(i, b, k) ||
 119.249 +           loop_predicate_helper<CompLevel_full_profile>(i, b, k);
 119.250 +  }
 119.251 +  return false;
 119.252 +}
 119.253 +
 119.254 +// Common transition function. Given a predicate determines if a method should transition to another level.
 119.255 +CompLevel SimpleThresholdPolicy::common(Predicate p, methodOop method, CompLevel cur_level) {
 119.256 +  CompLevel next_level = cur_level;
 119.257 +  int i = method->invocation_count();
 119.258 +  int b = method->backedge_count();
 119.259 +
 119.260 +  switch(cur_level) {
 119.261 +  case CompLevel_none:
 119.262 +    {
 119.263 +      methodDataOop mdo = method->method_data();
 119.264 +      if (mdo != NULL) {
 119.265 +        int mdo_i = mdo->invocation_count();
 119.266 +        int mdo_b = mdo->backedge_count();
 119.267 +        // If we were at full profile level, would we switch to full opt?
 119.268 +        if ((this->*p)(mdo_i, mdo_b, CompLevel_full_profile)) {
 119.269 +          next_level = CompLevel_full_optimization;
 119.270 +        }
 119.271 +      }
 119.272 +    }
 119.273 +    if (next_level == cur_level && (this->*p)(i, b, cur_level)) {
 119.274 +      if (is_trivial(method)) {
 119.275 +        next_level = CompLevel_simple;
 119.276 +      } else {
 119.277 +        next_level = CompLevel_full_profile;
 119.278 +      }
 119.279 +    }
 119.280 +    break;
 119.281 +  case CompLevel_limited_profile:
 119.282 +  case CompLevel_full_profile:
 119.283 +    if (is_trivial(method)) {
 119.284 +      next_level = CompLevel_simple;
 119.285 +    } else {
 119.286 +      methodDataOop mdo = method->method_data();
 119.287 +      guarantee(mdo != NULL, "MDO should always exist");
 119.288 +      if (mdo->would_profile()) {
 119.289 +        int mdo_i = mdo->invocation_count();
 119.290 +        int mdo_b = mdo->backedge_count();
 119.291 +        if ((this->*p)(mdo_i, mdo_b, cur_level)) {
 119.292 +          next_level = CompLevel_full_optimization;
 119.293 +        }
 119.294 +      } else {
 119.295 +        next_level = CompLevel_full_optimization;
 119.296 +      }
 119.297 +    }
 119.298 +    break;
 119.299 +  }
 119.300 +  return next_level;
 119.301 +}
 119.302 +
 119.303 +// Determine if a method should be compiled with a normal entry point at a different level.
 119.304 +CompLevel SimpleThresholdPolicy::call_event(methodOop method,  CompLevel cur_level) {
 119.305 +  CompLevel highest_level = (CompLevel)method->highest_comp_level();
 119.306 +  if (cur_level == CompLevel_none && highest_level > cur_level) {
 119.307 +    // TODO: We may want to try to do more extensive reprofiling in this case.
 119.308 +    return highest_level;
 119.309 +  }
 119.310 +
 119.311 +  CompLevel osr_level = (CompLevel) method->highest_osr_comp_level();
 119.312 +  CompLevel next_level = common(&SimpleThresholdPolicy::call_predicate, method, cur_level);
 119.313 +
 119.314 +  // If OSR method level is greater than the regular method level, the levels should be
 119.315 +  // equalized by raising the regular method level in order to avoid OSRs during each
 119.316 +  // invocation of the method.
 119.317 +  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
 119.318 +    methodDataOop mdo = method->method_data();
 119.319 +    guarantee(mdo != NULL, "MDO should not be NULL");
 119.320 +    if (mdo->invocation_count() >= 1) {
 119.321 +      next_level = CompLevel_full_optimization;
 119.322 +    }
 119.323 +  } else {
 119.324 +    next_level = MAX2(osr_level, next_level);
 119.325 +  }
 119.326 +
 119.327 +  return next_level;
 119.328 +}
 119.329 +
 119.330 +// Determine if we should do an OSR compilation of a given method.
 119.331 +CompLevel SimpleThresholdPolicy::loop_event(methodOop method, CompLevel cur_level) {
 119.332 +  if (cur_level == CompLevel_none) {
 119.333 +    // If there is a live OSR method that means that we deopted to the interpreter
 119.334 +    // for the transition.
 119.335 +    CompLevel osr_level = (CompLevel)method->highest_osr_comp_level();
 119.336 +    if (osr_level > CompLevel_none) {
 119.337 +      return osr_level;
 119.338 +    }
 119.339 +  }
 119.340 +  return common(&SimpleThresholdPolicy::loop_predicate, method, cur_level);
 119.341 +}
 119.342 +
 119.343 +
 119.344 +// Handle the invocation event.
 119.345 +void SimpleThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
 119.346 +                                              CompLevel level, TRAPS) {
 119.347 +  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
 119.348 +    CompLevel next_level = call_event(mh(), level);
 119.349 +    if (next_level != level) {
 119.350 +      compile(mh, InvocationEntryBci, next_level, THREAD);
 119.351 +    }
 119.352 +  }
 119.353 +}
 119.354 +
 119.355 +// Handle the back branch event. Notice that we can compile the method
 119.356 +// with a regular entry from here.
 119.357 +void SimpleThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
 119.358 +                                               int bci, CompLevel level, TRAPS) {
 119.359 +  // If the method is already compiling, quickly bail out.
 119.360 +  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, bci)) {
 119.361 +    // Use loop event as an opportinity to also check there's been
 119.362 +    // enough calls.
 119.363 +    CompLevel cur_level = comp_level(mh());
 119.364 +    CompLevel next_level = call_event(mh(), cur_level);
 119.365 +    CompLevel next_osr_level = loop_event(mh(), level);
 119.366 +
 119.367 +    next_level = MAX2(next_level,
 119.368 +                      next_osr_level < CompLevel_full_optimization ? next_osr_level : cur_level);
 119.369 +    bool is_compiling = false;
 119.370 +    if (next_level != cur_level) {
 119.371 +      compile(mh, InvocationEntryBci, next_level, THREAD);
 119.372 +      is_compiling = true;
 119.373 +    }
 119.374 +
 119.375 +    // Do the OSR version
 119.376 +    if (!is_compiling && next_osr_level != level) {
 119.377 +      compile(mh, bci, next_osr_level, THREAD);
 119.378 +    }
 119.379 +  }
 119.380 +}
   120.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   120.2 +++ b/src/share/vm/runtime/simpleThresholdPolicy.hpp	Wed Sep 22 12:54:51 2010 -0400
   120.3 @@ -0,0 +1,107 @@
   120.4 +/*
   120.5 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   120.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   120.7 + *
   120.8 + * This code is free software; you can redistribute it and/or modify it
   120.9 + * under the terms of the GNU General Public License version 2 only, as
  120.10 + * published by the Free Software Foundation.
  120.11 + *
  120.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  120.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  120.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  120.15 + * version 2 for more details (a copy is included in the LICENSE file that
  120.16 + * accompanied this code).
  120.17 + *
  120.18 + * You should have received a copy of the GNU General Public License version
  120.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  120.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  120.21 + *
  120.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  120.23 + * or visit www.oracle.com if you need additional information or have any
  120.24 + * questions.
  120.25 + *
  120.26 + */
  120.27 +
  120.28 +class CompileTask;
  120.29 +class CompileQueue;
  120.30 +
  120.31 +class SimpleThresholdPolicy : public CompilationPolicy {
  120.32 +  int _c1_count, _c2_count;
  120.33 +
  120.34 +  // Check if the counter is big enough and set carry (effectively infinity).
  120.35 +  inline void set_carry_if_necessary(InvocationCounter *counter);
  120.36 +  // Set carry flags in the counters (in methodOop and MDO).
  120.37 +  inline void handle_counter_overflow(methodOop method);
  120.38 +  // Call and loop predicates determine whether a transition to a higher compilation
  120.39 +  // level should be performed (pointers to predicate functions are passed to common_TF().
  120.40 +  // Predicates also take compiler load into account.
  120.41 +  typedef bool (SimpleThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level);
  120.42 +  bool call_predicate(int i, int b, CompLevel cur_level);
  120.43 +  bool loop_predicate(int i, int b, CompLevel cur_level);
  120.44 +  // Common transition function. Given a predicate determines if a method should transition to another level.
  120.45 +  CompLevel common(Predicate p, methodOop method, CompLevel cur_level);
  120.46 +  // Transition functions.
  120.47 +  // call_event determines if a method should be compiled at a different
  120.48 +  // level with a regular invocation entry.
  120.49 +  CompLevel call_event(methodOop method, CompLevel cur_level);
  120.50 +  // loop_event checks if a method should be OSR compiled at a different
  120.51 +  // level.
  120.52 +  CompLevel loop_event(methodOop method, CompLevel cur_level);
  120.53 +
  120.54 +protected:
  120.55 +  int c1_count() const     { return _c1_count; }
  120.56 +  int c2_count() const     { return _c2_count; }
  120.57 +  void set_c1_count(int x) { _c1_count = x;    }
  120.58 +  void set_c2_count(int x) { _c2_count = x;    }
  120.59 +
  120.60 +  enum EventType { CALL, LOOP, COMPILE };
  120.61 +  void print_event(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level);
  120.62 +  // Print policy-specific information if necessary
  120.63 +  virtual void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level) { }
  120.64 +  // Check if the method can be compiled, change level if necessary
  120.65 +  void compile(methodHandle mh, int bci, CompLevel level, TRAPS);
  120.66 +  // Submit a given method for compilation
  120.67 +  virtual void submit_compile(methodHandle mh, int bci, CompLevel level, TRAPS);
  120.68 +  // Simple methods are as good being compiled with C1 as C2.
  120.69 +  // This function tells if it's such a function.
  120.70 +  inline bool is_trivial(methodOop method);
  120.71 +
  120.72 +  // Predicate helpers are used by .*_predicate() methods as well as others.
  120.73 +  // They check the given counter values, multiplied by the scale against the thresholds.
  120.74 +  template<CompLevel level> static inline bool call_predicate_helper(int i, int b, double scale);
  120.75 +  template<CompLevel level> static inline bool loop_predicate_helper(int i, int b, double scale);
  120.76 +
  120.77 +  // Get a compilation level for a given method.
  120.78 +  static CompLevel comp_level(methodOop method) {
  120.79 +    nmethod *nm = method->code();
  120.80 +    if (nm != NULL && nm->is_in_use()) {
  120.81 +      return (CompLevel)nm->comp_level();
  120.82 +    }
  120.83 +    return CompLevel_none;
  120.84 +  }
  120.85 +  virtual void method_invocation_event(methodHandle method, methodHandle inlinee,
  120.86 +                                       CompLevel level, TRAPS);
  120.87 +  virtual void method_back_branch_event(methodHandle method, methodHandle inlinee,
  120.88 +                                        int bci, CompLevel level, TRAPS);
  120.89 +public:
  120.90 +  SimpleThresholdPolicy() : _c1_count(0), _c2_count(0) { }
  120.91 +  virtual int compiler_count(CompLevel comp_level) {
  120.92 +    if (is_c1_compile(comp_level)) return c1_count();
  120.93 +    if (is_c2_compile(comp_level)) return c2_count();
  120.94 +    return 0;
  120.95 +  }
  120.96 +  virtual void do_safepoint_work() { }
  120.97 +  virtual void delay_compilation(methodOop method) { }
  120.98 +  virtual void disable_compilation(methodOop method) { }
  120.99 +  // TODO: we should honour reprofiling requests in the future. Currently reprofiling
 120.100 +  // would happen but not to the extent we would ideally like.
 120.101 +  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) { }
 120.102 +  virtual nmethod* event(methodHandle method, methodHandle inlinee,
 120.103 +                         int branch_bci, int bci, CompLevel comp_level, TRAPS);
 120.104 +  // Select task is called by CompileBroker. We should return a task or NULL.
 120.105 +  virtual CompileTask* select_task(CompileQueue* compile_queue);
 120.106 +  // Tell the runtime if we think a given method is adequately profiled.
 120.107 +  virtual bool is_mature(methodOop method);
 120.108 +  // Initialize: set compiler thread count
 120.109 +  virtual void initialize();
 120.110 +};
   121.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   121.2 +++ b/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp	Wed Sep 22 12:54:51 2010 -0400
   121.3 @@ -0,0 +1,64 @@
   121.4 +/*
   121.5 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   121.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   121.7 + *
   121.8 + * This code is free software; you can redistribute it and/or modify it
   121.9 + * under the terms of the GNU General Public License version 2 only, as
  121.10 + * published by the Free Software Foundation.
  121.11 + *
  121.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  121.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  121.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  121.15 + * version 2 for more details (a copy is included in the LICENSE file that
  121.16 + * accompanied this code).
  121.17 + *
  121.18 + * You should have received a copy of the GNU General Public License version
  121.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  121.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  121.21 + *
  121.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  121.23 + * or visit www.oracle.com if you need additional information or have any
  121.24 + * questions.
  121.25 + *
  121.26 + */
  121.27 +
  121.28 +
  121.29 +template<CompLevel level>
  121.30 +bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale) {
  121.31 +  switch(level) {
  121.32 +  case CompLevel_none:
  121.33 +  case CompLevel_limited_profile:
  121.34 +    return (i > Tier3InvocationThreshold * scale) ||
  121.35 +           (i > Tier3MinInvocationThreshold * scale && i + b > Tier3CompileThreshold * scale);
  121.36 +  case CompLevel_full_profile:
  121.37 +   return (i > Tier4InvocationThreshold * scale) ||
  121.38 +          (i > Tier4MinInvocationThreshold * scale && i + b > Tier4CompileThreshold * scale);
  121.39 +  }
  121.40 +  return true;
  121.41 +}
  121.42 +
  121.43 +template<CompLevel level>
  121.44 +bool SimpleThresholdPolicy::loop_predicate_helper(int i, int b, double scale) {
  121.45 +  switch(level) {
  121.46 +  case CompLevel_none:
  121.47 +  case CompLevel_limited_profile:
  121.48 +    return b > Tier3BackEdgeThreshold * scale;
  121.49 +  case CompLevel_full_profile:
  121.50 +    return b > Tier4BackEdgeThreshold * scale;
  121.51 +  }
  121.52 +  return true;
  121.53 +}
  121.54 +
  121.55 +// Simple methods are as good being compiled with C1 as C2.
  121.56 +// Determine if a given method is such a case.
  121.57 +bool SimpleThresholdPolicy::is_trivial(methodOop method) {
  121.58 +  if (method->is_accessor()) return true;
  121.59 +  if (method->code() != NULL) {
  121.60 +    methodDataOop mdo = method->method_data();
  121.61 +    if (mdo != NULL && mdo->num_loops() == 0 &&
  121.62 +        (method->code_size() < 5  || (mdo->num_blocks() < 4) && (method->code_size() < 15))) {
  121.63 +      return !mdo->would_profile();
  121.64 +    }
  121.65 +  }
  121.66 +  return false;
  121.67 +}
   122.1 --- a/src/share/vm/runtime/sweeper.cpp	Tue Sep 21 06:58:44 2010 -0700
   122.2 +++ b/src/share/vm/runtime/sweeper.cpp	Wed Sep 22 12:54:51 2010 -0400
   122.3 @@ -368,8 +368,7 @@
   122.4          disconnected++;
   122.5        } else if (nm->is_speculatively_disconnected()) {
   122.6          // This method was previously considered for preemptive unloading and was not called since then
   122.7 -        nm->method()->invocation_counter()->decay();
   122.8 -        nm->method()->backedge_counter()->decay();
   122.9 +        CompilationPolicy::policy()->delay_compilation(nm->method());
  122.10          nm->make_not_entrant();
  122.11          made_not_entrant++;
  122.12        }
   123.1 --- a/src/share/vm/utilities/accessFlags.hpp	Tue Sep 21 06:58:44 2010 -0700
   123.2 +++ b/src/share/vm/utilities/accessFlags.hpp	Wed Sep 22 12:54:51 2010 -0400
   123.3 @@ -1,5 +1,5 @@
   123.4  /*
   123.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
   123.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   123.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   123.8   *
   123.9   * This code is free software; you can redistribute it and/or modify it
  123.10 @@ -39,7 +39,8 @@
  123.11    JVM_ACC_HAS_LOOPS               = 0x40000000,     // Method has loops
  123.12    JVM_ACC_LOOPS_FLAG_INIT         = (int)0x80000000,// The loop flag has been initialized
  123.13    JVM_ACC_QUEUED                  = 0x01000000,     // Queued for compilation
  123.14 -  JVM_ACC_NOT_TIER1_COMPILABLE    = 0x04000000,
  123.15 +  JVM_ACC_NOT_C2_COMPILABLE       = 0x02000000,
  123.16 +  JVM_ACC_NOT_C1_COMPILABLE       = 0x04000000,
  123.17    JVM_ACC_NOT_OSR_COMPILABLE      = 0x08000000,
  123.18    JVM_ACC_HAS_LINE_NUMBER_TABLE   = 0x00100000,
  123.19    JVM_ACC_HAS_CHECKED_EXCEPTIONS  = 0x00400000,
  123.20 @@ -47,6 +48,7 @@
  123.21    JVM_ACC_IS_OLD                  = 0x00010000,     // RedefineClasses() has replaced this method
  123.22    JVM_ACC_IS_OBSOLETE             = 0x00020000,     // RedefineClasses() has made method obsolete
  123.23    JVM_ACC_IS_PREFIXED_NATIVE      = 0x00040000,     // JVMTI has prefixed this native method
  123.24 +
  123.25    JVM_MH_INVOKE_BITS           // = 0x10001100      // MethodHandle.invoke quasi-native
  123.26                                    = (JVM_ACC_NATIVE | JVM_ACC_SYNTHETIC | JVM_ACC_MONITOR_MATCH),
  123.27  
  123.28 @@ -108,7 +110,8 @@
  123.29    bool has_loops               () const { return (_flags & JVM_ACC_HAS_LOOPS              ) != 0; }
  123.30    bool loops_flag_init         () const { return (_flags & JVM_ACC_LOOPS_FLAG_INIT        ) != 0; }
  123.31    bool queued_for_compilation  () const { return (_flags & JVM_ACC_QUEUED                 ) != 0; }
  123.32 -  bool is_not_tier1_compilable  () const { return (_flags & JVM_ACC_NOT_TIER1_COMPILABLE  ) != 0; }
  123.33 +  bool is_not_c1_compilable () const    { return (_flags & JVM_ACC_NOT_C1_COMPILABLE      ) != 0; }
  123.34 +  bool is_not_c2_compilable () const    { return (_flags & JVM_ACC_NOT_C2_COMPILABLE      ) != 0; }
  123.35    bool is_not_osr_compilable   () const { return (_flags & JVM_ACC_NOT_OSR_COMPILABLE     ) != 0; }
  123.36    bool has_linenumber_table    () const { return (_flags & JVM_ACC_HAS_LINE_NUMBER_TABLE  ) != 0; }
  123.37    bool has_checked_exceptions  () const { return (_flags & JVM_ACC_HAS_CHECKED_EXCEPTIONS ) != 0; }
  123.38 @@ -172,7 +175,8 @@
  123.39    void set_has_monitor_bytecodes()     { atomic_set_bits(JVM_ACC_HAS_MONITOR_BYTECODES);   }
  123.40    void set_has_loops()                 { atomic_set_bits(JVM_ACC_HAS_LOOPS);               }
  123.41    void set_loops_flag_init()           { atomic_set_bits(JVM_ACC_LOOPS_FLAG_INIT);         }
  123.42 -  void set_not_tier1_compilable()      { atomic_set_bits(JVM_ACC_NOT_TIER1_COMPILABLE);    }
  123.43 +  void set_not_c1_compilable()         { atomic_set_bits(JVM_ACC_NOT_C1_COMPILABLE);       }
  123.44 +  void set_not_c2_compilable()         { atomic_set_bits(JVM_ACC_NOT_C2_COMPILABLE);       }
  123.45    void set_not_osr_compilable()        { atomic_set_bits(JVM_ACC_NOT_OSR_COMPILABLE);      }
  123.46    void set_has_linenumber_table()      { atomic_set_bits(JVM_ACC_HAS_LINE_NUMBER_TABLE);   }
  123.47    void set_has_checked_exceptions()    { atomic_set_bits(JVM_ACC_HAS_CHECKED_EXCEPTIONS);  }
   124.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Sep 21 06:58:44 2010 -0700
   124.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Wed Sep 22 12:54:51 2010 -0400
   124.3 @@ -710,24 +710,41 @@
   124.4  
   124.5  // Enumeration to distinguish tiers of compilation
   124.6  enum CompLevel {
   124.7 -  CompLevel_none              = 0,
   124.8 -  CompLevel_fast_compile      = 1,
   124.9 -  CompLevel_full_optimization = 2,
  124.10 +  CompLevel_any               = -1,
  124.11 +  CompLevel_all               = -1,
  124.12 +  CompLevel_none              = 0,         // Interpreter
  124.13 +  CompLevel_simple            = 1,         // C1
  124.14 +  CompLevel_limited_profile   = 2,         // C1, invocation & backedge counters
  124.15 +  CompLevel_full_profile      = 3,         // C1, invocation & backedge counters + mdo
  124.16 +  CompLevel_full_optimization = 4,         // C2
  124.17  
  124.18 -  CompLevel_highest_tier      = CompLevel_full_optimization,
  124.19 -#ifdef TIERED
  124.20 -  CompLevel_initial_compile   = CompLevel_fast_compile
  124.21 +#if defined(COMPILER2)
  124.22 +  CompLevel_highest_tier      = CompLevel_full_optimization,  // pure C2 and tiered
  124.23 +#elif defined(COMPILER1)
  124.24 +  CompLevel_highest_tier      = CompLevel_simple,             // pure C1
  124.25  #else
  124.26 -  CompLevel_initial_compile   = CompLevel_full_optimization
  124.27 -#endif // TIERED
  124.28 +  CompLevel_highest_tier      = CompLevel_none,
  124.29 +#endif
  124.30 +
  124.31 +#if defined(TIERED)
  124.32 +  CompLevel_initial_compile   = CompLevel_full_profile        // tiered
  124.33 +#elif defined(COMPILER1)
  124.34 +  CompLevel_initial_compile   = CompLevel_simple              // pure C1
  124.35 +#elif defined(COMPILER2)
  124.36 +  CompLevel_initial_compile   = CompLevel_full_optimization   // pure C2
  124.37 +#else
  124.38 +  CompLevel_initial_compile   = CompLevel_none
  124.39 +#endif
  124.40  };
  124.41  
  124.42 -inline bool is_tier1_compile(int comp_level) {
  124.43 -  return comp_level == CompLevel_fast_compile;
  124.44 +inline bool is_c1_compile(int comp_level) {
  124.45 +  return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
  124.46  }
  124.47 -inline bool is_tier2_compile(int comp_level) {
  124.48 +
  124.49 +inline bool is_c2_compile(int comp_level) {
  124.50    return comp_level == CompLevel_full_optimization;
  124.51  }
  124.52 +
  124.53  inline bool is_highest_tier_compile(int comp_level) {
  124.54    return comp_level == CompLevel_highest_tier;
  124.55  }
  124.56 @@ -1017,22 +1034,22 @@
  124.57  
  124.58  // This routine takes eight bytes:
  124.59  inline u8 build_u8_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
  124.60 -  return  ( u8(c1) << 56 )  &  ( u8(0xff) << 56 )
  124.61 -       |  ( u8(c2) << 48 )  &  ( u8(0xff) << 48 )
  124.62 -       |  ( u8(c3) << 40 )  &  ( u8(0xff) << 40 )
  124.63 -       |  ( u8(c4) << 32 )  &  ( u8(0xff) << 32 )
  124.64 -       |  ( u8(c5) << 24 )  &  ( u8(0xff) << 24 )
  124.65 -       |  ( u8(c6) << 16 )  &  ( u8(0xff) << 16 )
  124.66 -       |  ( u8(c7) <<  8 )  &  ( u8(0xff) <<  8 )
  124.67 -       |  ( u8(c8) <<  0 )  &  ( u8(0xff) <<  0 );
  124.68 +  return  (( u8(c1) << 56 )  &  ( u8(0xff) << 56 ))
  124.69 +       |  (( u8(c2) << 48 )  &  ( u8(0xff) << 48 ))
  124.70 +       |  (( u8(c3) << 40 )  &  ( u8(0xff) << 40 ))
  124.71 +       |  (( u8(c4) << 32 )  &  ( u8(0xff) << 32 ))
  124.72 +       |  (( u8(c5) << 24 )  &  ( u8(0xff) << 24 ))
  124.73 +       |  (( u8(c6) << 16 )  &  ( u8(0xff) << 16 ))
  124.74 +       |  (( u8(c7) <<  8 )  &  ( u8(0xff) <<  8 ))
  124.75 +       |  (( u8(c8) <<  0 )  &  ( u8(0xff) <<  0 ));
  124.76  }
  124.77  
  124.78  // This routine takes four bytes:
  124.79  inline u4 build_u4_from( u1 c1, u1 c2, u1 c3, u1 c4 ) {
  124.80 -  return  ( u4(c1) << 24 )  &  0xff000000
  124.81 -       |  ( u4(c2) << 16 )  &  0x00ff0000
  124.82 -       |  ( u4(c3) <<  8 )  &  0x0000ff00
  124.83 -       |  ( u4(c4) <<  0 )  &  0x000000ff;
  124.84 +  return  (( u4(c1) << 24 )  &  0xff000000)
  124.85 +       |  (( u4(c2) << 16 )  &  0x00ff0000)
  124.86 +       |  (( u4(c3) <<  8 )  &  0x0000ff00)
  124.87 +       |  (( u4(c4) <<  0 )  &  0x000000ff);
  124.88  }
  124.89  
  124.90  // And this one works if the four bytes are contiguous in memory:
  124.91 @@ -1042,8 +1059,8 @@
  124.92  
  124.93  // Ditto for two-byte ints:
  124.94  inline u2 build_u2_from( u1 c1, u1 c2 ) {
  124.95 -  return  u2(( u2(c1) <<  8 )  &  0xff00
  124.96 -          |  ( u2(c2) <<  0 )  &  0x00ff);
  124.97 +  return  u2((( u2(c1) <<  8 )  &  0xff00)
  124.98 +          |  (( u2(c2) <<  0 )  &  0x00ff));
  124.99  }
 124.100  
 124.101  // And this one works if the two bytes are contiguous in memory:
 124.102 @@ -1066,14 +1083,14 @@
 124.103  // now (64-bit) longs
 124.104  
 124.105  inline jlong build_long_from( u1 c1, u1 c2, u1 c3, u1 c4, u1 c5, u1 c6, u1 c7, u1 c8 ) {
 124.106 -  return  ( jlong(c1) << 56 )  &  ( jlong(0xff) << 56 )
 124.107 -       |  ( jlong(c2) << 48 )  &  ( jlong(0xff) << 48 )
 124.108 -       |  ( jlong(c3) << 40 )  &  ( jlong(0xff) << 40 )
 124.109 -       |  ( jlong(c4) << 32 )  &  ( jlong(0xff) << 32 )
 124.110 -       |  ( jlong(c5) << 24 )  &  ( jlong(0xff) << 24 )
 124.111 -       |  ( jlong(c6) << 16 )  &  ( jlong(0xff) << 16 )
 124.112 -       |  ( jlong(c7) <<  8 )  &  ( jlong(0xff) <<  8 )
 124.113 -       |  ( jlong(c8) <<  0 )  &  ( jlong(0xff) <<  0 );
 124.114 +  return  (( jlong(c1) << 56 )  &  ( jlong(0xff) << 56 ))
 124.115 +       |  (( jlong(c2) << 48 )  &  ( jlong(0xff) << 48 ))
 124.116 +       |  (( jlong(c3) << 40 )  &  ( jlong(0xff) << 40 ))
 124.117 +       |  (( jlong(c4) << 32 )  &  ( jlong(0xff) << 32 ))
 124.118 +       |  (( jlong(c5) << 24 )  &  ( jlong(0xff) << 24 ))
 124.119 +       |  (( jlong(c6) << 16 )  &  ( jlong(0xff) << 16 ))
 124.120 +       |  (( jlong(c7) <<  8 )  &  ( jlong(0xff) <<  8 ))
 124.121 +       |  (( jlong(c8) <<  0 )  &  ( jlong(0xff) <<  0 ));
 124.122  }
 124.123  
 124.124  inline jlong build_long_from( u1* p ) {
   125.1 --- a/src/share/vm/utilities/macros.hpp	Tue Sep 21 06:58:44 2010 -0700
   125.2 +++ b/src/share/vm/utilities/macros.hpp	Wed Sep 22 12:54:51 2010 -0400
   125.3 @@ -71,6 +71,14 @@
   125.4  #define NOT_COMPILER2(code) code
   125.5  #endif // COMPILER2
   125.6  
   125.7 +#ifdef TIERED
   125.8 +#define TIERED_ONLY(code) code
   125.9 +#define NOT_TIERED(code)
  125.10 +#else
  125.11 +#define TIERED_ONLY(code)
  125.12 +#define NOT_TIERED(code) code
  125.13 +#endif // TIERED
  125.14 +
  125.15  
  125.16  // PRODUCT variant
  125.17  #ifdef PRODUCT
   126.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   126.2 +++ b/test/compiler/6982370/Test6982370.java	Wed Sep 22 12:54:51 2010 -0400
   126.3 @@ -0,0 +1,139 @@
   126.4 +/*
   126.5 + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
   126.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   126.7 + *
   126.8 + * This code is free software; you can redistribute it and/or modify it
   126.9 + * under the terms of the GNU General Public License version 2 only, as
  126.10 + * published by the Free Software Foundation.
  126.11 + *
  126.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  126.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  126.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  126.15 + * version 2 for more details (a copy is included in the LICENSE file that
  126.16 + * accompanied this code).
  126.17 + *
  126.18 + * You should have received a copy of the GNU General Public License version
  126.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  126.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  126.21 + *
  126.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  126.23 + * or visit www.oracle.com if you need additional information or have any
  126.24 + * questions.
  126.25 + *
  126.26 + */
  126.27 +
  126.28 +/**
  126.29 + * @test
  126.30 + * @bug 6982370
  126.31 + * @summary SIGBUS in jbyte_fill
  126.32 + *
  126.33 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+OptimizeFill -Xbatch Test6982370
  126.34 + */
  126.35 +
  126.36 +import java.util.Arrays;
  126.37 +
  126.38 +/**
  126.39 + * Exercise the fill routine for various short alignments and sizes
  126.40 + */
  126.41 +
  126.42 +public class Test6982370 {
  126.43 +    public static void main(String[] args) {
  126.44 +        test_byte();
  126.45 +        test_char();
  126.46 +        test_short();
  126.47 +        test_int();
  126.48 +        test_float();
  126.49 +    }
  126.50 +
  126.51 +    public static void test_int() {
  126.52 +        int[] a = new int[16];
  126.53 +        for (int i = 0; i < 200000; i++) {
  126.54 +            int start = i & 7;
  126.55 +            int end = start + ((i >> 4) & 7);
  126.56 +            int value = i;
  126.57 +            if ((i & 1) == 1) value = -value;
  126.58 +            Arrays.fill(a, start, end, value);
  126.59 +            boolean error = false;
  126.60 +            for (int j = start; j < end; j++) {
  126.61 +                if (a[j] != value) {
  126.62 +                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
  126.63 +                    error = true;
  126.64 +                }
  126.65 +            }
  126.66 +            if (error) throw new InternalError();
  126.67 +        }
  126.68 +    }
  126.69 +
  126.70 +    public static void test_float() {
  126.71 +        float[] a = new float[16];
  126.72 +        for (int i = 0; i < 200000; i++) {
  126.73 +            int start = i & 7;
  126.74 +            int end = start + ((i >> 4) & 7);
  126.75 +            float value = (float)i;
  126.76 +            if ((i & 1) == 1) value = -value;
  126.77 +            Arrays.fill(a, start, end, value);
  126.78 +            boolean error = false;
  126.79 +            for (int j = start; j < end; j++) {
  126.80 +                if (a[j] != value) {
  126.81 +                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
  126.82 +                    error = true;
  126.83 +                }
  126.84 +            }
  126.85 +            if (error) throw new InternalError();
  126.86 +        }
  126.87 +    }
  126.88 +    public static void test_char() {
  126.89 +        char[] a = new char[16];
  126.90 +        for (int i = 0; i < 200000; i++) {
  126.91 +            int start = i & 7;
  126.92 +            int end = start + ((i >> 4) & 7);
  126.93 +            char value = (char)i;
  126.94 +            Arrays.fill(a, start, end, value);
  126.95 +            boolean error = false;
  126.96 +            for (int j = start; j < end; j++) {
  126.97 +                if (a[j] != value) {
  126.98 +                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
  126.99 +                    error = true;
 126.100 +                }
 126.101 +            }
 126.102 +            if (error) throw new InternalError();
 126.103 +        }
 126.104 +    }
 126.105 +    public static void test_short() {
 126.106 +        short[] a = new short[16];
 126.107 +        for (int i = 0; i < 200000; i++) {
 126.108 +            int start = i & 7;
 126.109 +            int end = start + ((i >> 4) & 7);
 126.110 +            short value = (short)i;
 126.111 +            if ((i & 1) == 1) value = (short)-value;
 126.112 +            Arrays.fill(a, start, end, value);
 126.113 +            boolean error = false;
 126.114 +            for (int j = start; j < end; j++) {
 126.115 +                if (a[j] != value) {
 126.116 +                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
 126.117 +                    error = true;
 126.118 +                }
 126.119 +            }
 126.120 +            if (error) throw new InternalError();
 126.121 +        }
 126.122 +    }
 126.123 +
 126.124 +    public static void test_byte() {
 126.125 +        for (int i = 0; i < 200000; i++) {
 126.126 +            byte[] a = new byte[16];
 126.127 +            int start = i & 7;
 126.128 +            int end = start + ((i >> 4) & 7);
 126.129 +            byte value = (byte)i;
 126.130 +            if ((i & 1) == 1) value = (byte)-value;
 126.131 +            Arrays.fill(a, start, end, value);
 126.132 +            boolean error = false;
 126.133 +            for (int j = start; j < end; j++) {
 126.134 +                if (a[j] != value) {
 126.135 +                    System.err.println("a[" + j + "] = " + a[j] + " != " + value + " for " + a.length);
 126.136 +                    error = true;
 126.137 +                }
 126.138 +            }
 126.139 +            if (error) throw new InternalError();
 126.140 +        }
 126.141 +    }
 126.142 +}

mercurial