Merge

Tue, 08 Jan 2013 11:39:53 -0800

author
zgu
date
Tue, 08 Jan 2013 11:39:53 -0800
changeset 4401
37a3e8b7a1e9
parent 4400
ecd24264898b
parent 4396
6c3f47d964f3
child 4402
0c93d4818214

Merge

src/share/vm/oops/instanceKlass.cpp file | annotate | diff | comparison | revisions
src/share/vm/oops/instanceKlass.hpp file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Tue Jan 08 14:04:25 2013 -0500
     1.2 +++ b/.hgtags	Tue Jan 08 11:39:53 2013 -0800
     1.3 @@ -299,3 +299,7 @@
     1.4  b61d9c88b759d1594b8af1655598e8fa00393672 hs25-b11
     1.5  25bdce771bb3a7ae9825261a284d292cda700122 jdk8-b67
     1.6  a35a72dd2e1255239d31f796f9f693e49b36bc9f hs25-b12
     1.7 +121aa71316af6cd877bf455e775fa3fdbcdd4b65 jdk8-b68
     1.8 +b6c9c0109a608eedbb6b868d260952990e3c91fe hs25-b13
     1.9 +cb8a4e04bc8c104de8a2f67463c7e31232bf8d68 jdk8-b69
    1.10 +990bbd393c239d95310ccc38094e57923bbf1d4a hs25-b14
     2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Tue Jan 08 14:04:25 2013 -0500
     2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstMethod.java	Tue Jan 08 11:39:53 2013 -0800
     2.3 @@ -69,6 +69,8 @@
     2.4      signatureIndex             = new CIntField(type.getCIntegerField("_signature_index"), 0);
     2.5      idnum                      = new CIntField(type.getCIntegerField("_method_idnum"), 0);
     2.6      maxStack                   = new CIntField(type.getCIntegerField("_max_stack"), 0);
     2.7 +    maxLocals                  = new CIntField(type.getCIntegerField("_max_locals"), 0);
     2.8 +    sizeOfParameters           = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
     2.9  
    2.10      // start of byte code
    2.11      bytecodeOffset = type.getSize();
    2.12 @@ -96,6 +98,8 @@
    2.13    private static CIntField signatureIndex;
    2.14    private static CIntField idnum;
    2.15    private static CIntField maxStack;
    2.16 +  private static CIntField maxLocals;
    2.17 +  private static CIntField sizeOfParameters;
    2.18  
    2.19    // start of bytecode
    2.20    private static long bytecodeOffset;
    2.21 @@ -151,6 +155,14 @@
    2.22      return maxStack.getValue(this);
    2.23    }
    2.24  
    2.25 +  public long getMaxLocals() {
    2.26 +    return maxLocals.getValue(this);
    2.27 +  }
    2.28 +
    2.29 +  public long getSizeOfParameters() {
    2.30 +    return sizeOfParameters.getValue(this);
    2.31 +  }
    2.32 +
    2.33    public Symbol getName() {
    2.34      return getMethod().getName();
    2.35    }
    2.36 @@ -247,6 +259,8 @@
    2.37        visitor.doCInt(signatureIndex, true);
    2.38        visitor.doCInt(codeSize, true);
    2.39        visitor.doCInt(maxStack, true);
    2.40 +      visitor.doCInt(maxLocals, true);
    2.41 +      visitor.doCInt(sizeOfParameters, true);
    2.42      }
    2.43  
    2.44    // Accessors
     3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Tue Jan 08 14:04:25 2013 -0500
     3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/Method.java	Tue Jan 08 11:39:53 2013 -0800
     3.3 @@ -50,8 +50,6 @@
     3.4      constMethod                = type.getAddressField("_constMethod");
     3.5      methodData                 = type.getAddressField("_method_data");
     3.6      methodSize                 = new CIntField(type.getCIntegerField("_method_size"), 0);
     3.7 -    maxLocals                  = new CIntField(type.getCIntegerField("_max_locals"), 0);
     3.8 -    sizeOfParameters           = new CIntField(type.getCIntegerField("_size_of_parameters"), 0);
     3.9      accessFlags                = new CIntField(type.getCIntegerField("_access_flags"), 0);
    3.10      code                       = type.getAddressField("_code");
    3.11      vtableIndex                = new CIntField(type.getCIntegerField("_vtable_index"), 0);
    3.12 @@ -83,8 +81,6 @@
    3.13    private static AddressField  constMethod;
    3.14    private static AddressField  methodData;
    3.15    private static CIntField methodSize;
    3.16 -  private static CIntField maxLocals;
    3.17 -  private static CIntField sizeOfParameters;
    3.18    private static CIntField accessFlags;
    3.19    private static CIntField vtableIndex;
    3.20    private static CIntField invocationCounter;
    3.21 @@ -134,8 +130,8 @@
    3.22    /** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
    3.23    public long         getMethodSize()                 { return                methodSize.getValue(this);        }
    3.24    public long         getMaxStack()                   { return                getConstMethod().getMaxStack();   }
    3.25 -  public long         getMaxLocals()                  { return                maxLocals.getValue(this);         }
    3.26 -  public long         getSizeOfParameters()           { return                sizeOfParameters.getValue(this);  }
    3.27 +  public long         getMaxLocals()                  { return                getConstMethod().getMaxLocals();         }
    3.28 +  public long         getSizeOfParameters()           { return                getConstMethod().getSizeOfParameters();  }
    3.29    public long         getNameIndex()                  { return                getConstMethod().getNameIndex();  }
    3.30    public long         getSignatureIndex()             { return            getConstMethod().getSignatureIndex(); }
    3.31    public long         getGenericSignatureIndex()      { return     getConstMethod().getGenericSignatureIndex(); }
    3.32 @@ -282,8 +278,6 @@
    3.33  
    3.34    public void iterateFields(MetadataVisitor visitor) {
    3.35        visitor.doCInt(methodSize, true);
    3.36 -      visitor.doCInt(maxLocals, true);
    3.37 -      visitor.doCInt(sizeOfParameters, true);
    3.38        visitor.doCInt(accessFlags, true);
    3.39      }
    3.40  
     4.1 --- a/make/bsd/Makefile	Tue Jan 08 14:04:25 2013 -0500
     4.2 +++ b/make/bsd/Makefile	Tue Jan 08 11:39:53 2013 -0800
     4.3 @@ -47,10 +47,10 @@
     4.4  
     4.5  # Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
     4.6  # JDI binding on SA produces two binaries:
     4.7 -#  1. sa-jdi.jar       - This is build before building libjvm[_g].so
     4.8 +#  1. sa-jdi.jar       - This is built before building libjvm.so
     4.9  #                        Please refer to ./makefiles/sa.make
    4.10 -#  2. libsa[_g].so     - Native library for SA - This is built after
    4.11 -#                        libjsig[_g].so (signal interposition library)
    4.12 +#  2. libsa.so         - Native library for SA - This is built after
    4.13 +#                        libjsig.so (signal interposition library)
    4.14  #                        Please refer to ./makefiles/vm.make
    4.15  # If $(GAMMADIR)/agent dir is not present, SA components are not built.
    4.16  
    4.17 @@ -181,9 +181,9 @@
    4.18  #
    4.19  # What you get with each target:
    4.20  #
    4.21 -# debug*     - "thin" libjvm_g - debug info linked into the gamma_g launcher
    4.22 +# debug*     - "thin" libjvm - debug info linked into the gamma launcher
    4.23  # fastdebug* - optimized compile, but with asserts enabled
    4.24 -# jvmg*      - "fat" libjvm_g - debug info linked into libjvm_g.so
    4.25 +# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
    4.26  # optimized* - optimized compile, no asserts
    4.27  # profiled*  - gprof
    4.28  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
     5.1 --- a/make/bsd/makefiles/buildtree.make	Tue Jan 08 14:04:25 2013 -0500
     5.2 +++ b/make/bsd/makefiles/buildtree.make	Tue Jan 08 11:39:53 2013 -0800
     5.3 @@ -449,12 +449,7 @@
     5.4  	echo "  exit 0"; \
     5.5  	echo "fi"; \
     5.6  	echo ""; \
     5.7 -	echo "# Use gamma_g if it exists"; \
     5.8 -	echo ""; \
     5.9  	echo "GAMMA_PROG=gamma"; \
    5.10 -	echo "if [ -f gamma_g ]; then "; \
    5.11 -	echo "  GAMMA_PROG=gamma_g"; \
    5.12 -	echo "fi"; \
    5.13  	echo ""; \
    5.14  	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
    5.15  	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
     6.1 --- a/make/bsd/makefiles/debug.make	Tue Jan 08 14:04:25 2013 -0500
     6.2 +++ b/make/bsd/makefiles/debug.make	Tue Jan 08 11:39:53 2013 -0800
     6.3 @@ -1,5 +1,5 @@
     6.4  #
     6.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
     6.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     6.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8  #
     6.9  # This code is free software; you can redistribute it and/or modify it
    6.10 @@ -38,7 +38,6 @@
    6.11   "Please use 'make jvmg' to build debug JVM.                            \n" \
    6.12   "----------------------------------------------------------------------\n")
    6.13  
    6.14 -G_SUFFIX = _g
    6.15  VERSION = debug
    6.16  SYSDEFS += -DASSERT -DDEBUG
    6.17  PICFLAGS = DEFAULT
     7.1 --- a/make/bsd/makefiles/dtrace.make	Tue Jan 08 14:04:25 2013 -0500
     7.2 +++ b/make/bsd/makefiles/dtrace.make	Tue Jan 08 11:39:53 2013 -0800
     7.3 @@ -38,12 +38,10 @@
     7.4  # Bsd does not build libjvm_db, does not compile on macosx
     7.5  # disabled in build: rule in vm.make
     7.6  JVM_DB = libjvm_db
     7.7 -#LIBJVM_DB = libjvm_db.dylib
     7.8 -LIBJVM_DB = libjvm$(G_SUFFIX)_db.dylib
     7.9 +LIBJVM_DB = libjvm_db.dylib
    7.10  
    7.11  JVM_DTRACE = jvm_dtrace
    7.12 -#LIBJVM_DTRACE = libjvm_dtrace.dylib
    7.13 -LIBJVM_DTRACE = libjvm$(G_SUFFIX)_dtrace.dylib
    7.14 +LIBJVM_DTRACE = libjvm_dtrace.dylib
    7.15  
    7.16  JVMOFFS = JvmOffsets
    7.17  JVMOFFS.o = $(JVMOFFS).o
    7.18 @@ -80,9 +78,7 @@
    7.19  ifneq ("${ISA}","${BUILDARCH}")
    7.20  
    7.21  XLIBJVM_DB = 64/$(LIBJVM_DB)
    7.22 -XLIBJVM_DB_G = 64/$(LIBJVM_DB_G)
    7.23  XLIBJVM_DTRACE = 64/$(LIBJVM_DTRACE)
    7.24 -XLIBJVM_DTRACE_G = 64/$(LIBJVM_DTRACE_G)
    7.25  XARCH = $(subst sparcv9,v9,$(shell echo $(ISA)))
    7.26  
    7.27  $(XLIBJVM_DB): $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
    7.28 @@ -90,14 +86,12 @@
    7.29  	$(QUIETLY) mkdir -p 64/ ; \
    7.30  	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. -I$(GENERATED) \
    7.31  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c #-lc
    7.32 -#	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
    7.33  
    7.34  $(XLIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
    7.35  	@echo Making $@
    7.36  	$(QUIETLY) mkdir -p 64/ ; \
    7.37  	$(CC) $(SYMFLAG) -xarch=$(XARCH) -D$(TYPE) -I. \
    7.38  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
    7.39 -#	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
    7.40  
    7.41  endif # ifneq ("${ISA}","${BUILDARCH}")
    7.42  
    7.43 @@ -141,13 +135,11 @@
    7.44  	@echo Making $@
    7.45  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
    7.46  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -Wall # -lc
    7.47 -#	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
    7.48  
    7.49  $(LIBJVM_DTRACE): $(DTRACE_SRCDIR)/$(JVM_DTRACE).c $(XLIBJVM_DTRACE) $(DTRACE_SRCDIR)/$(JVM_DTRACE).h $(LIBJVM_DTRACE_MAPFILE)
    7.50  	@echo Making $@
    7.51  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
    7.52  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c #-lc -lthread -ldoor
    7.53 -#	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
    7.54  
    7.55  #$(DTRACE).d: $(DTRACE_SRCDIR)/hotspot.d $(DTRACE_SRCDIR)/hotspot_jni.d \
    7.56  #             $(DTRACE_SRCDIR)/hs_private.d $(DTRACE_SRCDIR)/jhelper.d
     8.1 --- a/make/bsd/makefiles/fastdebug.make	Tue Jan 08 14:04:25 2013 -0500
     8.2 +++ b/make/bsd/makefiles/fastdebug.make	Tue Jan 08 11:39:53 2013 -0800
     8.3 @@ -1,5 +1,5 @@
     8.4  #
     8.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
     8.6 +# Copyright (c) 1999, 2012 Oracle and/or its affiliates. All rights reserved.
     8.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     8.8  #
     8.9  # This code is free software; you can redistribute it and/or modify it
    8.10 @@ -58,7 +58,6 @@
    8.11  # Linker mapfile
    8.12  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    8.13  
    8.14 -G_SUFFIX = _g
    8.15  VERSION = optimized
    8.16  SYSDEFS += -DASSERT -DFASTDEBUG
    8.17  PICFLAGS = DEFAULT
     9.1 --- a/make/bsd/makefiles/gcc.make	Tue Jan 08 14:04:25 2013 -0500
     9.2 +++ b/make/bsd/makefiles/gcc.make	Tue Jan 08 11:39:53 2013 -0800
     9.3 @@ -284,9 +284,9 @@
     9.4  
     9.5  # Use the stabs format for debugging information (this is the default
     9.6  # on gcc-2.91). It's good enough, has all the information about line
     9.7 -# numbers and local variables, and libjvm_g.so is only about 16M.
     9.8 +# numbers and local variables, and libjvm.so is only about 16M.
     9.9  # Change this back to "-g" if you want the most expressive format.
    9.10 -# (warning: that could easily inflate libjvm_g.so to 150M!)
    9.11 +# (warning: that could easily inflate libjvm.so to 150M!)
    9.12  # Note: The Itanium gcc compiler crashes when using -gstabs.
    9.13  DEBUG_CFLAGS/ia64  = -g
    9.14  DEBUG_CFLAGS/amd64 = -g
    10.1 --- a/make/bsd/makefiles/jsig.make	Tue Jan 08 14:04:25 2013 -0500
    10.2 +++ b/make/bsd/makefiles/jsig.make	Tue Jan 08 11:39:53 2013 -0800
    10.3 @@ -1,5 +1,5 @@
    10.4  #
    10.5 -# Copyright (c) 2005, 2009, Oracle and/or its affiliates. All rights reserved.
    10.6 +# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    10.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    10.8  #
    10.9  # This code is free software; you can redistribute it and/or modify it
   10.10 @@ -24,16 +24,13 @@
   10.11  
   10.12  # Rules to build signal interposition library, used by vm.make
   10.13  
   10.14 -# libjsig[_g].so: signal interposition library
   10.15 +# libjsig.so: signal interposition library
   10.16  JSIG   = jsig
   10.17 -JSIG_G = $(JSIG)$(G_SUFFIX)
   10.18  
   10.19  ifeq ($(OS_VENDOR), Darwin)
   10.20    LIBJSIG   = lib$(JSIG).dylib
   10.21 -  LIBJSIG_G = lib$(JSIG_G).dylib
   10.22  else
   10.23    LIBJSIG   = lib$(JSIG).so
   10.24 -  LIBJSIG_G = lib$(JSIG_G).so
   10.25  endif
   10.26  
   10.27  JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
   10.28 @@ -58,7 +55,6 @@
   10.29  	@echo Making signal interposition lib...
   10.30  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
   10.31                           $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $<
   10.32 -	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
   10.33  
   10.34  install_jsig: $(LIBJSIG)
   10.35  	@echo "Copying $(LIBJSIG) to $(DEST_JSIG)"
    11.1 --- a/make/bsd/makefiles/jvmg.make	Tue Jan 08 14:04:25 2013 -0500
    11.2 +++ b/make/bsd/makefiles/jvmg.make	Tue Jan 08 11:39:53 2013 -0800
    11.3 @@ -37,7 +37,6 @@
    11.4  # Linker mapfile
    11.5  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    11.6  
    11.7 -G_SUFFIX = _g
    11.8  VERSION = debug
    11.9  SYSDEFS += -DASSERT -DDEBUG
   11.10  PICFLAGS = DEFAULT
    12.1 --- a/make/bsd/makefiles/mapfile-vers-debug	Tue Jan 08 14:04:25 2013 -0500
    12.2 +++ b/make/bsd/makefiles/mapfile-vers-debug	Tue Jan 08 11:39:53 2013 -0800
    12.3 @@ -126,8 +126,9 @@
    12.4                  JVM_GetClassModifiers;
    12.5                  JVM_GetClassName;
    12.6                  JVM_GetClassNameUTF;
    12.7 -		JVM_GetClassSignature;
    12.8 +		        JVM_GetClassSignature;
    12.9                  JVM_GetClassSigners;
   12.10 +                JVM_GetClassTypeAnnotations;
   12.11                  JVM_GetComponentType;
   12.12                  JVM_GetDeclaredClasses;
   12.13                  JVM_GetDeclaringClass;
    13.1 --- a/make/bsd/makefiles/mapfile-vers-product	Tue Jan 08 14:04:25 2013 -0500
    13.2 +++ b/make/bsd/makefiles/mapfile-vers-product	Tue Jan 08 11:39:53 2013 -0800
    13.3 @@ -128,6 +128,7 @@
    13.4                  JVM_GetClassNameUTF;
    13.5                  JVM_GetClassSignature;
    13.6                  JVM_GetClassSigners;
    13.7 +                JVM_GetClassTypeAnnotations;
    13.8                  JVM_GetComponentType;
    13.9                  JVM_GetDeclaredClasses;
   13.10                  JVM_GetDeclaringClass;
    14.1 --- a/make/bsd/makefiles/optimized.make	Tue Jan 08 14:04:25 2013 -0500
    14.2 +++ b/make/bsd/makefiles/optimized.make	Tue Jan 08 11:39:53 2013 -0800
    14.3 @@ -1,5 +1,5 @@
    14.4  #
    14.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    14.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    14.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8  #
    14.9  # This code is free software; you can redistribute it and/or modify it
   14.10 @@ -40,5 +40,4 @@
   14.11  # Linker mapfile
   14.12  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
   14.13  
   14.14 -G_SUFFIX =
   14.15  VERSION = optimized
    15.1 --- a/make/bsd/makefiles/product.make	Tue Jan 08 14:04:25 2013 -0500
    15.2 +++ b/make/bsd/makefiles/product.make	Tue Jan 08 11:39:53 2013 -0800
    15.3 @@ -40,7 +40,6 @@
    15.4  # Linker mapfile
    15.5  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-product
    15.6  
    15.7 -G_SUFFIX =
    15.8  SYSDEFS += -DPRODUCT
    15.9  VERSION = optimized
   15.10  
    16.1 --- a/make/bsd/makefiles/saproc.make	Tue Jan 08 14:04:25 2013 -0500
    16.2 +++ b/make/bsd/makefiles/saproc.make	Tue Jan 08 11:39:53 2013 -0800
    16.3 @@ -24,16 +24,13 @@
    16.4  
    16.5  # Rules to build serviceability agent library, used by vm.make
    16.6  
    16.7 -# libsaproc[_g].so: serviceability agent
    16.8 +# libsaproc.so: serviceability agent
    16.9  SAPROC   = saproc
   16.10 -SAPROC_G = $(SAPROC)$(G_SUFFIX)
   16.11  
   16.12  ifeq ($(OS_VENDOR), Darwin)
   16.13    LIBSAPROC   = lib$(SAPROC).dylib
   16.14 -  LIBSAPROC_G = lib$(SAPROC_G).dylib
   16.15  else
   16.16    LIBSAPROC   = lib$(SAPROC).so
   16.17 -  LIBSAPROC_G = lib$(SAPROC_G).so
   16.18  endif
   16.19  
   16.20  AGENT_DIR = $(GAMMADIR)/agent
   16.21 @@ -114,7 +111,6 @@
   16.22  	           $(SA_DEBUG_CFLAGS)                                   \
   16.23  	           -o $@                                                \
   16.24  	           $(SALIBS)
   16.25 -	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
   16.26  
   16.27  install_saproc: $(BUILDLIBSAPROC)
   16.28  	$(QUIETLY) if [ -e $(LIBSAPROC) ] ; then             \
    17.1 --- a/make/bsd/makefiles/vm.make	Tue Jan 08 14:04:25 2013 -0500
    17.2 +++ b/make/bsd/makefiles/vm.make	Tue Jan 08 11:39:53 2013 -0800
    17.3 @@ -138,11 +138,9 @@
    17.4  JVM    = jvm
    17.5  ifeq ($(OS_VENDOR), Darwin)
    17.6    LIBJVM   = lib$(JVM).dylib
    17.7 -  LIBJVM_G = lib$(JVM)$(G_SUFFIX).dylib
    17.8    CFLAGS  += -D_XOPEN_SOURCE -D_DARWIN_C_SOURCE
    17.9  else
   17.10    LIBJVM   = lib$(JVM).so
   17.11 -  LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
   17.12  endif
   17.13  
   17.14  SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
   17.15 @@ -314,7 +312,6 @@
   17.16  		       $(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM); \
   17.17  	    $(LINK_LIB.CXX/POST_HOOK)                                    \
   17.18  	    rm -f $@.1; ln -s $@ $@.1;                                  \
   17.19 -	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
   17.20  	}
   17.21  
   17.22  DEST_JVM = $(JDK_LIBDIR)/$(VM_SUBDIR)/$(LIBJVM)
    18.1 --- a/make/hotspot_version	Tue Jan 08 14:04:25 2013 -0500
    18.2 +++ b/make/hotspot_version	Tue Jan 08 11:39:53 2013 -0800
    18.3 @@ -35,7 +35,7 @@
    18.4  
    18.5  HS_MAJOR_VER=25
    18.6  HS_MINOR_VER=0
    18.7 -HS_BUILD_NUMBER=13
    18.8 +HS_BUILD_NUMBER=15
    18.9  
   18.10  JDK_MAJOR_VER=1
   18.11  JDK_MINOR_VER=8
    19.1 --- a/make/linux/Makefile	Tue Jan 08 14:04:25 2013 -0500
    19.2 +++ b/make/linux/Makefile	Tue Jan 08 11:39:53 2013 -0800
    19.3 @@ -47,10 +47,10 @@
    19.4  
    19.5  # Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
    19.6  # JDI binding on SA produces two binaries:
    19.7 -#  1. sa-jdi.jar       - This is build before building libjvm[_g].so
    19.8 +#  1. sa-jdi.jar       - This is built before building libjvm.so
    19.9  #                        Please refer to ./makefiles/sa.make
   19.10 -#  2. libsa[_g].so     - Native library for SA - This is built after
   19.11 -#                        libjsig[_g].so (signal interposition library)
   19.12 +#  2. libsa.so         - Native library for SA - This is built after
   19.13 +#                        libjsig.so (signal interposition library)
   19.14  #                        Please refer to ./makefiles/vm.make
   19.15  # If $(GAMMADIR)/agent dir is not present, SA components are not built.
   19.16  
   19.17 @@ -181,9 +181,9 @@
   19.18  #
   19.19  # What you get with each target:
   19.20  #
   19.21 -# debug*     - "thin" libjvm_g - debug info linked into the gamma_g launcher
   19.22 +# debug*     - "thin" libjvm - debug info linked into the gamma launcher
   19.23  # fastdebug* - optimized compile, but with asserts enabled
   19.24 -# jvmg*      - "fat" libjvm_g - debug info linked into libjvm_g.so
   19.25 +# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
   19.26  # optimized* - optimized compile, no asserts
   19.27  # profiled*  - gprof
   19.28  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
    20.1 --- a/make/linux/makefiles/buildtree.make	Tue Jan 08 14:04:25 2013 -0500
    20.2 +++ b/make/linux/makefiles/buildtree.make	Tue Jan 08 11:39:53 2013 -0800
    20.3 @@ -442,12 +442,7 @@
    20.4  	echo "  exit 0"; \
    20.5  	echo "fi"; \
    20.6  	echo ""; \
    20.7 -	echo "# Use gamma_g if it exists"; \
    20.8 -	echo ""; \
    20.9  	echo "GAMMA_PROG=gamma"; \
   20.10 -	echo "if [ -f gamma_g ]; then "; \
   20.11 -	echo "  GAMMA_PROG=gamma_g"; \
   20.12 -	echo "fi"; \
   20.13  	echo ""; \
   20.14  	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
   20.15  	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
    21.1 --- a/make/linux/makefiles/debug.make	Tue Jan 08 14:04:25 2013 -0500
    21.2 +++ b/make/linux/makefiles/debug.make	Tue Jan 08 11:39:53 2013 -0800
    21.3 @@ -1,5 +1,5 @@
    21.4  #
    21.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    21.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    21.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    21.8  #
    21.9  # This code is free software; you can redistribute it and/or modify it
   21.10 @@ -38,7 +38,6 @@
   21.11   "Please use 'make jvmg' to build debug JVM.                            \n" \
   21.12   "----------------------------------------------------------------------\n")
   21.13  
   21.14 -G_SUFFIX = _g
   21.15  VERSION = debug
   21.16  SYSDEFS += -DASSERT -DDEBUG
   21.17  PICFLAGS = DEFAULT
    22.1 --- a/make/linux/makefiles/fastdebug.make	Tue Jan 08 14:04:25 2013 -0500
    22.2 +++ b/make/linux/makefiles/fastdebug.make	Tue Jan 08 11:39:53 2013 -0800
    22.3 @@ -1,5 +1,5 @@
    22.4  #
    22.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    22.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    22.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.8  #
    22.9  # This code is free software; you can redistribute it and/or modify it
   22.10 @@ -58,7 +58,6 @@
   22.11  # Linker mapfile
   22.12  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
   22.13  
   22.14 -G_SUFFIX = _g
   22.15  VERSION = optimized
   22.16  SYSDEFS += -DASSERT -DFASTDEBUG
   22.17  PICFLAGS = DEFAULT
    23.1 --- a/make/linux/makefiles/gcc.make	Tue Jan 08 14:04:25 2013 -0500
    23.2 +++ b/make/linux/makefiles/gcc.make	Tue Jan 08 11:39:53 2013 -0800
    23.3 @@ -229,9 +229,9 @@
    23.4  else
    23.5    # Use the stabs format for debugging information (this is the default
    23.6    # on gcc-2.91). It's good enough, has all the information about line
    23.7 -  # numbers and local variables, and libjvm_g.so is only about 16M.
    23.8 +  # numbers and local variables, and libjvm.so is only about 16M.
    23.9    # Change this back to "-g" if you want the most expressive format.
   23.10 -  # (warning: that could easily inflate libjvm_g.so to 150M!)
   23.11 +  # (warning: that could easily inflate libjvm.so to 150M!)
   23.12    # Note: The Itanium gcc compiler crashes when using -gstabs.
   23.13    DEBUG_CFLAGS/ia64  = -g
   23.14    DEBUG_CFLAGS/amd64 = -g
    24.1 --- a/make/linux/makefiles/jsig.make	Tue Jan 08 14:04:25 2013 -0500
    24.2 +++ b/make/linux/makefiles/jsig.make	Tue Jan 08 11:39:53 2013 -0800
    24.3 @@ -24,17 +24,12 @@
    24.4  
    24.5  # Rules to build signal interposition library, used by vm.make
    24.6  
    24.7 -# libjsig[_g].so: signal interposition library
    24.8 +# libjsig.so: signal interposition library
    24.9  JSIG = jsig
   24.10  LIBJSIG = lib$(JSIG).so
   24.11  
   24.12 -JSIG_G    = $(JSIG)$(G_SUFFIX)
   24.13 -LIBJSIG_G = lib$(JSIG_G).so
   24.14 -
   24.15  LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
   24.16  LIBJSIG_DIZ         = lib$(JSIG).diz
   24.17 -LIBJSIG_G_DEBUGINFO = lib$(JSIG_G).debuginfo
   24.18 -LIBJSIG_G_DIZ       = lib$(JSIG_G).diz
   24.19  
   24.20  JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
   24.21  
   24.22 @@ -60,7 +55,6 @@
   24.23  	@echo Making signal interposition lib...
   24.24  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
   24.25                           $(LFLAGS_JSIG) $(JSIG_DEBUG_CFLAGS) -o $@ $< -ldl
   24.26 -	$(QUIETLY) [ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
   24.27  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   24.28  	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBJSIG_DEBUGINFO)
   24.29  	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBJSIG_DEBUGINFO) $@
   24.30 @@ -72,11 +66,9 @@
   24.31      # implied else here is no stripping at all
   24.32      endif
   24.33    endif
   24.34 -	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
   24.35    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   24.36 -	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
   24.37 -	$(RM) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
   24.38 -	[ -f $(LIBJSIG_G_DIZ) ] || { ln -s $(LIBJSIG_DIZ) $(LIBJSIG_G_DIZ); }
   24.39 +	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
   24.40 +	$(RM) $(LIBJSIG_DEBUGINFO)
   24.41    endif
   24.42  endif
   24.43  
    25.1 --- a/make/linux/makefiles/jvmg.make	Tue Jan 08 14:04:25 2013 -0500
    25.2 +++ b/make/linux/makefiles/jvmg.make	Tue Jan 08 11:39:53 2013 -0800
    25.3 @@ -37,7 +37,6 @@
    25.4  # Linker mapfile
    25.5  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
    25.6  
    25.7 -G_SUFFIX = _g
    25.8  VERSION = debug
    25.9  SYSDEFS += -DASSERT -DDEBUG
   25.10  PICFLAGS = DEFAULT
    26.1 --- a/make/linux/makefiles/mapfile-vers-debug	Tue Jan 08 14:04:25 2013 -0500
    26.2 +++ b/make/linux/makefiles/mapfile-vers-debug	Tue Jan 08 11:39:53 2013 -0800
    26.3 @@ -124,6 +124,7 @@
    26.4                  JVM_GetClassNameUTF;
    26.5  		JVM_GetClassSignature;
    26.6                  JVM_GetClassSigners;
    26.7 +                JVM_GetClassTypeAnnotations;
    26.8                  JVM_GetComponentType;
    26.9                  JVM_GetDeclaredClasses;
   26.10                  JVM_GetDeclaringClass;
    27.1 --- a/make/linux/makefiles/mapfile-vers-product	Tue Jan 08 14:04:25 2013 -0500
    27.2 +++ b/make/linux/makefiles/mapfile-vers-product	Tue Jan 08 11:39:53 2013 -0800
    27.3 @@ -124,6 +124,7 @@
    27.4                  JVM_GetClassNameUTF;
    27.5                  JVM_GetClassSignature;
    27.6                  JVM_GetClassSigners;
    27.7 +                JVM_GetClassTypeAnnotations;
    27.8                  JVM_GetComponentType;
    27.9                  JVM_GetDeclaredClasses;
   27.10                  JVM_GetDeclaringClass;
    28.1 --- a/make/linux/makefiles/optimized.make	Tue Jan 08 14:04:25 2013 -0500
    28.2 +++ b/make/linux/makefiles/optimized.make	Tue Jan 08 11:39:53 2013 -0800
    28.3 @@ -1,5 +1,5 @@
    28.4  #
    28.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved.
    28.6 +# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    28.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    28.8  #
    28.9  # This code is free software; you can redistribute it and/or modify it
   28.10 @@ -40,5 +40,4 @@
   28.11  # Linker mapfile
   28.12  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
   28.13  
   28.14 -G_SUFFIX =
   28.15  VERSION = optimized
    29.1 --- a/make/linux/makefiles/product.make	Tue Jan 08 14:04:25 2013 -0500
    29.2 +++ b/make/linux/makefiles/product.make	Tue Jan 08 11:39:53 2013 -0800
    29.3 @@ -40,7 +40,6 @@
    29.4  # Linker mapfile
    29.5  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-product
    29.6  
    29.7 -G_SUFFIX =
    29.8  SYSDEFS += -DPRODUCT
    29.9  VERSION = optimized
   29.10  
    30.1 --- a/make/linux/makefiles/saproc.make	Tue Jan 08 14:04:25 2013 -0500
    30.2 +++ b/make/linux/makefiles/saproc.make	Tue Jan 08 11:39:53 2013 -0800
    30.3 @@ -26,18 +26,13 @@
    30.4  
    30.5  # Rules to build serviceability agent library, used by vm.make
    30.6  
    30.7 -# libsaproc[_g].so: serviceability agent
    30.8 +# libsaproc.so: serviceability agent
    30.9  
   30.10  SAPROC = saproc
   30.11  LIBSAPROC = lib$(SAPROC).so
   30.12  
   30.13 -SAPROC_G = $(SAPROC)$(G_SUFFIX)
   30.14 -LIBSAPROC_G = lib$(SAPROC_G).so
   30.15 -
   30.16  LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
   30.17  LIBSAPROC_DIZ         = lib$(SAPROC).diz
   30.18 -LIBSAPROC_G_DEBUGINFO = lib$(SAPROC_G).debuginfo
   30.19 -LIBSAPROC_G_DIZ       = lib$(SAPROC_G).diz
   30.20  
   30.21  AGENT_DIR = $(GAMMADIR)/agent
   30.22  
   30.23 @@ -99,7 +94,6 @@
   30.24  	           $(SA_DEBUG_CFLAGS)                                   \
   30.25  	           -o $@                                                \
   30.26  	           -lthread_db
   30.27 -	$(QUIETLY) [ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
   30.28  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   30.29  	$(QUIETLY) $(OBJCOPY) --only-keep-debug $@ $(LIBSAPROC_DEBUGINFO)
   30.30  	$(QUIETLY) $(OBJCOPY) --add-gnu-debuglink=$(LIBSAPROC_DEBUGINFO) $@
   30.31 @@ -111,11 +105,9 @@
   30.32      # implied else here is no stripping at all
   30.33      endif
   30.34    endif
   30.35 -	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
   30.36    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   30.37 -	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
   30.38 -	$(RM) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
   30.39 -	[ -f $(LIBSAPROC_G_DIZ) ] || { ln -s $(LIBSAPROC_DIZ) $(LIBSAPROC_G_DIZ); }
   30.40 +	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
   30.41 +	$(RM) $(LIBSAPROC_DEBUGINFO)
   30.42    endif
   30.43  endif
   30.44  
    31.1 --- a/make/linux/makefiles/vm.make	Tue Jan 08 14:04:25 2013 -0500
    31.2 +++ b/make/linux/makefiles/vm.make	Tue Jan 08 11:39:53 2013 -0800
    31.3 @@ -138,12 +138,9 @@
    31.4  
    31.5  JVM      = jvm
    31.6  LIBJVM   = lib$(JVM).so
    31.7 -LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
    31.8  
    31.9  LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
   31.10  LIBJVM_DIZ         = lib$(JVM).diz
   31.11 -LIBJVM_G_DEBUGINFO = lib$(JVM)$(G_SUFFIX).debuginfo
   31.12 -LIBJVM_G_DIZ       = lib$(JVM)$(G_SUFFIX).diz
   31.13  
   31.14  SPECIAL_PATHS:=adlc c1 gc_implementation opto shark libadt
   31.15  
   31.16 @@ -323,7 +320,6 @@
   31.17  		       $(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM);       \
   31.18  	    $(LINK_LIB.CXX/POST_HOOK)                                    \
   31.19  	    rm -f $@.1; ln -s $@ $@.1;                                  \
   31.20 -	    [ -f $(LIBJVM_G) ] || { ln -s $@ $(LIBJVM_G); ln -s $@.1 $(LIBJVM_G).1; }; \
   31.21              if [ \"$(CROSS_COMPILE_ARCH)\" = \"\" ] ; then                    \
   31.22  	      if [ -x /usr/sbin/selinuxenabled ] ; then                 \
   31.23  	        /usr/sbin/selinuxenabled;                               \
   31.24 @@ -348,11 +344,9 @@
   31.25      # implied else here is no stripping at all
   31.26      endif
   31.27    endif
   31.28 -	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   31.29    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   31.30 -	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   31.31 -	$(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   31.32 -	[ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); }
   31.33 +	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
   31.34 +	$(RM) $(LIBJVM_DEBUGINFO)
   31.35    endif
   31.36  endif
   31.37  
    32.1 --- a/make/solaris/Makefile	Tue Jan 08 14:04:25 2013 -0500
    32.2 +++ b/make/solaris/Makefile	Tue Jan 08 11:39:53 2013 -0800
    32.3 @@ -38,10 +38,10 @@
    32.4  
    32.5  # Along with VM, Serviceability Agent (SA) is built for SA/JDI binding.
    32.6  # JDI binding on SA produces two binaries:
    32.7 -#  1. sa-jdi.jar       - This is build before building libjvm[_g].so
    32.8 +#  1. sa-jdi.jar       - This is built before building libjvm.so
    32.9  #                        Please refer to ./makefiles/sa.make
   32.10 -#  2. libsaproc[_g].so - Native library for SA - This is built after
   32.11 -#                        libjsig[_g].so (signal interposition library)
   32.12 +#  2. libsaproc.so     - Native library for SA - This is built after
   32.13 +#                        libjsig.so (signal interposition library)
   32.14  #                        Please refer to ./makefiles/vm.make
   32.15  # If $(GAMMADIR)/agent dir is not present, SA components are not built.
   32.16  
   32.17 @@ -141,9 +141,9 @@
   32.18  #
   32.19  # What you get with each target:
   32.20  #
   32.21 -# debug*     - "thin" libjvm_g - debug info linked into the gamma_g launcher
   32.22 +# debug*     - "thin" libjvm - debug info linked into the gamma launcher
   32.23  # fastdebug* - optimized compile, but with asserts enabled
   32.24 -# jvmg*      - "fat" libjvm_g - debug info linked into libjvm_g.so
   32.25 +# jvmg*      - "fat" libjvm - debug info linked into libjvm.so
   32.26  # optimized* - optimized compile, no asserts
   32.27  # profiled*  - gprof
   32.28  # product*   - the shippable thing:  optimized compile, no asserts, -DPRODUCT
    33.1 --- a/make/solaris/makefiles/buildtree.make	Tue Jan 08 14:04:25 2013 -0500
    33.2 +++ b/make/solaris/makefiles/buildtree.make	Tue Jan 08 11:39:53 2013 -0800
    33.3 @@ -436,12 +436,7 @@
    33.4  	echo "  exit 0"; \
    33.5  	echo "fi"; \
    33.6  	echo ""; \
    33.7 -	echo "# Use gamma_g if it exists"; \
    33.8 -	echo ""; \
    33.9  	echo "GAMMA_PROG=gamma"; \
   33.10 -	echo "if [ -f gamma_g ]; then "; \
   33.11 -	echo "  GAMMA_PROG=gamma_g"; \
   33.12 -	echo "fi"; \
   33.13  	echo ""; \
   33.14  	echo "if [ \"$(OS_VENDOR)\" = \"Darwin\" ]; then "; \
   33.15  	echo "  # Ensure architecture for gamma and JAVA_HOME is the same."; \
    34.1 --- a/make/solaris/makefiles/debug.make	Tue Jan 08 14:04:25 2013 -0500
    34.2 +++ b/make/solaris/makefiles/debug.make	Tue Jan 08 11:39:53 2013 -0800
    34.3 @@ -1,5 +1,5 @@
    34.4  #
    34.5 -# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved.
    34.6 +# Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    34.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8  #
    34.9  # This code is free software; you can redistribute it and/or modify it
   34.10 @@ -53,7 +53,6 @@
   34.11   "Please use 'gnumake jvmg' to build debug JVM.                            \n" \
   34.12   "-------------------------------------------------------------------------\n")
   34.13  
   34.14 -G_SUFFIX = _g
   34.15  VERSION = debug
   34.16  SYSDEFS += -DASSERT -DDEBUG
   34.17  PICFLAGS = DEFAULT
    35.1 --- a/make/solaris/makefiles/dtrace.make	Tue Jan 08 14:04:25 2013 -0500
    35.2 +++ b/make/solaris/makefiles/dtrace.make	Tue Jan 08 11:39:53 2013 -0800
    35.3 @@ -39,21 +39,15 @@
    35.4  
    35.5  JVM_DB = libjvm_db
    35.6  LIBJVM_DB = libjvm_db.so
    35.7 -LIBJVM_DB_G = libjvm$(G_SUFFIX)_db.so
    35.8  
    35.9  LIBJVM_DB_DEBUGINFO   = libjvm_db.debuginfo
   35.10  LIBJVM_DB_DIZ         = libjvm_db.diz
   35.11 -LIBJVM_DB_G_DEBUGINFO = libjvm$(G_SUFFIX)_db.debuginfo
   35.12 -LIBJVM_DB_G_DIZ       = libjvm$(G_SUFFIX)_db.diz
   35.13  
   35.14  JVM_DTRACE = jvm_dtrace
   35.15  LIBJVM_DTRACE = libjvm_dtrace.so
   35.16 -LIBJVM_DTRACE_G = libjvm$(G_SUFFIX)_dtrace.so
   35.17  
   35.18  LIBJVM_DTRACE_DEBUGINFO   = libjvm_dtrace.debuginfo
   35.19  LIBJVM_DTRACE_DIZ         = libjvm_dtrace.diz
   35.20 -LIBJVM_DTRACE_G_DEBUGINFO = libjvm$(G_SUFFIX)_dtrace.debuginfo
   35.21 -LIBJVM_DTRACE_G_DIZ       = libjvm$(G_SUFFIX)_dtrace.diz
   35.22  
   35.23  JVMOFFS = JvmOffsets
   35.24  JVMOFFS.o = $(JVMOFFS).o
   35.25 @@ -96,25 +90,18 @@
   35.26  
   35.27  XLIBJVM_DIR = 64
   35.28  XLIBJVM_DB = $(XLIBJVM_DIR)/$(LIBJVM_DB)
   35.29 -XLIBJVM_DB_G = $(XLIBJVM_DIR)/$(LIBJVM_DB_G)
   35.30  XLIBJVM_DTRACE = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE)
   35.31 -XLIBJVM_DTRACE_G = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_G)
   35.32  
   35.33  XLIBJVM_DB_DEBUGINFO       = $(XLIBJVM_DIR)/$(LIBJVM_DB_DEBUGINFO)
   35.34  XLIBJVM_DB_DIZ             = $(XLIBJVM_DIR)/$(LIBJVM_DB_DIZ)
   35.35 -XLIBJVM_DB_G_DEBUGINFO     = $(XLIBJVM_DIR)/$(LIBJVM_DB_G_DEBUGINFO)
   35.36 -XLIBJVM_DB_G_DIZ           = $(XLIBJVM_DIR)/$(LIBJVM_DB_G_DIZ)
   35.37  XLIBJVM_DTRACE_DEBUGINFO   = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DEBUGINFO)
   35.38  XLIBJVM_DTRACE_DIZ         = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_DIZ)
   35.39 -XLIBJVM_DTRACE_G_DEBUGINFO = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_G_DEBUGINFO)
   35.40 -XLIBJVM_DTRACE_G_DIZ       = $(XLIBJVM_DIR)/$(LIBJVM_DTRACE_G_DIZ)
   35.41  
   35.42  $(XLIBJVM_DB): $(ADD_GNU_DEBUGLINK) $(FIX_EMPTY_SEC_HDR_FLAGS) $(DTRACE_SRCDIR)/$(JVM_DB).c $(JVMOFFS).h $(LIBJVM_DB_MAPFILE)
   35.43  	@echo Making $@
   35.44  	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
   35.45  	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. -I$(GENERATED) \
   35.46  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
   35.47 -	[ -f $(XLIBJVM_DB_G) ] || { ln -s $(LIBJVM_DB) $(XLIBJVM_DB_G); }
   35.48  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   35.49  # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
   35.50  # Clear the SHF_ALLOC flag (if set) from empty section headers.
   35.51 @@ -137,13 +124,11 @@
   35.52      # implied else here is no stripping at all
   35.53      endif
   35.54    endif
   35.55 -	[ -f $(XLIBJVM_DB_G_DEBUGINFO) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO); }
   35.56    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   35.57  # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
   35.58  # in the archived name:
   35.59 -	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO) )
   35.60 -	$(RM) $(XLIBJVM_DB_DEBUGINFO) $(XLIBJVM_DB_G_DEBUGINFO)
   35.61 -	[ -f $(XLIBJVM_DB_G_DIZ) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DB_DIZ) $(LIBJVM_DB_G_DIZ); }
   35.62 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) )
   35.63 +	$(RM) $(XLIBJVM_DB_DEBUGINFO)
   35.64    endif
   35.65  endif
   35.66  
   35.67 @@ -152,7 +137,6 @@
   35.68  	$(QUIETLY) mkdir -p $(XLIBJVM_DIR) ; \
   35.69  	$(CC) $(SYMFLAG) $(ARCHFLAG/$(ISA)) -D$(TYPE) -I. \
   35.70  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
   35.71 -	[ -f $(XLIBJVM_DTRACE_G) ] || { ln -s $(LIBJVM_DTRACE) $(XLIBJVM_DTRACE_G); }
   35.72  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   35.73  # Clear the SHF_ALLOC flag (if set) from empty section headers.
   35.74  	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
   35.75 @@ -170,13 +154,11 @@
   35.76      # implied else here is no stripping at all
   35.77      endif
   35.78    endif
   35.79 -	[ -f $(XLIBJVM_DTRACE_G_DEBUGINFO) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO); }
   35.80    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   35.81  # Do this part in the $(XLIBJVM_DIR) subdir so $(XLIBJVM_DIR) is not
   35.82  # in the archived name:
   35.83 -	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO) )
   35.84 -	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO) $(XLIBJVM_DTRACE_G_DEBUGINFO)
   35.85 -	[ -f $(XLIBJVM_DTRACE_G_DIZ) ] || { cd $(XLIBJVM_DIR) && ln -s $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_G_DIZ); }
   35.86 +	( cd $(XLIBJVM_DIR) && $(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) )
   35.87 +	$(RM) $(XLIBJVM_DTRACE_DEBUGINFO)
   35.88    endif
   35.89  endif
   35.90  
   35.91 @@ -224,7 +206,6 @@
   35.92  	@echo Making $@
   35.93  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I. -I$(GENERATED) \
   35.94  		$(SHARED_FLAG) $(LFLAGS_JVM_DB) -o $@ $(DTRACE_SRCDIR)/$(JVM_DB).c -lc
   35.95 -	[ -f $(LIBJVM_DB_G) ] || { ln -s $@ $(LIBJVM_DB_G); }
   35.96  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   35.97  # Clear the SHF_ALLOC flag (if set) from empty section headers.
   35.98  	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
   35.99 @@ -240,11 +221,9 @@
  35.100      # implied else here is no stripping at all
  35.101      endif
  35.102    endif
  35.103 -	[ -f $(LIBJVM_DB_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO); }
  35.104    ifeq ($(ZIP_DEBUGINFO_FILES),1)
  35.105 -	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO)
  35.106 -	$(RM) $(LIBJVM_DB_DEBUGINFO) $(LIBJVM_DB_G_DEBUGINFO)
  35.107 -	[ -f $(LIBJVM_DB_G_DIZ) ] || { ln -s $(LIBJVM_DB_DIZ) $(LIBJVM_DB_G_DIZ); }
  35.108 +	$(ZIPEXE) -q -y $(LIBJVM_DB_DIZ) $(LIBJVM_DB_DEBUGINFO)
  35.109 +	$(RM) $(LIBJVM_DB_DEBUGINFO)
  35.110    endif
  35.111  endif
  35.112  
  35.113 @@ -252,7 +231,6 @@
  35.114  	@echo Making $@
  35.115  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) -D$(TYPE) -I.  \
  35.116  		$(SHARED_FLAG) $(LFLAGS_JVM_DTRACE) -o $@ $(DTRACE_SRCDIR)/$(JVM_DTRACE).c -lc -lthread -ldoor
  35.117 -	[ -f $(LIBJVM_DTRACE_G) ] || { ln -s $@ $(LIBJVM_DTRACE_G); }
  35.118  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
  35.119  # Clear the SHF_ALLOC flag (if set) from empty section headers.
  35.120  	$(QUIETLY) $(FIX_EMPTY_SEC_HDR_FLAGS) $@
  35.121 @@ -268,11 +246,9 @@
  35.122      # implied else here is no stripping at all
  35.123      endif
  35.124    endif
  35.125 -	[ -f $(LIBJVM_DTRACE_G_DEBUGINFO) ] || { ln -s $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO); }
  35.126    ifeq ($(ZIP_DEBUGINFO_FILES),1)
  35.127 -	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO)
  35.128 -	$(RM) $(LIBJVM_DTRACE_DEBUGINFO) $(LIBJVM_DTRACE_G_DEBUGINFO)
  35.129 -	[ -f $(LIBJVM_DTRACE_G_DIZ) ] || { ln -s $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_G_DIZ); }
   35.130 +	$(ZIPEXE) -q -y $(LIBJVM_DTRACE_DIZ) $(LIBJVM_DTRACE_DEBUGINFO)
  35.131 +	$(RM) $(LIBJVM_DTRACE_DEBUGINFO)
  35.132    endif
  35.133  endif
  35.134  
    36.1 --- a/make/solaris/makefiles/fastdebug.make	Tue Jan 08 14:04:25 2013 -0500
    36.2 +++ b/make/solaris/makefiles/fastdebug.make	Tue Jan 08 11:39:53 2013 -0800
    36.3 @@ -122,7 +122,6 @@
    36.4  # and mustn't be otherwise.
    36.5  MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
    36.6  
    36.7 -G_SUFFIX = _g
    36.8  VERSION = optimized
    36.9  SYSDEFS += -DASSERT -DFASTDEBUG -DCHECK_UNHANDLED_OOPS
   36.10  PICFLAGS = DEFAULT
    37.1 --- a/make/solaris/makefiles/gcc.make	Tue Jan 08 14:04:25 2013 -0500
    37.2 +++ b/make/solaris/makefiles/gcc.make	Tue Jan 08 11:39:53 2013 -0800
    37.3 @@ -187,9 +187,9 @@
    37.4  
    37.5  # Use the stabs format for debugging information (this is the default 
    37.6  # on gcc-2.91). It's good enough, has all the information about line 
    37.7 -# numbers and local variables, and libjvm_g.so is only about 16M. 
    37.8 +# numbers and local variables, and libjvm.so is only about 16M. 
    37.9  # Change this back to "-g" if you want the most expressive format. 
   37.10 -# (warning: that could easily inflate libjvm_g.so to 150M!) 
   37.11 +# (warning: that could easily inflate libjvm.so to 150M!) 
   37.12  # Note: The Itanium gcc compiler crashes when using -gstabs. 
   37.13  DEBUG_CFLAGS/ia64  = -g 
   37.14  DEBUG_CFLAGS/amd64 = -g 
    38.1 --- a/make/solaris/makefiles/jsig.make	Tue Jan 08 14:04:25 2013 -0500
    38.2 +++ b/make/solaris/makefiles/jsig.make	Tue Jan 08 11:39:53 2013 -0800
    38.3 @@ -24,17 +24,12 @@
    38.4  
    38.5  # Rules to build signal interposition library, used by vm.make
    38.6  
    38.7 -# libjsig[_g].so: signal interposition library
    38.8 +# libjsig.so: signal interposition library
    38.9  JSIG      = jsig
   38.10  LIBJSIG   = lib$(JSIG).so
   38.11  
   38.12 -JSIG_G    = $(JSIG)$(G_SUFFIX)
   38.13 -LIBJSIG_G = lib$(JSIG_G).so
   38.14 -
   38.15  LIBJSIG_DEBUGINFO   = lib$(JSIG).debuginfo
   38.16  LIBJSIG_DIZ         = lib$(JSIG).diz
   38.17 -LIBJSIG_G_DEBUGINFO = lib$(JSIG_G).debuginfo
   38.18 -LIBJSIG_G_DIZ       = lib$(JSIG_G).diz
   38.19  
   38.20  JSIGSRCDIR = $(GAMMADIR)/src/os/$(Platform_os_family)/vm
   38.21  
   38.22 @@ -56,7 +51,6 @@
   38.23  	@echo Making signal interposition lib...
   38.24  	$(QUIETLY) $(CC) $(SYMFLAG) $(ARCHFLAG) $(SHARED_FLAG) $(PICFLAG) \
   38.25                           $(LFLAGS_JSIG) -o $@ $(JSIGSRCDIR)/jsig.c -ldl
   38.26 -	[ -f $(LIBJSIG_G) ] || { ln -s $@ $(LIBJSIG_G); }
   38.27  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   38.28  # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
   38.29  # Clear the SHF_ALLOC flag (if set) from empty section headers.
   38.30 @@ -77,11 +71,9 @@
   38.31      # implied else here is no stripping at all
   38.32      endif
   38.33    endif
   38.34 -	[ -f $(LIBJSIG_G_DEBUGINFO) ] || { ln -s $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO); }
   38.35    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   38.36 -	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
   38.37 -	$(RM) $(LIBJSIG_DEBUGINFO) $(LIBJSIG_G_DEBUGINFO)
   38.38 -	[ -f $(LIBJSIG_G_DIZ) ] || { ln -s $(LIBJSIG_DIZ) $(LIBJSIG_G_DIZ); }
   38.39 +	$(ZIPEXE) -q -y $(LIBJSIG_DIZ) $(LIBJSIG_DEBUGINFO)
   38.40 +	$(RM) $(LIBJSIG_DEBUGINFO)
   38.41    endif
   38.42  endif
   38.43  
    39.1 --- a/make/solaris/makefiles/jvmg.make	Tue Jan 08 14:04:25 2013 -0500
    39.2 +++ b/make/solaris/makefiles/jvmg.make	Tue Jan 08 11:39:53 2013 -0800
    39.3 @@ -51,7 +51,6 @@
    39.4  # and mustn't be otherwise.
    39.5  MAPFILE_DTRACE = $(GAMMADIR)/make/solaris/makefiles/mapfile-vers-$(TYPE)
    39.6  
    39.7 -G_SUFFIX = _g
    39.8  VERSION = debug
    39.9  SYSDEFS += -DASSERT -DDEBUG
   39.10  PICFLAGS = DEFAULT
    40.1 --- a/make/solaris/makefiles/mapfile-vers	Tue Jan 08 14:04:25 2013 -0500
    40.2 +++ b/make/solaris/makefiles/mapfile-vers	Tue Jan 08 11:39:53 2013 -0800
    40.3 @@ -125,6 +125,7 @@
    40.4  		JVM_GetClassSignature;
    40.5  		JVM_GetClassSigners;
    40.6  		JVM_GetComponentType;
    40.7 +		JVM_GetClassTypeAnnotations;
    40.8  		JVM_GetDeclaredClasses;
    40.9  		JVM_GetDeclaringClass;
   40.10  		JVM_GetEnclosingMethodInfo;
    41.1 --- a/make/solaris/makefiles/optimized.make	Tue Jan 08 14:04:25 2013 -0500
    41.2 +++ b/make/solaris/makefiles/optimized.make	Tue Jan 08 11:39:53 2013 -0800
    41.3 @@ -62,5 +62,4 @@
    41.4  # Set the environment variable HOTSPARC_GENERIC to "true"
    41.5  # to inhibit the effect of the previous line on CFLAGS.
    41.6  
    41.7 -G_SUFFIX =
    41.8  VERSION = optimized
    42.1 --- a/make/solaris/makefiles/product.make	Tue Jan 08 14:04:25 2013 -0500
    42.2 +++ b/make/solaris/makefiles/product.make	Tue Jan 08 11:39:53 2013 -0800
    42.3 @@ -78,6 +78,5 @@
    42.4  # and this macro is not used.
    42.5  # LINK_LIB.CXX/POST_HOOK += $(STRIP_LIB.CXX/POST_HOOK)
    42.6  
    42.7 -G_SUFFIX =
    42.8  SYSDEFS += -DPRODUCT
    42.9  VERSION = optimized
    43.1 --- a/make/solaris/makefiles/saproc.make	Tue Jan 08 14:04:25 2013 -0500
    43.2 +++ b/make/solaris/makefiles/saproc.make	Tue Jan 08 11:39:53 2013 -0800
    43.3 @@ -24,20 +24,15 @@
    43.4  
    43.5  # Rules to build serviceability agent library, used by vm.make
    43.6  
    43.7 -# libsaproc[_g].so: serviceability agent
    43.8 +# libsaproc.so: serviceability agent
    43.9  
   43.10  SAPROC = saproc
   43.11  SADIS = sadis
   43.12  LIBSAPROC = lib$(SAPROC).so
   43.13  SADISOBJ = $(SADIS).o
   43.14  
   43.15 -SAPROC_G = $(SAPROC)$(G_SUFFIX)
   43.16 -LIBSAPROC_G = lib$(SAPROC_G).so
   43.17 -
   43.18  LIBSAPROC_DEBUGINFO   = lib$(SAPROC).debuginfo
   43.19  LIBSAPROC_DIZ         = lib$(SAPROC).diz
   43.20 -LIBSAPROC_G_DEBUGINFO = lib$(SAPROC_G).debuginfo
   43.21 -LIBSAPROC_G_DIZ       = lib$(SAPROC_G).diz
   43.22  
   43.23  AGENT_DIR = $(GAMMADIR)/agent
   43.24  
   43.25 @@ -113,7 +108,6 @@
   43.26  	           $(SA_LFLAGS)                                         \
   43.27  	           -o $@                                                \
   43.28  	           -ldl -ldemangle -lthread -lc
   43.29 -	[ -f $(LIBSAPROC_G) ] || { ln -s $@ $(LIBSAPROC_G); }
   43.30  
   43.31  $(SADISOBJ): $(SADISSRCFILES)
   43.32  	           $(QUIETLY) $(CC)                                     \
   43.33 @@ -146,11 +140,9 @@
   43.34      # implied else here is no stripping at all
   43.35      endif
   43.36    endif
   43.37 -	[ -f $(LIBSAPROC_G_DEBUGINFO) ] || { ln -s $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO); }
   43.38    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   43.39 -	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
   43.40 -	$(RM) $(LIBSAPROC_DEBUGINFO) $(LIBSAPROC_G_DEBUGINFO)
   43.41 -	[ -f $(LIBSAPROC_G_DIZ) ] || { ln -s $(LIBSAPROC_DIZ) $(LIBSAPROC_G_DIZ); }
   43.42 +	$(ZIPEXE) -q -y $(LIBSAPROC_DIZ) $(LIBSAPROC_DEBUGINFO)
   43.43 +	$(RM) $(LIBSAPROC_DEBUGINFO)
   43.44    endif
   43.45  endif
   43.46  
    44.1 --- a/make/solaris/makefiles/vm.make	Tue Jan 08 14:04:25 2013 -0500
    44.2 +++ b/make/solaris/makefiles/vm.make	Tue Jan 08 11:39:53 2013 -0800
    44.3 @@ -157,12 +157,9 @@
    44.4  
    44.5  JVM      = jvm
    44.6  LIBJVM   = lib$(JVM).so
    44.7 -LIBJVM_G = lib$(JVM)$(G_SUFFIX).so
    44.8  
    44.9  LIBJVM_DEBUGINFO   = lib$(JVM).debuginfo
   44.10  LIBJVM_DIZ         = lib$(JVM).diz
   44.11 -LIBJVM_G_DEBUGINFO = lib$(JVM)$(G_SUFFIX).debuginfo
   44.12 -LIBJVM_G_DIZ       = lib$(JVM)$(G_SUFFIX).diz
   44.13  
   44.14  SPECIAL_PATHS:=adlc c1 dist gc_implementation opto shark libadt
   44.15  
   44.16 @@ -291,8 +288,6 @@
   44.17  	$(QUIETLY) $(LINK_VM) $(LFLAGS_VM) -o $@ $(sort $(LIBJVM.o)) $(LIBS_VM)
   44.18  	$(QUIETLY) $(LINK_LIB.CXX/POST_HOOK)
   44.19  	$(QUIETLY) rm -f $@.1 && ln -s $@ $@.1
   44.20 -	$(QUIETLY) [ -f $(LIBJVM_G) ] || ln -s $@ $(LIBJVM_G)
   44.21 -	$(QUIETLY) [ -f $(LIBJVM_G).1 ] || ln -s $@.1 $(LIBJVM_G).1
   44.22  ifeq ($(ENABLE_FULL_DEBUG_SYMBOLS),1)
   44.23  # gobjcopy crashes on "empty" section headers with the SHF_ALLOC flag set.
   44.24  # Clear the SHF_ALLOC flag (if set) from empty section headers.
   44.25 @@ -313,11 +308,9 @@
   44.26      # implied else here is no stripping at all
   44.27      endif
   44.28    endif
   44.29 -	$(QUIETLY) [ -f $(LIBJVM_G_DEBUGINFO) ] || ln -s $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   44.30    ifeq ($(ZIP_DEBUGINFO_FILES),1)
   44.31 -	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   44.32 -	$(RM) $(LIBJVM_DEBUGINFO) $(LIBJVM_G_DEBUGINFO)
   44.33 -	[ -f $(LIBJVM_G_DIZ) ] || { ln -s $(LIBJVM_DIZ) $(LIBJVM_G_DIZ); }
   44.34 +	$(ZIPEXE) -q -y $(LIBJVM_DIZ) $(LIBJVM_DEBUGINFO)
   44.35 +	$(RM) $(LIBJVM_DEBUGINFO)
   44.36    endif
   44.37  endif
   44.38  endif # filter -sbfast -xsbfast
    45.1 --- a/make/windows/build.make	Tue Jan 08 14:04:25 2013 -0500
    45.2 +++ b/make/windows/build.make	Tue Jan 08 11:39:53 2013 -0800
    45.3 @@ -33,7 +33,7 @@
    45.4  # SA components are built if BUILD_WIN_SA=1 is specified.
    45.5  # See notes in README. This produces files:
    45.6  #  1. sa-jdi.jar       - This is built before building jvm.dll
    45.7 -#  2. sawindbg[_g].dll - Native library for SA - This is built after jvm.dll
    45.8 +#  2. sawindbg.dll     - Native library for SA - This is built after jvm.dll
    45.9  #                      - Also, .lib, .map, .pdb.
   45.10  #
   45.11  # Please refer to ./makefiles/sa.make
   45.12 @@ -115,7 +115,7 @@
   45.13  !endif
   45.14  
   45.15  #########################################################################
   45.16 -# Parameters for VERSIONINFO resource for jvm[_g].dll.
   45.17 +# Parameters for VERSIONINFO resource for jvm.dll.
   45.18  # These can be overridden via the nmake.exe command line.
   45.19  # They are overridden by RE during the control builds.
   45.20  #
   45.21 @@ -225,11 +225,6 @@
   45.22  
   45.23  #########################################################################
   45.24  
   45.25 -# With the jvm_g.dll now being named jvm.dll, we can't build both and place
   45.26 -#   the dll's in the same directory, so we only build one at a time,
   45.27 -#   re-directing the output to different output directories (done by user
   45.28 -#   of this makefile).
   45.29 -#
   45.30  defaultTarget: product
   45.31  
   45.32  # The product or release build is an optimized build, and is the default
    46.1 --- a/make/windows/projectfiles/compiler2/ADLCompiler.dsp	Tue Jan 08 14:04:25 2013 -0500
    46.2 +++ b/make/windows/projectfiles/compiler2/ADLCompiler.dsp	Tue Jan 08 11:39:53 2013 -0800
    46.3 @@ -72,11 +72,11 @@
    46.4  # ADD RSC /l 0x409
    46.5  BSC32=bscmake.exe
    46.6  # ADD BASE BSC32 /nologo
    46.7 -# ADD BSC32 /o".\adlc\Debug\adlc_g.bsc"
    46.8 +# ADD BSC32 /o".\adlc\Debug\adlc.bsc"
    46.9  # SUBTRACT BSC32 /nologo
   46.10  LINK32=link.exe
   46.11  # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
   46.12 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc_g.exe"
   46.13 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc.exe"
   46.14  
   46.15  !ENDIF 
   46.16  
    47.1 --- a/make/windows/projectfiles/tiered/ADLCompiler.dsp	Tue Jan 08 14:04:25 2013 -0500
    47.2 +++ b/make/windows/projectfiles/tiered/ADLCompiler.dsp	Tue Jan 08 11:39:53 2013 -0800
    47.3 @@ -72,11 +72,11 @@
    47.4  # ADD RSC /l 0x409
    47.5  BSC32=bscmake.exe
    47.6  # ADD BASE BSC32 /nologo
    47.7 -# ADD BSC32 /o".\adlc\Debug\adlc_g.bsc"
    47.8 +# ADD BSC32 /o".\adlc\Debug\adlc.bsc"
    47.9  # SUBTRACT BSC32 /nologo
   47.10  LINK32=link.exe
   47.11  # ADD BASE LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib ole32.lib oleaut32.lib uuid.lib odbc32.lib odbccp32.lib /nologo /subsystem:console /debug /machine:I386 /pdbtype:sept
   47.12 -# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc_g.exe"
   47.13 +# ADD LINK32 kernel32.lib user32.lib gdi32.lib winspool.lib comdlg32.lib advapi32.lib shell32.lib uuid.lib /nologo /subsystem:console /debug /machine:I386 /out:".\bin\adlc.exe"
   47.14  
   47.15  !ENDIF 
   47.16  
    48.1 --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    48.2 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    48.3 @@ -298,7 +298,7 @@
    48.4      for (int i = 0; i < _bytes_to_copy; i++) {
    48.5        address ptr = (address)(_pc_start + i);
    48.6        int a_byte = (*ptr) & 0xFF;
    48.7 -      __ a_byte (a_byte);
    48.8 +      __ emit_int8 (a_byte);
    48.9      }
   48.10    }
   48.11  
   48.12 @@ -340,10 +340,10 @@
   48.13    int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;
   48.14  
   48.15    // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
   48.16 -  __ a_byte(0);
   48.17 -  __ a_byte(being_initialized_entry_offset);
   48.18 -  __ a_byte(bytes_to_skip);
   48.19 -  __ a_byte(_bytes_to_copy);
   48.20 +  __ emit_int8(0);
   48.21 +  __ emit_int8(being_initialized_entry_offset);
   48.22 +  __ emit_int8(bytes_to_skip);
   48.23 +  __ emit_int8(_bytes_to_copy);
   48.24    address patch_info_pc = __ pc();
   48.25    assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
   48.26  
    49.1 --- a/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    49.2 +++ b/src/cpu/sparc/vm/cppInterpreter_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    49.3 @@ -582,7 +582,9 @@
    49.4    // the following temporary registers are used during frame creation
    49.5    const Register Gtmp1 = G3_scratch ;
    49.6    const Register Gtmp2 = G1_scratch;
    49.7 -  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
    49.8 +  const Register RconstMethod = Gtmp1;
    49.9 +  const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
   49.10 +  const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
   49.11  
   49.12    bool inc_counter  = UseCompiler || CountCompiledCalls;
   49.13  
   49.14 @@ -618,6 +620,7 @@
   49.15    }
   49.16  #endif // ASSERT
   49.17  
   49.18 +  __ ld_ptr(constMethod, RconstMethod);
   49.19    __ lduh(size_of_parameters, Gtmp1);
   49.20    __ sll(Gtmp1, LogBytesPerWord, Gtmp2);       // parameter size in bytes
   49.21    __ add(Gargs, Gtmp2, Gargs);                 // points to first local + BytesPerWord
   49.22 @@ -1047,8 +1050,6 @@
   49.23    const Register Gtmp = G3_scratch;
   49.24    const Address constMethod       (G5_method, 0, in_bytes(Method::const_offset()));
   49.25    const Address access_flags      (G5_method, 0, in_bytes(Method::access_flags_offset()));
   49.26 -  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
   49.27 -  const Address size_of_locals    (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
   49.28  
   49.29    // slop factor is two extra slots on the expression stack so that
   49.30    // we always have room to store a result when returning from a call without parameters
   49.31 @@ -1066,6 +1067,9 @@
   49.32    // Now compute new frame size
   49.33  
   49.34    if (native) {
   49.35 +    const Register RconstMethod = Gtmp;
   49.36 +    const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
   49.37 +    __ ld_ptr(constMethod, RconstMethod);
   49.38      __ lduh( size_of_parameters, Gtmp );
   49.39      __ calc_mem_param_words(Gtmp, Gtmp);     // space for native call parameters passed on the stack in words
   49.40    } else {
   49.41 @@ -1236,9 +1240,13 @@
   49.42      }
   49.43      if (init_value != noreg) {
   49.44        Label clear_loop;
   49.45 +      const Register RconstMethod = O1;
   49.46 +      const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
   49.47 +      const Address size_of_locals    (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
   49.48  
   49.49        // NOTE: If you change the frame layout, this code will need to
   49.50        // be updated!
   49.51 +      __ ld_ptr( constMethod, RconstMethod );
   49.52        __ lduh( size_of_locals, O2 );
   49.53        __ lduh( size_of_parameters, O1 );
   49.54        __ sll( O2, LogBytesPerWord, O2);
   49.55 @@ -1483,13 +1491,16 @@
   49.56  //
   49.57  //  assert_different_registers(state, prev_state);
   49.58    const Register Gtmp = G3_scratch;
    49.59 +  const Register RconstMethod = G3_scratch;
   49.60    const Register tmp = O2;
   49.61 -  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
   49.62 -  const Address size_of_locals    (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
   49.63 +  const Address constMethod(G5_method, 0, in_bytes(Method::const_offset()));
   49.64 +  const Address size_of_parameters(RconstMethod, 0, in_bytes(ConstMethod::size_of_parameters_offset()));
   49.65 +  const Address size_of_locals    (RconstMethod, 0, in_bytes(ConstMethod::size_of_locals_offset()));
   49.66  
   49.67 +  __ ld_ptr(constMethod, RconstMethod);
   49.68    __ lduh(size_of_parameters, tmp);
   49.69 -  __ sll(tmp, LogBytesPerWord, Gtmp);       // parameter size in bytes
   49.70 -  __ add(args, Gtmp, Gargs);                // points to first local + BytesPerWord
   49.71 +  __ sll(tmp, LogBytesPerWord, Gargs);       // parameter size in bytes
   49.72 +  __ add(args, Gargs, Gargs);                // points to first local + BytesPerWord
   49.73    // NEW
   49.74    __ add(Gargs, -wordSize, Gargs);             // points to first local[0]
   49.75    // determine extra space for non-argument locals & adjust caller's SP
   49.76 @@ -1541,8 +1552,6 @@
   49.77  
   49.78    const Address constMethod       (G5_method, 0, in_bytes(Method::const_offset()));
   49.79    const Address access_flags      (G5_method, 0, in_bytes(Method::access_flags_offset()));
   49.80 -  const Address size_of_parameters(G5_method, 0, in_bytes(Method::size_of_parameters_offset()));
   49.81 -  const Address size_of_locals    (G5_method, 0, in_bytes(Method::size_of_locals_offset()));
   49.82  
   49.83    address entry_point = __ pc();
   49.84    __ mov(G0, prevState);                                                 // no current activation
   49.85 @@ -1750,7 +1759,9 @@
   49.86  
   49.87    __ ld_ptr(STATE(_result._to_call._callee), L4_scratch);                        // called method
   49.88    __ ld_ptr(STATE(_stack), L1_scratch);                                          // get top of java expr stack
   49.89 -  __ lduh(L4_scratch, in_bytes(Method::size_of_parameters_offset()), L2_scratch); // get parameter size
   49.90 +  // get parameter size
   49.91 +  __ ld_ptr(L4_scratch, in_bytes(Method::const_offset()), L2_scratch);
   49.92 +  __ lduh(L2_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), L2_scratch);
   49.93    __ sll(L2_scratch, LogBytesPerWord, L2_scratch     );                           // parameter size in bytes
   49.94    __ add(L1_scratch, L2_scratch, L1_scratch);                                      // stack destination for result
   49.95    __ ld(L4_scratch, in_bytes(Method::result_index_offset()), L3_scratch); // called method result type index
    50.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    50.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    50.3 @@ -100,34 +100,6 @@
    50.4  bool AbstractAssembler::pd_check_instruction_mark() { return false; }
    50.5  #endif
    50.6  
    50.7 -
    50.8 -void MacroAssembler::print_instruction(int inst) {
    50.9 -  const char* s;
   50.10 -  switch (inv_op(inst)) {
   50.11 -  default:         s = "????"; break;
   50.12 -  case call_op:    s = "call"; break;
   50.13 -  case branch_op:
   50.14 -    switch (inv_op2(inst)) {
   50.15 -      case fb_op2:     s = "fb";   break;
   50.16 -      case fbp_op2:    s = "fbp";  break;
   50.17 -      case br_op2:     s = "br";   break;
   50.18 -      case bp_op2:     s = "bp";   break;
   50.19 -      case cb_op2:     s = "cb";   break;
   50.20 -      case bpr_op2: {
   50.21 -        if (is_cbcond(inst)) {
   50.22 -          s = is_cxb(inst) ? "cxb" : "cwb";
   50.23 -        } else {
   50.24 -          s = "bpr";
   50.25 -        }
   50.26 -        break;
   50.27 -      }
   50.28 -      default:         s = "????"; break;
   50.29 -    }
   50.30 -  }
   50.31 -  ::tty->print("%s", s);
   50.32 -}
   50.33 -
   50.34 -
   50.35  // Patch instruction inst at offset inst_pos to refer to dest_pos
   50.36  // and return the resulting instruction.
   50.37  // We should have pcs, not offsets, but since all is relative, it will work out
    51.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Tue Jan 08 14:04:25 2013 -0500
    51.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Tue Jan 08 11:39:53 2013 -0800
    51.3 @@ -603,7 +603,6 @@
    51.4    friend class Label;
    51.5  
    51.6   protected:
    51.7 -  static void print_instruction(int inst);
    51.8    static int  patched_branch(int dest_pos, int inst, int inst_pos);
    51.9    static int  branch_destination(int inst, int pos);
   51.10  
   51.11 @@ -759,9 +758,6 @@
   51.12    // Required platform-specific helpers for Label::patch_instructions.
   51.13    // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   51.14    void pd_patch_instruction(address branch, address target);
   51.15 -#ifndef PRODUCT
   51.16 -  static void pd_print_patched_instruction(address branch);
   51.17 -#endif
   51.18  
   51.19    // sethi Macro handles optimizations and relocations
   51.20  private:
    52.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Tue Jan 08 14:04:25 2013 -0500
    52.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Tue Jan 08 11:39:53 2013 -0800
    52.3 @@ -43,14 +43,6 @@
    52.4    stub_inst = patched_branch(target - branch, stub_inst, 0);
    52.5  }
    52.6  
    52.7 -#ifndef PRODUCT
    52.8 -inline void MacroAssembler::pd_print_patched_instruction(address branch) {
    52.9 -  jint stub_inst = *(jint*) branch;
   52.10 -  print_instruction(stub_inst);
   52.11 -  ::tty->print("%s", " (unresolved)");
   52.12 -}
   52.13 -#endif // PRODUCT
   52.14 -
   52.15  // Use the right loads/stores for the platform
   52.16  inline void MacroAssembler::ld_ptr( Register s1, Register s2, Register d ) {
   52.17  #ifdef _LP64
    53.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    53.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    53.3 @@ -171,7 +171,8 @@
    53.4  
    53.5    if (VerifyMethodHandles && !for_compiler_entry) {
    53.6      // make sure recv is already on stack
    53.7 -    __ load_sized_value(Address(method_temp, Method::size_of_parameters_offset()),
    53.8 +    __ ld_ptr(method_temp, in_bytes(Method::const_offset()), temp2);
    53.9 +    __ load_sized_value(Address(temp2, ConstMethod::size_of_parameters_offset()),
   53.10                          temp2,
   53.11                          sizeof(u2), /*is_signed*/ false);
   53.12      // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
   53.13 @@ -233,7 +234,8 @@
   53.14    int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   53.15    assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   53.16    if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
   53.17 -    __ load_sized_value(Address(G5_method, Method::size_of_parameters_offset()),
   53.18 +    __ ld_ptr(G5_method, in_bytes(Method::const_offset()), O4_param_size);
   53.19 +    __ load_sized_value(Address(O4_param_size, ConstMethod::size_of_parameters_offset()),
   53.20                          O4_param_size,
   53.21                          sizeof(u2), /*is_signed*/ false);
   53.22      // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    54.1 --- a/src/cpu/sparc/vm/sparc.ad	Tue Jan 08 14:04:25 2013 -0500
    54.2 +++ b/src/cpu/sparc/vm/sparc.ad	Tue Jan 08 11:39:53 2013 -0800
    54.3 @@ -10224,7 +10224,7 @@
    54.4  
    54.5  //---------- Zeros Count Instructions ------------------------------------------
    54.6  
    54.7 -instruct countLeadingZerosI(iRegI dst, iRegI src, iRegI tmp, flagsReg cr) %{
    54.8 +instruct countLeadingZerosI(iRegIsafe dst, iRegI src, iRegI tmp, flagsReg cr) %{
    54.9    predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
   54.10    match(Set dst (CountLeadingZerosI src));
   54.11    effect(TEMP dst, TEMP tmp, KILL cr);
   54.12 @@ -10321,7 +10321,7 @@
   54.13    ins_pipe(ialu_reg);
   54.14  %}
   54.15  
   54.16 -instruct countTrailingZerosI(iRegI dst, iRegI src, flagsReg cr) %{
   54.17 +instruct countTrailingZerosI(iRegIsafe dst, iRegI src, flagsReg cr) %{
   54.18    predicate(UsePopCountInstruction);  // See Matcher::match_rule_supported
   54.19    match(Set dst (CountTrailingZerosI src));
   54.20    effect(TEMP dst, KILL cr);
   54.21 @@ -10364,19 +10364,21 @@
   54.22  
   54.23  //---------- Population Count Instructions -------------------------------------
   54.24  
   54.25 -instruct popCountI(iRegI dst, iRegI src) %{
   54.26 +instruct popCountI(iRegIsafe dst, iRegI src) %{
   54.27    predicate(UsePopCountInstruction);
   54.28    match(Set dst (PopCountI src));
   54.29  
   54.30 -  format %{ "POPC   $src, $dst" %}
   54.31 -  ins_encode %{
   54.32 -    __ popc($src$$Register, $dst$$Register);
   54.33 +  format %{ "SRL    $src, G0, $dst\t! clear upper word for 64 bit POPC\n\t"
   54.34 +            "POPC   $dst, $dst" %}
   54.35 +  ins_encode %{
   54.36 +    __ srl($src$$Register, G0, $dst$$Register);
   54.37 +    __ popc($dst$$Register, $dst$$Register);
   54.38    %}
   54.39    ins_pipe(ialu_reg);
   54.40  %}
   54.41  
   54.42  // Note: Long.bitCount(long) returns an int.
   54.43 -instruct popCountL(iRegI dst, iRegL src) %{
   54.44 +instruct popCountL(iRegIsafe dst, iRegL src) %{
   54.45    predicate(UsePopCountInstruction);
   54.46    match(Set dst (PopCountL src));
   54.47  
    55.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    55.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    55.3 @@ -434,7 +434,7 @@
    55.4  
    55.5    // the frame is greater than one page in size, so check against
    55.6    // the bottom of the stack
    55.7 -  __ cmp_and_brx_short(SP, Rscratch, Assembler::greater, Assembler::pt, after_frame_check);
    55.8 +  __ cmp_and_brx_short(SP, Rscratch, Assembler::greaterUnsigned, Assembler::pt, after_frame_check);
    55.9  
   55.10    // the stack will overflow, throw an exception
   55.11  
   55.12 @@ -494,9 +494,6 @@
   55.13    // (gri - 2/25/2000)
   55.14  
   55.15  
   55.16 -  const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
   55.17 -  const Address size_of_locals    (G5_method, Method::size_of_locals_offset());
   55.18 -  const Address constMethod       (G5_method, Method::const_offset());
   55.19    int rounded_vm_local_words = round_to( frame::interpreter_frame_vm_local_words, WordsPerLong );
   55.20  
   55.21    const int extra_space =
   55.22 @@ -506,11 +503,15 @@
   55.23      (native_call ? frame::interpreter_frame_extra_outgoing_argument_words : 0);
   55.24  
   55.25    const Register Glocals_size = G3;
   55.26 +  const Register RconstMethod = Glocals_size;
   55.27    const Register Otmp1 = O3;
   55.28    const Register Otmp2 = O4;
   55.29    // Lscratch can't be used as a temporary because the call_stub uses
   55.30    // it to assert that the stack frame was setup correctly.
   55.31 +  const Address constMethod       (G5_method, Method::const_offset());
   55.32 +  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
   55.33  
   55.34 +  __ ld_ptr( constMethod, RconstMethod );
   55.35    __ lduh( size_of_parameters, Glocals_size);
   55.36  
   55.37    // Gargs points to first local + BytesPerWord
   55.38 @@ -530,6 +531,8 @@
   55.39      //
   55.40      // Compute number of locals in method apart from incoming parameters
   55.41      //
   55.42 +    const Address size_of_locals    (Otmp1, ConstMethod::size_of_locals_offset());
   55.43 +    __ ld_ptr( constMethod, Otmp1 );
   55.44      __ lduh( size_of_locals, Otmp1 );
   55.45      __ sub( Otmp1, Glocals_size, Glocals_size );
   55.46      __ round_to( Glocals_size, WordsPerLong );
   55.47 @@ -1256,8 +1259,7 @@
   55.48    // make sure registers are different!
   55.49    assert_different_registers(G2_thread, G5_method, Gargs, Gtmp1, Gtmp2);
   55.50  
   55.51 -  const Address size_of_parameters(G5_method, Method::size_of_parameters_offset());
   55.52 -  const Address size_of_locals    (G5_method, Method::size_of_locals_offset());
   55.53 +  const Address constMethod       (G5_method, Method::const_offset());
   55.54    // Seems like G5_method is live at the point this is used. So we could make this look consistent
   55.55    // and use in the asserts.
   55.56    const Address access_flags      (Lmethod,   Method::access_flags_offset());
   55.57 @@ -1307,8 +1309,13 @@
   55.58    init_value = G0;
   55.59    Label clear_loop;
   55.60  
   55.61 +  const Register RconstMethod = O1;
   55.62 +  const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
   55.63 +  const Address size_of_locals    (RconstMethod, ConstMethod::size_of_locals_offset());
   55.64 +
   55.65    // NOTE: If you change the frame layout, this code will need to
   55.66    // be updated!
   55.67 +  __ ld_ptr( constMethod, RconstMethod );
   55.68    __ lduh( size_of_locals, O2 );
   55.69    __ lduh( size_of_parameters, O1 );
   55.70    __ sll( O2, Interpreter::logStackElementSize, O2);
   55.71 @@ -1823,9 +1830,13 @@
   55.72  
   55.73      const Register Gtmp1 = G3_scratch;
   55.74      const Register Gtmp2 = G1_scratch;
   55.75 +    const Register RconstMethod = Gtmp1;
   55.76 +    const Address constMethod(Lmethod, Method::const_offset());
   55.77 +    const Address size_of_parameters(RconstMethod, ConstMethod::size_of_parameters_offset());
   55.78  
   55.79      // Compute size of arguments for saving when returning to deoptimized caller
   55.80 -    __ lduh(Lmethod, in_bytes(Method::size_of_parameters_offset()), Gtmp1);
   55.81 +    __ ld_ptr(constMethod, RconstMethod);
   55.82 +    __ lduh(size_of_parameters, Gtmp1);
   55.83      __ sll(Gtmp1, Interpreter::logStackElementSize, Gtmp1);
   55.84      __ sub(Llocals, Gtmp1, Gtmp2);
   55.85      __ add(Gtmp2, wordSize, Gtmp2);
    56.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jan 08 14:04:25 2013 -0500
    56.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jan 08 11:39:53 2013 -0800
    56.3 @@ -3040,7 +3040,8 @@
    56.4    Register Rtemp = G4_scratch;
    56.5  
    56.6    // Load receiver from stack slot
    56.7 -  __ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
    56.8 +  __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
    56.9 +  __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
   56.10    __ load_receiver(G4_scratch, O0);
   56.11  
   56.12    // receiver NULL check
    57.1 --- a/src/cpu/x86/vm/assembler_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    57.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    57.3 @@ -226,9 +226,9 @@
    57.4    assert(isByte(op1) && isByte(op2), "wrong opcode");
    57.5    assert(isByte(imm8), "not a byte");
    57.6    assert((op1 & 0x01) == 0, "should be 8bit operation");
    57.7 -  emit_byte(op1);
    57.8 -  emit_byte(op2 | encode(dst));
    57.9 -  emit_byte(imm8);
   57.10 +  emit_int8(op1);
   57.11 +  emit_int8(op2 | encode(dst));
   57.12 +  emit_int8(imm8);
   57.13  }
   57.14  
   57.15  
   57.16 @@ -237,12 +237,12 @@
   57.17    assert((op1 & 0x01) == 1, "should be 32bit operation");
   57.18    assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   57.19    if (is8bit(imm32)) {
   57.20 -    emit_byte(op1 | 0x02); // set sign bit
   57.21 -    emit_byte(op2 | encode(dst));
   57.22 -    emit_byte(imm32 & 0xFF);
   57.23 +    emit_int8(op1 | 0x02); // set sign bit
   57.24 +    emit_int8(op2 | encode(dst));
   57.25 +    emit_int8(imm32 & 0xFF);
   57.26    } else {
   57.27 -    emit_byte(op1);
   57.28 -    emit_byte(op2 | encode(dst));
   57.29 +    emit_int8(op1);
   57.30 +    emit_int8(op2 | encode(dst));
   57.31      emit_long(imm32);
   57.32    }
   57.33  }
   57.34 @@ -252,8 +252,8 @@
   57.35    assert(isByte(op1) && isByte(op2), "wrong opcode");
   57.36    assert((op1 & 0x01) == 1, "should be 32bit operation");
   57.37    assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   57.38 -  emit_byte(op1);
   57.39 -  emit_byte(op2 | encode(dst));
   57.40 +  emit_int8(op1);
   57.41 +  emit_int8(op2 | encode(dst));
   57.42    emit_long(imm32);
   57.43  }
   57.44  
   57.45 @@ -262,11 +262,11 @@
   57.46    assert((op1 & 0x01) == 1, "should be 32bit operation");
   57.47    assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
   57.48    if (is8bit(imm32)) {
   57.49 -    emit_byte(op1 | 0x02); // set sign bit
   57.50 +    emit_int8(op1 | 0x02); // set sign bit
   57.51      emit_operand(rm, adr, 1);
   57.52 -    emit_byte(imm32 & 0xFF);
   57.53 +    emit_int8(imm32 & 0xFF);
   57.54    } else {
   57.55 -    emit_byte(op1);
   57.56 +    emit_int8(op1);
   57.57      emit_operand(rm, adr, 4);
   57.58      emit_long(imm32);
   57.59    }
   57.60 @@ -275,8 +275,8 @@
   57.61  
   57.62  void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
   57.63    assert(isByte(op1) && isByte(op2), "wrong opcode");
   57.64 -  emit_byte(op1);
   57.65 -  emit_byte(op2 | encode(dst) << 3 | encode(src));
   57.66 +  emit_int8(op1);
   57.67 +  emit_int8(op2 | encode(dst) << 3 | encode(src));
   57.68  }
   57.69  
   57.70  
   57.71 @@ -301,21 +301,21 @@
   57.72          // [base + index*scale]
   57.73          // [00 reg 100][ss index base]
   57.74          assert(index != rsp, "illegal addressing mode");
   57.75 -        emit_byte(0x04 | regenc);
   57.76 -        emit_byte(scale << 6 | indexenc | baseenc);
   57.77 +        emit_int8(0x04 | regenc);
   57.78 +        emit_int8(scale << 6 | indexenc | baseenc);
   57.79        } else if (is8bit(disp) && rtype == relocInfo::none) {
   57.80          // [base + index*scale + imm8]
   57.81          // [01 reg 100][ss index base] imm8
   57.82          assert(index != rsp, "illegal addressing mode");
   57.83 -        emit_byte(0x44 | regenc);
   57.84 -        emit_byte(scale << 6 | indexenc | baseenc);
   57.85 -        emit_byte(disp & 0xFF);
   57.86 +        emit_int8(0x44 | regenc);
   57.87 +        emit_int8(scale << 6 | indexenc | baseenc);
   57.88 +        emit_int8(disp & 0xFF);
   57.89        } else {
   57.90          // [base + index*scale + disp32]
   57.91          // [10 reg 100][ss index base] disp32
   57.92          assert(index != rsp, "illegal addressing mode");
   57.93 -        emit_byte(0x84 | regenc);
   57.94 -        emit_byte(scale << 6 | indexenc | baseenc);
   57.95 +        emit_int8(0x84 | regenc);
   57.96 +        emit_int8(scale << 6 | indexenc | baseenc);
   57.97          emit_data(disp, rspec, disp32_operand);
   57.98        }
   57.99      } else if (base == rsp LP64_ONLY(|| base == r12)) {
  57.100 @@ -323,19 +323,19 @@
  57.101        if (disp == 0 && rtype == relocInfo::none) {
  57.102          // [rsp]
  57.103          // [00 reg 100][00 100 100]
  57.104 -        emit_byte(0x04 | regenc);
  57.105 -        emit_byte(0x24);
  57.106 +        emit_int8(0x04 | regenc);
  57.107 +        emit_int8(0x24);
  57.108        } else if (is8bit(disp) && rtype == relocInfo::none) {
  57.109          // [rsp + imm8]
  57.110          // [01 reg 100][00 100 100] disp8
  57.111 -        emit_byte(0x44 | regenc);
  57.112 -        emit_byte(0x24);
  57.113 -        emit_byte(disp & 0xFF);
  57.114 +        emit_int8(0x44 | regenc);
  57.115 +        emit_int8(0x24);
  57.116 +        emit_int8(disp & 0xFF);
  57.117        } else {
  57.118          // [rsp + imm32]
  57.119          // [10 reg 100][00 100 100] disp32
  57.120 -        emit_byte(0x84 | regenc);
  57.121 -        emit_byte(0x24);
  57.122 +        emit_int8(0x84 | regenc);
  57.123 +        emit_int8(0x24);
  57.124          emit_data(disp, rspec, disp32_operand);
  57.125        }
  57.126      } else {
  57.127 @@ -345,16 +345,16 @@
  57.128            base != rbp LP64_ONLY(&& base != r13)) {
  57.129          // [base]
  57.130          // [00 reg base]
  57.131 -        emit_byte(0x00 | regenc | baseenc);
  57.132 +        emit_int8(0x00 | regenc | baseenc);
  57.133        } else if (is8bit(disp) && rtype == relocInfo::none) {
  57.134          // [base + disp8]
  57.135          // [01 reg base] disp8
  57.136 -        emit_byte(0x40 | regenc | baseenc);
  57.137 -        emit_byte(disp & 0xFF);
  57.138 +        emit_int8(0x40 | regenc | baseenc);
  57.139 +        emit_int8(disp & 0xFF);
  57.140        } else {
  57.141          // [base + disp32]
  57.142          // [10 reg base] disp32
  57.143 -        emit_byte(0x80 | regenc | baseenc);
  57.144 +        emit_int8(0x80 | regenc | baseenc);
  57.145          emit_data(disp, rspec, disp32_operand);
  57.146        }
  57.147      }
  57.148 @@ -364,14 +364,14 @@
  57.149        // [index*scale + disp]
  57.150        // [00 reg 100][ss index 101] disp32
  57.151        assert(index != rsp, "illegal addressing mode");
  57.152 -      emit_byte(0x04 | regenc);
  57.153 -      emit_byte(scale << 6 | indexenc | 0x05);
  57.154 +      emit_int8(0x04 | regenc);
  57.155 +      emit_int8(scale << 6 | indexenc | 0x05);
  57.156        emit_data(disp, rspec, disp32_operand);
  57.157      } else if (rtype != relocInfo::none ) {
  57.158        // [disp] (64bit) RIP-RELATIVE (32bit) abs
  57.159        // [00 000 101] disp32
  57.160  
  57.161 -      emit_byte(0x05 | regenc);
  57.162 +      emit_int8(0x05 | regenc);
  57.163        // Note that the RIP-rel. correction applies to the generated
  57.164        // disp field, but _not_ to the target address in the rspec.
  57.165  
  57.166 @@ -391,8 +391,8 @@
  57.167        // 32bit never did this, did everything as the rip-rel/disp code above
  57.168        // [disp] ABSOLUTE
  57.169        // [00 reg 100][00 100 101] disp32
  57.170 -      emit_byte(0x04 | regenc);
  57.171 -      emit_byte(0x25);
  57.172 +      emit_int8(0x04 | regenc);
  57.173 +      emit_int8(0x25);
  57.174        emit_data(disp, rspec, disp32_operand);
  57.175      }
  57.176    }
  57.177 @@ -883,8 +883,8 @@
  57.178  void Assembler::emit_farith(int b1, int b2, int i) {
  57.179    assert(isByte(b1) && isByte(b2), "wrong opcode");
  57.180    assert(0 <= i &&  i < 8, "illegal stack offset");
  57.181 -  emit_byte(b1);
  57.182 -  emit_byte(b2 + i);
  57.183 +  emit_int8(b1);
  57.184 +  emit_int8(b2 + i);
  57.185  }
  57.186  
  57.187  
  57.188 @@ -899,7 +899,7 @@
  57.189  void Assembler::adcl(Address dst, Register src) {
  57.190    InstructionMark im(this);
  57.191    prefix(dst, src);
  57.192 -  emit_byte(0x11);
  57.193 +  emit_int8(0x11);
  57.194    emit_operand(src, dst);
  57.195  }
  57.196  
  57.197 @@ -911,7 +911,7 @@
  57.198  void Assembler::adcl(Register dst, Address src) {
  57.199    InstructionMark im(this);
  57.200    prefix(src, dst);
  57.201 -  emit_byte(0x13);
  57.202 +  emit_int8(0x13);
  57.203    emit_operand(dst, src);
  57.204  }
  57.205  
  57.206 @@ -929,7 +929,7 @@
  57.207  void Assembler::addl(Address dst, Register src) {
  57.208    InstructionMark im(this);
  57.209    prefix(dst, src);
  57.210 -  emit_byte(0x01);
  57.211 +  emit_int8(0x01);
  57.212    emit_operand(src, dst);
  57.213  }
  57.214  
  57.215 @@ -941,7 +941,7 @@
  57.216  void Assembler::addl(Register dst, Address src) {
  57.217    InstructionMark im(this);
  57.218    prefix(src, dst);
  57.219 -  emit_byte(0x03);
  57.220 +  emit_int8(0x03);
  57.221    emit_operand(dst, src);
  57.222  }
  57.223  
  57.224 @@ -953,38 +953,40 @@
  57.225  void Assembler::addr_nop_4() {
  57.226    assert(UseAddressNop, "no CPU support");
  57.227    // 4 bytes: NOP DWORD PTR [EAX+0]
  57.228 -  emit_byte(0x0F);
  57.229 -  emit_byte(0x1F);
  57.230 -  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  57.231 -  emit_byte(0);    // 8-bits offset (1 byte)
  57.232 +  emit_int8(0x0F);
  57.233 +  emit_int8(0x1F);
  57.234 +  emit_int8(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  57.235 +  emit_int8(0);    // 8-bits offset (1 byte)
  57.236  }
  57.237  
  57.238  void Assembler::addr_nop_5() {
  57.239    assert(UseAddressNop, "no CPU support");
  57.240    // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  57.241 -  emit_byte(0x0F);
  57.242 -  emit_byte(0x1F);
  57.243 -  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  57.244 -  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  57.245 -  emit_byte(0);    // 8-bits offset (1 byte)
  57.246 +  emit_int8(0x0F);
  57.247 +  emit_int8(0x1F);
  57.248 +  emit_int8(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  57.249 +  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  57.250 +  emit_int8(0);    // 8-bits offset (1 byte)
  57.251  }
  57.252  
  57.253  void Assembler::addr_nop_7() {
  57.254    assert(UseAddressNop, "no CPU support");
  57.255    // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  57.256 -  emit_byte(0x0F);
  57.257 -  emit_byte(0x1F);
  57.258 -  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  57.259 +  emit_int8(0x0F);
  57.260 +  emit_int8(0x1F);
  57.261 +  emit_int8((unsigned char)0x80);
  57.262 +                   // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  57.263    emit_long(0);    // 32-bits offset (4 bytes)
  57.264  }
  57.265  
  57.266  void Assembler::addr_nop_8() {
  57.267    assert(UseAddressNop, "no CPU support");
  57.268    // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  57.269 -  emit_byte(0x0F);
  57.270 -  emit_byte(0x1F);
  57.271 -  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  57.272 -  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  57.273 +  emit_int8(0x0F);
  57.274 +  emit_int8(0x1F);
  57.275 +  emit_int8((unsigned char)0x84);
  57.276 +                   // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  57.277 +  emit_int8(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  57.278    emit_long(0);    // 32-bits offset (4 bytes)
  57.279  }
  57.280  
  57.281 @@ -1012,67 +1014,67 @@
  57.282    assert(VM_Version::supports_aes(), "");
  57.283    InstructionMark im(this);
  57.284    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.285 -  emit_byte(0xde);
  57.286 +  emit_int8((unsigned char)0xDE);
  57.287    emit_operand(dst, src);
  57.288  }
  57.289  
  57.290  void Assembler::aesdec(XMMRegister dst, XMMRegister src) {
  57.291    assert(VM_Version::supports_aes(), "");
  57.292    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.293 -  emit_byte(0xde);
  57.294 -  emit_byte(0xC0 | encode);
  57.295 +  emit_int8((unsigned char)0xDE);
  57.296 +  emit_int8(0xC0 | encode);
  57.297  }
  57.298  
  57.299  void Assembler::aesdeclast(XMMRegister dst, Address src) {
  57.300    assert(VM_Version::supports_aes(), "");
  57.301    InstructionMark im(this);
  57.302    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.303 -  emit_byte(0xdf);
  57.304 +  emit_int8((unsigned char)0xDF);
  57.305    emit_operand(dst, src);
  57.306  }
  57.307  
  57.308  void Assembler::aesdeclast(XMMRegister dst, XMMRegister src) {
  57.309    assert(VM_Version::supports_aes(), "");
  57.310    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.311 -  emit_byte(0xdf);
  57.312 -  emit_byte(0xC0 | encode);
  57.313 +  emit_int8((unsigned char)0xDF);
  57.314 +  emit_int8((unsigned char)(0xC0 | encode));
  57.315  }
  57.316  
  57.317  void Assembler::aesenc(XMMRegister dst, Address src) {
  57.318    assert(VM_Version::supports_aes(), "");
  57.319    InstructionMark im(this);
  57.320    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.321 -  emit_byte(0xdc);
  57.322 +  emit_int8((unsigned char)0xDC);
  57.323    emit_operand(dst, src);
  57.324  }
  57.325  
  57.326  void Assembler::aesenc(XMMRegister dst, XMMRegister src) {
  57.327    assert(VM_Version::supports_aes(), "");
  57.328    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.329 -  emit_byte(0xdc);
  57.330 -  emit_byte(0xC0 | encode);
  57.331 +  emit_int8((unsigned char)0xDC);
  57.332 +  emit_int8(0xC0 | encode);
  57.333  }
  57.334  
  57.335  void Assembler::aesenclast(XMMRegister dst, Address src) {
  57.336    assert(VM_Version::supports_aes(), "");
  57.337    InstructionMark im(this);
  57.338    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.339 -  emit_byte(0xdd);
  57.340 +  emit_int8((unsigned char)0xDD);
  57.341    emit_operand(dst, src);
  57.342  }
  57.343  
  57.344  void Assembler::aesenclast(XMMRegister dst, XMMRegister src) {
  57.345    assert(VM_Version::supports_aes(), "");
  57.346    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
  57.347 -  emit_byte(0xdd);
  57.348 -  emit_byte(0xC0 | encode);
  57.349 +  emit_int8((unsigned char)0xDD);
  57.350 +  emit_int8((unsigned char)(0xC0 | encode));
  57.351  }
  57.352  
  57.353  
  57.354  void Assembler::andl(Address dst, int32_t imm32) {
  57.355    InstructionMark im(this);
  57.356    prefix(dst);
  57.357 -  emit_byte(0x81);
  57.358 +  emit_int8((unsigned char)0x81);
  57.359    emit_operand(rsp, dst, 4);
  57.360    emit_long(imm32);
  57.361  }
  57.362 @@ -1085,7 +1087,7 @@
  57.363  void Assembler::andl(Register dst, Address src) {
  57.364    InstructionMark im(this);
  57.365    prefix(src, dst);
  57.366 -  emit_byte(0x23);
  57.367 +  emit_int8(0x23);
  57.368    emit_operand(dst, src);
  57.369  }
  57.370  
  57.371 @@ -1096,23 +1098,23 @@
  57.372  
  57.373  void Assembler::bsfl(Register dst, Register src) {
  57.374    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.375 -  emit_byte(0x0F);
  57.376 -  emit_byte(0xBC);
  57.377 -  emit_byte(0xC0 | encode);
  57.378 +  emit_int8(0x0F);
  57.379 +  emit_int8((unsigned char)0xBC);
  57.380 +  emit_int8((unsigned char)(0xC0 | encode));
  57.381  }
  57.382  
  57.383  void Assembler::bsrl(Register dst, Register src) {
  57.384    assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  57.385    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.386 -  emit_byte(0x0F);
  57.387 -  emit_byte(0xBD);
  57.388 -  emit_byte(0xC0 | encode);
  57.389 +  emit_int8(0x0F);
  57.390 +  emit_int8((unsigned char)0xBD);
  57.391 +  emit_int8((unsigned char)(0xC0 | encode));
  57.392  }
  57.393  
  57.394  void Assembler::bswapl(Register reg) { // bswap
  57.395    int encode = prefix_and_encode(reg->encoding());
  57.396 -  emit_byte(0x0F);
  57.397 -  emit_byte(0xC8 | encode);
  57.398 +  emit_int8(0x0F);
  57.399 +  emit_int8((unsigned char)(0xC8 | encode));
  57.400  }
  57.401  
  57.402  void Assembler::call(Label& L, relocInfo::relocType rtype) {
  57.403 @@ -1125,36 +1127,36 @@
  57.404      assert(offs <= 0, "assembler error");
  57.405      InstructionMark im(this);
  57.406      // 1110 1000 #32-bit disp
  57.407 -    emit_byte(0xE8);
  57.408 +    emit_int8((unsigned char)0xE8);
  57.409      emit_data(offs - long_size, rtype, operand);
  57.410    } else {
  57.411      InstructionMark im(this);
  57.412      // 1110 1000 #32-bit disp
  57.413      L.add_patch_at(code(), locator());
  57.414  
  57.415 -    emit_byte(0xE8);
  57.416 +    emit_int8((unsigned char)0xE8);
  57.417      emit_data(int(0), rtype, operand);
  57.418    }
  57.419  }
  57.420  
  57.421  void Assembler::call(Register dst) {
  57.422    int encode = prefix_and_encode(dst->encoding());
  57.423 -  emit_byte(0xFF);
  57.424 -  emit_byte(0xD0 | encode);
  57.425 +  emit_int8((unsigned char)0xFF);
  57.426 +  emit_int8((unsigned char)(0xD0 | encode));
  57.427  }
  57.428  
  57.429  
  57.430  void Assembler::call(Address adr) {
  57.431    InstructionMark im(this);
  57.432    prefix(adr);
  57.433 -  emit_byte(0xFF);
  57.434 +  emit_int8((unsigned char)0xFF);
  57.435    emit_operand(rdx, adr);
  57.436  }
  57.437  
  57.438  void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  57.439    assert(entry != NULL, "call most probably wrong");
  57.440    InstructionMark im(this);
  57.441 -  emit_byte(0xE8);
  57.442 +  emit_int8((unsigned char)0xE8);
  57.443    intptr_t disp = entry - (pc() + sizeof(int32_t));
  57.444    assert(is_simm32(disp), "must be 32bit offset (call2)");
  57.445    // Technically, should use call32_operand, but this format is
  57.446 @@ -1165,42 +1167,42 @@
  57.447  }
  57.448  
  57.449  void Assembler::cdql() {
  57.450 -  emit_byte(0x99);
  57.451 +  emit_int8((unsigned char)0x99);
  57.452  }
  57.453  
  57.454  void Assembler::cld() {
  57.455 -  emit_byte(0xfc);
  57.456 +  emit_int8((unsigned char)0xFC);
  57.457  }
  57.458  
  57.459  void Assembler::cmovl(Condition cc, Register dst, Register src) {
  57.460    NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  57.461    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.462 -  emit_byte(0x0F);
  57.463 -  emit_byte(0x40 | cc);
  57.464 -  emit_byte(0xC0 | encode);
  57.465 +  emit_int8(0x0F);
  57.466 +  emit_int8(0x40 | cc);
  57.467 +  emit_int8((unsigned char)(0xC0 | encode));
  57.468  }
  57.469  
  57.470  
  57.471  void Assembler::cmovl(Condition cc, Register dst, Address src) {
  57.472    NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  57.473    prefix(src, dst);
  57.474 -  emit_byte(0x0F);
  57.475 -  emit_byte(0x40 | cc);
  57.476 +  emit_int8(0x0F);
  57.477 +  emit_int8(0x40 | cc);
  57.478    emit_operand(dst, src);
  57.479  }
  57.480  
  57.481  void Assembler::cmpb(Address dst, int imm8) {
  57.482    InstructionMark im(this);
  57.483    prefix(dst);
  57.484 -  emit_byte(0x80);
  57.485 +  emit_int8((unsigned char)0x80);
  57.486    emit_operand(rdi, dst, 1);
  57.487 -  emit_byte(imm8);
  57.488 +  emit_int8(imm8);
  57.489  }
  57.490  
  57.491  void Assembler::cmpl(Address dst, int32_t imm32) {
  57.492    InstructionMark im(this);
  57.493    prefix(dst);
  57.494 -  emit_byte(0x81);
  57.495 +  emit_int8((unsigned char)0x81);
  57.496    emit_operand(rdi, dst, 4);
  57.497    emit_long(imm32);
  57.498  }
  57.499 @@ -1219,17 +1221,17 @@
  57.500  void Assembler::cmpl(Register dst, Address  src) {
  57.501    InstructionMark im(this);
  57.502    prefix(src, dst);
  57.503 -  emit_byte(0x3B);
  57.504 +  emit_int8((unsigned char)0x3B);
  57.505    emit_operand(dst, src);
  57.506  }
  57.507  
  57.508  void Assembler::cmpw(Address dst, int imm16) {
  57.509    InstructionMark im(this);
  57.510    assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  57.511 -  emit_byte(0x66);
  57.512 -  emit_byte(0x81);
  57.513 +  emit_int8(0x66);
  57.514 +  emit_int8((unsigned char)0x81);
  57.515    emit_operand(rdi, dst, 2);
  57.516 -  emit_word(imm16);
  57.517 +  emit_int16(imm16);
  57.518  }
  57.519  
  57.520  // The 32-bit cmpxchg compares the value at adr with the contents of rax,
  57.521 @@ -1238,8 +1240,8 @@
  57.522  void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  57.523    InstructionMark im(this);
  57.524    prefix(adr, reg);
  57.525 -  emit_byte(0x0F);
  57.526 -  emit_byte(0xB1);
  57.527 +  emit_int8(0x0F);
  57.528 +  emit_int8((unsigned char)0xB1);
  57.529    emit_operand(reg, adr);
  57.530  }
  57.531  
  57.532 @@ -1266,8 +1268,8 @@
  57.533  }
  57.534  
  57.535  void Assembler::cpuid() {
  57.536 -  emit_byte(0x0F);
  57.537 -  emit_byte(0xA2);
  57.538 +  emit_int8(0x0F);
  57.539 +  emit_int8((unsigned char)0xA2);
  57.540  }
  57.541  
  57.542  void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  57.543 @@ -1293,8 +1295,8 @@
  57.544  void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  57.545    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.546    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  57.547 -  emit_byte(0x2A);
  57.548 -  emit_byte(0xC0 | encode);
  57.549 +  emit_int8(0x2A);
  57.550 +  emit_int8((unsigned char)(0xC0 | encode));
  57.551  }
  57.552  
  57.553  void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  57.554 @@ -1305,8 +1307,8 @@
  57.555  void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  57.556    NOT_LP64(assert(VM_Version::supports_sse(), ""));
  57.557    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  57.558 -  emit_byte(0x2A);
  57.559 -  emit_byte(0xC0 | encode);
  57.560 +  emit_int8(0x2A);
  57.561 +  emit_int8((unsigned char)(0xC0 | encode));
  57.562  }
  57.563  
  57.564  void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  57.565 @@ -1328,22 +1330,22 @@
  57.566  void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  57.567    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.568    int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  57.569 -  emit_byte(0x2C);
  57.570 -  emit_byte(0xC0 | encode);
  57.571 +  emit_int8(0x2C);
  57.572 +  emit_int8((unsigned char)(0xC0 | encode));
  57.573  }
  57.574  
  57.575  void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  57.576    NOT_LP64(assert(VM_Version::supports_sse(), ""));
  57.577    int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  57.578 -  emit_byte(0x2C);
  57.579 -  emit_byte(0xC0 | encode);
  57.580 +  emit_int8(0x2C);
  57.581 +  emit_int8((unsigned char)(0xC0 | encode));
  57.582  }
  57.583  
  57.584  void Assembler::decl(Address dst) {
  57.585    // Don't use it directly. Use MacroAssembler::decrement() instead.
  57.586    InstructionMark im(this);
  57.587    prefix(dst);
  57.588 -  emit_byte(0xFF);
  57.589 +  emit_int8((unsigned char)0xFF);
  57.590    emit_operand(rcx, dst);
  57.591  }
  57.592  
  57.593 @@ -1369,43 +1371,43 @@
  57.594  
  57.595  void Assembler::emms() {
  57.596    NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  57.597 -  emit_byte(0x0F);
  57.598 -  emit_byte(0x77);
  57.599 +  emit_int8(0x0F);
  57.600 +  emit_int8(0x77);
  57.601  }
  57.602  
  57.603  void Assembler::hlt() {
  57.604 -  emit_byte(0xF4);
  57.605 +  emit_int8((unsigned char)0xF4);
  57.606  }
  57.607  
  57.608  void Assembler::idivl(Register src) {
  57.609    int encode = prefix_and_encode(src->encoding());
  57.610 -  emit_byte(0xF7);
  57.611 -  emit_byte(0xF8 | encode);
  57.612 +  emit_int8((unsigned char)0xF7);
  57.613 +  emit_int8((unsigned char)(0xF8 | encode));
  57.614  }
  57.615  
  57.616  void Assembler::divl(Register src) { // Unsigned
  57.617    int encode = prefix_and_encode(src->encoding());
  57.618 -  emit_byte(0xF7);
  57.619 -  emit_byte(0xF0 | encode);
  57.620 +  emit_int8((unsigned char)0xF7);
  57.621 +  emit_int8((unsigned char)(0xF0 | encode));
  57.622  }
  57.623  
  57.624  void Assembler::imull(Register dst, Register src) {
  57.625    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.626 -  emit_byte(0x0F);
  57.627 -  emit_byte(0xAF);
  57.628 -  emit_byte(0xC0 | encode);
  57.629 +  emit_int8(0x0F);
  57.630 +  emit_int8((unsigned char)0xAF);
  57.631 +  emit_int8((unsigned char)(0xC0 | encode));
  57.632  }
  57.633  
  57.634  
  57.635  void Assembler::imull(Register dst, Register src, int value) {
  57.636    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.637    if (is8bit(value)) {
  57.638 -    emit_byte(0x6B);
  57.639 -    emit_byte(0xC0 | encode);
  57.640 -    emit_byte(value & 0xFF);
  57.641 +    emit_int8(0x6B);
  57.642 +    emit_int8((unsigned char)(0xC0 | encode));
  57.643 +    emit_int8(value & 0xFF);
  57.644    } else {
  57.645 -    emit_byte(0x69);
  57.646 -    emit_byte(0xC0 | encode);
  57.647 +    emit_int8(0x69);
  57.648 +    emit_int8((unsigned char)(0xC0 | encode));
  57.649      emit_long(value);
  57.650    }
  57.651  }
  57.652 @@ -1414,7 +1416,7 @@
  57.653    // Don't use it directly. Use MacroAssembler::increment() instead.
  57.654    InstructionMark im(this);
  57.655    prefix(dst);
  57.656 -  emit_byte(0xFF);
  57.657 +  emit_int8((unsigned char)0xFF);
  57.658    emit_operand(rax, dst);
  57.659  }
  57.660  
  57.661 @@ -1430,14 +1432,14 @@
  57.662      intptr_t offs = (intptr_t)dst - (intptr_t)pc();
  57.663      if (maybe_short && is8bit(offs - short_size)) {
  57.664        // 0111 tttn #8-bit disp
  57.665 -      emit_byte(0x70 | cc);
  57.666 -      emit_byte((offs - short_size) & 0xFF);
  57.667 +      emit_int8(0x70 | cc);
  57.668 +      emit_int8((offs - short_size) & 0xFF);
  57.669      } else {
  57.670        // 0000 1111 1000 tttn #32-bit disp
  57.671        assert(is_simm32(offs - long_size),
  57.672               "must be 32bit offset (call4)");
  57.673 -      emit_byte(0x0F);
  57.674 -      emit_byte(0x80 | cc);
  57.675 +      emit_int8(0x0F);
  57.676 +      emit_int8((unsigned char)(0x80 | cc));
  57.677        emit_long(offs - long_size);
  57.678      }
  57.679    } else {
  57.680 @@ -1446,8 +1448,8 @@
  57.681      // Note: use jccb() if label to be bound is very close to get
  57.682      //       an 8-bit displacement
  57.683      L.add_patch_at(code(), locator());
  57.684 -    emit_byte(0x0F);
  57.685 -    emit_byte(0x80 | cc);
  57.686 +    emit_int8(0x0F);
  57.687 +    emit_int8((unsigned char)(0x80 | cc));
  57.688      emit_long(0);
  57.689    }
  57.690  }
  57.691 @@ -1466,20 +1468,20 @@
  57.692  #endif
  57.693      intptr_t offs = (intptr_t)entry - (intptr_t)pc();
  57.694      // 0111 tttn #8-bit disp
  57.695 -    emit_byte(0x70 | cc);
  57.696 -    emit_byte((offs - short_size) & 0xFF);
  57.697 +    emit_int8(0x70 | cc);
  57.698 +    emit_int8((offs - short_size) & 0xFF);
  57.699    } else {
  57.700      InstructionMark im(this);
  57.701      L.add_patch_at(code(), locator());
  57.702 -    emit_byte(0x70 | cc);
  57.703 -    emit_byte(0);
  57.704 +    emit_int8(0x70 | cc);
  57.705 +    emit_int8(0);
  57.706    }
  57.707  }
  57.708  
  57.709  void Assembler::jmp(Address adr) {
  57.710    InstructionMark im(this);
  57.711    prefix(adr);
  57.712 -  emit_byte(0xFF);
  57.713 +  emit_int8((unsigned char)0xFF);
  57.714    emit_operand(rsp, adr);
  57.715  }
  57.716  
  57.717 @@ -1492,10 +1494,10 @@
  57.718      const int long_size = 5;
  57.719      intptr_t offs = entry - pc();
  57.720      if (maybe_short && is8bit(offs - short_size)) {
  57.721 -      emit_byte(0xEB);
  57.722 -      emit_byte((offs - short_size) & 0xFF);
  57.723 +      emit_int8((unsigned char)0xEB);
  57.724 +      emit_int8((offs - short_size) & 0xFF);
  57.725      } else {
  57.726 -      emit_byte(0xE9);
  57.727 +      emit_int8((unsigned char)0xE9);
  57.728        emit_long(offs - long_size);
  57.729      }
  57.730    } else {
  57.731 @@ -1505,20 +1507,20 @@
  57.732      // force an 8-bit displacement.
  57.733      InstructionMark im(this);
  57.734      L.add_patch_at(code(), locator());
  57.735 -    emit_byte(0xE9);
  57.736 +    emit_int8((unsigned char)0xE9);
  57.737      emit_long(0);
  57.738    }
  57.739  }
  57.740  
  57.741  void Assembler::jmp(Register entry) {
  57.742    int encode = prefix_and_encode(entry->encoding());
  57.743 -  emit_byte(0xFF);
  57.744 -  emit_byte(0xE0 | encode);
  57.745 +  emit_int8((unsigned char)0xFF);
  57.746 +  emit_int8((unsigned char)(0xE0 | encode));
  57.747  }
  57.748  
  57.749  void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  57.750    InstructionMark im(this);
  57.751 -  emit_byte(0xE9);
  57.752 +  emit_int8((unsigned char)0xE9);
  57.753    assert(dest != NULL, "must have a target");
  57.754    intptr_t disp = dest - (pc() + sizeof(int32_t));
  57.755    assert(is_simm32(disp), "must be 32bit offset (jmp)");
  57.756 @@ -1539,13 +1541,13 @@
  57.757      assert(is8bit(dist), "Dispacement too large for a short jmp");
  57.758  #endif
  57.759      intptr_t offs = entry - pc();
  57.760 -    emit_byte(0xEB);
  57.761 -    emit_byte((offs - short_size) & 0xFF);
  57.762 +    emit_int8((unsigned char)0xEB);
  57.763 +    emit_int8((offs - short_size) & 0xFF);
  57.764    } else {
  57.765      InstructionMark im(this);
  57.766      L.add_patch_at(code(), locator());
  57.767 -    emit_byte(0xEB);
  57.768 -    emit_byte(0);
  57.769 +    emit_int8((unsigned char)0xEB);
  57.770 +    emit_int8(0);
  57.771    }
  57.772  }
  57.773  
  57.774 @@ -1553,46 +1555,46 @@
  57.775    NOT_LP64(assert(VM_Version::supports_sse(), ""));
  57.776    InstructionMark im(this);
  57.777    prefix(src);
  57.778 -  emit_byte(0x0F);
  57.779 -  emit_byte(0xAE);
  57.780 +  emit_int8(0x0F);
  57.781 +  emit_int8((unsigned char)0xAE);
  57.782    emit_operand(as_Register(2), src);
  57.783  }
  57.784  
  57.785  void Assembler::leal(Register dst, Address src) {
  57.786    InstructionMark im(this);
  57.787  #ifdef _LP64
  57.788 -  emit_byte(0x67); // addr32
  57.789 +  emit_int8(0x67); // addr32
  57.790    prefix(src, dst);
  57.791  #endif // LP64
  57.792 -  emit_byte(0x8D);
  57.793 +  emit_int8((unsigned char)0x8D);
  57.794    emit_operand(dst, src);
  57.795  }
  57.796  
  57.797  void Assembler::lfence() {
  57.798 -  emit_byte(0x0F);
  57.799 -  emit_byte(0xAE);
  57.800 -  emit_byte(0xE8);
  57.801 +  emit_int8(0x0F);
  57.802 +  emit_int8((unsigned char)0xAE);
  57.803 +  emit_int8((unsigned char)0xE8);
  57.804  }
  57.805  
  57.806  void Assembler::lock() {
  57.807 -  emit_byte(0xF0);
  57.808 +  emit_int8((unsigned char)0xF0);
  57.809  }
  57.810  
  57.811  void Assembler::lzcntl(Register dst, Register src) {
  57.812    assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  57.813 -  emit_byte(0xF3);
  57.814 +  emit_int8((unsigned char)0xF3);
  57.815    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.816 -  emit_byte(0x0F);
  57.817 -  emit_byte(0xBD);
  57.818 -  emit_byte(0xC0 | encode);
  57.819 +  emit_int8(0x0F);
  57.820 +  emit_int8((unsigned char)0xBD);
  57.821 +  emit_int8((unsigned char)(0xC0 | encode));
  57.822  }
  57.823  
  57.824  // Emit mfence instruction
  57.825  void Assembler::mfence() {
  57.826    NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  57.827 -  emit_byte( 0x0F );
  57.828 -  emit_byte( 0xAE );
  57.829 -  emit_byte( 0xF0 );
  57.830 +  emit_int8(0x0F);
  57.831 +  emit_int8((unsigned char)0xAE);
  57.832 +  emit_int8((unsigned char)0xF0);
  57.833  }
  57.834  
  57.835  void Assembler::mov(Register dst, Register src) {
  57.836 @@ -1612,15 +1614,15 @@
  57.837  void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  57.838    NOT_LP64(assert(VM_Version::supports_sse(), ""));
  57.839    int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  57.840 -  emit_byte(0x16);
  57.841 -  emit_byte(0xC0 | encode);
  57.842 +  emit_int8(0x16);
  57.843 +  emit_int8((unsigned char)(0xC0 | encode));
  57.844  }
  57.845  
  57.846  void Assembler::movb(Register dst, Address src) {
  57.847    NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  57.848    InstructionMark im(this);
  57.849    prefix(src, dst, true);
  57.850 -  emit_byte(0x8A);
  57.851 +  emit_int8((unsigned char)0x8A);
  57.852    emit_operand(dst, src);
  57.853  }
  57.854  
  57.855 @@ -1628,9 +1630,9 @@
  57.856  void Assembler::movb(Address dst, int imm8) {
  57.857    InstructionMark im(this);
  57.858     prefix(dst);
  57.859 -  emit_byte(0xC6);
  57.860 +  emit_int8((unsigned char)0xC6);
  57.861    emit_operand(rax, dst, 1);
  57.862 -  emit_byte(imm8);
  57.863 +  emit_int8(imm8);
  57.864  }
  57.865  
  57.866  
  57.867 @@ -1638,30 +1640,30 @@
  57.868    assert(src->has_byte_register(), "must have byte register");
  57.869    InstructionMark im(this);
  57.870    prefix(dst, src, true);
  57.871 -  emit_byte(0x88);
  57.872 +  emit_int8((unsigned char)0x88);
  57.873    emit_operand(src, dst);
  57.874  }
  57.875  
  57.876  void Assembler::movdl(XMMRegister dst, Register src) {
  57.877    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.878    int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  57.879 -  emit_byte(0x6E);
  57.880 -  emit_byte(0xC0 | encode);
  57.881 +  emit_int8(0x6E);
  57.882 +  emit_int8((unsigned char)(0xC0 | encode));
  57.883  }
  57.884  
  57.885  void Assembler::movdl(Register dst, XMMRegister src) {
  57.886    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.887    // swap src/dst to get correct prefix
  57.888    int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  57.889 -  emit_byte(0x7E);
  57.890 -  emit_byte(0xC0 | encode);
  57.891 +  emit_int8(0x7E);
  57.892 +  emit_int8((unsigned char)(0xC0 | encode));
  57.893  }
  57.894  
  57.895  void Assembler::movdl(XMMRegister dst, Address src) {
  57.896    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.897    InstructionMark im(this);
  57.898    simd_prefix(dst, src, VEX_SIMD_66);
  57.899 -  emit_byte(0x6E);
  57.900 +  emit_int8(0x6E);
  57.901    emit_operand(dst, src);
  57.902  }
  57.903  
  57.904 @@ -1669,7 +1671,7 @@
  57.905    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.906    InstructionMark im(this);
  57.907    simd_prefix(dst, src, VEX_SIMD_66);
  57.908 -  emit_byte(0x7E);
  57.909 +  emit_int8(0x7E);
  57.910    emit_operand(src, dst);
  57.911  }
  57.912  
  57.913 @@ -1692,7 +1694,7 @@
  57.914    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  57.915    InstructionMark im(this);
  57.916    simd_prefix(dst, src, VEX_SIMD_F3);
  57.917 -  emit_byte(0x7F);
  57.918 +  emit_int8(0x7F);
  57.919    emit_operand(src, dst);
  57.920  }
  57.921  
  57.922 @@ -1701,8 +1703,8 @@
  57.923    assert(UseAVX, "");
  57.924    bool vector256 = true;
  57.925    int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  57.926 -  emit_byte(0x6F);
  57.927 -  emit_byte(0xC0 | encode);
  57.928 +  emit_int8(0x6F);
  57.929 +  emit_int8((unsigned char)(0xC0 | encode));
  57.930  }
  57.931  
  57.932  void Assembler::vmovdqu(XMMRegister dst, Address src) {
  57.933 @@ -1710,7 +1712,7 @@
  57.934    InstructionMark im(this);
  57.935    bool vector256 = true;
  57.936    vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  57.937 -  emit_byte(0x6F);
  57.938 +  emit_int8(0x6F);
  57.939    emit_operand(dst, src);
  57.940  }
  57.941  
  57.942 @@ -1721,7 +1723,7 @@
  57.943    // swap src<->dst for encoding
  57.944    assert(src != xnoreg, "sanity");
  57.945    vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  57.946 -  emit_byte(0x7F);
  57.947 +  emit_int8(0x7F);
  57.948    emit_operand(src, dst);
  57.949  }
  57.950  
  57.951 @@ -1729,27 +1731,27 @@
  57.952  
  57.953  void Assembler::movl(Register dst, int32_t imm32) {
  57.954    int encode = prefix_and_encode(dst->encoding());
  57.955 -  emit_byte(0xB8 | encode);
  57.956 +  emit_int8((unsigned char)(0xB8 | encode));
  57.957    emit_long(imm32);
  57.958  }
  57.959  
  57.960  void Assembler::movl(Register dst, Register src) {
  57.961    int encode = prefix_and_encode(dst->encoding(), src->encoding());
  57.962 -  emit_byte(0x8B);
  57.963 -  emit_byte(0xC0 | encode);
  57.964 +  emit_int8((unsigned char)0x8B);
  57.965 +  emit_int8((unsigned char)(0xC0 | encode));
  57.966  }
  57.967  
  57.968  void Assembler::movl(Register dst, Address src) {
  57.969    InstructionMark im(this);
  57.970    prefix(src, dst);
  57.971 -  emit_byte(0x8B);
  57.972 +  emit_int8((unsigned char)0x8B);
  57.973    emit_operand(dst, src);
  57.974  }
  57.975  
  57.976  void Assembler::movl(Address dst, int32_t imm32) {
  57.977    InstructionMark im(this);
  57.978    prefix(dst);
  57.979 -  emit_byte(0xC7);
  57.980 +  emit_int8((unsigned char)0xC7);
  57.981    emit_operand(rax, dst, 4);
  57.982    emit_long(imm32);
  57.983  }
  57.984 @@ -1757,7 +1759,7 @@
  57.985  void Assembler::movl(Address dst, Register src) {
  57.986    InstructionMark im(this);
  57.987    prefix(dst, src);
  57.988 -  emit_byte(0x89);
  57.989 +  emit_int8((unsigned char)0x89);
  57.990    emit_operand(src, dst);
  57.991  }
  57.992  
  57.993 @@ -1771,15 +1773,15 @@
  57.994  
  57.995  void Assembler::movq( MMXRegister dst, Address src ) {
  57.996    assert( VM_Version::supports_mmx(), "" );
  57.997 -  emit_byte(0x0F);
  57.998 -  emit_byte(0x6F);
  57.999 +  emit_int8(0x0F);
 57.1000 +  emit_int8(0x6F);
 57.1001    emit_operand(dst, src);
 57.1002  }
 57.1003  
 57.1004  void Assembler::movq( Address dst, MMXRegister src ) {
 57.1005    assert( VM_Version::supports_mmx(), "" );
 57.1006 -  emit_byte(0x0F);
 57.1007 -  emit_byte(0x7F);
 57.1008 +  emit_int8(0x0F);
 57.1009 +  emit_int8(0x7F);
 57.1010    // workaround gcc (3.2.1-7a) bug
 57.1011    // In that version of gcc with only an emit_operand(MMX, Address)
 57.1012    // gcc will tail jump and try and reverse the parameters completely
 57.1013 @@ -1793,7 +1795,7 @@
 57.1014    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1015    InstructionMark im(this);
 57.1016    simd_prefix(dst, src, VEX_SIMD_F3);
 57.1017 -  emit_byte(0x7E);
 57.1018 +  emit_int8(0x7E);
 57.1019    emit_operand(dst, src);
 57.1020  }
 57.1021  
 57.1022 @@ -1801,24 +1803,24 @@
 57.1023    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1024    InstructionMark im(this);
 57.1025    simd_prefix(dst, src, VEX_SIMD_66);
 57.1026 -  emit_byte(0xD6);
 57.1027 +  emit_int8((unsigned char)0xD6);
 57.1028    emit_operand(src, dst);
 57.1029  }
 57.1030  
 57.1031  void Assembler::movsbl(Register dst, Address src) { // movsxb
 57.1032    InstructionMark im(this);
 57.1033    prefix(src, dst);
 57.1034 -  emit_byte(0x0F);
 57.1035 -  emit_byte(0xBE);
 57.1036 +  emit_int8(0x0F);
 57.1037 +  emit_int8((unsigned char)0xBE);
 57.1038    emit_operand(dst, src);
 57.1039  }
 57.1040  
 57.1041  void Assembler::movsbl(Register dst, Register src) { // movsxb
 57.1042    NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
 57.1043    int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
 57.1044 -  emit_byte(0x0F);
 57.1045 -  emit_byte(0xBE);
 57.1046 -  emit_byte(0xC0 | encode);
 57.1047 +  emit_int8(0x0F);
 57.1048 +  emit_int8((unsigned char)0xBE);
 57.1049 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1050  }
 57.1051  
 57.1052  void Assembler::movsd(XMMRegister dst, XMMRegister src) {
 57.1053 @@ -1835,7 +1837,7 @@
 57.1054    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1055    InstructionMark im(this);
 57.1056    simd_prefix(dst, src, VEX_SIMD_F2);
 57.1057 -  emit_byte(0x11);
 57.1058 +  emit_int8(0x11);
 57.1059    emit_operand(src, dst);
 57.1060  }
 57.1061  
 57.1062 @@ -1853,93 +1855,93 @@
 57.1063    NOT_LP64(assert(VM_Version::supports_sse(), ""));
 57.1064    InstructionMark im(this);
 57.1065    simd_prefix(dst, src, VEX_SIMD_F3);
 57.1066 -  emit_byte(0x11);
 57.1067 +  emit_int8(0x11);
 57.1068    emit_operand(src, dst);
 57.1069  }
 57.1070  
 57.1071  void Assembler::movswl(Register dst, Address src) { // movsxw
 57.1072    InstructionMark im(this);
 57.1073    prefix(src, dst);
 57.1074 -  emit_byte(0x0F);
 57.1075 -  emit_byte(0xBF);
 57.1076 +  emit_int8(0x0F);
 57.1077 +  emit_int8((unsigned char)0xBF);
 57.1078    emit_operand(dst, src);
 57.1079  }
 57.1080  
 57.1081  void Assembler::movswl(Register dst, Register src) { // movsxw
 57.1082    int encode = prefix_and_encode(dst->encoding(), src->encoding());
 57.1083 -  emit_byte(0x0F);
 57.1084 -  emit_byte(0xBF);
 57.1085 -  emit_byte(0xC0 | encode);
 57.1086 +  emit_int8(0x0F);
 57.1087 +  emit_int8((unsigned char)0xBF);
 57.1088 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1089  }
 57.1090  
 57.1091  void Assembler::movw(Address dst, int imm16) {
 57.1092    InstructionMark im(this);
 57.1093  
 57.1094 -  emit_byte(0x66); // switch to 16-bit mode
 57.1095 +  emit_int8(0x66); // switch to 16-bit mode
 57.1096    prefix(dst);
 57.1097 -  emit_byte(0xC7);
 57.1098 +  emit_int8((unsigned char)0xC7);
 57.1099    emit_operand(rax, dst, 2);
 57.1100 -  emit_word(imm16);
 57.1101 +  emit_int16(imm16);
 57.1102  }
 57.1103  
 57.1104  void Assembler::movw(Register dst, Address src) {
 57.1105    InstructionMark im(this);
 57.1106 -  emit_byte(0x66);
 57.1107 +  emit_int8(0x66);
 57.1108    prefix(src, dst);
 57.1109 -  emit_byte(0x8B);
 57.1110 +  emit_int8((unsigned char)0x8B);
 57.1111    emit_operand(dst, src);
 57.1112  }
 57.1113  
 57.1114  void Assembler::movw(Address dst, Register src) {
 57.1115    InstructionMark im(this);
 57.1116 -  emit_byte(0x66);
 57.1117 +  emit_int8(0x66);
 57.1118    prefix(dst, src);
 57.1119 -  emit_byte(0x89);
 57.1120 +  emit_int8((unsigned char)0x89);
 57.1121    emit_operand(src, dst);
 57.1122  }
 57.1123  
 57.1124  void Assembler::movzbl(Register dst, Address src) { // movzxb
 57.1125    InstructionMark im(this);
 57.1126    prefix(src, dst);
 57.1127 -  emit_byte(0x0F);
 57.1128 -  emit_byte(0xB6);
 57.1129 +  emit_int8(0x0F);
 57.1130 +  emit_int8((unsigned char)0xB6);
 57.1131    emit_operand(dst, src);
 57.1132  }
 57.1133  
 57.1134  void Assembler::movzbl(Register dst, Register src) { // movzxb
 57.1135    NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
 57.1136    int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
 57.1137 -  emit_byte(0x0F);
 57.1138 -  emit_byte(0xB6);
 57.1139 -  emit_byte(0xC0 | encode);
 57.1140 +  emit_int8(0x0F);
 57.1141 +  emit_int8((unsigned char)0xB6);
  57.1142 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1143  }
 57.1144  
 57.1145  void Assembler::movzwl(Register dst, Address src) { // movzxw
 57.1146    InstructionMark im(this);
 57.1147    prefix(src, dst);
 57.1148 -  emit_byte(0x0F);
 57.1149 -  emit_byte(0xB7);
 57.1150 +  emit_int8(0x0F);
 57.1151 +  emit_int8((unsigned char)0xB7);
 57.1152    emit_operand(dst, src);
 57.1153  }
 57.1154  
 57.1155  void Assembler::movzwl(Register dst, Register src) { // movzxw
 57.1156    int encode = prefix_and_encode(dst->encoding(), src->encoding());
 57.1157 -  emit_byte(0x0F);
 57.1158 -  emit_byte(0xB7);
 57.1159 -  emit_byte(0xC0 | encode);
 57.1160 +  emit_int8(0x0F);
 57.1161 +  emit_int8((unsigned char)0xB7);
  57.1162 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1163  }
 57.1164  
 57.1165  void Assembler::mull(Address src) {
 57.1166    InstructionMark im(this);
 57.1167    prefix(src);
 57.1168 -  emit_byte(0xF7);
 57.1169 +  emit_int8((unsigned char)0xF7);
 57.1170    emit_operand(rsp, src);
 57.1171  }
 57.1172  
 57.1173  void Assembler::mull(Register src) {
 57.1174    int encode = prefix_and_encode(src->encoding());
 57.1175 -  emit_byte(0xF7);
 57.1176 -  emit_byte(0xE0 | encode);
 57.1177 +  emit_int8((unsigned char)0xF7);
 57.1178 +  emit_int8((unsigned char)(0xE0 | encode));
 57.1179  }
 57.1180  
 57.1181  void Assembler::mulsd(XMMRegister dst, Address src) {
 57.1182 @@ -1964,8 +1966,8 @@
 57.1183  
 57.1184  void Assembler::negl(Register dst) {
 57.1185    int encode = prefix_and_encode(dst->encoding());
 57.1186 -  emit_byte(0xF7);
 57.1187 -  emit_byte(0xD8 | encode);
 57.1188 +  emit_int8((unsigned char)0xF7);
 57.1189 +  emit_int8((unsigned char)(0xD8 | encode));
 57.1190  }
 57.1191  
 57.1192  void Assembler::nop(int i) {
 57.1193 @@ -1976,7 +1978,7 @@
 57.1194    // speed is not an issue so simply use the single byte traditional nop
 57.1195    // to do alignment.
 57.1196  
 57.1197 -  for (; i > 0 ; i--) emit_byte(0x90);
 57.1198 +  for (; i > 0 ; i--) emit_int8((unsigned char)0x90);
 57.1199    return;
 57.1200  
 57.1201  #endif // ASSERT
 57.1202 @@ -2006,33 +2008,35 @@
 57.1203      while(i >= 15) {
 57.1204        // For Intel don't generate consecutive addess nops (mix with regular nops)
 57.1205        i -= 15;
 57.1206 -      emit_byte(0x66);   // size prefix
 57.1207 -      emit_byte(0x66);   // size prefix
 57.1208 -      emit_byte(0x66);   // size prefix
 57.1209 +      emit_int8(0x66);   // size prefix
 57.1210 +      emit_int8(0x66);   // size prefix
 57.1211 +      emit_int8(0x66);   // size prefix
 57.1212        addr_nop_8();
 57.1213 -      emit_byte(0x66);   // size prefix
 57.1214 -      emit_byte(0x66);   // size prefix
 57.1215 -      emit_byte(0x66);   // size prefix
 57.1216 -      emit_byte(0x90);   // nop
 57.1217 +      emit_int8(0x66);   // size prefix
 57.1218 +      emit_int8(0x66);   // size prefix
 57.1219 +      emit_int8(0x66);   // size prefix
 57.1220 +      emit_int8((unsigned char)0x90);
 57.1221 +                         // nop
 57.1222      }
 57.1223      switch (i) {
 57.1224        case 14:
 57.1225 -        emit_byte(0x66); // size prefix
 57.1226 +        emit_int8(0x66); // size prefix
 57.1227        case 13:
 57.1228 -        emit_byte(0x66); // size prefix
 57.1229 +        emit_int8(0x66); // size prefix
 57.1230        case 12:
 57.1231          addr_nop_8();
 57.1232 -        emit_byte(0x66); // size prefix
 57.1233 -        emit_byte(0x66); // size prefix
 57.1234 -        emit_byte(0x66); // size prefix
 57.1235 -        emit_byte(0x90); // nop
 57.1236 +        emit_int8(0x66); // size prefix
 57.1237 +        emit_int8(0x66); // size prefix
 57.1238 +        emit_int8(0x66); // size prefix
 57.1239 +        emit_int8((unsigned char)0x90);
 57.1240 +                         // nop
 57.1241          break;
 57.1242        case 11:
 57.1243 -        emit_byte(0x66); // size prefix
 57.1244 +        emit_int8(0x66); // size prefix
 57.1245        case 10:
 57.1246 -        emit_byte(0x66); // size prefix
 57.1247 +        emit_int8(0x66); // size prefix
 57.1248        case 9:
 57.1249 -        emit_byte(0x66); // size prefix
 57.1250 +        emit_int8(0x66); // size prefix
 57.1251        case 8:
 57.1252          addr_nop_8();
 57.1253          break;
 57.1254 @@ -2040,7 +2044,7 @@
 57.1255          addr_nop_7();
 57.1256          break;
 57.1257        case 6:
 57.1258 -        emit_byte(0x66); // size prefix
 57.1259 +        emit_int8(0x66); // size prefix
 57.1260        case 5:
 57.1261          addr_nop_5();
 57.1262          break;
 57.1263 @@ -2049,11 +2053,12 @@
 57.1264          break;
 57.1265        case 3:
 57.1266          // Don't use "0x0F 0x1F 0x00" - need patching safe padding
 57.1267 -        emit_byte(0x66); // size prefix
 57.1268 +        emit_int8(0x66); // size prefix
 57.1269        case 2:
 57.1270 -        emit_byte(0x66); // size prefix
 57.1271 +        emit_int8(0x66); // size prefix
 57.1272        case 1:
 57.1273 -        emit_byte(0x90); // nop
 57.1274 +        emit_int8((unsigned char)0x90);
 57.1275 +                         // nop
 57.1276          break;
 57.1277        default:
 57.1278          assert(i == 0, " ");
 57.1279 @@ -2086,24 +2091,24 @@
 57.1280  
 57.1281      while(i >= 22) {
 57.1282        i -= 11;
 57.1283 -      emit_byte(0x66); // size prefix
 57.1284 -      emit_byte(0x66); // size prefix
 57.1285 -      emit_byte(0x66); // size prefix
 57.1286 +      emit_int8(0x66); // size prefix
 57.1287 +      emit_int8(0x66); // size prefix
 57.1288 +      emit_int8(0x66); // size prefix
 57.1289        addr_nop_8();
 57.1290      }
 57.1291      // Generate first nop for size between 21-12
 57.1292      switch (i) {
 57.1293        case 21:
 57.1294          i -= 1;
 57.1295 -        emit_byte(0x66); // size prefix
 57.1296 +        emit_int8(0x66); // size prefix
 57.1297        case 20:
 57.1298        case 19:
 57.1299          i -= 1;
 57.1300 -        emit_byte(0x66); // size prefix
 57.1301 +        emit_int8(0x66); // size prefix
 57.1302        case 18:
 57.1303        case 17:
 57.1304          i -= 1;
 57.1305 -        emit_byte(0x66); // size prefix
 57.1306 +        emit_int8(0x66); // size prefix
 57.1307        case 16:
 57.1308        case 15:
 57.1309          i -= 8;
 57.1310 @@ -2116,7 +2121,7 @@
 57.1311          break;
 57.1312        case 12:
 57.1313          i -= 6;
 57.1314 -        emit_byte(0x66); // size prefix
 57.1315 +        emit_int8(0x66); // size prefix
 57.1316          addr_nop_5();
 57.1317          break;
 57.1318        default:
 57.1319 @@ -2126,11 +2131,11 @@
 57.1320      // Generate second nop for size between 11-1
 57.1321      switch (i) {
 57.1322        case 11:
 57.1323 -        emit_byte(0x66); // size prefix
 57.1324 +        emit_int8(0x66); // size prefix
 57.1325        case 10:
 57.1326 -        emit_byte(0x66); // size prefix
 57.1327 +        emit_int8(0x66); // size prefix
 57.1328        case 9:
 57.1329 -        emit_byte(0x66); // size prefix
 57.1330 +        emit_int8(0x66); // size prefix
 57.1331        case 8:
 57.1332          addr_nop_8();
 57.1333          break;
 57.1334 @@ -2138,7 +2143,7 @@
 57.1335          addr_nop_7();
 57.1336          break;
 57.1337        case 6:
 57.1338 -        emit_byte(0x66); // size prefix
 57.1339 +        emit_int8(0x66); // size prefix
 57.1340        case 5:
 57.1341          addr_nop_5();
 57.1342          break;
 57.1343 @@ -2147,11 +2152,12 @@
 57.1344          break;
 57.1345        case 3:
 57.1346          // Don't use "0x0F 0x1F 0x00" - need patching safe padding
 57.1347 -        emit_byte(0x66); // size prefix
 57.1348 +        emit_int8(0x66); // size prefix
 57.1349        case 2:
 57.1350 -        emit_byte(0x66); // size prefix
 57.1351 +        emit_int8(0x66); // size prefix
 57.1352        case 1:
 57.1353 -        emit_byte(0x90); // nop
 57.1354 +        emit_int8((unsigned char)0x90);
 57.1355 +                         // nop
 57.1356          break;
 57.1357        default:
 57.1358          assert(i == 0, " ");
 57.1359 @@ -2174,42 +2180,43 @@
 57.1360    //
 57.1361    while(i > 12) {
 57.1362      i -= 4;
 57.1363 -    emit_byte(0x66); // size prefix
 57.1364 -    emit_byte(0x66);
 57.1365 -    emit_byte(0x66);
 57.1366 -    emit_byte(0x90); // nop
 57.1367 +    emit_int8(0x66); // size prefix
 57.1368 +    emit_int8(0x66);
 57.1369 +    emit_int8(0x66);
 57.1370 +    emit_int8((unsigned char)0x90);
 57.1371 +                     // nop
 57.1372    }
 57.1373    // 1 - 12 nops
 57.1374    if(i > 8) {
 57.1375      if(i > 9) {
 57.1376        i -= 1;
 57.1377 -      emit_byte(0x66);
 57.1378 +      emit_int8(0x66);
 57.1379      }
 57.1380      i -= 3;
 57.1381 -    emit_byte(0x66);
 57.1382 -    emit_byte(0x66);
 57.1383 -    emit_byte(0x90);
 57.1384 +    emit_int8(0x66);
 57.1385 +    emit_int8(0x66);
 57.1386 +    emit_int8((unsigned char)0x90);
 57.1387    }
 57.1388    // 1 - 8 nops
 57.1389    if(i > 4) {
 57.1390      if(i > 6) {
 57.1391        i -= 1;
 57.1392 -      emit_byte(0x66);
 57.1393 +      emit_int8(0x66);
 57.1394      }
 57.1395      i -= 3;
 57.1396 -    emit_byte(0x66);
 57.1397 -    emit_byte(0x66);
 57.1398 -    emit_byte(0x90);
 57.1399 +    emit_int8(0x66);
 57.1400 +    emit_int8(0x66);
 57.1401 +    emit_int8((unsigned char)0x90);
 57.1402    }
 57.1403    switch (i) {
 57.1404      case 4:
 57.1405 -      emit_byte(0x66);
 57.1406 +      emit_int8(0x66);
 57.1407      case 3:
 57.1408 -      emit_byte(0x66);
 57.1409 +      emit_int8(0x66);
 57.1410      case 2:
 57.1411 -      emit_byte(0x66);
 57.1412 +      emit_int8(0x66);
 57.1413      case 1:
 57.1414 -      emit_byte(0x90);
 57.1415 +      emit_int8((unsigned char)0x90);
 57.1416        break;
 57.1417      default:
 57.1418        assert(i == 0, " ");
 57.1419 @@ -2218,8 +2225,8 @@
 57.1420  
 57.1421  void Assembler::notl(Register dst) {
 57.1422    int encode = prefix_and_encode(dst->encoding());
 57.1423 -  emit_byte(0xF7);
 57.1424 -  emit_byte(0xD0 | encode );
 57.1425 +  emit_int8((unsigned char)0xF7);
 57.1426 +  emit_int8((unsigned char)(0xD0 | encode));
 57.1427  }
 57.1428  
 57.1429  void Assembler::orl(Address dst, int32_t imm32) {
 57.1430 @@ -2236,7 +2243,7 @@
 57.1431  void Assembler::orl(Register dst, Address src) {
 57.1432    InstructionMark im(this);
 57.1433    prefix(src, dst);
 57.1434 -  emit_byte(0x0B);
 57.1435 +  emit_int8(0x0B);
 57.1436    emit_operand(dst, src);
 57.1437  }
 57.1438  
 57.1439 @@ -2260,61 +2267,61 @@
 57.1440    assert(VM_Version::supports_sse4_2(), "");
 57.1441    InstructionMark im(this);
 57.1442    simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
 57.1443 -  emit_byte(0x61);
 57.1444 +  emit_int8(0x61);
 57.1445    emit_operand(dst, src);
 57.1446 -  emit_byte(imm8);
 57.1447 +  emit_int8(imm8);
 57.1448  }
 57.1449  
 57.1450  void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
 57.1451    assert(VM_Version::supports_sse4_2(), "");
 57.1452    int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
 57.1453 -  emit_byte(0x61);
 57.1454 -  emit_byte(0xC0 | encode);
 57.1455 -  emit_byte(imm8);
 57.1456 +  emit_int8(0x61);
 57.1457 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1458 +  emit_int8(imm8);
 57.1459  }
 57.1460  
 57.1461  void Assembler::pmovzxbw(XMMRegister dst, Address src) {
 57.1462    assert(VM_Version::supports_sse4_1(), "");
 57.1463    InstructionMark im(this);
 57.1464    simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1465 -  emit_byte(0x30);
 57.1466 +  emit_int8(0x30);
 57.1467    emit_operand(dst, src);
 57.1468  }
 57.1469  
 57.1470  void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
 57.1471    assert(VM_Version::supports_sse4_1(), "");
 57.1472    int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1473 -  emit_byte(0x30);
 57.1474 -  emit_byte(0xC0 | encode);
 57.1475 +  emit_int8(0x30);
 57.1476 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1477  }
 57.1478  
 57.1479  // generic
 57.1480  void Assembler::pop(Register dst) {
 57.1481    int encode = prefix_and_encode(dst->encoding());
 57.1482 -  emit_byte(0x58 | encode);
 57.1483 +  emit_int8(0x58 | encode);
 57.1484  }
 57.1485  
 57.1486  void Assembler::popcntl(Register dst, Address src) {
 57.1487    assert(VM_Version::supports_popcnt(), "must support");
 57.1488    InstructionMark im(this);
 57.1489 -  emit_byte(0xF3);
 57.1490 +  emit_int8((unsigned char)0xF3);
 57.1491    prefix(src, dst);
 57.1492 -  emit_byte(0x0F);
 57.1493 -  emit_byte(0xB8);
 57.1494 +  emit_int8(0x0F);
 57.1495 +  emit_int8((unsigned char)0xB8);
 57.1496    emit_operand(dst, src);
 57.1497  }
 57.1498  
 57.1499  void Assembler::popcntl(Register dst, Register src) {
 57.1500    assert(VM_Version::supports_popcnt(), "must support");
 57.1501 -  emit_byte(0xF3);
 57.1502 +  emit_int8((unsigned char)0xF3);
 57.1503    int encode = prefix_and_encode(dst->encoding(), src->encoding());
 57.1504 -  emit_byte(0x0F);
 57.1505 -  emit_byte(0xB8);
 57.1506 -  emit_byte(0xC0 | encode);
 57.1507 +  emit_int8(0x0F);
 57.1508 +  emit_int8((unsigned char)0xB8);
 57.1509 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1510  }
 57.1511  
 57.1512  void Assembler::popf() {
 57.1513 -  emit_byte(0x9D);
 57.1514 +  emit_int8((unsigned char)0x9D);
 57.1515  }
 57.1516  
 57.1517  #ifndef _LP64 // no 32bit push/pop on amd64
 57.1518 @@ -2322,21 +2329,21 @@
 57.1519    // NOTE: this will adjust stack by 8byte on 64bits
 57.1520    InstructionMark im(this);
 57.1521    prefix(dst);
 57.1522 -  emit_byte(0x8F);
 57.1523 +  emit_int8((unsigned char)0x8F);
 57.1524    emit_operand(rax, dst);
 57.1525  }
 57.1526  #endif
 57.1527  
 57.1528  void Assembler::prefetch_prefix(Address src) {
 57.1529    prefix(src);
 57.1530 -  emit_byte(0x0F);
 57.1531 +  emit_int8(0x0F);
 57.1532  }
 57.1533  
 57.1534  void Assembler::prefetchnta(Address src) {
 57.1535    NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 57.1536    InstructionMark im(this);
 57.1537    prefetch_prefix(src);
 57.1538 -  emit_byte(0x18);
 57.1539 +  emit_int8(0x18);
 57.1540    emit_operand(rax, src); // 0, src
 57.1541  }
 57.1542  
 57.1543 @@ -2344,7 +2351,7 @@
 57.1544    assert(VM_Version::supports_3dnow_prefetch(), "must support");
 57.1545    InstructionMark im(this);
 57.1546    prefetch_prefix(src);
 57.1547 -  emit_byte(0x0D);
 57.1548 +  emit_int8(0x0D);
 57.1549    emit_operand(rax, src); // 0, src
 57.1550  }
 57.1551  
 57.1552 @@ -2352,7 +2359,7 @@
 57.1553    NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 57.1554    InstructionMark im(this);
 57.1555    prefetch_prefix(src);
 57.1556 -  emit_byte(0x18);
 57.1557 +  emit_int8(0x18);
 57.1558    emit_operand(rcx, src); // 1, src
 57.1559  }
 57.1560  
 57.1561 @@ -2360,7 +2367,7 @@
 57.1562    NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 57.1563    InstructionMark im(this);
 57.1564    prefetch_prefix(src);
 57.1565 -  emit_byte(0x18);
 57.1566 +  emit_int8(0x18);
 57.1567    emit_operand(rdx, src); // 2, src
 57.1568  }
 57.1569  
 57.1570 @@ -2368,7 +2375,7 @@
 57.1571    NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
 57.1572    InstructionMark im(this);
 57.1573    prefetch_prefix(src);
 57.1574 -  emit_byte(0x18);
 57.1575 +  emit_int8(0x18);
 57.1576    emit_operand(rbx, src); // 3, src
 57.1577  }
 57.1578  
 57.1579 @@ -2376,27 +2383,26 @@
 57.1580    assert(VM_Version::supports_3dnow_prefetch(), "must support");
 57.1581    InstructionMark im(this);
 57.1582    prefetch_prefix(src);
 57.1583 -  emit_byte(0x0D);
 57.1584 +  emit_int8(0x0D);
 57.1585    emit_operand(rcx, src); // 1, src
 57.1586  }
 57.1587  
 57.1588  void Assembler::prefix(Prefix p) {
 57.1589 -  a_byte(p);
 57.1590 +  emit_int8(p);
 57.1591  }
 57.1592  
 57.1593  void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
 57.1594    assert(VM_Version::supports_ssse3(), "");
 57.1595    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1596 -  emit_byte(0x00);
 57.1597 -  emit_byte(0xC0 | encode);
 57.1598 +  emit_int8(0x00);
 57.1599 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1600  }
 57.1601  
 57.1602  void Assembler::pshufb(XMMRegister dst, Address src) {
 57.1603    assert(VM_Version::supports_ssse3(), "");
 57.1604 -  assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 57.1605    InstructionMark im(this);
 57.1606    simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1607 -  emit_byte(0x00);
 57.1608 +  emit_int8(0x00);
 57.1609    emit_operand(dst, src);
 57.1610  }
 57.1611  
 57.1612 @@ -2404,7 +2410,7 @@
 57.1613    assert(isByte(mode), "invalid value");
 57.1614    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1615    emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
 57.1616 -  emit_byte(mode & 0xFF);
 57.1617 +  emit_int8(mode & 0xFF);
 57.1618  
 57.1619  }
 57.1620  
 57.1621 @@ -2414,16 +2420,16 @@
 57.1622    assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 57.1623    InstructionMark im(this);
 57.1624    simd_prefix(dst, src, VEX_SIMD_66);
 57.1625 -  emit_byte(0x70);
 57.1626 +  emit_int8(0x70);
 57.1627    emit_operand(dst, src);
 57.1628 -  emit_byte(mode & 0xFF);
 57.1629 +  emit_int8(mode & 0xFF);
 57.1630  }
 57.1631  
 57.1632  void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
 57.1633    assert(isByte(mode), "invalid value");
 57.1634    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1635    emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
 57.1636 -  emit_byte(mode & 0xFF);
 57.1637 +  emit_int8(mode & 0xFF);
 57.1638  }
 57.1639  
 57.1640  void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
 57.1641 @@ -2432,18 +2438,18 @@
 57.1642    assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 57.1643    InstructionMark im(this);
 57.1644    simd_prefix(dst, src, VEX_SIMD_F2);
 57.1645 -  emit_byte(0x70);
 57.1646 +  emit_int8(0x70);
 57.1647    emit_operand(dst, src);
 57.1648 -  emit_byte(mode & 0xFF);
 57.1649 +  emit_int8(mode & 0xFF);
 57.1650  }
 57.1651  
 57.1652  void Assembler::psrldq(XMMRegister dst, int shift) {
 57.1653    // Shift 128 bit value in xmm register by number of bytes.
 57.1654    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.1655    int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
 57.1656 -  emit_byte(0x73);
 57.1657 -  emit_byte(0xC0 | encode);
 57.1658 -  emit_byte(shift);
 57.1659 +  emit_int8(0x73);
 57.1660 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1661 +  emit_int8(shift);
 57.1662  }
 57.1663  
 57.1664  void Assembler::ptest(XMMRegister dst, Address src) {
 57.1665 @@ -2451,15 +2457,15 @@
 57.1666    assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
 57.1667    InstructionMark im(this);
 57.1668    simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1669 -  emit_byte(0x17);
 57.1670 +  emit_int8(0x17);
 57.1671    emit_operand(dst, src);
 57.1672  }
 57.1673  
 57.1674  void Assembler::ptest(XMMRegister dst, XMMRegister src) {
 57.1675    assert(VM_Version::supports_sse4_1(), "");
 57.1676    int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.1677 -  emit_byte(0x17);
 57.1678 -  emit_byte(0xC0 | encode);
 57.1679 +  emit_int8(0x17);
 57.1680 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1681  }
 57.1682  
 57.1683  void Assembler::punpcklbw(XMMRegister dst, Address src) {
 57.1684 @@ -2492,18 +2498,18 @@
 57.1685  void Assembler::push(int32_t imm32) {
 57.1686    // in 64bits we push 64bits onto the stack but only
 57.1687    // take a 32bit immediate
 57.1688 -  emit_byte(0x68);
 57.1689 +  emit_int8(0x68);
 57.1690    emit_long(imm32);
 57.1691  }
 57.1692  
 57.1693  void Assembler::push(Register src) {
 57.1694    int encode = prefix_and_encode(src->encoding());
 57.1695  
 57.1696 -  emit_byte(0x50 | encode);
 57.1697 +  emit_int8(0x50 | encode);
 57.1698  }
 57.1699  
 57.1700  void Assembler::pushf() {
 57.1701 -  emit_byte(0x9C);
 57.1702 +  emit_int8((unsigned char)0x9C);
 57.1703  }
 57.1704  
 57.1705  #ifndef _LP64 // no 32bit push/pop on amd64
 57.1706 @@ -2511,7 +2517,7 @@
 57.1707    // Note this will push 64bit on 64bit
 57.1708    InstructionMark im(this);
 57.1709    prefix(src);
 57.1710 -  emit_byte(0xFF);
 57.1711 +  emit_int8((unsigned char)0xFF);
 57.1712    emit_operand(rsi, src);
 57.1713  }
 57.1714  #endif
 57.1715 @@ -2520,58 +2526,58 @@
 57.1716    assert(isShiftCount(imm8), "illegal shift count");
 57.1717    int encode = prefix_and_encode(dst->encoding());
 57.1718    if (imm8 == 1) {
 57.1719 -    emit_byte(0xD1);
 57.1720 -    emit_byte(0xD0 | encode);
 57.1721 +    emit_int8((unsigned char)0xD1);
 57.1722 +    emit_int8((unsigned char)(0xD0 | encode));
 57.1723    } else {
 57.1724 -    emit_byte(0xC1);
 57.1725 -    emit_byte(0xD0 | encode);
 57.1726 -    emit_byte(imm8);
 57.1727 +    emit_int8((unsigned char)0xC1);
 57.1728 +    emit_int8((unsigned char)0xD0 | encode);
 57.1729 +    emit_int8(imm8);
 57.1730    }
 57.1731  }
 57.1732  
 57.1733  // copies data from [esi] to [edi] using rcx pointer sized words
 57.1734  // generic
 57.1735  void Assembler::rep_mov() {
 57.1736 -  emit_byte(0xF3);
 57.1737 +  emit_int8((unsigned char)0xF3);
 57.1738    // MOVSQ
 57.1739    LP64_ONLY(prefix(REX_W));
 57.1740 -  emit_byte(0xA5);
 57.1741 +  emit_int8((unsigned char)0xA5);
 57.1742  }
 57.1743  
 57.1744  // sets rcx pointer sized words with rax, value at [edi]
 57.1745  // generic
 57.1746  void Assembler::rep_set() { // rep_set
 57.1747 -  emit_byte(0xF3);
 57.1748 +  emit_int8((unsigned char)0xF3);
 57.1749    // STOSQ
 57.1750    LP64_ONLY(prefix(REX_W));
 57.1751 -  emit_byte(0xAB);
 57.1752 +  emit_int8((unsigned char)0xAB);
 57.1753  }
 57.1754  
 57.1755  // scans rcx pointer sized words at [edi] for occurance of rax,
 57.1756  // generic
 57.1757  void Assembler::repne_scan() { // repne_scan
 57.1758 -  emit_byte(0xF2);
 57.1759 +  emit_int8((unsigned char)0xF2);
 57.1760    // SCASQ
 57.1761    LP64_ONLY(prefix(REX_W));
 57.1762 -  emit_byte(0xAF);
 57.1763 +  emit_int8((unsigned char)0xAF);
 57.1764  }
 57.1765  
 57.1766  #ifdef _LP64
 57.1767  // scans rcx 4 byte words at [edi] for occurance of rax,
 57.1768  // generic
 57.1769  void Assembler::repne_scanl() { // repne_scan
 57.1770 -  emit_byte(0xF2);
 57.1771 +  emit_int8((unsigned char)0xF2);
 57.1772    // SCASL
 57.1773 -  emit_byte(0xAF);
 57.1774 +  emit_int8((unsigned char)0xAF);
 57.1775  }
 57.1776  #endif
 57.1777  
 57.1778  void Assembler::ret(int imm16) {
 57.1779    if (imm16 == 0) {
 57.1780 -    emit_byte(0xC3);
 57.1781 +    emit_int8((unsigned char)0xC3);
 57.1782    } else {
 57.1783 -    emit_byte(0xC2);
 57.1784 -    emit_word(imm16);
 57.1785 +    emit_int8((unsigned char)0xC2);
 57.1786 +    emit_int16(imm16);
 57.1787    }
 57.1788  }
 57.1789  
 57.1790 @@ -2580,26 +2586,26 @@
 57.1791    // Not supported in 64bit mode
 57.1792    ShouldNotReachHere();
 57.1793  #endif
 57.1794 -  emit_byte(0x9E);
 57.1795 +  emit_int8((unsigned char)0x9E);
 57.1796  }
 57.1797  
 57.1798  void Assembler::sarl(Register dst, int imm8) {
 57.1799    int encode = prefix_and_encode(dst->encoding());
 57.1800    assert(isShiftCount(imm8), "illegal shift count");
 57.1801    if (imm8 == 1) {
 57.1802 -    emit_byte(0xD1);
 57.1803 -    emit_byte(0xF8 | encode);
 57.1804 +    emit_int8((unsigned char)0xD1);
 57.1805 +    emit_int8((unsigned char)(0xF8 | encode));
 57.1806    } else {
 57.1807 -    emit_byte(0xC1);
 57.1808 -    emit_byte(0xF8 | encode);
 57.1809 -    emit_byte(imm8);
 57.1810 +    emit_int8((unsigned char)0xC1);
 57.1811 +    emit_int8((unsigned char)(0xF8 | encode));
 57.1812 +    emit_int8(imm8);
 57.1813    }
 57.1814  }
 57.1815  
 57.1816  void Assembler::sarl(Register dst) {
 57.1817    int encode = prefix_and_encode(dst->encoding());
 57.1818 -  emit_byte(0xD3);
 57.1819 -  emit_byte(0xF8 | encode);
 57.1820 +  emit_int8((unsigned char)0xD3);
 57.1821 +  emit_int8((unsigned char)(0xF8 | encode));
 57.1822  }
 57.1823  
 57.1824  void Assembler::sbbl(Address dst, int32_t imm32) {
 57.1825 @@ -2617,7 +2623,7 @@
 57.1826  void Assembler::sbbl(Register dst, Address src) {
 57.1827    InstructionMark im(this);
 57.1828    prefix(src, dst);
 57.1829 -  emit_byte(0x1B);
 57.1830 +  emit_int8(0x1B);
 57.1831    emit_operand(dst, src);
 57.1832  }
 57.1833  
 57.1834 @@ -2629,47 +2635,47 @@
 57.1835  void Assembler::setb(Condition cc, Register dst) {
 57.1836    assert(0 <= cc && cc < 16, "illegal cc");
 57.1837    int encode = prefix_and_encode(dst->encoding(), true);
 57.1838 -  emit_byte(0x0F);
 57.1839 -  emit_byte(0x90 | cc);
 57.1840 -  emit_byte(0xC0 | encode);
 57.1841 +  emit_int8(0x0F);
 57.1842 +  emit_int8((unsigned char)0x90 | cc);
 57.1843 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1844  }
 57.1845  
 57.1846  void Assembler::shll(Register dst, int imm8) {
 57.1847    assert(isShiftCount(imm8), "illegal shift count");
 57.1848    int encode = prefix_and_encode(dst->encoding());
 57.1849    if (imm8 == 1 ) {
 57.1850 -    emit_byte(0xD1);
 57.1851 -    emit_byte(0xE0 | encode);
 57.1852 +    emit_int8((unsigned char)0xD1);
 57.1853 +    emit_int8((unsigned char)(0xE0 | encode));
 57.1854    } else {
 57.1855 -    emit_byte(0xC1);
 57.1856 -    emit_byte(0xE0 | encode);
 57.1857 -    emit_byte(imm8);
 57.1858 +    emit_int8((unsigned char)0xC1);
 57.1859 +    emit_int8((unsigned char)(0xE0 | encode));
 57.1860 +    emit_int8(imm8);
 57.1861    }
 57.1862  }
 57.1863  
 57.1864  void Assembler::shll(Register dst) {
 57.1865    int encode = prefix_and_encode(dst->encoding());
 57.1866 -  emit_byte(0xD3);
 57.1867 -  emit_byte(0xE0 | encode);
 57.1868 +  emit_int8((unsigned char)0xD3);
 57.1869 +  emit_int8((unsigned char)(0xE0 | encode));
 57.1870  }
 57.1871  
 57.1872  void Assembler::shrl(Register dst, int imm8) {
 57.1873    assert(isShiftCount(imm8), "illegal shift count");
 57.1874    int encode = prefix_and_encode(dst->encoding());
 57.1875 -  emit_byte(0xC1);
 57.1876 -  emit_byte(0xE8 | encode);
 57.1877 -  emit_byte(imm8);
 57.1878 +  emit_int8((unsigned char)0xC1);
 57.1879 +  emit_int8((unsigned char)(0xE8 | encode));
 57.1880 +  emit_int8(imm8);
 57.1881  }
 57.1882  
 57.1883  void Assembler::shrl(Register dst) {
 57.1884    int encode = prefix_and_encode(dst->encoding());
 57.1885 -  emit_byte(0xD3);
 57.1886 -  emit_byte(0xE8 | encode);
 57.1887 +  emit_int8((unsigned char)0xD3);
 57.1888 +  emit_int8((unsigned char)(0xE8 | encode));
 57.1889  }
 57.1890  
 57.1891  // copies a single word from [esi] to [edi]
 57.1892  void Assembler::smovl() {
 57.1893 -  emit_byte(0xA5);
 57.1894 +  emit_int8((unsigned char)0xA5);
 57.1895  }
 57.1896  
 57.1897  void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
 57.1898 @@ -2688,7 +2694,7 @@
 57.1899  }
 57.1900  
 57.1901  void Assembler::std() {
 57.1902 -  emit_byte(0xfd);
 57.1903 +  emit_int8((unsigned char)0xFD);
 57.1904  }
 57.1905  
 57.1906  void Assembler::sqrtss(XMMRegister dst, Address src) {
 57.1907 @@ -2700,8 +2706,8 @@
 57.1908    NOT_LP64(assert(VM_Version::supports_sse(), ""));
 57.1909    InstructionMark im(this);
 57.1910    prefix(dst);
 57.1911 -  emit_byte(0x0F);
 57.1912 -  emit_byte(0xAE);
 57.1913 +  emit_int8(0x0F);
 57.1914 +  emit_int8((unsigned char)0xAE);
 57.1915    emit_operand(as_Register(3), dst);
 57.1916  }
 57.1917  
 57.1918 @@ -2714,7 +2720,7 @@
 57.1919  void Assembler::subl(Address dst, Register src) {
 57.1920    InstructionMark im(this);
 57.1921    prefix(dst, src);
 57.1922 -  emit_byte(0x29);
 57.1923 +  emit_int8(0x29);
 57.1924    emit_operand(src, dst);
 57.1925  }
 57.1926  
 57.1927 @@ -2732,7 +2738,7 @@
 57.1928  void Assembler::subl(Register dst, Address src) {
 57.1929    InstructionMark im(this);
 57.1930    prefix(src, dst);
 57.1931 -  emit_byte(0x2B);
 57.1932 +  emit_int8(0x2B);
 57.1933    emit_operand(dst, src);
 57.1934  }
 57.1935  
 57.1936 @@ -2773,11 +2779,11 @@
 57.1937    // 8bit operands
 57.1938    int encode = dst->encoding();
 57.1939    if (encode == 0) {
 57.1940 -    emit_byte(0xA9);
 57.1941 +    emit_int8((unsigned char)0xA9);
 57.1942    } else {
 57.1943      encode = prefix_and_encode(encode);
 57.1944 -    emit_byte(0xF7);
 57.1945 -    emit_byte(0xC0 | encode);
 57.1946 +    emit_int8((unsigned char)0xF7);
 57.1947 +    emit_int8((unsigned char)(0xC0 | encode));
 57.1948    }
 57.1949    emit_long(imm32);
 57.1950  }
 57.1951 @@ -2790,7 +2796,7 @@
 57.1952  void Assembler::testl(Register dst, Address  src) {
 57.1953    InstructionMark im(this);
 57.1954    prefix(src, dst);
 57.1955 -  emit_byte(0x85);
 57.1956 +  emit_int8((unsigned char)0x85);
 57.1957    emit_operand(dst, src);
 57.1958  }
 57.1959  
 57.1960 @@ -2818,28 +2824,28 @@
 57.1961  void Assembler::xaddl(Address dst, Register src) {
 57.1962    InstructionMark im(this);
 57.1963    prefix(dst, src);
 57.1964 -  emit_byte(0x0F);
 57.1965 -  emit_byte(0xC1);
 57.1966 +  emit_int8(0x0F);
 57.1967 +  emit_int8((unsigned char)0xC1);
 57.1968    emit_operand(src, dst);
 57.1969  }
 57.1970  
 57.1971  void Assembler::xchgl(Register dst, Address src) { // xchg
 57.1972    InstructionMark im(this);
 57.1973    prefix(src, dst);
 57.1974 -  emit_byte(0x87);
 57.1975 +  emit_int8((unsigned char)0x87);
 57.1976    emit_operand(dst, src);
 57.1977  }
 57.1978  
 57.1979  void Assembler::xchgl(Register dst, Register src) {
 57.1980    int encode = prefix_and_encode(dst->encoding(), src->encoding());
 57.1981 -  emit_byte(0x87);
 57.1982 -  emit_byte(0xc0 | encode);
 57.1983 +  emit_int8((unsigned char)0x87);
 57.1984 +  emit_int8((unsigned char)(0xC0 | encode));
 57.1985  }
 57.1986  
 57.1987  void Assembler::xgetbv() {
 57.1988 -  emit_byte(0x0F);
 57.1989 -  emit_byte(0x01);
 57.1990 -  emit_byte(0xD0);
 57.1991 +  emit_int8(0x0F);
 57.1992 +  emit_int8(0x01);
 57.1993 +  emit_int8((unsigned char)0xD0);
 57.1994  }
 57.1995  
 57.1996  void Assembler::xorl(Register dst, int32_t imm32) {
 57.1997 @@ -2850,7 +2856,7 @@
 57.1998  void Assembler::xorl(Register dst, Address src) {
 57.1999    InstructionMark im(this);
 57.2000    prefix(src, dst);
 57.2001 -  emit_byte(0x33);
 57.2002 +  emit_int8(0x33);
 57.2003    emit_operand(dst, src);
 57.2004  }
 57.2005  
 57.2006 @@ -3276,8 +3282,8 @@
 57.2007  void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
 57.2008    assert(VM_Version::supports_sse4_1(), "");
 57.2009    int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
 57.2010 -  emit_byte(0x40);
 57.2011 -  emit_byte(0xC0 | encode);
 57.2012 +  emit_int8(0x40);
 57.2013 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2014  }
 57.2015  
 57.2016  void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
 57.2017 @@ -3288,8 +3294,8 @@
 57.2018  void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
 57.2019    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2020    int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
 57.2021 -  emit_byte(0x40);
 57.2022 -  emit_byte(0xC0 | encode);
 57.2023 +  emit_int8(0x40);
 57.2024 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2025  }
 57.2026  
 57.2027  void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
 57.2028 @@ -3303,7 +3309,7 @@
 57.2029    int dst_enc = dst->encoding();
 57.2030    int nds_enc = nds->is_valid() ? nds->encoding() : 0;
 57.2031    vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
 57.2032 -  emit_byte(0x40);
 57.2033 +  emit_int8(0x40);
 57.2034    emit_operand(dst, src);
 57.2035  }
 57.2036  
 57.2037 @@ -3312,27 +3318,27 @@
 57.2038    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2039    // XMM6 is for /6 encoding: 66 0F 71 /6 ib
 57.2040    int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
 57.2041 -  emit_byte(0x71);
 57.2042 -  emit_byte(0xC0 | encode);
 57.2043 -  emit_byte(shift & 0xFF);
 57.2044 +  emit_int8(0x71);
 57.2045 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2046 +  emit_int8(shift & 0xFF);
 57.2047  }
 57.2048  
 57.2049  void Assembler::pslld(XMMRegister dst, int shift) {
 57.2050    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2051    // XMM6 is for /6 encoding: 66 0F 72 /6 ib
 57.2052    int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
 57.2053 -  emit_byte(0x72);
 57.2054 -  emit_byte(0xC0 | encode);
 57.2055 -  emit_byte(shift & 0xFF);
 57.2056 +  emit_int8(0x72);
 57.2057 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2058 +  emit_int8(shift & 0xFF);
 57.2059  }
 57.2060  
 57.2061  void Assembler::psllq(XMMRegister dst, int shift) {
 57.2062    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2063    // XMM6 is for /6 encoding: 66 0F 73 /6 ib
 57.2064    int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
 57.2065 -  emit_byte(0x73);
 57.2066 -  emit_byte(0xC0 | encode);
 57.2067 -  emit_byte(shift & 0xFF);
 57.2068 +  emit_int8(0x73);
 57.2069 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2070 +  emit_int8(shift & 0xFF);
 57.2071  }
 57.2072  
 57.2073  void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
 57.2074 @@ -3354,21 +3360,21 @@
 57.2075    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2076    // XMM6 is for /6 encoding: 66 0F 71 /6 ib
 57.2077    emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
 57.2078 -  emit_byte(shift & 0xFF);
 57.2079 +  emit_int8(shift & 0xFF);
 57.2080  }
 57.2081  
 57.2082  void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
 57.2083    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2084    // XMM6 is for /6 encoding: 66 0F 72 /6 ib
 57.2085    emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
 57.2086 -  emit_byte(shift & 0xFF);
 57.2087 +  emit_int8(shift & 0xFF);
 57.2088  }
 57.2089  
 57.2090  void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
 57.2091    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2092    // XMM6 is for /6 encoding: 66 0F 73 /6 ib
 57.2093    emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
 57.2094 -  emit_byte(shift & 0xFF);
 57.2095 +  emit_int8(shift & 0xFF);
 57.2096  }
 57.2097  
 57.2098  void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
 57.2099 @@ -3391,18 +3397,18 @@
 57.2100    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2101    // XMM2 is for /2 encoding: 66 0F 71 /2 ib
 57.2102    int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
 57.2103 -  emit_byte(0x71);
 57.2104 -  emit_byte(0xC0 | encode);
 57.2105 -  emit_byte(shift & 0xFF);
 57.2106 +  emit_int8(0x71);
 57.2107 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2108 +  emit_int8(shift & 0xFF);
 57.2109  }
 57.2110  
 57.2111  void Assembler::psrld(XMMRegister dst, int shift) {
 57.2112    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2113    // XMM2 is for /2 encoding: 66 0F 72 /2 ib
 57.2114    int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
 57.2115 -  emit_byte(0x72);
 57.2116 -  emit_byte(0xC0 | encode);
 57.2117 -  emit_byte(shift & 0xFF);
 57.2118 +  emit_int8(0x72);
 57.2119 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2120 +  emit_int8(shift & 0xFF);
 57.2121  }
 57.2122  
 57.2123  void Assembler::psrlq(XMMRegister dst, int shift) {
 57.2124 @@ -3411,9 +3417,9 @@
 57.2125    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2126    // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 57.2127    int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
 57.2128 -  emit_byte(0x73);
 57.2129 -  emit_byte(0xC0 | encode);
 57.2130 -  emit_byte(shift & 0xFF);
 57.2131 +  emit_int8(0x73);
 57.2132 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2133 +  emit_int8(shift & 0xFF);
 57.2134  }
 57.2135  
 57.2136  void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
 57.2137 @@ -3435,21 +3441,21 @@
 57.2138    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2139    // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 57.2140    emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
 57.2141 -  emit_byte(shift & 0xFF);
 57.2142 +  emit_int8(shift & 0xFF);
 57.2143  }
 57.2144  
 57.2145  void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
 57.2146    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2147    // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 57.2148    emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
 57.2149 -  emit_byte(shift & 0xFF);
 57.2150 +  emit_int8(shift & 0xFF);
 57.2151  }
 57.2152  
 57.2153  void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
 57.2154    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2155    // XMM2 is for /2 encoding: 66 0F 73 /2 ib
 57.2156    emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
 57.2157 -  emit_byte(shift & 0xFF);
 57.2158 +  emit_int8(shift & 0xFF);
 57.2159  }
 57.2160  
 57.2161  void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
 57.2162 @@ -3472,18 +3478,18 @@
 57.2163    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2164    // XMM4 is for /4 encoding: 66 0F 71 /4 ib
 57.2165    int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
 57.2166 -  emit_byte(0x71);
 57.2167 -  emit_byte(0xC0 | encode);
 57.2168 -  emit_byte(shift & 0xFF);
 57.2169 +  emit_int8(0x71);
 57.2170 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2171 +  emit_int8(shift & 0xFF);
 57.2172  }
 57.2173  
 57.2174  void Assembler::psrad(XMMRegister dst, int shift) {
 57.2175    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.2176    // XMM4 is for /4 encoding: 66 0F 72 /4 ib
 57.2177    int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
 57.2178 -  emit_byte(0x72);
 57.2179 -  emit_byte(0xC0 | encode);
 57.2180 -  emit_byte(shift & 0xFF);
 57.2181 +  emit_int8(0x72);
 57.2182 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2183 +  emit_int8(shift & 0xFF);
 57.2184  }
 57.2185  
 57.2186  void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
 57.2187 @@ -3500,14 +3506,14 @@
 57.2188    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2189    // XMM4 is for /4 encoding: 66 0F 71 /4 ib
 57.2190    emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
 57.2191 -  emit_byte(shift & 0xFF);
 57.2192 +  emit_int8(shift & 0xFF);
 57.2193  }
 57.2194  
 57.2195  void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
 57.2196    assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
 57.2197    // XMM4 is for /4 encoding: 66 0F 71 /4 ib
 57.2198    emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
 57.2199 -  emit_byte(shift & 0xFF);
 57.2200 +  emit_int8(shift & 0xFF);
 57.2201  }
 57.2202  
 57.2203  void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
 57.2204 @@ -3572,11 +3578,11 @@
 57.2205    assert(VM_Version::supports_avx(), "");
 57.2206    bool vector256 = true;
 57.2207    int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
 57.2208 -  emit_byte(0x18);
 57.2209 -  emit_byte(0xC0 | encode);
 57.2210 +  emit_int8(0x18);
 57.2211 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2212    // 0x00 - insert into lower 128 bits
 57.2213    // 0x01 - insert into upper 128 bits
 57.2214 -  emit_byte(0x01);
 57.2215 +  emit_int8(0x01);
 57.2216  }
 57.2217  
 57.2218  void Assembler::vinsertf128h(XMMRegister dst, Address src) {
 57.2219 @@ -3587,10 +3593,10 @@
 57.2220    int dst_enc = dst->encoding();
 57.2221    // swap src<->dst for encoding
 57.2222    vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
 57.2223 -  emit_byte(0x18);
 57.2224 +  emit_int8(0x18);
 57.2225    emit_operand(dst, src);
 57.2226    // 0x01 - insert into upper 128 bits
 57.2227 -  emit_byte(0x01);
 57.2228 +  emit_int8(0x01);
 57.2229  }
 57.2230  
 57.2231  void Assembler::vextractf128h(Address dst, XMMRegister src) {
 57.2232 @@ -3600,21 +3606,21 @@
 57.2233    assert(src != xnoreg, "sanity");
 57.2234    int src_enc = src->encoding();
 57.2235    vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
 57.2236 -  emit_byte(0x19);
 57.2237 +  emit_int8(0x19);
 57.2238    emit_operand(src, dst);
 57.2239    // 0x01 - extract from upper 128 bits
 57.2240 -  emit_byte(0x01);
 57.2241 +  emit_int8(0x01);
 57.2242  }
 57.2243  
 57.2244  void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
 57.2245    assert(VM_Version::supports_avx2(), "");
 57.2246    bool vector256 = true;
 57.2247    int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
 57.2248 -  emit_byte(0x38);
 57.2249 -  emit_byte(0xC0 | encode);
 57.2250 +  emit_int8(0x38);
 57.2251 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2252    // 0x00 - insert into lower 128 bits
 57.2253    // 0x01 - insert into upper 128 bits
 57.2254 -  emit_byte(0x01);
 57.2255 +  emit_int8(0x01);
 57.2256  }
 57.2257  
 57.2258  void Assembler::vinserti128h(XMMRegister dst, Address src) {
 57.2259 @@ -3625,10 +3631,10 @@
 57.2260    int dst_enc = dst->encoding();
 57.2261    // swap src<->dst for encoding
 57.2262    vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
 57.2263 -  emit_byte(0x38);
 57.2264 +  emit_int8(0x38);
 57.2265    emit_operand(dst, src);
 57.2266    // 0x01 - insert into upper 128 bits
 57.2267 -  emit_byte(0x01);
 57.2268 +  emit_int8(0x01);
 57.2269  }
 57.2270  
 57.2271  void Assembler::vextracti128h(Address dst, XMMRegister src) {
 57.2272 @@ -3638,16 +3644,16 @@
 57.2273    assert(src != xnoreg, "sanity");
 57.2274    int src_enc = src->encoding();
 57.2275    vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
 57.2276 -  emit_byte(0x39);
 57.2277 +  emit_int8(0x39);
 57.2278    emit_operand(src, dst);
 57.2279    // 0x01 - extract from upper 128 bits
 57.2280 -  emit_byte(0x01);
 57.2281 +  emit_int8(0x01);
 57.2282  }
 57.2283  
 57.2284  void Assembler::vzeroupper() {
 57.2285    assert(VM_Version::supports_avx(), "");
 57.2286    (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
 57.2287 -  emit_byte(0x77);
 57.2288 +  emit_int8(0x77);
 57.2289  }
 57.2290  
 57.2291  
 57.2292 @@ -3657,15 +3663,15 @@
 57.2293  void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
 57.2294    // NO PREFIX AS NEVER 64BIT
 57.2295    InstructionMark im(this);
 57.2296 -  emit_byte(0x81);
 57.2297 -  emit_byte(0xF8 | src1->encoding());
 57.2298 +  emit_int8((unsigned char)0x81);
 57.2299 +  emit_int8((unsigned char)(0xF8 | src1->encoding()));
 57.2300    emit_data(imm32, rspec, 0);
 57.2301  }
 57.2302  
 57.2303  void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
 57.2304    // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
 57.2305    InstructionMark im(this);
 57.2306 -  emit_byte(0x81);
 57.2307 +  emit_int8((unsigned char)0x81);
 57.2308    emit_operand(rdi, src1);
 57.2309    emit_data(imm32, rspec, 0);
 57.2310  }
 57.2311 @@ -3675,14 +3681,14 @@
 57.2312  // into rdx:rax.  The ZF is set if the compared values were equal, and cleared otherwise.
 57.2313  void Assembler::cmpxchg8(Address adr) {
 57.2314    InstructionMark im(this);
 57.2315 -  emit_byte(0x0F);
 57.2316 -  emit_byte(0xc7);
 57.2317 +  emit_int8(0x0F);
 57.2318 +  emit_int8((unsigned char)0xC7);
 57.2319    emit_operand(rcx, adr);
 57.2320  }
 57.2321  
 57.2322  void Assembler::decl(Register dst) {
 57.2323    // Don't use it directly. Use MacroAssembler::decrementl() instead.
 57.2324 - emit_byte(0x48 | dst->encoding());
 57.2325 + emit_int8(0x48 | dst->encoding());
 57.2326  }
 57.2327  
 57.2328  #endif // _LP64
 57.2329 @@ -3690,8 +3696,8 @@
 57.2330  // 64bit typically doesn't use the x87 but needs to for the trig funcs
 57.2331  
 57.2332  void Assembler::fabs() {
 57.2333 -  emit_byte(0xD9);
 57.2334 -  emit_byte(0xE1);
 57.2335 +  emit_int8((unsigned char)0xD9);
 57.2336 +  emit_int8((unsigned char)0xE1);
 57.2337  }
 57.2338  
 57.2339  void Assembler::fadd(int i) {
 57.2340 @@ -3700,13 +3706,13 @@
 57.2341  
 57.2342  void Assembler::fadd_d(Address src) {
 57.2343    InstructionMark im(this);
 57.2344 -  emit_byte(0xDC);
 57.2345 +  emit_int8((unsigned char)0xDC);
 57.2346    emit_operand32(rax, src);
 57.2347  }
 57.2348  
 57.2349  void Assembler::fadd_s(Address src) {
 57.2350    InstructionMark im(this);
 57.2351 -  emit_byte(0xD8);
 57.2352 +  emit_int8((unsigned char)0xD8);
 57.2353    emit_operand32(rax, src);
 57.2354  }
 57.2355  
 57.2356 @@ -3719,8 +3725,8 @@
 57.2357  }
 57.2358  
 57.2359  void Assembler::fchs() {
 57.2360 -  emit_byte(0xD9);
 57.2361 -  emit_byte(0xE0);
 57.2362 +  emit_int8((unsigned char)0xD9);
 57.2363 +  emit_int8((unsigned char)0xE0);
 57.2364  }
 57.2365  
 57.2366  void Assembler::fcom(int i) {
 57.2367 @@ -3733,29 +3739,29 @@
 57.2368  
 57.2369  void Assembler::fcomp_d(Address src) {
 57.2370    InstructionMark im(this);
 57.2371 -  emit_byte(0xDC);
 57.2372 +  emit_int8((unsigned char)0xDC);
 57.2373    emit_operand32(rbx, src);
 57.2374  }
 57.2375  
 57.2376  void Assembler::fcomp_s(Address src) {
 57.2377    InstructionMark im(this);
 57.2378 -  emit_byte(0xD8);
 57.2379 +  emit_int8((unsigned char)0xD8);
 57.2380    emit_operand32(rbx, src);
 57.2381  }
 57.2382  
 57.2383  void Assembler::fcompp() {
 57.2384 -  emit_byte(0xDE);
 57.2385 -  emit_byte(0xD9);
 57.2386 +  emit_int8((unsigned char)0xDE);
 57.2387 +  emit_int8((unsigned char)0xD9);
 57.2388  }
 57.2389  
 57.2390  void Assembler::fcos() {
 57.2391 -  emit_byte(0xD9);
 57.2392 -  emit_byte(0xFF);
 57.2393 +  emit_int8((unsigned char)0xD9);
 57.2394 +  emit_int8((unsigned char)0xFF);
 57.2395  }
 57.2396  
 57.2397  void Assembler::fdecstp() {
 57.2398 -  emit_byte(0xD9);
 57.2399 -  emit_byte(0xF6);
 57.2400 +  emit_int8((unsigned char)0xD9);
 57.2401 +  emit_int8((unsigned char)0xF6);
 57.2402  }
 57.2403  
 57.2404  void Assembler::fdiv(int i) {
 57.2405 @@ -3764,13 +3770,13 @@
 57.2406  
 57.2407  void Assembler::fdiv_d(Address src) {
 57.2408    InstructionMark im(this);
 57.2409 -  emit_byte(0xDC);
 57.2410 +  emit_int8((unsigned char)0xDC);
 57.2411    emit_operand32(rsi, src);
 57.2412  }
 57.2413  
 57.2414  void Assembler::fdiv_s(Address src) {
 57.2415    InstructionMark im(this);
 57.2416 -  emit_byte(0xD8);
 57.2417 +  emit_int8((unsigned char)0xD8);
 57.2418    emit_operand32(rsi, src);
 57.2419  }
 57.2420  
 57.2421 @@ -3791,13 +3797,13 @@
 57.2422  
 57.2423  void Assembler::fdivr_d(Address src) {
 57.2424    InstructionMark im(this);
 57.2425 -  emit_byte(0xDC);
 57.2426 +  emit_int8((unsigned char)0xDC);
 57.2427    emit_operand32(rdi, src);
 57.2428  }
 57.2429  
 57.2430  void Assembler::fdivr_s(Address src) {
 57.2431    InstructionMark im(this);
 57.2432 -  emit_byte(0xD8);
 57.2433 +  emit_int8((unsigned char)0xD8);
 57.2434    emit_operand32(rdi, src);
 57.2435  }
 57.2436  
 57.2437 @@ -3815,59 +3821,59 @@
 57.2438  
 57.2439  void Assembler::fild_d(Address adr) {
 57.2440    InstructionMark im(this);
 57.2441 -  emit_byte(0xDF);
 57.2442 +  emit_int8((unsigned char)0xDF);
 57.2443    emit_operand32(rbp, adr);
 57.2444  }
 57.2445  
 57.2446  void Assembler::fild_s(Address adr) {
 57.2447    InstructionMark im(this);
 57.2448 -  emit_byte(0xDB);
 57.2449 +  emit_int8((unsigned char)0xDB);
 57.2450    emit_operand32(rax, adr);
 57.2451  }
 57.2452  
 57.2453  void Assembler::fincstp() {
 57.2454 -  emit_byte(0xD9);
 57.2455 -  emit_byte(0xF7);
 57.2456 +  emit_int8((unsigned char)0xD9);
 57.2457 +  emit_int8((unsigned char)0xF7);
 57.2458  }
 57.2459  
 57.2460  void Assembler::finit() {
 57.2461 -  emit_byte(0x9B);
 57.2462 -  emit_byte(0xDB);
 57.2463 -  emit_byte(0xE3);
 57.2464 +  emit_int8((unsigned char)0x9B);
 57.2465 +  emit_int8((unsigned char)0xDB);
 57.2466 +  emit_int8((unsigned char)0xE3);
 57.2467  }
 57.2468  
 57.2469  void Assembler::fist_s(Address adr) {
 57.2470    InstructionMark im(this);
 57.2471 -  emit_byte(0xDB);
 57.2472 +  emit_int8((unsigned char)0xDB);
 57.2473    emit_operand32(rdx, adr);
 57.2474  }
 57.2475  
 57.2476  void Assembler::fistp_d(Address adr) {
 57.2477    InstructionMark im(this);
 57.2478 -  emit_byte(0xDF);
 57.2479 +  emit_int8((unsigned char)0xDF);
 57.2480    emit_operand32(rdi, adr);
 57.2481  }
 57.2482  
 57.2483  void Assembler::fistp_s(Address adr) {
 57.2484    InstructionMark im(this);
 57.2485 -  emit_byte(0xDB);
 57.2486 +  emit_int8((unsigned char)0xDB);
 57.2487    emit_operand32(rbx, adr);
 57.2488  }
 57.2489  
 57.2490  void Assembler::fld1() {
 57.2491 -  emit_byte(0xD9);
 57.2492 -  emit_byte(0xE8);
 57.2493 +  emit_int8((unsigned char)0xD9);
 57.2494 +  emit_int8((unsigned char)0xE8);
 57.2495  }
 57.2496  
 57.2497  void Assembler::fld_d(Address adr) {
 57.2498    InstructionMark im(this);
 57.2499 -  emit_byte(0xDD);
 57.2500 +  emit_int8((unsigned char)0xDD);
 57.2501    emit_operand32(rax, adr);
 57.2502  }
 57.2503  
 57.2504  void Assembler::fld_s(Address adr) {
 57.2505    InstructionMark im(this);
 57.2506 -  emit_byte(0xD9);
 57.2507 +  emit_int8((unsigned char)0xD9);
 57.2508    emit_operand32(rax, adr);
 57.2509  }
 57.2510  
 57.2511 @@ -3878,35 +3884,35 @@
 57.2512  
 57.2513  void Assembler::fld_x(Address adr) {
 57.2514    InstructionMark im(this);
 57.2515 -  emit_byte(0xDB);
 57.2516 +  emit_int8((unsigned char)0xDB);
 57.2517    emit_operand32(rbp, adr);
 57.2518  }
 57.2519  
 57.2520  void Assembler::fldcw(Address src) {
 57.2521    InstructionMark im(this);
 57.2522 -  emit_byte(0xd9);
 57.2523 +  emit_int8((unsigned char)0xD9);
 57.2524    emit_operand32(rbp, src);
 57.2525  }
 57.2526  
 57.2527  void Assembler::fldenv(Address src) {
 57.2528    InstructionMark im(this);
 57.2529 -  emit_byte(0xD9);
 57.2530 +  emit_int8((unsigned char)0xD9);
 57.2531    emit_operand32(rsp, src);
 57.2532  }
 57.2533  
 57.2534  void Assembler::fldlg2() {
 57.2535 -  emit_byte(0xD9);
 57.2536 -  emit_byte(0xEC);
 57.2537 +  emit_int8((unsigned char)0xD9);
 57.2538 +  emit_int8((unsigned char)0xEC);
 57.2539  }
 57.2540  
 57.2541  void Assembler::fldln2() {
 57.2542 -  emit_byte(0xD9);
 57.2543 -  emit_byte(0xED);
 57.2544 +  emit_int8((unsigned char)0xD9);
 57.2545 +  emit_int8((unsigned char)0xED);
 57.2546  }
 57.2547  
 57.2548  void Assembler::fldz() {
 57.2549 -  emit_byte(0xD9);
 57.2550 -  emit_byte(0xEE);
 57.2551 +  emit_int8((unsigned char)0xD9);
 57.2552 +  emit_int8((unsigned char)0xEE);
 57.2553  }
 57.2554  
 57.2555  void Assembler::flog() {
 57.2556 @@ -3927,13 +3933,13 @@
 57.2557  
 57.2558  void Assembler::fmul_d(Address src) {
 57.2559    InstructionMark im(this);
 57.2560 -  emit_byte(0xDC);
 57.2561 +  emit_int8((unsigned char)0xDC);
 57.2562    emit_operand32(rcx, src);
 57.2563  }
 57.2564  
 57.2565  void Assembler::fmul_s(Address src) {
 57.2566    InstructionMark im(this);
 57.2567 -  emit_byte(0xD8);
 57.2568 +  emit_int8((unsigned char)0xD8);
 57.2569    emit_operand32(rcx, src);
 57.2570  }
 57.2571  
 57.2572 @@ -3947,63 +3953,63 @@
 57.2573  
 57.2574  void Assembler::fnsave(Address dst) {
 57.2575    InstructionMark im(this);
 57.2576 -  emit_byte(0xDD);
 57.2577 +  emit_int8((unsigned char)0xDD);
 57.2578    emit_operand32(rsi, dst);
 57.2579  }
 57.2580  
 57.2581  void Assembler::fnstcw(Address src) {
 57.2582    InstructionMark im(this);
 57.2583 -  emit_byte(0x9B);
 57.2584 -  emit_byte(0xD9);
 57.2585 +  emit_int8((unsigned char)0x9B);
 57.2586 +  emit_int8((unsigned char)0xD9);
 57.2587    emit_operand32(rdi, src);
 57.2588  }
 57.2589  
 57.2590  void Assembler::fnstsw_ax() {
 57.2591 -  emit_byte(0xdF);
 57.2592 -  emit_byte(0xE0);
 57.2593 +  emit_int8((unsigned char)0xDF);
 57.2594 +  emit_int8((unsigned char)0xE0);
 57.2595  }
 57.2596  
 57.2597  void Assembler::fprem() {
 57.2598 -  emit_byte(0xD9);
 57.2599 -  emit_byte(0xF8);
 57.2600 +  emit_int8((unsigned char)0xD9);
 57.2601 +  emit_int8((unsigned char)0xF8);
 57.2602  }
 57.2603  
 57.2604  void Assembler::fprem1() {
 57.2605 -  emit_byte(0xD9);
 57.2606 -  emit_byte(0xF5);
 57.2607 +  emit_int8((unsigned char)0xD9);
 57.2608 +  emit_int8((unsigned char)0xF5);
 57.2609  }
 57.2610  
 57.2611  void Assembler::frstor(Address src) {
 57.2612    InstructionMark im(this);
 57.2613 -  emit_byte(0xDD);
 57.2614 +  emit_int8((unsigned char)0xDD);
 57.2615    emit_operand32(rsp, src);
 57.2616  }
 57.2617  
 57.2618  void Assembler::fsin() {
 57.2619 -  emit_byte(0xD9);
 57.2620 -  emit_byte(0xFE);
 57.2621 +  emit_int8((unsigned char)0xD9);
 57.2622 +  emit_int8((unsigned char)0xFE);
 57.2623  }
 57.2624  
 57.2625  void Assembler::fsqrt() {
 57.2626 -  emit_byte(0xD9);
 57.2627 -  emit_byte(0xFA);
 57.2628 +  emit_int8((unsigned char)0xD9);
 57.2629 +  emit_int8((unsigned char)0xFA);
 57.2630  }
 57.2631  
 57.2632  void Assembler::fst_d(Address adr) {
 57.2633    InstructionMark im(this);
 57.2634 -  emit_byte(0xDD);
 57.2635 +  emit_int8((unsigned char)0xDD);
 57.2636    emit_operand32(rdx, adr);
 57.2637  }
 57.2638  
 57.2639  void Assembler::fst_s(Address adr) {
 57.2640    InstructionMark im(this);
 57.2641 -  emit_byte(0xD9);
 57.2642 +  emit_int8((unsigned char)0xD9);
 57.2643    emit_operand32(rdx, adr);
 57.2644  }
 57.2645  
 57.2646  void Assembler::fstp_d(Address adr) {
 57.2647    InstructionMark im(this);
 57.2648 -  emit_byte(0xDD);
 57.2649 +  emit_int8((unsigned char)0xDD);
 57.2650    emit_operand32(rbx, adr);
 57.2651  }
 57.2652  
 57.2653 @@ -4013,13 +4019,13 @@
 57.2654  
 57.2655  void Assembler::fstp_s(Address adr) {
 57.2656    InstructionMark im(this);
 57.2657 -  emit_byte(0xD9);
 57.2658 +  emit_int8((unsigned char)0xD9);
 57.2659    emit_operand32(rbx, adr);
 57.2660  }
 57.2661  
 57.2662  void Assembler::fstp_x(Address adr) {
 57.2663    InstructionMark im(this);
 57.2664 -  emit_byte(0xDB);
 57.2665 +  emit_int8((unsigned char)0xDB);
 57.2666    emit_operand32(rdi, adr);
 57.2667  }
 57.2668  
 57.2669 @@ -4029,13 +4035,13 @@
 57.2670  
 57.2671  void Assembler::fsub_d(Address src) {
 57.2672    InstructionMark im(this);
 57.2673 -  emit_byte(0xDC);
 57.2674 +  emit_int8((unsigned char)0xDC);
 57.2675    emit_operand32(rsp, src);
 57.2676  }
 57.2677  
 57.2678  void Assembler::fsub_s(Address src) {
 57.2679    InstructionMark im(this);
 57.2680 -  emit_byte(0xD8);
 57.2681 +  emit_int8((unsigned char)0xD8);
 57.2682    emit_operand32(rsp, src);
 57.2683  }
 57.2684  
 57.2685 @@ -4053,13 +4059,13 @@
 57.2686  
 57.2687  void Assembler::fsubr_d(Address src) {
 57.2688    InstructionMark im(this);
 57.2689 -  emit_byte(0xDC);
 57.2690 +  emit_int8((unsigned char)0xDC);
 57.2691    emit_operand32(rbp, src);
 57.2692  }
 57.2693  
 57.2694  void Assembler::fsubr_s(Address src) {
 57.2695    InstructionMark im(this);
 57.2696 -  emit_byte(0xD8);
 57.2697 +  emit_int8((unsigned char)0xD8);
 57.2698    emit_operand32(rbp, src);
 57.2699  }
 57.2700  
 57.2701 @@ -4072,15 +4078,15 @@
 57.2702  }
 57.2703  
 57.2704  void Assembler::ftan() {
 57.2705 -  emit_byte(0xD9);
 57.2706 -  emit_byte(0xF2);
 57.2707 -  emit_byte(0xDD);
 57.2708 -  emit_byte(0xD8);
 57.2709 +  emit_int8((unsigned char)0xD9);
 57.2710 +  emit_int8((unsigned char)0xF2);
 57.2711 +  emit_int8((unsigned char)0xDD);
 57.2712 +  emit_int8((unsigned char)0xD8);
 57.2713  }
 57.2714  
 57.2715  void Assembler::ftst() {
 57.2716 -  emit_byte(0xD9);
 57.2717 -  emit_byte(0xE4);
 57.2718 +  emit_int8((unsigned char)0xD9);
 57.2719 +  emit_int8((unsigned char)0xE4);
 57.2720  }
 57.2721  
 57.2722  void Assembler::fucomi(int i) {
 57.2723 @@ -4096,7 +4102,7 @@
 57.2724  }
 57.2725  
 57.2726  void Assembler::fwait() {
 57.2727 -  emit_byte(0x9B);
 57.2728 +  emit_int8((unsigned char)0x9B);
 57.2729  }
 57.2730  
 57.2731  void Assembler::fxch(int i) {
 57.2732 @@ -4104,23 +4110,23 @@
 57.2733  }
 57.2734  
 57.2735  void Assembler::fyl2x() {
 57.2736 -  emit_byte(0xD9);
 57.2737 -  emit_byte(0xF1);
 57.2738 +  emit_int8((unsigned char)0xD9);
 57.2739 +  emit_int8((unsigned char)0xF1);
 57.2740  }
 57.2741  
 57.2742  void Assembler::frndint() {
 57.2743 -  emit_byte(0xD9);
 57.2744 -  emit_byte(0xFC);
 57.2745 +  emit_int8((unsigned char)0xD9);
 57.2746 +  emit_int8((unsigned char)0xFC);
 57.2747  }
 57.2748  
 57.2749  void Assembler::f2xm1() {
 57.2750 -  emit_byte(0xD9);
 57.2751 -  emit_byte(0xF0);
 57.2752 +  emit_int8((unsigned char)0xD9);
 57.2753 +  emit_int8((unsigned char)0xF0);
 57.2754  }
 57.2755  
 57.2756  void Assembler::fldl2e() {
 57.2757 -  emit_byte(0xD9);
 57.2758 -  emit_byte(0xEA);
 57.2759 +  emit_int8((unsigned char)0xD9);
 57.2760 +  emit_int8((unsigned char)0xEA);
 57.2761  }
 57.2762  
 57.2763  // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
 57.2764 @@ -4131,7 +4137,7 @@
 57.2765  // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
 57.2766  void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
 57.2767    if (pre > 0) {
 57.2768 -    emit_byte(simd_pre[pre]);
 57.2769 +    emit_int8(simd_pre[pre]);
 57.2770    }
 57.2771    if (rex_w) {
 57.2772      prefixq(adr, xreg);
 57.2773 @@ -4139,25 +4145,25 @@
 57.2774      prefix(adr, xreg);
 57.2775    }
 57.2776    if (opc > 0) {
 57.2777 -    emit_byte(0x0F);
 57.2778 +    emit_int8(0x0F);
 57.2779      int opc2 = simd_opc[opc];
 57.2780      if (opc2 > 0) {
 57.2781 -      emit_byte(opc2);
 57.2782 +      emit_int8(opc2);
 57.2783      }
 57.2784    }
 57.2785  }
 57.2786  
 57.2787  int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
 57.2788    if (pre > 0) {
 57.2789 -    emit_byte(simd_pre[pre]);
 57.2790 +    emit_int8(simd_pre[pre]);
 57.2791    }
 57.2792    int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
 57.2793                            prefix_and_encode(dst_enc, src_enc);
 57.2794    if (opc > 0) {
 57.2795 -    emit_byte(0x0F);
 57.2796 +    emit_int8(0x0F);
 57.2797      int opc2 = simd_opc[opc];
 57.2798      if (opc2 > 0) {
 57.2799 -      emit_byte(opc2);
 57.2800 +      emit_int8(opc2);
 57.2801      }
 57.2802    }
 57.2803    return encode;
 57.2804 @@ -4171,11 +4177,11 @@
 57.2805      int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
 57.2806      byte1 = (~byte1) & 0xE0;
 57.2807      byte1 |= opc;
 57.2808 -    a_byte(byte1);
 57.2809 +    emit_int8(byte1);
 57.2810  
 57.2811      int byte2 = ((~nds_enc) & 0xf) << 3;
 57.2812      byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
 57.2813 -    emit_byte(byte2);
 57.2814 +    emit_int8(byte2);
 57.2815    } else {
 57.2816      prefix(VEX_2bytes);
 57.2817  
 57.2818 @@ -4183,7 +4189,7 @@
 57.2819      byte1 = (~byte1) & 0x80;
 57.2820      byte1 |= ((~nds_enc) & 0xf) << 3;
 57.2821      byte1 |= (vector256 ? 4 : 0) | pre;
 57.2822 -    emit_byte(byte1);
 57.2823 +    emit_int8(byte1);
 57.2824    }
 57.2825  }
 57.2826  
 57.2827 @@ -4229,28 +4235,28 @@
 57.2828  void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
 57.2829    InstructionMark im(this);
 57.2830    simd_prefix(dst, dst, src, pre);
 57.2831 -  emit_byte(opcode);
 57.2832 +  emit_int8(opcode);
 57.2833    emit_operand(dst, src);
 57.2834  }
 57.2835  
 57.2836  void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
 57.2837    int encode = simd_prefix_and_encode(dst, dst, src, pre);
 57.2838 -  emit_byte(opcode);
 57.2839 -  emit_byte(0xC0 | encode);
 57.2840 +  emit_int8(opcode);
 57.2841 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2842  }
 57.2843  
 57.2844  // Versions with no second source register (non-destructive source).
 57.2845  void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
 57.2846    InstructionMark im(this);
 57.2847    simd_prefix(dst, xnoreg, src, pre);
 57.2848 -  emit_byte(opcode);
 57.2849 +  emit_int8(opcode);
 57.2850    emit_operand(dst, src);
 57.2851  }
 57.2852  
 57.2853  void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
 57.2854    int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
 57.2855 -  emit_byte(opcode);
 57.2856 -  emit_byte(0xC0 | encode);
 57.2857 +  emit_int8(opcode);
 57.2858 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2859  }
 57.2860  
 57.2861  // 3-operands AVX instructions
 57.2862 @@ -4258,22 +4264,22 @@
 57.2863                                 Address src, VexSimdPrefix pre, bool vector256) {
 57.2864    InstructionMark im(this);
 57.2865    vex_prefix(dst, nds, src, pre, vector256);
 57.2866 -  emit_byte(opcode);
 57.2867 +  emit_int8(opcode);
 57.2868    emit_operand(dst, src);
 57.2869  }
 57.2870  
 57.2871  void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
 57.2872                                 XMMRegister src, VexSimdPrefix pre, bool vector256) {
 57.2873    int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
 57.2874 -  emit_byte(opcode);
 57.2875 -  emit_byte(0xC0 | encode);
 57.2876 +  emit_int8(opcode);
 57.2877 +  emit_int8((unsigned char)(0xC0 | encode));
 57.2878  }
 57.2879  
 57.2880  #ifndef _LP64
 57.2881  
 57.2882  void Assembler::incl(Register dst) {
 57.2883    // Don't use it directly. Use MacroAssembler::incrementl() instead.
 57.2884 -  emit_byte(0x40 | dst->encoding());
 57.2885 +  emit_int8(0x40 | dst->encoding());
 57.2886  }
 57.2887  
 57.2888  void Assembler::lea(Register dst, Address src) {
 57.2889 @@ -4282,7 +4288,7 @@
 57.2890  
 57.2891  void Assembler::mov_literal32(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
 57.2892    InstructionMark im(this);
 57.2893 -  emit_byte(0xC7);
 57.2894 +  emit_int8((unsigned char)0xC7);
 57.2895    emit_operand(rax, dst);
 57.2896    emit_data((int)imm32, rspec, 0);
 57.2897  }
 57.2898 @@ -4290,49 +4296,49 @@
 57.2899  void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
 57.2900    InstructionMark im(this);
 57.2901    int encode = prefix_and_encode(dst->encoding());
 57.2902 -  emit_byte(0xB8 | encode);
 57.2903 +  emit_int8((unsigned char)(0xB8 | encode));
 57.2904    emit_data((int)imm32, rspec, 0);
 57.2905  }
 57.2906  
 57.2907  void Assembler::popa() { // 32bit
 57.2908 -  emit_byte(0x61);
 57.2909 +  emit_int8(0x61);
 57.2910  }
 57.2911  
 57.2912  void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
 57.2913    InstructionMark im(this);
 57.2914 -  emit_byte(0x68);
 57.2915 +  emit_int8(0x68);
 57.2916    emit_data(imm32, rspec, 0);
 57.2917  }
 57.2918  
 57.2919  void Assembler::pusha() { // 32bit
 57.2920 -  emit_byte(0x60);
 57.2921 +  emit_int8(0x60);
 57.2922  }
 57.2923  
 57.2924  void Assembler::set_byte_if_not_zero(Register dst) {
 57.2925 -  emit_byte(0x0F);
 57.2926 -  emit_byte(0x95);
 57.2927 -  emit_byte(0xE0 | dst->encoding());
 57.2928 +  emit_int8(0x0F);
 57.2929 +  emit_int8((unsigned char)0x95);
 57.2930 +  emit_int8((unsigned char)(0xE0 | dst->encoding()));
 57.2931  }
 57.2932  
 57.2933  void Assembler::shldl(Register dst, Register src) {
 57.2934 -  emit_byte(0x0F);
 57.2935 -  emit_byte(0xA5);
 57.2936 -  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
 57.2937 +  emit_int8(0x0F);
 57.2938 +  emit_int8((unsigned char)0xA5);
 57.2939 +  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
 57.2940  }
 57.2941  
 57.2942  void Assembler::shrdl(Register dst, Register src) {
 57.2943 -  emit_byte(0x0F);
 57.2944 -  emit_byte(0xAD);
 57.2945 -  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
 57.2946 +  emit_int8(0x0F);
 57.2947 +  emit_int8((unsigned char)0xAD);
 57.2948 +  emit_int8((unsigned char)(0xC0 | src->encoding() << 3 | dst->encoding()));
 57.2949  }
 57.2950  
 57.2951  #else // LP64
 57.2952  
 57.2953  void Assembler::set_byte_if_not_zero(Register dst) {
 57.2954    int enc = prefix_and_encode(dst->encoding(), true);
 57.2955 -  emit_byte(0x0F);
 57.2956 -  emit_byte(0x95);
 57.2957 -  emit_byte(0xE0 | enc);
 57.2958 +  emit_int8(0x0F);
 57.2959 +  emit_int8((unsigned char)0x95);
 57.2960 +  emit_int8((unsigned char)(0xE0 | enc));
 57.2961  }
 57.2962  
 57.2963  // 64bit only pieces of the assembler
 57.2964 @@ -4670,7 +4676,7 @@
 57.2965  void Assembler::adcq(Register dst, Address src) {
 57.2966    InstructionMark im(this);
 57.2967    prefixq(src, dst);
 57.2968 -  emit_byte(0x13);
 57.2969 +  emit_int8(0x13);
 57.2970    emit_operand(dst, src);
 57.2971  }
 57.2972  
 57.2973 @@ -4688,7 +4694,7 @@
 57.2974  void Assembler::addq(Address dst, Register src) {
 57.2975    InstructionMark im(this);
 57.2976    prefixq(dst, src);
 57.2977 -  emit_byte(0x01);
 57.2978 +  emit_int8(0x01);
 57.2979    emit_operand(src, dst);
 57.2980  }
 57.2981  
 57.2982 @@ -4700,7 +4706,7 @@
 57.2983  void Assembler::addq(Register dst, Address src) {
 57.2984    InstructionMark im(this);
 57.2985    prefixq(src, dst);
 57.2986 -  emit_byte(0x03);
 57.2987 +  emit_int8(0x03);
 57.2988    emit_operand(dst, src);
 57.2989  }
 57.2990  
 57.2991 @@ -4712,7 +4718,7 @@
 57.2992  void Assembler::andq(Address dst, int32_t imm32) {
 57.2993    InstructionMark im(this);
 57.2994    prefixq(dst);
 57.2995 -  emit_byte(0x81);
 57.2996 +  emit_int8((unsigned char)0x81);
 57.2997    emit_operand(rsp, dst, 4);
 57.2998    emit_long(imm32);
 57.2999  }
 57.3000 @@ -4725,7 +4731,7 @@
 57.3001  void Assembler::andq(Register dst, Address src) {
 57.3002    InstructionMark im(this);
 57.3003    prefixq(src, dst);
 57.3004 -  emit_byte(0x23);
 57.3005 +  emit_int8(0x23);
 57.3006    emit_operand(dst, src);
 57.3007  }
 57.3008  
 57.3009 @@ -4736,56 +4742,56 @@
 57.3010  
 57.3011  void Assembler::bsfq(Register dst, Register src) {
 57.3012    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3013 -  emit_byte(0x0F);
 57.3014 -  emit_byte(0xBC);
 57.3015 -  emit_byte(0xC0 | encode);
 57.3016 +  emit_int8(0x0F);
 57.3017 +  emit_int8((unsigned char)0xBC);
 57.3018 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3019  }
 57.3020  
 57.3021  void Assembler::bsrq(Register dst, Register src) {
 57.3022    assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
 57.3023    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3024 -  emit_byte(0x0F);
 57.3025 -  emit_byte(0xBD);
 57.3026 -  emit_byte(0xC0 | encode);
 57.3027 +  emit_int8(0x0F);
 57.3028 +  emit_int8((unsigned char)0xBD);
 57.3029 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3030  }
 57.3031  
 57.3032  void Assembler::bswapq(Register reg) {
 57.3033    int encode = prefixq_and_encode(reg->encoding());
 57.3034 -  emit_byte(0x0F);
 57.3035 -  emit_byte(0xC8 | encode);
 57.3036 +  emit_int8(0x0F);
 57.3037 +  emit_int8((unsigned char)(0xC8 | encode));
 57.3038  }
 57.3039  
 57.3040  void Assembler::cdqq() {
 57.3041    prefix(REX_W);
 57.3042 -  emit_byte(0x99);
 57.3043 +  emit_int8((unsigned char)0x99);
 57.3044  }
 57.3045  
 57.3046  void Assembler::clflush(Address adr) {
 57.3047    prefix(adr);
 57.3048 -  emit_byte(0x0F);
 57.3049 -  emit_byte(0xAE);
 57.3050 +  emit_int8(0x0F);
 57.3051 +  emit_int8((unsigned char)0xAE);
 57.3052    emit_operand(rdi, adr);
 57.3053  }
 57.3054  
 57.3055  void Assembler::cmovq(Condition cc, Register dst, Register src) {
 57.3056    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3057 -  emit_byte(0x0F);
 57.3058 -  emit_byte(0x40 | cc);
 57.3059 -  emit_byte(0xC0 | encode);
 57.3060 +  emit_int8(0x0F);
 57.3061 +  emit_int8(0x40 | cc);
 57.3062 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3063  }
 57.3064  
 57.3065  void Assembler::cmovq(Condition cc, Register dst, Address src) {
 57.3066    InstructionMark im(this);
 57.3067    prefixq(src, dst);
 57.3068 -  emit_byte(0x0F);
 57.3069 -  emit_byte(0x40 | cc);
 57.3070 +  emit_int8(0x0F);
 57.3071 +  emit_int8(0x40 | cc);
 57.3072    emit_operand(dst, src);
 57.3073  }
 57.3074  
 57.3075  void Assembler::cmpq(Address dst, int32_t imm32) {
 57.3076    InstructionMark im(this);
 57.3077    prefixq(dst);
 57.3078 -  emit_byte(0x81);
 57.3079 +  emit_int8((unsigned char)0x81);
 57.3080    emit_operand(rdi, dst, 4);
 57.3081    emit_long(imm32);
 57.3082  }
 57.3083 @@ -4798,7 +4804,7 @@
 57.3084  void Assembler::cmpq(Address dst, Register src) {
 57.3085    InstructionMark im(this);
 57.3086    prefixq(dst, src);
 57.3087 -  emit_byte(0x3B);
 57.3088 +  emit_int8(0x3B);
 57.3089    emit_operand(src, dst);
 57.3090  }
 57.3091  
 57.3092 @@ -4810,122 +4816,122 @@
 57.3093  void Assembler::cmpq(Register dst, Address  src) {
 57.3094    InstructionMark im(this);
 57.3095    prefixq(src, dst);
 57.3096 -  emit_byte(0x3B);
 57.3097 +  emit_int8(0x3B);
 57.3098    emit_operand(dst, src);
 57.3099  }
 57.3100  
 57.3101  void Assembler::cmpxchgq(Register reg, Address adr) {
 57.3102    InstructionMark im(this);
 57.3103    prefixq(adr, reg);
 57.3104 -  emit_byte(0x0F);
 57.3105 -  emit_byte(0xB1);
 57.3106 +  emit_int8(0x0F);
 57.3107 +  emit_int8((unsigned char)0xB1);
 57.3108    emit_operand(reg, adr);
 57.3109  }
 57.3110  
 57.3111  void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
 57.3112    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.3113    int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
 57.3114 -  emit_byte(0x2A);
 57.3115 -  emit_byte(0xC0 | encode);
 57.3116 +  emit_int8(0x2A);
 57.3117 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3118  }
 57.3119  
 57.3120  void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
 57.3121    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.3122    InstructionMark im(this);
 57.3123    simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
 57.3124 -  emit_byte(0x2A);
 57.3125 +  emit_int8(0x2A);
 57.3126    emit_operand(dst, src);
 57.3127  }
 57.3128  
 57.3129  void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
 57.3130    NOT_LP64(assert(VM_Version::supports_sse(), ""));
 57.3131    int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
 57.3132 -  emit_byte(0x2A);
 57.3133 -  emit_byte(0xC0 | encode);
 57.3134 +  emit_int8(0x2A);
 57.3135 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3136  }
 57.3137  
 57.3138  void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
 57.3139    NOT_LP64(assert(VM_Version::supports_sse(), ""));
 57.3140    InstructionMark im(this);
 57.3141    simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
 57.3142 -  emit_byte(0x2A);
 57.3143 +  emit_int8(0x2A);
 57.3144    emit_operand(dst, src);
 57.3145  }
 57.3146  
 57.3147  void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
 57.3148    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.3149    int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
 57.3150 -  emit_byte(0x2C);
 57.3151 -  emit_byte(0xC0 | encode);
 57.3152 +  emit_int8(0x2C);
 57.3153 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3154  }
 57.3155  
 57.3156  void Assembler::cvttss2siq(Register dst, XMMRegister src) {
 57.3157    NOT_LP64(assert(VM_Version::supports_sse(), ""));
 57.3158    int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
 57.3159 -  emit_byte(0x2C);
 57.3160 -  emit_byte(0xC0 | encode);
 57.3161 +  emit_int8(0x2C);
 57.3162 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3163  }
 57.3164  
 57.3165  void Assembler::decl(Register dst) {
 57.3166    // Don't use it directly. Use MacroAssembler::decrementl() instead.
 57.3167    // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
 57.3168    int encode = prefix_and_encode(dst->encoding());
 57.3169 -  emit_byte(0xFF);
 57.3170 -  emit_byte(0xC8 | encode);
 57.3171 +  emit_int8((unsigned char)0xFF);
 57.3172 +  emit_int8((unsigned char)(0xC8 | encode));
 57.3173  }
 57.3174  
 57.3175  void Assembler::decq(Register dst) {
 57.3176    // Don't use it directly. Use MacroAssembler::decrementq() instead.
 57.3177    // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
 57.3178    int encode = prefixq_and_encode(dst->encoding());
 57.3179 -  emit_byte(0xFF);
 57.3180 -  emit_byte(0xC8 | encode);
 57.3181 +  emit_int8((unsigned char)0xFF);
 57.3182 +  emit_int8(0xC8 | encode);
 57.3183  }
 57.3184  
 57.3185  void Assembler::decq(Address dst) {
 57.3186    // Don't use it directly. Use MacroAssembler::decrementq() instead.
 57.3187    InstructionMark im(this);
 57.3188    prefixq(dst);
 57.3189 -  emit_byte(0xFF);
 57.3190 +  emit_int8((unsigned char)0xFF);
 57.3191    emit_operand(rcx, dst);
 57.3192  }
 57.3193  
 57.3194  void Assembler::fxrstor(Address src) {
 57.3195    prefixq(src);
 57.3196 -  emit_byte(0x0F);
 57.3197 -  emit_byte(0xAE);
 57.3198 +  emit_int8(0x0F);
 57.3199 +  emit_int8((unsigned char)0xAE);
 57.3200    emit_operand(as_Register(1), src);
 57.3201  }
 57.3202  
 57.3203  void Assembler::fxsave(Address dst) {
 57.3204    prefixq(dst);
 57.3205 -  emit_byte(0x0F);
 57.3206 -  emit_byte(0xAE);
 57.3207 +  emit_int8(0x0F);
 57.3208 +  emit_int8((unsigned char)0xAE);
 57.3209    emit_operand(as_Register(0), dst);
 57.3210  }
 57.3211  
 57.3212  void Assembler::idivq(Register src) {
 57.3213    int encode = prefixq_and_encode(src->encoding());
 57.3214 -  emit_byte(0xF7);
 57.3215 -  emit_byte(0xF8 | encode);
 57.3216 +  emit_int8((unsigned char)0xF7);
 57.3217 +  emit_int8((unsigned char)(0xF8 | encode));
 57.3218  }
 57.3219  
 57.3220  void Assembler::imulq(Register dst, Register src) {
 57.3221    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3222 -  emit_byte(0x0F);
 57.3223 -  emit_byte(0xAF);
 57.3224 -  emit_byte(0xC0 | encode);
 57.3225 +  emit_int8(0x0F);
 57.3226 +  emit_int8((unsigned char)0xAF);
 57.3227 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3228  }
 57.3229  
 57.3230  void Assembler::imulq(Register dst, Register src, int value) {
 57.3231    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3232    if (is8bit(value)) {
 57.3233 -    emit_byte(0x6B);
 57.3234 -    emit_byte(0xC0 | encode);
 57.3235 -    emit_byte(value & 0xFF);
 57.3236 +    emit_int8(0x6B);
 57.3237 +    emit_int8((unsigned char)(0xC0 | encode));
 57.3238 +    emit_int8(value & 0xFF);
 57.3239    } else {
 57.3240 -    emit_byte(0x69);
 57.3241 -    emit_byte(0xC0 | encode);
 57.3242 +    emit_int8(0x69);
 57.3243 +    emit_int8((unsigned char)(0xC0 | encode));
 57.3244      emit_long(value);
 57.3245    }
 57.3246  }
 57.3247 @@ -4934,23 +4940,23 @@
 57.3248    // Don't use it directly. Use MacroAssembler::incrementl() instead.
 57.3249    // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
 57.3250    int encode = prefix_and_encode(dst->encoding());
 57.3251 -  emit_byte(0xFF);
 57.3252 -  emit_byte(0xC0 | encode);
 57.3253 +  emit_int8((unsigned char)0xFF);
 57.3254 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3255  }
 57.3256  
 57.3257  void Assembler::incq(Register dst) {
 57.3258    // Don't use it directly. Use MacroAssembler::incrementq() instead.
 57.3259    // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
 57.3260    int encode = prefixq_and_encode(dst->encoding());
 57.3261 -  emit_byte(0xFF);
 57.3262 -  emit_byte(0xC0 | encode);
 57.3263 +  emit_int8((unsigned char)0xFF);
 57.3264 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3265  }
 57.3266  
 57.3267  void Assembler::incq(Address dst) {
 57.3268    // Don't use it directly. Use MacroAssembler::incrementq() instead.
 57.3269    InstructionMark im(this);
 57.3270    prefixq(dst);
 57.3271 -  emit_byte(0xFF);
 57.3272 +  emit_int8((unsigned char)0xFF);
 57.3273    emit_operand(rax, dst);
 57.3274  }
 57.3275  
 57.3276 @@ -4961,35 +4967,35 @@
 57.3277  void Assembler::leaq(Register dst, Address src) {
 57.3278    InstructionMark im(this);
 57.3279    prefixq(src, dst);
 57.3280 -  emit_byte(0x8D);
 57.3281 +  emit_int8((unsigned char)0x8D);
 57.3282    emit_operand(dst, src);
 57.3283  }
 57.3284  
 57.3285  void Assembler::mov64(Register dst, int64_t imm64) {
 57.3286    InstructionMark im(this);
 57.3287    int encode = prefixq_and_encode(dst->encoding());
 57.3288 -  emit_byte(0xB8 | encode);
 57.3289 +  emit_int8((unsigned char)(0xB8 | encode));
 57.3290    emit_int64(imm64);
 57.3291  }
 57.3292  
 57.3293  void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
 57.3294    InstructionMark im(this);
 57.3295    int encode = prefixq_and_encode(dst->encoding());
 57.3296 -  emit_byte(0xB8 | encode);
 57.3297 +  emit_int8(0xB8 | encode);
 57.3298    emit_data64(imm64, rspec);
 57.3299  }
 57.3300  
 57.3301  void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
 57.3302    InstructionMark im(this);
 57.3303    int encode = prefix_and_encode(dst->encoding());
 57.3304 -  emit_byte(0xB8 | encode);
 57.3305 +  emit_int8((unsigned char)(0xB8 | encode));
 57.3306    emit_data((int)imm32, rspec, narrow_oop_operand);
 57.3307  }
 57.3308  
 57.3309  void Assembler::mov_narrow_oop(Address dst, int32_t imm32,  RelocationHolder const& rspec) {
 57.3310    InstructionMark im(this);
 57.3311    prefix(dst);
 57.3312 -  emit_byte(0xC7);
 57.3313 +  emit_int8((unsigned char)0xC7);
 57.3314    emit_operand(rax, dst, 4);
 57.3315    emit_data((int)imm32, rspec, narrow_oop_operand);
 57.3316  }
 57.3317 @@ -4997,34 +5003,34 @@
 57.3318  void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
 57.3319    InstructionMark im(this);
 57.3320    int encode = prefix_and_encode(src1->encoding());
 57.3321 -  emit_byte(0x81);
 57.3322 -  emit_byte(0xF8 | encode);
 57.3323 +  emit_int8((unsigned char)0x81);
 57.3324 +  emit_int8((unsigned char)(0xF8 | encode));
 57.3325    emit_data((int)imm32, rspec, narrow_oop_operand);
 57.3326  }
 57.3327  
 57.3328  void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
 57.3329    InstructionMark im(this);
 57.3330    prefix(src1);
 57.3331 -  emit_byte(0x81);
 57.3332 +  emit_int8((unsigned char)0x81);
 57.3333    emit_operand(rax, src1, 4);
 57.3334    emit_data((int)imm32, rspec, narrow_oop_operand);
 57.3335  }
 57.3336  
 57.3337  void Assembler::lzcntq(Register dst, Register src) {
 57.3338    assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
 57.3339 -  emit_byte(0xF3);
 57.3340 +  emit_int8((unsigned char)0xF3);
 57.3341    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3342 -  emit_byte(0x0F);
 57.3343 -  emit_byte(0xBD);
 57.3344 -  emit_byte(0xC0 | encode);
 57.3345 +  emit_int8(0x0F);
 57.3346 +  emit_int8((unsigned char)0xBD);
 57.3347 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3348  }
 57.3349  
 57.3350  void Assembler::movdq(XMMRegister dst, Register src) {
 57.3351    // table D-1 says MMX/SSE2
 57.3352    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.3353    int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
 57.3354 -  emit_byte(0x6E);
 57.3355 -  emit_byte(0xC0 | encode);
 57.3356 +  emit_int8(0x6E);
 57.3357 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3358  }
 57.3359  
 57.3360  void Assembler::movdq(Register dst, XMMRegister src) {
 57.3361 @@ -5032,43 +5038,43 @@
 57.3362    NOT_LP64(assert(VM_Version::supports_sse2(), ""));
 57.3363    // swap src/dst to get correct prefix
 57.3364    int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
 57.3365 -  emit_byte(0x7E);
 57.3366 -  emit_byte(0xC0 | encode);
 57.3367 +  emit_int8(0x7E);
 57.3368 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3369  }
 57.3370  
 57.3371  void Assembler::movq(Register dst, Register src) {
 57.3372    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3373 -  emit_byte(0x8B);
 57.3374 -  emit_byte(0xC0 | encode);
 57.3375 +  emit_int8((unsigned char)0x8B);
 57.3376 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3377  }
 57.3378  
 57.3379  void Assembler::movq(Register dst, Address src) {
 57.3380    InstructionMark im(this);
 57.3381    prefixq(src, dst);
 57.3382 -  emit_byte(0x8B);
 57.3383 +  emit_int8((unsigned char)0x8B);
 57.3384    emit_operand(dst, src);
 57.3385  }
 57.3386  
 57.3387  void Assembler::movq(Address dst, Register src) {
 57.3388    InstructionMark im(this);
 57.3389    prefixq(dst, src);
 57.3390 -  emit_byte(0x89);
 57.3391 +  emit_int8((unsigned char)0x89);
 57.3392    emit_operand(src, dst);
 57.3393  }
 57.3394  
 57.3395  void Assembler::movsbq(Register dst, Address src) {
 57.3396    InstructionMark im(this);
 57.3397    prefixq(src, dst);
 57.3398 -  emit_byte(0x0F);
 57.3399 -  emit_byte(0xBE);
 57.3400 +  emit_int8(0x0F);
 57.3401 +  emit_int8((unsigned char)0xBE);
 57.3402    emit_operand(dst, src);
 57.3403  }
 57.3404  
 57.3405  void Assembler::movsbq(Register dst, Register src) {
 57.3406    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3407 -  emit_byte(0x0F);
 57.3408 -  emit_byte(0xBE);
 57.3409 -  emit_byte(0xC0 | encode);
 57.3410 +  emit_int8(0x0F);
 57.3411 +  emit_int8((unsigned char)0xBE);
 57.3412 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3413  }
 57.3414  
 57.3415  void Assembler::movslq(Register dst, int32_t imm32) {
 57.3416 @@ -5078,7 +5084,7 @@
 57.3417    ShouldNotReachHere();
 57.3418    InstructionMark im(this);
 57.3419    int encode = prefixq_and_encode(dst->encoding());
 57.3420 -  emit_byte(0xC7 | encode);
 57.3421 +  emit_int8((unsigned char)(0xC7 | encode));
 57.3422    emit_long(imm32);
 57.3423  }
 57.3424  
 57.3425 @@ -5086,7 +5092,7 @@
 57.3426    assert(is_simm32(imm32), "lost bits");
 57.3427    InstructionMark im(this);
 57.3428    prefixq(dst);
 57.3429 -  emit_byte(0xC7);
 57.3430 +  emit_int8((unsigned char)0xC7);
 57.3431    emit_operand(rax, dst, 4);
 57.3432    emit_long(imm32);
 57.3433  }
 57.3434 @@ -5094,77 +5100,77 @@
 57.3435  void Assembler::movslq(Register dst, Address src) {
 57.3436    InstructionMark im(this);
 57.3437    prefixq(src, dst);
 57.3438 -  emit_byte(0x63);
 57.3439 +  emit_int8(0x63);
 57.3440    emit_operand(dst, src);
 57.3441  }
 57.3442  
 57.3443  void Assembler::movslq(Register dst, Register src) {
 57.3444    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3445 -  emit_byte(0x63);
 57.3446 -  emit_byte(0xC0 | encode);
 57.3447 +  emit_int8(0x63);
 57.3448 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3449  }
 57.3450  
 57.3451  void Assembler::movswq(Register dst, Address src) {
 57.3452    InstructionMark im(this);
 57.3453    prefixq(src, dst);
 57.3454 -  emit_byte(0x0F);
 57.3455 -  emit_byte(0xBF);
 57.3456 +  emit_int8(0x0F);
 57.3457 +  emit_int8((unsigned char)0xBF);
 57.3458    emit_operand(dst, src);
 57.3459  }
 57.3460  
 57.3461  void Assembler::movswq(Register dst, Register src) {
 57.3462    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3463 -  emit_byte(0x0F);
 57.3464 -  emit_byte(0xBF);
 57.3465 -  emit_byte(0xC0 | encode);
 57.3466 +  emit_int8((unsigned char)0x0F);
 57.3467 +  emit_int8((unsigned char)0xBF);
 57.3468 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3469  }
 57.3470  
 57.3471  void Assembler::movzbq(Register dst, Address src) {
 57.3472    InstructionMark im(this);
 57.3473    prefixq(src, dst);
 57.3474 -  emit_byte(0x0F);
 57.3475 -  emit_byte(0xB6);
 57.3476 +  emit_int8((unsigned char)0x0F);
 57.3477 +  emit_int8((unsigned char)0xB6);
 57.3478    emit_operand(dst, src);
 57.3479  }
 57.3480  
 57.3481  void Assembler::movzbq(Register dst, Register src) {
 57.3482    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3483 -  emit_byte(0x0F);
 57.3484 -  emit_byte(0xB6);
 57.3485 -  emit_byte(0xC0 | encode);
 57.3486 +  emit_int8(0x0F);
 57.3487 +  emit_int8((unsigned char)0xB6);
 57.3488 +  emit_int8(0xC0 | encode);
 57.3489  }
 57.3490  
 57.3491  void Assembler::movzwq(Register dst, Address src) {
 57.3492    InstructionMark im(this);
 57.3493    prefixq(src, dst);
 57.3494 -  emit_byte(0x0F);
 57.3495 -  emit_byte(0xB7);
 57.3496 +  emit_int8((unsigned char)0x0F);
 57.3497 +  emit_int8((unsigned char)0xB7);
 57.3498    emit_operand(dst, src);
 57.3499  }
 57.3500  
 57.3501  void Assembler::movzwq(Register dst, Register src) {
 57.3502    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3503 -  emit_byte(0x0F);
 57.3504 -  emit_byte(0xB7);
 57.3505 -  emit_byte(0xC0 | encode);
 57.3506 +  emit_int8((unsigned char)0x0F);
 57.3507 +  emit_int8((unsigned char)0xB7);
 57.3508 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3509  }
 57.3510  
 57.3511  void Assembler::negq(Register dst) {
 57.3512    int encode = prefixq_and_encode(dst->encoding());
 57.3513 -  emit_byte(0xF7);
 57.3514 -  emit_byte(0xD8 | encode);
 57.3515 +  emit_int8((unsigned char)0xF7);
 57.3516 +  emit_int8((unsigned char)(0xD8 | encode));
 57.3517  }
 57.3518  
 57.3519  void Assembler::notq(Register dst) {
 57.3520    int encode = prefixq_and_encode(dst->encoding());
 57.3521 -  emit_byte(0xF7);
 57.3522 -  emit_byte(0xD0 | encode);
 57.3523 +  emit_int8((unsigned char)0xF7);
 57.3524 +  emit_int8((unsigned char)(0xD0 | encode));
 57.3525  }
 57.3526  
 57.3527  void Assembler::orq(Address dst, int32_t imm32) {
 57.3528    InstructionMark im(this);
 57.3529    prefixq(dst);
 57.3530 -  emit_byte(0x81);
 57.3531 +  emit_int8((unsigned char)0x81);
 57.3532    emit_operand(rcx, dst, 4);
 57.3533    emit_long(imm32);
 57.3534  }
 57.3535 @@ -5177,7 +5183,7 @@
 57.3536  void Assembler::orq(Register dst, Address src) {
 57.3537    InstructionMark im(this);
 57.3538    prefixq(src, dst);
 57.3539 -  emit_byte(0x0B);
 57.3540 +  emit_int8(0x0B);
 57.3541    emit_operand(dst, src);
 57.3542  }
 57.3543  
 57.3544 @@ -5210,26 +5216,26 @@
 57.3545  void Assembler::popcntq(Register dst, Address src) {
 57.3546    assert(VM_Version::supports_popcnt(), "must support");
 57.3547    InstructionMark im(this);
 57.3548 -  emit_byte(0xF3);
 57.3549 +  emit_int8((unsigned char)0xF3);
 57.3550    prefixq(src, dst);
 57.3551 -  emit_byte(0x0F);
 57.3552 -  emit_byte(0xB8);
 57.3553 +  emit_int8((unsigned char)0x0F);
 57.3554 +  emit_int8((unsigned char)0xB8);
 57.3555    emit_operand(dst, src);
 57.3556  }
 57.3557  
 57.3558  void Assembler::popcntq(Register dst, Register src) {
 57.3559    assert(VM_Version::supports_popcnt(), "must support");
 57.3560 -  emit_byte(0xF3);
 57.3561 +  emit_int8((unsigned char)0xF3);
 57.3562    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3563 -  emit_byte(0x0F);
 57.3564 -  emit_byte(0xB8);
 57.3565 -  emit_byte(0xC0 | encode);
 57.3566 +  emit_int8((unsigned char)0x0F);
 57.3567 +  emit_int8((unsigned char)0xB8);
 57.3568 +  emit_int8((unsigned char)(0xC0 | encode));
 57.3569  }
 57.3570  
 57.3571  void Assembler::popq(Address dst) {
 57.3572    InstructionMark im(this);
 57.3573    prefixq(dst);
 57.3574 -  emit_byte(0x8F);
 57.3575 +  emit_int8((unsigned char)0x8F);
 57.3576    emit_operand(rax, dst);
 57.3577  }
 57.3578  
 57.3579 @@ -5261,7 +5267,7 @@
 57.3580  void Assembler::pushq(Address src) {
 57.3581    InstructionMark im(this);
 57.3582    prefixq(src);
 57.3583 -  emit_byte(0xFF);
 57.3584 +  emit_int8((unsigned char)0xFF);
 57.3585    emit_operand(rsi, src);
 57.3586  }
 57.3587  
 57.3588 @@ -5269,31 +5275,31 @@
 57.3589    assert(isShiftCount(imm8 >> 1), "illegal shift count");
 57.3590    int encode = prefixq_and_encode(dst->encoding());
 57.3591    if (imm8 == 1) {
 57.3592 -    emit_byte(0xD1);
 57.3593 -    emit_byte(0xD0 | encode);
 57.3594 +    emit_int8((unsigned char)0xD1);
 57.3595 +    emit_int8((unsigned char)(0xD0 | encode));
 57.3596    } else {
 57.3597 -    emit_byte(0xC1);
 57.3598 -    emit_byte(0xD0 | encode);
 57.3599 -    emit_byte(imm8);
 57.3600 +    emit_int8((unsigned char)0xC1);
 57.3601 +    emit_int8((unsigned char)(0xD0 | encode));
 57.3602 +    emit_int8(imm8);
 57.3603    }
 57.3604  }
 57.3605  void Assembler::sarq(Register dst, int imm8) {
 57.3606    assert(isShiftCount(imm8 >> 1), "illegal shift count");
 57.3607    int encode = prefixq_and_encode(dst->encoding());
 57.3608    if (imm8 == 1) {
 57.3609 -    emit_byte(0xD1);
 57.3610 -    emit_byte(0xF8 | encode);
 57.3611 +    emit_int8((unsigned char)0xD1);
 57.3612 +    emit_int8((unsigned char)(0xF8 | encode));
 57.3613    } else {
 57.3614 -    emit_byte(0xC1);
 57.3615 -    emit_byte(0xF8 | encode);
 57.3616 -    emit_byte(imm8);
 57.3617 +    emit_int8((unsigned char)0xC1);
 57.3618 +    emit_int8((unsigned char)(0xF8 | encode));
 57.3619 +    emit_int8(imm8);
 57.3620    }
 57.3621  }
 57.3622  
 57.3623  void Assembler::sarq(Register dst) {
 57.3624    int encode = prefixq_and_encode(dst->encoding());
 57.3625 -  emit_byte(0xD3);
 57.3626 -  emit_byte(0xF8 | encode);
 57.3627 +  emit_int8((unsigned char)0xD3);
 57.3628 +  emit_int8((unsigned char)(0xF8 | encode));
 57.3629  }
 57.3630  
 57.3631  void Assembler::sbbq(Address dst, int32_t imm32) {
 57.3632 @@ -5310,7 +5316,7 @@
 57.3633  void Assembler::sbbq(Register dst, Address src) {
 57.3634    InstructionMark im(this);
 57.3635    prefixq(src, dst);
 57.3636 -  emit_byte(0x1B);
 57.3637 +  emit_int8(0x1B);
 57.3638    emit_operand(dst, src);
 57.3639  }
 57.3640  
 57.3641 @@ -5323,33 +5329,33 @@
 57.3642    assert(isShiftCount(imm8 >> 1), "illegal shift count");
 57.3643    int encode = prefixq_and_encode(dst->encoding());
 57.3644    if (imm8 == 1) {
 57.3645 -    emit_byte(0xD1);
 57.3646 -    emit_byte(0xE0 | encode);
 57.3647 +    emit_int8((unsigned char)0xD1);
 57.3648 +    emit_int8((unsigned char)(0xE0 | encode));
 57.3649    } else {
 57.3650 -    emit_byte(0xC1);
 57.3651 -    emit_byte(0xE0 | encode);
 57.3652 -    emit_byte(imm8);
 57.3653 +    emit_int8((unsigned char)0xC1);
 57.3654 +    emit_int8((unsigned char)(0xE0 | encode));
 57.3655 +    emit_int8(imm8);
 57.3656    }
 57.3657  }
 57.3658  
 57.3659  void Assembler::shlq(Register dst) {
 57.3660    int encode = prefixq_and_encode(dst->encoding());
 57.3661 -  emit_byte(0xD3);
 57.3662 -  emit_byte(0xE0 | encode);
 57.3663 +  emit_int8((unsigned char)0xD3);
 57.3664 +  emit_int8((unsigned char)(0xE0 | encode));
 57.3665  }
 57.3666  
 57.3667  void Assembler::shrq(Register dst, int imm8) {
 57.3668    assert(isShiftCount(imm8 >> 1), "illegal shift count");
 57.3669    int encode = prefixq_and_encode(dst->encoding());
 57.3670 -  emit_byte(0xC1);
 57.3671 -  emit_byte(0xE8 | encode);
 57.3672 -  emit_byte(imm8);
 57.3673 +  emit_int8((unsigned char)0xC1);
 57.3674 +  emit_int8((unsigned char)(0xE8 | encode));
 57.3675 +  emit_int8(imm8);
 57.3676  }
 57.3677  
 57.3678  void Assembler::shrq(Register dst) {
 57.3679    int encode = prefixq_and_encode(dst->encoding());
 57.3680 -  emit_byte(0xD3);
 57.3681 -  emit_byte(0xE8 | encode);
 57.3682 +  emit_int8((unsigned char)0xD3);
 57.3683 +  emit_int8(0xE8 | encode);
 57.3684  }
 57.3685  
 57.3686  void Assembler::subq(Address dst, int32_t imm32) {
 57.3687 @@ -5361,7 +5367,7 @@
 57.3688  void Assembler::subq(Address dst, Register src) {
 57.3689    InstructionMark im(this);
 57.3690    prefixq(dst, src);
 57.3691 -  emit_byte(0x29);
 57.3692 +  emit_int8(0x29);
 57.3693    emit_operand(src, dst);
 57.3694  }
 57.3695  
 57.3696 @@ -5379,7 +5385,7 @@
 57.3697  void Assembler::subq(Register dst, Address src) {
 57.3698    InstructionMark im(this);
 57.3699    prefixq(src, dst);
 57.3700 -  emit_byte(0x2B);
 57.3701 +  emit_int8(0x2B);
 57.3702    emit_operand(dst, src);
 57.3703  }
 57.3704  
 57.3705 @@ -5395,11 +5401,11 @@
 57.3706    int encode = dst->encoding();
 57.3707    if (encode == 0) {
 57.3708      prefix(REX_W);
 57.3709 -    emit_byte(0xA9);
 57.3710 +    emit_int8((unsigned char)0xA9);
 57.3711    } else {
 57.3712      encode = prefixq_and_encode(encode);
 57.3713 -    emit_byte(0xF7);
 57.3714 -    emit_byte(0xC0 | encode);
 57.3715 +    emit_int8((unsigned char)0xF7);
 57.3716 +    emit_int8((unsigned char)(0xC0 | encode));
 57.3717    }
 57.3718    emit_long(imm32);
 57.3719  }
 57.3720 @@ -5412,22 +5418,22 @@
 57.3721  void Assembler::xaddq(Address dst, Register src) {
 57.3722    InstructionMark im(this);
 57.3723    prefixq(dst, src);
 57.3724 -  emit_byte(0x0F);
 57.3725 -  emit_byte(0xC1);
 57.3726 +  emit_int8(0x0F);
 57.3727 +  emit_int8((unsigned char)0xC1);
 57.3728    emit_operand(src, dst);
 57.3729  }
 57.3730  
 57.3731  void Assembler::xchgq(Register dst, Address src) {
 57.3732    InstructionMark im(this);
 57.3733    prefixq(src, dst);
 57.3734 -  emit_byte(0x87);
 57.3735 +  emit_int8((unsigned char)0x87);
 57.3736    emit_operand(dst, src);
 57.3737  }
 57.3738  
 57.3739  void Assembler::xchgq(Register dst, Register src) {
 57.3740    int encode = prefixq_and_encode(dst->encoding(), src->encoding());
 57.3741 -  emit_byte(0x87);
 57.3742 -  emit_byte(0xc0 | encode);
 57.3743 +  emit_int8((unsigned char)0x87);
 57.3744 +  emit_int8((unsigned char)(0xc0 | encode));
 57.3745  }
 57.3746  
 57.3747  void Assembler::xorq(Register dst, Register src) {
 57.3748 @@ -5438,7 +5444,7 @@
 57.3749  void Assembler::xorq(Register dst, Address src) {
 57.3750    InstructionMark im(this);
 57.3751    prefixq(src, dst);
 57.3752 -  emit_byte(0x33);
 57.3753 +  emit_int8(0x33);
 57.3754    emit_operand(dst, src);
 57.3755  }
 57.3756  
    58.1 --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    58.2 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    58.3 @@ -313,10 +313,10 @@
    58.4  #endif
    58.5    } else {
    58.6      // make a copy the code which is going to be patched.
    58.7 -    for ( int i = 0; i < _bytes_to_copy; i++) {
    58.8 +    for (int i = 0; i < _bytes_to_copy; i++) {
    58.9        address ptr = (address)(_pc_start + i);
   58.10        int a_byte = (*ptr) & 0xFF;
   58.11 -      __ a_byte (a_byte);
   58.12 +      __ emit_int8(a_byte);
   58.13        *ptr = 0x90; // make the site look like a nop
   58.14      }
   58.15    }
   58.16 @@ -363,11 +363,11 @@
   58.17    // emit the offsets needed to find the code to patch
   58.18    int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
   58.19  
   58.20 -  __ a_byte(0xB8);
   58.21 -  __ a_byte(0);
   58.22 -  __ a_byte(being_initialized_entry_offset);
   58.23 -  __ a_byte(bytes_to_skip);
   58.24 -  __ a_byte(_bytes_to_copy);
   58.25 +  __ emit_int8((unsigned char)0xB8);
   58.26 +  __ emit_int8(0);
   58.27 +  __ emit_int8(being_initialized_entry_offset);
   58.28 +  __ emit_int8(bytes_to_skip);
   58.29 +  __ emit_int8(_bytes_to_copy);
   58.30    address patch_info_pc = __ pc();
   58.31    assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");
   58.32  
    59.1 --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    59.2 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    59.3 @@ -611,8 +611,6 @@
    59.4    // C++ interpreter only
    59.5    // rsi/r13 - previous interpreter state pointer
    59.6  
    59.7 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
    59.8 -
    59.9    // InterpreterRuntime::frequency_counter_overflow takes one argument
   59.10    // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   59.11    // The call returns the address of the verified entry point for the method or NULL
   59.12 @@ -977,15 +975,16 @@
   59.13    //      to save/restore.
   59.14    address entry_point = __ pc();
   59.15  
   59.16 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
   59.17 -  const Address size_of_locals    (rbx, Method::size_of_locals_offset());
   59.18 +  const Address constMethod       (rbx, Method::const_offset());
   59.19    const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   59.20    const Address access_flags      (rbx, Method::access_flags_offset());
   59.21 +  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
   59.22  
   59.23    // rsi/r13 == state/locals rdi == prevstate
   59.24    const Register locals = rdi;
   59.25  
   59.26    // get parameter size (always needed)
   59.27 +  __ movptr(rcx, constMethod);
   59.28    __ load_unsigned_short(rcx, size_of_parameters);
   59.29  
   59.30    // rbx: Method*
   59.31 @@ -994,6 +993,7 @@
   59.32    // for natives the size of locals is zero
   59.33  
   59.34    // compute beginning of parameters /locals
   59.35 +
   59.36    __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
   59.37  
   59.38    // initialize fixed part of activation frame
   59.39 @@ -1107,11 +1107,14 @@
   59.40    const Register method = rbx;
   59.41    const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
   59.42    const Register t      = InterpreterRuntime::SignatureHandlerGenerator::temp();    // rcx|rscratch1
   59.43 +  const Address constMethod       (method, Method::const_offset());
   59.44 +  const Address size_of_parameters(t, ConstMethod::size_of_parameters_offset());
   59.45  
   59.46    // allocate space for parameters
   59.47    __ movptr(method, STATE(_method));
   59.48    __ verify_method_ptr(method);
   59.49 -  __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
   59.50 +  __ movptr(t, constMethod);
   59.51 +  __ load_unsigned_short(t, size_of_parameters);
   59.52    __ shll(t, 2);
   59.53  #ifdef _LP64
   59.54    __ subptr(rsp, t);
   59.55 @@ -1700,15 +1703,17 @@
   59.56    // save sender sp
   59.57    __ push(rcx);
   59.58  
   59.59 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
   59.60 -  const Address size_of_locals    (rbx, Method::size_of_locals_offset());
   59.61 +  const Address constMethod       (rbx, Method::const_offset());
   59.62    const Address access_flags      (rbx, Method::access_flags_offset());
   59.63 +  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
   59.64 +  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
   59.65  
   59.66    // const Address monitor_block_top (rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
   59.67    // const Address monitor_block_bot (rbp, frame::interpreter_frame_initial_sp_offset        * wordSize);
   59.68    // const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
   59.69  
   59.70    // get parameter size (always needed)
   59.71 +  __ movptr(rdx, constMethod);
   59.72    __ load_unsigned_short(rcx, size_of_parameters);
   59.73  
   59.74    // rbx: Method*
   59.75 @@ -1989,7 +1994,9 @@
   59.76    __ movptr(rbx, STATE(_result._to_call._callee));
   59.77  
   59.78    // callee left args on top of expression stack, remove them
   59.79 -  __ load_unsigned_short(rcx, Address(rbx, Method::size_of_parameters_offset()));
   59.80 +  __ movptr(rcx, constMethod);
   59.81 +  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
   59.82 +
   59.83    __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
   59.84  
   59.85    __ movl(rcx, Address(rbx, Method::result_index_offset()));
   59.86 @@ -2159,7 +2166,9 @@
   59.87    // Make it look like call_stub calling conventions
   59.88  
   59.89    // Get (potential) receiver
   59.90 -  __ load_unsigned_short(rcx, size_of_parameters);                   // get size of parameters in words
   59.91 +  // get size of parameters in words
   59.92 +  __ movptr(rcx, constMethod);
   59.93 +  __ load_unsigned_short(rcx, Address(rcx, ConstMethod::size_of_parameters_offset()));
   59.94  
   59.95    ExternalAddress recursive(CAST_FROM_FN_PTR(address, RecursiveInterpreterActivation));
   59.96    __ pushptr(recursive.addr());                                      // make it look good in the debugger
    60.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    60.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    60.3 @@ -1023,7 +1023,7 @@
    60.4  
    60.5  void MacroAssembler::leave() {
    60.6    // %%% is this really better? Why not on 32bit too?
    60.7 -  emit_byte(0xC9); // LEAVE
    60.8 +  emit_int8((unsigned char)0xC9); // LEAVE
    60.9  }
   60.10  
   60.11  void MacroAssembler::lneg(Register hi, Register lo) {
   60.12 @@ -2112,11 +2112,11 @@
   60.13    if (UseAddressNop) {
   60.14      addr_nop_5();
   60.15    } else {
   60.16 -    emit_byte(0x26); // es:
   60.17 -    emit_byte(0x2e); // cs:
   60.18 -    emit_byte(0x64); // fs:
   60.19 -    emit_byte(0x65); // gs:
   60.20 -    emit_byte(0x90);
   60.21 +    emit_int8(0x26); // es:
   60.22 +    emit_int8(0x2e); // cs:
   60.23 +    emit_int8(0x64); // fs:
   60.24 +    emit_int8(0x65); // gs:
   60.25 +    emit_int8((unsigned char)0x90);
   60.26    }
   60.27  }
   60.28  
   60.29 @@ -2534,12 +2534,12 @@
   60.30      int offs = (intptr_t)dst.target() - ((intptr_t)pc());
   60.31      if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
   60.32        // 0111 tttn #8-bit disp
   60.33 -      emit_byte(0x70 | cc);
   60.34 -      emit_byte((offs - short_size) & 0xFF);
   60.35 +      emit_int8(0x70 | cc);
   60.36 +      emit_int8((offs - short_size) & 0xFF);
   60.37      } else {
   60.38        // 0000 1111 1000 tttn #32-bit disp
   60.39 -      emit_byte(0x0F);
   60.40 -      emit_byte(0x80 | cc);
   60.41 +      emit_int8(0x0F);
   60.42 +      emit_int8((unsigned char)(0x80 | cc));
   60.43        emit_long(offs - long_size);
   60.44      }
   60.45    } else {
   60.46 @@ -3085,7 +3085,8 @@
   60.47  
   60.48  void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
   60.49    // Used in sign-bit flipping with aligned address.
   60.50 -  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
   60.51 +  bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
   60.52 +  assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
   60.53    if (reachable(src)) {
   60.54      Assembler::pshufb(dst, as_Address(src));
   60.55    } else {
    61.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jan 08 14:04:25 2013 -0500
    61.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp	Tue Jan 08 11:39:53 2013 -0800
    61.3 @@ -126,25 +126,6 @@
    61.4      }
    61.5    }
    61.6  
    61.7 -#ifndef PRODUCT
    61.8 -  static void pd_print_patched_instruction(address branch) {
    61.9 -    const char* s;
   61.10 -    unsigned char op = branch[0];
   61.11 -    if (op == 0xE8) {
   61.12 -      s = "call";
   61.13 -    } else if (op == 0xE9 || op == 0xEB) {
   61.14 -      s = "jmp";
   61.15 -    } else if ((op & 0xF0) == 0x70) {
   61.16 -      s = "jcc";
   61.17 -    } else if (op == 0x0F) {
   61.18 -      s = "jcc";
   61.19 -    } else {
   61.20 -      s = "????";
   61.21 -    }
   61.22 -    tty->print("%s (unresolved)", s);
   61.23 -  }
   61.24 -#endif
   61.25 -
   61.26    // The following 4 methods return the offset of the appropriate move instruction
   61.27  
   61.28    // Support for fast byte/short loading with zero extension (depending on particular CPU)
    62.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    62.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    62.3 @@ -169,8 +169,9 @@
    62.4  
    62.5    if (VerifyMethodHandles && !for_compiler_entry) {
    62.6      // make sure recv is already on stack
    62.7 +    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    62.8      __ load_sized_value(temp2,
    62.9 -                        Address(method_temp, Method::size_of_parameters_offset()),
   62.10 +                        Address(temp2, ConstMethod::size_of_parameters_offset()),
   62.11                          sizeof(u2), /*is_signed*/ false);
   62.12      // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
   62.13      Label L;
   62.14 @@ -234,8 +235,9 @@
   62.15    int ref_kind = signature_polymorphic_intrinsic_ref_kind(iid);
   62.16    assert(ref_kind != 0 || iid == vmIntrinsics::_invokeBasic, "must be _invokeBasic or a linkTo intrinsic");
   62.17    if (ref_kind == 0 || MethodHandles::ref_kind_has_receiver(ref_kind)) {
   62.18 +    __ movptr(rdx_argp, Address(rbx_method, Method::const_offset()));
   62.19      __ load_sized_value(rdx_argp,
   62.20 -                        Address(rbx_method, Method::size_of_parameters_offset()),
   62.21 +                        Address(rdx_argp, ConstMethod::size_of_parameters_offset()),
   62.22                          sizeof(u2), /*is_signed*/ false);
   62.23      // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
   62.24      rdx_first_arg_addr = __ argument_address(rdx_argp, -1);
    63.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Jan 08 14:04:25 2013 -0500
    63.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Jan 08 11:39:53 2013 -0800
    63.3 @@ -2174,13 +2174,13 @@
    63.4    //   c_rarg2   - K (key) in little endian int array
    63.5    //
    63.6    address generate_aescrypt_encryptBlock() {
    63.7 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
    63.8 +    assert(UseAES, "need AES instructions and misaligned SSE support");
    63.9      __ align(CodeEntryAlignment);
   63.10      StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
   63.11      Label L_doLast;
   63.12      address start = __ pc();
   63.13  
   63.14 -    const Register from        = rsi;      // source array address
   63.15 +    const Register from        = rdx;      // source array address
   63.16      const Register to          = rdx;      // destination array address
   63.17      const Register key         = rcx;      // key array address
   63.18      const Register keylen      = rax;
   63.19 @@ -2189,47 +2189,74 @@
   63.20      const Address  key_param (rbp, 8+8);
   63.21  
   63.22      const XMMRegister xmm_result = xmm0;
   63.23 -    const XMMRegister xmm_temp   = xmm1;
   63.24 -    const XMMRegister xmm_key_shuf_mask = xmm2;
   63.25 -
   63.26 -    __ enter(); // required for proper stackwalking of RuntimeStub frame
   63.27 -    __ push(rsi);
   63.28 -    __ movptr(from , from_param);
   63.29 -    __ movptr(to   , to_param);
   63.30 -    __ movptr(key  , key_param);
   63.31 -
   63.32 +    const XMMRegister xmm_key_shuf_mask = xmm1;
   63.33 +    const XMMRegister xmm_temp1  = xmm2;
   63.34 +    const XMMRegister xmm_temp2  = xmm3;
   63.35 +    const XMMRegister xmm_temp3  = xmm4;
   63.36 +    const XMMRegister xmm_temp4  = xmm5;
   63.37 +
   63.38 +    __ enter();   // required for proper stackwalking of RuntimeStub frame
   63.39 +    __ movptr(from, from_param);
   63.40 +    __ movptr(key, key_param);
   63.41 +
   63.42 +    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
   63.43      __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
   63.44 -    // keylen = # of 32-bit words, convert to 128-bit words
   63.45 -    __ shrl(keylen, 2);
   63.46 -    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
   63.47  
   63.48      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
   63.49      __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
   63.50 +    __ movptr(to, to_param);
   63.51  
   63.52      // For encryption, the java expanded key ordering is just what we need
   63.53  
   63.54 -    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
   63.55 -    __ pxor(xmm_result, xmm_temp);
   63.56 -    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
   63.57 -      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
   63.58 -    }
   63.59 -    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
   63.60 -    __ cmpl(keylen, 0);
   63.61 -    __ jcc(Assembler::equal, L_doLast);
   63.62 -    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
   63.63 -    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
   63.64 -    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
   63.65 -    __ subl(keylen, 2);
   63.66 -    __ jcc(Assembler::equal, L_doLast);
   63.67 -    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
   63.68 -    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
   63.69 -    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
   63.70 +    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
   63.71 +    __ pxor(xmm_result, xmm_temp1);
   63.72 +
   63.73 +    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
   63.74 +    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
   63.75 +    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
   63.76 +    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
   63.77 +
   63.78 +    __ aesenc(xmm_result, xmm_temp1);
   63.79 +    __ aesenc(xmm_result, xmm_temp2);
   63.80 +    __ aesenc(xmm_result, xmm_temp3);
   63.81 +    __ aesenc(xmm_result, xmm_temp4);
   63.82 +
   63.83 +    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
   63.84 +    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
   63.85 +    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
   63.86 +    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
   63.87 +
   63.88 +    __ aesenc(xmm_result, xmm_temp1);
   63.89 +    __ aesenc(xmm_result, xmm_temp2);
   63.90 +    __ aesenc(xmm_result, xmm_temp3);
   63.91 +    __ aesenc(xmm_result, xmm_temp4);
   63.92 +
   63.93 +    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
   63.94 +    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
   63.95 +
   63.96 +    __ cmpl(keylen, 44);
   63.97 +    __ jccb(Assembler::equal, L_doLast);
   63.98 +
   63.99 +    __ aesenc(xmm_result, xmm_temp1);
  63.100 +    __ aesenc(xmm_result, xmm_temp2);
  63.101 +
  63.102 +    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
  63.103 +    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
  63.104 +
  63.105 +    __ cmpl(keylen, 52);
  63.106 +    __ jccb(Assembler::equal, L_doLast);
  63.107 +
  63.108 +    __ aesenc(xmm_result, xmm_temp1);
  63.109 +    __ aesenc(xmm_result, xmm_temp2);
  63.110 +
  63.111 +    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
  63.112 +    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
  63.113  
  63.114      __ BIND(L_doLast);
  63.115 -    __ aesenclast(xmm_result, xmm_temp);
  63.116 +    __ aesenc(xmm_result, xmm_temp1);
  63.117 +    __ aesenclast(xmm_result, xmm_temp2);
  63.118      __ movdqu(Address(to, 0), xmm_result);        // store the result
  63.119      __ xorptr(rax, rax); // return 0
  63.120 -    __ pop(rsi);
  63.121      __ leave(); // required for proper stackwalking of RuntimeStub frame
  63.122      __ ret(0);
  63.123  
  63.124 @@ -2245,13 +2272,13 @@
  63.125    //   c_rarg2   - K (key) in little endian int array
  63.126    //
  63.127    address generate_aescrypt_decryptBlock() {
  63.128 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  63.129 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  63.130      __ align(CodeEntryAlignment);
  63.131      StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
  63.132      Label L_doLast;
  63.133      address start = __ pc();
  63.134  
  63.135 -    const Register from        = rsi;      // source array address
  63.136 +    const Register from        = rdx;      // source array address
  63.137      const Register to          = rdx;      // destination array address
  63.138      const Register key         = rcx;      // key array address
  63.139      const Register keylen      = rax;
  63.140 @@ -2260,51 +2287,76 @@
  63.141      const Address  key_param (rbp, 8+8);
  63.142  
  63.143      const XMMRegister xmm_result = xmm0;
  63.144 -    const XMMRegister xmm_temp   = xmm1;
  63.145 -    const XMMRegister xmm_key_shuf_mask = xmm2;
  63.146 +    const XMMRegister xmm_key_shuf_mask = xmm1;
  63.147 +    const XMMRegister xmm_temp1  = xmm2;
  63.148 +    const XMMRegister xmm_temp2  = xmm3;
  63.149 +    const XMMRegister xmm_temp3  = xmm4;
  63.150 +    const XMMRegister xmm_temp4  = xmm5;
  63.151  
  63.152      __ enter(); // required for proper stackwalking of RuntimeStub frame
  63.153 -    __ push(rsi);
  63.154 -    __ movptr(from , from_param);
  63.155 -    __ movptr(to   , to_param);
  63.156 -    __ movptr(key  , key_param);
  63.157 -
  63.158 +    __ movptr(from, from_param);
  63.159 +    __ movptr(key, key_param);
  63.160 +
  63.161 +    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
  63.162      __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  63.163 -    // keylen = # of 32-bit words, convert to 128-bit words
  63.164 -    __ shrl(keylen, 2);
  63.165 -    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
  63.166  
  63.167      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
  63.168      __ movdqu(xmm_result, Address(from, 0));
  63.169 +    __ movptr(to, to_param);
  63.170  
  63.171      // for decryption java expanded key ordering is rotated one position from what we want
  63.172      // so we start from 0x10 here and hit 0x00 last
  63.173      // we don't know if the key is aligned, hence not using load-execute form
  63.174 -    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
  63.175 -    __ pxor  (xmm_result, xmm_temp);
  63.176 -    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
  63.177 -      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
  63.178 -    }
  63.179 -    __ cmpl(keylen, 0);
  63.180 -    __ jcc(Assembler::equal, L_doLast);
  63.181 -    // only in 192 and 256 bit keys
  63.182 -    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
  63.183 -    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
  63.184 -    __ subl(keylen, 2);
  63.185 -    __ jcc(Assembler::equal, L_doLast);
  63.186 -    // only in 256 bit keys
  63.187 -    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
  63.188 -    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
  63.189 +    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
  63.190 +    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
  63.191 +    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
  63.192 +    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
  63.193 +
  63.194 +    __ pxor  (xmm_result, xmm_temp1);
  63.195 +    __ aesdec(xmm_result, xmm_temp2);
  63.196 +    __ aesdec(xmm_result, xmm_temp3);
  63.197 +    __ aesdec(xmm_result, xmm_temp4);
  63.198 +
  63.199 +    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
  63.200 +    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
  63.201 +    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
  63.202 +    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
  63.203 +
  63.204 +    __ aesdec(xmm_result, xmm_temp1);
  63.205 +    __ aesdec(xmm_result, xmm_temp2);
  63.206 +    __ aesdec(xmm_result, xmm_temp3);
  63.207 +    __ aesdec(xmm_result, xmm_temp4);
  63.208 +
  63.209 +    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
  63.210 +    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
  63.211 +    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
  63.212 +
  63.213 +    __ cmpl(keylen, 44);
  63.214 +    __ jccb(Assembler::equal, L_doLast);
  63.215 +
  63.216 +    __ aesdec(xmm_result, xmm_temp1);
  63.217 +    __ aesdec(xmm_result, xmm_temp2);
  63.218 +
  63.219 +    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
  63.220 +    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
  63.221 +
  63.222 +    __ cmpl(keylen, 52);
  63.223 +    __ jccb(Assembler::equal, L_doLast);
  63.224 +
  63.225 +    __ aesdec(xmm_result, xmm_temp1);
  63.226 +    __ aesdec(xmm_result, xmm_temp2);
  63.227 +
  63.228 +    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
  63.229 +    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
  63.230  
  63.231      __ BIND(L_doLast);
  63.232 +    __ aesdec(xmm_result, xmm_temp1);
  63.233 +    __ aesdec(xmm_result, xmm_temp2);
  63.234 +
  63.235      // for decryption the aesdeclast operation is always on key+0x00
  63.236 -    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
  63.237 -    __ aesdeclast(xmm_result, xmm_temp);
  63.238 -
  63.239 +    __ aesdeclast(xmm_result, xmm_temp3);
  63.240      __ movdqu(Address(to, 0), xmm_result);  // store the result
  63.241 -
  63.242      __ xorptr(rax, rax); // return 0
  63.243 -    __ pop(rsi);
  63.244      __ leave(); // required for proper stackwalking of RuntimeStub frame
  63.245      __ ret(0);
  63.246  
  63.247 @@ -2340,7 +2392,7 @@
  63.248    //   c_rarg4   - input length
  63.249    //
  63.250    address generate_cipherBlockChaining_encryptAESCrypt() {
  63.251 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  63.252 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  63.253      __ align(CodeEntryAlignment);
  63.254      StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
  63.255      address start = __ pc();
  63.256 @@ -2393,7 +2445,7 @@
  63.257      __ jcc(Assembler::notEqual, L_key_192_256);
  63.258  
  63.259      // 128 bit code follows here
  63.260 -    __ movptr(pos, 0);
  63.261 +    __ movl(pos, 0);
  63.262      __ align(OptoLoopAlignment);
  63.263      __ BIND(L_loopTop_128);
  63.264      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  63.265 @@ -2423,15 +2475,15 @@
  63.266      __ leave();                                  // required for proper stackwalking of RuntimeStub frame
  63.267      __ ret(0);
  63.268  
  63.269 -  __ BIND(L_key_192_256);
  63.270 -  // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
  63.271 +    __ BIND(L_key_192_256);
  63.272 +    // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
  63.273      __ cmpl(rax, 52);
  63.274      __ jcc(Assembler::notEqual, L_key_256);
  63.275  
  63.276      // 192-bit code follows here (could be changed to use more xmm registers)
  63.277 -    __ movptr(pos, 0);
  63.278 -  __ align(OptoLoopAlignment);
  63.279 -  __ BIND(L_loopTop_192);
  63.280 +    __ movl(pos, 0);
  63.281 +    __ align(OptoLoopAlignment);
  63.282 +    __ BIND(L_loopTop_192);
  63.283      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  63.284      __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
  63.285  
  63.286 @@ -2452,11 +2504,11 @@
  63.287      __ jcc(Assembler::notEqual, L_loopTop_192);
  63.288      __ jmp(L_exit);
  63.289  
  63.290 -  __ BIND(L_key_256);
  63.291 +    __ BIND(L_key_256);
  63.292      // 256-bit code follows here (could be changed to use more xmm registers)
  63.293 -    __ movptr(pos, 0);
  63.294 -  __ align(OptoLoopAlignment);
  63.295 -  __ BIND(L_loopTop_256);
  63.296 +    __ movl(pos, 0);
  63.297 +    __ align(OptoLoopAlignment);
  63.298 +    __ BIND(L_loopTop_256);
  63.299      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  63.300      __ pxor  (xmm_result, xmm_temp);                                // xor with the current r vector
  63.301  
  63.302 @@ -2495,7 +2547,7 @@
  63.303    //
  63.304  
  63.305    address generate_cipherBlockChaining_decryptAESCrypt() {
  63.306 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  63.307 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  63.308      __ align(CodeEntryAlignment);
  63.309      StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
  63.310      address start = __ pc();
  63.311 @@ -2556,9 +2608,9 @@
  63.312  
  63.313  
  63.314      // 128-bit code follows here, parallelized
  63.315 -    __ movptr(pos, 0);
  63.316 -  __ align(OptoLoopAlignment);
  63.317 -  __ BIND(L_singleBlock_loopTop_128);
  63.318 +    __ movl(pos, 0);
  63.319 +    __ align(OptoLoopAlignment);
  63.320 +    __ BIND(L_singleBlock_loopTop_128);
  63.321      __ cmpptr(len_reg, 0);           // any blocks left??
  63.322      __ jcc(Assembler::equal, L_exit);
  63.323      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
  63.324 @@ -2597,7 +2649,7 @@
  63.325      __ jcc(Assembler::notEqual, L_key_256);
  63.326  
  63.327      // 192-bit code follows here (could be optimized to use parallelism)
  63.328 -    __ movptr(pos, 0);
  63.329 +    __ movl(pos, 0);
  63.330      __ align(OptoLoopAlignment);
  63.331      __ BIND(L_singleBlock_loopTop_192);
  63.332      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
  63.333 @@ -2622,7 +2674,7 @@
  63.334  
  63.335      __ BIND(L_key_256);
  63.336      // 256-bit code follows here (could be optimized to use parallelism)
  63.337 -    __ movptr(pos, 0);
  63.338 +    __ movl(pos, 0);
  63.339      __ align(OptoLoopAlignment);
  63.340      __ BIND(L_singleBlock_loopTop_256);
  63.341      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
    64.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Jan 08 14:04:25 2013 -0500
    64.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Jan 08 11:39:53 2013 -0800
    64.3 @@ -2953,21 +2953,6 @@
    64.4      }
    64.5    }
    64.6  
    64.7 -  // aesenc using specified key+offset
    64.8 -  // can optionally specify that the shuffle mask is already in an xmmregister
    64.9 -  void aes_enc_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
   64.10 -    load_key(xmmtmp, key, offset, xmm_shuf_mask);
   64.11 -    __ aesenc(xmmdst, xmmtmp);
   64.12 -  }
   64.13 -
   64.14 -  // aesdec using specified key+offset
   64.15 -  // can optionally specify that the shuffle mask is already in an xmmregister
   64.16 -  void aes_dec_key(XMMRegister xmmdst, XMMRegister xmmtmp, Register key, int offset, XMMRegister xmm_shuf_mask=NULL) {
   64.17 -    load_key(xmmtmp, key, offset, xmm_shuf_mask);
   64.18 -    __ aesdec(xmmdst, xmmtmp);
   64.19 -  }
   64.20 -
   64.21 -
   64.22    // Arguments:
   64.23    //
   64.24    // Inputs:
   64.25 @@ -2976,7 +2961,7 @@
   64.26    //   c_rarg2   - K (key) in little endian int array
   64.27    //
   64.28    address generate_aescrypt_encryptBlock() {
   64.29 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
   64.30 +    assert(UseAES, "need AES instructions and misaligned SSE support");
   64.31      __ align(CodeEntryAlignment);
   64.32      StubCodeMark mark(this, "StubRoutines", "aescrypt_encryptBlock");
   64.33      Label L_doLast;
   64.34 @@ -2988,15 +2973,17 @@
   64.35      const Register keylen      = rax;
   64.36  
   64.37      const XMMRegister xmm_result = xmm0;
   64.38 -    const XMMRegister xmm_temp   = xmm1;
   64.39 -    const XMMRegister xmm_key_shuf_mask = xmm2;
   64.40 +    const XMMRegister xmm_key_shuf_mask = xmm1;
   64.41 +    // On win64 xmm6-xmm15 must be preserved so don't use them.
   64.42 +    const XMMRegister xmm_temp1  = xmm2;
   64.43 +    const XMMRegister xmm_temp2  = xmm3;
   64.44 +    const XMMRegister xmm_temp3  = xmm4;
   64.45 +    const XMMRegister xmm_temp4  = xmm5;
   64.46  
   64.47      __ enter(); // required for proper stackwalking of RuntimeStub frame
   64.48  
   64.49 +    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
   64.50      __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
   64.51 -    // keylen = # of 32-bit words, convert to 128-bit words
   64.52 -    __ shrl(keylen, 2);
   64.53 -    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
   64.54  
   64.55      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
   64.56      __ movdqu(xmm_result, Address(from, 0));  // get 16 bytes of input
   64.57 @@ -3004,25 +2991,53 @@
   64.58      // For encryption, the java expanded key ordering is just what we need
   64.59      // we don't know if the key is aligned, hence not using load-execute form
   64.60  
   64.61 -    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
   64.62 -    __ pxor(xmm_result, xmm_temp);
   64.63 -    for (int offset = 0x10; offset <= 0x90; offset += 0x10) {
   64.64 -      aes_enc_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
   64.65 -    }
   64.66 -    load_key  (xmm_temp, key, 0xa0, xmm_key_shuf_mask);
   64.67 -    __ cmpl(keylen, 0);
   64.68 -    __ jcc(Assembler::equal, L_doLast);
   64.69 -    __ aesenc(xmm_result, xmm_temp);                   // only in 192 and 256 bit keys
   64.70 -    aes_enc_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
   64.71 -    load_key(xmm_temp, key, 0xc0, xmm_key_shuf_mask);
   64.72 -    __ subl(keylen, 2);
   64.73 -    __ jcc(Assembler::equal, L_doLast);
   64.74 -    __ aesenc(xmm_result, xmm_temp);                   // only in 256 bit keys
   64.75 -    aes_enc_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
   64.76 -    load_key(xmm_temp, key, 0xe0, xmm_key_shuf_mask);
   64.77 +    load_key(xmm_temp1, key, 0x00, xmm_key_shuf_mask);
   64.78 +    __ pxor(xmm_result, xmm_temp1);
   64.79 +
   64.80 +    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
   64.81 +    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
   64.82 +    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
   64.83 +    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
   64.84 +
   64.85 +    __ aesenc(xmm_result, xmm_temp1);
   64.86 +    __ aesenc(xmm_result, xmm_temp2);
   64.87 +    __ aesenc(xmm_result, xmm_temp3);
   64.88 +    __ aesenc(xmm_result, xmm_temp4);
   64.89 +
   64.90 +    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
   64.91 +    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
   64.92 +    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
   64.93 +    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
   64.94 +
   64.95 +    __ aesenc(xmm_result, xmm_temp1);
   64.96 +    __ aesenc(xmm_result, xmm_temp2);
   64.97 +    __ aesenc(xmm_result, xmm_temp3);
   64.98 +    __ aesenc(xmm_result, xmm_temp4);
   64.99 +
  64.100 +    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
  64.101 +    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
  64.102 +
  64.103 +    __ cmpl(keylen, 44);
  64.104 +    __ jccb(Assembler::equal, L_doLast);
  64.105 +
  64.106 +    __ aesenc(xmm_result, xmm_temp1);
  64.107 +    __ aesenc(xmm_result, xmm_temp2);
  64.108 +
  64.109 +    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
  64.110 +    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
  64.111 +
  64.112 +    __ cmpl(keylen, 52);
  64.113 +    __ jccb(Assembler::equal, L_doLast);
  64.114 +
  64.115 +    __ aesenc(xmm_result, xmm_temp1);
  64.116 +    __ aesenc(xmm_result, xmm_temp2);
  64.117 +
  64.118 +    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
  64.119 +    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
  64.120  
  64.121      __ BIND(L_doLast);
  64.122 -    __ aesenclast(xmm_result, xmm_temp);
  64.123 +    __ aesenc(xmm_result, xmm_temp1);
  64.124 +    __ aesenclast(xmm_result, xmm_temp2);
  64.125      __ movdqu(Address(to, 0), xmm_result);        // store the result
  64.126      __ xorptr(rax, rax); // return 0
  64.127      __ leave(); // required for proper stackwalking of RuntimeStub frame
  64.128 @@ -3040,7 +3055,7 @@
  64.129    //   c_rarg2   - K (key) in little endian int array
  64.130    //
  64.131    address generate_aescrypt_decryptBlock() {
  64.132 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  64.133 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  64.134      __ align(CodeEntryAlignment);
  64.135      StubCodeMark mark(this, "StubRoutines", "aescrypt_decryptBlock");
  64.136      Label L_doLast;
  64.137 @@ -3052,15 +3067,17 @@
  64.138      const Register keylen      = rax;
  64.139  
  64.140      const XMMRegister xmm_result = xmm0;
  64.141 -    const XMMRegister xmm_temp   = xmm1;
  64.142 -    const XMMRegister xmm_key_shuf_mask = xmm2;
  64.143 +    const XMMRegister xmm_key_shuf_mask = xmm1;
  64.144 +    // On win64 xmm6-xmm15 must be preserved so don't use them.
  64.145 +    const XMMRegister xmm_temp1  = xmm2;
  64.146 +    const XMMRegister xmm_temp2  = xmm3;
  64.147 +    const XMMRegister xmm_temp3  = xmm4;
  64.148 +    const XMMRegister xmm_temp4  = xmm5;
  64.149  
  64.150      __ enter(); // required for proper stackwalking of RuntimeStub frame
  64.151  
  64.152 +    // keylen could be only {11, 13, 15} * 4 = {44, 52, 60}
  64.153      __ movl(keylen, Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)));
  64.154 -    // keylen = # of 32-bit words, convert to 128-bit words
  64.155 -    __ shrl(keylen, 2);
  64.156 -    __ subl(keylen, 11);   // every key has at least 11 128-bit words, some have more
  64.157  
  64.158      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
  64.159      __ movdqu(xmm_result, Address(from, 0));
  64.160 @@ -3068,29 +3085,55 @@
  64.161      // for decryption java expanded key ordering is rotated one position from what we want
  64.162      // so we start from 0x10 here and hit 0x00 last
  64.163      // we don't know if the key is aligned, hence not using load-execute form
  64.164 -    load_key(xmm_temp, key, 0x10, xmm_key_shuf_mask);
  64.165 -    __ pxor  (xmm_result, xmm_temp);
  64.166 -    for (int offset = 0x20; offset <= 0xa0; offset += 0x10) {
  64.167 -      aes_dec_key(xmm_result, xmm_temp, key, offset, xmm_key_shuf_mask);
  64.168 -    }
  64.169 -    __ cmpl(keylen, 0);
  64.170 -    __ jcc(Assembler::equal, L_doLast);
  64.171 -    // only in 192 and 256 bit keys
  64.172 -    aes_dec_key(xmm_result, xmm_temp, key, 0xb0, xmm_key_shuf_mask);
  64.173 -    aes_dec_key(xmm_result, xmm_temp, key, 0xc0, xmm_key_shuf_mask);
  64.174 -    __ subl(keylen, 2);
  64.175 -    __ jcc(Assembler::equal, L_doLast);
  64.176 -    // only in 256 bit keys
  64.177 -    aes_dec_key(xmm_result, xmm_temp, key, 0xd0, xmm_key_shuf_mask);
  64.178 -    aes_dec_key(xmm_result, xmm_temp, key, 0xe0, xmm_key_shuf_mask);
  64.179 +    load_key(xmm_temp1, key, 0x10, xmm_key_shuf_mask);
  64.180 +    load_key(xmm_temp2, key, 0x20, xmm_key_shuf_mask);
  64.181 +    load_key(xmm_temp3, key, 0x30, xmm_key_shuf_mask);
  64.182 +    load_key(xmm_temp4, key, 0x40, xmm_key_shuf_mask);
  64.183 +
  64.184 +    __ pxor  (xmm_result, xmm_temp1);
  64.185 +    __ aesdec(xmm_result, xmm_temp2);
  64.186 +    __ aesdec(xmm_result, xmm_temp3);
  64.187 +    __ aesdec(xmm_result, xmm_temp4);
  64.188 +
  64.189 +    load_key(xmm_temp1, key, 0x50, xmm_key_shuf_mask);
  64.190 +    load_key(xmm_temp2, key, 0x60, xmm_key_shuf_mask);
  64.191 +    load_key(xmm_temp3, key, 0x70, xmm_key_shuf_mask);
  64.192 +    load_key(xmm_temp4, key, 0x80, xmm_key_shuf_mask);
  64.193 +
  64.194 +    __ aesdec(xmm_result, xmm_temp1);
  64.195 +    __ aesdec(xmm_result, xmm_temp2);
  64.196 +    __ aesdec(xmm_result, xmm_temp3);
  64.197 +    __ aesdec(xmm_result, xmm_temp4);
  64.198 +
  64.199 +    load_key(xmm_temp1, key, 0x90, xmm_key_shuf_mask);
  64.200 +    load_key(xmm_temp2, key, 0xa0, xmm_key_shuf_mask);
  64.201 +    load_key(xmm_temp3, key, 0x00, xmm_key_shuf_mask);
  64.202 +
  64.203 +    __ cmpl(keylen, 44);
  64.204 +    __ jccb(Assembler::equal, L_doLast);
  64.205 +
  64.206 +    __ aesdec(xmm_result, xmm_temp1);
  64.207 +    __ aesdec(xmm_result, xmm_temp2);
  64.208 +
  64.209 +    load_key(xmm_temp1, key, 0xb0, xmm_key_shuf_mask);
  64.210 +    load_key(xmm_temp2, key, 0xc0, xmm_key_shuf_mask);
  64.211 +
  64.212 +    __ cmpl(keylen, 52);
  64.213 +    __ jccb(Assembler::equal, L_doLast);
  64.214 +
  64.215 +    __ aesdec(xmm_result, xmm_temp1);
  64.216 +    __ aesdec(xmm_result, xmm_temp2);
  64.217 +
  64.218 +    load_key(xmm_temp1, key, 0xd0, xmm_key_shuf_mask);
  64.219 +    load_key(xmm_temp2, key, 0xe0, xmm_key_shuf_mask);
  64.220  
  64.221      __ BIND(L_doLast);
  64.222 +    __ aesdec(xmm_result, xmm_temp1);
  64.223 +    __ aesdec(xmm_result, xmm_temp2);
  64.224 +
  64.225      // for decryption the aesdeclast operation is always on key+0x00
  64.226 -    load_key(xmm_temp, key, 0x00, xmm_key_shuf_mask);
  64.227 -    __ aesdeclast(xmm_result, xmm_temp);
  64.228 -
  64.229 +    __ aesdeclast(xmm_result, xmm_temp3);
  64.230      __ movdqu(Address(to, 0), xmm_result);  // store the result
  64.231 -
  64.232      __ xorptr(rax, rax); // return 0
  64.233      __ leave(); // required for proper stackwalking of RuntimeStub frame
  64.234      __ ret(0);
  64.235 @@ -3109,7 +3152,7 @@
  64.236    //   c_rarg4   - input length
  64.237    //
  64.238    address generate_cipherBlockChaining_encryptAESCrypt() {
  64.239 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  64.240 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  64.241      __ align(CodeEntryAlignment);
  64.242      StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
  64.243      address start = __ pc();
  64.244 @@ -3133,16 +3176,19 @@
  64.245      const XMMRegister xmm_temp   = xmm1;
  64.246      // keys 0-10 preloaded into xmm2-xmm12
  64.247      const int XMM_REG_NUM_KEY_FIRST = 2;
  64.248 -    const int XMM_REG_NUM_KEY_LAST  = 12;
  64.249 +    const int XMM_REG_NUM_KEY_LAST  = 15;
  64.250      const XMMRegister xmm_key0   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
  64.251 -    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
  64.252 +    const XMMRegister xmm_key10  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+10);
  64.253 +    const XMMRegister xmm_key11  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+11);
  64.254 +    const XMMRegister xmm_key12  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+12);
  64.255 +    const XMMRegister xmm_key13  = as_XMMRegister(XMM_REG_NUM_KEY_FIRST+13);
  64.256  
  64.257      __ enter(); // required for proper stackwalking of RuntimeStub frame
  64.258  
  64.259  #ifdef _WIN64
  64.260      // on win64, fill len_reg from stack position
  64.261      __ movl(len_reg, len_mem);
  64.262 -    // save the xmm registers which must be preserved 6-12
  64.263 +    // save the xmm registers which must be preserved 6-15
  64.264      __ subptr(rsp, -rsp_after_call_off * wordSize);
  64.265      for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
  64.266        __ movdqu(xmm_save(i), as_XMMRegister(i));
  64.267 @@ -3151,12 +3197,11 @@
  64.268  
  64.269      const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
  64.270      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
  64.271 -    // load up xmm regs 2 thru 12 with key 0x00 - 0xa0
  64.272 -    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
  64.273 +    // load up xmm regs xmm2 thru xmm12 with key 0x00 - 0xa0
  64.274 +    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x00; rnum <= XMM_REG_NUM_KEY_FIRST+10; rnum++) {
  64.275        load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
  64.276        offset += 0x10;
  64.277      }
  64.278 -
  64.279      __ movdqu(xmm_result, Address(rvec, 0x00));   // initialize xmm_result with r vec
  64.280  
  64.281      // now split to different paths depending on the keylen (len in ints of AESCrypt.KLE array (52=192, or 60=256))
  64.282 @@ -3167,16 +3212,15 @@
  64.283      // 128 bit code follows here
  64.284      __ movptr(pos, 0);
  64.285      __ align(OptoLoopAlignment);
  64.286 +
  64.287      __ BIND(L_loopTop_128);
  64.288      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  64.289      __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
  64.290 -
  64.291      __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
  64.292 -    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
  64.293 +    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_FIRST + 9; rnum++) {
  64.294        __ aesenc(xmm_result, as_XMMRegister(rnum));
  64.295      }
  64.296      __ aesenclast(xmm_result, xmm_key10);
  64.297 -
  64.298      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
  64.299      // no need to store r to memory until we exit
  64.300      __ addptr(pos, AESBlockSize);
  64.301 @@ -3198,24 +3242,23 @@
  64.302  
  64.303      __ BIND(L_key_192_256);
  64.304      // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
  64.305 +    load_key(xmm_key11, key, 0xb0, xmm_key_shuf_mask);
  64.306 +    load_key(xmm_key12, key, 0xc0, xmm_key_shuf_mask);
  64.307      __ cmpl(rax, 52);
  64.308      __ jcc(Assembler::notEqual, L_key_256);
  64.309  
  64.310      // 192-bit code follows here (could be changed to use more xmm registers)
  64.311      __ movptr(pos, 0);
  64.312      __ align(OptoLoopAlignment);
  64.313 +
  64.314      __ BIND(L_loopTop_192);
  64.315      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  64.316      __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
  64.317 -
  64.318      __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
  64.319 -    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
  64.320 +    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_FIRST + 11; rnum++) {
  64.321        __ aesenc(xmm_result, as_XMMRegister(rnum));
  64.322      }
  64.323 -    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
  64.324 -    load_key(xmm_temp, key, 0xc0);
  64.325 -    __ aesenclast(xmm_result, xmm_temp);
  64.326 -
  64.327 +    __ aesenclast(xmm_result, xmm_key12);
  64.328      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
  64.329      // no need to store r to memory until we exit
  64.330      __ addptr(pos, AESBlockSize);
  64.331 @@ -3225,22 +3268,19 @@
  64.332  
  64.333      __ BIND(L_key_256);
  64.334      // 256-bit code follows here (could be changed to use more xmm registers)
  64.335 +    load_key(xmm_key13, key, 0xd0, xmm_key_shuf_mask);
  64.336      __ movptr(pos, 0);
  64.337      __ align(OptoLoopAlignment);
  64.338 +
  64.339      __ BIND(L_loopTop_256);
  64.340      __ movdqu(xmm_temp, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of input
  64.341      __ pxor  (xmm_result, xmm_temp);               // xor with the current r vector
  64.342 -
  64.343      __ pxor  (xmm_result, xmm_key0);               // do the aes rounds
  64.344 -    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_LAST; rnum++) {
  64.345 +    for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum  <= XMM_REG_NUM_KEY_FIRST + 13; rnum++) {
  64.346        __ aesenc(xmm_result, as_XMMRegister(rnum));
  64.347      }
  64.348 -    aes_enc_key(xmm_result, xmm_temp, key, 0xb0);
  64.349 -    aes_enc_key(xmm_result, xmm_temp, key, 0xc0);
  64.350 -    aes_enc_key(xmm_result, xmm_temp, key, 0xd0);
  64.351      load_key(xmm_temp, key, 0xe0);
  64.352      __ aesenclast(xmm_result, xmm_temp);
  64.353 -
  64.354      __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
  64.355      // no need to store r to memory until we exit
  64.356      __ addptr(pos, AESBlockSize);
  64.357 @@ -3267,7 +3307,7 @@
  64.358    //
  64.359  
  64.360    address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
  64.361 -    assert(UseAES && (UseAVX > 0), "need AES instructions and misaligned SSE support");
  64.362 +    assert(UseAES, "need AES instructions and misaligned SSE support");
  64.363      __ align(CodeEntryAlignment);
  64.364      StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
  64.365      address start = __ pc();
  64.366 @@ -3288,12 +3328,10 @@
  64.367  #endif
  64.368      const Register pos         = rax;
  64.369  
  64.370 -    // xmm register assignments for the loops below
  64.371 -    const XMMRegister xmm_result = xmm0;
  64.372      // keys 0-10 preloaded into xmm2-xmm12
  64.373      const int XMM_REG_NUM_KEY_FIRST = 5;
  64.374      const int XMM_REG_NUM_KEY_LAST  = 15;
  64.375 -    const XMMRegister xmm_key_first   = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
  64.376 +    const XMMRegister xmm_key_first = as_XMMRegister(XMM_REG_NUM_KEY_FIRST);
  64.377      const XMMRegister xmm_key_last  = as_XMMRegister(XMM_REG_NUM_KEY_LAST);
  64.378  
  64.379      __ enter(); // required for proper stackwalking of RuntimeStub frame
  64.380 @@ -3312,13 +3350,14 @@
  64.381      const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
  64.382      __ movdqu(xmm_key_shuf_mask, ExternalAddress(StubRoutines::x86::key_shuffle_mask_addr()));
  64.383      // load up xmm regs 5 thru 15 with key 0x10 - 0xa0 - 0x00
  64.384 -    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum <= XMM_REG_NUM_KEY_LAST; rnum++) {
  64.385 -      if (rnum == XMM_REG_NUM_KEY_LAST) offset = 0x00;
  64.386 +    for (int rnum = XMM_REG_NUM_KEY_FIRST, offset = 0x10; rnum < XMM_REG_NUM_KEY_LAST; rnum++) {
  64.387        load_key(as_XMMRegister(rnum), key, offset, xmm_key_shuf_mask);
  64.388        offset += 0x10;
  64.389      }
  64.390 +    load_key(xmm_key_last, key, 0x00, xmm_key_shuf_mask);
  64.391  
  64.392      const XMMRegister xmm_prev_block_cipher = xmm1;  // holds cipher of previous block
  64.393 +
  64.394      // registers holding the four results in the parallelized loop
  64.395      const XMMRegister xmm_result0 = xmm0;
  64.396      const XMMRegister xmm_result1 = xmm2;
  64.397 @@ -3376,8 +3415,12 @@
  64.398      __ jmp(L_multiBlock_loopTop_128);
  64.399  
  64.400      // registers used in the non-parallelized loops
  64.401 +    // xmm register assignments for the loops below
  64.402 +    const XMMRegister xmm_result = xmm0;
  64.403      const XMMRegister xmm_prev_block_cipher_save = xmm2;
  64.404 -    const XMMRegister xmm_temp   = xmm3;
  64.405 +    const XMMRegister xmm_key11 = xmm3;
  64.406 +    const XMMRegister xmm_key12 = xmm4;
  64.407 +    const XMMRegister xmm_temp  = xmm4;
  64.408  
  64.409      __ align(OptoLoopAlignment);
  64.410      __ BIND(L_singleBlock_loopTop_128);
  64.411 @@ -3415,12 +3458,15 @@
  64.412  
  64.413      __ BIND(L_key_192_256);
  64.414      // here rax = len in ints of AESCrypt.KLE array (52=192, or 60=256)
  64.415 +    load_key(xmm_key11, key, 0xb0);
  64.416      __ cmpl(rax, 52);
  64.417      __ jcc(Assembler::notEqual, L_key_256);
  64.418  
  64.419      // 192-bit code follows here (could be optimized to use parallelism)
  64.420 +    load_key(xmm_key12, key, 0xc0);     // 192-bit key goes up to c0
  64.421      __ movptr(pos, 0);
  64.422      __ align(OptoLoopAlignment);
  64.423 +
  64.424      __ BIND(L_singleBlock_loopTop_192);
  64.425      __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
  64.426      __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
  64.427 @@ -3428,14 +3474,13 @@
  64.428      for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
  64.429        __ aesdec(xmm_result, as_XMMRegister(rnum));
  64.430      }
  64.431 -    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 192-bit key goes up to c0
  64.432 -    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
  64.433 +    __ aesdec(xmm_result, xmm_key11);
  64.434 +    __ aesdec(xmm_result, xmm_key12);
  64.435      __ aesdeclast(xmm_result, xmm_key_last);                    // xmm15 always came from key+0
  64.436      __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
  64.437 -    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
  64.438 +    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
  64.439      // no need to store r to memory until we exit
  64.440 -    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
  64.441 -
  64.442 +    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
  64.443      __ addptr(pos, AESBlockSize);
  64.444      __ subptr(len_reg, AESBlockSize);
  64.445      __ jcc(Assembler::notEqual,L_singleBlock_loopTop_192);
  64.446 @@ -3445,23 +3490,26 @@
  64.447      // 256-bit code follows here (could be optimized to use parallelism)
  64.448      __ movptr(pos, 0);
  64.449      __ align(OptoLoopAlignment);
  64.450 +
  64.451      __ BIND(L_singleBlock_loopTop_256);
  64.452 -    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0));   // get next 16 bytes of cipher input
  64.453 +    __ movdqu(xmm_result, Address(from, pos, Address::times_1, 0)); // get next 16 bytes of cipher input
  64.454      __ movdqa(xmm_prev_block_cipher_save, xmm_result);              // save for next r vector
  64.455      __ pxor  (xmm_result, xmm_key_first);               // do the aes dec rounds
  64.456      for (int rnum = XMM_REG_NUM_KEY_FIRST + 1; rnum <= XMM_REG_NUM_KEY_LAST - 1; rnum++) {
  64.457        __ aesdec(xmm_result, as_XMMRegister(rnum));
  64.458      }
  64.459 -    aes_dec_key(xmm_result, xmm_temp, key, 0xb0);     // 256-bit key goes up to e0
  64.460 -    aes_dec_key(xmm_result, xmm_temp, key, 0xc0);
  64.461 -    aes_dec_key(xmm_result, xmm_temp, key, 0xd0);
  64.462 -    aes_dec_key(xmm_result, xmm_temp, key, 0xe0);
  64.463 -    __ aesdeclast(xmm_result, xmm_key_last);             // xmm15 came from key+0
  64.464 +    __ aesdec(xmm_result, xmm_key11);
  64.465 +    load_key(xmm_temp, key, 0xc0);
  64.466 +    __ aesdec(xmm_result, xmm_temp);
  64.467 +    load_key(xmm_temp, key, 0xd0);
  64.468 +    __ aesdec(xmm_result, xmm_temp);
  64.469 +    load_key(xmm_temp, key, 0xe0);     // 256-bit key goes up to e0
  64.470 +    __ aesdec(xmm_result, xmm_temp);
  64.471 +    __ aesdeclast(xmm_result, xmm_key_last);          // xmm15 came from key+0
  64.472      __ pxor  (xmm_result, xmm_prev_block_cipher);               // xor with the current r vector
  64.473 -    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);     // store into the next 16 bytes of output
  64.474 +    __ movdqu(Address(to, pos, Address::times_1, 0), xmm_result);  // store into the next 16 bytes of output
  64.475      // no need to store r to memory until we exit
  64.476 -    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);              // set up next r vector with cipher input from this block
  64.477 -
  64.478 +    __ movdqa(xmm_prev_block_cipher, xmm_prev_block_cipher_save);  // set up next r vector with cipher input from this block
  64.479      __ addptr(pos, AESBlockSize);
  64.480      __ subptr(len_reg, AESBlockSize);
  64.481      __ jcc(Assembler::notEqual,L_singleBlock_loopTop_256);
    65.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jan 08 14:04:25 2013 -0500
    65.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp	Tue Jan 08 11:39:53 2013 -0800
    65.3 @@ -424,8 +424,6 @@
    65.4    // C++ interpreter only
    65.5    // rsi - previous interpreter state pointer
    65.6  
    65.7 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
    65.8 -
    65.9    // InterpreterRuntime::frequency_counter_overflow takes one argument
   65.10    // indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
   65.11    // The call returns the address of the verified entry point for the method or NULL
   65.12 @@ -868,12 +866,13 @@
   65.13    // rsi: previous interpreter state (C++ interpreter) must preserve
   65.14    address entry_point = __ pc();
   65.15  
   65.16 -
   65.17 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
   65.18 +  const Address constMethod       (rbx, Method::const_offset());
   65.19    const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   65.20    const Address access_flags      (rbx, Method::access_flags_offset());
   65.21 +  const Address size_of_parameters(rcx, ConstMethod::size_of_parameters_offset());
   65.22  
   65.23    // get parameter size (always needed)
   65.24 +  __ movptr(rcx, constMethod);
   65.25    __ load_unsigned_short(rcx, size_of_parameters);
   65.26  
   65.27    // native calls don't need the stack size check since they have no expression stack
   65.28 @@ -988,7 +987,9 @@
   65.29  
   65.30    // allocate space for parameters
   65.31    __ get_method(method);
   65.32 -  __ load_unsigned_short(t, Address(method, Method::size_of_parameters_offset()));
   65.33 +  __ movptr(t, Address(method, Method::const_offset()));
   65.34 +  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
   65.35 +
   65.36    __ shlptr(t, Interpreter::logStackElementSize);
   65.37    __ addptr(t, 2*wordSize);     // allocate two more slots for JNIEnv and possible mirror
   65.38    __ subptr(rsp, t);
   65.39 @@ -1297,13 +1298,14 @@
   65.40    // rsi: sender sp
   65.41    address entry_point = __ pc();
   65.42  
   65.43 -
   65.44 -  const Address size_of_parameters(rbx, Method::size_of_parameters_offset());
   65.45 -  const Address size_of_locals    (rbx, Method::size_of_locals_offset());
   65.46 +  const Address constMethod       (rbx, Method::const_offset());
   65.47    const Address invocation_counter(rbx, Method::invocation_counter_offset() + InvocationCounter::counter_offset());
   65.48    const Address access_flags      (rbx, Method::access_flags_offset());
   65.49 +  const Address size_of_parameters(rdx, ConstMethod::size_of_parameters_offset());
   65.50 +  const Address size_of_locals    (rdx, ConstMethod::size_of_locals_offset());
   65.51  
   65.52    // get parameter size (always needed)
   65.53 +  __ movptr(rdx, constMethod);
   65.54    __ load_unsigned_short(rcx, size_of_parameters);
   65.55  
   65.56    // rbx,: Method*
   65.57 @@ -1734,7 +1736,8 @@
   65.58  
   65.59      // Compute size of arguments for saving when returning to deoptimized caller
   65.60      __ get_method(rax);
   65.61 -    __ load_unsigned_short(rax, Address(rax, in_bytes(Method::size_of_parameters_offset())));
   65.62 +    __ movptr(rax, Address(rax, Method::const_offset()));
   65.63 +    __ load_unsigned_short(rax, Address(rax, ConstMethod::size_of_parameters_offset()));
   65.64      __ shlptr(rax, Interpreter::logStackElementSize);
   65.65      __ restore_locals();
   65.66      __ subptr(rdi, rax);
    66.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jan 08 14:04:25 2013 -0500
    66.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp	Tue Jan 08 11:39:53 2013 -0800
    66.3 @@ -369,9 +369,6 @@
    66.4    // Everything as it was on entry
    66.5    // rdx is not restored. Doesn't appear to really be set.
    66.6  
    66.7 -  const Address size_of_parameters(rbx,
    66.8 -                                   Method::size_of_parameters_offset());
    66.9 -
   66.10    // InterpreterRuntime::frequency_counter_overflow takes two
   66.11    // arguments, the first (thread) is passed by call_VM, the second
   66.12    // indicates if the counter overflow occurs at a backwards branch
   66.13 @@ -844,14 +841,17 @@
   66.14  
   66.15    address entry_point = __ pc();
   66.16  
   66.17 -  const Address size_of_parameters(rbx, Method::
   66.18 -                                        size_of_parameters_offset());
   66.19 +  const Address constMethod       (rbx, Method::const_offset());
   66.20    const Address invocation_counter(rbx, Method::
   66.21                                          invocation_counter_offset() +
   66.22                                          InvocationCounter::counter_offset());
   66.23    const Address access_flags      (rbx, Method::access_flags_offset());
   66.24 +  const Address size_of_parameters(rcx, ConstMethod::
   66.25 +                                        size_of_parameters_offset());
   66.26 +
   66.27  
   66.28    // get parameter size (always needed)
   66.29 +  __ movptr(rcx, constMethod);
   66.30    __ load_unsigned_short(rcx, size_of_parameters);
   66.31  
   66.32    // native calls don't need the stack size check since they have no
   66.33 @@ -967,9 +967,8 @@
   66.34  
   66.35    // allocate space for parameters
   66.36    __ get_method(method);
   66.37 -  __ load_unsigned_short(t,
   66.38 -                         Address(method,
   66.39 -                                 Method::size_of_parameters_offset()));
   66.40 +  __ movptr(t, Address(method, Method::const_offset()));
   66.41 +  __ load_unsigned_short(t, Address(t, ConstMethod::size_of_parameters_offset()));
   66.42    __ shll(t, Interpreter::logStackElementSize);
   66.43  
   66.44    __ subptr(rsp, t);
   66.45 @@ -1302,15 +1301,18 @@
   66.46    // r13: sender sp
   66.47    address entry_point = __ pc();
   66.48  
   66.49 -  const Address size_of_parameters(rbx,
   66.50 -                                   Method::size_of_parameters_offset());
   66.51 -  const Address size_of_locals(rbx, Method::size_of_locals_offset());
   66.52 +  const Address constMethod(rbx, Method::const_offset());
   66.53    const Address invocation_counter(rbx,
   66.54                                     Method::invocation_counter_offset() +
   66.55                                     InvocationCounter::counter_offset());
   66.56    const Address access_flags(rbx, Method::access_flags_offset());
   66.57 +  const Address size_of_parameters(rdx,
   66.58 +                                   ConstMethod::size_of_parameters_offset());
   66.59 +  const Address size_of_locals(rdx, ConstMethod::size_of_locals_offset());
   66.60 +
   66.61  
   66.62    // get parameter size (always needed)
   66.63 +  __ movptr(rdx, constMethod);
   66.64    __ load_unsigned_short(rcx, size_of_parameters);
   66.65  
   66.66    // rbx: Method*
   66.67 @@ -1752,7 +1754,8 @@
   66.68      // Compute size of arguments for saving when returning to
   66.69      // deoptimized caller
   66.70      __ get_method(rax);
   66.71 -    __ load_unsigned_short(rax, Address(rax, in_bytes(Method::
   66.72 +    __ movptr(rax, Address(rax, Method::const_offset()));
   66.73 +    __ load_unsigned_short(rax, Address(rax, in_bytes(ConstMethod::
   66.74                                                  size_of_parameters_offset())));
   66.75      __ shll(rax, Interpreter::logStackElementSize);
   66.76      __ restore_locals(); // XXX do we need this?
    67.1 --- a/src/cpu/x86/vm/vm_version_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    67.2 +++ b/src/cpu/x86/vm/vm_version_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    67.3 @@ -489,8 +489,8 @@
    67.4    }
    67.5  
    67.6    // The AES intrinsic stubs require AES instruction support (of course)
    67.7 -  // but also require AVX and sse3 modes for instructions it use.
    67.8 -  if (UseAES && (UseAVX > 0) && (UseSSE > 2)) {
    67.9 +  // but also require sse3 mode for instructions it use.
   67.10 +  if (UseAES && (UseSSE > 2)) {
   67.11      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
   67.12        UseAESIntrinsics = true;
   67.13      }
    68.1 --- a/src/cpu/zero/vm/assembler_zero.cpp	Tue Jan 08 14:04:25 2013 -0500
    68.2 +++ b/src/cpu/zero/vm/assembler_zero.cpp	Tue Jan 08 11:39:53 2013 -0800
    68.3 @@ -56,15 +56,9 @@
    68.4    ShouldNotCallThis();
    68.5  }
    68.6  
    68.7 -#ifndef PRODUCT
    68.8 -void Assembler::pd_print_patched_instruction(address branch) {
    68.9 -  ShouldNotCallThis();
   68.10 -}
   68.11 -#endif // PRODUCT
   68.12 -
   68.13  void MacroAssembler::align(int modulus) {
   68.14    while (offset() % modulus != 0)
   68.15 -    emit_byte(AbstractAssembler::code_fill_byte());
   68.16 +    emit_int8(AbstractAssembler::code_fill_byte());
   68.17  }
   68.18  
   68.19  void MacroAssembler::bang_stack_with_offset(int offset) {
   68.20 @@ -72,8 +66,7 @@
   68.21  }
   68.22  
   68.23  void MacroAssembler::advance(int bytes) {
   68.24 -  _code_pos += bytes;
   68.25 -  sync();
   68.26 +  code_section()->set_end(code_section()->end() + bytes);
   68.27  }
   68.28  
   68.29  RegisterOrConstant MacroAssembler::delayed_value_impl(
    69.1 --- a/src/cpu/zero/vm/assembler_zero.hpp	Tue Jan 08 14:04:25 2013 -0500
    69.2 +++ b/src/cpu/zero/vm/assembler_zero.hpp	Tue Jan 08 11:39:53 2013 -0800
    69.3 @@ -37,9 +37,6 @@
    69.4  
    69.5   public:
    69.6    void pd_patch_instruction(address branch, address target);
    69.7 -#ifndef PRODUCT
    69.8 -  static void pd_print_patched_instruction(address branch);
    69.9 -#endif // PRODUCT
   69.10  };
   69.11  
   69.12  class MacroAssembler : public Assembler {
    70.1 --- a/src/os/bsd/vm/os_bsd.cpp	Tue Jan 08 14:04:25 2013 -0500
    70.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Tue Jan 08 11:39:53 2013 -0800
    70.3 @@ -298,12 +298,12 @@
    70.4  
    70.5    // The next steps are taken in the product version:
    70.6    //
    70.7 -  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
    70.8 +  // Obtain the JAVA_HOME value from the location of libjvm.so.
    70.9    // This library should be located at:
   70.10 -  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
   70.11 +  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
   70.12    //
   70.13    // If "/jre/lib/" appears at the right place in the path, then we
   70.14 -  // assume libjvm[_g].so is installed in a JDK and we use this path.
   70.15 +  // assume libjvm.so is installed in a JDK and we use this path.
   70.16    //
   70.17    // Otherwise exit with message: "Could not create the Java virtual machine."
   70.18    //
   70.19 @@ -313,9 +313,9 @@
   70.20    // instead of exit check for $JAVA_HOME environment variable.
   70.21    //
   70.22    // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   70.23 -  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
   70.24 -  // it looks like libjvm[_g].so is installed there
   70.25 -  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
   70.26 +  // then we append a fake suffix "hotspot/libjvm.so" to this path so
   70.27 +  // it looks like libjvm.so is installed there
   70.28 +  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
   70.29    //
   70.30    // Otherwise exit.
   70.31    //
   70.32 @@ -1228,7 +1228,7 @@
   70.33    return getcwd(buf, buflen);
   70.34  }
   70.35  
   70.36 -// check if addr is inside libjvm[_g].so
   70.37 +// check if addr is inside libjvm.so
   70.38  bool os::address_is_in_vm(address addr) {
   70.39    static address libjvm_base_addr;
   70.40    Dl_info dlinfo;
   70.41 @@ -1689,7 +1689,7 @@
   70.42  
   70.43  static char saved_jvm_path[MAXPATHLEN] = {0};
   70.44  
   70.45 -// Find the full path to the current module, libjvm or libjvm_g
   70.46 +// Find the full path to the current module, libjvm
   70.47  void os::jvm_path(char *buf, jint buflen) {
   70.48    // Error checking.
   70.49    if (buflen < MAXPATHLEN) {
   70.50 @@ -1732,10 +1732,9 @@
   70.51          char* jrelib_p;
   70.52          int len;
   70.53  
   70.54 -        // Check the current module name "libjvm" or "libjvm_g".
   70.55 +        // Check the current module name "libjvm"
   70.56          p = strrchr(buf, '/');
   70.57          assert(strstr(p, "/libjvm") == p, "invalid library name");
   70.58 -        p = strstr(p, "_g") ? "_g" : "";
   70.59  
   70.60          rp = realpath(java_home_var, buf);
   70.61          if (rp == NULL)
   70.62 @@ -1764,11 +1763,9 @@
   70.63          // to complete the path to JVM being overridden.  Otherwise fallback
   70.64          // to the path to the current library.
   70.65          if (0 == access(buf, F_OK)) {
   70.66 -          // Use current module name "libjvm[_g]" instead of
   70.67 -          // "libjvm"debug_only("_g")"" since for fastdebug version
   70.68 -          // we should have "libjvm" but debug_only("_g") adds "_g"!
   70.69 +          // Use current module name "libjvm"
   70.70            len = strlen(buf);
   70.71 -          snprintf(buf + len, buflen-len, "/libjvm%s%s", p, JNI_LIB_SUFFIX);
   70.72 +          snprintf(buf + len, buflen-len, "/libjvm%s", JNI_LIB_SUFFIX);
   70.73          } else {
   70.74            // Fall back to path of current library
   70.75            rp = realpath(dli_fname, buf);
    71.1 --- a/src/os/linux/vm/os_linux.cpp	Tue Jan 08 14:04:25 2013 -0500
    71.2 +++ b/src/os/linux/vm/os_linux.cpp	Tue Jan 08 11:39:53 2013 -0800
    71.3 @@ -321,12 +321,12 @@
    71.4  
    71.5    // The next steps are taken in the product version:
    71.6    //
    71.7 -  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
    71.8 +  // Obtain the JAVA_HOME value from the location of libjvm.so.
    71.9    // This library should be located at:
   71.10 -  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
   71.11 +  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
   71.12    //
   71.13    // If "/jre/lib/" appears at the right place in the path, then we
   71.14 -  // assume libjvm[_g].so is installed in a JDK and we use this path.
   71.15 +  // assume libjvm.so is installed in a JDK and we use this path.
   71.16    //
   71.17    // Otherwise exit with message: "Could not create the Java virtual machine."
   71.18    //
   71.19 @@ -336,9 +336,9 @@
   71.20    // instead of exit check for $JAVA_HOME environment variable.
   71.21    //
   71.22    // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   71.23 -  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
   71.24 -  // it looks like libjvm[_g].so is installed there
   71.25 -  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
   71.26 +  // then we append a fake suffix "hotspot/libjvm.so" to this path so
   71.27 +  // it looks like libjvm.so is installed there
   71.28 +  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
   71.29    //
   71.30    // Otherwise exit.
   71.31    //
   71.32 @@ -1679,7 +1679,7 @@
   71.33    return getcwd(buf, buflen);
   71.34  }
   71.35  
   71.36 -// check if addr is inside libjvm[_g].so
   71.37 +// check if addr is inside libjvm.so
   71.38  bool os::address_is_in_vm(address addr) {
   71.39    static address libjvm_base_addr;
   71.40    Dl_info dlinfo;
   71.41 @@ -2180,7 +2180,7 @@
   71.42  
   71.43  static char saved_jvm_path[MAXPATHLEN] = {0};
   71.44  
   71.45 -// Find the full path to the current module, libjvm.so or libjvm_g.so
   71.46 +// Find the full path to the current module, libjvm.so
   71.47  void os::jvm_path(char *buf, jint buflen) {
   71.48    // Error checking.
   71.49    if (buflen < MAXPATHLEN) {
   71.50 @@ -2223,10 +2223,9 @@
   71.51          char* jrelib_p;
   71.52          int len;
   71.53  
   71.54 -        // Check the current module name "libjvm.so" or "libjvm_g.so".
   71.55 +        // Check the current module name "libjvm.so".
   71.56          p = strrchr(buf, '/');
   71.57          assert(strstr(p, "/libjvm") == p, "invalid library name");
   71.58 -        p = strstr(p, "_g") ? "_g" : "";
   71.59  
   71.60          rp = realpath(java_home_var, buf);
   71.61          if (rp == NULL)
   71.62 @@ -2242,11 +2241,9 @@
   71.63          }
   71.64  
   71.65          if (0 == access(buf, F_OK)) {
   71.66 -          // Use current module name "libjvm[_g].so" instead of
   71.67 -          // "libjvm"debug_only("_g")".so" since for fastdebug version
   71.68 -          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
   71.69 +          // Use current module name "libjvm.so"
   71.70            len = strlen(buf);
   71.71 -          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
   71.72 +          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
   71.73          } else {
   71.74            // Go back to path of .so
   71.75            rp = realpath(dli_fname, buf);
    72.1 --- a/src/os/posix/vm/os_posix.cpp	Tue Jan 08 14:04:25 2013 -0500
    72.2 +++ b/src/os/posix/vm/os_posix.cpp	Tue Jan 08 11:39:53 2013 -0800
    72.3 @@ -93,6 +93,47 @@
    72.4    return;
    72.5  }
    72.6  
    72.7 +// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
    72.8 +// so on posix, unmap the section at the start and at the end of the chunk that we mapped
    72.9 +// rather than unmapping and remapping the whole chunk to get requested alignment.
   72.10 +char* os::reserve_memory_aligned(size_t size, size_t alignment) {
   72.11 +  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
   72.12 +      "Alignment must be a multiple of allocation granularity (page size)");
   72.13 +  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
   72.14 +
   72.15 +  size_t extra_size = size + alignment;
   72.16 +  assert(extra_size >= size, "overflow, size is too large to allow alignment");
   72.17 +
   72.18 +  char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
   72.19 +
   72.20 +  if (extra_base == NULL) {
   72.21 +    return NULL;
   72.22 +  }
   72.23 +
   72.24 +  // Do manual alignment
   72.25 +  char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
   72.26 +
   72.27 +  // [  |                                       |  ]
   72.28 +  // ^ extra_base
   72.29 +  //    ^ extra_base + begin_offset == aligned_base
   72.30 +  //     extra_base + begin_offset + size       ^
   72.31 +  //                       extra_base + extra_size ^
   72.32 +  // |<>| == begin_offset
   72.33 +  //                              end_offset == |<>|
   72.34 +  size_t begin_offset = aligned_base - extra_base;
   72.35 +  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
   72.36 +
   72.37 +  if (begin_offset > 0) {
   72.38 +      os::release_memory(extra_base, begin_offset);
   72.39 +  }
   72.40 +
   72.41 +  if (end_offset > 0) {
   72.42 +      os::release_memory(extra_base + begin_offset + size, end_offset);
   72.43 +  }
   72.44 +
   72.45 +  return aligned_base;
   72.46 +}
   72.47 +
   72.48  void os::Posix::print_load_average(outputStream* st) {
   72.49    st->print("load average:");
   72.50    double loadavg[3];
    73.1 --- a/src/os/solaris/vm/os_solaris.cpp	Tue Jan 08 14:04:25 2013 -0500
    73.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Tue Jan 08 11:39:53 2013 -0800
    73.3 @@ -734,12 +734,12 @@
    73.4  
    73.5    // The next steps are taken in the product version:
    73.6    //
    73.7 -  // Obtain the JAVA_HOME value from the location of libjvm[_g].so.
    73.8 +  // Obtain the JAVA_HOME value from the location of libjvm.so.
    73.9    // This library should be located at:
   73.10 -  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm[_g].so.
   73.11 +  // <JAVA_HOME>/jre/lib/<arch>/{client|server}/libjvm.so.
   73.12    //
   73.13    // If "/jre/lib/" appears at the right place in the path, then we
   73.14 -  // assume libjvm[_g].so is installed in a JDK and we use this path.
   73.15 +  // assume libjvm.so is installed in a JDK and we use this path.
   73.16    //
   73.17    // Otherwise exit with message: "Could not create the Java virtual machine."
   73.18    //
   73.19 @@ -749,9 +749,9 @@
   73.20    // instead of exit check for $JAVA_HOME environment variable.
   73.21    //
   73.22    // If it is defined and we are able to locate $JAVA_HOME/jre/lib/<arch>,
   73.23 -  // then we append a fake suffix "hotspot/libjvm[_g].so" to this path so
   73.24 -  // it looks like libjvm[_g].so is installed there
   73.25 -  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm[_g].so.
   73.26 +  // then we append a fake suffix "hotspot/libjvm.so" to this path so
   73.27 +  // it looks like libjvm.so is installed there
   73.28 +  // <JAVA_HOME>/jre/lib/<arch>/hotspot/libjvm.so.
   73.29    //
   73.30    // Otherwise exit.
   73.31    //
   73.32 @@ -1934,7 +1934,7 @@
   73.33    return getcwd(buf, buflen);
   73.34  }
   73.35  
   73.36 -// check if addr is inside libjvm[_g].so
   73.37 +// check if addr is inside libjvm.so
   73.38  bool os::address_is_in_vm(address addr) {
   73.39    static address libjvm_base_addr;
   73.40    Dl_info dlinfo;
   73.41 @@ -2474,7 +2474,7 @@
   73.42  
   73.43  static char saved_jvm_path[MAXPATHLEN] = { 0 };
   73.44  
   73.45 -// Find the full path to the current module, libjvm.so or libjvm_g.so
   73.46 +// Find the full path to the current module, libjvm.so
   73.47  void os::jvm_path(char *buf, jint buflen) {
   73.48    // Error checking.
   73.49    if (buflen < MAXPATHLEN) {
   73.50 @@ -2522,10 +2522,9 @@
   73.51            strcpy(cpu_arch, "amd64");
   73.52          }
   73.53  #endif
   73.54 -        // Check the current module name "libjvm.so" or "libjvm_g.so".
   73.55 +        // Check the current module name "libjvm.so".
   73.56          p = strrchr(buf, '/');
   73.57          assert(strstr(p, "/libjvm") == p, "invalid library name");
   73.58 -        p = strstr(p, "_g") ? "_g" : "";
   73.59  
   73.60          realpath(java_home_var, buf);
   73.61          // determine if this is a legacy image or modules image
   73.62 @@ -2538,11 +2537,9 @@
   73.63          }
   73.64  
   73.65          if (0 == access(buf, F_OK)) {
   73.66 -          // Use current module name "libjvm[_g].so" instead of
   73.67 -          // "libjvm"debug_only("_g")".so" since for fastdebug version
   73.68 -          // we should have "libjvm.so" but debug_only("_g") adds "_g"!
   73.69 +          // Use current module name "libjvm.so"
   73.70            len = strlen(buf);
   73.71 -          snprintf(buf + len, buflen-len, "/hotspot/libjvm%s.so", p);
   73.72 +          snprintf(buf + len, buflen-len, "/hotspot/libjvm.so");
   73.73          } else {
   73.74            // Go back to path of .so
   73.75            realpath((char *)dlinfo.dli_fname, buf);
    74.1 --- a/src/os/windows/vm/os_windows.cpp	Tue Jan 08 14:04:25 2013 -0500
    74.2 +++ b/src/os/windows/vm/os_windows.cpp	Tue Jan 08 11:39:53 2013 -0800
    74.3 @@ -182,7 +182,7 @@
    74.4  
    74.5        if (!getenv("_ALT_JAVA_HOME_DIR", home_dir, MAX_PATH)) {
    74.6            os::jvm_path(home_dir, sizeof(home_dir));
    74.7 -          // Found the full path to jvm[_g].dll.
    74.8 +          // Found the full path to jvm.dll.
    74.9            // Now cut the path to <java_home>/jre if we can.
   74.10            *(strrchr(home_dir, '\\')) = '\0';  /* get rid of \jvm.dll */
   74.11            pslash = strrchr(home_dir, '\\');
   74.12 @@ -1715,7 +1715,7 @@
   74.13  
   74.14  static char saved_jvm_path[MAX_PATH] = {0};
   74.15  
   74.16 -// Find the full path to the current module, jvm.dll or jvm_g.dll
   74.17 +// Find the full path to the current module, jvm.dll
   74.18  void os::jvm_path(char *buf, jint buflen) {
   74.19    // Error checking.
   74.20    if (buflen < MAX_PATH) {
   74.21 @@ -2895,6 +2895,36 @@
   74.22    }
   74.23  }
   74.24  
   74.25 +// Multiple threads can race in this code but it's not possible to unmap small sections of
   74.26 +// virtual space to get requested alignment, as POSIX-like OSes do.
   74.27 +// Windows prevents multiple threads from remapping over each other so this loop is thread-safe.
   74.28 +char* os::reserve_memory_aligned(size_t size, size_t alignment) {
   74.29 +  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
   74.30 +      "Alignment must be a multiple of allocation granularity (page size)");
   74.31 +  assert((size & (alignment -1)) == 0, "size must be 'alignment' aligned");
   74.32 +
   74.33 +  size_t extra_size = size + alignment;
   74.34 +  assert(extra_size >= size, "overflow, size is too large to allow alignment");
   74.35 +
   74.36 +  char* aligned_base = NULL;
   74.37 +
   74.38 +  do {
   74.39 +    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
   74.40 +    if (extra_base == NULL) {
   74.41 +      return NULL;
   74.42 +    }
   74.43 +    // Do manual alignment
   74.44 +    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
   74.45 +
   74.46 +    os::release_memory(extra_base, extra_size);
   74.47 +
   74.48 +    aligned_base = os::reserve_memory(size, aligned_base);
   74.49 +
   74.50 +  } while (aligned_base == NULL);
   74.51 +
   74.52 +  return aligned_base;
   74.53 +}
   74.54 +
   74.55  char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   74.56    assert((size_t)addr % os::vm_allocation_granularity() == 0,
   74.57           "reserve alignment");
    75.1 --- a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    75.2 +++ b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    75.3 @@ -116,7 +116,7 @@
    75.4    ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
    75.5    if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) {            // T1
    75.6       // Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
    75.7 -     emit_byte (segment);
    75.8 +     emit_int8 (segment);
    75.9       // ExternalAddress doesn't work because it can't take NULL
   75.10       AddressLiteral null(0, relocInfo::none);
   75.11       movptr (thread, null);
   75.12 @@ -125,7 +125,7 @@
   75.13    } else
   75.14    if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) {              // T2
   75.15       // mov r, gs:[tlsOffset]
   75.16 -     emit_byte (segment);
   75.17 +     emit_int8 (segment);
   75.18       AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
   75.19       movptr (thread, tls_off);
   75.20       return ;
    76.1 --- a/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Tue Jan 08 14:04:25 2013 -0500
    76.2 +++ b/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp	Tue Jan 08 11:39:53 2013 -0800
    76.3 @@ -30,7 +30,7 @@
    76.4  
    76.5  
    76.6  void MacroAssembler::int3() {
    76.7 -  emit_byte(0xCC);
    76.8 +  emit_int8((unsigned char)0xCC);
    76.9  }
   76.10  
   76.11  #ifndef _LP64
    77.1 --- a/src/share/tools/ProjectCreator/ProjectCreator.java	Tue Jan 08 14:04:25 2013 -0500
    77.2 +++ b/src/share/tools/ProjectCreator/ProjectCreator.java	Tue Jan 08 11:39:53 2013 -0800
    77.3 @@ -36,7 +36,7 @@
    77.4              + "into .dsp file, substituting for path given in "
    77.5              + "-sourceBase. Example: HotSpotWorkSpace>");
    77.6        System.err.println("  -dllLoc <path to directory in which to put "
    77.7 -            + "jvm.dll and jvm_g.dll; no trailing slash>");
    77.8 +            + "jvm.dll; no trailing slash>");
    77.9        System.err.println("  If any of the above are specified, "
   77.10              + "they must all be.");
   77.11        System.err.println("  Additional, optional arguments, which can be "
    78.1 --- a/src/share/vm/asm/assembler.cpp	Tue Jan 08 14:04:25 2013 -0500
    78.2 +++ b/src/share/vm/asm/assembler.cpp	Tue Jan 08 11:39:53 2013 -0800
    78.3 @@ -109,37 +109,6 @@
    78.4    ICache::invalidate_range(addr_at(0), offset());
    78.5  }
    78.6  
    78.7 -
    78.8 -void AbstractAssembler::a_byte(int x) {
    78.9 -  emit_byte(x);
   78.10 -}
   78.11 -
   78.12 -
   78.13 -void AbstractAssembler::a_long(jint x) {
   78.14 -  emit_long(x);
   78.15 -}
   78.16 -
   78.17 -// Labels refer to positions in the (to be) generated code.  There are bound
   78.18 -// and unbound
   78.19 -//
   78.20 -// Bound labels refer to known positions in the already generated code.
   78.21 -// offset() is the position the label refers to.
   78.22 -//
   78.23 -// Unbound labels refer to unknown positions in the code to be generated; it
   78.24 -// may contain a list of unresolved displacements that refer to it
   78.25 -#ifndef PRODUCT
   78.26 -void AbstractAssembler::print(Label& L) {
   78.27 -  if (L.is_bound()) {
   78.28 -    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
   78.29 -  } else if (L.is_unbound()) {
   78.30 -    L.print_instructions((MacroAssembler*)this);
   78.31 -  } else {
   78.32 -    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
   78.33 -  }
   78.34 -}
   78.35 -#endif // PRODUCT
   78.36 -
   78.37 -
   78.38  void AbstractAssembler::bind(Label& L) {
   78.39    if (L.is_bound()) {
   78.40      // Assembler can bind a label more than once to the same place.
   78.41 @@ -342,28 +311,3 @@
   78.42  #endif
   78.43    return offset < 0 || os::vm_page_size() <= offset;
   78.44  }
   78.45 -
   78.46 -#ifndef PRODUCT
   78.47 -void Label::print_instructions(MacroAssembler* masm) const {
   78.48 -  CodeBuffer* cb = masm->code();
   78.49 -  for (int i = 0; i < _patch_index; ++i) {
   78.50 -    int branch_loc;
   78.51 -    if (i >= PatchCacheSize) {
   78.52 -      branch_loc = _patch_overflow->at(i - PatchCacheSize);
   78.53 -    } else {
   78.54 -      branch_loc = _patches[i];
   78.55 -    }
   78.56 -    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
   78.57 -    int branch_sect = CodeBuffer::locator_sect(branch_loc);
   78.58 -    address branch = cb->locator_address(branch_loc);
   78.59 -    tty->print_cr("unbound label");
   78.60 -    tty->print("@ %d|%d ", branch_pos, branch_sect);
   78.61 -    if (branch_sect == CodeBuffer::SECT_CONSTS) {
   78.62 -      tty->print_cr(PTR_FORMAT, *(address*)branch);
   78.63 -      continue;
   78.64 -    }
   78.65 -    masm->pd_print_patched_instruction(branch);
   78.66 -    tty->cr();
   78.67 -  }
   78.68 -}
   78.69 -#endif // ndef PRODUCT
    79.1 --- a/src/share/vm/asm/assembler.hpp	Tue Jan 08 14:04:25 2013 -0500
    79.2 +++ b/src/share/vm/asm/assembler.hpp	Tue Jan 08 11:39:53 2013 -0800
    79.3 @@ -216,17 +216,6 @@
    79.4    bool isByte(int x) const             { return 0 <= x && x < 0x100; }
    79.5    bool isShiftCount(int x) const       { return 0 <= x && x < 32; }
    79.6  
    79.7 -  void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
    79.8 -  void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
    79.9 -  void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
   79.10 -  void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }
   79.11 -
   79.12 -  void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
   79.13 -  void emit_double( jdouble x) { code_section()->emit_double( x); }
   79.14 -  void emit_address(address x) { code_section()->emit_address(x); }
   79.15 -
   79.16 -  void emit_byte(int x)  { emit_int8 (x); }  // deprecated
   79.17 -  void emit_word(int x)  { emit_int16(x); }  // deprecated
   79.18    void emit_long(jint x) { emit_int32(x); }  // deprecated
   79.19  
   79.20    // Instruction boundaries (required when emitting relocatable values).
   79.21 @@ -277,9 +266,6 @@
   79.22    };
   79.23  #endif
   79.24  
   79.25 -  // Label functions
   79.26 -  void print(Label& L);
   79.27 -
   79.28   public:
   79.29  
   79.30    // Creation
   79.31 @@ -288,6 +274,15 @@
   79.32    // ensure buf contains all code (call this before using/copying the code)
   79.33    void flush();
   79.34  
   79.35 +  void emit_int8(   int8_t  x) { code_section()->emit_int8(   x); }
   79.36 +  void emit_int16(  int16_t x) { code_section()->emit_int16(  x); }
   79.37 +  void emit_int32(  int32_t x) { code_section()->emit_int32(  x); }
   79.38 +  void emit_int64(  int64_t x) { code_section()->emit_int64(  x); }
   79.39 +
   79.40 +  void emit_float(  jfloat  x) { code_section()->emit_float(  x); }
   79.41 +  void emit_double( jdouble x) { code_section()->emit_double( x); }
   79.42 +  void emit_address(address x) { code_section()->emit_address(x); }
   79.43 +
   79.44    // min and max values for signed immediate ranges
   79.45    static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1))    ; }
   79.46    static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }
   79.47 @@ -327,8 +322,6 @@
   79.48    void    clear_inst_mark()       {        code_section()->clear_mark(); }
   79.49  
   79.50    // Constants in code
   79.51 -  void a_byte(int x);
   79.52 -  void a_long(jint x);
   79.53    void relocate(RelocationHolder const& rspec, int format = 0) {
   79.54      assert(!pd_check_instruction_mark()
   79.55          || inst_mark() == NULL || inst_mark() == code_section()->end(),
   79.56 @@ -441,15 +434,6 @@
   79.57     */
   79.58    void pd_patch_instruction(address branch, address target);
   79.59  
   79.60 -#ifndef PRODUCT
   79.61 -  /**
   79.62 -   * Platform-dependent method of printing an instruction that needs to be
   79.63 -   * patched.
   79.64 -   *
   79.65 -   * @param branch the instruction to be patched in the buffer.
   79.66 -   */
   79.67 -  static void pd_print_patched_instruction(address branch);
   79.68 -#endif // PRODUCT
   79.69  };
   79.70  
   79.71  #ifdef TARGET_ARCH_x86
    80.1 --- a/src/share/vm/asm/codeBuffer.cpp	Tue Jan 08 14:04:25 2013 -0500
    80.2 +++ b/src/share/vm/asm/codeBuffer.cpp	Tue Jan 08 11:39:53 2013 -0800
    80.3 @@ -496,21 +496,9 @@
    80.4    dest->verify_section_allocation();
    80.5  }
    80.6  
    80.7 -// Anonymous classes need mirror to keep the metadata alive but
    80.8 -// for regular classes, the class_loader is sufficient.
    80.9 +// Append an oop reference that keeps the class alive.
   80.10  static void append_oop_references(GrowableArray<oop>* oops, Klass* k) {
   80.11 -  if (k->oop_is_instance()) {
   80.12 -    InstanceKlass* ik = InstanceKlass::cast(k);
   80.13 -    if (ik->is_anonymous()) {
   80.14 -      oop o = ik->java_mirror();
   80.15 -      assert (o != NULL, "should have a mirror");
   80.16 -      if (!oops->contains(o)) {
   80.17 -        oops->append(o);
   80.18 -      }
   80.19 -      return;  // only need the mirror
   80.20 -    }
   80.21 -  }
   80.22 -  oop cl = k->class_loader();
   80.23 +  oop cl = k->klass_holder();
   80.24    if (cl != NULL && !oops->contains(cl)) {
   80.25      oops->append(cl);
   80.26    }
    81.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Jan 08 14:04:25 2013 -0500
    81.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Jan 08 11:39:53 2013 -0800
    81.3 @@ -3442,6 +3442,11 @@
    81.4        preserves_state = true;
    81.5        break;
    81.6  
    81.7 +    case vmIntrinsics::_loadFence :
    81.8 +    case vmIntrinsics::_storeFence:
    81.9 +    case vmIntrinsics::_fullFence :
   81.10 +      break;
   81.11 +
   81.12      default                       : return false; // do not inline
   81.13    }
   81.14    // create intrinsic node
    82.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 08 14:04:25 2013 -0500
    82.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp	Tue Jan 08 11:39:53 2013 -0800
    82.3 @@ -2977,6 +2977,16 @@
    82.4      do_CompareAndSwap(x, longType);
    82.5      break;
    82.6  
    82.7 +  case vmIntrinsics::_loadFence :
    82.8 +    if (os::is_MP()) __ membar_acquire();
    82.9 +    break;
   82.10 +  case vmIntrinsics::_storeFence:
   82.11 +    if (os::is_MP()) __ membar_release();
   82.12 +    break;
   82.13 +  case vmIntrinsics::_fullFence :
   82.14 +    if (os::is_MP()) __ membar();
   82.15 +    break;
   82.16 +
   82.17    case vmIntrinsics::_Reference_get:
   82.18      do_Reference_get(x);
   82.19      break;
    83.1 --- a/src/share/vm/ci/ciField.cpp	Tue Jan 08 14:04:25 2013 -0500
    83.2 +++ b/src/share/vm/ci/ciField.cpp	Tue Jan 08 11:39:53 2013 -0800
    83.3 @@ -366,10 +366,12 @@
    83.4  // ------------------------------------------------------------------
    83.5  // ciField::print
    83.6  void ciField::print() {
    83.7 -  tty->print("<ciField ");
    83.8 +  tty->print("<ciField name=");
    83.9    _holder->print_name();
   83.10    tty->print(".");
   83.11    _name->print_symbol();
   83.12 +  tty->print(" signature=");
   83.13 +  _signature->print_symbol();
   83.14    tty->print(" offset=%d type=", _offset);
   83.15    if (_type != NULL) _type->print_name();
   83.16    else               tty->print("(reference)");
    84.1 --- a/src/share/vm/classfile/classFileParser.cpp	Tue Jan 08 14:04:25 2013 -0500
    84.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Tue Jan 08 11:39:53 2013 -0800
    84.3 @@ -906,6 +906,7 @@
    84.4                                               bool* is_synthetic_addr,
    84.5                                               u2* generic_signature_index_addr,
    84.6                                               AnnotationArray** field_annotations,
    84.7 +                                             AnnotationArray** field_type_annotations,
    84.8                                               ClassFileParser::FieldAnnotationCollector* parsed_annotations,
    84.9                                               TRAPS) {
   84.10    ClassFileStream* cfs = stream();
   84.11 @@ -917,6 +918,10 @@
   84.12    int runtime_visible_annotations_length = 0;
   84.13    u1* runtime_invisible_annotations = NULL;
   84.14    int runtime_invisible_annotations_length = 0;
   84.15 +  u1* runtime_visible_type_annotations = NULL;
   84.16 +  int runtime_visible_type_annotations_length = 0;
   84.17 +  u1* runtime_invisible_type_annotations = NULL;
   84.18 +  int runtime_invisible_type_annotations_length = 0;
   84.19    while (attributes_count--) {
   84.20      cfs->guarantee_more(6, CHECK);  // attribute_name_index, attribute_length
   84.21      u2 attribute_name_index = cfs->get_u2_fast();
   84.22 @@ -971,6 +976,16 @@
   84.23          runtime_invisible_annotations = cfs->get_u1_buffer();
   84.24          assert(runtime_invisible_annotations != NULL, "null invisible annotations");
   84.25          cfs->skip_u1(runtime_invisible_annotations_length, CHECK);
   84.26 +      } else if (attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
   84.27 +        runtime_visible_type_annotations_length = attribute_length;
   84.28 +        runtime_visible_type_annotations = cfs->get_u1_buffer();
   84.29 +        assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
   84.30 +        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
   84.31 +      } else if (PreserveAllAnnotations && attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
   84.32 +        runtime_invisible_type_annotations_length = attribute_length;
   84.33 +        runtime_invisible_type_annotations = cfs->get_u1_buffer();
   84.34 +        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
   84.35 +        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
   84.36        } else {
   84.37          cfs->skip_u1(attribute_length, CHECK);  // Skip unknown attributes
   84.38        }
   84.39 @@ -988,6 +1003,12 @@
   84.40                                              runtime_invisible_annotations,
   84.41                                              runtime_invisible_annotations_length,
   84.42                                              CHECK);
   84.43 +  *field_type_annotations = assemble_annotations(loader_data,
   84.44 +                                            runtime_visible_type_annotations,
   84.45 +                                            runtime_visible_type_annotations_length,
   84.46 +                                            runtime_invisible_type_annotations,
   84.47 +                                            runtime_invisible_type_annotations_length,
   84.48 +                                            CHECK);
   84.49    return;
   84.50  }
   84.51  
   84.52 @@ -1084,6 +1105,7 @@
   84.53                                           bool is_interface,
   84.54                                           FieldAllocationCount *fac,
   84.55                                           Array<AnnotationArray*>** fields_annotations,
   84.56 +                                         Array<AnnotationArray*>** fields_type_annotations,
   84.57                                           u2* java_fields_count_ptr, TRAPS) {
   84.58    ClassFileStream* cfs = stream();
   84.59    cfs->guarantee_more(2, CHECK_NULL);  // length
   84.60 @@ -1119,6 +1141,7 @@
   84.61               THREAD, u2, total_fields * (FieldInfo::field_slots + 1));
   84.62  
   84.63    AnnotationArray* field_annotations = NULL;
   84.64 +  AnnotationArray* field_type_annotations = NULL;
   84.65    // The generic signature slots start after all other fields' data.
   84.66    int generic_signature_slot = total_fields * FieldInfo::field_slots;
   84.67    int num_generic_signature = 0;
   84.68 @@ -1160,7 +1183,7 @@
   84.69                               cp, attributes_count, is_static, signature_index,
   84.70                               &constantvalue_index, &is_synthetic,
   84.71                               &generic_signature_index, &field_annotations,
   84.72 -                             &parsed_annotations,
   84.73 +                             &field_type_annotations, &parsed_annotations,
   84.74                               CHECK_NULL);
   84.75        if (field_annotations != NULL) {
   84.76          if (*fields_annotations == NULL) {
   84.77 @@ -1170,6 +1193,14 @@
   84.78          }
   84.79          (*fields_annotations)->at_put(n, field_annotations);
   84.80        }
   84.81 +      if (field_type_annotations != NULL) {
   84.82 +        if (*fields_type_annotations == NULL) {
   84.83 +          *fields_type_annotations = MetadataFactory::new_array<AnnotationArray*>(
   84.84 +                                                  loader_data, length, NULL,
   84.85 +                                                  CHECK_NULL);
   84.86 +        }
   84.87 +        (*fields_type_annotations)->at_put(n, field_type_annotations);
   84.88 +      }
   84.89        if (is_synthetic) {
   84.90          access_flags.set_is_synthetic();
   84.91        }
   84.92 @@ -1831,6 +1862,7 @@
   84.93                                             AnnotationArray** method_annotations,
   84.94                                             AnnotationArray** method_parameter_annotations,
   84.95                                             AnnotationArray** method_default_annotations,
   84.96 +                                           AnnotationArray** method_type_annotations,
   84.97                                             TRAPS) {
   84.98    ClassFileStream* cfs = stream();
   84.99    methodHandle nullHandle;
  84.100 @@ -1918,6 +1950,10 @@
  84.101    int runtime_visible_parameter_annotations_length = 0;
  84.102    u1* runtime_invisible_parameter_annotations = NULL;
  84.103    int runtime_invisible_parameter_annotations_length = 0;
  84.104 +  u1* runtime_visible_type_annotations = NULL;
  84.105 +  int runtime_visible_type_annotations_length = 0;
  84.106 +  u1* runtime_invisible_type_annotations = NULL;
  84.107 +  int runtime_invisible_type_annotations_length = 0;
  84.108    u1* annotation_default = NULL;
  84.109    int annotation_default_length = 0;
  84.110  
  84.111 @@ -2159,6 +2195,17 @@
  84.112          annotation_default = cfs->get_u1_buffer();
  84.113          assert(annotation_default != NULL, "null annotation default");
  84.114          cfs->skip_u1(annotation_default_length, CHECK_(nullHandle));
  84.115 +      } else if (method_attribute_name == vmSymbols::tag_runtime_visible_type_annotations()) {
  84.116 +        runtime_visible_type_annotations_length = method_attribute_length;
  84.117 +        runtime_visible_type_annotations = cfs->get_u1_buffer();
  84.118 +        assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
  84.119 +        // No need for the VM to parse Type annotations
  84.120 +        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK_(nullHandle));
  84.121 +      } else if (PreserveAllAnnotations && method_attribute_name == vmSymbols::tag_runtime_invisible_type_annotations()) {
  84.122 +        runtime_invisible_type_annotations_length = method_attribute_length;
  84.123 +        runtime_invisible_type_annotations = cfs->get_u1_buffer();
  84.124 +        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
  84.125 +        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK_(nullHandle));
  84.126        } else {
  84.127          // Skip unknown attributes
  84.128          cfs->skip_u1(method_attribute_length, CHECK_(nullHandle));
  84.129 @@ -2333,6 +2380,12 @@
  84.130                                                       NULL,
  84.131                                                       0,
  84.132                                                       CHECK_(nullHandle));
  84.133 +  *method_type_annotations = assemble_annotations(loader_data,
  84.134 +                                                  runtime_visible_type_annotations,
  84.135 +                                                  runtime_visible_type_annotations_length,
  84.136 +                                                  runtime_invisible_type_annotations,
  84.137 +                                                  runtime_invisible_type_annotations_length,
  84.138 +                                                  CHECK_(nullHandle));
  84.139  
  84.140    if (name == vmSymbols::finalize_method_name() &&
  84.141        signature == vmSymbols::void_method_signature()) {
  84.142 @@ -2364,12 +2417,14 @@
  84.143                                                 Array<AnnotationArray*>** methods_annotations,
  84.144                                                 Array<AnnotationArray*>** methods_parameter_annotations,
  84.145                                                 Array<AnnotationArray*>** methods_default_annotations,
  84.146 +                                               Array<AnnotationArray*>** methods_type_annotations,
  84.147                                                 bool* has_default_methods,
  84.148                                                 TRAPS) {
  84.149    ClassFileStream* cfs = stream();
  84.150    AnnotationArray* method_annotations = NULL;
  84.151    AnnotationArray* method_parameter_annotations = NULL;
  84.152    AnnotationArray* method_default_annotations = NULL;
  84.153 +  AnnotationArray* method_type_annotations = NULL;
  84.154    cfs->guarantee_more(2, CHECK_NULL);  // length
  84.155    u2 length = cfs->get_u2_fast();
  84.156    if (length == 0) {
  84.157 @@ -2386,6 +2441,7 @@
  84.158                                           &method_annotations,
  84.159                                           &method_parameter_annotations,
  84.160                                           &method_default_annotations,
  84.161 +                                         &method_type_annotations,
  84.162                                           CHECK_NULL);
  84.163  
  84.164        if (method->is_final()) {
  84.165 @@ -2411,7 +2467,13 @@
  84.166              MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
  84.167        }
  84.168        (*methods_default_annotations)->at_put(index, method_default_annotations);
  84.169 +      if (*methods_type_annotations == NULL) {
  84.170 +        *methods_type_annotations =
  84.171 +             MetadataFactory::new_array<AnnotationArray*>(loader_data, length, NULL, CHECK_NULL);
  84.172 +      }
  84.173 +      (*methods_type_annotations)->at_put(index, method_type_annotations);
  84.174      }
  84.175 +
  84.176      if (_need_verify && length > 1) {
  84.177        // Check duplicated methods
  84.178        ResourceMark rm(THREAD);
  84.179 @@ -2445,6 +2507,7 @@
  84.180                                            Array<AnnotationArray*>* methods_annotations,
  84.181                                            Array<AnnotationArray*>* methods_parameter_annotations,
  84.182                                            Array<AnnotationArray*>* methods_default_annotations,
  84.183 +                                          Array<AnnotationArray*>* methods_type_annotations,
  84.184                                                TRAPS) {
  84.185    int length = methods->length();
  84.186    // If JVMTI original method ordering or sharing is enabled we have to
  84.187 @@ -2463,7 +2526,8 @@
  84.188    // Note that the ordering is not alphabetical, see Symbol::fast_compare
  84.189    Method::sort_methods(methods, methods_annotations,
  84.190                         methods_parameter_annotations,
  84.191 -                       methods_default_annotations);
  84.192 +                       methods_default_annotations,
  84.193 +                       methods_type_annotations);
  84.194  
  84.195    // If JVMTI original method ordering or sharing is enabled construct int
  84.196    // array remembering the original ordering
  84.197 @@ -2728,6 +2792,10 @@
  84.198    int runtime_visible_annotations_length = 0;
  84.199    u1* runtime_invisible_annotations = NULL;
  84.200    int runtime_invisible_annotations_length = 0;
  84.201 +  u1* runtime_visible_type_annotations = NULL;
  84.202 +  int runtime_visible_type_annotations_length = 0;
  84.203 +  u1* runtime_invisible_type_annotations = NULL;
  84.204 +  int runtime_invisible_type_annotations_length = 0;
  84.205    u1* inner_classes_attribute_start = NULL;
  84.206    u4  inner_classes_attribute_length = 0;
  84.207    u2  enclosing_method_class_index = 0;
  84.208 @@ -2834,6 +2902,17 @@
  84.209            classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", CHECK);
  84.210          parsed_bootstrap_methods_attribute = true;
  84.211          parse_classfile_bootstrap_methods_attribute(loader_data, cp, attribute_length, CHECK);
  84.212 +      } else if (tag == vmSymbols::tag_runtime_visible_type_annotations()) {
  84.213 +        runtime_visible_type_annotations_length = attribute_length;
  84.214 +        runtime_visible_type_annotations = cfs->get_u1_buffer();
  84.215 +        assert(runtime_visible_type_annotations != NULL, "null visible type annotations");
  84.216 +        // No need for the VM to parse Type annotations
  84.217 +        cfs->skip_u1(runtime_visible_type_annotations_length, CHECK);
  84.218 +      } else if (PreserveAllAnnotations && tag == vmSymbols::tag_runtime_invisible_type_annotations()) {
  84.219 +        runtime_invisible_type_annotations_length = attribute_length;
  84.220 +        runtime_invisible_type_annotations = cfs->get_u1_buffer();
  84.221 +        assert(runtime_invisible_type_annotations != NULL, "null invisible type annotations");
  84.222 +        cfs->skip_u1(runtime_invisible_type_annotations_length, CHECK);
  84.223        } else {
  84.224          // Unknown attribute
  84.225          cfs->skip_u1(attribute_length, CHECK);
  84.226 @@ -2850,6 +2929,13 @@
  84.227                                                        runtime_invisible_annotations_length,
  84.228                                                        CHECK);
  84.229    set_class_annotations(annotations);
  84.230 +  AnnotationArray* type_annotations = assemble_annotations(loader_data,
  84.231 +                                                           runtime_visible_type_annotations,
  84.232 +                                                           runtime_visible_type_annotations_length,
  84.233 +                                                           runtime_invisible_type_annotations,
  84.234 +                                                           runtime_invisible_type_annotations_length,
  84.235 +                                                           CHECK);
  84.236 +  set_class_type_annotations(type_annotations);
  84.237  
  84.238    if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
  84.239      u2 num_of_classes = parse_classfile_inner_classes_attribute(
  84.240 @@ -3190,7 +3276,9 @@
  84.241      // Fields (offsets are filled in later)
  84.242      FieldAllocationCount fac;
  84.243      Array<AnnotationArray*>* fields_annotations = NULL;
  84.244 +    Array<AnnotationArray*>* fields_type_annotations = NULL;
  84.245      Array<u2>* fields = parse_fields(loader_data, class_name, cp, access_flags.is_interface(), &fac, &fields_annotations,
  84.246 +                                          &fields_type_annotations,
  84.247                                            &java_fields_count,
  84.248                                            CHECK_(nullHandle));
  84.249      // Methods
  84.250 @@ -3202,6 +3290,7 @@
  84.251      Array<AnnotationArray*>* methods_annotations = NULL;
  84.252      Array<AnnotationArray*>* methods_parameter_annotations = NULL;
  84.253      Array<AnnotationArray*>* methods_default_annotations = NULL;
  84.254 +    Array<AnnotationArray*>* methods_type_annotations = NULL;
  84.255      Array<Method*>* methods = parse_methods(loader_data,
  84.256                                              cp, access_flags.is_interface(),
  84.257                                              &promoted_flags,
  84.258 @@ -3209,6 +3298,7 @@
  84.259                                              &methods_annotations,
  84.260                                              &methods_parameter_annotations,
  84.261                                              &methods_default_annotations,
  84.262 +                                            &methods_type_annotations,
  84.263                                              &has_default_methods,
  84.264                                              CHECK_(nullHandle));
  84.265  
  84.266 @@ -3270,6 +3360,7 @@
  84.267                                                 methods_annotations,
  84.268                                                 methods_parameter_annotations,
  84.269                                                 methods_default_annotations,
  84.270 +                                               methods_type_annotations,
  84.271                                                 CHECK_(nullHandle));
  84.272  
  84.273      // promote flags from parse_methods() to the klass' flags
  84.274 @@ -3687,11 +3778,13 @@
  84.275      if (is_anonymous())  // I am well known to myself
  84.276        cp->klass_at_put(this_class_index, this_klass()); // eagerly resolve
  84.277  
  84.278 +    // Allocate an annotation type if needed.
  84.279      if (fields_annotations != NULL ||
  84.280          methods_annotations != NULL ||
  84.281          methods_parameter_annotations != NULL ||
  84.282 -        methods_default_annotations != NULL) {
  84.283 -      // Allocate an annotation type if needed.
  84.284 +        methods_default_annotations != NULL ||
  84.285 +        fields_type_annotations != NULL ||
  84.286 +        methods_type_annotations != NULL) {
  84.287        Annotations* anno = Annotations::allocate(loader_data,
  84.288                              fields_annotations, methods_annotations,
  84.289                              methods_parameter_annotations,
  84.290 @@ -3701,6 +3794,16 @@
  84.291        this_klass->set_annotations(NULL);
  84.292      }
  84.293  
  84.294 +    if (fields_type_annotations != NULL ||
  84.295 +        methods_type_annotations != NULL) {
  84.296 +      assert(this_klass->annotations() != NULL, "annotations should have been allocated");
  84.297 +      Annotations* anno = Annotations::allocate(loader_data,
  84.298 +                                                fields_type_annotations,
  84.299 +                                                methods_type_annotations,
  84.300 +                                                NULL,
  84.301 +                                                NULL, CHECK_(nullHandle));
  84.302 +      this_klass->annotations()->set_type_annotations(anno);
  84.303 +    }
  84.304  
  84.305      this_klass->set_minor_version(minor_version);
  84.306      this_klass->set_major_version(major_version);
  84.307 @@ -3725,6 +3828,7 @@
  84.308      // Fill in field values obtained by parse_classfile_attributes
  84.309      if (parsed_annotations.has_any_annotations())
  84.310        parsed_annotations.apply_to(this_klass);
  84.311 +
  84.312      // Create annotations
  84.313      if (_annotations != NULL && this_klass->annotations() == NULL) {
  84.314        Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
  84.315 @@ -3732,6 +3836,19 @@
  84.316      }
  84.317      apply_parsed_class_attributes(this_klass);
  84.318  
  84.319 +    // Create type annotations
  84.320 +    if (_type_annotations != NULL) {
  84.321 +      if (this_klass->annotations() == NULL) {
  84.322 +        Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
  84.323 +        this_klass->set_annotations(anno);
  84.324 +      }
  84.325 +      if (this_klass->annotations()->type_annotations() == NULL) {
  84.326 +        Annotations* anno = Annotations::allocate(loader_data, CHECK_NULL);
  84.327 +        this_klass->annotations()->set_type_annotations(anno);
  84.328 +      }
  84.329 +      this_klass->annotations()->type_annotations()->set_class_annotations(_type_annotations);
  84.330 +    }
  84.331 +
  84.332      // Miranda methods
  84.333      if ((num_miranda_methods > 0) ||
  84.334          // if this class introduced new miranda methods or
    85.1 --- a/src/share/vm/classfile/classFileParser.hpp	Tue Jan 08 14:04:25 2013 -0500
    85.2 +++ b/src/share/vm/classfile/classFileParser.hpp	Tue Jan 08 11:39:53 2013 -0800
    85.3 @@ -64,6 +64,7 @@
    85.4    int        _sde_length;
    85.5    Array<u2>* _inner_classes;
    85.6    AnnotationArray* _annotations;
    85.7 +  AnnotationArray* _type_annotations;
    85.8  
    85.9    void set_class_synthetic_flag(bool x)           { _synthetic_flag = x; }
   85.10    void set_class_sourcefile(Symbol* x)            { _sourcefile = x; }
   85.11 @@ -71,12 +72,14 @@
   85.12    void set_class_sde_buffer(char* x, int len)     { _sde_buffer = x; _sde_length = len; }
   85.13    void set_class_inner_classes(Array<u2>* x)      { _inner_classes = x; }
   85.14    void set_class_annotations(AnnotationArray* x)  { _annotations = x; }
   85.15 +  void set_class_type_annotations(AnnotationArray* x)  { _type_annotations = x; }
   85.16    void init_parsed_class_attributes() {
   85.17      _synthetic_flag = false;
   85.18      _sourcefile = NULL;
   85.19      _generic_signature = NULL;
   85.20      _sde_buffer = NULL;
   85.21      _sde_length = 0;
   85.22 +    _annotations = _type_annotations = NULL;
   85.23      // initialize the other flags too:
   85.24      _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
   85.25      _max_bootstrap_specifier_index = -1;
   85.26 @@ -163,6 +166,7 @@
   85.27                                bool* is_synthetic_addr,
   85.28                                u2* generic_signature_index_addr,
   85.29                                AnnotationArray** field_annotations,
   85.30 +                              AnnotationArray** field_type_annotations,
   85.31                                FieldAnnotationCollector* parsed_annotations,
   85.32                                TRAPS);
   85.33    Array<u2>* parse_fields(ClassLoaderData* loader_data,
   85.34 @@ -170,6 +174,7 @@
   85.35                            constantPoolHandle cp, bool is_interface,
   85.36                            FieldAllocationCount *fac,
   85.37                            Array<AnnotationArray*>** fields_annotations,
   85.38 +                          Array<AnnotationArray*>** fields_type_annotations,
   85.39                            u2* java_fields_count_ptr, TRAPS);
   85.40  
   85.41    // Method parsing
   85.42 @@ -180,6 +185,7 @@
   85.43                              AnnotationArray** method_annotations,
   85.44                              AnnotationArray** method_parameter_annotations,
   85.45                              AnnotationArray** method_default_annotations,
   85.46 +                            AnnotationArray** method_type_annotations,
   85.47                              TRAPS);
   85.48    Array<Method*>* parse_methods(ClassLoaderData* loader_data,
   85.49                                  constantPoolHandle cp,
   85.50 @@ -189,6 +195,7 @@
   85.51                                  Array<AnnotationArray*>** methods_annotations,
   85.52                                  Array<AnnotationArray*>** methods_parameter_annotations,
   85.53                                  Array<AnnotationArray*>** methods_default_annotations,
   85.54 +                                Array<AnnotationArray*>** methods_type_annotations,
   85.55                                  bool* has_default_method,
   85.56                                  TRAPS);
   85.57    Array<int>* sort_methods(ClassLoaderData* loader_data,
   85.58 @@ -196,6 +203,7 @@
   85.59                             Array<AnnotationArray*>* methods_annotations,
   85.60                             Array<AnnotationArray*>* methods_parameter_annotations,
   85.61                             Array<AnnotationArray*>* methods_default_annotations,
   85.62 +                           Array<AnnotationArray*>* methods_type_annotations,
   85.63                                  TRAPS);
   85.64    u2* parse_exception_table(ClassLoaderData* loader_data,
   85.65                              u4 code_length, u4 exception_table_length,
    86.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Tue Jan 08 14:04:25 2013 -0500
    86.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Jan 08 11:39:53 2013 -0800
    86.3 @@ -64,8 +64,10 @@
    86.4  
    86.5  ClassLoaderData * ClassLoaderData::_the_null_class_loader_data = NULL;
    86.6  
    86.7 -ClassLoaderData::ClassLoaderData(Handle h_class_loader) : _class_loader(h_class_loader()),
    86.8 -  _metaspace(NULL), _unloading(false), _keep_alive(false), _klasses(NULL),
    86.9 +ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous) :
   86.10 +  _class_loader(h_class_loader()),
   86.11 +  _is_anonymous(is_anonymous), _keep_alive(is_anonymous), // initially
   86.12 +  _metaspace(NULL), _unloading(false), _klasses(NULL),
   86.13    _claimed(0), _jmethod_ids(NULL), _handles(NULL), _deallocate_list(NULL),
   86.14    _next(NULL), _dependencies(NULL),
   86.15    _metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true)) {
   86.16 @@ -167,16 +169,18 @@
   86.17      ok = (objArrayOop)ok->obj_at(1);
   86.18    }
   86.19  
   86.20 +  // Must handle over GC points
   86.21 +  assert (last != NULL, "dependencies should be initialized");
   86.22 +  objArrayHandle last_handle(THREAD, last);
   86.23 +
   86.24    // Create a new dependency node with fields for (class_loader or mirror, next)
   86.25    objArrayOop deps = oopFactory::new_objectArray(2, CHECK);
   86.26    deps->obj_at_put(0, dependency());
   86.27  
   86.28 -  // Must handle over more GC points
   86.29 +  // Must handle over GC points
   86.30    objArrayHandle new_dependency(THREAD, deps);
   86.31  
   86.32    // Add the dependency under lock
   86.33 -  assert (last != NULL, "dependencies should be initialized");
   86.34 -  objArrayHandle last_handle(THREAD, last);
   86.35    locked_add_dependency(last_handle, new_dependency);
   86.36  }
   86.37  
   86.38 @@ -257,13 +261,6 @@
   86.39    ShouldNotReachHere();   // should have found this class!!
   86.40  }
   86.41  
   86.42 -
   86.43 -bool ClassLoaderData::is_anonymous() const {
   86.44 -  Klass* k = _klasses;
   86.45 -  return (_keep_alive || (k != NULL && k->oop_is_instance() &&
   86.46 -          InstanceKlass::cast(k)->is_anonymous()));
   86.47 -}
   86.48 -
   86.49  void ClassLoaderData::unload() {
   86.50    _unloading = true;
   86.51  
   86.52 @@ -396,8 +393,7 @@
   86.53  // These anonymous class loaders are to contain classes used for JSR292
   86.54  ClassLoaderData* ClassLoaderData::anonymous_class_loader_data(oop loader, TRAPS) {
   86.55    // Add a new class loader data to the graph.
   86.56 -  ClassLoaderData* cld = ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
   86.57 -  return cld;
   86.58 +  return ClassLoaderDataGraph::add(NULL, loader, CHECK_NULL);
   86.59  }
   86.60  
   86.61  const char* ClassLoaderData::loader_name() {
   86.62 @@ -475,7 +471,9 @@
   86.63    // Create one.
   86.64    ClassLoaderData* *list_head = &_head;
   86.65    ClassLoaderData* next = _head;
   86.66 -  ClassLoaderData* cld = new ClassLoaderData(loader);
   86.67 +
   86.68 +  bool is_anonymous = (cld_addr == NULL);
   86.69 +  ClassLoaderData* cld = new ClassLoaderData(loader, is_anonymous);
   86.70  
   86.71    if (cld_addr != NULL) {
   86.72      // First, Atomically set it
   86.73 @@ -485,10 +483,6 @@
   86.74        // Returns the data.
   86.75        return old;
   86.76      }
   86.77 -  } else {
   86.78 -    // Disallow unloading for this CLD during initialization if there is no
   86.79 -    // class_loader oop to link this to.
   86.80 -    cld->set_keep_alive(true);
   86.81    }
   86.82  
   86.83    // We won the race, and therefore the task of adding the data to the list of
    87.1 --- a/src/share/vm/classfile/classLoaderData.hpp	Tue Jan 08 14:04:25 2013 -0500
    87.2 +++ b/src/share/vm/classfile/classLoaderData.hpp	Tue Jan 08 11:39:53 2013 -0800
    87.3 @@ -109,6 +109,7 @@
    87.4    Mutex* _metaspace_lock;  // Locks the metaspace for allocations and setup.
    87.5    bool _unloading;         // true if this class loader goes away
    87.6    bool _keep_alive;        // if this CLD can be unloaded for anonymous loaders
    87.7 +  bool _is_anonymous;      // if this CLD is for an anonymous class
    87.8    volatile int _claimed;   // true if claimed, for example during GC traces.
    87.9                             // To avoid applying oop closure more than once.
   87.10                             // Has to be an int because we cas it.
   87.11 @@ -139,7 +140,7 @@
   87.12    void set_next(ClassLoaderData* next) { _next = next; }
   87.13    ClassLoaderData* next() const        { return _next; }
   87.14  
   87.15 -  ClassLoaderData(Handle h_class_loader);
   87.16 +  ClassLoaderData(Handle h_class_loader, bool is_anonymous);
   87.17    ~ClassLoaderData();
   87.18  
   87.19    void set_metaspace(Metaspace* m) { _metaspace = m; }
   87.20 @@ -174,12 +175,12 @@
   87.21      return _the_null_class_loader_data;
   87.22    }
   87.23  
   87.24 -  bool is_anonymous() const;
   87.25 +  bool is_anonymous() const { return _is_anonymous; }
   87.26  
   87.27    static void init_null_class_loader_data() {
   87.28      assert(_the_null_class_loader_data == NULL, "cannot initialize twice");
   87.29      assert(ClassLoaderDataGraph::_head == NULL, "cannot initialize twice");
   87.30 -    _the_null_class_loader_data = new ClassLoaderData((oop)NULL);
   87.31 +    _the_null_class_loader_data = new ClassLoaderData((oop)NULL, false);
   87.32      ClassLoaderDataGraph::_head = _the_null_class_loader_data;
   87.33      assert(_the_null_class_loader_data->is_the_null_class_loader_data(), "Must be");
   87.34      if (DumpSharedSpaces) {
    88.1 --- a/src/share/vm/classfile/javaClasses.cpp	Tue Jan 08 14:04:25 2013 -0500
    88.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Tue Jan 08 11:39:53 2013 -0800
    88.3 @@ -327,14 +327,14 @@
    88.4    return result;
    88.5  }
    88.6  
    88.7 -unsigned int java_lang_String::to_hash(oop java_string) {
    88.8 +unsigned int java_lang_String::hash_code(oop java_string) {
    88.9    int          length = java_lang_String::length(java_string);
   88.10 -  // Zero length string will hash to zero with String.toHash() function.
   88.11 +  // Zero length string will hash to zero with String.hashCode() function.
   88.12    if (length == 0) return 0;
   88.13  
   88.14    typeArrayOop value  = java_lang_String::value(java_string);
   88.15    int          offset = java_lang_String::offset(java_string);
   88.16 -  return java_lang_String::to_hash(value->char_at_addr(offset), length);
   88.17 +  return java_lang_String::hash_code(value->char_at_addr(offset), length);
   88.18  }
   88.19  
   88.20  char* java_lang_String::as_quoted_ascii(oop java_string) {
   88.21 @@ -1813,10 +1813,12 @@
   88.22    annotations_offset = -1;
   88.23    parameter_annotations_offset = -1;
   88.24    annotation_default_offset = -1;
   88.25 +  type_annotations_offset = -1;
   88.26    compute_optional_offset(signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
   88.27    compute_optional_offset(annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
   88.28    compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
   88.29    compute_optional_offset(annotation_default_offset,    k, vmSymbols::annotation_default_name(),    vmSymbols::byte_array_signature());
   88.30 +  compute_optional_offset(type_annotations_offset,      k, vmSymbols::type_annotations_name(),      vmSymbols::byte_array_signature());
   88.31  }
   88.32  
   88.33  Handle java_lang_reflect_Method::create(TRAPS) {
   88.34 @@ -1962,6 +1964,22 @@
   88.35    method->obj_field_put(annotation_default_offset, value);
   88.36  }
   88.37  
   88.38 +bool java_lang_reflect_Method::has_type_annotations_field() {
   88.39 +  return (type_annotations_offset >= 0);
   88.40 +}
   88.41 +
   88.42 +oop java_lang_reflect_Method::type_annotations(oop method) {
   88.43 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
   88.44 +  assert(has_type_annotations_field(), "type_annotations field must be present");
   88.45 +  return method->obj_field(type_annotations_offset);
   88.46 +}
   88.47 +
   88.48 +void java_lang_reflect_Method::set_type_annotations(oop method, oop value) {
   88.49 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
   88.50 +  assert(has_type_annotations_field(), "type_annotations field must be present");
   88.51 +  method->obj_field_put(type_annotations_offset, value);
   88.52 +}
   88.53 +
   88.54  void java_lang_reflect_Constructor::compute_offsets() {
   88.55    Klass* k = SystemDictionary::reflect_Constructor_klass();
   88.56    compute_offset(clazz_offset,          k, vmSymbols::clazz_name(),          vmSymbols::class_signature());
   88.57 @@ -1973,9 +1991,11 @@
   88.58    signature_offset = -1;
   88.59    annotations_offset = -1;
   88.60    parameter_annotations_offset = -1;
   88.61 +  type_annotations_offset = -1;
   88.62    compute_optional_offset(signature_offset,             k, vmSymbols::signature_name(),             vmSymbols::string_signature());
   88.63    compute_optional_offset(annotations_offset,           k, vmSymbols::annotations_name(),           vmSymbols::byte_array_signature());
   88.64    compute_optional_offset(parameter_annotations_offset, k, vmSymbols::parameter_annotations_name(), vmSymbols::byte_array_signature());
   88.65 +  compute_optional_offset(type_annotations_offset,      k, vmSymbols::type_annotations_name(),      vmSymbols::byte_array_signature());
   88.66  }
   88.67  
   88.68  Handle java_lang_reflect_Constructor::create(TRAPS) {
   88.69 @@ -2086,6 +2106,22 @@
   88.70    method->obj_field_put(parameter_annotations_offset, value);
   88.71  }
   88.72  
   88.73 +bool java_lang_reflect_Constructor::has_type_annotations_field() {
   88.74 +  return (type_annotations_offset >= 0);
   88.75 +}
   88.76 +
   88.77 +oop java_lang_reflect_Constructor::type_annotations(oop constructor) {
   88.78 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
   88.79 +  assert(has_type_annotations_field(), "type_annotations field must be present");
   88.80 +  return constructor->obj_field(type_annotations_offset);
   88.81 +}
   88.82 +
   88.83 +void java_lang_reflect_Constructor::set_type_annotations(oop constructor, oop value) {
   88.84 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
   88.85 +  assert(has_type_annotations_field(), "type_annotations field must be present");
   88.86 +  constructor->obj_field_put(type_annotations_offset, value);
   88.87 +}
   88.88 +
   88.89  void java_lang_reflect_Field::compute_offsets() {
   88.90    Klass* k = SystemDictionary::reflect_Field_klass();
   88.91    compute_offset(clazz_offset,     k, vmSymbols::clazz_name(),     vmSymbols::class_signature());
   88.92 @@ -2096,8 +2132,10 @@
   88.93    // The generic signature and annotations fields are only present in 1.5
   88.94    signature_offset = -1;
   88.95    annotations_offset = -1;
   88.96 +  type_annotations_offset = -1;
   88.97    compute_optional_offset(signature_offset, k, vmSymbols::signature_name(), vmSymbols::string_signature());
   88.98    compute_optional_offset(annotations_offset,  k, vmSymbols::annotations_name(),  vmSymbols::byte_array_signature());
   88.99 +  compute_optional_offset(type_annotations_offset,  k, vmSymbols::type_annotations_name(),  vmSymbols::byte_array_signature());
  88.100  }
  88.101  
  88.102  Handle java_lang_reflect_Field::create(TRAPS) {
  88.103 @@ -2192,6 +2230,21 @@
  88.104    field->obj_field_put(annotations_offset, value);
  88.105  }
  88.106  
  88.107 +bool java_lang_reflect_Field::has_type_annotations_field() {
  88.108 +  return (type_annotations_offset >= 0);
  88.109 +}
  88.110 +
  88.111 +oop java_lang_reflect_Field::type_annotations(oop field) {
  88.112 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
  88.113 +  assert(has_type_annotations_field(), "type_annotations field must be present");
  88.114 +  return field->obj_field(type_annotations_offset);
  88.115 +}
  88.116 +
  88.117 +void java_lang_reflect_Field::set_type_annotations(oop field, oop value) {
  88.118 +  assert(Universe::is_fully_initialized(), "Need to find another solution to the reflection problem");
  88.119 +  assert(has_type_annotations_field(), "type_annotations field must be present");
  88.120 +  field->obj_field_put(type_annotations_offset, value);
  88.121 +}
  88.122  
  88.123  void sun_reflect_ConstantPool::compute_offsets() {
  88.124    Klass* k = SystemDictionary::reflect_ConstantPool_klass();
  88.125 @@ -2857,6 +2910,7 @@
  88.126  int java_lang_reflect_Method::annotations_offset;
  88.127  int java_lang_reflect_Method::parameter_annotations_offset;
  88.128  int java_lang_reflect_Method::annotation_default_offset;
  88.129 +int java_lang_reflect_Method::type_annotations_offset;
  88.130  int java_lang_reflect_Constructor::clazz_offset;
  88.131  int java_lang_reflect_Constructor::parameterTypes_offset;
  88.132  int java_lang_reflect_Constructor::exceptionTypes_offset;
  88.133 @@ -2865,6 +2919,7 @@
  88.134  int java_lang_reflect_Constructor::signature_offset;
  88.135  int java_lang_reflect_Constructor::annotations_offset;
  88.136  int java_lang_reflect_Constructor::parameter_annotations_offset;
  88.137 +int java_lang_reflect_Constructor::type_annotations_offset;
  88.138  int java_lang_reflect_Field::clazz_offset;
  88.139  int java_lang_reflect_Field::name_offset;
  88.140  int java_lang_reflect_Field::type_offset;
  88.141 @@ -2872,6 +2927,7 @@
  88.142  int java_lang_reflect_Field::modifiers_offset;
  88.143  int java_lang_reflect_Field::signature_offset;
  88.144  int java_lang_reflect_Field::annotations_offset;
  88.145 +int java_lang_reflect_Field::type_annotations_offset;
  88.146  int java_lang_boxing_object::value_offset;
  88.147  int java_lang_boxing_object::long_value_offset;
  88.148  int java_lang_ref_Reference::referent_offset;
    89.1 --- a/src/share/vm/classfile/javaClasses.hpp	Tue Jan 08 14:04:25 2013 -0500
    89.2 +++ b/src/share/vm/classfile/javaClasses.hpp	Tue Jan 08 11:39:53 2013 -0800
    89.3 @@ -166,8 +166,8 @@
    89.4    // objects in the shared archive file.
    89.5    // hash P(31) from Kernighan & Ritchie
    89.6    //
    89.7 -  // For this reason, THIS ALGORITHM MUST MATCH String.toHash().
    89.8 -  template <typename T> static unsigned int to_hash(T* s, int len) {
    89.9 +  // For this reason, THIS ALGORITHM MUST MATCH String.hashCode().
   89.10 +  template <typename T> static unsigned int hash_code(T* s, int len) {
   89.11      unsigned int h = 0;
   89.12      while (len-- > 0) {
   89.13        h = 31*h + (unsigned int) *s;
   89.14 @@ -175,10 +175,10 @@
   89.15      }
   89.16      return h;
   89.17    }
   89.18 -  static unsigned int to_hash(oop java_string);
   89.19 +  static unsigned int hash_code(oop java_string);
   89.20  
   89.21    // This is the string hash code used by the StringTable, which may be
   89.22 -  // the same as String.toHash or an alternate hash code.
   89.23 +  // the same as String.hashCode or an alternate hash code.
   89.24    static unsigned int hash_string(oop java_string);
   89.25  
   89.26    static bool equals(oop java_string, jchar* chars, int len);
   89.27 @@ -554,6 +554,7 @@
   89.28    static int annotations_offset;
   89.29    static int parameter_annotations_offset;
   89.30    static int annotation_default_offset;
   89.31 +  static int type_annotations_offset;
   89.32  
   89.33    static void compute_offsets();
   89.34  
   89.35 @@ -599,6 +600,10 @@
   89.36    static oop annotation_default(oop method);
   89.37    static void set_annotation_default(oop method, oop value);
   89.38  
   89.39 +  static bool has_type_annotations_field();
   89.40 +  static oop type_annotations(oop method);
   89.41 +  static void set_type_annotations(oop method, oop value);
   89.42 +
   89.43    // Debugging
   89.44    friend class JavaClasses;
   89.45  };
   89.46 @@ -618,6 +623,7 @@
   89.47    static int signature_offset;
   89.48    static int annotations_offset;
   89.49    static int parameter_annotations_offset;
   89.50 +  static int type_annotations_offset;
   89.51  
   89.52    static void compute_offsets();
   89.53  
   89.54 @@ -653,6 +659,10 @@
   89.55    static oop parameter_annotations(oop method);
   89.56    static void set_parameter_annotations(oop method, oop value);
   89.57  
   89.58 +  static bool has_type_annotations_field();
   89.59 +  static oop type_annotations(oop constructor);
   89.60 +  static void set_type_annotations(oop constructor, oop value);
   89.61 +
   89.62    // Debugging
   89.63    friend class JavaClasses;
   89.64  };
   89.65 @@ -671,6 +681,7 @@
   89.66    static int modifiers_offset;
   89.67    static int signature_offset;
   89.68    static int annotations_offset;
   89.69 +  static int type_annotations_offset;
   89.70  
   89.71    static void compute_offsets();
   89.72  
   89.73 @@ -710,6 +721,10 @@
   89.74    static oop annotation_default(oop method);
   89.75    static void set_annotation_default(oop method, oop value);
   89.76  
   89.77 +  static bool has_type_annotations_field();
   89.78 +  static oop type_annotations(oop field);
   89.79 +  static void set_type_annotations(oop field, oop value);
   89.80 +
   89.81    // Debugging
   89.82    friend class JavaClasses;
   89.83  };
    90.1 --- a/src/share/vm/classfile/symbolTable.cpp	Tue Jan 08 14:04:25 2013 -0500
    90.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Tue Jan 08 11:39:53 2013 -0800
    90.3 @@ -179,7 +179,7 @@
    90.4  unsigned int SymbolTable::hash_symbol(const char* s, int len) {
    90.5    return use_alternate_hashcode() ?
    90.6             AltHashing::murmur3_32(seed(), (const jbyte*)s, len) :
    90.7 -           java_lang_String::to_hash(s, len);
    90.8 +           java_lang_String::hash_code(s, len);
    90.9  }
   90.10  
   90.11  
   90.12 @@ -617,7 +617,7 @@
   90.13  // Pick hashing algorithm
   90.14  unsigned int StringTable::hash_string(const jchar* s, int len) {
   90.15    return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
   90.16 -                                    java_lang_String::to_hash(s, len);
   90.17 +                                    java_lang_String::hash_code(s, len);
   90.18  }
   90.19  
   90.20  oop StringTable::lookup(int index, jchar* name,
    91.1 --- a/src/share/vm/classfile/vmSymbols.hpp	Tue Jan 08 14:04:25 2013 -0500
    91.2 +++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Jan 08 11:39:53 2013 -0800
    91.3 @@ -136,6 +136,8 @@
    91.4    template(tag_runtime_visible_parameter_annotations, "RuntimeVisibleParameterAnnotations")       \
    91.5    template(tag_runtime_invisible_parameter_annotations,"RuntimeInvisibleParameterAnnotations")    \
    91.6    template(tag_annotation_default,                    "AnnotationDefault")                        \
    91.7 +  template(tag_runtime_visible_type_annotations,      "RuntimeVisibleTypeAnnotations")            \
    91.8 +  template(tag_runtime_invisible_type_annotations,    "RuntimeInvisibleTypeAnnotations")          \
    91.9    template(tag_enclosing_method,                      "EnclosingMethod")                          \
   91.10    template(tag_bootstrap_methods,                     "BootstrapMethods")                         \
   91.11                                                                                                    \
   91.12 @@ -239,6 +241,9 @@
   91.13    template(ConstantPool_name,                         "constantPoolOop")                          \
   91.14    template(sun_reflect_UnsafeStaticFieldAccessorImpl, "sun/reflect/UnsafeStaticFieldAccessorImpl")\
   91.15    template(base_name,                                 "base")                                     \
   91.16 +  /* Type Annotations (JDK 8 and above) */                                                        \
   91.17 +  template(type_annotations_name,                     "typeAnnotations")                          \
   91.18 +                                                                                                  \
   91.19                                                                                                    \
   91.20    /* Support for JSR 292 & invokedynamic (JDK 1.7 and above) */                                   \
   91.21    template(java_lang_invoke_CallSite,                 "java/lang/invoke/CallSite")                \
   91.22 @@ -756,6 +761,15 @@
   91.23    do_intrinsic(_unpark,                   sun_misc_Unsafe,        unpark_name, unpark_signature,                 F_RN)  \
   91.24     do_name(     unpark_name,                                     "unpark")                                              \
   91.25     do_alias(    unpark_signature,                               /*(LObject;)V*/ object_void_signature)                  \
   91.26 +  do_intrinsic(_loadFence,                sun_misc_Unsafe,        loadFence_name, loadFence_signature,           F_RN)  \
   91.27 +   do_name(     loadFence_name,                                  "loadFence")                                           \
   91.28 +   do_alias(    loadFence_signature,                              void_method_signature)                                \
   91.29 +  do_intrinsic(_storeFence,               sun_misc_Unsafe,        storeFence_name, storeFence_signature,         F_RN)  \
   91.30 +   do_name(     storeFence_name,                                 "storeFence")                                          \
   91.31 +   do_alias(    storeFence_signature,                             void_method_signature)                                \
   91.32 +  do_intrinsic(_fullFence,                sun_misc_Unsafe,        fullFence_name, fullFence_signature,           F_RN)  \
   91.33 +   do_name(     fullFence_name,                                  "fullFence")                                           \
   91.34 +   do_alias(    fullFence_signature,                              void_method_signature)                                \
   91.35                                                                                                                          \
   91.36    /* unsafe memory references (there are a lot of them...) */                                                           \
   91.37    do_signature(getObject_signature,       "(Ljava/lang/Object;J)Ljava/lang/Object;")                                    \
   91.38 @@ -897,12 +911,14 @@
   91.39    do_intrinsic(_getAndAddLong,            sun_misc_Unsafe,        getAndAddLong_name, getAndAddLong_signature, F_R)     \
   91.40     do_name(     getAndAddLong_name,                               "getAndAddLong")                                      \
   91.41     do_signature(getAndAddLong_signature,                          "(Ljava/lang/Object;JJ)J" )                           \
   91.42 -  do_intrinsic(_getAndSetInt,             sun_misc_Unsafe,        getAndSet_name, getAndSetInt_signature, F_R)          \
   91.43 -   do_name(     getAndSet_name,                                   "getAndSet")                                          \
   91.44 +  do_intrinsic(_getAndSetInt,             sun_misc_Unsafe,        getAndSetInt_name, getAndSetInt_signature, F_R)       \
   91.45 +   do_name(     getAndSetInt_name,                                "getAndSetInt")                                       \
   91.46     do_alias(    getAndSetInt_signature,                         /*"(Ljava/lang/Object;JI)I"*/ getAndAddInt_signature)   \
   91.47 -  do_intrinsic(_getAndSetLong,            sun_misc_Unsafe,        getAndSet_name, getAndSetLong_signature, F_R)         \
   91.48 +  do_intrinsic(_getAndSetLong,            sun_misc_Unsafe,        getAndSetLong_name, getAndSetLong_signature, F_R)     \
   91.49 +   do_name(     getAndSetLong_name,                               "getAndSetLong")                                      \
   91.50     do_alias(    getAndSetLong_signature,                        /*"(Ljava/lang/Object;JJ)J"*/ getAndAddLong_signature)  \
   91.51 -  do_intrinsic(_getAndSetObject,          sun_misc_Unsafe,        getAndSet_name, getAndSetObject_signature,  F_R)      \
   91.52 +  do_intrinsic(_getAndSetObject,          sun_misc_Unsafe,        getAndSetObject_name, getAndSetObject_signature,  F_R)\
   91.53 +   do_name(     getAndSetObject_name,                             "getAndSetObject")                                    \
   91.54     do_signature(getAndSetObject_signature,                        "(Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object;" ) \
   91.55                                                                                                                          \
   91.56    /* prefetch_signature is shared by all prefetch variants */                                                           \
    92.1 --- a/src/share/vm/compiler/compileBroker.cpp	Tue Jan 08 14:04:25 2013 -0500
    92.2 +++ b/src/share/vm/compiler/compileBroker.cpp	Tue Jan 08 11:39:53 2013 -0800
    92.3 @@ -269,12 +269,10 @@
    92.4                               const char* comment,
    92.5                               bool is_blocking) {
    92.6    assert(!_lock->is_locked(), "bad locking");
    92.7 -  InstanceKlass* holder = method->method_holder();
    92.8  
    92.9    _compile_id = compile_id;
   92.10    _method = method();
   92.11 -  _method_holder = JNIHandles::make_global(
   92.12 -        holder->is_anonymous() ? holder->java_mirror(): holder->class_loader());
   92.13 +  _method_holder = JNIHandles::make_global(method->method_holder()->klass_holder());
   92.14    _osr_bci = osr_bci;
   92.15    _is_blocking = is_blocking;
   92.16    _comp_level = comp_level;
   92.17 @@ -298,10 +296,7 @@
   92.18        } else {
   92.19          _hot_method = hot_method();
   92.20          // only add loader or mirror if different from _method_holder
   92.21 -        InstanceKlass* hot_holder = hot_method->method_holder();
   92.22 -        _hot_method_holder = JNIHandles::make_global(
   92.23 -               hot_holder->is_anonymous() ? hot_holder->java_mirror() :
   92.24 -                                            hot_holder->class_loader());
   92.25 +        _hot_method_holder = JNIHandles::make_global(hot_method->method_holder()->klass_holder());
   92.26        }
   92.27      }
   92.28    }
    93.1 --- a/src/share/vm/compiler/compilerOracle.cpp	Tue Jan 08 14:04:25 2013 -0500
    93.2 +++ b/src/share/vm/compiler/compilerOracle.cpp	Tue Jan 08 11:39:53 2013 -0800
    93.3 @@ -538,6 +538,7 @@
    93.4  
    93.5    if (match != NULL) {
    93.6      if (!_quiet) {
    93.7 +      ResourceMark rm;
    93.8        tty->print("CompilerOracle: %s ", command_names[command]);
    93.9        match->print();
   93.10      }
    94.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 08 14:04:25 2013 -0500
    94.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Jan 08 11:39:53 2013 -0800
    94.3 @@ -46,27 +46,11 @@
    94.4  
    94.5  // Concurrent marking bit map wrapper
    94.6  
    94.7 -CMBitMapRO::CMBitMapRO(ReservedSpace rs, int shifter) :
    94.8 -  _bm((uintptr_t*)NULL,0),
    94.9 +CMBitMapRO::CMBitMapRO(int shifter) :
   94.10 +  _bm(),
   94.11    _shifter(shifter) {
   94.12 -  _bmStartWord = (HeapWord*)(rs.base());
   94.13 -  _bmWordSize  = rs.size()/HeapWordSize;    // rs.size() is in bytes
   94.14 -  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
   94.15 -                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
   94.16 -
   94.17 -  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
   94.18 -
   94.19 -  guarantee(brs.is_reserved(), "couldn't allocate concurrent marking bit map");
   94.20 -  // For now we'll just commit all of the bit map up fromt.
   94.21 -  // Later on we'll try to be more parsimonious with swap.
   94.22 -  guarantee(_virtual_space.initialize(brs, brs.size()),
   94.23 -            "couldn't reseve backing store for concurrent marking bit map");
   94.24 -  assert(_virtual_space.committed_size() == brs.size(),
   94.25 -         "didn't reserve backing store for all of concurrent marking bit map?");
   94.26 -  _bm.set_map((uintptr_t*)_virtual_space.low());
   94.27 -  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
   94.28 -         _bmWordSize, "inconsistency in bit map sizing");
   94.29 -  _bm.set_size(_bmWordSize >> _shifter);
   94.30 +  _bmStartWord = 0;
   94.31 +  _bmWordSize = 0;
   94.32  }
   94.33  
   94.34  HeapWord* CMBitMapRO::getNextMarkedWordAddress(HeapWord* addr,
   94.35 @@ -108,15 +92,40 @@
   94.36  }
   94.37  
   94.38  #ifndef PRODUCT
   94.39 -bool CMBitMapRO::covers(ReservedSpace rs) const {
   94.40 +bool CMBitMapRO::covers(ReservedSpace heap_rs) const {
   94.41    // assert(_bm.map() == _virtual_space.low(), "map inconsistency");
   94.42    assert(((size_t)_bm.size() * ((size_t)1 << _shifter)) == _bmWordSize,
   94.43           "size inconsistency");
   94.44 -  return _bmStartWord == (HeapWord*)(rs.base()) &&
   94.45 -         _bmWordSize  == rs.size()>>LogHeapWordSize;
   94.46 +  return _bmStartWord == (HeapWord*)(heap_rs.base()) &&
   94.47 +         _bmWordSize  == heap_rs.size()>>LogHeapWordSize;
   94.48  }
   94.49  #endif
   94.50  
   94.51 +bool CMBitMap::allocate(ReservedSpace heap_rs) {
   94.52 +  _bmStartWord = (HeapWord*)(heap_rs.base());
   94.53 +  _bmWordSize  = heap_rs.size()/HeapWordSize;    // heap_rs.size() is in bytes
   94.54 +  ReservedSpace brs(ReservedSpace::allocation_align_size_up(
   94.55 +                     (_bmWordSize >> (_shifter + LogBitsPerByte)) + 1));
   94.56 +  if (!brs.is_reserved()) {
   94.57 +    warning("ConcurrentMark marking bit map allocation failure");
   94.58 +    return false;
   94.59 +  }
   94.60 +  MemTracker::record_virtual_memory_type((address)brs.base(), mtGC);
   94.61 +  // For now we'll just commit all of the bit map up front.
   94.62 +  // Later on we'll try to be more parsimonious with swap.
   94.63 +  if (!_virtual_space.initialize(brs, brs.size())) {
   94.64 +    warning("ConcurrentMark marking bit map backing store failure");
   94.65 +    return false;
   94.66 +  }
   94.67 +  assert(_virtual_space.committed_size() == brs.size(),
   94.68 +         "didn't reserve backing store for all of concurrent marking bit map?");
   94.69 +  _bm.set_map((uintptr_t*)_virtual_space.low());
   94.70 +  assert(_virtual_space.committed_size() << (_shifter + LogBitsPerByte) >=
   94.71 +         _bmWordSize, "inconsistency in bit map sizing");
   94.72 +  _bm.set_size(_bmWordSize >> _shifter);
   94.73 +  return true;
   94.74 +}
   94.75 +
   94.76  void CMBitMap::clearAll() {
   94.77    _bm.clear();
   94.78    return;
   94.79 @@ -163,20 +172,79 @@
   94.80  #endif
   94.81  {}
   94.82  
   94.83 -void CMMarkStack::allocate(size_t size) {
   94.84 -  _base = NEW_C_HEAP_ARRAY(oop, size, mtGC);
   94.85 -  if (_base == NULL) {
   94.86 -    vm_exit_during_initialization("Failed to allocate CM region mark stack");
   94.87 +bool CMMarkStack::allocate(size_t capacity) {
   94.88 +  // allocate a stack of the requisite depth
   94.89 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(capacity * sizeof(oop)));
   94.90 +  if (!rs.is_reserved()) {
   94.91 +    warning("ConcurrentMark MarkStack allocation failure");
   94.92 +    return false;
   94.93    }
   94.94 -  _index = 0;
   94.95 -  _capacity = (jint) size;
   94.96 +  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
   94.97 +  if (!_virtual_space.initialize(rs, rs.size())) {
   94.98 +    warning("ConcurrentMark MarkStack backing store failure");
   94.99 +    // Release the virtual memory reserved for the marking stack
  94.100 +    rs.release();
  94.101 +    return false;
  94.102 +  }
  94.103 +  assert(_virtual_space.committed_size() == rs.size(),
  94.104 +         "Didn't reserve backing store for all of ConcurrentMark stack?");
  94.105 +  _base = (oop*) _virtual_space.low();
  94.106 +  setEmpty();
  94.107 +  _capacity = (jint) capacity;
  94.108    _saved_index = -1;
  94.109    NOT_PRODUCT(_max_depth = 0);
  94.110 +  return true;
  94.111 +}
  94.112 +
  94.113 +void CMMarkStack::expand() {
  94.114 +  // Called, during remark, if we've overflown the marking stack during marking.
  94.115 +  assert(isEmpty(), "stack should been emptied while handling overflow");
  94.116 +  assert(_capacity <= (jint) MarkStackSizeMax, "stack bigger than permitted");
  94.117 +  // Clear expansion flag
  94.118 +  _should_expand = false;
  94.119 +  if (_capacity == (jint) MarkStackSizeMax) {
  94.120 +    if (PrintGCDetails && Verbose) {
  94.121 +      gclog_or_tty->print_cr(" (benign) Can't expand marking stack capacity, at max size limit");
  94.122 +    }
  94.123 +    return;
  94.124 +  }
  94.125 +  // Double capacity if possible
  94.126 +  jint new_capacity = MIN2(_capacity*2, (jint) MarkStackSizeMax);
  94.127 +  // Do not give up existing stack until we have managed to
  94.128 +  // get the double capacity that we desired.
  94.129 +  ReservedSpace rs(ReservedSpace::allocation_align_size_up(new_capacity *
  94.130 +                                                           sizeof(oop)));
  94.131 +  if (rs.is_reserved()) {
  94.132 +    // Release the backing store associated with old stack
  94.133 +    _virtual_space.release();
  94.134 +    // Reinitialize virtual space for new stack
  94.135 +    if (!_virtual_space.initialize(rs, rs.size())) {
  94.136 +      fatal("Not enough swap for expanded marking stack capacity");
  94.137 +    }
  94.138 +    _base = (oop*)(_virtual_space.low());
  94.139 +    _index = 0;
  94.140 +    _capacity = new_capacity;
  94.141 +  } else {
  94.142 +    if (PrintGCDetails && Verbose) {
  94.143 +      // Failed to double capacity, continue;
  94.144 +      gclog_or_tty->print(" (benign) Failed to expand marking stack capacity from "
  94.145 +                          SIZE_FORMAT"K to " SIZE_FORMAT"K",
  94.146 +                          _capacity / K, new_capacity / K);
  94.147 +    }
  94.148 +  }
  94.149 +}
  94.150 +
  94.151 +void CMMarkStack::set_should_expand() {
  94.152 +  // If we're resetting the marking state because of an
  94.153 +  // marking stack overflow, record that we should, if
  94.154 +  // possible, expand the stack.
  94.155 +  _should_expand = _cm->has_overflown();
  94.156  }
  94.157  
  94.158  CMMarkStack::~CMMarkStack() {
  94.159    if (_base != NULL) {
  94.160 -    FREE_C_HEAP_ARRAY(oop, _base, mtGC);
  94.161 +    _base = NULL;
  94.162 +    _virtual_space.release();
  94.163    }
  94.164  }
  94.165  
  94.166 @@ -217,7 +285,7 @@
  94.167      jint res = Atomic::cmpxchg(next_index, &_index, index);
  94.168      if (res == index) {
  94.169        for (int i = 0; i < n; i++) {
  94.170 -        int ind = index + i;
  94.171 +        int  ind = index + i;
  94.172          assert(ind < _capacity, "By overflow test above.");
  94.173          _base[ind] = ptr_arr[i];
  94.174        }
  94.175 @@ -228,7 +296,6 @@
  94.176    }
  94.177  }
  94.178  
  94.179 -
  94.180  void CMMarkStack::par_push_arr(oop* ptr_arr, int n) {
  94.181    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  94.182    jint start = _index;
  94.183 @@ -244,9 +311,9 @@
  94.184      assert(ind < _capacity, "By overflow test above.");
  94.185      _base[ind] = ptr_arr[i];
  94.186    }
  94.187 +  NOT_PRODUCT(_max_depth = MAX2(_max_depth, next_index));
  94.188  }
  94.189  
  94.190 -
  94.191  bool CMMarkStack::par_pop_arr(oop* ptr_arr, int max, int* n) {
  94.192    MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
  94.193    jint index = _index;
  94.194 @@ -255,7 +322,7 @@
  94.195      return false;
  94.196    } else {
  94.197      int k = MIN2(max, index);
  94.198 -    jint new_ind = index - k;
  94.199 +    jint  new_ind = index - k;
  94.200      for (int j = 0; j < k; j++) {
  94.201        ptr_arr[j] = _base[new_ind + j];
  94.202      }
  94.203 @@ -404,9 +471,10 @@
  94.204    return MAX2((n_par_threads + 2) / 4, 1U);
  94.205  }
  94.206  
  94.207 -ConcurrentMark::ConcurrentMark(ReservedSpace rs, uint max_regions) :
  94.208 -  _markBitMap1(rs, MinObjAlignment - 1),
  94.209 -  _markBitMap2(rs, MinObjAlignment - 1),
  94.210 +ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
  94.211 +  _g1h(g1h),
  94.212 +  _markBitMap1(MinObjAlignment - 1),
  94.213 +  _markBitMap2(MinObjAlignment - 1),
  94.214  
  94.215    _parallel_marking_threads(0),
  94.216    _max_parallel_marking_threads(0),
  94.217 @@ -415,10 +483,10 @@
  94.218    _cleanup_sleep_factor(0.0),
  94.219    _cleanup_task_overhead(1.0),
  94.220    _cleanup_list("Cleanup List"),
  94.221 -  _region_bm((BitMap::idx_t) max_regions, false /* in_resource_area*/),
  94.222 -  _card_bm((rs.size() + CardTableModRefBS::card_size - 1) >>
  94.223 -           CardTableModRefBS::card_shift,
  94.224 -           false /* in_resource_area*/),
  94.225 +  _region_bm((BitMap::idx_t)(g1h->max_regions()), false /* in_resource_area*/),
  94.226 +  _card_bm((heap_rs.size() + CardTableModRefBS::card_size - 1) >>
  94.227 +            CardTableModRefBS::card_shift,
  94.228 +            false /* in_resource_area*/),
  94.229  
  94.230    _prevMarkBitMap(&_markBitMap1),
  94.231    _nextMarkBitMap(&_markBitMap2),
  94.232 @@ -449,7 +517,8 @@
  94.233    _parallel_workers(NULL),
  94.234  
  94.235    _count_card_bitmaps(NULL),
  94.236 -  _count_marked_bytes(NULL) {
  94.237 +  _count_marked_bytes(NULL),
  94.238 +  _completed_initialization(false) {
  94.239    CMVerboseLevel verbose_level = (CMVerboseLevel) G1MarkingVerboseLevel;
  94.240    if (verbose_level < no_verbose) {
  94.241      verbose_level = no_verbose;
  94.242 @@ -464,61 +533,34 @@
  94.243                             "heap end = "PTR_FORMAT, _heap_start, _heap_end);
  94.244    }
  94.245  
  94.246 -  _markStack.allocate(MarkStackSize);
  94.247 +  if (!_markBitMap1.allocate(heap_rs)) {
  94.248 +    warning("Failed to allocate first CM bit map");
  94.249 +    return;
  94.250 +  }
  94.251 +  if (!_markBitMap2.allocate(heap_rs)) {
  94.252 +    warning("Failed to allocate second CM bit map");
  94.253 +    return;
  94.254 +  }
  94.255  
  94.256    // Create & start a ConcurrentMark thread.
  94.257    _cmThread = new ConcurrentMarkThread(this);
  94.258    assert(cmThread() != NULL, "CM Thread should have been created");
  94.259    assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
  94.260  
  94.261 -  _g1h = G1CollectedHeap::heap();
  94.262    assert(CGC_lock != NULL, "Where's the CGC_lock?");
  94.263 -  assert(_markBitMap1.covers(rs), "_markBitMap1 inconsistency");
  94.264 -  assert(_markBitMap2.covers(rs), "_markBitMap2 inconsistency");
  94.265 +  assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
  94.266 +  assert(_markBitMap2.covers(heap_rs), "_markBitMap2 inconsistency");
  94.267  
  94.268    SATBMarkQueueSet& satb_qs = JavaThread::satb_mark_queue_set();
  94.269    satb_qs.set_buffer_size(G1SATBBufferSize);
  94.270  
  94.271    _root_regions.init(_g1h, this);
  94.272  
  94.273 -  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  94.274 -  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
  94.275 -
  94.276 -  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  94.277 -  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
  94.278 -
  94.279 -  BitMap::idx_t card_bm_size = _card_bm.size();
  94.280 -
  94.281 -  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  94.282 -  _active_tasks = _max_worker_id;
  94.283 -  for (uint i = 0; i < _max_worker_id; ++i) {
  94.284 -    CMTaskQueue* task_queue = new CMTaskQueue();
  94.285 -    task_queue->initialize();
  94.286 -    _task_queues->register_queue(i, task_queue);
  94.287 -
  94.288 -    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
  94.289 -    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, (size_t) max_regions, mtGC);
  94.290 -
  94.291 -    _tasks[i] = new CMTask(i, this,
  94.292 -                           _count_marked_bytes[i],
  94.293 -                           &_count_card_bitmaps[i],
  94.294 -                           task_queue, _task_queues);
  94.295 -
  94.296 -    _accum_task_vtime[i] = 0.0;
  94.297 -  }
  94.298 -
  94.299 -  // Calculate the card number for the bottom of the heap. Used
  94.300 -  // in biasing indexes into the accounting card bitmaps.
  94.301 -  _heap_bottom_card_num =
  94.302 -    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
  94.303 -                                CardTableModRefBS::card_shift);
  94.304 -
  94.305 -  // Clear all the liveness counting data
  94.306 -  clear_all_count_data();
  94.307 -
  94.308    if (ConcGCThreads > ParallelGCThreads) {
  94.309 -    vm_exit_during_initialization("Can't have more ConcGCThreads "
  94.310 -                                  "than ParallelGCThreads.");
  94.311 +    warning("Can't have more ConcGCThreads (" UINT32_FORMAT ") "
  94.312 +            "than ParallelGCThreads (" UINT32_FORMAT ").",
  94.313 +            ConcGCThreads, ParallelGCThreads);
  94.314 +    return;
  94.315    }
  94.316    if (ParallelGCThreads == 0) {
  94.317      // if we are not running with any parallel GC threads we will not
  94.318 @@ -590,9 +632,86 @@
  94.319      }
  94.320    }
  94.321  
  94.322 +  if (FLAG_IS_DEFAULT(MarkStackSize)) {
  94.323 +    uintx mark_stack_size =
  94.324 +      MIN2(MarkStackSizeMax,
  94.325 +          MAX2(MarkStackSize, (uintx) (parallel_marking_threads() * TASKQUEUE_SIZE)));
  94.326 +    // Verify that the calculated value for MarkStackSize is in range.
  94.327 +    // It would be nice to use the private utility routine from Arguments.
  94.328 +    if (!(mark_stack_size >= 1 && mark_stack_size <= MarkStackSizeMax)) {
  94.329 +      warning("Invalid value calculated for MarkStackSize (" UINTX_FORMAT "): "
  94.330 +              "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
  94.331 +              mark_stack_size, 1, MarkStackSizeMax);
  94.332 +      return;
  94.333 +    }
  94.334 +    FLAG_SET_ERGO(uintx, MarkStackSize, mark_stack_size);
  94.335 +  } else {
  94.336 +    // Verify MarkStackSize is in range.
  94.337 +    if (FLAG_IS_CMDLINE(MarkStackSize)) {
  94.338 +      if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
  94.339 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
  94.340 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT "): "
  94.341 +                  "must be between " UINTX_FORMAT " and " UINTX_FORMAT,
  94.342 +                  MarkStackSize, 1, MarkStackSizeMax);
  94.343 +          return;
  94.344 +        }
  94.345 +      } else if (FLAG_IS_CMDLINE(MarkStackSizeMax)) {
  94.346 +        if (!(MarkStackSize >= 1 && MarkStackSize <= MarkStackSizeMax)) {
  94.347 +          warning("Invalid value specified for MarkStackSize (" UINTX_FORMAT ")"
  94.348 +                  " or for MarkStackSizeMax (" UINTX_FORMAT ")",
  94.349 +                  MarkStackSize, MarkStackSizeMax);
  94.350 +          return;
  94.351 +        }
  94.352 +      }
  94.353 +    }
  94.354 +  }
  94.355 +
  94.356 +  if (!_markStack.allocate(MarkStackSize)) {
  94.357 +    warning("Failed to allocate CM marking stack");
  94.358 +    return;
  94.359 +  }
  94.360 +
  94.361 +  _tasks = NEW_C_HEAP_ARRAY(CMTask*, _max_worker_id, mtGC);
  94.362 +  _accum_task_vtime = NEW_C_HEAP_ARRAY(double, _max_worker_id, mtGC);
  94.363 +
  94.364 +  _count_card_bitmaps = NEW_C_HEAP_ARRAY(BitMap,  _max_worker_id, mtGC);
  94.365 +  _count_marked_bytes = NEW_C_HEAP_ARRAY(size_t*, _max_worker_id, mtGC);
  94.366 +
  94.367 +  BitMap::idx_t card_bm_size = _card_bm.size();
  94.368 +
  94.369 +  // so that the assertion in MarkingTaskQueue::task_queue doesn't fail
  94.370 +  _active_tasks = _max_worker_id;
  94.371 +
  94.372 +  size_t max_regions = (size_t) _g1h->max_regions();
  94.373 +  for (uint i = 0; i < _max_worker_id; ++i) {
  94.374 +    CMTaskQueue* task_queue = new CMTaskQueue();
  94.375 +    task_queue->initialize();
  94.376 +    _task_queues->register_queue(i, task_queue);
  94.377 +
  94.378 +    _count_card_bitmaps[i] = BitMap(card_bm_size, false);
  94.379 +    _count_marked_bytes[i] = NEW_C_HEAP_ARRAY(size_t, max_regions, mtGC);
  94.380 +
  94.381 +    _tasks[i] = new CMTask(i, this,
  94.382 +                           _count_marked_bytes[i],
  94.383 +                           &_count_card_bitmaps[i],
  94.384 +                           task_queue, _task_queues);
  94.385 +
  94.386 +    _accum_task_vtime[i] = 0.0;
  94.387 +  }
  94.388 +
  94.389 +  // Calculate the card number for the bottom of the heap. Used
  94.390 +  // in biasing indexes into the accounting card bitmaps.
  94.391 +  _heap_bottom_card_num =
  94.392 +    intptr_t(uintptr_t(_g1h->reserved_region().start()) >>
  94.393 +                                CardTableModRefBS::card_shift);
  94.394 +
  94.395 +  // Clear all the liveness counting data
  94.396 +  clear_all_count_data();
  94.397 +
  94.398    // so that the call below can read a sensible value
  94.399 -  _heap_start = (HeapWord*) rs.base();
  94.400 +  _heap_start = (HeapWord*) heap_rs.base();
  94.401    set_non_marking_state();
  94.402 +  _completed_initialization = true;
  94.403  }
  94.404  
  94.405  void ConcurrentMark::update_g1_committed(bool force) {
  94.406 @@ -1165,6 +1284,11 @@
  94.407      assert(!restart_for_overflow(), "sanity");
  94.408    }
  94.409  
  94.410 +  // Expand the marking stack, if we have to and if we can.
  94.411 +  if (_markStack.should_expand()) {
  94.412 +    _markStack.expand();
  94.413 +  }
  94.414 +
  94.415    // Reset the marking state if marking completed
  94.416    if (!restart_for_overflow()) {
  94.417      set_non_marking_state();
  94.418 @@ -2785,7 +2909,7 @@
  94.419      // Verify entries on the task queues
  94.420      for (uint i = 0; i < _max_worker_id; i += 1) {
  94.421        cl.set_phase(VerifyNoCSetOopsQueues, i);
  94.422 -      OopTaskQueue* queue = _task_queues->queue(i);
  94.423 +      CMTaskQueue* queue = _task_queues->queue(i);
  94.424        queue->oops_do(&cl);
  94.425      }
  94.426    }
  94.427 @@ -2840,8 +2964,8 @@
  94.428  #endif // PRODUCT
  94.429  
  94.430  void ConcurrentMark::clear_marking_state(bool clear_overflow) {
  94.431 -  _markStack.setEmpty();
  94.432 -  _markStack.clear_overflow();
  94.433 +  _markStack.set_should_expand();
  94.434 +  _markStack.setEmpty();        // Also clears the _markStack overflow flag
  94.435    if (clear_overflow) {
  94.436      clear_has_overflown();
  94.437    } else {
  94.438 @@ -2850,7 +2974,7 @@
  94.439    _finger = _heap_start;
  94.440  
  94.441    for (uint i = 0; i < _max_worker_id; ++i) {
  94.442 -    OopTaskQueue* queue = _task_queues->queue(i);
  94.443 +    CMTaskQueue* queue = _task_queues->queue(i);
  94.444      queue->set_empty();
  94.445    }
  94.446  }
    95.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Jan 08 14:04:25 2013 -0500
    95.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue Jan 08 11:39:53 2013 -0800
    95.3 @@ -63,7 +63,7 @@
    95.4  
    95.5   public:
    95.6    // constructor
    95.7 -  CMBitMapRO(ReservedSpace rs, int shifter);
    95.8 +  CMBitMapRO(int shifter);
    95.9  
   95.10    enum { do_yield = true };
   95.11  
   95.12 @@ -117,8 +117,11 @@
   95.13  
   95.14   public:
   95.15    // constructor
   95.16 -  CMBitMap(ReservedSpace rs, int shifter) :
   95.17 -    CMBitMapRO(rs, shifter) {}
   95.18 +  CMBitMap(int shifter) :
   95.19 +    CMBitMapRO(shifter) {}
   95.20 +
   95.21 +  // Allocates the back store for the marking bitmap
   95.22 +  bool allocate(ReservedSpace heap_rs);
   95.23  
   95.24    // write marks
   95.25    void mark(HeapWord* addr) {
   95.26 @@ -155,17 +158,18 @@
   95.27    MemRegion getAndClearMarkedRegion(HeapWord* addr, HeapWord* end_addr);
   95.28  };
   95.29  
   95.30 -// Represents a marking stack used by the CM collector.
   95.31 -// Ideally this should be GrowableArray<> just like MSC's marking stack(s).
   95.32 +// Represents a marking stack used by ConcurrentMarking in the G1 collector.
   95.33  class CMMarkStack VALUE_OBJ_CLASS_SPEC {
   95.34 +  VirtualSpace _virtual_space;   // Underlying backing store for actual stack
   95.35    ConcurrentMark* _cm;
   95.36    oop*   _base;        // bottom of stack
   95.37 -  jint   _index;       // one more than last occupied index
   95.38 -  jint   _capacity;    // max #elements
   95.39 -  jint   _saved_index; // value of _index saved at start of GC
   95.40 -  NOT_PRODUCT(jint _max_depth;)  // max depth plumbed during run
   95.41 +  jint _index;       // one more than last occupied index
   95.42 +  jint _capacity;    // max #elements
   95.43 +  jint _saved_index; // value of _index saved at start of GC
   95.44 +  NOT_PRODUCT(jint _max_depth;)   // max depth plumbed during run
   95.45  
   95.46 -  bool   _overflow;
   95.47 +  bool  _overflow;
   95.48 +  bool  _should_expand;
   95.49    DEBUG_ONLY(bool _drain_in_progress;)
   95.50    DEBUG_ONLY(bool _drain_in_progress_yields;)
   95.51  
   95.52 @@ -173,7 +177,13 @@
   95.53    CMMarkStack(ConcurrentMark* cm);
   95.54    ~CMMarkStack();
   95.55  
   95.56 -  void allocate(size_t size);
   95.57 +#ifndef PRODUCT
   95.58 +  jint max_depth() const {
   95.59 +    return _max_depth;
   95.60 +  }
   95.61 +#endif
   95.62 +
   95.63 +  bool allocate(size_t capacity);
   95.64  
   95.65    oop pop() {
   95.66      if (!isEmpty()) {
   95.67 @@ -231,11 +241,17 @@
   95.68  
   95.69    bool isEmpty()    { return _index == 0; }
   95.70    bool isFull()     { return _index == _capacity; }
   95.71 -  int maxElems()    { return _capacity; }
   95.72 +  int  maxElems()   { return _capacity; }
   95.73  
   95.74    bool overflow() { return _overflow; }
   95.75    void clear_overflow() { _overflow = false; }
   95.76  
   95.77 +  bool should_expand() const { return _should_expand; }
   95.78 +  void set_should_expand();
   95.79 +
   95.80 +  // Expand the stack, typically in response to an overflow condition
   95.81 +  void expand();
   95.82 +
   95.83    int  size() { return _index; }
   95.84  
   95.85    void setEmpty()   { _index = 0; clear_overflow(); }
   95.86 @@ -344,6 +360,7 @@
   95.87  class ConcurrentMarkThread;
   95.88  
   95.89  class ConcurrentMark: public CHeapObj<mtGC> {
   95.90 +  friend class CMMarkStack;
   95.91    friend class ConcurrentMarkThread;
   95.92    friend class CMTask;
   95.93    friend class CMBitMapClosure;
   95.94 @@ -577,6 +594,9 @@
   95.95    // the card bitmaps.
   95.96    intptr_t _heap_bottom_card_num;
   95.97  
   95.98 +  // Set to true when initialization is complete
   95.99 +  bool _completed_initialization;
  95.100 +
  95.101  public:
  95.102    // Manipulation of the global mark stack.
  95.103    // Notice that the first mark_stack_push is CAS-based, whereas the
  95.104 @@ -636,7 +656,7 @@
  95.105      return _task_queues->steal(worker_id, hash_seed, obj);
  95.106    }
  95.107  
  95.108 -  ConcurrentMark(ReservedSpace rs, uint max_regions);
  95.109 +  ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs);
  95.110    ~ConcurrentMark();
  95.111  
  95.112    ConcurrentMarkThread* cmThread() { return _cmThread; }
  95.113 @@ -907,6 +927,11 @@
  95.114    // Should *not* be called from parallel code.
  95.115    inline bool mark_and_count(oop obj);
  95.116  
  95.117 +  // Returns true if initialization was successfully completed.
  95.118 +  bool completed_initialization() const {
  95.119 +    return _completed_initialization;
  95.120 +  }
  95.121 +
  95.122  protected:
  95.123    // Clear all the per-task bitmaps and arrays used to store the
  95.124    // counting data.
    96.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jan 08 14:04:25 2013 -0500
    96.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Jan 08 11:39:53 2013 -0800
    96.3 @@ -2079,7 +2079,11 @@
    96.4  
    96.5    // Create the ConcurrentMark data structure and thread.
    96.6    // (Must do this late, so that "max_regions" is defined.)
    96.7 -  _cm       = new ConcurrentMark(heap_rs, max_regions());
    96.8 +  _cm = new ConcurrentMark(this, heap_rs);
    96.9 +  if (_cm == NULL || !_cm->completed_initialization()) {
   96.10 +    vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
   96.11 +    return JNI_ENOMEM;
   96.12 +  }
   96.13    _cmThread = _cm->cmThread();
   96.14  
   96.15    // Initialize the from_card cache structure of HeapRegionRemSet.
   96.16 @@ -2087,7 +2091,7 @@
   96.17  
   96.18    // Now expand into the initial heap size.
   96.19    if (!expand(init_byte_size)) {
   96.20 -    vm_exit_during_initialization("Failed to allocate initial heap.");
   96.21 +    vm_shutdown_during_initialization("Failed to allocate initial heap.");
   96.22      return JNI_ENOMEM;
   96.23    }
   96.24  
    97.1 --- a/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp	Tue Jan 08 14:04:25 2013 -0500
    97.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/adjoiningVirtualSpaces.cpp	Tue Jan 08 11:39:53 2013 -0800
    97.3 @@ -24,6 +24,7 @@
    97.4  
    97.5  #include "precompiled.hpp"
    97.6  #include "gc_implementation/parallelScavenge/adjoiningVirtualSpaces.hpp"
    97.7 +#include "memory/allocation.inline.hpp"
    97.8  #include "runtime/java.hpp"
    97.9  
   97.10  AdjoiningVirtualSpaces::AdjoiningVirtualSpaces(ReservedSpace rs,
    98.1 --- a/src/share/vm/gc_implementation/shared/gcStats.cpp	Tue Jan 08 14:04:25 2013 -0500
    98.2 +++ b/src/share/vm/gc_implementation/shared/gcStats.cpp	Tue Jan 08 11:39:53 2013 -0800
    98.3 @@ -25,6 +25,7 @@
    98.4  #include "precompiled.hpp"
    98.5  #include "gc_implementation/shared/gcStats.hpp"
    98.6  #include "gc_implementation/shared/gcUtil.hpp"
    98.7 +#include "memory/allocation.inline.hpp"
    98.8  
    98.9  GCStats::GCStats() {
   98.10      _avg_promoted       = new AdaptivePaddedNoZeroDevAverage(
    99.1 --- a/src/share/vm/interpreter/rewriter.cpp	Tue Jan 08 14:04:25 2013 -0500
    99.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Tue Jan 08 11:39:53 2013 -0800
    99.3 @@ -27,13 +27,8 @@
    99.4  #include "interpreter/interpreter.hpp"
    99.5  #include "interpreter/rewriter.hpp"
    99.6  #include "memory/gcLocker.hpp"
    99.7 -#include "memory/metadataFactory.hpp"
    99.8 -#include "memory/oopFactory.hpp"
    99.9  #include "memory/resourceArea.hpp"
   99.10  #include "oops/generateOopMap.hpp"
   99.11 -#include "oops/objArrayOop.hpp"
   99.12 -#include "oops/oop.inline.hpp"
   99.13 -#include "prims/methodComparator.hpp"
   99.14  #include "prims/methodHandles.hpp"
   99.15  
   99.16  // Computes a CPC map (new_index -> original_index) for constant pool entries
   99.17 @@ -402,13 +397,6 @@
   99.18  }
   99.19  
   99.20  
   99.21 -void Rewriter::rewrite(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS) {
   99.22 -  ResourceMark rm(THREAD);
   99.23 -  Rewriter     rw(klass, cpool, methods, CHECK);
   99.24 -  // (That's all, folks.)
   99.25 -}
   99.26 -
   99.27 -
   99.28  Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
   99.29    : _klass(klass),
   99.30      _pool(cpool),
   99.31 @@ -453,46 +441,25 @@
   99.32      restore_bytecodes();
   99.33      return;
   99.34    }
   99.35 -}
   99.36  
   99.37 -// Relocate jsr/rets in a method.  This can't be done with the rewriter
   99.38 -// stage because it can throw other exceptions, leaving the bytecodes
   99.39 -// pointing at constant pool cache entries.
   99.40 -// Link and check jvmti dependencies while we're iterating over the methods.
   99.41 -// JSR292 code calls with a different set of methods, so two entry points.
   99.42 -void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
   99.43 -  relocate_and_link(this_oop, this_oop->methods(), THREAD);
   99.44 -}
   99.45 -
   99.46 -void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
   99.47 -                                 Array<Method*>* methods, TRAPS) {
   99.48 -  int len = methods->length();
   99.49 +  // Relocate after everything, but still do this under the is_rewritten flag,
    99.50 +  // so methods with jsrs in custom class lists aren't attempted to be
   99.51 +  // rewritten in the RO section of the shared archive.
   99.52 +  // Relocated bytecodes don't have to be restored, only the cp cache entries
   99.53    for (int i = len-1; i >= 0; i--) {
   99.54 -    methodHandle m(THREAD, methods->at(i));
   99.55 +    methodHandle m(THREAD, _methods->at(i));
   99.56  
   99.57      if (m->has_jsrs()) {
   99.58 -      m = rewrite_jsrs(m, CHECK);
   99.59 +      m = rewrite_jsrs(m, THREAD);
   99.60 +      // Restore bytecodes to their unrewritten state if there are exceptions
   99.61 +      // relocating bytecodes.  If some are relocated, that is ok because that
   99.62 +      // doesn't affect constant pool to cpCache rewriting.
   99.63 +      if (HAS_PENDING_EXCEPTION) {
   99.64 +        restore_bytecodes();
   99.65 +        return;
   99.66 +      }
   99.67        // Method might have gotten rewritten.
   99.68        methods->at_put(i, m());
   99.69      }
   99.70 -
   99.71 -    // Set up method entry points for compiler and interpreter    .
   99.72 -    m->link_method(m, CHECK);
   99.73 -
   99.74 -    // This is for JVMTI and unrelated to relocator but the last thing we do
   99.75 -#ifdef ASSERT
   99.76 -    if (StressMethodComparator) {
   99.77 -      static int nmc = 0;
   99.78 -      for (int j = i; j >= 0 && j >= i-4; j--) {
   99.79 -        if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
   99.80 -        bool z = MethodComparator::methods_EMCP(m(),
   99.81 -                   methods->at(j));
   99.82 -        if (j == i && !z) {
   99.83 -          tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
   99.84 -          assert(z, "method must compare equal to itself");
   99.85 -        }
   99.86 -      }
   99.87 -    }
   99.88 -#endif //ASSERT
   99.89    }
   99.90  }
   100.1 --- a/src/share/vm/interpreter/rewriter.hpp	Tue Jan 08 14:04:25 2013 -0500
   100.2 +++ b/src/share/vm/interpreter/rewriter.hpp	Tue Jan 08 11:39:53 2013 -0800
   100.3 @@ -158,14 +158,6 @@
   100.4   public:
   100.5    // Driver routine:
   100.6    static void rewrite(instanceKlassHandle klass, TRAPS);
   100.7 -  static void rewrite(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS);
   100.8 -
   100.9 -  // Second pass, not gated by is_rewritten flag
  100.10 -  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
  100.11 -  // JSR292 version to call with it's own methods.
  100.12 -  static void relocate_and_link(instanceKlassHandle klass,
  100.13 -                                Array<Method*>* methods, TRAPS);
  100.14 -
  100.15  };
  100.16  
  100.17  #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
   101.1 --- a/src/share/vm/memory/allocation.hpp	Tue Jan 08 14:04:25 2013 -0500
   101.2 +++ b/src/share/vm/memory/allocation.hpp	Tue Jan 08 11:39:53 2013 -0800
   101.3 @@ -202,7 +202,7 @@
   101.4  // Calling new or delete will result in fatal error.
   101.5  
   101.6  class StackObj ALLOCATION_SUPER_CLASS_SPEC {
   101.7 - public:
   101.8 + private:
   101.9    void* operator new(size_t size);
  101.10    void  operator delete(void* p);
  101.11  };
  101.12 @@ -226,7 +226,7 @@
  101.13  // be defined as a an empty string "".
  101.14  //
  101.15  class _ValueObj {
  101.16 - public:
  101.17 + private:
  101.18    void* operator new(size_t size);
  101.19    void operator delete(void* p);
  101.20  };
   102.1 --- a/src/share/vm/memory/filemap.cpp	Tue Jan 08 14:04:25 2013 -0500
   102.2 +++ b/src/share/vm/memory/filemap.cpp	Tue Jan 08 11:39:53 2013 -0800
   102.3 @@ -211,7 +211,11 @@
   102.4  
   102.5    // Remove the existing file in case another process has it open.
   102.6    remove(_full_path);
   102.7 +#ifdef _WINDOWS  // if 0444 is used on Windows, then remove() will fail.
   102.8 +  int fd = open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0744);
   102.9 +#else
  102.10    int fd = open(_full_path, O_RDWR | O_CREAT | O_TRUNC | O_BINARY, 0444);
  102.11 +#endif
  102.12    if (fd < 0) {
  102.13      fail_stop("Unable to create shared archive file %s.", _full_path);
  102.14    }
  102.15 @@ -370,9 +374,8 @@
  102.16      return rs;
  102.17    }
  102.18    // the reserved virtual memory is for mapping class data sharing archive
  102.19 -  if (MemTracker::is_on()) {
  102.20 -    MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
  102.21 -  }
  102.22 +  MemTracker::record_virtual_memory_type((address)rs.base(), mtClassShared);
  102.23 +
  102.24    return rs;
  102.25  }
  102.26  
  102.27 @@ -394,6 +397,11 @@
  102.28      fail_continue(err_msg("Unable to map %s shared space at required address.", shared_region_name[i]));
  102.29      return NULL;
  102.30    }
  102.31 +#ifdef _WINDOWS
  102.32 +  // This call is Windows-only because the memory_type gets recorded for the other platforms
  102.33 +  // in method FileMapInfo::reserve_shared_memory(), which is not called on Windows.
  102.34 +  MemTracker::record_virtual_memory_type((address)base, mtClassShared);
  102.35 +#endif
  102.36    return base;
  102.37  }
  102.38  
   103.1 --- a/src/share/vm/memory/metaspace.cpp	Tue Jan 08 14:04:25 2013 -0500
   103.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Jan 08 11:39:53 2013 -0800
   103.3 @@ -2192,11 +2192,6 @@
   103.4  
   103.5  // MetaspaceAux
   103.6  
   103.7 -size_t MetaspaceAux::used_in_bytes() {
   103.8 -  return (Metaspace::class_space_list()->used_words_sum() +
   103.9 -          Metaspace::space_list()->used_words_sum()) * BytesPerWord;
  103.10 -}
  103.11 -
  103.12  size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
  103.13    size_t used = 0;
  103.14    ClassLoaderDataGraphMetaspaceIterator iter;
  103.15 @@ -2222,14 +2217,6 @@
  103.16    return free * BytesPerWord;
  103.17  }
  103.18  
  103.19 -// The total words available for metadata allocation.  This
  103.20 -// uses Metaspace capacity_words() which is the total words
  103.21 -// in chunks allocated for a Metaspace.
  103.22 -size_t MetaspaceAux::capacity_in_bytes() {
  103.23 -  return (Metaspace::class_space_list()->capacity_words_sum() +
  103.24 -          Metaspace::space_list()->capacity_words_sum()) * BytesPerWord;
  103.25 -}
  103.26 -
  103.27  size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
  103.28    size_t capacity = free_chunks_total(mdtype);
  103.29    ClassLoaderDataGraphMetaspaceIterator iter;
  103.30 @@ -2242,11 +2229,6 @@
  103.31    return capacity * BytesPerWord;
  103.32  }
  103.33  
  103.34 -size_t MetaspaceAux::reserved_in_bytes() {
  103.35 -  return (Metaspace::class_space_list()->virtual_space_total() +
  103.36 -          Metaspace::space_list()->virtual_space_total()) * BytesPerWord;
  103.37 -}
  103.38 -
  103.39  size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
  103.40    size_t reserved = (mdtype == Metaspace::ClassType) ?
  103.41                         Metaspace::class_space_list()->virtual_space_total() :
   104.1 --- a/src/share/vm/memory/metaspace.hpp	Tue Jan 08 14:04:25 2013 -0500
   104.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Jan 08 11:39:53 2013 -0800
   104.3 @@ -156,16 +156,25 @@
   104.4  
   104.5   public:
   104.6    // Total of space allocated to metadata in all Metaspaces
   104.7 -  static size_t used_in_bytes();
   104.8 +  static size_t used_in_bytes() {
   104.9 +    return used_in_bytes(Metaspace::ClassType) +
  104.10 +           used_in_bytes(Metaspace::NonClassType);
  104.11 +  }
  104.12  
  104.13    // Total of available space in all Metaspaces
  104.14    // Total of capacity allocated to all Metaspaces.  This includes
  104.15    // space in Metachunks not yet allocated and in the Metachunk
  104.16    // freelist.
  104.17 -  static size_t capacity_in_bytes();
  104.18 +  static size_t capacity_in_bytes() {
  104.19 +    return capacity_in_bytes(Metaspace::ClassType) +
  104.20 +           capacity_in_bytes(Metaspace::NonClassType);
  104.21 +  }
  104.22  
  104.23    // Total space reserved in all Metaspaces
  104.24 -  static size_t reserved_in_bytes();
  104.25 +  static size_t reserved_in_bytes() {
  104.26 +    return reserved_in_bytes(Metaspace::ClassType) +
  104.27 +           reserved_in_bytes(Metaspace::NonClassType);
  104.28 +  }
  104.29  
  104.30    static size_t min_chunk_size();
  104.31  
   105.1 --- a/src/share/vm/memory/metaspaceShared.cpp	Tue Jan 08 14:04:25 2013 -0500
   105.2 +++ b/src/share/vm/memory/metaspaceShared.cpp	Tue Jan 08 11:39:53 2013 -0800
   105.3 @@ -689,9 +689,15 @@
   105.4  bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
   105.5    size_t image_alignment = mapinfo->alignment();
   105.6  
   105.7 -  // Map in the shared memory and then map the regions on top of it
   105.8 +#ifndef _WINDOWS
   105.9 +  // Map in the shared memory and then map the regions on top of it.
  105.10 +  // On Windows, don't map the memory here because it will cause the
  105.11 +  // mappings of the regions to fail.
  105.12    ReservedSpace shared_rs = mapinfo->reserve_shared_memory();
  105.13    if (!shared_rs.is_reserved()) return false;
  105.14 +#endif
  105.15 +
  105.16 +  assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces");
  105.17  
  105.18    // Map each shared region
  105.19    if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
  105.20 @@ -708,8 +714,10 @@
  105.21      if (_rw_base != NULL) mapinfo->unmap_region(rw);
  105.22      if (_md_base != NULL) mapinfo->unmap_region(md);
  105.23      if (_mc_base != NULL) mapinfo->unmap_region(mc);
  105.24 +#ifndef _WINDOWS
  105.25      // Release the entire mapped region
  105.26      shared_rs.release();
  105.27 +#endif
  105.28      // If -Xshare:on is specified, print out the error message and exit VM,
  105.29      // otherwise, set UseSharedSpaces to false and continue.
  105.30      if (RequireSharedSpaces) {
   106.1 --- a/src/share/vm/oops/annotations.cpp	Tue Jan 08 14:04:25 2013 -0500
   106.2 +++ b/src/share/vm/oops/annotations.cpp	Tue Jan 08 11:39:53 2013 -0800
   106.3 @@ -61,6 +61,9 @@
   106.4    free_contents(loader_data, methods_annotations());
   106.5    free_contents(loader_data, methods_parameter_annotations());
   106.6    free_contents(loader_data, methods_default_annotations());
   106.7 +
   106.8 +  // Recursively deallocate optional Annotations linked through this one
   106.9 +  MetadataFactory::free_metadata(loader_data, type_annotations());
  106.10  }
  106.11  
  106.12  // Set the annotation at 'idnum' to 'anno'.
   107.1 --- a/src/share/vm/oops/annotations.hpp	Tue Jan 08 14:04:25 2013 -0500
   107.2 +++ b/src/share/vm/oops/annotations.hpp	Tue Jan 08 11:39:53 2013 -0800
   107.3 @@ -38,7 +38,8 @@
   107.4  typedef Array<u1> AnnotationArray;
   107.5  
   107.6  // Class to hold the various types of annotations. The only metadata that points
   107.7 -// to this is InstanceKlass.
   107.8 +// to this is InstanceKlass, or another Annotations instance if this is a
    107.9 +// type_annotation instance.
  107.10  
  107.11  class Annotations: public MetaspaceObj {
  107.12  
  107.13 @@ -58,6 +59,8 @@
  107.14    // such annotations.
  107.15    // Index is the idnum, which is initially the same as the methods array index.
  107.16    Array<AnnotationArray*>*     _methods_default_annotations;
  107.17 +  // Type annotations for this class, or null if none.
  107.18 +  Annotations*                 _type_annotations;
  107.19  
  107.20    // Constructor where some some values are known to not be null
  107.21    Annotations(Array<AnnotationArray*>* fa, Array<AnnotationArray*>* ma,
  107.22 @@ -66,7 +69,8 @@
  107.23                   _fields_annotations(fa),
  107.24                   _methods_annotations(ma),
  107.25                   _methods_parameter_annotations(mpa),
  107.26 -                 _methods_default_annotations(mda) {}
  107.27 +                 _methods_default_annotations(mda),
  107.28 +                 _type_annotations(NULL) {}
  107.29  
  107.30   public:
  107.31    // Allocate instance of this class
  107.32 @@ -81,22 +85,26 @@
  107.33    static int size()    { return sizeof(Annotations) / wordSize; }
  107.34  
  107.35    // Constructor to initialize to null
  107.36 -  Annotations() : _class_annotations(NULL), _fields_annotations(NULL),
  107.37 +  Annotations() : _class_annotations(NULL),
  107.38 +                  _fields_annotations(NULL),
  107.39                    _methods_annotations(NULL),
  107.40                    _methods_parameter_annotations(NULL),
  107.41 -                  _methods_default_annotations(NULL) {}
  107.42 +                  _methods_default_annotations(NULL),
  107.43 +                  _type_annotations(NULL) {}
  107.44  
  107.45    AnnotationArray* class_annotations() const                       { return _class_annotations; }
  107.46    Array<AnnotationArray*>* fields_annotations() const              { return _fields_annotations; }
  107.47    Array<AnnotationArray*>* methods_annotations() const             { return _methods_annotations; }
  107.48    Array<AnnotationArray*>* methods_parameter_annotations() const   { return _methods_parameter_annotations; }
  107.49    Array<AnnotationArray*>* methods_default_annotations() const     { return _methods_default_annotations; }
  107.50 +  Annotations* type_annotations() const                            { return _type_annotations; }
  107.51  
  107.52    void set_class_annotations(AnnotationArray* md)                     { _class_annotations = md; }
  107.53    void set_fields_annotations(Array<AnnotationArray*>* md)            { _fields_annotations = md; }
  107.54    void set_methods_annotations(Array<AnnotationArray*>* md)           { _methods_annotations = md; }
  107.55    void set_methods_parameter_annotations(Array<AnnotationArray*>* md) { _methods_parameter_annotations = md; }
  107.56    void set_methods_default_annotations(Array<AnnotationArray*>* md)   { _methods_default_annotations = md; }
  107.57 +  void set_type_annotations(Annotations* annos)                       { _type_annotations = annos; }
  107.58  
  107.59    // Redefine classes support
  107.60    AnnotationArray* get_method_annotations_of(int idnum)
  107.61 @@ -129,6 +137,7 @@
  107.62    inline AnnotationArray* get_method_annotations_from(int idnum, Array<AnnotationArray*>* annos);
  107.63    void set_annotations(Array<AnnotationArray*>* md, Array<AnnotationArray*>** md_p)  { *md_p = md; }
  107.64  
  107.65 +  bool is_klass() const { return false; }
  107.66   private:
  107.67    void set_methods_annotations_of(instanceKlassHandle ik,
  107.68                                    int idnum, AnnotationArray* anno,
   108.1 --- a/src/share/vm/oops/constMethod.hpp	Tue Jan 08 14:04:25 2013 -0500
   108.2 +++ b/src/share/vm/oops/constMethod.hpp	Tue Jan 08 11:39:53 2013 -0800
   108.3 @@ -46,6 +46,7 @@
   108.4  // | interp_kind  | flags    | code_size                  |
   108.5  // | name index              | signature index            |
   108.6  // | method_idnum            | max_stack                  |
   108.7 +// | max_locals              | size_of_parameters         |
   108.8  // |------------------------------------------------------|
   108.9  // |                                                      |
  108.10  // | byte codes                                           |
  108.11 @@ -150,7 +151,8 @@
  108.12                                                   // initially corresponds to the index into the methods array.
  108.13                                                   // but this may change with redefinition
  108.14    u2                _max_stack;                  // Maximum number of entries on the expression stack
  108.15 -
  108.16 +  u2                _max_locals;                 // Number of local variables used by this method
  108.17 +  u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
  108.18  
  108.19    // Constructor
  108.20    ConstMethod(int byte_code_size,
  108.21 @@ -338,6 +340,11 @@
  108.22  
  108.23    static ByteSize max_stack_offset()
  108.24                              { return byte_offset_of(ConstMethod, _max_stack); }
  108.25 +  static ByteSize size_of_locals_offset()
  108.26 +                            { return byte_offset_of(ConstMethod, _max_locals); }
  108.27 +  static ByteSize size_of_parameters_offset()
  108.28 +                            { return byte_offset_of(ConstMethod, _size_of_parameters); }
  108.29 +
  108.30  
  108.31    // Unique id for the method
  108.32    static const u2 MAX_IDNUM;
  108.33 @@ -349,6 +356,14 @@
  108.34    int  max_stack() const                         { return _max_stack; }
  108.35    void set_max_stack(int size)                   { _max_stack = size; }
  108.36  
  108.37 +  // max locals
  108.38 +  int  max_locals() const                        { return _max_locals; }
  108.39 +  void set_max_locals(int size)                  { _max_locals = size; }
  108.40 +
  108.41 +  // size of parameters
  108.42 +  int  size_of_parameters() const                { return _size_of_parameters; }
  108.43 +  void set_size_of_parameters(int size)          { _size_of_parameters = size; }
  108.44 +
  108.45    // Deallocation for RedefineClasses
  108.46    void deallocate_contents(ClassLoaderData* loader_data);
  108.47    bool is_klass() const { return false; }
   109.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Jan 08 14:04:25 2013 -0500
   109.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Jan 08 11:39:53 2013 -0800
   109.3 @@ -47,6 +47,7 @@
   109.4  #include "oops/symbol.hpp"
   109.5  #include "prims/jvmtiExport.hpp"
   109.6  #include "prims/jvmtiRedefineClassesTrace.hpp"
   109.7 +#include "prims/methodComparator.hpp"
   109.8  #include "runtime/fieldDescriptor.hpp"
   109.9  #include "runtime/handles.inline.hpp"
  109.10  #include "runtime/javaCalls.hpp"
  109.11 @@ -364,6 +365,9 @@
  109.12    set_protection_domain(NULL);
  109.13    set_signers(NULL);
  109.14    set_init_lock(NULL);
  109.15 +
  109.16 +  // We should deallocate the Annotations instance
  109.17 +  MetadataFactory::free_metadata(loader_data, annotations());
  109.18    set_annotations(NULL);
  109.19  }
  109.20  
  109.21 @@ -602,7 +606,7 @@
  109.22        }
  109.23  
  109.24        // relocate jsrs and link methods after they are all rewritten
  109.25 -      this_oop->relocate_and_link_methods(CHECK_false);
  109.26 +      this_oop->link_methods(CHECK_false);
  109.27  
  109.28        // Initialize the vtable and interface table after
  109.29        // methods have been rewritten since rewrite may
  109.30 @@ -650,10 +654,31 @@
  109.31  // Now relocate and link method entry points after class is rewritten.
  109.32  // This is outside is_rewritten flag. In case of an exception, it can be
  109.33  // executed more than once.
  109.34 -void InstanceKlass::relocate_and_link_methods(TRAPS) {
  109.35 -  assert(is_loaded(), "must be loaded");
  109.36 -  instanceKlassHandle this_oop(THREAD, this);
  109.37 -  Rewriter::relocate_and_link(this_oop, CHECK);
  109.38 +void InstanceKlass::link_methods(TRAPS) {
  109.39 +  int len = methods()->length();
  109.40 +  for (int i = len-1; i >= 0; i--) {
  109.41 +    methodHandle m(THREAD, methods()->at(i));
  109.42 +
   109.43 +    // Set up method entry points for compiler and interpreter.
  109.44 +    m->link_method(m, CHECK);
  109.45 +
  109.46 +    // This is for JVMTI and unrelated to relocator but the last thing we do
  109.47 +#ifdef ASSERT
  109.48 +    if (StressMethodComparator) {
  109.49 +      ResourceMark rm(THREAD);
  109.50 +      static int nmc = 0;
  109.51 +      for (int j = i; j >= 0 && j >= i-4; j--) {
  109.52 +        if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
  109.53 +        bool z = MethodComparator::methods_EMCP(m(),
  109.54 +                   methods()->at(j));
  109.55 +        if (j == i && !z) {
  109.56 +          tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
  109.57 +          assert(z, "method must compare equal to itself");
  109.58 +        }
  109.59 +      }
  109.60 +    }
  109.61 +#endif //ASSERT
  109.62 +  }
  109.63  }
  109.64  
  109.65  
   110.1 --- a/src/share/vm/oops/instanceKlass.hpp	Tue Jan 08 14:04:25 2013 -0500
   110.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Tue Jan 08 11:39:53 2013 -0800
   110.3 @@ -460,7 +460,7 @@
   110.4    bool link_class_or_fail(TRAPS); // returns false on failure
   110.5    void unlink_class();
   110.6    void rewrite_class(TRAPS);
   110.7 -  void relocate_and_link_methods(TRAPS);
   110.8 +  void link_methods(TRAPS);
   110.9    Method* class_initializer();
  110.10  
  110.11    // set the class to initialized if no static initializer is present
  110.12 @@ -544,6 +544,12 @@
  110.13      }
  110.14    }
  110.15  
  110.16 +  // Oop that keeps the metadata for this class from being unloaded
  110.17 +  // in places where the metadata is stored in other places, like nmethods
  110.18 +  oop klass_holder() const {
  110.19 +    return is_anonymous() ? java_mirror() : class_loader();
  110.20 +  }
  110.21 +
  110.22    // signers
  110.23    objArrayOop signers() const              { return _signers; }
  110.24    void set_signers(objArrayOop s)          { klass_oop_store((oop*)&_signers, s); }
  110.25 @@ -657,6 +663,10 @@
  110.26      if (annotations() == NULL) return NULL;
  110.27      return annotations()->fields_annotations();
  110.28    }
  110.29 +  Annotations* type_annotations() const {
  110.30 +    if (annotations() == NULL) return NULL;
  110.31 +    return annotations()->type_annotations();
  110.32 +  }
  110.33  
  110.34    // allocation
  110.35    instanceOop allocate_instance(TRAPS);
   111.1 --- a/src/share/vm/oops/klass.hpp	Tue Jan 08 14:04:25 2013 -0500
   111.2 +++ b/src/share/vm/oops/klass.hpp	Tue Jan 08 11:39:53 2013 -0800
   111.3 @@ -451,6 +451,8 @@
   111.4  
   111.5    oop class_loader() const;
   111.6  
   111.7 +  virtual oop klass_holder() const      { return class_loader(); }
   111.8 +
   111.9   protected:
  111.10    virtual Klass* array_klass_impl(bool or_null, int rank, TRAPS);
  111.11    virtual Klass* array_klass_impl(bool or_null, TRAPS);
   112.1 --- a/src/share/vm/oops/method.cpp	Tue Jan 08 14:04:25 2013 -0500
   112.2 +++ b/src/share/vm/oops/method.cpp	Tue Jan 08 11:39:53 2013 -0800
   112.3 @@ -1331,13 +1331,15 @@
   112.4                                   Array<AnnotationArray*>* methods_annotations,
   112.5                                   Array<AnnotationArray*>* methods_parameter_annotations,
   112.6                                   Array<AnnotationArray*>* methods_default_annotations,
   112.7 +                                 Array<AnnotationArray*>* methods_type_annotations,
   112.8                                   bool idempotent) {
   112.9    int length = methods->length();
  112.10    if (length > 1) {
  112.11      bool do_annotations = false;
  112.12      if (methods_annotations != NULL ||
  112.13          methods_parameter_annotations != NULL ||
  112.14 -        methods_default_annotations != NULL) {
  112.15 +        methods_default_annotations != NULL ||
  112.16 +        methods_type_annotations != NULL) {
  112.17        do_annotations = true;
  112.18      }
  112.19      if (do_annotations) {
  112.20 @@ -1356,6 +1358,7 @@
  112.21      assert(methods_annotations == NULL           || methods_annotations->length() == methods->length(), "");
  112.22      assert(methods_parameter_annotations == NULL || methods_parameter_annotations->length() == methods->length(), "");
  112.23      assert(methods_default_annotations == NULL   || methods_default_annotations->length() == methods->length(), "");
  112.24 +    assert(methods_type_annotations == NULL   || methods_type_annotations->length() == methods->length(), "");
  112.25      if (do_annotations) {
  112.26        ResourceMark rm;
  112.27        // Allocate temporary storage
  112.28 @@ -1363,6 +1366,7 @@
  112.29        reorder_based_on_method_index(methods, methods_annotations, temp_array);
  112.30        reorder_based_on_method_index(methods, methods_parameter_annotations, temp_array);
  112.31        reorder_based_on_method_index(methods, methods_default_annotations, temp_array);
  112.32 +      reorder_based_on_method_index(methods, methods_type_annotations, temp_array);
  112.33      }
  112.34  
  112.35      // Reset method ordering
   113.1 --- a/src/share/vm/oops/method.hpp	Tue Jan 08 14:04:25 2013 -0500
   113.2 +++ b/src/share/vm/oops/method.hpp	Tue Jan 08 11:39:53 2013 -0800
   113.3 @@ -73,8 +73,7 @@
   113.4  // |------------------------------------------------------|
   113.5  // | result_index (C++ interpreter only)                  |
   113.6  // |------------------------------------------------------|
   113.7 -// | method_size             |   max_locals               |
   113.8 -// | size_of_parameters      |   intrinsic_id|   flags    |
   113.9 +// | method_size             |   intrinsic_id|   flags    |
  113.10  // |------------------------------------------------------|
  113.11  // | throwout_count          |   num_breakpoints          |
  113.12  // |------------------------------------------------------|
  113.13 @@ -116,8 +115,6 @@
  113.14    int               _result_index;               // C++ interpreter needs for converting results to/from stack
  113.15  #endif
  113.16    u2                _method_size;                // size of this object
  113.17 -  u2                _max_locals;                 // Number of local variables used by this method
  113.18 -  u2                _size_of_parameters;         // size of the parameter block (receiver + arguments) in words
  113.19    u1                _intrinsic_id;               // vmSymbols::intrinsic_id (0 == _none)
  113.20    u1                _jfr_towrite  : 1,           // Flags
  113.21                      _force_inline : 1,
  113.22 @@ -228,6 +225,13 @@
  113.23      }
  113.24      return ik->annotations()->get_method_default_annotations_of(method_idnum());
  113.25    }
   113.26 +  AnnotationArray* type_annotations() const {
   113.27 +    InstanceKlass* ik = method_holder();
   113.28 +    Annotations* type_annos = ik->type_annotations();
   113.29 +    if (type_annos == NULL)
   113.30 +      return NULL;
   113.31 +    return type_annos->get_method_annotations_of(method_idnum());
   113.32 +  }
  113.33  
  113.34  #ifdef CC_INTERP
  113.35    void set_result_index(BasicType type);
  113.36 @@ -292,8 +296,8 @@
  113.37    void      set_max_stack(int size)              {        constMethod()->set_max_stack(size); }
  113.38  
  113.39    // max locals
  113.40 -  int  max_locals() const                        { return _max_locals; }
  113.41 -  void set_max_locals(int size)                  { _max_locals = size; }
  113.42 +  int  max_locals() const                        { return constMethod()->max_locals(); }
  113.43 +  void set_max_locals(int size)                  { constMethod()->set_max_locals(size); }
  113.44  
  113.45    int highest_comp_level() const;
  113.46    void set_highest_comp_level(int level);
  113.47 @@ -311,7 +315,8 @@
  113.48    void set_interpreter_throwout_count(int count) { _interpreter_throwout_count = count; }
  113.49  
  113.50    // size of parameters
  113.51 -  int  size_of_parameters() const                { return _size_of_parameters; }
  113.52 +  int  size_of_parameters() const                { return constMethod()->size_of_parameters(); }
  113.53 +  void set_size_of_parameters(int size)          { constMethod()->set_size_of_parameters(size); }
  113.54  
  113.55    bool has_stackmap_table() const {
  113.56      return constMethod()->has_stackmap_table();
  113.57 @@ -588,8 +593,6 @@
  113.58  #ifdef CC_INTERP
  113.59    static ByteSize result_index_offset()          { return byte_offset_of(Method, _result_index ); }
  113.60  #endif /* CC_INTERP */
  113.61 -  static ByteSize size_of_locals_offset()        { return byte_offset_of(Method, _max_locals        ); }
  113.62 -  static ByteSize size_of_parameters_offset()    { return byte_offset_of(Method, _size_of_parameters); }
  113.63    static ByteSize from_compiled_offset()         { return byte_offset_of(Method, _from_compiled_entry); }
  113.64    static ByteSize code_offset()                  { return byte_offset_of(Method, _code); }
  113.65    static ByteSize invocation_counter_offset()    { return byte_offset_of(Method, _invocation_counter); }
  113.66 @@ -794,11 +797,9 @@
  113.67                             Array<AnnotationArray*>* methods_annotations,
  113.68                             Array<AnnotationArray*>* methods_parameter_annotations,
  113.69                             Array<AnnotationArray*>* methods_default_annotations,
  113.70 +                           Array<AnnotationArray*>* methods_type_annotations,
  113.71                             bool idempotent = false);
  113.72  
  113.73 -  // size of parameters
  113.74 -  void set_size_of_parameters(int size)          { _size_of_parameters = size; }
  113.75 -
  113.76    // Deallocation function for redefine classes or if an error occurs
  113.77    void deallocate_contents(ClassLoaderData* loader_data);
  113.78  
   114.1 --- a/src/share/vm/opto/addnode.cpp	Tue Jan 08 14:04:25 2013 -0500
   114.2 +++ b/src/share/vm/opto/addnode.cpp	Tue Jan 08 11:39:53 2013 -0800
   114.3 @@ -189,6 +189,11 @@
   114.4        set_req(1, addx);
   114.5        set_req(2, a22);
   114.6        progress = this;
   114.7 +      PhaseIterGVN *igvn = phase->is_IterGVN();
   114.8 +      if (add2->outcnt() == 0 && igvn) {
   114.9 +        // add disconnected.
  114.10 +        igvn->_worklist.push(add2);
  114.11 +      }
  114.12      }
  114.13    }
  114.14  
  114.15 @@ -624,6 +629,11 @@
  114.16      if( t22->singleton() && (t22 != Type::TOP) ) {  // Right input is an add of a constant?
  114.17        set_req(Address, phase->transform(new (phase->C) AddPNode(in(Base),in(Address),add->in(1))));
  114.18        set_req(Offset, add->in(2));
  114.19 +      PhaseIterGVN *igvn = phase->is_IterGVN();
  114.20 +      if (add->outcnt() == 0 && igvn) {
  114.21 +        // add disconnected.
  114.22 +        igvn->_worklist.push((Node*)add);
  114.23 +      }
  114.24        return this;              // Made progress
  114.25      }
  114.26    }
   115.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Tue Jan 08 14:04:25 2013 -0500
   115.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Jan 08 11:39:53 2013 -0800
   115.3 @@ -403,7 +403,7 @@
   115.4  //------------------------------print_inlining---------------------------------
   115.5  // Really, the failure_msg can be a success message also.
   115.6  void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
   115.7 -  CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   115.8 +  C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   115.9    if (callee_method == NULL)  tty->print(" callee not monotonic or profiled");
  115.10    if (Verbose && callee_method) {
  115.11      const InlineTree *top = this;
   116.1 --- a/src/share/vm/opto/callGenerator.cpp	Tue Jan 08 14:04:25 2013 -0500
   116.2 +++ b/src/share/vm/opto/callGenerator.cpp	Tue Jan 08 11:39:53 2013 -0800
   116.3 @@ -274,6 +274,9 @@
   116.4    virtual void do_late_inline();
   116.5  
   116.6    virtual JVMState* generate(JVMState* jvms) {
   116.7 +    Compile *C = Compile::current();
   116.8 +    C->print_inlining_skip(this);
   116.9 +
  116.10      // Record that this call site should be revisited once the main
  116.11      // parse is finished.
  116.12      Compile::current()->add_late_inline(this);
  116.13 @@ -284,7 +287,6 @@
  116.14      // as is done for allocations and macro expansion.
  116.15      return DirectCallGenerator::generate(jvms);
  116.16    }
  116.17 -
  116.18  };
  116.19  
  116.20  
  116.21 @@ -307,7 +309,9 @@
  116.22  
  116.23    // Make sure the state is a MergeMem for parsing.
  116.24    if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
  116.25 -    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
  116.26 +    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
  116.27 +    C->initial_gvn()->set_type_bottom(mem);
  116.28 +    map->set_req(TypeFunc::Memory, mem);
  116.29    }
  116.30  
  116.31    // Make enough space for the expression stack and transfer the incoming arguments
  116.32 @@ -320,6 +324,8 @@
  116.33      }
  116.34    }
  116.35  
  116.36 +  C->print_inlining_insert(this);
  116.37 +
  116.38    CompileLog* log = C->log();
  116.39    if (log != NULL) {
  116.40      log->head("late_inline method='%d'", log->identify(method()));
  116.41 @@ -608,7 +614,7 @@
  116.42          if (cg != NULL && cg->is_inline())
  116.43            return cg;
  116.44        } else {
  116.45 -        if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
  116.46 +        if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
  116.47        }
  116.48      }
  116.49      break;
   117.1 --- a/src/share/vm/opto/callGenerator.hpp	Tue Jan 08 14:04:25 2013 -0500
   117.2 +++ b/src/share/vm/opto/callGenerator.hpp	Tue Jan 08 11:39:53 2013 -0800
   117.3 @@ -147,9 +147,9 @@
   117.4                                                  CallGenerator* cg);
   117.5    virtual Node* generate_predicate(JVMState* jvms) { return NULL; };
   117.6  
   117.7 -  static void print_inlining(ciMethod* callee, int inline_level, int bci, const char* msg) {
   117.8 +  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
   117.9      if (PrintInlining)
  117.10 -      CompileTask::print_inlining(callee, inline_level, bci, msg);
  117.11 +      C->print_inlining(callee, inline_level, bci, msg);
  117.12    }
  117.13  };
  117.14  
   118.1 --- a/src/share/vm/opto/callnode.cpp	Tue Jan 08 14:04:25 2013 -0500
   118.2 +++ b/src/share/vm/opto/callnode.cpp	Tue Jan 08 11:39:53 2013 -0800
   118.3 @@ -751,7 +751,7 @@
   118.4          projs->fallthrough_ioproj = pn;
   118.5        for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
   118.6          Node* e = pn->out(j);
   118.7 -        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
   118.8 +        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
   118.9            assert(projs->exobj == NULL, "only one");
  118.10            projs->exobj = e;
  118.11          }
   119.1 --- a/src/share/vm/opto/cfgnode.cpp	Tue Jan 08 14:04:25 2013 -0500
   119.2 +++ b/src/share/vm/opto/cfgnode.cpp	Tue Jan 08 11:39:53 2013 -0800
   119.3 @@ -1566,6 +1566,10 @@
   119.4      Node* n = in(j);            // Get the input
   119.5      if (rc == NULL || phase->type(rc) == Type::TOP) {
   119.6        if (n != top) {           // Not already top?
   119.7 +        PhaseIterGVN *igvn = phase->is_IterGVN();
   119.8 +        if (can_reshape && igvn != NULL) {
   119.9 +          igvn->_worklist.push(r);
  119.10 +        }
  119.11          set_req(j, top);        // Nuke it down
  119.12          progress = this;        // Record progress
  119.13        }
   120.1 --- a/src/share/vm/opto/compile.cpp	Tue Jan 08 14:04:25 2013 -0500
   120.2 +++ b/src/share/vm/opto/compile.cpp	Tue Jan 08 11:39:53 2013 -0800
   120.3 @@ -610,7 +610,9 @@
   120.4                    _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
   120.5                    _printer(IdealGraphPrinter::printer()),
   120.6  #endif
   120.7 -                  _congraph(NULL) {
   120.8 +                  _congraph(NULL),
   120.9 +                  _print_inlining_list(NULL),
  120.10 +                  _print_inlining(0) {
  120.11    C = this;
  120.12  
  120.13    CompileWrapper cw(this);
  120.14 @@ -666,6 +668,9 @@
  120.15    PhaseGVN gvn(node_arena(), estimated_size);
  120.16    set_initial_gvn(&gvn);
  120.17  
  120.18 +  if (PrintInlining) {
  120.19 +    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
  120.20 +  }
  120.21    { // Scope for timing the parser
  120.22      TracePhase t3("parse", &_t_parser, true);
  120.23  
  120.24 @@ -754,6 +759,7 @@
  120.25        }
  120.26      }
  120.27      assert(_late_inlines.length() == 0, "should have been processed");
  120.28 +    dump_inlining();
  120.29  
  120.30      print_method("Before RemoveUseless", 3);
  120.31  
  120.32 @@ -899,7 +905,9 @@
  120.33  #endif
  120.34      _dead_node_list(comp_arena()),
  120.35      _dead_node_count(0),
  120.36 -    _congraph(NULL) {
  120.37 +    _congraph(NULL),
  120.38 +    _print_inlining_list(NULL),
  120.39 +    _print_inlining(0) {
  120.40    C = this;
  120.41  
  120.42  #ifndef PRODUCT
  120.43 @@ -3351,3 +3359,11 @@
  120.44      cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
  120.45    }
  120.46  }
  120.47 +
  120.48 +void Compile::dump_inlining() {
  120.49 +  if (PrintInlining) {
  120.50 +    for (int i = 0; i < _print_inlining_list->length(); i++) {
  120.51 +      tty->print(_print_inlining_list->at(i).ss()->as_string());
  120.52 +    }
  120.53 +  }
  120.54 +}
   121.1 --- a/src/share/vm/opto/compile.hpp	Tue Jan 08 14:04:25 2013 -0500
   121.2 +++ b/src/share/vm/opto/compile.hpp	Tue Jan 08 11:39:53 2013 -0800
   121.3 @@ -30,6 +30,7 @@
   121.4  #include "code/debugInfoRec.hpp"
   121.5  #include "code/exceptionHandlerTable.hpp"
   121.6  #include "compiler/compilerOracle.hpp"
   121.7 +#include "compiler/compileBroker.hpp"
   121.8  #include "libadt/dict.hpp"
   121.9  #include "libadt/port.hpp"
  121.10  #include "libadt/vectset.hpp"
  121.11 @@ -369,6 +370,61 @@
  121.12    GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
  121.13                                                  // main parsing has finished.
  121.14  
  121.15 +  // Inlining may not happen in parse order which would make
  121.16 +  // PrintInlining output confusing. Keep track of PrintInlining
  121.17 +  // pieces in order.
  121.18 +  class PrintInliningBuffer : public ResourceObj {
  121.19 +   private:
  121.20 +    CallGenerator* _cg;
  121.21 +    stringStream* _ss;
  121.22 +
  121.23 +   public:
  121.24 +    PrintInliningBuffer()
  121.25 +      : _cg(NULL) { _ss = new stringStream(); }
  121.26 +
  121.27 +    stringStream* ss() const { return _ss; }
  121.28 +    CallGenerator* cg() const { return _cg; }
  121.29 +    void set_cg(CallGenerator* cg) { _cg = cg; }
  121.30 +  };
  121.31 +
  121.32 +  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
  121.33 +  int _print_inlining;
  121.34 +
  121.35 + public:
  121.36 +
  121.37 +  outputStream* print_inlining_stream() const {
  121.38 +    return _print_inlining_list->at(_print_inlining).ss();
  121.39 +  }
  121.40 +
  121.41 +  void print_inlining_skip(CallGenerator* cg) {
  121.42 +    if (PrintInlining) {
  121.43 +      _print_inlining_list->at(_print_inlining).set_cg(cg);
  121.44 +      _print_inlining++;
  121.45 +      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
  121.46 +    }
  121.47 +  }
  121.48 +
  121.49 +  void print_inlining_insert(CallGenerator* cg) {
  121.50 +    if (PrintInlining) {
  121.51 +      for (int i = 0; i < _print_inlining_list->length(); i++) {
  121.52 +        if (_print_inlining_list->at(i).cg() == cg) {
  121.53 +          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
  121.54 +          _print_inlining = i+1;
  121.55 +          _print_inlining_list->at(i).set_cg(NULL);
  121.56 +          return;
  121.57 +        }
  121.58 +      }
  121.59 +      ShouldNotReachHere();
  121.60 +    }
  121.61 +  }
  121.62 +
  121.63 +  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
  121.64 +    stringStream ss;
  121.65 +    CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
  121.66 +    print_inlining_stream()->print(ss.as_string());
  121.67 +  }
  121.68 +
  121.69 + private:
  121.70    // Matching, CFG layout, allocation, code generation
  121.71    PhaseCFG*             _cfg;                   // Results of CFG finding
  121.72    bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  121.73 @@ -591,7 +647,7 @@
  121.74    void         reset_dead_node_list()      { _dead_node_list.Reset();
  121.75                                               _dead_node_count = 0;
  121.76                                             }
  121.77 -  uint          live_nodes()               {
  121.78 +  uint          live_nodes() const         {
  121.79      int  val = _unique - _dead_node_count;
  121.80      assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _unique, _dead_node_count));
  121.81              return (uint) val;
  121.82 @@ -702,7 +758,7 @@
  121.83  
  121.84    void              identify_useful_nodes(Unique_Node_List &useful);
  121.85    void              update_dead_node_list(Unique_Node_List &useful);
  121.86 -  void              remove_useless_nodes  (Unique_Node_List &useful);
  121.87 +  void              remove_useless_nodes (Unique_Node_List &useful);
  121.88  
  121.89    WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  121.90    void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  121.91 @@ -711,6 +767,8 @@
  121.92    // Record this CallGenerator for inlining at the end of parsing.
  121.93    void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }
  121.94  
  121.95 +  void dump_inlining();
  121.96 +
  121.97    // Matching, CFG layout, allocation, code generation
  121.98    PhaseCFG*         cfg()                       { return _cfg; }
  121.99    bool              select_24_bit_instr() const { return _select_24_bit_instr; }
   122.1 --- a/src/share/vm/opto/doCall.cpp	Tue Jan 08 14:04:25 2013 -0500
   122.2 +++ b/src/share/vm/opto/doCall.cpp	Tue Jan 08 11:39:53 2013 -0800
   122.3 @@ -40,19 +40,24 @@
   122.4  #include "prims/nativeLookup.hpp"
   122.5  #include "runtime/sharedRuntime.hpp"
   122.6  
   122.7 -void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
   122.8 +void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
   122.9    if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
  122.10 +    outputStream* out = tty;
  122.11      if (!PrintInlining) {
  122.12        if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
  122.13          method->print_short_name();
  122.14          tty->cr();
  122.15        }
  122.16        CompileTask::print_inlining(prof_method, depth, bci);
  122.17 +    } else {
  122.18 +      out = C->print_inlining_stream();
  122.19      }
  122.20 -    CompileTask::print_inline_indent(depth);
  122.21 -    tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  122.22 -    prof_klass->name()->print_symbol();
  122.23 -    tty->cr();
  122.24 +    CompileTask::print_inline_indent(depth, out);
  122.25 +    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
  122.26 +    stringStream ss;
  122.27 +    prof_klass->name()->print_symbol_on(&ss);
  122.28 +    out->print(ss.as_string());
  122.29 +    out->cr();
  122.30    }
  122.31  }
  122.32  
  122.33 @@ -233,13 +238,13 @@
  122.34            }
  122.35            if (miss_cg != NULL) {
  122.36              if (next_hit_cg != NULL) {
  122.37 -              trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
  122.38 +              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
  122.39                // We don't need to record dependency on a receiver here and below.
  122.40                // Whenever we inline, the dependency is added by Parse::Parse().
  122.41                miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
  122.42              }
  122.43              if (miss_cg != NULL) {
  122.44 -              trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
  122.45 +              trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
  122.46                CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
  122.47                if (cg != NULL)  return cg;
  122.48              }
   123.1 --- a/src/share/vm/opto/graphKit.cpp	Tue Jan 08 14:04:25 2013 -0500
   123.2 +++ b/src/share/vm/opto/graphKit.cpp	Tue Jan 08 11:39:53 2013 -0800
   123.3 @@ -1771,11 +1771,21 @@
   123.4    CallProjections callprojs;
   123.5    call->extract_projections(&callprojs, true);
   123.6  
   123.7 +  Node* init_mem = call->in(TypeFunc::Memory);
   123.8 +  Node* final_mem = final_state->in(TypeFunc::Memory);
   123.9 +  Node* final_ctl = final_state->in(TypeFunc::Control);
  123.10 +  Node* final_io = final_state->in(TypeFunc::I_O);
  123.11 +
  123.12    // Replace all the old call edges with the edges from the inlining result
  123.13 -  C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
  123.14 -  C->gvn_replace_by(callprojs.fallthrough_memproj,   final_state->in(TypeFunc::Memory));
  123.15 -  C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_state->in(TypeFunc::I_O));
  123.16 -  Node* final_mem = final_state->in(TypeFunc::Memory);
  123.17 +  if (callprojs.fallthrough_catchproj != NULL) {
  123.18 +    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
  123.19 +  }
  123.20 +  if (callprojs.fallthrough_memproj != NULL) {
  123.21 +    C->gvn_replace_by(callprojs.fallthrough_memproj,   final_mem);
  123.22 +  }
  123.23 +  if (callprojs.fallthrough_ioproj != NULL) {
  123.24 +    C->gvn_replace_by(callprojs.fallthrough_ioproj,    final_io);
  123.25 +  }
  123.26  
  123.27    // Replace the result with the new result if it exists and is used
  123.28    if (callprojs.resproj != NULL && result != NULL) {
  123.29 @@ -2980,7 +2990,7 @@
  123.30    set_control( _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Control) ) );
  123.31    // create memory projection for i_o
  123.32    set_memory ( _gvn.transform( new (C) ProjNode(allocx, TypeFunc::Memory, true) ), rawidx );
  123.33 -  make_slow_call_ex(allocx, env()->OutOfMemoryError_klass(), true);
  123.34 +  make_slow_call_ex(allocx, env()->Throwable_klass(), true);
  123.35  
  123.36    // create a memory projection as for the normal control path
  123.37    Node* malloc = _gvn.transform(new (C) ProjNode(allocx, TypeFunc::Memory));
   124.1 --- a/src/share/vm/opto/library_call.cpp	Tue Jan 08 14:04:25 2013 -0500
   124.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Jan 08 11:39:53 2013 -0800
   124.3 @@ -282,6 +282,7 @@
   124.4    typedef enum { LS_xadd, LS_xchg, LS_cmpxchg } LoadStoreKind;
   124.5    bool inline_unsafe_load_store(BasicType type,  LoadStoreKind kind);
   124.6    bool inline_unsafe_ordered_store(BasicType type);
   124.7 +  bool inline_unsafe_fence(vmIntrinsics::ID id);
   124.8    bool inline_fp_conversions(vmIntrinsics::ID id);
   124.9    bool inline_number_methods(vmIntrinsics::ID id);
  124.10    bool inline_reference_get();
  124.11 @@ -334,6 +335,9 @@
  124.12      case vmIntrinsics::_getAndSetInt:
  124.13      case vmIntrinsics::_getAndSetLong:
  124.14      case vmIntrinsics::_getAndSetObject:
  124.15 +    case vmIntrinsics::_loadFence:
  124.16 +    case vmIntrinsics::_storeFence:
  124.17 +    case vmIntrinsics::_fullFence:
  124.18        break;  // InlineNatives does not control String.compareTo
  124.19      case vmIntrinsics::_Reference_get:
  124.20        break;  // InlineNatives does not control Reference.get
  124.21 @@ -412,16 +416,16 @@
  124.22      break;
  124.23  
  124.24    case vmIntrinsics::_reverseBytes_c:
  124.25 -    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return false;
  124.26 +    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
  124.27      break;
  124.28    case vmIntrinsics::_reverseBytes_s:
  124.29 -    if (!Matcher::match_rule_supported(Op_ReverseBytesS))  return false;
  124.30 +    if (!Matcher::match_rule_supported(Op_ReverseBytesS))  return NULL;
  124.31      break;
  124.32    case vmIntrinsics::_reverseBytes_i:
  124.33 -    if (!Matcher::match_rule_supported(Op_ReverseBytesI))  return false;
  124.34 +    if (!Matcher::match_rule_supported(Op_ReverseBytesI))  return NULL;
  124.35      break;
  124.36    case vmIntrinsics::_reverseBytes_l:
  124.37 -    if (!Matcher::match_rule_supported(Op_ReverseBytesL))  return false;
  124.38 +    if (!Matcher::match_rule_supported(Op_ReverseBytesL))  return NULL;
  124.39      break;
  124.40  
  124.41    case vmIntrinsics::_Reference_get:
  124.42 @@ -536,7 +540,7 @@
  124.43    // Try to inline the intrinsic.
  124.44    if (kit.try_to_inline()) {
  124.45      if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  124.46 -      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  124.47 +      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  124.48      }
  124.49      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
  124.50      if (C->log()) {
  124.51 @@ -555,7 +559,7 @@
  124.52      if (jvms->has_method()) {
  124.53        // Not a root compile.
  124.54        const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
  124.55 -      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
  124.56 +      C->print_inlining(callee, jvms->depth() - 1, bci, msg);
  124.57      } else {
  124.58        // Root compile
  124.59        tty->print("Did not generate intrinsic %s%s at bci:%d in",
  124.60 @@ -585,7 +589,7 @@
  124.61    Node* slow_ctl = kit.try_to_predicate();
  124.62    if (!kit.failing()) {
  124.63      if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
  124.64 -      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  124.65 +      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
  124.66      }
  124.67      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
  124.68      if (C->log()) {
  124.69 @@ -602,12 +606,12 @@
  124.70      if (jvms->has_method()) {
  124.71        // Not a root compile.
  124.72        const char* msg = "failed to generate predicate for intrinsic";
  124.73 -      CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
  124.74 +      C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
  124.75      } else {
  124.76        // Root compile
  124.77 -      tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
  124.78 -               vmIntrinsics::name_at(intrinsic_id()),
  124.79 -               (is_virtual() ? " (virtual)" : ""), bci);
  124.80 +      C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
  124.81 +                                        vmIntrinsics::name_at(intrinsic_id()),
  124.82 +                                        (is_virtual() ? " (virtual)" : ""), bci);
  124.83      }
  124.84    }
  124.85    C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
  124.86 @@ -732,6 +736,10 @@
  124.87    case vmIntrinsics::_getAndSetLong:            return inline_unsafe_load_store(T_LONG,   LS_xchg);
  124.88    case vmIntrinsics::_getAndSetObject:          return inline_unsafe_load_store(T_OBJECT, LS_xchg);
  124.89  
  124.90 +  case vmIntrinsics::_loadFence:
  124.91 +  case vmIntrinsics::_storeFence:
  124.92 +  case vmIntrinsics::_fullFence:                return inline_unsafe_fence(intrinsic_id());
  124.93 +
  124.94    case vmIntrinsics::_currentThread:            return inline_native_currentThread();
  124.95    case vmIntrinsics::_isInterrupted:            return inline_native_isInterrupted();
  124.96  
  124.97 @@ -2840,6 +2848,26 @@
  124.98    return true;
  124.99  }
 124.100  
 124.101 +bool LibraryCallKit::inline_unsafe_fence(vmIntrinsics::ID id) {
 124.102 +  // Regardless of form, don't allow previous ld/st to move down,
 124.103 +  // then issue acquire, release, or volatile mem_bar.
 124.104 +  insert_mem_bar(Op_MemBarCPUOrder);
 124.105 +  switch(id) {
 124.106 +    case vmIntrinsics::_loadFence:
 124.107 +      insert_mem_bar(Op_MemBarAcquire);
 124.108 +      return true;
 124.109 +    case vmIntrinsics::_storeFence:
 124.110 +      insert_mem_bar(Op_MemBarRelease);
 124.111 +      return true;
 124.112 +    case vmIntrinsics::_fullFence:
 124.113 +      insert_mem_bar(Op_MemBarVolatile);
 124.114 +      return true;
 124.115 +    default:
 124.116 +      fatal_unexpected_iid(id);
 124.117 +      return false;
 124.118 +  }
 124.119 +}
 124.120 +
 124.121  //----------------------------inline_unsafe_allocate---------------------------
 124.122  // public native Object sun.mics.Unsafe.allocateInstance(Class<?> cls);
 124.123  bool LibraryCallKit::inline_unsafe_allocate() {
 124.124 @@ -2952,14 +2980,23 @@
 124.125  
 124.126    // We only go to the fast case code if we pass two guards.
 124.127    // Paths which do not pass are accumulated in the slow_region.
 124.128 +
 124.129 +  enum {
 124.130 +    no_int_result_path   = 1, // t == Thread.current() && !TLS._osthread._interrupted
 124.131 +    no_clear_result_path = 2, // t == Thread.current() &&  TLS._osthread._interrupted && !clear_int
 124.132 +    slow_result_path     = 3, // slow path: t.isInterrupted(clear_int)
 124.133 +    PATH_LIMIT
 124.134 +  };
 124.135 +
 124.136 +  // Ensure that it's not possible to move the load of TLS._osthread._interrupted flag
 124.137 +  // out of the function.
 124.138 +  insert_mem_bar(Op_MemBarCPUOrder);
 124.139 +
 124.140 +  RegionNode* result_rgn = new (C) RegionNode(PATH_LIMIT);
 124.141 +  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
 124.142 +
 124.143    RegionNode* slow_region = new (C) RegionNode(1);
 124.144    record_for_igvn(slow_region);
 124.145 -  RegionNode* result_rgn = new (C) RegionNode(1+3); // fast1, fast2, slow
 124.146 -  PhiNode*    result_val = new (C) PhiNode(result_rgn, TypeInt::BOOL);
 124.147 -  enum { no_int_result_path   = 1,
 124.148 -         no_clear_result_path = 2,
 124.149 -         slow_result_path     = 3
 124.150 -  };
 124.151  
 124.152    // (a) Receiving thread must be the current thread.
 124.153    Node* rec_thr = argument(0);
 124.154 @@ -2968,14 +3005,13 @@
 124.155    Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) );
 124.156    Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) );
 124.157  
 124.158 -  bool known_current_thread = (_gvn.type(bol_thr) == TypeInt::ZERO);
 124.159 -  if (!known_current_thread)
 124.160 -    generate_slow_guard(bol_thr, slow_region);
 124.161 +  generate_slow_guard(bol_thr, slow_region);
 124.162  
 124.163    // (b) Interrupt bit on TLS must be false.
 124.164    Node* p = basic_plus_adr(top()/*!oop*/, tls_ptr, in_bytes(JavaThread::osthread_offset()));
 124.165    Node* osthread = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS);
 124.166    p = basic_plus_adr(top()/*!oop*/, osthread, in_bytes(OSThread::interrupted_offset()));
 124.167 +
 124.168    // Set the control input on the field _interrupted read to prevent it floating up.
 124.169    Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT);
 124.170    Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) );
 124.171 @@ -3020,22 +3056,20 @@
 124.172      Node* slow_val = set_results_for_java_call(slow_call);
 124.173      // this->control() comes from set_results_for_java_call
 124.174  
 124.175 -    // If we know that the result of the slow call will be true, tell the optimizer!
 124.176 -    if (known_current_thread)  slow_val = intcon(1);
 124.177 -
 124.178      Node* fast_io  = slow_call->in(TypeFunc::I_O);
 124.179      Node* fast_mem = slow_call->in(TypeFunc::Memory);
 124.180 +
 124.181      // These two phis are pre-filled with copies of of the fast IO and Memory
 124.182 -    Node* io_phi   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
 124.183 -    Node* mem_phi  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
 124.184 +    PhiNode* result_mem  = PhiNode::make(result_rgn, fast_mem, Type::MEMORY, TypePtr::BOTTOM);
 124.185 +    PhiNode* result_io   = PhiNode::make(result_rgn, fast_io,  Type::ABIO);
 124.186  
 124.187      result_rgn->init_req(slow_result_path, control());
 124.188 -    io_phi    ->init_req(slow_result_path, i_o());
 124.189 -    mem_phi   ->init_req(slow_result_path, reset_memory());
 124.190 +    result_io ->init_req(slow_result_path, i_o());
 124.191 +    result_mem->init_req(slow_result_path, reset_memory());
 124.192      result_val->init_req(slow_result_path, slow_val);
 124.193  
 124.194 -    set_all_memory( _gvn.transform(mem_phi) );
 124.195 -    set_i_o(        _gvn.transform(io_phi) );
 124.196 +    set_all_memory(_gvn.transform(result_mem));
 124.197 +    set_i_o(       _gvn.transform(result_io));
 124.198    }
 124.199  
 124.200    C->set_has_split_ifs(true); // Has chance for split-if optimization
 124.201 @@ -3319,7 +3353,7 @@
 124.202      Node* arg = args[which_arg];
 124.203      arg = null_check(arg);
 124.204      if (stopped())  break;
 124.205 -    args[which_arg] = _gvn.transform(arg);
 124.206 +    args[which_arg] = arg;
 124.207  
 124.208      Node* p = basic_plus_adr(arg, class_klass_offset);
 124.209      Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
   125.1 --- a/src/share/vm/opto/node.cpp	Tue Jan 08 14:04:25 2013 -0500
   125.2 +++ b/src/share/vm/opto/node.cpp	Tue Jan 08 11:39:53 2013 -0800
   125.3 @@ -1839,15 +1839,16 @@
   125.4    return idx;                   // True for other than index 0 (control)
   125.5  }
   125.6  
   125.7 +static RegMask _not_used_at_all;
   125.8  // Register classes are defined for specific machines
   125.9  const RegMask &Node::out_RegMask() const {
  125.10    ShouldNotCallThis();
  125.11 -  return *(new RegMask());
  125.12 +  return _not_used_at_all;
  125.13  }
  125.14  
  125.15  const RegMask &Node::in_RegMask(uint) const {
  125.16    ShouldNotCallThis();
  125.17 -  return *(new RegMask());
  125.18 +  return _not_used_at_all;
  125.19  }
  125.20  
  125.21  //=============================================================================
   126.1 --- a/src/share/vm/opto/parse3.cpp	Tue Jan 08 14:04:25 2013 -0500
   126.2 +++ b/src/share/vm/opto/parse3.cpp	Tue Jan 08 11:39:53 2013 -0800
   126.3 @@ -509,6 +509,7 @@
   126.4                            makecon(TypeKlassPtr::make(array_klass)),
   126.5                            dims);
   126.6    }
   126.7 +  make_slow_call_ex(c, env()->Throwable_klass(), false);
   126.8  
   126.9    Node* res = _gvn.transform(new (C) ProjNode(c, TypeFunc::Parms));
  126.10  
   127.1 --- a/src/share/vm/opto/runtime.cpp	Tue Jan 08 14:04:25 2013 -0500
   127.2 +++ b/src/share/vm/opto/runtime.cpp	Tue Jan 08 11:39:53 2013 -0800
   127.3 @@ -989,7 +989,7 @@
   127.4        // since we're notifying the VM on every catch.
   127.5        // Force deoptimization and the rest of the lookup
   127.6        // will be fine.
   127.7 -      deoptimize_caller_frame(thread, true);
   127.8 +      deoptimize_caller_frame(thread);
   127.9      }
  127.10  
  127.11      // Check the stack guard pages.  If enabled, look for handler in this frame;
  127.12 @@ -1143,17 +1143,22 @@
  127.13  
  127.14  
  127.15  void OptoRuntime::deoptimize_caller_frame(JavaThread *thread, bool doit) {
  127.16 -  // Deoptimize frame
  127.17 -  if (doit) {
  127.18 -    // Called from within the owner thread, so no need for safepoint
  127.19 -    RegisterMap reg_map(thread);
  127.20 -    frame stub_frame = thread->last_frame();
  127.21 -    assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  127.22 -    frame caller_frame = stub_frame.sender(&reg_map);
  127.23 +  // Deoptimize the caller before continuing, as the compiled
  127.24 +  // exception handler table may not be valid.
  127.25 +  if (!StressCompiledExceptionHandlers && doit) {
  127.26 +    deoptimize_caller_frame(thread);
  127.27 +  }
  127.28 +}
  127.29  
  127.30 -    // Deoptimize the caller frame.
  127.31 -    Deoptimization::deoptimize_frame(thread, caller_frame.id());
  127.32 -  }
  127.33 +void OptoRuntime::deoptimize_caller_frame(JavaThread *thread) {
  127.34 +  // Called from within the owner thread, so no need for safepoint
  127.35 +  RegisterMap reg_map(thread);
  127.36 +  frame stub_frame = thread->last_frame();
  127.37 +  assert(stub_frame.is_runtime_frame() || exception_blob()->contains(stub_frame.pc()), "sanity check");
  127.38 +  frame caller_frame = stub_frame.sender(&reg_map);
  127.39 +
  127.40 +  // Deoptimize the caller frame.
  127.41 +  Deoptimization::deoptimize_frame(thread, caller_frame.id());
  127.42  }
  127.43  
  127.44  
   128.1 --- a/src/share/vm/opto/runtime.hpp	Tue Jan 08 14:04:25 2013 -0500
   128.2 +++ b/src/share/vm/opto/runtime.hpp	Tue Jan 08 11:39:53 2013 -0800
   128.3 @@ -174,6 +174,7 @@
   128.4    static address handle_exception_C       (JavaThread* thread);
   128.5    static address handle_exception_C_helper(JavaThread* thread, nmethod*& nm);
   128.6    static address rethrow_C                (oopDesc* exception, JavaThread *thread, address return_pc );
   128.7 +  static void deoptimize_caller_frame     (JavaThread *thread);
   128.8    static void deoptimize_caller_frame     (JavaThread *thread, bool doit);
   128.9    static bool is_deoptimized_caller_frame (JavaThread *thread);
  128.10  
   129.1 --- a/src/share/vm/opto/stringopts.cpp	Tue Jan 08 14:04:25 2013 -0500
   129.2 +++ b/src/share/vm/opto/stringopts.cpp	Tue Jan 08 11:39:53 2013 -0800
   129.3 @@ -744,7 +744,9 @@
   129.4        ctrl_path.push(cn);
   129.5        ctrl_path.push(cn->proj_out(0));
   129.6        ctrl_path.push(cn->proj_out(0)->unique_out());
   129.7 -      ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
   129.8 +      if (cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0) != NULL) {
   129.9 +        ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
  129.10 +      }
  129.11      } else {
  129.12        ShouldNotReachHere();
  129.13      }
  129.14 @@ -762,6 +764,12 @@
  129.15      } else if (ptr->is_IfTrue()) {
  129.16        IfNode* iff = ptr->in(0)->as_If();
  129.17        BoolNode* b = iff->in(1)->isa_Bool();
  129.18 +
  129.19 +      if (b == NULL) {
  129.20 +        fail = true;
  129.21 +        break;
  129.22 +      }
  129.23 +
  129.24        Node* cmp = b->in(1);
  129.25        Node* v1 = cmp->in(1);
  129.26        Node* v2 = cmp->in(2);
  129.27 @@ -1408,71 +1416,76 @@
  129.28                        Deoptimization::Action_make_not_entrant);
  129.29    }
  129.30  
  129.31 -  // length now contains the number of characters needed for the
  129.32 -  // char[] so create a new AllocateArray for the char[]
  129.33 -  Node* char_array = NULL;
  129.34 -  {
  129.35 -    PreserveReexecuteState preexecs(&kit);
  129.36 -    // The original jvms is for an allocation of either a String or
  129.37 -    // StringBuffer so no stack adjustment is necessary for proper
  129.38 -    // reexecution.  If we deoptimize in the slow path the bytecode
  129.39 -    // will be reexecuted and the char[] allocation will be thrown away.
  129.40 -    kit.jvms()->set_should_reexecute(true);
  129.41 -    char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
  129.42 -                               length, 1);
  129.43 +  Node* result;
  129.44 +  if (!kit.stopped()) {
  129.45 +
  129.46 +    // length now contains the number of characters needed for the
  129.47 +    // char[] so create a new AllocateArray for the char[]
  129.48 +    Node* char_array = NULL;
  129.49 +    {
  129.50 +      PreserveReexecuteState preexecs(&kit);
  129.51 +      // The original jvms is for an allocation of either a String or
  129.52 +      // StringBuffer so no stack adjustment is necessary for proper
  129.53 +      // reexecution.  If we deoptimize in the slow path the bytecode
  129.54 +      // will be reexecuted and the char[] allocation will be thrown away.
  129.55 +      kit.jvms()->set_should_reexecute(true);
  129.56 +      char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
  129.57 +                                 length, 1);
  129.58 +    }
  129.59 +
  129.60 +    // Mark the allocation so that zeroing is skipped since the code
  129.61 +    // below will overwrite the entire array
  129.62 +    AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
  129.63 +    char_alloc->maybe_set_complete(_gvn);
  129.64 +
  129.65 +    // Now copy the string representations into the final char[]
  129.66 +    Node* start = __ intcon(0);
  129.67 +    for (int argi = 0; argi < sc->num_arguments(); argi++) {
  129.68 +      Node* arg = sc->argument(argi);
  129.69 +      switch (sc->mode(argi)) {
  129.70 +        case StringConcat::IntMode: {
  129.71 +          Node* end = __ AddI(start, string_sizes->in(argi));
  129.72 +          // getChars words backwards so pass the ending point as well as the start
  129.73 +          int_getChars(kit, arg, char_array, start, end);
  129.74 +          start = end;
  129.75 +          break;
  129.76 +        }
  129.77 +        case StringConcat::StringNullCheckMode:
  129.78 +        case StringConcat::StringMode: {
  129.79 +          start = copy_string(kit, arg, char_array, start);
  129.80 +          break;
  129.81 +        }
  129.82 +        case StringConcat::CharMode: {
  129.83 +          __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
  129.84 +                             arg, T_CHAR, char_adr_idx);
  129.85 +          start = __ AddI(start, __ intcon(1));
  129.86 +          break;
  129.87 +        }
  129.88 +        default:
  129.89 +          ShouldNotReachHere();
  129.90 +      }
  129.91 +    }
  129.92 +
  129.93 +    // If we're not reusing an existing String allocation then allocate one here.
  129.94 +    result = sc->string_alloc();
  129.95 +    if (result == NULL) {
  129.96 +      PreserveReexecuteState preexecs(&kit);
  129.97 +      // The original jvms is for an allocation of either a String or
  129.98 +      // StringBuffer so no stack adjustment is necessary for proper
  129.99 +      // reexecution.
 129.100 +      kit.jvms()->set_should_reexecute(true);
 129.101 +      result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
 129.102 +    }
 129.103 +
 129.104 +    // Intialize the string
 129.105 +    if (java_lang_String::has_offset_field()) {
 129.106 +      kit.store_String_offset(kit.control(), result, __ intcon(0));
 129.107 +      kit.store_String_length(kit.control(), result, length);
 129.108 +    }
 129.109 +    kit.store_String_value(kit.control(), result, char_array);
 129.110 +  } else {
 129.111 +    result = C->top();
 129.112    }
 129.113 -
 129.114 -  // Mark the allocation so that zeroing is skipped since the code
 129.115 -  // below will overwrite the entire array
 129.116 -  AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
 129.117 -  char_alloc->maybe_set_complete(_gvn);
 129.118 -
 129.119 -  // Now copy the string representations into the final char[]
 129.120 -  Node* start = __ intcon(0);
 129.121 -  for (int argi = 0; argi < sc->num_arguments(); argi++) {
 129.122 -    Node* arg = sc->argument(argi);
 129.123 -    switch (sc->mode(argi)) {
 129.124 -      case StringConcat::IntMode: {
 129.125 -        Node* end = __ AddI(start, string_sizes->in(argi));
 129.126 -        // getChars words backwards so pass the ending point as well as the start
 129.127 -        int_getChars(kit, arg, char_array, start, end);
 129.128 -        start = end;
 129.129 -        break;
 129.130 -      }
 129.131 -      case StringConcat::StringNullCheckMode:
 129.132 -      case StringConcat::StringMode: {
 129.133 -        start = copy_string(kit, arg, char_array, start);
 129.134 -        break;
 129.135 -      }
 129.136 -      case StringConcat::CharMode: {
 129.137 -        __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
 129.138 -                           arg, T_CHAR, char_adr_idx);
 129.139 -        start = __ AddI(start, __ intcon(1));
 129.140 -        break;
 129.141 -      }
 129.142 -      default:
 129.143 -        ShouldNotReachHere();
 129.144 -    }
 129.145 -  }
 129.146 -
 129.147 -  // If we're not reusing an existing String allocation then allocate one here.
 129.148 -  Node* result = sc->string_alloc();
 129.149 -  if (result == NULL) {
 129.150 -    PreserveReexecuteState preexecs(&kit);
 129.151 -    // The original jvms is for an allocation of either a String or
 129.152 -    // StringBuffer so no stack adjustment is necessary for proper
 129.153 -    // reexecution.
 129.154 -    kit.jvms()->set_should_reexecute(true);
 129.155 -    result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
 129.156 -  }
 129.157 -
 129.158 -  // Intialize the string
 129.159 -  if (java_lang_String::has_offset_field()) {
 129.160 -    kit.store_String_offset(kit.control(), result, __ intcon(0));
 129.161 -    kit.store_String_length(kit.control(), result, length);
 129.162 -  }
 129.163 -  kit.store_String_value(kit.control(), result, char_array);
 129.164 -
 129.165    // hook up the outgoing control and result
 129.166    kit.replace_call(sc->end(), result);
 129.167  
   130.1 --- a/src/share/vm/prims/jvm.cpp	Tue Jan 08 14:04:25 2013 -0500
   130.2 +++ b/src/share/vm/prims/jvm.cpp	Tue Jan 08 11:39:53 2013 -0800
   130.3 @@ -1573,6 +1573,23 @@
   130.4      Annotations::make_java_array(m->parameter_annotations(), THREAD));
   130.5  JVM_END
   130.6  
   130.7 +/* Type use annotations support (JDK 1.8) */
   130.8 +
   130.9 +JVM_ENTRY(jbyteArray, JVM_GetClassTypeAnnotations(JNIEnv *env, jclass cls))
  130.10 +  assert (cls != NULL, "illegal class");
  130.11 +  JVMWrapper("JVM_GetClassTypeAnnotations");
  130.12 +  ResourceMark rm(THREAD);
  130.13 +  // Return null for arrays and primitives
  130.14 +  if (!java_lang_Class::is_primitive(JNIHandles::resolve(cls))) {
  130.15 +    Klass* k = java_lang_Class::as_Klass(JNIHandles::resolve(cls));
  130.16 +    if (k->oop_is_instance()) {
  130.17 +      typeArrayOop a = Annotations::make_java_array(InstanceKlass::cast(k)->type_annotations()->class_annotations(), CHECK_NULL);
  130.18 +      return (jbyteArray) JNIHandles::make_local(env, a);
  130.19 +    }
  130.20 +  }
  130.21 +  return NULL;
  130.22 +JVM_END
  130.23 +
  130.24  
  130.25  // New (JDK 1.4) reflection implementation /////////////////////////////////////
  130.26  
   131.1 --- a/src/share/vm/prims/jvm.h	Tue Jan 08 14:04:25 2013 -0500
   131.2 +++ b/src/share/vm/prims/jvm.h	Tue Jan 08 11:39:53 2013 -0800
   131.3 @@ -519,6 +519,10 @@
   131.4  JNIEXPORT jbyteArray JNICALL
   131.5  JVM_GetMethodParameterAnnotations(JNIEnv *env, jobject method);
   131.6  
   131.7 +/* Type use annotations support (JDK 1.8) */
   131.8 +
   131.9 +JNIEXPORT jbyteArray JNICALL
  131.10 +JVM_GetClassTypeAnnotations(JNIEnv *env, jclass cls);
  131.11  
  131.12  /*
  131.13   * New (JDK 1.4) reflection implementation
   132.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Jan 08 14:04:25 2013 -0500
   132.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Jan 08 11:39:53 2013 -0800
   132.3 @@ -1043,7 +1043,7 @@
   132.4  
   132.5      Rewriter::rewrite(scratch_class, THREAD);
   132.6      if (!HAS_PENDING_EXCEPTION) {
   132.7 -      Rewriter::relocate_and_link(scratch_class, THREAD);
   132.8 +      scratch_class->link_methods(THREAD);
   132.9      }
  132.10      if (HAS_PENDING_EXCEPTION) {
  132.11        Symbol* ex_name = PENDING_EXCEPTION->klass()->name();
  132.12 @@ -3338,7 +3338,20 @@
  132.13      the_class->set_access_flags(flags);
  132.14    }
  132.15  
  132.16 -  // Replace annotation fields value
  132.17 +  // Since there is currently no rewriting of type annotations indexes
  132.18 +  // into the CP, we null out type annotations on scratch_class before
  132.19 +  // we swap annotations with the_class rather than facing the
  132.20 +  // possibility of shipping annotations with broken indexes to
  132.21 +  // Java-land.
  132.22 +  Annotations* new_annotations = scratch_class->annotations();
  132.23 +  if (new_annotations != NULL) {
  132.24 +    Annotations* new_type_annotations = new_annotations->type_annotations();
  132.25 +    if (new_type_annotations != NULL) {
  132.26 +      MetadataFactory::free_metadata(scratch_class->class_loader_data(), new_type_annotations);
  132.27 +      new_annotations->set_type_annotations(NULL);
  132.28 +    }
  132.29 +  }
  132.30 +  // Swap annotation fields values
  132.31    Annotations* old_annotations = the_class->annotations();
  132.32    the_class->set_annotations(scratch_class->annotations());
  132.33    scratch_class->set_annotations(old_annotations);
   133.1 --- a/src/share/vm/prims/methodHandles.cpp	Tue Jan 08 14:04:25 2013 -0500
   133.2 +++ b/src/share/vm/prims/methodHandles.cpp	Tue Jan 08 11:39:53 2013 -0800
   133.3 @@ -1168,8 +1168,8 @@
   133.4      // Walk all nmethods depending on this call site.
   133.5      MutexLocker mu(Compile_lock, thread);
   133.6      Universe::flush_dependents_on(call_site, target);
   133.7 +    java_lang_invoke_CallSite::set_target(call_site(), target());
   133.8    }
   133.9 -  java_lang_invoke_CallSite::set_target(call_site(), target());
  133.10  }
  133.11  JVM_END
  133.12  
  133.13 @@ -1180,8 +1180,8 @@
  133.14      // Walk all nmethods depending on this call site.
  133.15      MutexLocker mu(Compile_lock, thread);
  133.16      Universe::flush_dependents_on(call_site, target);
  133.17 +    java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
  133.18    }
  133.19 -  java_lang_invoke_CallSite::set_target_volatile(call_site(), target());
  133.20  }
  133.21  JVM_END
  133.22  
   134.1 --- a/src/share/vm/prims/unsafe.cpp	Tue Jan 08 14:04:25 2013 -0500
   134.2 +++ b/src/share/vm/prims/unsafe.cpp	Tue Jan 08 11:39:53 2013 -0800
   134.3 @@ -468,6 +468,21 @@
   134.4  #endif
   134.5  UNSAFE_END
   134.6  
   134.7 +UNSAFE_ENTRY(void, Unsafe_LoadFence(JNIEnv *env, jobject unsafe))
   134.8 +  UnsafeWrapper("Unsafe_LoadFence");
   134.9 +  OrderAccess::acquire();
  134.10 +UNSAFE_END
  134.11 +
  134.12 +UNSAFE_ENTRY(void, Unsafe_StoreFence(JNIEnv *env, jobject unsafe))
  134.13 +  UnsafeWrapper("Unsafe_StoreFence");
  134.14 +  OrderAccess::release();
  134.15 +UNSAFE_END
  134.16 +
  134.17 +UNSAFE_ENTRY(void, Unsafe_FullFence(JNIEnv *env, jobject unsafe))
  134.18 +  UnsafeWrapper("Unsafe_FullFence");
  134.19 +  OrderAccess::fence();
  134.20 +UNSAFE_END
  134.21 +
  134.22  ////// Data in the C heap.
  134.23  
  134.24  // Note:  These do not throw NullPointerException for bad pointers.
  134.25 @@ -1550,6 +1565,9 @@
  134.26      {CC"putOrderedObject",   CC"("OBJ"J"OBJ")V",         FN_PTR(Unsafe_SetOrderedObject)},
  134.27      {CC"putOrderedInt",      CC"("OBJ"JI)V",             FN_PTR(Unsafe_SetOrderedInt)},
  134.28      {CC"putOrderedLong",     CC"("OBJ"JJ)V",             FN_PTR(Unsafe_SetOrderedLong)},
  134.29 +    {CC"loadFence",          CC"()V",                    FN_PTR(Unsafe_LoadFence)},
  134.30 +    {CC"storeFence",         CC"()V",                    FN_PTR(Unsafe_StoreFence)},
  134.31 +    {CC"fullFence",          CC"()V",                    FN_PTR(Unsafe_FullFence)},
  134.32      {CC"park",               CC"(ZJ)V",                  FN_PTR(Unsafe_Park)},
  134.33      {CC"unpark",             CC"("OBJ")V",               FN_PTR(Unsafe_Unpark)}
  134.34  
   135.1 --- a/src/share/vm/runtime/arguments.cpp	Tue Jan 08 14:04:25 2013 -0500
   135.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Jan 08 11:39:53 2013 -0800
   135.3 @@ -1499,13 +1499,12 @@
   135.4                       Abstract_VM_Version::parallel_worker_threads());
   135.5    }
   135.6  
   135.7 -  if (FLAG_IS_DEFAULT(MarkStackSize)) {
   135.8 -    FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE);
   135.9 -  }
  135.10 -  if (PrintGCDetails && Verbose) {
  135.11 -    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
  135.12 -      MarkStackSize / K, MarkStackSizeMax / K);
  135.13 -    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
  135.14 +  // MarkStackSize will be set (if it hasn't been set by the user)
  135.15 +  // when concurrent marking is initialized.
  135.16 +  // Its value will be based upon the number of parallel marking threads.
  135.17 +  // But we do set the maximum mark stack size here.
  135.18 +  if (FLAG_IS_DEFAULT(MarkStackSizeMax)) {
  135.19 +    FLAG_SET_DEFAULT(MarkStackSizeMax, 128 * TASKQUEUE_SIZE);
  135.20    }
  135.21  
  135.22    if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
  135.23 @@ -1517,6 +1516,12 @@
  135.24      // is allocation). We might consider increase it further.
  135.25      FLAG_SET_DEFAULT(GCTimeRatio, 9);
  135.26    }
  135.27 +
  135.28 +  if (PrintGCDetails && Verbose) {
  135.29 +    tty->print_cr("MarkStackSize: %uk  MarkStackSizeMax: %uk",
  135.30 +      MarkStackSize / K, MarkStackSizeMax / K);
  135.31 +    tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
  135.32 +  }
  135.33  }
  135.34  
  135.35  void Arguments::set_heap_size() {
  135.36 @@ -1980,6 +1985,9 @@
  135.37    status = status && verify_min_value(ClassMetaspaceSize, 1*M,
  135.38                                        "ClassMetaspaceSize");
  135.39  
  135.40 +  status = status && verify_interval(MarkStackSizeMax,
  135.41 +                                  1, (max_jint - 1), "MarkStackSizeMax");
  135.42 +
  135.43  #ifdef SPARC
  135.44    if (UseConcMarkSweepGC || UseG1GC) {
  135.45      // Issue a stern warning if the user has explicitly set
  135.46 @@ -3030,7 +3038,6 @@
  135.47    strcpy(shared_archive_path, jvm_path);
  135.48    strcat(shared_archive_path, os::file_separator());
  135.49    strcat(shared_archive_path, "classes");
  135.50 -  DEBUG_ONLY(strcat(shared_archive_path, "_g");)
  135.51    strcat(shared_archive_path, ".jsa");
  135.52    SharedArchivePath = shared_archive_path;
  135.53  
   136.1 --- a/src/share/vm/runtime/fieldDescriptor.cpp	Tue Jan 08 14:04:25 2013 -0500
   136.2 +++ b/src/share/vm/runtime/fieldDescriptor.cpp	Tue Jan 08 11:39:53 2013 -0800
   136.3 @@ -65,6 +65,17 @@
   136.4    return md->at(index());
   136.5  }
   136.6  
   136.7 +AnnotationArray* fieldDescriptor::type_annotations() const {
   136.8 +  InstanceKlass* ik = field_holder();
   136.9 +  Annotations* type_annos = ik->type_annotations();
  136.10 +  if (type_annos == NULL)
  136.11 +    return NULL;
  136.12 +  Array<AnnotationArray*>* md = type_annos->fields_annotations();
  136.13 +  if (md == NULL)
  136.14 +    return NULL;
  136.15 +  return md->at(index());
  136.16 +}
  136.17 +
  136.18  constantTag fieldDescriptor::initial_value_tag() const {
  136.19    return constants()->tag_at(initial_value_index());
  136.20  }
   137.1 --- a/src/share/vm/runtime/fieldDescriptor.hpp	Tue Jan 08 14:04:25 2013 -0500
   137.2 +++ b/src/share/vm/runtime/fieldDescriptor.hpp	Tue Jan 08 11:39:53 2013 -0800
   137.3 @@ -68,6 +68,7 @@
   137.4    Symbol* generic_signature()     const;
   137.5    int index()                     const    { return _index; }
   137.6    AnnotationArray* annotations()  const;
   137.7 +  AnnotationArray* type_annotations()  const;
   137.8  
   137.9    // Initial field value
  137.10    bool has_initial_value()        const    { return field()->initval_index() != 0; }
   138.1 --- a/src/share/vm/runtime/globals.hpp	Tue Jan 08 14:04:25 2013 -0500
   138.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Jan 08 11:39:53 2013 -0800
   138.3 @@ -922,6 +922,9 @@
   138.4    develop(bool, PrintExceptionHandlers, false,                              \
   138.5            "Print exception handler tables for all nmethods when generated") \
   138.6                                                                              \
   138.7 +  develop(bool, StressCompiledExceptionHandlers, false,                     \
   138.8 +         "Exercise compiled exception handlers")                            \
   138.9 +                                                                            \
  138.10    develop(bool, InterceptOSException, false,                                \
  138.11            "Starts debugger when an implicit OS (e.g., NULL) "               \
  138.12            "exception happens")                                              \
   139.1 --- a/src/share/vm/runtime/handles.inline.hpp	Tue Jan 08 14:04:25 2013 -0500
   139.2 +++ b/src/share/vm/runtime/handles.inline.hpp	Tue Jan 08 11:39:53 2013 -0800
   139.3 @@ -80,6 +80,8 @@
   139.4        _thread = Thread::current();                                     \
   139.5      }                                                                  \
   139.6      _thread->metadata_handles()->push((Metadata*)_value);              \
   139.7 +  } else {                                                             \
   139.8 +    _thread = NULL;                                                    \
   139.9    }                                                                    \
  139.10  }                                                                      \
  139.11  inline name##Handle& name##Handle::operator=(const name##Handle &s) {  \
  139.12 @@ -94,6 +96,8 @@
  139.13        _thread = Thread::current();                                     \
  139.14      }                                                                  \
  139.15      _thread->metadata_handles()->push((Metadata*)_value);              \
  139.16 +  } else {                                                             \
  139.17 +    _thread = NULL;                                                    \
  139.18    }                                                                    \
  139.19    return *this;                                                        \
  139.20  }                                                                      \
   140.1 --- a/src/share/vm/runtime/os.hpp	Tue Jan 08 14:04:25 2013 -0500
   140.2 +++ b/src/share/vm/runtime/os.hpp	Tue Jan 08 11:39:53 2013 -0800
   140.3 @@ -255,6 +255,7 @@
   140.4    static int    vm_allocation_granularity();
   140.5    static char*  reserve_memory(size_t bytes, char* addr = 0,
   140.6                                 size_t alignment_hint = 0);
   140.7 +  static char*  reserve_memory_aligned(size_t size, size_t alignment);
   140.8    static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
   140.9    static void   split_reserved_memory(char *base, size_t size,
  140.10                                        size_t split, bool realloc);
   141.1 --- a/src/share/vm/runtime/reflection.cpp	Tue Jan 08 14:04:25 2013 -0500
   141.2 +++ b/src/share/vm/runtime/reflection.cpp	Tue Jan 08 11:39:53 2013 -0800
   141.3 @@ -771,6 +771,10 @@
   141.4      typeArrayOop an_oop = Annotations::make_java_array(method->annotation_default(), CHECK_NULL);
   141.5      java_lang_reflect_Method::set_annotation_default(mh(), an_oop);
   141.6    }
   141.7 +  if (java_lang_reflect_Method::has_type_annotations_field()) {
   141.8 +    typeArrayOop an_oop = Annotations::make_java_array(method->type_annotations(), CHECK_NULL);
   141.9 +    java_lang_reflect_Method::set_type_annotations(mh(), an_oop);
  141.10 +  }
  141.11    return mh();
  141.12  }
  141.13  
  141.14 @@ -849,6 +853,10 @@
  141.15      typeArrayOop an_oop = Annotations::make_java_array(fd->annotations(), CHECK_NULL);
  141.16      java_lang_reflect_Field::set_annotations(rh(), an_oop);
  141.17    }
  141.18 +  if (java_lang_reflect_Field::has_type_annotations_field()) {
  141.19 +    typeArrayOop an_oop = Annotations::make_java_array(fd->type_annotations(), CHECK_NULL);
  141.20 +    java_lang_reflect_Field::set_type_annotations(rh(), an_oop);
  141.21 +  }
  141.22    return rh();
  141.23  }
  141.24  
   142.1 --- a/src/share/vm/runtime/thread.cpp	Tue Jan 08 14:04:25 2013 -0500
   142.2 +++ b/src/share/vm/runtime/thread.cpp	Tue Jan 08 11:39:53 2013 -0800
   142.3 @@ -2190,7 +2190,7 @@
   142.4            // BiasedLocking needs an updated RegisterMap for the revoke monitors pass
   142.5            RegisterMap reg_map(this, UseBiasedLocking);
   142.6            frame compiled_frame = f.sender(&reg_map);
   142.7 -          if (compiled_frame.can_be_deoptimized()) {
   142.8 +          if (!StressCompiledExceptionHandlers && compiled_frame.can_be_deoptimized()) {
   142.9              Deoptimization::deoptimize(this, compiled_frame, &reg_map);
  142.10            }
  142.11          }
  142.12 @@ -3527,11 +3527,12 @@
  142.13        java_lang_Thread::set_thread_status(thread_object,
  142.14                                            java_lang_Thread::RUNNABLE);
  142.15  
  142.16 -      // The VM preresolve methods to these classes. Make sure that get initialized
  142.17 +      // The VM creates & returns objects of this class. Make sure it's initialized.
  142.18 +      initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
  142.19 +
  142.20 +      // The VM preresolves methods to these classes. Make sure that they get initialized
  142.21        initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
  142.22        initialize_class(vmSymbols::java_lang_ref_Finalizer(),  CHECK_0);
  142.23 -      // The VM creates & returns objects of this class. Make sure it's initialized.
  142.24 -      initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
  142.25        call_initializeSystemClass(CHECK_0);
  142.26  
  142.27        // get the Java runtime name after java.lang.System is initialized
   143.1 --- a/src/share/vm/runtime/virtualspace.cpp	Tue Jan 08 14:04:25 2013 -0500
   143.2 +++ b/src/share/vm/runtime/virtualspace.cpp	Tue Jan 08 11:39:53 2013 -0800
   143.3 @@ -329,20 +329,9 @@
   143.4      if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
   143.5        // Base not aligned, retry
   143.6        if (!os::release_memory(base, size)) fatal("os::release_memory failed");
   143.7 -      // Reserve size large enough to do manual alignment and
   143.8 -      // increase size to a multiple of the desired alignment
   143.9 +      // Make sure that size is aligned
  143.10        size = align_size_up(size, alignment);
  143.11 -      size_t extra_size = size + alignment;
  143.12 -      do {
  143.13 -        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
  143.14 -        if (extra_base == NULL) return;
  143.15 -        // Do manual alignement
  143.16 -        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
  143.17 -        assert(base >= extra_base, "just checking");
  143.18 -        // Re-reserve the region at the aligned base address.
  143.19 -        os::release_memory(extra_base, extra_size);
  143.20 -        base = os::reserve_memory(size, base);
  143.21 -      } while (base == NULL);
  143.22 +      base = os::reserve_memory_aligned(size, alignment);
  143.23  
  143.24        if (requested_address != 0 &&
  143.25            failed_to_reserve_as_requested(base, requested_address, size, false)) {
   144.1 --- a/src/share/vm/runtime/vmStructs.cpp	Tue Jan 08 14:04:25 2013 -0500
   144.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Tue Jan 08 11:39:53 2013 -0800
   144.3 @@ -355,8 +355,6 @@
   144.4    nonstatic_field(Method,               _access_flags,                                 AccessFlags)                           \
   144.5    nonstatic_field(Method,               _vtable_index,                                 int)                                   \
   144.6    nonstatic_field(Method,               _method_size,                                  u2)                                    \
   144.7 -  nonstatic_field(Method,               _max_locals,                                   u2)                                    \
   144.8 -  nonstatic_field(Method,               _size_of_parameters,                           u2)                                    \
   144.9    nonstatic_field(Method,               _interpreter_throwout_count,                   u2)                                    \
  144.10    nonstatic_field(Method,               _number_of_breakpoints,                        u2)                                    \
  144.11    nonstatic_field(Method,               _invocation_counter,                           InvocationCounter)                     \
  144.12 @@ -378,6 +376,8 @@
  144.13    nonstatic_field(ConstMethod,          _signature_index,                              u2)                                    \
  144.14    nonstatic_field(ConstMethod,          _method_idnum,                                 u2)                                    \
  144.15    nonstatic_field(ConstMethod,          _max_stack,                                    u2)                                    \
  144.16 +  nonstatic_field(ConstMethod,          _max_locals,                                   u2)                                    \
  144.17 +  nonstatic_field(ConstMethod,          _size_of_parameters,                           u2)                                    \
  144.18    nonstatic_field(ObjArrayKlass,               _element_klass,                                Klass*)                                \
  144.19    nonstatic_field(ObjArrayKlass,               _bottom_klass,                                 Klass*)                                \
  144.20    volatile_nonstatic_field(Symbol,             _refcount,                                     int)                                   \
   145.1 --- a/src/share/vm/services/memBaseline.hpp	Tue Jan 08 14:04:25 2013 -0500
   145.2 +++ b/src/share/vm/services/memBaseline.hpp	Tue Jan 08 11:39:53 2013 -0800
   145.3 @@ -334,7 +334,7 @@
   145.4    // create a memory baseline
   145.5    MemBaseline();
   145.6  
   145.7 -  virtual ~MemBaseline();
   145.8 +  ~MemBaseline();
   145.9  
  145.10    inline bool baselined() const {
  145.11      return _baselined;
   146.1 --- a/src/share/vm/services/nmtDCmd.cpp	Tue Jan 08 14:04:25 2013 -0500
   146.2 +++ b/src/share/vm/services/nmtDCmd.cpp	Tue Jan 08 11:39:53 2013 -0800
   146.3 @@ -84,28 +84,31 @@
   146.4    }
   146.5  
   146.6    int nopt = 0;
   146.7 -  if(_summary.is_set()) { ++nopt; }
   146.8 -  if(_detail.is_set()) { ++nopt; }
   146.9 -  if(_baseline.is_set()) { ++nopt; }
  146.10 -  if(_summary_diff.is_set()) { ++nopt; }
  146.11 -  if(_detail_diff.is_set()) { ++nopt; }
  146.12 -  if(_shutdown.is_set()) { ++nopt; }
  146.13 +  if(_summary.is_set() && _summary.value()) { ++nopt; }
  146.14 +  if(_detail.is_set() && _detail.value()) { ++nopt; }
  146.15 +  if(_baseline.is_set() && _baseline.value()) { ++nopt; }
  146.16 +  if(_summary_diff.is_set() && _summary_diff.value()) { ++nopt; }
  146.17 +  if(_detail_diff.is_set() && _detail_diff.value()) { ++nopt; }
  146.18 +  if(_shutdown.is_set() && _shutdown.value()) { ++nopt; }
  146.19  #ifndef PRODUCT
  146.20 -  if(_debug.is_set()) { ++nopt; }
  146.21 +  if(_debug.is_set() && _debug.value()) { ++nopt; }
  146.22  #endif
  146.23  
  146.24    if(nopt > 1) {
  146.25        output()->print_cr("At most one of the following option can be specified: " \
  146.26          "summary, detail, baseline, summary.diff, detail.diff, shutdown"
  146.27  #ifndef PRODUCT
  146.28 -        " ,debug"
  146.29 +        ", debug"
  146.30  #endif
  146.31        );
  146.32        return;
  146.33 -  }
  146.34 -
  146.35 -  if(nopt == 0) {
  146.36 +  } else if (nopt == 0) {
  146.37 +    if (_summary.is_set()) {
  146.38 +      output()->print_cr("No command to execute");
  146.39 +      return;
  146.40 +    } else {
  146.41        _summary.set_value(true);
  146.42 +    }
  146.43    }
  146.44  
  146.45  #ifndef PRODUCT
   147.1 --- a/src/share/vm/utilities/workgroup.hpp	Tue Jan 08 14:04:25 2013 -0500
   147.2 +++ b/src/share/vm/utilities/workgroup.hpp	Tue Jan 08 11:39:53 2013 -0800
   147.3 @@ -90,7 +90,7 @@
   147.4      NOT_PRODUCT(_name = name);
   147.5      _counter = 0;
   147.6    }
   147.7 -  virtual ~AbstractGangTask() { }
   147.8 +  ~AbstractGangTask() { }
   147.9  
  147.10  public:
  147.11  };
   148.1 --- a/src/share/vm/utilities/yieldingWorkgroup.hpp	Tue Jan 08 14:04:25 2013 -0500
   148.2 +++ b/src/share/vm/utilities/yieldingWorkgroup.hpp	Tue Jan 08 11:39:53 2013 -0800
   148.3 @@ -106,7 +106,7 @@
   148.4      _status(INACTIVE),
   148.5      _gang(NULL) { }
   148.6  
   148.7 -  virtual ~YieldingFlexibleGangTask() { }
   148.8 +  ~YieldingFlexibleGangTask() { }
   148.9  
  148.10    friend class YieldingFlexibleWorkGang;
  148.11    friend class YieldingFlexibleGangWorker;
   149.1 --- a/test/compiler/7184394/TestAESBase.java	Tue Jan 08 14:04:25 2013 -0500
   149.2 +++ b/test/compiler/7184394/TestAESBase.java	Tue Jan 08 11:39:53 2013 -0800
   149.3 @@ -54,7 +54,6 @@
   149.4    String paddingStr = "PKCS5Padding";
   149.5    AlgorithmParameters algParams;
   149.6    SecretKey key;
   149.7 -  int ivLen;
   149.8  
   149.9    static int numThreads = 0;
  149.10    int  threadId;
  149.11 @@ -68,7 +67,7 @@
  149.12  
  149.13    public void prepare() {
  149.14      try {
  149.15 -    System.out.println("\nmsgSize=" + msgSize + ", key size=" + keySize + ", reInit=" + !noReinit + ", checkOutput=" + checkOutput);
  149.16 +    System.out.println("\nalgorithm=" + algorithm + ", mode=" + mode + ", msgSize=" + msgSize + ", keySize=" + keySize + ", noReinit=" + noReinit + ", checkOutput=" + checkOutput);
  149.17  
  149.18        int keyLenBytes = (keySize == 0 ? 16 : keySize/8);
  149.19        byte keyBytes[] = new byte[keyLenBytes];
  149.20 @@ -90,10 +89,14 @@
  149.21        cipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE");
  149.22        dCipher = Cipher.getInstance(algorithm + "/" + mode + "/" + paddingStr, "SunJCE");
  149.23  
  149.24 -      ivLen = (algorithm.equals("AES") ? 16 : algorithm.equals("DES") ? 8 : 0);
  149.25 -      IvParameterSpec initVector = new IvParameterSpec(new byte[ivLen]);
  149.26 -
  149.27 -      cipher.init(Cipher.ENCRYPT_MODE, key, initVector);
  149.28 +      if (mode.equals("CBC")) {
  149.29 +        int ivLen = (algorithm.equals("AES") ? 16 : algorithm.equals("DES") ? 8 : 0);
  149.30 +        IvParameterSpec initVector = new IvParameterSpec(new byte[ivLen]);
  149.31 +        cipher.init(Cipher.ENCRYPT_MODE, key, initVector);
  149.32 +      } else {
  149.33 +        algParams = cipher.getParameters();
  149.34 +        cipher.init(Cipher.ENCRYPT_MODE, key, algParams);
  149.35 +      }
  149.36        algParams = cipher.getParameters();
  149.37        dCipher.init(Cipher.DECRYPT_MODE, key, algParams);
  149.38        if (threadId == 0) {
   150.1 --- a/test/compiler/7184394/TestAESMain.java	Tue Jan 08 14:04:25 2013 -0500
   150.2 +++ b/test/compiler/7184394/TestAESMain.java	Tue Jan 08 11:39:53 2013 -0800
   150.3 @@ -27,7 +27,8 @@
   150.4   * @bug 7184394
   150.5   * @summary add intrinsics to use AES instructions
   150.6   *
   150.7 - * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true TestAESMain
   150.8 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=CBC TestAESMain
   150.9 + * @run main/othervm/timeout=600 -Xbatch -DcheckOutput=true -Dmode=ECB TestAESMain
  150.10   *
  150.11   * @author Tom Deneau
  150.12   */
   151.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   151.2 +++ b/test/compiler/8004741/Test8004741.java	Tue Jan 08 11:39:53 2013 -0800
   151.3 @@ -0,0 +1,94 @@
   151.4 +/*
   151.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   151.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   151.7 + *
   151.8 + * This code is free software; you can redistribute it and/or modify it
   151.9 + * under the terms of the GNU General Public License version 2 only, as
  151.10 + * published by the Free Software Foundation.
  151.11 + *
  151.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  151.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  151.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  151.15 + * version 2 for more details (a copy is included in the LICENSE file that
  151.16 + * accompanied this code).
  151.17 + *
  151.18 + * You should have received a copy of the GNU General Public License version
  151.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  151.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  151.21 + *
  151.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  151.23 + * or visit www.oracle.com if you need additional information or have any
  151.24 + * questions.
  151.25 + */
  151.26 +
  151.27 +/*
  151.28 + * @test Test8004741.java
  151.29 + * @bug 8004741
  151.30 + * @summary Missing compiled exception handle table entry for multidimensional array allocation
  151.31 + * @run main/othervm -Xmx64m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -XX:+StressCompiledExceptionHandlers Test8004741
  151.32 + *
  151.33 + */
  151.34 +
  151.35 +import java.util.*;
  151.36 +
  151.37 +public class Test8004741 extends Thread {
  151.38 +
  151.39 +  static int[][] test(int a, int b) throws Exception {
  151.40 +    int[][] ar = null;
  151.41 +    try {
  151.42 +      ar = new int[a][b];
  151.43 +    } catch (Error e) {
  151.44 +      System.out.println("test got Error");
  151.45 +      passed = true;
  151.46 +      throw(e);
  151.47 +    } catch (Exception e) {
  151.48 +      System.out.println("test got Exception");
  151.49 +      throw(e);
  151.50 +    }
  151.51 +    return ar;
  151.52 +  }
  151.53 +
  151.54 +  static boolean passed = false;
  151.55 +
  151.56 +  public void run() {
  151.57 +      System.out.println("test started");
  151.58 +      try {
  151.59 +        while(true) {
  151.60 +          test(2,20000);
  151.61 +        }
  151.62 +      } catch (ThreadDeath e) {
  151.63 +        System.out.println("test got ThreadDeath");
  151.64 +        passed = true;
  151.65 +      } catch (Error e) {
  151.66 +        e.printStackTrace();
  151.67 +        System.out.println("test got Error");
  151.68 +      } catch (Exception e) {
  151.69 +        e.printStackTrace();
  151.70 +        System.out.println("test got Exception");
  151.71 +      }
  151.72 +  }
  151.73 +
  151.74 +  public static void main(String[] args) throws Exception {
  151.75 +    for (int n = 0; n < 11000; n++) {
  151.76 +      test(2, 20);
  151.77 +    }
  151.78 +
  151.79 +    // First test exception catch
  151.80 +    Test8004741 t = new Test8004741();
  151.81 +
  151.82 +    passed = false;
  151.83 +    t.start();
  151.84 +    Thread.sleep(1000);
  151.85 +    t.stop();
  151.86 +
  151.87 +    Thread.sleep(5000);
  151.88 +    t.join();
  151.89 +    if (passed) {
  151.90 +      System.out.println("PASSED");
  151.91 +    } else {
  151.92 +      System.out.println("FAILED");
  151.93 +      System.exit(97);
  151.94 +    }
  151.95 +  }
  151.96 +
  151.97 +};
   152.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   152.2 +++ b/test/compiler/8005033/Test8005033.java	Tue Jan 08 11:39:53 2013 -0800
   152.3 @@ -0,0 +1,50 @@
   152.4 +/*
   152.5 + * Copyright 2012 SAP AG.  All Rights Reserved.
   152.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   152.7 + *
   152.8 + * This code is free software; you can redistribute it and/or modify it
   152.9 + * under the terms of the GNU General Public License version 2 only, as
  152.10 + * published by the Free Software Foundation.
  152.11 + *
  152.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  152.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  152.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  152.15 + * version 2 for more details (a copy is included in the LICENSE file that
  152.16 + * accompanied this code).
  152.17 + *
  152.18 + * You should have received a copy of the GNU General Public License version
  152.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  152.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  152.21 + *
  152.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  152.23 + * or visit www.oracle.com if you need additional information or have any
  152.24 + * questions.
  152.25 + */
  152.26 +
  152.27 +/**
  152.28 + * @test
  152.29 + * @bug 8005033
  152.30 + * @summary On sparcv9, C2's intrinsic for Integer.bitCount(OV) returns wrong result if OV is the result of an operation with int overflow.
  152.31 + * @run main/othervm -Xcomp -XX:CompileOnly=Test8005033::testBitCount Test8005033
  152.32 + * @author Richard Reingruber richard DOT reingruber AT sap DOT com
  152.33 + */
  152.34 +
  152.35 +public class Test8005033 {
  152.36 +    public static int MINUS_ONE = -1;
  152.37 +
  152.38 +    public static void main(String[] args) {
  152.39 +        System.out.println("EXECUTING test.");
  152.40 +        Integer.bitCount(1);   // load class
  152.41 +        int expectedBitCount = 0;
  152.42 +        int calculatedBitCount = testBitCount();
  152.43 +        if (expectedBitCount != calculatedBitCount) {
  152.44 +            throw new InternalError("got " + calculatedBitCount + " but expected " + expectedBitCount);
  152.45 +        }
  152.46 +        System.out.println("SUCCESSFULLY passed test.");
  152.47 +    }
  152.48 +
  152.49 +    // testBitCount will be compiled using the Integer.bitCount() intrinsic if possible
  152.50 +    private static int testBitCount() {
  152.51 +        return Integer.bitCount(MINUS_ONE+1);   // -1 + 1 => int overflow
  152.52 +    }
  152.53 +}
   153.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   153.2 +++ b/test/sanity/ExecuteInternalVMTests.java	Tue Jan 08 11:39:53 2013 -0800
   153.3 @@ -0,0 +1,40 @@
   153.4 +/*
   153.5 + * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
   153.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   153.7 + *
   153.8 + * This code is free software; you can redistribute it and/or modify it
   153.9 + * under the terms of the GNU General Public License version 2 only, as
  153.10 + * published by the Free Software Foundation.
  153.11 + *
  153.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  153.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  153.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  153.15 + * version 2 for more details (a copy is included in the LICENSE file that
  153.16 + * accompanied this code).
  153.17 + *
  153.18 + * You should have received a copy of the GNU General Public License version
  153.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  153.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  153.21 + *
  153.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  153.23 + * or visit www.oracle.com if you need additional information or have any
  153.24 + * questions.
  153.25 + *
  153.26 + */
  153.27 +
  153.28 +/* @test ExecuteInternalVMTests
  153.29 + * @bug 8004691
  153.30 + * @summary Add a jtreg test that exercises the ExecuteInternalVMTests flag
  153.31 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+ExecuteInternalVMTests ExecuteInternalVMTests
  153.32 + */
  153.33 +public class ExecuteInternalVMTests {
  153.34 +    public static void main(String[] args) throws Exception {
  153.35 +        // The tests that are run are the HotSpot internal tests which are
  153.36 +        // executed only when the flag -XX:+ExecuteInternalVMTests is used.
  153.37 +
  153.38 +        // The flag -XX:+ExecuteInternalVMTests can only be used for
  153.39 +        // non-product builds of HotSpot. Therefore, the flag
  153.40 +        // -XX:+IgnoreUnrecognizedVMOptions is also used, which means that this
  153.41 +        // test will do nothing on a product build.
  153.42 +    }
  153.43 +}

mercurial