Fri, 02 Jul 2010 01:36:15 -0700
Merge
1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java Wed Jun 30 18:57:35 2010 -0700 1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeDisassembler.java Fri Jul 02 01:36:15 2010 -0700 1.3 @@ -72,6 +72,7 @@ 1.4 addBytecodeClass(Bytecodes._invokestatic, BytecodeInvoke.class); 1.5 addBytecodeClass(Bytecodes._invokespecial, BytecodeInvoke.class); 1.6 addBytecodeClass(Bytecodes._invokeinterface, BytecodeInvoke.class); 1.7 + addBytecodeClass(Bytecodes._invokedynamic, BytecodeInvoke.class); 1.8 addBytecodeClass(Bytecodes._jsr, BytecodeJsr.class); 1.9 addBytecodeClass(Bytecodes._jsr_w, BytecodeJsrW.class); 1.10 addBytecodeClass(Bytecodes._iload, BytecodeLoad.class);
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java Wed Jun 30 18:57:35 2010 -0700 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeInvoke.java Fri Jul 02 01:36:15 2010 -0700 2.3 @@ -54,15 +54,31 @@ 2.4 // returns the name of the invoked method 2.5 public Symbol name() { 2.6 ConstantPool cp = method().getConstants(); 2.7 + if (isInvokedynamic()) { 2.8 + int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod()); 2.9 + return cp.getSymbolAt(nt[0]); 2.10 + } 2.11 return cp.getNameRefAt(index()); 2.12 } 2.13 2.14 // returns the signature of the invoked method 2.15 public Symbol signature() { 2.16 ConstantPool cp = method().getConstants(); 2.17 + if (isInvokedynamic()) { 2.18 + int[] nt = cp.getNameAndTypeAt(indexForFieldOrMethod()); 2.19 + return cp.getSymbolAt(nt[1]); 2.20 + } 2.21 return cp.getSignatureRefAt(index()); 2.22 } 2.23 2.24 + public int getSecondaryIndex() { 2.25 + if (isInvokedynamic()) { 2.26 + // change byte-ordering of 4-byte integer 2.27 + return VM.getVM().getBytes().swapInt(javaSignedWordAt(1)); 2.28 + } 2.29 + return super.getSecondaryIndex(); // throw an error 2.30 + } 2.31 + 2.32 public Method getInvokedMethod() { 2.33 return method().getConstants().getMethodRefAt(index()); 2.34 } 2.35 @@ -87,6 +103,7 @@ 2.36 public boolean isInvokevirtual() { return adjustedInvokeCode() == Bytecodes._invokevirtual; } 2.37 public boolean isInvokestatic() { return adjustedInvokeCode() == Bytecodes._invokestatic; } 2.38 public boolean isInvokespecial() { return adjustedInvokeCode() == Bytecodes._invokespecial; } 2.39 + public boolean isInvokedynamic() { return adjustedInvokeCode() == Bytecodes._invokedynamic; } 2.40 2.41 public boolean isValid() { return isInvokeinterface() || 2.42 isInvokevirtual() || 2.43 @@ -104,6 +121,11 @@ 2.44 buf.append(spaces); 2.45 buf.append('#'); 2.46 buf.append(Integer.toString(indexForFieldOrMethod())); 2.47 + if (isInvokedynamic()) { 2.48 + buf.append('('); 2.49 + 
buf.append(Integer.toString(getSecondaryIndex())); 2.50 + buf.append(')'); 2.51 + } 2.52 buf.append(" [Method "); 2.53 StringBuffer sigBuf = new StringBuffer(); 2.54 new SignatureConverter(signature(), sigBuf).iterateReturntype();
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Wed Jun 30 18:57:35 2010 -0700 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeLoadConstant.java Fri Jul 02 01:36:15 2010 -0700 3.3 @@ -25,6 +25,7 @@ 3.4 package sun.jvm.hotspot.interpreter; 3.5 3.6 import sun.jvm.hotspot.oops.*; 3.7 +import sun.jvm.hotspot.runtime.*; 3.8 import sun.jvm.hotspot.utilities.*; 3.9 3.10 public class BytecodeLoadConstant extends BytecodeWithCPIndex { 3.11 @@ -32,10 +33,47 @@ 3.12 super(method, bci); 3.13 } 3.14 3.15 + public boolean hasCacheIndex() { 3.16 + // normal ldc uses CP index, but fast_aldc uses swapped CP cache index 3.17 + return javaCode() != code(); 3.18 + } 3.19 + 3.20 public int index() { 3.21 - return javaCode() == Bytecodes._ldc ? 3.22 + int i = javaCode() == Bytecodes._ldc ? 3.23 (int) (0xFF & javaByteAt(1)) 3.24 : (int) (0xFFFF & javaShortAt(1)); 3.25 + if (hasCacheIndex()) { 3.26 + return (0xFFFF & VM.getVM().getBytes().swapShort((short) i)); 3.27 + } else { 3.28 + return i; 3.29 + } 3.30 + } 3.31 + 3.32 + public int poolIndex() { 3.33 + int i = index(); 3.34 + if (hasCacheIndex()) { 3.35 + ConstantPoolCache cpCache = method().getConstants().getCache(); 3.36 + return cpCache.getEntryAt(i).getConstantPoolIndex(); 3.37 + } else { 3.38 + return i; 3.39 + } 3.40 + } 3.41 + 3.42 + public int cacheIndex() { 3.43 + if (hasCacheIndex()) { 3.44 + return index(); 3.45 + } else { 3.46 + return -1; // no cache index 3.47 + } 3.48 + } 3.49 + 3.50 + private Oop getCachedConstant() { 3.51 + int i = cacheIndex(); 3.52 + if (i >= 0) { 3.53 + ConstantPoolCache cpCache = method().getConstants().getCache(); 3.54 + return cpCache.getEntryAt(i).getF1(); 3.55 + } 3.56 + return null; 3.57 } 3.58 3.59 public void verify() { 3.60 @@ -58,6 +96,7 @@ 3.61 // has to be int or float or String or Klass 3.62 return (ctag.isUnresolvedString() || ctag.isString() 3.63 || ctag.isUnresolvedKlass() || ctag.isKlass() 3.64 + || ctag.isMethodHandle() 
|| ctag.isMethodType() 3.65 || ctag.isInt() || ctag.isFloat())? true: false; 3.66 } 3.67 } 3.68 @@ -112,7 +151,7 @@ 3.69 3.70 public String getConstantValue() { 3.71 ConstantPool cpool = method().getConstants(); 3.72 - int cpIndex = index(); 3.73 + int cpIndex = poolIndex(); 3.74 ConstantTag ctag = cpool.getTagAt(cpIndex); 3.75 if (ctag.isInt()) { 3.76 return "<int " + Integer.toString(cpool.getIntAt(cpIndex)) +">"; 3.77 @@ -149,6 +188,18 @@ 3.78 } else { 3.79 throw new RuntimeException("should not reach here"); 3.80 } 3.81 + } else if (ctag.isMethodHandle()) { 3.82 + Oop x = getCachedConstant(); 3.83 + int refidx = cpool.getMethodHandleIndexAt(cpIndex); 3.84 + int refkind = cpool.getMethodHandleRefKindAt(cpIndex); 3.85 + return "<MethodHandle kind=" + Integer.toString(refkind) + 3.86 + " ref=" + Integer.toString(refidx) 3.87 + + (x == null ? "" : " @" + x.getHandle()) + ">"; 3.88 + } else if (ctag.isMethodType()) { 3.89 + Oop x = getCachedConstant(); 3.90 + int refidx = cpool.getMethodTypeIndexAt(cpIndex); 3.91 + return "<MethodType " + cpool.getSymbolAt(refidx).asString() 3.92 + + (x == null ? "" : " @" + x.getHandle()) + ">"; 3.93 + } else { 3.94 if (Assert.ASSERTS_ENABLED) { 3.95 Assert.that(false, "invalid load constant type"); 3.96 @@ -162,7 +213,12 @@ 3.97 buf.append(getJavaBytecodeName()); 3.98 buf.append(spaces); 3.99 buf.append('#'); 3.100 - buf.append(Integer.toString(index())); 3.101 + buf.append(Integer.toString(poolIndex())); 3.102 + if (hasCacheIndex()) { 3.103 + buf.append('('); 3.104 + buf.append(Integer.toString(cacheIndex())); 3.105 + buf.append(')'); 3.106 + } 3.107 buf.append(spaces); 3.108 buf.append(getConstantValue()); 3.109 if (code() != javaCode()) {
4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java Wed Jun 30 18:57:35 2010 -0700 4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/BytecodeWithCPIndex.java Fri Jul 02 01:36:15 2010 -0700 4.3 @@ -37,12 +37,19 @@ 4.4 // the constant pool index for this bytecode 4.5 public int index() { return 0xFFFF & javaShortAt(1); } 4.6 4.7 + public int getSecondaryIndex() { 4.8 + throw new IllegalArgumentException("must be invokedynamic"); 4.9 + } 4.10 + 4.11 protected int indexForFieldOrMethod() { 4.12 ConstantPoolCache cpCache = method().getConstants().getCache(); 4.13 // get ConstantPool index from ConstantPoolCacheIndex at given bci 4.14 int cpCacheIndex = index(); 4.15 if (cpCache == null) { 4.16 return cpCacheIndex; 4.17 + } else if (code() == Bytecodes._invokedynamic) { 4.18 + int secondaryIndex = getSecondaryIndex(); 4.19 + return cpCache.getMainEntryAt(secondaryIndex).getConstantPoolIndex(); 4.20 } else { 4.21 // change byte-ordering and go via cache 4.22 return cpCache.getEntryAt((int) (0xFFFF & VM.getVM().getBytes().swapShort((short) cpCacheIndex))).getConstantPoolIndex();
5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Wed Jun 30 18:57:35 2010 -0700 5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/interpreter/Bytecodes.java Fri Jul 02 01:36:15 2010 -0700 5.3 @@ -222,7 +222,7 @@ 5.4 public static final int _invokespecial = 183; // 0xb7 5.5 public static final int _invokestatic = 184; // 0xb8 5.6 public static final int _invokeinterface = 185; // 0xb9 5.7 - public static final int _xxxunusedxxx = 186; // 0xba 5.8 + public static final int _invokedynamic = 186; // 0xba 5.9 public static final int _new = 187; // 0xbb 5.10 public static final int _newarray = 188; // 0xbc 5.11 public static final int _anewarray = 189; // 0xbd 5.12 @@ -269,9 +269,12 @@ 5.13 public static final int _fast_invokevfinal = 226; 5.14 public static final int _fast_linearswitch = 227; 5.15 public static final int _fast_binaryswitch = 228; 5.16 - public static final int _shouldnotreachhere = 229; // For debugging 5.17 + public static final int _fast_aldc = 229; 5.18 + public static final int _fast_aldc_w = 230; 5.19 + public static final int _return_register_finalizer = 231; 5.20 + public static final int _shouldnotreachhere = 232; // For debugging 5.21 5.22 - public static final int number_of_codes = 230; 5.23 + public static final int number_of_codes = 233; 5.24 5.25 public static int specialLengthAt(Method method, int bci) { 5.26 int code = codeAt(method, bci); 5.27 @@ -458,9 +461,9 @@ 5.28 def(_dconst_1 , "dconst_1" , "b" , null , BasicType.getTDouble() , 2, false); 5.29 def(_bipush , "bipush" , "bc" , null , BasicType.getTInt() , 1, false); 5.30 def(_sipush , "sipush" , "bcc" , null , BasicType.getTInt() , 1, false); 5.31 - def(_ldc , "ldc" , "bi" , null , BasicType.getTIllegal(), 1, true ); 5.32 - def(_ldc_w , "ldc_w" , "bii" , null , BasicType.getTIllegal(), 1, true ); 5.33 - def(_ldc2_w , "ldc2_w" , "bii" , null , BasicType.getTIllegal(), 2, true ); 5.34 + def(_ldc , "ldc" , "bk" , null , BasicType.getTIllegal(), 1, true ); 
5.35 + def(_ldc_w , "ldc_w" , "bkk" , null , BasicType.getTIllegal(), 1, true ); 5.36 + def(_ldc2_w , "ldc2_w" , "bkk" , null , BasicType.getTIllegal(), 2, true ); 5.37 def(_iload , "iload" , "bi" , "wbii" , BasicType.getTInt() , 1, false); 5.38 def(_lload , "lload" , "bi" , "wbii" , BasicType.getTLong() , 2, false); 5.39 def(_fload , "fload" , "bi" , "wbii" , BasicType.getTFloat() , 1, false); 5.40 @@ -618,26 +621,26 @@ 5.41 def(_dreturn , "dreturn" , "b" , null , BasicType.getTDouble() , -2, true ); 5.42 def(_areturn , "areturn" , "b" , null , BasicType.getTObject() , -1, true ); 5.43 def(_return , "return" , "b" , null , BasicType.getTVoid() , 0, true ); 5.44 - def(_getstatic , "getstatic" , "bjj" , null , BasicType.getTIllegal(), 1, true ); 5.45 - def(_putstatic , "putstatic" , "bjj" , null , BasicType.getTIllegal(), -1, true ); 5.46 - def(_getfield , "getfield" , "bjj" , null , BasicType.getTIllegal(), 0, true ); 5.47 - def(_putfield , "putfield" , "bjj" , null , BasicType.getTIllegal(), -2, true ); 5.48 - def(_invokevirtual , "invokevirtual" , "bjj" , null , BasicType.getTIllegal(), -1, true ); 5.49 - def(_invokespecial , "invokespecial" , "bjj" , null , BasicType.getTIllegal(), -1, true ); 5.50 - def(_invokestatic , "invokestatic" , "bjj" , null , BasicType.getTIllegal(), 0, true ); 5.51 - def(_invokeinterface , "invokeinterface" , "bjj__", null , BasicType.getTIllegal(), -1, true ); 5.52 - def(_xxxunusedxxx , "xxxunusedxxx" , null , null , BasicType.getTVoid() , 0, false); 5.53 - def(_new , "new" , "bii" , null , BasicType.getTObject() , 1, true ); 5.54 + def(_getstatic , "getstatic" , "bJJ" , null , BasicType.getTIllegal(), 1, true ); 5.55 + def(_putstatic , "putstatic" , "bJJ" , null , BasicType.getTIllegal(), -1, true ); 5.56 + def(_getfield , "getfield" , "bJJ" , null , BasicType.getTIllegal(), 0, true ); 5.57 + def(_putfield , "putfield" , "bJJ" , null , BasicType.getTIllegal(), -2, true ); 5.58 + def(_invokevirtual , "invokevirtual" , "bJJ" , null , 
BasicType.getTIllegal(), -1, true ); 5.59 + def(_invokespecial , "invokespecial" , "bJJ" , null , BasicType.getTIllegal(), -1, true ); 5.60 + def(_invokestatic , "invokestatic" , "bJJ" , null , BasicType.getTIllegal(), 0, true ); 5.61 + def(_invokeinterface , "invokeinterface" , "bJJ__", null , BasicType.getTIllegal(), -1, true ); 5.62 + def(_invokedynamic , "invokedynamic" , "bJJJJ", null , BasicType.getTIllegal(), -1, true ); 5.63 + def(_new , "new" , "bkk" , null , BasicType.getTObject() , 1, true ); 5.64 def(_newarray , "newarray" , "bc" , null , BasicType.getTObject() , 0, true ); 5.65 - def(_anewarray , "anewarray" , "bii" , null , BasicType.getTObject() , 0, true ); 5.66 + def(_anewarray , "anewarray" , "bkk" , null , BasicType.getTObject() , 0, true ); 5.67 def(_arraylength , "arraylength" , "b" , null , BasicType.getTVoid() , 0, true ); 5.68 def(_athrow , "athrow" , "b" , null , BasicType.getTVoid() , -1, true ); 5.69 - def(_checkcast , "checkcast" , "bii" , null , BasicType.getTObject() , 0, true ); 5.70 - def(_instanceof , "instanceof" , "bii" , null , BasicType.getTInt() , 0, true ); 5.71 + def(_checkcast , "checkcast" , "bkk" , null , BasicType.getTObject() , 0, true ); 5.72 + def(_instanceof , "instanceof" , "bkk" , null , BasicType.getTInt() , 0, true ); 5.73 def(_monitorenter , "monitorenter" , "b" , null , BasicType.getTVoid() , -1, true ); 5.74 def(_monitorexit , "monitorexit" , "b" , null , BasicType.getTVoid() , -1, true ); 5.75 def(_wide , "wide" , "" , null , BasicType.getTVoid() , 0, false); 5.76 - def(_multianewarray , "multianewarray" , "biic" , null , BasicType.getTObject() , 1, true ); 5.77 + def(_multianewarray , "multianewarray" , "bkkc" , null , BasicType.getTObject() , 1, true ); 5.78 def(_ifnull , "ifnull" , "boo" , null , BasicType.getTVoid() , -1, false); 5.79 def(_ifnonnull , "ifnonnull" , "boo" , null , BasicType.getTVoid() , -1, false); 5.80 def(_goto_w , "goto_w" , "boooo", null , BasicType.getTVoid() , 0, false); 5.81 @@ 
-646,38 +649,44 @@ 5.82 5.83 // JVM bytecodes 5.84 // bytecode bytecode name format wide f. result tp stk traps std code 5.85 - def(_fast_agetfield , "fast_agetfield" , "bjj" , null , BasicType.getTObject() , 0, true , _getfield ); 5.86 - def(_fast_bgetfield , "fast_bgetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); 5.87 - def(_fast_cgetfield , "fast_cgetfield" , "bjj" , null , BasicType.getTChar() , 0, true , _getfield ); 5.88 - def(_fast_dgetfield , "fast_dgetfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _getfield ); 5.89 - def(_fast_fgetfield , "fast_fgetfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _getfield ); 5.90 - def(_fast_igetfield , "fast_igetfield" , "bjj" , null , BasicType.getTInt() , 0, true , _getfield ); 5.91 - def(_fast_lgetfield , "fast_lgetfield" , "bjj" , null , BasicType.getTLong() , 0, true , _getfield ); 5.92 - def(_fast_sgetfield , "fast_sgetfield" , "bjj" , null , BasicType.getTShort() , 0, true , _getfield ); 5.93 + def(_fast_agetfield , "fast_agetfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _getfield ); 5.94 + def(_fast_bgetfield , "fast_bgetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield ); 5.95 + def(_fast_cgetfield , "fast_cgetfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _getfield ); 5.96 + def(_fast_dgetfield , "fast_dgetfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _getfield ); 5.97 + def(_fast_fgetfield , "fast_fgetfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _getfield ); 5.98 + def(_fast_igetfield , "fast_igetfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _getfield ); 5.99 + def(_fast_lgetfield , "fast_lgetfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _getfield ); 5.100 + def(_fast_sgetfield , "fast_sgetfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _getfield ); 5.101 5.102 - def(_fast_aputfield , "fast_aputfield" , "bjj" , null , BasicType.getTObject() , 0, true , _putfield ); 5.103 - 
def(_fast_bputfield , "fast_bputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); 5.104 - def(_fast_cputfield , "fast_cputfield" , "bjj" , null , BasicType.getTChar() , 0, true , _putfield ); 5.105 - def(_fast_dputfield , "fast_dputfield" , "bjj" , null , BasicType.getTDouble() , 0, true , _putfield ); 5.106 - def(_fast_fputfield , "fast_fputfield" , "bjj" , null , BasicType.getTFloat() , 0, true , _putfield ); 5.107 - def(_fast_iputfield , "fast_iputfield" , "bjj" , null , BasicType.getTInt() , 0, true , _putfield ); 5.108 - def(_fast_lputfield , "fast_lputfield" , "bjj" , null , BasicType.getTLong() , 0, true , _putfield ); 5.109 - def(_fast_sputfield , "fast_sputfield" , "bjj" , null , BasicType.getTShort() , 0, true , _putfield ); 5.110 + def(_fast_aputfield , "fast_aputfield" , "bJJ" , null , BasicType.getTObject() , 0, true , _putfield ); 5.111 + def(_fast_bputfield , "fast_bputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield ); 5.112 + def(_fast_cputfield , "fast_cputfield" , "bJJ" , null , BasicType.getTChar() , 0, true , _putfield ); 5.113 + def(_fast_dputfield , "fast_dputfield" , "bJJ" , null , BasicType.getTDouble() , 0, true , _putfield ); 5.114 + def(_fast_fputfield , "fast_fputfield" , "bJJ" , null , BasicType.getTFloat() , 0, true , _putfield ); 5.115 + def(_fast_iputfield , "fast_iputfield" , "bJJ" , null , BasicType.getTInt() , 0, true , _putfield ); 5.116 + def(_fast_lputfield , "fast_lputfield" , "bJJ" , null , BasicType.getTLong() , 0, true , _putfield ); 5.117 + def(_fast_sputfield , "fast_sputfield" , "bJJ" , null , BasicType.getTShort() , 0, true , _putfield ); 5.118 5.119 def(_fast_aload_0 , "fast_aload_0" , "b" , null , BasicType.getTObject() , 1, true , _aload_0 ); 5.120 - def(_fast_iaccess_0 , "fast_iaccess_0" , "b_jj" , null , BasicType.getTInt() , 1, true , _aload_0 ); 5.121 - def(_fast_aaccess_0 , "fast_aaccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); 5.122 - 
def(_fast_faccess_0 , "fast_faccess_0" , "b_jj" , null , BasicType.getTObject() , 1, true , _aload_0 ); 5.123 + def(_fast_iaccess_0 , "fast_iaccess_0" , "b_JJ" , null , BasicType.getTInt() , 1, true , _aload_0 ); 5.124 + def(_fast_aaccess_0 , "fast_aaccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 ); 5.125 + def(_fast_faccess_0 , "fast_faccess_0" , "b_JJ" , null , BasicType.getTObject() , 1, true , _aload_0 ); 5.126 5.127 def(_fast_iload , "fast_iload" , "bi" , null , BasicType.getTInt() , 1, false, _iload); 5.128 def(_fast_iload2 , "fast_iload2" , "bi_i" , null , BasicType.getTInt() , 2, false, _iload); 5.129 def(_fast_icaload , "fast_icaload" , "bi_" , null , BasicType.getTInt() , 0, false, _iload); 5.130 5.131 // Faster method invocation. 5.132 - def(_fast_invokevfinal , "fast_invokevfinal" , "bjj" , null , BasicType.getTIllegal(), -1, true, _invokevirtual); 5.133 + def(_fast_invokevfinal , "fast_invokevfinal" , "bJJ" , null , BasicType.getTIllegal(), -1, true, _invokevirtual); 5.134 5.135 def(_fast_linearswitch , "fast_linearswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); 5.136 def(_fast_binaryswitch , "fast_binaryswitch" , "" , null , BasicType.getTVoid() , -1, false, _lookupswitch ); 5.137 + 5.138 + def(_return_register_finalizer, "return_register_finalizer", "b" , null , BasicType.getTVoid() , 0, true, _return ); 5.139 + 5.140 + def(_fast_aldc , "fast_aldc" , "bj" , null , BasicType.getTObject(), 1, true, _ldc ); 5.141 + def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , null , BasicType.getTObject(), 1, true, _ldc_w ); 5.142 + 5.143 def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , null , BasicType.getTVoid() , 0, false); 5.144 5.145 if (Assert.ASSERTS_ENABLED) {
6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Wed Jun 30 18:57:35 2010 -0700 6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Fri Jul 02 01:36:15 2010 -0700 6.3 @@ -152,7 +152,7 @@ 6.4 return res; 6.5 } 6.6 6.7 - public int getNameAndTypeAt(int which) { 6.8 + public int[] getNameAndTypeAt(int which) { 6.9 if (Assert.ASSERTS_ENABLED) { 6.10 Assert.that(getTagAt(which).isNameAndType(), "Corrupted constant pool"); 6.11 } 6.12 @@ -160,18 +160,16 @@ 6.13 if (DEBUG) { 6.14 System.err.println("ConstantPool.getNameAndTypeAt(" + which + "): result = " + i); 6.15 } 6.16 - return i; 6.17 + return new int[] { extractLowShortFromInt(i), extractHighShortFromInt(i) }; 6.18 } 6.19 6.20 public Symbol getNameRefAt(int which) { 6.21 - int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); 6.22 - int nameIndex = extractLowShortFromInt(refIndex); 6.23 + int nameIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[0]; 6.24 return getSymbolAt(nameIndex); 6.25 } 6.26 6.27 public Symbol getSignatureRefAt(int which) { 6.28 - int refIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which)); 6.29 - int sigIndex = extractHighShortFromInt(refIndex); 6.30 + int sigIndex = getNameAndTypeAt(getNameAndTypeRefIndexAt(which))[1]; 6.31 return getSymbolAt(sigIndex); 6.32 } 6.33 6.34 @@ -220,11 +218,11 @@ 6.35 6.36 /** Lookup for entries consisting of (name_index, signature_index) */ 6.37 public int getNameRefIndexAt(int index) { 6.38 - int refIndex = getNameAndTypeAt(index); 6.39 + int[] refIndex = getNameAndTypeAt(index); 6.40 if (DEBUG) { 6.41 - System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex); 6.42 + System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]); 6.43 } 6.44 - int i = extractLowShortFromInt(refIndex); 6.45 + int i = refIndex[0]; 6.46 if (DEBUG) { 6.47 System.err.println("ConstantPool.getNameRefIndexAt(" + index + "): result = " 
+ i); 6.48 } 6.49 @@ -233,17 +231,53 @@ 6.50 6.51 /** Lookup for entries consisting of (name_index, signature_index) */ 6.52 public int getSignatureRefIndexAt(int index) { 6.53 - int refIndex = getNameAndTypeAt(index); 6.54 + int[] refIndex = getNameAndTypeAt(index); 6.55 if (DEBUG) { 6.56 - System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex); 6.57 + System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): refIndex = " + refIndex[0]+"/"+refIndex[1]); 6.58 } 6.59 - int i = extractHighShortFromInt(refIndex); 6.60 + int i = refIndex[1]; 6.61 if (DEBUG) { 6.62 System.err.println("ConstantPool.getSignatureRefIndexAt(" + index + "): result = " + i); 6.63 } 6.64 return i; 6.65 } 6.66 6.67 + /** Lookup for MethodHandle entries. */ 6.68 + public int getMethodHandleIndexAt(int i) { 6.69 + if (Assert.ASSERTS_ENABLED) { 6.70 + Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool"); 6.71 + } 6.72 + int res = extractHighShortFromInt(getIntAt(i)); 6.73 + if (DEBUG) { 6.74 + System.err.println("ConstantPool.getMethodHandleIndexAt(" + i + "): result = " + res); 6.75 + } 6.76 + return res; 6.77 + } 6.78 + 6.79 + /** Lookup for MethodHandle entries. */ 6.80 + public int getMethodHandleRefKindAt(int i) { 6.81 + if (Assert.ASSERTS_ENABLED) { 6.82 + Assert.that(getTagAt(i).isMethodHandle(), "Corrupted constant pool"); 6.83 + } 6.84 + int res = extractLowShortFromInt(getIntAt(i)); 6.85 + if (DEBUG) { 6.86 + System.err.println("ConstantPool.getMethodHandleRefKindAt(" + i + "): result = " + res); 6.87 + } 6.88 + return res; 6.89 + } 6.90 + 6.91 + /** Lookup for MethodType entries. 
*/ 6.92 + public int getMethodTypeIndexAt(int i) { 6.93 + if (Assert.ASSERTS_ENABLED) { 6.94 + Assert.that(getTagAt(i).isMethodType(), "Corrupted constant pool"); 6.95 + } 6.96 + int res = getIntAt(i); 6.97 + if (DEBUG) { 6.98 + System.err.println("ConstantPool.getMethodTypeIndexAt(" + i + "): result = " + res); 6.99 + } 6.100 + return res; 6.101 + } 6.102 + 6.103 final private static String[] nameForTag = new String[] { 6.104 }; 6.105 6.106 @@ -261,6 +295,8 @@ 6.107 case JVM_CONSTANT_Methodref: return "JVM_CONSTANT_Methodref"; 6.108 case JVM_CONSTANT_InterfaceMethodref: return "JVM_CONSTANT_InterfaceMethodref"; 6.109 case JVM_CONSTANT_NameAndType: return "JVM_CONSTANT_NameAndType"; 6.110 + case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle"; 6.111 + case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType"; 6.112 case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; 6.113 case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; 6.114 case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError"; 6.115 @@ -317,6 +353,8 @@ 6.116 case JVM_CONSTANT_Methodref: 6.117 case JVM_CONSTANT_InterfaceMethodref: 6.118 case JVM_CONSTANT_NameAndType: 6.119 + case JVM_CONSTANT_MethodHandle: 6.120 + case JVM_CONSTANT_MethodType: 6.121 visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true); 6.122 break; 6.123 } 6.124 @@ -467,6 +505,18 @@ 6.125 + ", type = " + signatureIndex); 6.126 break; 6.127 } 6.128 + 6.129 + case JVM_CONSTANT_MethodHandle: { 6.130 + dos.writeByte(cpConstType); 6.131 + int value = getIntAt(ci); 6.132 + byte refKind = (byte) extractLowShortFromInt(value); 6.133 + short refIndex = (short) extractHighShortFromInt(value); 6.134 + dos.writeByte(refKind); 6.135 + dos.writeShort(refIndex); 6.136 + if (DEBUG) debugMessage("CP[" + ci + "] = MH kind = " + refKind 6.137 + + ", index = " + refIndex); 6.138 + break; 6.139 + }
default: 6.141 throw new InternalError("unknown tag: " + cpConstType); 6.142 } // switch 6.143 @@ -488,10 +538,12 @@ 6.144 // 6.145 6.146 private static int extractHighShortFromInt(int val) { 6.147 + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. 6.148 return (val >> 16) & 0xFFFF; 6.149 } 6.150 6.151 private static int extractLowShortFromInt(int val) { 6.152 + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. 6.153 return val & 0xFFFF; 6.154 } 6.155 }
7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Wed Jun 30 18:57:35 2010 -0700 7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCache.java Fri Jul 02 01:36:15 2010 -0700 7.3 @@ -78,6 +78,31 @@ 7.4 return new ConstantPoolCacheEntry(this, i); 7.5 } 7.6 7.7 + public static boolean isSecondaryIndex(int i) { return (i < 0); } 7.8 + public static int decodeSecondaryIndex(int i) { return isSecondaryIndex(i) ? ~i : i; } 7.9 + public static int encodeSecondaryIndex(int i) { return !isSecondaryIndex(i) ? ~i : i; } 7.10 + 7.11 + // secondary entries hold invokedynamic call site bindings 7.12 + public ConstantPoolCacheEntry getSecondaryEntryAt(int i) { 7.13 + ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, decodeSecondaryIndex(i)); 7.14 + if (Assert.ASSERTS_ENABLED) { 7.15 + Assert.that(e.isSecondaryEntry(), "must be a secondary entry"); 7.16 + } 7.17 + return e; 7.18 + } 7.19 + 7.20 + public ConstantPoolCacheEntry getMainEntryAt(int i) { 7.21 + if (isSecondaryIndex(i)) { 7.22 + // run through an extra level of indirection: 7.23 + i = getSecondaryEntryAt(i).getMainEntryIndex(); 7.24 + } 7.25 + ConstantPoolCacheEntry e = new ConstantPoolCacheEntry(this, i); 7.26 + if (Assert.ASSERTS_ENABLED) { 7.27 + Assert.that(!e.isSecondaryEntry(), "must not be a secondary entry"); 7.28 + } 7.29 + return e; 7.30 + } 7.31 + 7.32 public int getIntAt(int entry, int fld) { 7.33 //alignObjectSize ? 7.34 long offset = baseOffset + /*alignObjectSize*/entry * elementSize + fld* getHeap().getIntSize();
8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java Wed Jun 30 18:57:35 2010 -0700 8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPoolCacheEntry.java Fri Jul 02 01:36:15 2010 -0700 8.3 @@ -28,6 +28,7 @@ 8.4 import sun.jvm.hotspot.debugger.*; 8.5 import sun.jvm.hotspot.runtime.*; 8.6 import sun.jvm.hotspot.types.*; 8.7 +import sun.jvm.hotspot.utilities.*; 8.8 8.9 public class ConstantPoolCacheEntry { 8.10 private static long size; 8.11 @@ -67,9 +68,23 @@ 8.12 } 8.13 8.14 public int getConstantPoolIndex() { 8.15 + if (Assert.ASSERTS_ENABLED) { 8.16 + Assert.that(!isSecondaryEntry(), "must not be a secondary CP entry"); 8.17 + } 8.18 return (int) (getIndices() & 0xFFFF); 8.19 } 8.20 8.21 + public boolean isSecondaryEntry() { 8.22 + return (getIndices() & 0xFFFF) == 0; 8.23 + } 8.24 + 8.25 + public int getMainEntryIndex() { 8.26 + if (Assert.ASSERTS_ENABLED) { 8.27 + Assert.that(isSecondaryEntry(), "must be a secondary CP entry"); 8.28 + } 8.29 + return (int) (getIndices() >>> 16); 8.30 + } 8.31 + 8.32 private long getIndices() { 8.33 return cp.getHandle().getCIntegerAt(indices.getOffset() + offset, indices.getSize(), indices.isUnsigned()); 8.34 }
9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Wed Jun 30 18:57:35 2010 -0700 9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/GenerateOopMap.java Fri Jul 02 01:36:15 2010 -0700 9.3 @@ -566,6 +566,7 @@ 9.4 case Bytecodes._invokespecial: 9.5 case Bytecodes._invokestatic: 9.6 case Bytecodes._invokeinterface: 9.7 + case Bytecodes._invokedynamic: 9.8 // FIXME: print signature of referenced method (need more 9.9 // accessors in ConstantPool and ConstantPoolCache) 9.10 int idx = currentBC.getIndexBig(); 9.11 @@ -605,6 +606,7 @@ 9.12 case Bytecodes._invokespecial: 9.13 case Bytecodes._invokestatic: 9.14 case Bytecodes._invokeinterface: 9.15 + case Bytecodes._invokedynamic: 9.16 // FIXME: print signature of referenced method (need more 9.17 // accessors in ConstantPool and ConstantPoolCache) 9.18 int idx = currentBC.getIndexBig(); 9.19 @@ -1134,6 +1136,7 @@ 9.20 case Bytecodes._invokespecial: 9.21 case Bytecodes._invokestatic: 9.22 case Bytecodes._invokeinterface: 9.23 + case Bytecodes._invokedynamic: 9.24 _itr_send = itr; 9.25 _report_result_for_send = true; 9.26 break; 9.27 @@ -1379,6 +1382,7 @@ 9.28 case Bytecodes._invokevirtual: 9.29 case Bytecodes._invokespecial: doMethod(false, false, itr.getIndexBig(), itr.bci()); break; 9.30 case Bytecodes._invokestatic: doMethod(true, false, itr.getIndexBig(), itr.bci()); break; 9.31 + case Bytecodes._invokedynamic: doMethod(false, true, itr.getIndexBig(), itr.bci()); break; 9.32 case Bytecodes._invokeinterface: doMethod(false, true, itr.getIndexBig(), itr.bci()); break; 9.33 case Bytecodes._newarray: 9.34 case Bytecodes._anewarray: ppNewRef(vCTS, itr.bci()); break; 9.35 @@ -1725,7 +1729,7 @@ 9.36 void doMethod (boolean is_static, boolean is_interface, int idx, int bci) { 9.37 // Dig up signature for field in constant pool 9.38 ConstantPool cp = _method.getConstants(); 9.39 - int nameAndTypeIdx = cp.getNameAndTypeRefIndexAt(idx); 9.40 + int nameAndTypeIdx = cp.getTagAt(idx).isNameAndType() ? 
idx : cp.getNameAndTypeRefIndexAt(idx); 9.41 int signatureIdx = cp.getSignatureRefIndexAt(nameAndTypeIdx); 9.42 Symbol signature = cp.getSymbolAt(signatureIdx); 9.43
10.1 --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Wed Jun 30 18:57:35 2010 -0700 10.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/ClassConstants.java Fri Jul 02 01:36:15 2010 -0700 10.3 @@ -40,6 +40,19 @@ 10.4 public static final int JVM_CONSTANT_Methodref = 10; 10.5 public static final int JVM_CONSTANT_InterfaceMethodref = 11; 10.6 public static final int JVM_CONSTANT_NameAndType = 12; 10.7 + public static final int JVM_CONSTANT_MethodHandle = 15; 10.8 + public static final int JVM_CONSTANT_MethodType = 16; 10.9 + 10.10 + // JVM_CONSTANT_MethodHandle subtypes 10.11 + public static final int JVM_REF_getField = 1; 10.12 + public static final int JVM_REF_getStatic = 2; 10.13 + public static final int JVM_REF_putField = 3; 10.14 + public static final int JVM_REF_putStatic = 4; 10.15 + public static final int JVM_REF_invokeVirtual = 5; 10.16 + public static final int JVM_REF_invokeStatic = 6; 10.17 + public static final int JVM_REF_invokeSpecial = 7; 10.18 + public static final int JVM_REF_newInvokeSpecial = 8; 10.19 + public static final int JVM_REF_invokeInterface = 9; 10.20 10.21 // HotSpot specific constant pool constant types. 10.22
11.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Wed Jun 30 18:57:35 2010 -0700 11.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ByteCodeRewriter.java Fri Jul 02 01:36:15 2010 -0700 11.3 @@ -54,14 +54,34 @@ 11.4 11.5 } 11.6 11.7 - protected short getConstantPoolIndex(int bci) { 11.8 + protected short getConstantPoolIndex(int rawcode, int bci) { 11.9 // get ConstantPool index from ConstantPoolCacheIndex at given bci 11.10 - short cpCacheIndex = method.getBytecodeShortArg(bci); 11.11 + String fmt = Bytecodes.format(rawcode); 11.12 + int cpCacheIndex; 11.13 + switch (fmt.length()) { 11.14 + case 2: cpCacheIndex = method.getBytecodeByteArg(bci); break; 11.15 + case 3: cpCacheIndex = method.getBytecodeShortArg(bci); break; 11.16 + case 5: 11.17 + if (fmt.indexOf("__") >= 0) 11.18 + cpCacheIndex = method.getBytecodeShortArg(bci); 11.19 + else 11.20 + cpCacheIndex = method.getBytecodeIntArg(bci); 11.21 + break; 11.22 + default: throw new IllegalArgumentException(); 11.23 + } 11.24 if (cpCache == null) { 11.25 - return cpCacheIndex; 11.26 + return (short) cpCacheIndex; 11.27 + } else if (fmt.indexOf("JJJJ") >= 0) { 11.28 + // change byte-ordering and go via secondary cache entry 11.29 + return (short) cpCache.getMainEntryAt(bytes.swapInt(cpCacheIndex)).getConstantPoolIndex(); 11.30 + } else if (fmt.indexOf("JJ") >= 0) { 11.31 + // change byte-ordering and go via cache 11.32 + return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort((short)cpCacheIndex))).getConstantPoolIndex(); 11.33 + } else if (fmt.indexOf("j") >= 0) { 11.34 + // go via cache 11.35 + return (short) cpCache.getEntryAt((int) (0xFF & cpCacheIndex)).getConstantPoolIndex(); 11.36 } else { 11.37 - // change byte-ordering and go via cache 11.38 - return (short) cpCache.getEntryAt((int) (0xFFFF & bytes.swapShort(cpCacheIndex))).getConstantPoolIndex(); 11.39 + return (short) cpCacheIndex; 11.40 } 11.41 } 11.42 11.43 @@ -100,10 +120,31 @@ 11.44 case 
Bytecodes._invokespecial: 11.45 case Bytecodes._invokestatic: 11.46 case Bytecodes._invokeinterface: { 11.47 - cpoolIndex = getConstantPoolIndex(bci + 1); 11.48 + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); 11.49 writeShort(code, bci + 1, cpoolIndex); 11.50 break; 11.51 } 11.52 + 11.53 + case Bytecodes._invokedynamic: 11.54 + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); 11.55 + writeShort(code, bci + 1, cpoolIndex); 11.56 + writeShort(code, bci + 3, (short)0); // clear out trailing bytes 11.57 + break; 11.58 + 11.59 + case Bytecodes._ldc_w: 11.60 + if (hotspotcode != bytecode) { 11.61 + // fast_aldc_w puts constant in CP cache 11.62 + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); 11.63 + writeShort(code, bci + 1, cpoolIndex); 11.64 + } 11.65 + break; 11.66 + case Bytecodes._ldc: 11.67 + if (hotspotcode != bytecode) { 11.68 + // fast_aldc puts constant in CP cache 11.69 + cpoolIndex = getConstantPoolIndex(hotspotcode, bci + 1); 11.70 + code[bci + 1] = (byte)(cpoolIndex); 11.71 + } 11.72 + break; 11.73 } 11.74 11.75 len = Bytecodes.lengthFor(bytecode);
12.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Wed Jun 30 18:57:35 2010 -0700 12.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Fri Jul 02 01:36:15 2010 -0700 12.3 @@ -61,10 +61,12 @@ 12.4 protected short _signatureIndex; 12.5 12.6 protected static int extractHighShortFromInt(int val) { 12.7 + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. 12.8 return (val >> 16) & 0xFFFF; 12.9 } 12.10 12.11 protected static int extractLowShortFromInt(int val) { 12.12 + // must stay in sync with constantPoolOopDesc::name_and_type_at_put, method_at_put, etc. 12.13 return val & 0xFFFF; 12.14 } 12.15 12.16 @@ -297,6 +299,28 @@ 12.17 + ", type = " + signatureIndex); 12.18 break; 12.19 } 12.20 + 12.21 + case JVM_CONSTANT_MethodHandle: { 12.22 + dos.writeByte(cpConstType); 12.23 + int value = cpool.getIntAt(ci); 12.24 + short refIndex = (short) extractHighShortFromInt(value); 12.25 + byte refKind = (byte) extractLowShortFromInt(value); 12.26 + dos.writeByte(refKind); 12.27 + dos.writeShort(refIndex); 12.28 + if (DEBUG) debugMessage("CP[" + ci + "] = MH index = " + refIndex 12.29 + + ", kind = " + refKind); 12.30 + break; 12.31 + } 12.32 + 12.33 + case JVM_CONSTANT_MethodType: { 12.34 + dos.writeByte(cpConstType); 12.35 + int value = cpool.getIntAt(ci); 12.36 + short refIndex = (short) value; 12.37 + dos.writeShort(refIndex); 12.38 + if (DEBUG) debugMessage("CP[" + ci + "] = MT index = " + refIndex); 12.39 + break; 12.40 + } 12.41 + 12.42 default: 12.43 throw new InternalError("Unknown tag: " + cpConstType); 12.44 } // switch
13.1 --- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Wed Jun 30 18:57:35 2010 -0700 13.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Fri Jul 02 01:36:15 2010 -0700 13.3 @@ -572,6 +572,16 @@ 13.4 buf.cell(Integer.toString(cpool.getIntAt(index))); 13.5 break; 13.6 13.7 + case JVM_CONSTANT_MethodHandle: 13.8 + buf.cell("JVM_CONSTANT_MethodHandle"); 13.9 + buf.cell(genLowHighShort(cpool.getIntAt(index))); 13.10 + break; 13.11 + 13.12 + case JVM_CONSTANT_MethodType: 13.13 + buf.cell("JVM_CONSTANT_MethodType"); 13.14 + buf.cell(Integer.toString(cpool.getIntAt(index))); 13.15 + break; 13.16 + 13.17 default: 13.18 throw new InternalError("unknown tag: " + ctag); 13.19 }
14.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Wed Jun 30 18:57:35 2010 -0700 14.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Fri Jul 02 01:36:15 2010 -0700 14.3 @@ -38,12 +38,26 @@ 14.4 private static int JVM_CONSTANT_Methodref = 10; 14.5 private static int JVM_CONSTANT_InterfaceMethodref = 11; 14.6 private static int JVM_CONSTANT_NameAndType = 12; 14.7 + private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292 14.8 + private static int JVM_CONSTANT_MethodType = 16; // JSR 292 14.9 private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization 14.10 private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use 14.11 private static int JVM_CONSTANT_ClassIndex = 101; // Temporary tag while constructing constant pool 14.12 private static int JVM_CONSTANT_UnresolvedString = 102; // Temporary tag until actual use 14.13 private static int JVM_CONSTANT_StringIndex = 103; // Temporary tag while constructing constant pool 14.14 private static int JVM_CONSTANT_UnresolvedClassInError = 104; // Resolution failed 14.15 + private static int JVM_CONSTANT_Object = 105; // Required for BoundMethodHandle arguments. 
14.16 + 14.17 + // JVM_CONSTANT_MethodHandle subtypes //FIXME: connect these to data structure 14.18 + private static int JVM_REF_getField = 1; 14.19 + private static int JVM_REF_getStatic = 2; 14.20 + private static int JVM_REF_putField = 3; 14.21 + private static int JVM_REF_putStatic = 4; 14.22 + private static int JVM_REF_invokeVirtual = 5; 14.23 + private static int JVM_REF_invokeStatic = 6; 14.24 + private static int JVM_REF_invokeSpecial = 7; 14.25 + private static int JVM_REF_newInvokeSpecial = 8; 14.26 + private static int JVM_REF_invokeInterface = 9; 14.27 14.28 private byte tag; 14.29 14.30 @@ -62,6 +76,8 @@ 14.31 public boolean isDouble() { return tag == JVM_CONSTANT_Double; } 14.32 public boolean isNameAndType() { return tag == JVM_CONSTANT_NameAndType; } 14.33 public boolean isUtf8() { return tag == JVM_CONSTANT_Utf8; } 14.34 + public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; } 14.35 + public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; } 14.36 14.37 public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; } 14.38 14.39 @@ -73,6 +89,8 @@ 14.40 public boolean isUnresolvedString() { return tag == JVM_CONSTANT_UnresolvedString; } 14.41 public boolean isStringIndex() { return tag == JVM_CONSTANT_StringIndex; } 14.42 14.43 + public boolean isObject() { return tag == JVM_CONSTANT_Object; } 14.44 + 14.45 public boolean isKlassReference() { return isKlassIndex() || isUnresolvedKlass(); } 14.46 public boolean isFieldOrMethod() { return isField() || isMethod() || isInterfaceMethod(); } 14.47 public boolean isSymbol() { return isUtf8(); }
15.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Wed Jun 30 18:57:35 2010 -0700 15.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/soql/sa.js Fri Jul 02 01:36:15 2010 -0700 15.3 @@ -825,6 +825,8 @@ 15.4 } 15.5 writeln(""); 15.6 disAsm.decode(new sapkg.interpreter.BytecodeVisitor() { 15.7 + prologue: function(method) { }, 15.8 + epilogue: function() { }, 15.9 visit: function(bytecode) { 15.10 if (hasLines) { 15.11 var line = method.getLineNumberFromBCI(bci);
16.1 --- a/make/linux/makefiles/adlc.make Wed Jun 30 18:57:35 2010 -0700 16.2 +++ b/make/linux/makefiles/adlc.make Fri Jul 02 01:36:15 2010 -0700 16.3 @@ -138,7 +138,11 @@ 16.4 16.5 # Normally, debugging is done directly on the ad_<arch>*.cpp files. 16.6 # But -g will put #line directives in those files pointing back to <arch>.ad. 16.7 +# Some builds of gcc 3.2 have a bug that gets tickled by the extra #line directives 16.8 +# so skip it for 3.2 and earlier. 16.9 +ifneq "$(shell expr \( $(CC_VER_MAJOR) \> 3 \) \| \( \( $(CC_VER_MAJOR) = 3 \) \& \( $(CC_VER_MINOR) \>= 3 \) \))" "0" 16.10 ADLCFLAGS += -g 16.11 +endif 16.12 16.13 ifdef LP64 16.14 ADLCFLAGS += -D_LP64
17.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Wed Jun 30 18:57:35 2010 -0700 17.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Jul 02 01:36:15 2010 -0700 17.3 @@ -318,6 +318,31 @@ 17.4 __ bind(exit); 17.5 } 17.6 17.7 +// Fast path for caching oop constants. 17.8 +// %%% We should use this to handle Class and String constants also. 17.9 +// %%% It will simplify the ldc/primitive path considerably. 17.10 +void TemplateTable::fast_aldc(bool wide) { 17.11 + transition(vtos, atos); 17.12 + 17.13 + if (!EnableMethodHandles) { 17.14 + // We should not encounter this bytecode if !EnableMethodHandles. 17.15 + // The verifier will stop it. However, if we get past the verifier, 17.16 + // this will stop the thread in a reasonable way, without crashing the JVM. 17.17 + __ call_VM(noreg, CAST_FROM_FN_PTR(address, 17.18 + InterpreterRuntime::throw_IncompatibleClassChangeError)); 17.19 + // the call_VM checks for exception, so we should never return here. 17.20 + __ should_not_reach_here(); 17.21 + return; 17.22 + } 17.23 + 17.24 + Register Rcache = G3_scratch; 17.25 + Register Rscratch = G4_scratch; 17.26 + 17.27 + resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? 
sizeof(u2) : sizeof(u1)); 17.28 + 17.29 + __ verify_oop(Otos_i); 17.30 +} 17.31 + 17.32 void TemplateTable::ldc2_w() { 17.33 transition(vtos, vtos); 17.34 Label retry, resolved, Long, exit; 17.35 @@ -1994,6 +2019,8 @@ 17.36 case Bytecodes::_invokestatic : // fall through 17.37 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 17.38 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 17.39 + case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; 17.40 + case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; 17.41 default : ShouldNotReachHere(); break; 17.42 } 17.43 // first time invocation - must resolve first
18.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Jun 30 18:57:35 2010 -0700 18.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Jul 02 01:36:15 2010 -0700 18.3 @@ -375,6 +375,32 @@ 18.4 __ bind(Done); 18.5 } 18.6 18.7 +// Fast path for caching oop constants. 18.8 +// %%% We should use this to handle Class and String constants also. 18.9 +// %%% It will simplify the ldc/primitive path considerably. 18.10 +void TemplateTable::fast_aldc(bool wide) { 18.11 + transition(vtos, atos); 18.12 + 18.13 + if (!EnableMethodHandles) { 18.14 + // We should not encounter this bytecode if !EnableMethodHandles. 18.15 + // The verifier will stop it. However, if we get past the verifier, 18.16 + // this will stop the thread in a reasonable way, without crashing the JVM. 18.17 + __ call_VM(noreg, CAST_FROM_FN_PTR(address, 18.18 + InterpreterRuntime::throw_IncompatibleClassChangeError)); 18.19 + // the call_VM checks for exception, so we should never return here. 18.20 + __ should_not_reach_here(); 18.21 + return; 18.22 + } 18.23 + 18.24 + const Register cache = rcx; 18.25 + const Register index = rdx; 18.26 + 18.27 + resolve_cache_and_index(f1_oop, rax, cache, index, wide ? 
sizeof(u2) : sizeof(u1)); 18.28 + if (VerifyOops) { 18.29 + __ verify_oop(rax); 18.30 + } 18.31 +} 18.32 + 18.33 void TemplateTable::ldc2_w() { 18.34 transition(vtos, vtos); 18.35 Label Long, Done; 18.36 @@ -2055,6 +2081,8 @@ 18.37 case Bytecodes::_invokestatic : // fall through 18.38 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break; 18.39 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break; 18.40 + case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; 18.41 + case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break; 18.42 default : ShouldNotReachHere(); break; 18.43 } 18.44 __ movl(temp, (int)bytecode());
19.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Jun 30 18:57:35 2010 -0700 19.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Jul 02 01:36:15 2010 -0700 19.3 @@ -389,6 +389,32 @@ 19.4 __ bind(Done); 19.5 } 19.6 19.7 +// Fast path for caching oop constants. 19.8 +// %%% We should use this to handle Class and String constants also. 19.9 +// %%% It will simplify the ldc/primitive path considerably. 19.10 +void TemplateTable::fast_aldc(bool wide) { 19.11 + transition(vtos, atos); 19.12 + 19.13 + if (!EnableMethodHandles) { 19.14 + // We should not encounter this bytecode if !EnableMethodHandles. 19.15 + // The verifier will stop it. However, if we get past the verifier, 19.16 + // this will stop the thread in a reasonable way, without crashing the JVM. 19.17 + __ call_VM(noreg, CAST_FROM_FN_PTR(address, 19.18 + InterpreterRuntime::throw_IncompatibleClassChangeError)); 19.19 + // the call_VM checks for exception, so we should never return here. 19.20 + __ should_not_reach_here(); 19.21 + return; 19.22 + } 19.23 + 19.24 + const Register cache = rcx; 19.25 + const Register index = rdx; 19.26 + 19.27 + resolve_cache_and_index(f1_oop, rax, cache, index, wide ? sizeof(u2) : sizeof(u1)); 19.28 + if (VerifyOops) { 19.29 + __ verify_oop(rax); 19.30 + } 19.31 +} 19.32 + 19.33 void TemplateTable::ldc2_w() { 19.34 transition(vtos, vtos); 19.35 Label Long, Done; 19.36 @@ -2063,6 +2089,12 @@ 19.37 case Bytecodes::_invokedynamic: 19.38 entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); 19.39 break; 19.40 + case Bytecodes::_fast_aldc: 19.41 + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); 19.42 + break; 19.43 + case Bytecodes::_fast_aldc_w: 19.44 + entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); 19.45 + break; 19.46 default: 19.47 ShouldNotReachHere(); 19.48 break;
20.1 --- a/src/cpu/x86/vm/vm_version_x86.cpp Wed Jun 30 18:57:35 2010 -0700 20.2 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Fri Jul 02 01:36:15 2010 -0700 20.3 @@ -1,5 +1,5 @@ 20.4 /* 20.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. 20.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. 20.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 20.8 * 20.9 * This code is free software; you can redistribute it and/or modify it 20.10 @@ -34,7 +34,7 @@ 20.11 VM_Version::CpuidInfo VM_Version::_cpuid_info = { 0, }; 20.12 20.13 static BufferBlob* stub_blob; 20.14 -static const int stub_size = 300; 20.15 +static const int stub_size = 400; 20.16 20.17 extern "C" { 20.18 typedef void (*getPsrInfo_stub_t)(void*); 20.19 @@ -56,7 +56,7 @@ 20.20 const uint32_t CPU_FAMILY_386 = (3 << CPU_FAMILY_SHIFT); 20.21 const uint32_t CPU_FAMILY_486 = (4 << CPU_FAMILY_SHIFT); 20.22 20.23 - Label detect_486, cpu486, detect_586, std_cpuid1; 20.24 + Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4; 20.25 Label ext_cpuid1, ext_cpuid5, done; 20.26 20.27 StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub"); 20.28 @@ -131,13 +131,62 @@ 20.29 __ movl(Address(rsi, 8), rcx); 20.30 __ movl(Address(rsi,12), rdx); 20.31 20.32 - __ cmpl(rax, 3); // Is cpuid(0x4) supported? 20.33 - __ jccb(Assembler::belowEqual, std_cpuid1); 20.34 + __ cmpl(rax, 0xa); // Is cpuid(0xB) supported? 
20.35 + __ jccb(Assembler::belowEqual, std_cpuid4); 20.36 + 20.37 + // 20.38 + // cpuid(0xB) Processor Topology 20.39 + // 20.40 + __ movl(rax, 0xb); 20.41 + __ xorl(rcx, rcx); // Threads level 20.42 + __ cpuid(); 20.43 + 20.44 + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset()))); 20.45 + __ movl(Address(rsi, 0), rax); 20.46 + __ movl(Address(rsi, 4), rbx); 20.47 + __ movl(Address(rsi, 8), rcx); 20.48 + __ movl(Address(rsi,12), rdx); 20.49 + 20.50 + __ movl(rax, 0xb); 20.51 + __ movl(rcx, 1); // Cores level 20.52 + __ cpuid(); 20.53 + __ push(rax); 20.54 + __ andl(rax, 0x1f); // Determine if valid topology level 20.55 + __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level 20.56 + __ andl(rax, 0xffff); 20.57 + __ pop(rax); 20.58 + __ jccb(Assembler::equal, std_cpuid4); 20.59 + 20.60 + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset()))); 20.61 + __ movl(Address(rsi, 0), rax); 20.62 + __ movl(Address(rsi, 4), rbx); 20.63 + __ movl(Address(rsi, 8), rcx); 20.64 + __ movl(Address(rsi,12), rdx); 20.65 + 20.66 + __ movl(rax, 0xb); 20.67 + __ movl(rcx, 2); // Packages level 20.68 + __ cpuid(); 20.69 + __ push(rax); 20.70 + __ andl(rax, 0x1f); // Determine if valid topology level 20.71 + __ orl(rax, rbx); // eax[4:0] | ebx[0:15] == 0 indicates invalid level 20.72 + __ andl(rax, 0xffff); 20.73 + __ pop(rax); 20.74 + __ jccb(Assembler::equal, std_cpuid4); 20.75 + 20.76 + __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset()))); 20.77 + __ movl(Address(rsi, 0), rax); 20.78 + __ movl(Address(rsi, 4), rbx); 20.79 + __ movl(Address(rsi, 8), rcx); 20.80 + __ movl(Address(rsi,12), rdx); 20.81 20.82 // 20.83 // cpuid(0x4) Deterministic cache params 20.84 // 20.85 + __ bind(std_cpuid4); 20.86 __ movl(rax, 4); 20.87 + __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported? 
20.88 + __ jccb(Assembler::greater, std_cpuid1); 20.89 + 20.90 __ xorl(rcx, rcx); // L1 cache 20.91 __ cpuid(); 20.92 __ push(rax); 20.93 @@ -460,13 +509,18 @@ 20.94 AllocatePrefetchDistance = allocate_prefetch_distance(); 20.95 AllocatePrefetchStyle = allocate_prefetch_style(); 20.96 20.97 - if( AllocatePrefetchStyle == 2 && is_intel() && 20.98 - cpu_family() == 6 && supports_sse3() ) { // watermark prefetching on Core 20.99 + if( is_intel() && cpu_family() == 6 && supports_sse3() ) { 20.100 + if( AllocatePrefetchStyle == 2 ) { // watermark prefetching on Core 20.101 #ifdef _LP64 20.102 - AllocatePrefetchDistance = 384; 20.103 + AllocatePrefetchDistance = 384; 20.104 #else 20.105 - AllocatePrefetchDistance = 320; 20.106 + AllocatePrefetchDistance = 320; 20.107 #endif 20.108 + } 20.109 + if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus 20.110 + AllocatePrefetchDistance = 192; 20.111 + AllocatePrefetchLines = 4; 20.112 + } 20.113 } 20.114 assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value"); 20.115
21.1 --- a/src/cpu/x86/vm/vm_version_x86.hpp Wed Jun 30 18:57:35 2010 -0700 21.2 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Fri Jul 02 01:36:15 2010 -0700 21.3 @@ -1,5 +1,5 @@ 21.4 /* 21.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. 21.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All Rights Reserved. 21.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 21.8 * 21.9 * This code is free software; you can redistribute it and/or modify it 21.10 @@ -114,6 +114,14 @@ 21.11 } bits; 21.12 }; 21.13 21.14 + union TplCpuidBEbx { 21.15 + uint32_t value; 21.16 + struct { 21.17 + uint32_t logical_cpus : 16, 21.18 + : 16; 21.19 + } bits; 21.20 + }; 21.21 + 21.22 union ExtCpuid1Ecx { 21.23 uint32_t value; 21.24 struct { 21.25 @@ -211,6 +219,25 @@ 21.26 uint32_t dcp_cpuid4_ecx; // unused currently 21.27 uint32_t dcp_cpuid4_edx; // unused currently 21.28 21.29 + // cpuid function 0xB (processor topology) 21.30 + // ecx = 0 21.31 + uint32_t tpl_cpuidB0_eax; 21.32 + TplCpuidBEbx tpl_cpuidB0_ebx; 21.33 + uint32_t tpl_cpuidB0_ecx; // unused currently 21.34 + uint32_t tpl_cpuidB0_edx; // unused currently 21.35 + 21.36 + // ecx = 1 21.37 + uint32_t tpl_cpuidB1_eax; 21.38 + TplCpuidBEbx tpl_cpuidB1_ebx; 21.39 + uint32_t tpl_cpuidB1_ecx; // unused currently 21.40 + uint32_t tpl_cpuidB1_edx; // unused currently 21.41 + 21.42 + // ecx = 2 21.43 + uint32_t tpl_cpuidB2_eax; 21.44 + TplCpuidBEbx tpl_cpuidB2_ebx; 21.45 + uint32_t tpl_cpuidB2_ecx; // unused currently 21.46 + uint32_t tpl_cpuidB2_edx; // unused currently 21.47 + 21.48 // cpuid function 0x80000000 // example, unused 21.49 uint32_t ext_max_function; 21.50 uint32_t ext_vendor_name_0; 21.51 @@ -316,6 +343,9 @@ 21.52 static ByteSize ext_cpuid1_offset() { return byte_offset_of(CpuidInfo, ext_cpuid1_eax); } 21.53 static ByteSize ext_cpuid5_offset() { return byte_offset_of(CpuidInfo, ext_cpuid5_eax); } 21.54 static ByteSize ext_cpuid8_offset() { return 
byte_offset_of(CpuidInfo, ext_cpuid8_eax); } 21.55 + static ByteSize tpl_cpuidB0_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB0_eax); } 21.56 + static ByteSize tpl_cpuidB1_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB1_eax); } 21.57 + static ByteSize tpl_cpuidB2_offset() { return byte_offset_of(CpuidInfo, tpl_cpuidB2_eax); } 21.58 21.59 // Initialization 21.60 static void initialize(); 21.61 @@ -349,7 +379,12 @@ 21.62 static uint cores_per_cpu() { 21.63 uint result = 1; 21.64 if (is_intel()) { 21.65 - result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); 21.66 + if (_cpuid_info.std_max_function >= 0xB) { 21.67 + result = _cpuid_info.tpl_cpuidB1_ebx.bits.logical_cpus / 21.68 + _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus; 21.69 + } else { 21.70 + result = (_cpuid_info.dcp_cpuid4_eax.bits.cores_per_cpu + 1); 21.71 + } 21.72 } else if (is_amd()) { 21.73 result = (_cpuid_info.ext_cpuid8_ecx.bits.cores_per_cpu + 1); 21.74 } 21.75 @@ -358,7 +393,9 @@ 21.76 21.77 static uint threads_per_core() { 21.78 uint result = 1; 21.79 - if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { 21.80 + if (is_intel() && _cpuid_info.std_max_function >= 0xB) { 21.81 + result = _cpuid_info.tpl_cpuidB0_ebx.bits.logical_cpus; 21.82 + } else if (_cpuid_info.std_cpuid1_edx.bits.ht != 0) { 21.83 result = _cpuid_info.std_cpuid1_ebx.bits.threads_per_cpu / 21.84 cores_per_cpu(); 21.85 }
22.1 --- a/src/cpu/zero/vm/cppInterpreter_zero.cpp Wed Jun 30 18:57:35 2010 -0700 22.2 +++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp Fri Jul 02 01:36:15 2010 -0700 22.3 @@ -820,7 +820,7 @@ 22.4 bool is_top_frame) { 22.5 assert(popframe_extra_args == 0, "what to do?"); 22.6 assert(!is_top_frame || (!callee_locals && !callee_param_count), 22.7 - "top frame should have no caller") 22.8 + "top frame should have no caller"); 22.9 22.10 // This code must exactly match what InterpreterFrame::build 22.11 // does (the full InterpreterFrame::build, that is, not the
23.1 --- a/src/os/solaris/vm/osThread_solaris.hpp Wed Jun 30 18:57:35 2010 -0700 23.2 +++ b/src/os/solaris/vm/osThread_solaris.hpp Fri Jul 02 01:36:15 2010 -0700 23.3 @@ -123,7 +123,7 @@ 23.4 23.5 int set_interrupt_callback (Sync_Interrupt_Callback * cb); 23.6 void remove_interrupt_callback(Sync_Interrupt_Callback * cb); 23.7 - void OSThread::do_interrupt_callbacks_at_interrupt(InterruptArguments *args); 23.8 + void do_interrupt_callbacks_at_interrupt(InterruptArguments *args); 23.9 23.10 // *************************************************************** 23.11 // java.lang.Thread.interrupt state.
24.1 --- a/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp Wed Jun 30 18:57:35 2010 -0700 24.2 +++ b/src/os_cpu/linux_x86/vm/copy_linux_x86.inline.hpp Fri Jul 02 01:36:15 2010 -0700 24.3 @@ -26,7 +26,7 @@ 24.4 #ifdef AMD64 24.5 (void)memmove(to, from, count * HeapWordSize); 24.6 #else 24.7 - // Same as pd_aligned_conjoint_words, except includes a zero-count check. 24.8 + // Includes a zero-count check. 24.9 intx temp; 24.10 __asm__ volatile(" testl %6,%6 ;" 24.11 " jz 7f ;" 24.12 @@ -84,7 +84,7 @@ 24.13 break; 24.14 } 24.15 #else 24.16 - // Same as pd_aligned_disjoint_words, except includes a zero-count check. 24.17 + // Includes a zero-count check. 24.18 intx temp; 24.19 __asm__ volatile(" testl %6,%6 ;" 24.20 " jz 3f ;" 24.21 @@ -130,75 +130,18 @@ 24.22 } 24.23 24.24 static void pd_aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { 24.25 -#ifdef AMD64 24.26 - (void)memmove(to, from, count * HeapWordSize); 24.27 -#else 24.28 - // Same as pd_conjoint_words, except no zero-count check. 
24.29 - intx temp; 24.30 - __asm__ volatile(" cmpl %4,%5 ;" 24.31 - " leal -4(%4,%6,4),%3;" 24.32 - " jbe 1f ;" 24.33 - " cmpl %7,%5 ;" 24.34 - " jbe 4f ;" 24.35 - "1: cmpl $32,%6 ;" 24.36 - " ja 3f ;" 24.37 - " subl %4,%1 ;" 24.38 - "2: movl (%4),%3 ;" 24.39 - " movl %7,(%5,%4,1) ;" 24.40 - " addl $4,%0 ;" 24.41 - " subl $1,%2 ;" 24.42 - " jnz 2b ;" 24.43 - " jmp 7f ;" 24.44 - "3: rep; smovl ;" 24.45 - " jmp 7f ;" 24.46 - "4: cmpl $32,%2 ;" 24.47 - " movl %7,%0 ;" 24.48 - " leal -4(%5,%6,4),%1;" 24.49 - " ja 6f ;" 24.50 - " subl %4,%1 ;" 24.51 - "5: movl (%4),%3 ;" 24.52 - " movl %7,(%5,%4,1) ;" 24.53 - " subl $4,%0 ;" 24.54 - " subl $1,%2 ;" 24.55 - " jnz 5b ;" 24.56 - " jmp 7f ;" 24.57 - "6: std ;" 24.58 - " rep; smovl ;" 24.59 - " cld ;" 24.60 - "7: nop " 24.61 - : "=S" (from), "=D" (to), "=c" (count), "=r" (temp) 24.62 - : "0" (from), "1" (to), "2" (count), "3" (temp) 24.63 - : "memory", "flags"); 24.64 -#endif // AMD64 24.65 + pd_conjoint_words(from, to, count); 24.66 } 24.67 24.68 static void pd_aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { 24.69 -#ifdef AMD64 24.70 pd_disjoint_words(from, to, count); 24.71 -#else 24.72 - // Same as pd_disjoint_words, except no zero-count check. 24.73 - intx temp; 24.74 - __asm__ volatile(" cmpl $32,%6 ;" 24.75 - " ja 2f ;" 24.76 - " subl %4,%1 ;" 24.77 - "1: movl (%4),%3 ;" 24.78 - " movl %7,(%5,%4,1);" 24.79 - " addl $4,%0 ;" 24.80 - " subl $1,%2 ;" 24.81 - " jnz 1b ;" 24.82 - " jmp 3f ;" 24.83 - "2: rep; smovl ;" 24.84 - "3: nop " 24.85 - : "=S" (from), "=D" (to), "=c" (count), "=r" (temp) 24.86 - : "0" (from), "1" (to), "2" (count), "3" (temp) 24.87 - : "memory", "cc"); 24.88 -#endif // AMD64 24.89 } 24.90 24.91 static void pd_conjoint_bytes(void* from, void* to, size_t count) { 24.92 #ifdef AMD64 24.93 (void)memmove(to, from, count); 24.94 #else 24.95 + // Includes a zero-count check. 24.96 intx temp; 24.97 __asm__ volatile(" testl %6,%6 ;" 24.98 " jz 13f ;"
25.1 --- a/src/os_cpu/linux_x86/vm/linux_x86_32.s Wed Jun 30 18:57:35 2010 -0700 25.2 +++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s Fri Jul 02 01:36:15 2010 -0700 25.3 @@ -121,10 +121,10 @@ 25.4 jnz 3b 25.5 addl %esi,%edi 25.6 4: movl %eax,%ecx # byte count less prefix 25.7 - andl $3,%ecx # suffix byte count 25.8 +5: andl $3,%ecx # suffix byte count 25.9 jz 7f # no suffix 25.10 # copy suffix 25.11 -5: xorl %eax,%eax 25.12 + xorl %eax,%eax 25.13 6: movb (%esi,%eax,1),%dl 25.14 movb %dl,(%edi,%eax,1) 25.15 addl $1,%eax 25.16 @@ -159,10 +159,10 @@ 25.17 # copy dwords, aligned or not 25.18 3: rep; smovl 25.19 4: movl %eax,%ecx # byte count 25.20 - andl $3,%ecx # suffix byte count 25.21 +5: andl $3,%ecx # suffix byte count 25.22 jz 7f # no suffix 25.23 # copy suffix 25.24 -5: subl %esi,%edi 25.25 + subl %esi,%edi 25.26 addl $3,%esi 25.27 6: movb (%esi),%dl 25.28 movb %dl,(%edi,%esi,1) 25.29 @@ -214,10 +214,10 @@ 25.30 # copy aligned dwords 25.31 3: rep; smovl 25.32 4: movl %eax,%ecx 25.33 - andl $3,%ecx 25.34 +5: andl $3,%ecx 25.35 jz 7f 25.36 # copy suffix 25.37 -5: xorl %eax,%eax 25.38 + xorl %eax,%eax 25.39 6: movb (%esi,%eax,1),%dl 25.40 movb %dl,(%edi,%eax,1) 25.41 addl $1,%eax 25.42 @@ -250,9 +250,9 @@ 25.43 jnz 3b 25.44 addl %esi,%edi 25.45 4: movl %eax,%ecx 25.46 - andl $3,%ecx 25.47 +5: andl $3,%ecx 25.48 jz 7f 25.49 -5: subl %esi,%edi 25.50 + subl %esi,%edi 25.51 addl $3,%esi 25.52 6: movb (%esi),%dl 25.53 movb %dl,(%edi,%esi,1) 25.54 @@ -287,11 +287,12 @@ 25.55 andl $3,%eax # either 0 or 2 25.56 jz 1f # no prefix 25.57 # copy prefix 25.58 + subl $1,%ecx 25.59 + jl 5f # zero count 25.60 movw (%esi),%dx 25.61 movw %dx,(%edi) 25.62 addl %eax,%esi # %eax == 2 25.63 addl %eax,%edi 25.64 - subl $1,%ecx 25.65 1: movl %ecx,%eax # word count less prefix 25.66 sarl %ecx # dword count 25.67 jz 4f # no dwords to move 25.68 @@ -454,12 +455,13 @@ 25.69 ret 25.70 .=.+10 25.71 2: subl %esi,%edi 25.72 + jmp 4f 25.73 .p2align 4,,15 25.74 3: movl (%esi),%edx 25.75 movl 
%edx,(%edi,%esi,1) 25.76 addl $4,%esi 25.77 - subl $1,%ecx 25.78 - jnz 3b 25.79 +4: subl $1,%ecx 25.80 + jge 3b 25.81 popl %edi 25.82 popl %esi 25.83 ret 25.84 @@ -467,19 +469,20 @@ 25.85 std 25.86 leal -4(%edi,%ecx,4),%edi # to + count*4 - 4 25.87 cmpl $32,%ecx 25.88 - ja 3f # > 32 dwords 25.89 + ja 4f # > 32 dwords 25.90 subl %eax,%edi # eax == from + count*4 - 4 25.91 + jmp 3f 25.92 .p2align 4,,15 25.93 2: movl (%eax),%edx 25.94 movl %edx,(%edi,%eax,1) 25.95 subl $4,%eax 25.96 - subl $1,%ecx 25.97 - jnz 2b 25.98 +3: subl $1,%ecx 25.99 + jge 2b 25.100 cld 25.101 popl %edi 25.102 popl %esi 25.103 ret 25.104 -3: movl %eax,%esi # from + count*4 - 4 25.105 +4: movl %eax,%esi # from + count*4 - 4 25.106 rep; smovl 25.107 cld 25.108 popl %edi
26.1 --- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Wed Jun 30 18:57:35 2010 -0700 26.2 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Jul 02 01:36:15 2010 -0700 26.3 @@ -861,7 +861,7 @@ 26.4 cmpxchg_long_func_t* os::atomic_cmpxchg_long_func = os::atomic_cmpxchg_long_bootstrap; 26.5 add_func_t* os::atomic_add_func = os::atomic_add_bootstrap; 26.6 26.7 -extern "C" _solaris_raw_setup_fpu(address ptr); 26.8 +extern "C" void _solaris_raw_setup_fpu(address ptr); 26.9 void os::setup_fpu() { 26.10 address fpu_cntrl = StubRoutines::addr_fpu_cntrl_wrd_std(); 26.11 _solaris_raw_setup_fpu(fpu_cntrl);
27.1 --- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Wed Jun 30 18:57:35 2010 -0700 27.2 +++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s Fri Jul 02 01:36:15 2010 -0700 27.3 @@ -154,10 +154,10 @@ 27.4 jnz 3b 27.5 addl %esi,%edi 27.6 4: movl %eax,%ecx / byte count less prefix 27.7 - andl $3,%ecx / suffix byte count 27.8 +5: andl $3,%ecx / suffix byte count 27.9 jz 7f / no suffix 27.10 / copy suffix 27.11 -5: xorl %eax,%eax 27.12 + xorl %eax,%eax 27.13 6: movb (%esi,%eax,1),%dl 27.14 movb %dl,(%edi,%eax,1) 27.15 addl $1,%eax 27.16 @@ -192,10 +192,10 @@ 27.17 / copy dwords, aligned or not 27.18 3: rep; smovl 27.19 4: movl %eax,%ecx / byte count 27.20 - andl $3,%ecx / suffix byte count 27.21 +5: andl $3,%ecx / suffix byte count 27.22 jz 7f / no suffix 27.23 / copy suffix 27.24 -5: subl %esi,%edi 27.25 + subl %esi,%edi 27.26 addl $3,%esi 27.27 6: movb (%esi),%dl 27.28 movb %dl,(%edi,%esi,1) 27.29 @@ -246,10 +246,10 @@ 27.30 / copy aligned dwords 27.31 3: rep; smovl 27.32 4: movl %eax,%ecx 27.33 - andl $3,%ecx 27.34 +5: andl $3,%ecx 27.35 jz 7f 27.36 / copy suffix 27.37 -5: xorl %eax,%eax 27.38 + xorl %eax,%eax 27.39 6: movb (%esi,%eax,1),%dl 27.40 movb %dl,(%edi,%eax,1) 27.41 addl $1,%eax 27.42 @@ -282,9 +282,9 @@ 27.43 jnz 3b 27.44 addl %esi,%edi 27.45 4: movl %eax,%ecx 27.46 - andl $3,%ecx 27.47 +5: andl $3,%ecx 27.48 jz 7f 27.49 -5: subl %esi,%edi 27.50 + subl %esi,%edi 27.51 addl $3,%esi 27.52 6: movb (%esi),%dl 27.53 movb %dl,(%edi,%esi,1) 27.54 @@ -318,11 +318,12 @@ 27.55 andl $3,%eax / either 0 or 2 27.56 jz 1f / no prefix 27.57 / copy prefix 27.58 + subl $1,%ecx 27.59 + jl 5f / zero count 27.60 movw (%esi),%dx 27.61 movw %dx,(%edi) 27.62 addl %eax,%esi / %eax == 2 27.63 addl %eax,%edi 27.64 - subl $1,%ecx 27.65 1: movl %ecx,%eax / word count less prefix 27.66 sarl %ecx / dword count 27.67 jz 4f / no dwords to move 27.68 @@ -482,12 +483,13 @@ 27.69 ret 27.70 .=.+10 27.71 2: subl %esi,%edi 27.72 + jmp 4f 27.73 .align 16 27.74 3: movl (%esi),%edx 27.75 movl 
%edx,(%edi,%esi,1) 27.76 addl $4,%esi 27.77 - subl $1,%ecx 27.78 - jnz 3b 27.79 +4: subl $1,%ecx 27.80 + jge 3b 27.81 popl %edi 27.82 popl %esi 27.83 ret 27.84 @@ -495,19 +497,20 @@ 27.85 std 27.86 leal -4(%edi,%ecx,4),%edi / to + count*4 - 4 27.87 cmpl $32,%ecx 27.88 - ja 3f / > 32 dwords 27.89 + ja 4f / > 32 dwords 27.90 subl %eax,%edi / eax == from + count*4 - 4 27.91 + jmp 3f 27.92 .align 16 27.93 2: movl (%eax),%edx 27.94 movl %edx,(%edi,%eax,1) 27.95 subl $4,%eax 27.96 - subl $1,%ecx 27.97 - jnz 2b 27.98 +3: subl $1,%ecx 27.99 + jge 2b 27.100 cld 27.101 popl %edi 27.102 popl %esi 27.103 ret 27.104 -3: movl %eax,%esi / from + count*4 - 4 27.105 +4: movl %eax,%esi / from + count*4 - 4 27.106 rep; smovl 27.107 cld 27.108 popl %edi
28.1 --- a/src/share/vm/asm/codeBuffer.cpp Wed Jun 30 18:57:35 2010 -0700 28.2 +++ b/src/share/vm/asm/codeBuffer.cpp Fri Jul 02 01:36:15 2010 -0700 28.3 @@ -404,7 +404,7 @@ 28.4 locs_start = REALLOC_RESOURCE_ARRAY(relocInfo, _locs_start, old_capacity, new_capacity); 28.5 } else { 28.6 locs_start = NEW_RESOURCE_ARRAY(relocInfo, new_capacity); 28.7 - Copy::conjoint_bytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo)); 28.8 + Copy::conjoint_jbytes(_locs_start, locs_start, old_capacity * sizeof(relocInfo)); 28.9 _locs_own = true; 28.10 } 28.11 _locs_start = locs_start; 28.12 @@ -581,7 +581,7 @@ 28.13 (HeapWord*)(buf+buf_offset), 28.14 (lsize + HeapWordSize-1) / HeapWordSize); 28.15 } else { 28.16 - Copy::conjoint_bytes(lstart, buf+buf_offset, lsize); 28.17 + Copy::conjoint_jbytes(lstart, buf+buf_offset, lsize); 28.18 } 28.19 } 28.20 buf_offset += lsize;
29.1 --- a/src/share/vm/c1/c1_Compilation.cpp Wed Jun 30 18:57:35 2010 -0700 29.2 +++ b/src/share/vm/c1/c1_Compilation.cpp Fri Jul 02 01:36:15 2010 -0700 29.3 @@ -242,10 +242,10 @@ 29.4 code->insts()->initialize_shared_locs((relocInfo*)locs_buffer, 29.5 locs_buffer_size / sizeof(relocInfo)); 29.6 code->initialize_consts_size(Compilation::desired_max_constant_size()); 29.7 - // Call stubs + deopt/exception handler 29.8 + // Call stubs + two deopt handlers (regular and MH) + exception handler 29.9 code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) + 29.10 LIR_Assembler::exception_handler_size + 29.11 - LIR_Assembler::deopt_handler_size); 29.12 + 2 * LIR_Assembler::deopt_handler_size); 29.13 } 29.14 29.15
30.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Wed Jun 30 18:57:35 2010 -0700 30.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Fri Jul 02 01:36:15 2010 -0700 30.3 @@ -878,15 +878,12 @@ 30.4 case T_OBJECT : 30.5 { 30.6 ciObject* obj = con.as_object(); 30.7 - if (obj->is_klass()) { 30.8 - ciKlass* klass = obj->as_klass(); 30.9 - if (!klass->is_loaded() || PatchALot) { 30.10 - patch_state = state()->copy(); 30.11 - t = new ObjectConstant(obj); 30.12 - } else { 30.13 - t = new InstanceConstant(klass->java_mirror()); 30.14 - } 30.15 + if (!obj->is_loaded() 30.16 + || (PatchALot && obj->klass() != ciEnv::current()->String_klass())) { 30.17 + patch_state = state()->copy(); 30.18 + t = new ObjectConstant(obj); 30.19 } else { 30.20 + assert(!obj->is_klass(), "must be java_mirror of klass"); 30.21 t = new InstanceConstant(obj->as_instance()); 30.22 } 30.23 break;
31.1 --- a/src/share/vm/c1/c1_Runtime1.cpp Wed Jun 30 18:57:35 2010 -0700 31.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Jul 02 01:36:15 2010 -0700 31.3 @@ -601,7 +601,7 @@ 31.4 31.5 31.6 static klassOop resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { 31.7 - Bytecode_field* field_access = Bytecode_field_at(caller(), caller->bcp_from(bci)); 31.8 + Bytecode_field* field_access = Bytecode_field_at(caller, bci); 31.9 // This can be static or non-static field access 31.10 Bytecodes::Code code = field_access->code(); 31.11 31.12 @@ -721,7 +721,7 @@ 31.13 Handle load_klass(THREAD, NULL); // oop needed by load_klass_patching code 31.14 if (stub_id == Runtime1::access_field_patching_id) { 31.15 31.16 - Bytecode_field* field_access = Bytecode_field_at(caller_method(), caller_method->bcp_from(bci)); 31.17 + Bytecode_field* field_access = Bytecode_field_at(caller_method, bci); 31.18 FieldAccessInfo result; // initialize class if needed 31.19 Bytecodes::Code code = field_access->code(); 31.20 constantPoolHandle constants(THREAD, caller_method->constants()); 31.21 @@ -781,11 +781,9 @@ 31.22 case Bytecodes::_ldc: 31.23 case Bytecodes::_ldc_w: 31.24 { 31.25 - Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method(), 31.26 - caller_method->bcp_from(bci)); 31.27 - klassOop resolved = caller_method->constants()->klass_at(cc->index(), CHECK); 31.28 - // ldc wants the java mirror. 31.29 - k = resolved->klass_part()->java_mirror(); 31.30 + Bytecode_loadconstant* cc = Bytecode_loadconstant_at(caller_method, bci); 31.31 + k = cc->resolve_constant(CHECK); 31.32 + assert(k != NULL && !k->is_klass(), "must be class mirror or other Java constant"); 31.33 } 31.34 break; 31.35 default: Unimplemented(); 31.36 @@ -816,6 +814,15 @@ 31.37 // Return to the now deoptimized frame. 31.38 } 31.39 31.40 + // If we are patching in a non-perm oop, make sure the nmethod 31.41 + // is on the right list. 
31.42 + if (ScavengeRootsInCode && load_klass.not_null() && load_klass->is_scavengable()) { 31.43 + MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag); 31.44 + nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); 31.45 + guarantee(nm != NULL, "only nmethods can contain non-perm oops"); 31.46 + if (!nm->on_scavenge_root_list()) 31.47 + CodeCache::add_scavenge_root_nmethod(nm); 31.48 + } 31.49 31.50 // Now copy code back 31.51 31.52 @@ -1115,7 +1122,7 @@ 31.53 if (length == 0) return; 31.54 // Not guaranteed to be word atomic, but that doesn't matter 31.55 // for anything but an oop array, which is covered by oop_arraycopy. 31.56 - Copy::conjoint_bytes(src, dst, length); 31.57 + Copy::conjoint_jbytes(src, dst, length); 31.58 JRT_END 31.59 31.60 JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
32.1 --- a/src/share/vm/ci/ciCPCache.cpp Wed Jun 30 18:57:35 2010 -0700 32.2 +++ b/src/share/vm/ci/ciCPCache.cpp Fri Jul 02 01:36:15 2010 -0700 32.3 @@ -44,13 +44,23 @@ 32.4 // ciCPCache::is_f1_null_at 32.5 bool ciCPCache::is_f1_null_at(int index) { 32.6 VM_ENTRY_MARK; 32.7 - constantPoolCacheOop cpcache = (constantPoolCacheOop) get_oop(); 32.8 - oop f1 = cpcache->secondary_entry_at(index)->f1(); 32.9 + oop f1 = entry_at(index)->f1(); 32.10 return (f1 == NULL); 32.11 } 32.12 32.13 32.14 // ------------------------------------------------------------------ 32.15 +// ciCPCache::get_pool_index 32.16 +int ciCPCache::get_pool_index(int index) { 32.17 + VM_ENTRY_MARK; 32.18 + ConstantPoolCacheEntry* e = entry_at(index); 32.19 + if (e->is_secondary_entry()) 32.20 + e = entry_at(e->main_entry_index()); 32.21 + return e->constant_pool_index(); 32.22 +} 32.23 + 32.24 + 32.25 +// ------------------------------------------------------------------ 32.26 // ciCPCache::print 32.27 // 32.28 // Print debugging information about the cache.
33.1 --- a/src/share/vm/ci/ciCPCache.hpp Wed Jun 30 18:57:35 2010 -0700 33.2 +++ b/src/share/vm/ci/ciCPCache.hpp Fri Jul 02 01:36:15 2010 -0700 33.3 @@ -29,6 +29,18 @@ 33.4 // Note: This class is called ciCPCache as ciConstantPoolCache is used 33.5 // for something different. 33.6 class ciCPCache : public ciObject { 33.7 +private: 33.8 + constantPoolCacheOop get_cpCacheOop() { // must be called inside a VM_ENTRY_MARK 33.9 + return (constantPoolCacheOop) get_oop(); 33.10 + } 33.11 + 33.12 + ConstantPoolCacheEntry* entry_at(int i) { 33.13 + int raw_index = i; 33.14 + if (constantPoolCacheOopDesc::is_secondary_index(i)) 33.15 + raw_index = constantPoolCacheOopDesc::decode_secondary_index(i); 33.16 + return get_cpCacheOop()->entry_at(raw_index); 33.17 + } 33.18 + 33.19 public: 33.20 ciCPCache(constantPoolCacheHandle cpcache) : ciObject(cpcache) {} 33.21 33.22 @@ -41,5 +53,7 @@ 33.23 33.24 bool is_f1_null_at(int index); 33.25 33.26 + int get_pool_index(int index); 33.27 + 33.28 void print(); 33.29 };
34.1 --- a/src/share/vm/ci/ciClassList.hpp Wed Jun 30 18:57:35 2010 -0700 34.2 +++ b/src/share/vm/ci/ciClassList.hpp Fri Jul 02 01:36:15 2010 -0700 34.3 @@ -85,6 +85,7 @@ 34.4 friend class ciConstantPoolCache; \ 34.5 friend class ciField; \ 34.6 friend class ciConstant; \ 34.7 +friend class ciCPCache; \ 34.8 friend class ciFlags; \ 34.9 friend class ciExceptionHandler; \ 34.10 friend class ciCallProfile; \
35.1 --- a/src/share/vm/ci/ciEnv.cpp Wed Jun 30 18:57:35 2010 -0700 35.2 +++ b/src/share/vm/ci/ciEnv.cpp Fri Jul 02 01:36:15 2010 -0700 35.3 @@ -511,9 +511,22 @@ 35.4 // 35.5 // Implementation of get_constant_by_index(). 35.6 ciConstant ciEnv::get_constant_by_index_impl(constantPoolHandle cpool, 35.7 - int index, 35.8 + int pool_index, int cache_index, 35.9 ciInstanceKlass* accessor) { 35.10 + bool ignore_will_link; 35.11 EXCEPTION_CONTEXT; 35.12 + int index = pool_index; 35.13 + if (cache_index >= 0) { 35.14 + assert(index < 0, "only one kind of index at a time"); 35.15 + ConstantPoolCacheEntry* cpc_entry = cpool->cache()->entry_at(cache_index); 35.16 + index = cpc_entry->constant_pool_index(); 35.17 + oop obj = cpc_entry->f1(); 35.18 + if (obj != NULL) { 35.19 + assert(obj->is_instance(), "must be an instance"); 35.20 + ciObject* ciobj = get_object(obj); 35.21 + return ciConstant(T_OBJECT, ciobj); 35.22 + } 35.23 + } 35.24 constantTag tag = cpool->tag_at(index); 35.25 if (tag.is_int()) { 35.26 return ciConstant(T_INT, (jint)cpool->int_at(index)); 35.27 @@ -540,8 +553,7 @@ 35.28 return ciConstant(T_OBJECT, constant); 35.29 } else if (tag.is_klass() || tag.is_unresolved_klass()) { 35.30 // 4881222: allow ldc to take a class type 35.31 - bool ignore; 35.32 - ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore, accessor); 35.33 + ciKlass* klass = get_klass_by_index_impl(cpool, index, ignore_will_link, accessor); 35.34 if (HAS_PENDING_EXCEPTION) { 35.35 CLEAR_PENDING_EXCEPTION; 35.36 record_out_of_memory_failure(); 35.37 @@ -549,12 +561,26 @@ 35.38 } 35.39 assert (klass->is_instance_klass() || klass->is_array_klass(), 35.40 "must be an instance or array klass "); 35.41 - return ciConstant(T_OBJECT, klass); 35.42 + return ciConstant(T_OBJECT, klass->java_mirror()); 35.43 } else if (tag.is_object()) { 35.44 oop obj = cpool->object_at(index); 35.45 assert(obj->is_instance(), "must be an instance"); 35.46 ciObject* ciobj = get_object(obj); 35.47 return 
ciConstant(T_OBJECT, ciobj); 35.48 + } else if (tag.is_method_type()) { 35.49 + // must execute Java code to link this CP entry into cache[i].f1 35.50 + ciSymbol* signature = get_object(cpool->method_type_signature_at(index))->as_symbol(); 35.51 + ciObject* ciobj = get_unloaded_method_type_constant(signature); 35.52 + return ciConstant(T_OBJECT, ciobj); 35.53 + } else if (tag.is_method_handle()) { 35.54 + // must execute Java code to link this CP entry into cache[i].f1 35.55 + int ref_kind = cpool->method_handle_ref_kind_at(index); 35.56 + int callee_index = cpool->method_handle_klass_index_at(index); 35.57 + ciKlass* callee = get_klass_by_index_impl(cpool, callee_index, ignore_will_link, accessor); 35.58 + ciSymbol* name = get_object(cpool->method_handle_name_ref_at(index))->as_symbol(); 35.59 + ciSymbol* signature = get_object(cpool->method_handle_signature_ref_at(index))->as_symbol(); 35.60 + ciObject* ciobj = get_unloaded_method_handle_constant(callee, name, signature, ref_kind); 35.61 + return ciConstant(T_OBJECT, ciobj); 35.62 } else { 35.63 ShouldNotReachHere(); 35.64 return ciConstant(); 35.65 @@ -562,61 +588,15 @@ 35.66 } 35.67 35.68 // ------------------------------------------------------------------ 35.69 -// ciEnv::is_unresolved_string_impl 35.70 -// 35.71 -// Implementation of is_unresolved_string(). 35.72 -bool ciEnv::is_unresolved_string_impl(instanceKlass* accessor, int index) const { 35.73 - EXCEPTION_CONTEXT; 35.74 - assert(accessor->is_linked(), "must be linked before accessing constant pool"); 35.75 - constantPoolOop cpool = accessor->constants(); 35.76 - constantTag tag = cpool->tag_at(index); 35.77 - return tag.is_unresolved_string(); 35.78 -} 35.79 - 35.80 -// ------------------------------------------------------------------ 35.81 -// ciEnv::is_unresolved_klass_impl 35.82 -// 35.83 -// Implementation of is_unresolved_klass(). 
35.84 -bool ciEnv::is_unresolved_klass_impl(instanceKlass* accessor, int index) const { 35.85 - EXCEPTION_CONTEXT; 35.86 - assert(accessor->is_linked(), "must be linked before accessing constant pool"); 35.87 - constantPoolOop cpool = accessor->constants(); 35.88 - constantTag tag = cpool->tag_at(index); 35.89 - return tag.is_unresolved_klass(); 35.90 -} 35.91 - 35.92 -// ------------------------------------------------------------------ 35.93 // ciEnv::get_constant_by_index 35.94 // 35.95 // Pull a constant out of the constant pool. How appropriate. 35.96 // 35.97 // Implementation note: this query is currently in no way cached. 35.98 ciConstant ciEnv::get_constant_by_index(constantPoolHandle cpool, 35.99 - int index, 35.100 + int pool_index, int cache_index, 35.101 ciInstanceKlass* accessor) { 35.102 - GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, index, accessor);) 35.103 -} 35.104 - 35.105 -// ------------------------------------------------------------------ 35.106 -// ciEnv::is_unresolved_string 35.107 -// 35.108 -// Check constant pool 35.109 -// 35.110 -// Implementation note: this query is currently in no way cached. 35.111 -bool ciEnv::is_unresolved_string(ciInstanceKlass* accessor, 35.112 - int index) const { 35.113 - GUARDED_VM_ENTRY(return is_unresolved_string_impl(accessor->get_instanceKlass(), index); ) 35.114 -} 35.115 - 35.116 -// ------------------------------------------------------------------ 35.117 -// ciEnv::is_unresolved_klass 35.118 -// 35.119 -// Check constant pool 35.120 -// 35.121 -// Implementation note: this query is currently in no way cached. 
35.122 -bool ciEnv::is_unresolved_klass(ciInstanceKlass* accessor, 35.123 - int index) const { 35.124 - GUARDED_VM_ENTRY(return is_unresolved_klass_impl(accessor->get_instanceKlass(), index); ) 35.125 + GUARDED_VM_ENTRY(return get_constant_by_index_impl(cpool, pool_index, cache_index, accessor);) 35.126 } 35.127 35.128 // ------------------------------------------------------------------
36.1 --- a/src/share/vm/ci/ciEnv.hpp Wed Jun 30 18:57:35 2010 -0700 36.2 +++ b/src/share/vm/ci/ciEnv.hpp Fri Jul 02 01:36:15 2010 -0700 36.3 @@ -116,12 +116,8 @@ 36.4 bool& is_accessible, 36.5 ciInstanceKlass* loading_klass); 36.6 ciConstant get_constant_by_index(constantPoolHandle cpool, 36.7 - int constant_index, 36.8 + int pool_index, int cache_index, 36.9 ciInstanceKlass* accessor); 36.10 - bool is_unresolved_string(ciInstanceKlass* loading_klass, 36.11 - int constant_index) const; 36.12 - bool is_unresolved_klass(ciInstanceKlass* loading_klass, 36.13 - int constant_index) const; 36.14 ciField* get_field_by_index(ciInstanceKlass* loading_klass, 36.15 int field_index); 36.16 ciMethod* get_method_by_index(constantPoolHandle cpool, 36.17 @@ -137,12 +133,8 @@ 36.18 bool& is_accessible, 36.19 ciInstanceKlass* loading_klass); 36.20 ciConstant get_constant_by_index_impl(constantPoolHandle cpool, 36.21 - int constant_index, 36.22 + int pool_index, int cache_index, 36.23 ciInstanceKlass* loading_klass); 36.24 - bool is_unresolved_string_impl (instanceKlass* loading_klass, 36.25 - int constant_index) const; 36.26 - bool is_unresolved_klass_impl (instanceKlass* loading_klass, 36.27 - int constant_index) const; 36.28 ciField* get_field_by_index_impl(ciInstanceKlass* loading_klass, 36.29 int field_index); 36.30 ciMethod* get_method_by_index_impl(constantPoolHandle cpool, 36.31 @@ -190,6 +182,25 @@ 36.32 return _factory->get_unloaded_klass(accessing_klass, name, true); 36.33 } 36.34 36.35 + // Get a ciKlass representing an unloaded klass mirror. 36.36 + // Result is not necessarily unique, but will be unloaded. 36.37 + ciInstance* get_unloaded_klass_mirror(ciKlass* type) { 36.38 + return _factory->get_unloaded_klass_mirror(type); 36.39 + } 36.40 + 36.41 + // Get a ciInstance representing an unresolved method handle constant. 
36.42 + ciInstance* get_unloaded_method_handle_constant(ciKlass* holder, 36.43 + ciSymbol* name, 36.44 + ciSymbol* signature, 36.45 + int ref_kind) { 36.46 + return _factory->get_unloaded_method_handle_constant(holder, name, signature, ref_kind); 36.47 + } 36.48 + 36.49 + // Get a ciInstance representing an unresolved method type constant. 36.50 + ciInstance* get_unloaded_method_type_constant(ciSymbol* signature) { 36.51 + return _factory->get_unloaded_method_type_constant(signature); 36.52 + } 36.53 + 36.54 // See if we already have an unloaded klass for the given name 36.55 // or return NULL if not. 36.56 ciKlass *check_get_unloaded_klass(ciKlass* accessing_klass, ciSymbol* name) {
37.1 --- a/src/share/vm/ci/ciInstanceKlass.cpp Wed Jun 30 18:57:35 2010 -0700 37.2 +++ b/src/share/vm/ci/ciInstanceKlass.cpp Fri Jul 02 01:36:15 2010 -0700 37.3 @@ -323,8 +323,8 @@ 37.4 // ciInstanceKlass::java_mirror 37.5 // 37.6 // Get the instance of java.lang.Class corresponding to this klass. 37.7 +// Cache it on this->_java_mirror. 37.8 ciInstance* ciInstanceKlass::java_mirror() { 37.9 - assert(is_loaded(), "must be loaded"); 37.10 if (_java_mirror == NULL) { 37.11 _java_mirror = ciKlass::java_mirror(); 37.12 }
38.1 --- a/src/share/vm/ci/ciKlass.cpp Wed Jun 30 18:57:35 2010 -0700 38.2 +++ b/src/share/vm/ci/ciKlass.cpp Fri Jul 02 01:36:15 2010 -0700 38.3 @@ -192,8 +192,14 @@ 38.4 38.5 // ------------------------------------------------------------------ 38.6 // ciKlass::java_mirror 38.7 +// 38.8 +// Get the instance of java.lang.Class corresponding to this klass. 38.9 +// If it is an unloaded instance or array klass, return an unloaded 38.10 +// mirror object of type Class. 38.11 ciInstance* ciKlass::java_mirror() { 38.12 GUARDED_VM_ENTRY( 38.13 + if (!is_loaded()) 38.14 + return ciEnv::current()->get_unloaded_klass_mirror(this); 38.15 oop java_mirror = get_Klass()->java_mirror(); 38.16 return CURRENT_ENV->get_object(java_mirror)->as_instance(); 38.17 )
39.1 --- a/src/share/vm/ci/ciObjectFactory.cpp Wed Jun 30 18:57:35 2010 -0700 39.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp Fri Jul 02 01:36:15 2010 -0700 39.3 @@ -70,6 +70,7 @@ 39.4 39.5 _unloaded_methods = new (arena) GrowableArray<ciMethod*>(arena, 4, 0, NULL); 39.6 _unloaded_klasses = new (arena) GrowableArray<ciKlass*>(arena, 8, 0, NULL); 39.7 + _unloaded_instances = new (arena) GrowableArray<ciInstance*>(arena, 4, 0, NULL); 39.8 _return_addresses = 39.9 new (arena) GrowableArray<ciReturnAddress*>(arena, 8, 0, NULL); 39.10 } 39.11 @@ -443,6 +444,74 @@ 39.12 return new_klass; 39.13 } 39.14 39.15 + 39.16 +//------------------------------------------------------------------ 39.17 +// ciObjectFactory::get_unloaded_instance 39.18 +// 39.19 +// Get a ciInstance representing an as-yet undetermined instance of a given class. 39.20 +// 39.21 +ciInstance* ciObjectFactory::get_unloaded_instance(ciInstanceKlass* instance_klass) { 39.22 + for (int i=0; i<_unloaded_instances->length(); i++) { 39.23 + ciInstance* entry = _unloaded_instances->at(i); 39.24 + if (entry->klass()->equals(instance_klass)) { 39.25 + // We've found a match. 39.26 + return entry; 39.27 + } 39.28 + } 39.29 + 39.30 + // This is a new unloaded instance. Create it and stick it in 39.31 + // the cache. 39.32 + ciInstance* new_instance = new (arena()) ciInstance(instance_klass); 39.33 + 39.34 + init_ident_of(new_instance); 39.35 + _unloaded_instances->append(new_instance); 39.36 + 39.37 + // make sure it looks the way we want: 39.38 + assert(!new_instance->is_loaded(), ""); 39.39 + assert(new_instance->klass() == instance_klass, ""); 39.40 + 39.41 + return new_instance; 39.42 +} 39.43 + 39.44 + 39.45 +//------------------------------------------------------------------ 39.46 +// ciObjectFactory::get_unloaded_klass_mirror 39.47 +// 39.48 +// Get a ciInstance representing an unresolved klass mirror. 39.49 +// 39.50 +// Currently, this ignores the parameters and returns a unique unloaded instance. 
39.51 +ciInstance* ciObjectFactory::get_unloaded_klass_mirror(ciKlass* type) { 39.52 + assert(ciEnv::_Class_klass != NULL, ""); 39.53 + return get_unloaded_instance(ciEnv::_Class_klass->as_instance_klass()); 39.54 +} 39.55 + 39.56 +//------------------------------------------------------------------ 39.57 +// ciObjectFactory::get_unloaded_method_handle_constant 39.58 +// 39.59 +// Get a ciInstance representing an unresolved method handle constant. 39.60 +// 39.61 +// Currently, this ignores the parameters and returns a unique unloaded instance. 39.62 +ciInstance* ciObjectFactory::get_unloaded_method_handle_constant(ciKlass* holder, 39.63 + ciSymbol* name, 39.64 + ciSymbol* signature, 39.65 + int ref_kind) { 39.66 + if (ciEnv::_MethodHandle_klass == NULL) return NULL; 39.67 + return get_unloaded_instance(ciEnv::_MethodHandle_klass->as_instance_klass()); 39.68 +} 39.69 + 39.70 +//------------------------------------------------------------------ 39.71 +// ciObjectFactory::get_unloaded_method_type_constant 39.72 +// 39.73 +// Get a ciInstance representing an unresolved method type constant. 39.74 +// 39.75 +// Currently, this ignores the parameters and returns a unique unloaded instance. 
39.76 +ciInstance* ciObjectFactory::get_unloaded_method_type_constant(ciSymbol* signature) { 39.77 + if (ciEnv::_MethodType_klass == NULL) return NULL; 39.78 + return get_unloaded_instance(ciEnv::_MethodType_klass->as_instance_klass()); 39.79 +} 39.80 + 39.81 + 39.82 + 39.83 //------------------------------------------------------------------ 39.84 // ciObjectFactory::get_empty_methodData 39.85 // 39.86 @@ -637,7 +706,8 @@ 39.87 // 39.88 // Print debugging information about the object factory 39.89 void ciObjectFactory::print() { 39.90 - tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_klasses=%d>", 39.91 + tty->print("<ciObjectFactory oops=%d unloaded_methods=%d unloaded_instances=%d unloaded_klasses=%d>", 39.92 _ci_objects->length(), _unloaded_methods->length(), 39.93 + _unloaded_instances->length(), 39.94 _unloaded_klasses->length()); 39.95 }
40.1 --- a/src/share/vm/ci/ciObjectFactory.hpp Wed Jun 30 18:57:35 2010 -0700 40.2 +++ b/src/share/vm/ci/ciObjectFactory.hpp Fri Jul 02 01:36:15 2010 -0700 40.3 @@ -39,6 +39,7 @@ 40.4 GrowableArray<ciObject*>* _ci_objects; 40.5 GrowableArray<ciMethod*>* _unloaded_methods; 40.6 GrowableArray<ciKlass*>* _unloaded_klasses; 40.7 + GrowableArray<ciInstance*>* _unloaded_instances; 40.8 GrowableArray<ciReturnAddress*>* _return_addresses; 40.9 int _next_ident; 40.10 40.11 @@ -73,6 +74,8 @@ 40.12 40.13 void print_contents_impl(); 40.14 40.15 + ciInstance* get_unloaded_instance(ciInstanceKlass* klass); 40.16 + 40.17 public: 40.18 static bool is_initialized() { return _initialized; } 40.19 40.20 @@ -98,6 +101,18 @@ 40.21 ciSymbol* name, 40.22 bool create_if_not_found); 40.23 40.24 + // Get a ciInstance representing an unresolved klass mirror. 40.25 + ciInstance* get_unloaded_klass_mirror(ciKlass* type); 40.26 + 40.27 + // Get a ciInstance representing an unresolved method handle constant. 40.28 + ciInstance* get_unloaded_method_handle_constant(ciKlass* holder, 40.29 + ciSymbol* name, 40.30 + ciSymbol* signature, 40.31 + int ref_kind); 40.32 + 40.33 + // Get a ciInstance representing an unresolved method type constant. 40.34 + ciInstance* get_unloaded_method_type_constant(ciSymbol* signature); 40.35 + 40.36 40.37 // Get the ciMethodData representing the methodData for a method 40.38 // with none.
41.1 --- a/src/share/vm/ci/ciStreams.cpp Wed Jun 30 18:57:35 2010 -0700 41.2 +++ b/src/share/vm/ci/ciStreams.cpp Fri Jul 02 01:36:15 2010 -0700 41.3 @@ -186,12 +186,13 @@ 41.4 } 41.5 41.6 // ------------------------------------------------------------------ 41.7 -// ciBytecodeStream::get_constant_index 41.8 +// ciBytecodeStream::get_constant_raw_index 41.9 // 41.10 // If this bytecode is one of the ldc variants, get the index of the 41.11 // referenced constant. 41.12 -int ciBytecodeStream::get_constant_index() const { 41.13 - switch(cur_bc()) { 41.14 +int ciBytecodeStream::get_constant_raw_index() const { 41.15 + // work-alike for Bytecode_loadconstant::raw_index() 41.16 + switch (cur_bc()) { 41.17 case Bytecodes::_ldc: 41.18 return get_index_u1(); 41.19 case Bytecodes::_ldc_w: 41.20 @@ -202,25 +203,52 @@ 41.21 return 0; 41.22 } 41.23 } 41.24 + 41.25 +// ------------------------------------------------------------------ 41.26 +// ciBytecodeStream::get_constant_pool_index 41.27 +// Decode any CP cache index into a regular pool index. 41.28 +int ciBytecodeStream::get_constant_pool_index() const { 41.29 + // work-alike for Bytecode_loadconstant::pool_index() 41.30 + int index = get_constant_raw_index(); 41.31 + if (has_cache_index()) { 41.32 + return get_cpcache()->get_pool_index(index); 41.33 + } 41.34 + return index; 41.35 +} 41.36 + 41.37 +// ------------------------------------------------------------------ 41.38 +// ciBytecodeStream::get_constant_cache_index 41.39 +// Return the CP cache index, or -1 if there isn't any. 41.40 +int ciBytecodeStream::get_constant_cache_index() const { 41.41 + // work-alike for Bytecode_loadconstant::cache_index() 41.42 + return has_cache_index() ? get_constant_raw_index() : -1; 41.43 +} 41.44 + 41.45 // ------------------------------------------------------------------ 41.46 // ciBytecodeStream::get_constant 41.47 // 41.48 // If this bytecode is one of the ldc variants, get the referenced 41.49 // constant. 
41.50 ciConstant ciBytecodeStream::get_constant() { 41.51 + int pool_index = get_constant_raw_index(); 41.52 + int cache_index = -1; 41.53 + if (has_cache_index()) { 41.54 + cache_index = pool_index; 41.55 + pool_index = -1; 41.56 + } 41.57 VM_ENTRY_MARK; 41.58 constantPoolHandle cpool(_method->get_methodOop()->constants()); 41.59 - return CURRENT_ENV->get_constant_by_index(cpool, get_constant_index(), _holder); 41.60 + return CURRENT_ENV->get_constant_by_index(cpool, pool_index, cache_index, _holder); 41.61 } 41.62 41.63 // ------------------------------------------------------------------ 41.64 -bool ciBytecodeStream::is_unresolved_string() const { 41.65 - return CURRENT_ENV->is_unresolved_string(_holder, get_constant_index()); 41.66 -} 41.67 - 41.68 -// ------------------------------------------------------------------ 41.69 -bool ciBytecodeStream::is_unresolved_klass() const { 41.70 - return CURRENT_ENV->is_unresolved_klass(_holder, get_klass_index()); 41.71 +// ciBytecodeStream::get_constant_pool_tag 41.72 +// 41.73 +// If this bytecode is one of the ldc variants, get the referenced 41.74 +// constant. 41.75 +constantTag ciBytecodeStream::get_constant_pool_tag(int index) const { 41.76 + VM_ENTRY_MARK; 41.77 + return _method->get_methodOop()->constants()->tag_at(index); 41.78 } 41.79 41.80 // ------------------------------------------------------------------ 41.81 @@ -378,13 +406,16 @@ 41.82 41.83 // ------------------------------------------------------------------ 41.84 // ciBytecodeStream::get_cpcache 41.85 -ciCPCache* ciBytecodeStream::get_cpcache() { 41.86 - VM_ENTRY_MARK; 41.87 - // Get the constant pool. 41.88 - constantPoolOop cpool = _holder->get_instanceKlass()->constants(); 41.89 - constantPoolCacheOop cpcache = cpool->cache(); 41.90 +ciCPCache* ciBytecodeStream::get_cpcache() const { 41.91 + if (_cpcache == NULL) { 41.92 + VM_ENTRY_MARK; 41.93 + // Get the constant pool. 
41.94 + constantPoolOop cpool = _holder->get_instanceKlass()->constants(); 41.95 + constantPoolCacheOop cpcache = cpool->cache(); 41.96 41.97 - return CURRENT_ENV->get_object(cpcache)->as_cpcache(); 41.98 + *(ciCPCache**)&_cpcache = CURRENT_ENV->get_object(cpcache)->as_cpcache(); 41.99 + } 41.100 + return _cpcache; 41.101 } 41.102 41.103 // ------------------------------------------------------------------
42.1 --- a/src/share/vm/ci/ciStreams.hpp Wed Jun 30 18:57:35 2010 -0700 42.2 +++ b/src/share/vm/ci/ciStreams.hpp Fri Jul 02 01:36:15 2010 -0700 42.3 @@ -46,6 +46,7 @@ 42.4 42.5 ciMethod* _method; // the method 42.6 ciInstanceKlass* _holder; 42.7 + ciCPCache* _cpcache; 42.8 address _bc_start; // Start of current bytecode for table 42.9 address _was_wide; // Address past last wide bytecode 42.10 jint* _table_base; // Aligned start of last table or switch 42.11 @@ -58,7 +59,9 @@ 42.12 42.13 void reset( address base, unsigned int size ) { 42.14 _bc_start =_was_wide = 0; 42.15 - _start = _pc = base; _end = base + size; } 42.16 + _start = _pc = base; _end = base + size; 42.17 + _cpcache = NULL; 42.18 + } 42.19 42.20 void assert_wide(bool require_wide) const { 42.21 if (require_wide) 42.22 @@ -136,15 +139,20 @@ 42.23 bool is_wide() const { return ( _pc == _was_wide ); } 42.24 42.25 // Does this instruction contain an index which refes into the CP cache? 42.26 - bool uses_cp_cache() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } 42.27 + bool has_cache_index() const { return Bytecodes::uses_cp_cache(cur_bc_raw()); } 42.28 42.29 int get_index_u1() const { 42.30 return bytecode()->get_index_u1(cur_bc_raw()); 42.31 } 42.32 42.33 + int get_index_u1_cpcache() const { 42.34 + return bytecode()->get_index_u1_cpcache(cur_bc_raw()); 42.35 + } 42.36 + 42.37 // Get a byte index following this bytecode. 42.38 // If prefixed with a wide bytecode, get a wide index. 42.39 int get_index() const { 42.40 + assert(!has_cache_index(), "else use cpcache variant"); 42.41 return (_pc == _was_wide) // was widened? 42.42 ? 
get_index_u2(true) // yes, return wide index 42.43 : get_index_u1(); // no, return narrow index 42.44 @@ -207,7 +215,9 @@ 42.45 return cur_bci() + get_int_table(index); } 42.46 42.47 // --- Constant pool access --- 42.48 - int get_constant_index() const; 42.49 + int get_constant_raw_index() const; 42.50 + int get_constant_pool_index() const; 42.51 + int get_constant_cache_index() const; 42.52 int get_field_index(); 42.53 int get_method_index(); 42.54 42.55 @@ -217,12 +227,17 @@ 42.56 int get_klass_index() const; 42.57 42.58 // If this bytecode is one of the ldc variants, get the referenced 42.59 - // constant 42.60 + // constant. Do not attempt to resolve it, since that would require 42.61 + // execution of Java code. If it is not resolved, return an unloaded 42.62 + // object (ciConstant.as_object()->is_loaded() == false). 42.63 ciConstant get_constant(); 42.64 - // True if the ldc variant points to an unresolved string 42.65 - bool is_unresolved_string() const; 42.66 - // True if the ldc variant points to an unresolved klass 42.67 - bool is_unresolved_klass() const; 42.68 + constantTag get_constant_pool_tag(int index) const; 42.69 + 42.70 + // True if the klass-using bytecode points to an unresolved klass 42.71 + bool is_unresolved_klass() const { 42.72 + constantTag tag = get_constant_pool_tag(get_klass_index()); 42.73 + return tag.is_unresolved_klass(); 42.74 + } 42.75 42.76 // If this bytecode is one of get_field, get_static, put_field, 42.77 // or put_static, get the referenced field. 42.78 @@ -238,7 +253,7 @@ 42.79 int get_method_holder_index(); 42.80 int get_method_signature_index(); 42.81 42.82 - ciCPCache* get_cpcache(); 42.83 + ciCPCache* get_cpcache() const; 42.84 ciCallSite* get_call_site(); 42.85 }; 42.86
43.1 --- a/src/share/vm/ci/ciTypeFlow.cpp Wed Jun 30 18:57:35 2010 -0700 43.2 +++ b/src/share/vm/ci/ciTypeFlow.cpp Fri Jul 02 01:36:15 2010 -0700 43.3 @@ -712,10 +712,8 @@ 43.4 ciObject* obj = con.as_object(); 43.5 if (obj->is_null_object()) { 43.6 push_null(); 43.7 - } else if (obj->is_klass()) { 43.8 - // The type of ldc <class> is java.lang.Class 43.9 - push_object(outer()->env()->Class_klass()); 43.10 } else { 43.11 + assert(!obj->is_klass(), "must be java_mirror of klass"); 43.12 push_object(obj->klass()); 43.13 } 43.14 } else {
44.1 --- a/src/share/vm/classfile/classFileParser.cpp Wed Jun 30 18:57:35 2010 -0700 44.2 +++ b/src/share/vm/classfile/classFileParser.cpp Fri Jul 02 01:36:15 2010 -0700 44.3 @@ -117,6 +117,29 @@ 44.4 cp->string_index_at_put(index, string_index); 44.5 } 44.6 break; 44.7 + case JVM_CONSTANT_MethodHandle : 44.8 + case JVM_CONSTANT_MethodType : 44.9 + if (!EnableMethodHandles || 44.10 + _major_version < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { 44.11 + classfile_parse_error( 44.12 + (!EnableInvokeDynamic ? 44.13 + "This JVM does not support constant tag %u in class file %s" : 44.14 + "Class file version does not support constant tag %u in class file %s"), 44.15 + tag, CHECK); 44.16 + } 44.17 + if (tag == JVM_CONSTANT_MethodHandle) { 44.18 + cfs->guarantee_more(4, CHECK); // ref_kind, method_index, tag/access_flags 44.19 + u1 ref_kind = cfs->get_u1_fast(); 44.20 + u2 method_index = cfs->get_u2_fast(); 44.21 + cp->method_handle_index_at_put(index, ref_kind, method_index); 44.22 + } else if (tag == JVM_CONSTANT_MethodType) { 44.23 + cfs->guarantee_more(3, CHECK); // signature_index, tag/access_flags 44.24 + u2 signature_index = cfs->get_u2_fast(); 44.25 + cp->method_type_index_at_put(index, signature_index); 44.26 + } else { 44.27 + ShouldNotReachHere(); 44.28 + } 44.29 + break; 44.30 case JVM_CONSTANT_Integer : 44.31 { 44.32 cfs->guarantee_more(5, CHECK); // bytes, tag/access_flags 44.33 @@ -337,6 +360,60 @@ 44.34 cp->unresolved_string_at_put(index, sym); 44.35 } 44.36 break; 44.37 + case JVM_CONSTANT_MethodHandle : 44.38 + { 44.39 + int ref_index = cp->method_handle_index_at(index); 44.40 + check_property( 44.41 + valid_cp_range(ref_index, length) && 44.42 + EnableMethodHandles, 44.43 + "Invalid constant pool index %u in class file %s", 44.44 + ref_index, CHECK_(nullHandle)); 44.45 + constantTag tag = cp->tag_at(ref_index); 44.46 + int ref_kind = cp->method_handle_ref_kind_at(index); 44.47 + switch (ref_kind) { 44.48 + case JVM_REF_getField: 44.49 + case 
JVM_REF_getStatic: 44.50 + case JVM_REF_putField: 44.51 + case JVM_REF_putStatic: 44.52 + check_property( 44.53 + tag.is_field(), 44.54 + "Invalid constant pool index %u in class file %s (not a field)", 44.55 + ref_index, CHECK_(nullHandle)); 44.56 + break; 44.57 + case JVM_REF_invokeVirtual: 44.58 + case JVM_REF_invokeStatic: 44.59 + case JVM_REF_invokeSpecial: 44.60 + case JVM_REF_newInvokeSpecial: 44.61 + check_property( 44.62 + tag.is_method(), 44.63 + "Invalid constant pool index %u in class file %s (not a method)", 44.64 + ref_index, CHECK_(nullHandle)); 44.65 + break; 44.66 + case JVM_REF_invokeInterface: 44.67 + check_property( 44.68 + tag.is_interface_method(), 44.69 + "Invalid constant pool index %u in class file %s (not an interface method)", 44.70 + ref_index, CHECK_(nullHandle)); 44.71 + break; 44.72 + default: 44.73 + classfile_parse_error( 44.74 + "Bad method handle kind at constant pool index %u in class file %s", 44.75 + index, CHECK_(nullHandle)); 44.76 + } 44.77 + // Keep the ref_index unchanged. It will be indirected at link-time. 
44.78 + } 44.79 + break; 44.80 + case JVM_CONSTANT_MethodType : 44.81 + { 44.82 + int ref_index = cp->method_type_index_at(index); 44.83 + check_property( 44.84 + valid_cp_range(ref_index, length) && 44.85 + cp->tag_at(ref_index).is_utf8() && 44.86 + EnableMethodHandles, 44.87 + "Invalid constant pool index %u in class file %s", 44.88 + ref_index, CHECK_(nullHandle)); 44.89 + } 44.90 + break; 44.91 default: 44.92 fatal(err_msg("bad constant pool tag value %u", 44.93 cp->tag_at(index).value())); 44.94 @@ -452,6 +529,43 @@ 44.95 } 44.96 break; 44.97 } 44.98 + case JVM_CONSTANT_MethodHandle: { 44.99 + int ref_index = cp->method_handle_index_at(index); 44.100 + int ref_kind = cp->method_handle_ref_kind_at(index); 44.101 + switch (ref_kind) { 44.102 + case JVM_REF_invokeVirtual: 44.103 + case JVM_REF_invokeStatic: 44.104 + case JVM_REF_invokeSpecial: 44.105 + case JVM_REF_newInvokeSpecial: 44.106 + { 44.107 + int name_and_type_ref_index = cp->name_and_type_ref_index_at(ref_index); 44.108 + int name_ref_index = cp->name_ref_index_at(name_and_type_ref_index); 44.109 + symbolHandle name(THREAD, cp->symbol_at(name_ref_index)); 44.110 + if (ref_kind == JVM_REF_newInvokeSpecial) { 44.111 + if (name() != vmSymbols::object_initializer_name()) { 44.112 + classfile_parse_error( 44.113 + "Bad constructor name at constant pool index %u in class file %s", 44.114 + name_ref_index, CHECK_(nullHandle)); 44.115 + } 44.116 + } else { 44.117 + if (name() == vmSymbols::object_initializer_name()) { 44.118 + classfile_parse_error( 44.119 + "Bad method name at constant pool index %u in class file %s", 44.120 + name_ref_index, CHECK_(nullHandle)); 44.121 + } 44.122 + } 44.123 + } 44.124 + break; 44.125 + // Other ref_kinds are already fully checked in previous pass. 
44.126 + } 44.127 + break; 44.128 + } 44.129 + case JVM_CONSTANT_MethodType: { 44.130 + symbolHandle no_name = vmSymbolHandles::type_name(); // place holder 44.131 + symbolHandle signature(THREAD, cp->method_type_signature_at(index)); 44.132 + verify_legal_method_signature(no_name, signature, CHECK_(nullHandle)); 44.133 + break; 44.134 + } 44.135 } // end of switch 44.136 } // end of for 44.137 44.138 @@ -467,7 +581,7 @@ 44.139 case JVM_CONSTANT_UnresolvedClass : 44.140 // Patching a class means pre-resolving it. 44.141 // The name in the constant pool is ignored. 44.142 - if (patch->klass() == SystemDictionary::Class_klass()) { // %%% java_lang_Class::is_instance 44.143 + if (java_lang_Class::is_instance(patch())) { 44.144 guarantee_property(!java_lang_Class::is_primitive(patch()), 44.145 "Illegal class patch at %d in class file %s", 44.146 index, CHECK);
45.1 --- a/src/share/vm/classfile/systemDictionary.cpp Wed Jun 30 18:57:35 2010 -0700 45.2 +++ b/src/share/vm/classfile/systemDictionary.cpp Fri Jul 02 01:36:15 2010 -0700 45.3 @@ -2454,6 +2454,48 @@ 45.4 return Handle(THREAD, (oop) result.get_jobject()); 45.5 } 45.6 45.7 +// Ask Java code to find or construct a method handle constant. 45.8 +Handle SystemDictionary::link_method_handle_constant(KlassHandle caller, 45.9 + int ref_kind, //e.g., JVM_REF_invokeVirtual 45.10 + KlassHandle callee, 45.11 + symbolHandle name_sym, 45.12 + symbolHandle signature, 45.13 + TRAPS) { 45.14 + Handle empty; 45.15 + Handle name = java_lang_String::create_from_symbol(name_sym(), CHECK_(empty)); 45.16 + Handle type; 45.17 + if (signature->utf8_length() > 0 && signature->byte_at(0) == '(') { 45.18 + bool ignore_is_on_bcp = false; 45.19 + type = find_method_handle_type(signature, caller, ignore_is_on_bcp, CHECK_(empty)); 45.20 + } else { 45.21 + SignatureStream ss(signature(), false); 45.22 + if (!ss.is_done()) { 45.23 + oop mirror = ss.as_java_mirror(caller->class_loader(), caller->protection_domain(), 45.24 + SignatureStream::NCDFError, CHECK_(empty)); 45.25 + type = Handle(THREAD, mirror); 45.26 + ss.next(); 45.27 + if (!ss.is_done()) type = Handle(); // error! 
45.28 + } 45.29 + } 45.30 + if (type.is_null()) { 45.31 + THROW_MSG_(vmSymbols::java_lang_LinkageError(), "bad signature", empty); 45.32 + } 45.33 + 45.34 + // call sun.dyn.MethodHandleNatives::linkMethodHandleConstant(Class caller, int refKind, Class callee, String name, Object type) -> MethodHandle 45.35 + JavaCallArguments args; 45.36 + args.push_oop(caller->java_mirror()); // the referring class 45.37 + args.push_int(ref_kind); 45.38 + args.push_oop(callee->java_mirror()); // the target class 45.39 + args.push_oop(name()); 45.40 + args.push_oop(type()); 45.41 + JavaValue result(T_OBJECT); 45.42 + JavaCalls::call_static(&result, 45.43 + SystemDictionary::MethodHandleNatives_klass(), 45.44 + vmSymbols::linkMethodHandleConstant_name(), 45.45 + vmSymbols::linkMethodHandleConstant_signature(), 45.46 + &args, CHECK_(empty)); 45.47 + return Handle(THREAD, (oop) result.get_jobject()); 45.48 +} 45.49 45.50 // Ask Java code to find or construct a java.dyn.CallSite for the given 45.51 // name and signature, as interpreted relative to the given class loader.
46.1 --- a/src/share/vm/classfile/systemDictionary.hpp Wed Jun 30 18:57:35 2010 -0700 46.2 +++ b/src/share/vm/classfile/systemDictionary.hpp Fri Jul 02 01:36:15 2010 -0700 46.3 @@ -473,6 +473,13 @@ 46.4 KlassHandle accessing_klass, 46.5 bool& return_bcp_flag, 46.6 TRAPS); 46.7 + // ask Java to compute a java.dyn.MethodHandle object for a given CP entry 46.8 + static Handle link_method_handle_constant(KlassHandle caller, 46.9 + int ref_kind, //e.g., JVM_REF_invokeVirtual 46.10 + KlassHandle callee, 46.11 + symbolHandle name, 46.12 + symbolHandle signature, 46.13 + TRAPS); 46.14 // ask Java to create a dynamic call site, while linking an invokedynamic op 46.15 static Handle make_dynamic_call_site(Handle bootstrap_method, 46.16 // Callee information:
47.1 --- a/src/share/vm/classfile/verifier.cpp Wed Jun 30 18:57:35 2010 -0700 47.2 +++ b/src/share/vm/classfile/verifier.cpp Fri Jul 02 01:36:15 2010 -0700 47.3 @@ -1598,7 +1598,10 @@ 47.4 if (opcode == Bytecodes::_ldc || opcode == Bytecodes::_ldc_w) { 47.5 if (!tag.is_unresolved_string() && !tag.is_unresolved_klass()) { 47.6 types = (1 << JVM_CONSTANT_Integer) | (1 << JVM_CONSTANT_Float) 47.7 - | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class); 47.8 + | (1 << JVM_CONSTANT_String) | (1 << JVM_CONSTANT_Class) 47.9 + | (1 << JVM_CONSTANT_MethodHandle) | (1 << JVM_CONSTANT_MethodType); 47.10 + // Note: The class file parser already verified the legality of 47.11 + // MethodHandle and MethodType constants. 47.12 verify_cp_type(index, cp, types, CHECK_VERIFY(this)); 47.13 } 47.14 } else { 47.15 @@ -1632,6 +1635,14 @@ 47.16 current_frame->push_stack_2( 47.17 VerificationType::long_type(), 47.18 VerificationType::long2_type(), CHECK_VERIFY(this)); 47.19 + } else if (tag.is_method_handle()) { 47.20 + current_frame->push_stack( 47.21 + VerificationType::reference_type( 47.22 + vmSymbols::java_dyn_MethodHandle()), CHECK_VERIFY(this)); 47.23 + } else if (tag.is_method_type()) { 47.24 + current_frame->push_stack( 47.25 + VerificationType::reference_type( 47.26 + vmSymbols::java_dyn_MethodType()), CHECK_VERIFY(this)); 47.27 } else { 47.28 verify_error(bci, "Invalid index in ldc"); 47.29 return; 47.30 @@ -1920,9 +1931,12 @@ 47.31 // Get referenced class type 47.32 VerificationType ref_class_type; 47.33 if (opcode == Bytecodes::_invokedynamic) { 47.34 - if (!EnableInvokeDynamic) { 47.35 + if (!EnableInvokeDynamic || 47.36 + _klass->major_version() < Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { 47.37 class_format_error( 47.38 - "invokedynamic instructions not enabled on this JVM", 47.39 + (!EnableInvokeDynamic ? 
47.40 + "invokedynamic instructions not enabled in this JVM" : 47.41 + "invokedynamic instructions not supported by this class file version"), 47.42 _klass->external_name()); 47.43 return; 47.44 }
48.1 --- a/src/share/vm/classfile/verifier.hpp Wed Jun 30 18:57:35 2010 -0700 48.2 +++ b/src/share/vm/classfile/verifier.hpp Fri Jul 02 01:36:15 2010 -0700 48.3 @@ -25,7 +25,10 @@ 48.4 // The verifier class 48.5 class Verifier : AllStatic { 48.6 public: 48.7 - enum { STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50 }; 48.8 + enum { 48.9 + STACKMAP_ATTRIBUTE_MAJOR_VERSION = 50, 48.10 + INVOKEDYNAMIC_MAJOR_VERSION = 51 48.11 + }; 48.12 typedef enum { ThrowException, NoException } Mode; 48.13 48.14 /**
49.1 --- a/src/share/vm/classfile/vmSymbols.hpp Wed Jun 30 18:57:35 2010 -0700 49.2 +++ b/src/share/vm/classfile/vmSymbols.hpp Fri Jul 02 01:36:15 2010 -0700 49.3 @@ -246,6 +246,8 @@ 49.4 /* internal up-calls made only by the JVM, via class sun.dyn.MethodHandleNatives: */ \ 49.5 template(findMethodHandleType_name, "findMethodHandleType") \ 49.6 template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/dyn/MethodType;") \ 49.7 + template(linkMethodHandleConstant_name, "linkMethodHandleConstant") \ 49.8 + template(linkMethodHandleConstant_signature, "(Ljava/lang/Class;ILjava/lang/Class;Ljava/lang/String;Ljava/lang/Object;)Ljava/dyn/MethodHandle;") \ 49.9 template(makeDynamicCallSite_name, "makeDynamicCallSite") \ 49.10 template(makeDynamicCallSite_signature, "(Ljava/dyn/MethodHandle;Ljava/lang/String;Ljava/dyn/MethodType;Ljava/lang/Object;Lsun/dyn/MemberName;I)Ljava/dyn/CallSite;") \ 49.11 NOT_LP64( do_alias(machine_word_signature, int_signature) ) \
50.1 --- a/src/share/vm/code/nmethod.cpp Wed Jun 30 18:57:35 2010 -0700 50.2 +++ b/src/share/vm/code/nmethod.cpp Fri Jul 02 01:36:15 2010 -0700 50.3 @@ -584,6 +584,7 @@ 50.4 _oops_do_mark_link = NULL; 50.5 _method = method; 50.6 _entry_bci = InvocationEntryBci; 50.7 + _jmethod_id = NULL; 50.8 _osr_link = NULL; 50.9 _scavenge_root_link = NULL; 50.10 _scavenge_root_state = 0; 50.11 @@ -677,6 +678,7 @@ 50.12 _oops_do_mark_link = NULL; 50.13 _method = method; 50.14 _entry_bci = InvocationEntryBci; 50.15 + _jmethod_id = NULL; 50.16 _osr_link = NULL; 50.17 _scavenge_root_link = NULL; 50.18 _scavenge_root_state = 0; 50.19 @@ -784,6 +786,7 @@ 50.20 NOT_PRODUCT(_has_debug_info = false); 50.21 _oops_do_mark_link = NULL; 50.22 _method = method; 50.23 + _jmethod_id = NULL; 50.24 _compile_id = compile_id; 50.25 _comp_level = comp_level; 50.26 _entry_bci = entry_bci; 50.27 @@ -1488,11 +1491,25 @@ 50.28 moop->signature()->utf8_length(), 50.29 code_begin(), code_size()); 50.30 50.31 + if (JvmtiExport::should_post_compiled_method_load() || 50.32 + JvmtiExport::should_post_compiled_method_unload()) { 50.33 + get_and_cache_jmethod_id(); 50.34 + } 50.35 + 50.36 if (JvmtiExport::should_post_compiled_method_load()) { 50.37 JvmtiExport::post_compiled_method_load(this); 50.38 } 50.39 } 50.40 50.41 +jmethodID nmethod::get_and_cache_jmethod_id() { 50.42 + if (_jmethod_id == NULL) { 50.43 + // Cache the jmethod_id since it can no longer be looked up once the 50.44 + // method itself has been marked for unloading. 50.45 + _jmethod_id = method()->jmethod_id(); 50.46 + } 50.47 + return _jmethod_id; 50.48 +} 50.49 + 50.50 void nmethod::post_compiled_method_unload() { 50.51 if (unload_reported()) { 50.52 // During unloading we transition to unloaded and then to zombie 50.53 @@ -1504,12 +1521,17 @@ 50.54 DTRACE_METHOD_UNLOAD_PROBE(method()); 50.55 50.56 // If a JVMTI agent has enabled the CompiledMethodUnload event then 50.57 - // post the event. 
Sometime later this nmethod will be made a zombie by 50.58 - // the sweeper but the methodOop will not be valid at that point. 50.59 - if (JvmtiExport::should_post_compiled_method_unload()) { 50.60 + // post the event. Sometime later this nmethod will be made a zombie 50.61 + // by the sweeper but the methodOop will not be valid at that point. 50.62 + // If the _jmethod_id is null then no load event was ever requested 50.63 + // so don't bother posting the unload. The main reason for this is 50.64 + // that the jmethodID is a weak reference to the methodOop so if 50.65 + // it's being unloaded there's no way to look it up since the weak 50.66 + // ref will have been cleared. 50.67 + if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { 50.68 assert(!unload_reported(), "already unloaded"); 50.69 HandleMark hm; 50.70 - JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin()); 50.71 + JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin()); 50.72 } 50.73 50.74 // The JVMTI CompiledMethodUnload event can be enabled or disabled at 50.75 @@ -2659,13 +2681,10 @@ 50.76 case Bytecodes::_getstatic: 50.77 case Bytecodes::_putstatic: 50.78 { 50.79 - methodHandle sdm = sd->method(); 50.80 - Bytecode_field* field = Bytecode_field_at(sdm(), sdm->bcp_from(sd->bci())); 50.81 - constantPoolOop sdmc = sdm->constants(); 50.82 - symbolOop name = sdmc->name_ref_at(field->index()); 50.83 + Bytecode_field* field = Bytecode_field_at(sd->method(), sd->bci()); 50.84 st->print(" "); 50.85 - if (name != NULL) 50.86 - name->print_symbol_on(st); 50.87 + if (field->name() != NULL) 50.88 + field->name()->print_symbol_on(st); 50.89 else 50.90 st->print("<UNKNOWN>"); 50.91 }
51.1 --- a/src/share/vm/code/nmethod.hpp Wed Jun 30 18:57:35 2010 -0700 51.2 +++ b/src/share/vm/code/nmethod.hpp Fri Jul 02 01:36:15 2010 -0700 51.3 @@ -135,6 +135,7 @@ 51.4 51.5 methodOop _method; 51.6 int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method 51.7 + jmethodID _jmethod_id; // Cache of method()->jmethod_id() 51.8 51.9 // To support simple linked-list chaining of nmethods: 51.10 nmethod* _osr_link; // from instanceKlass::osr_nmethods_head 51.11 @@ -599,6 +600,7 @@ 51.12 51.13 // jvmti support: 51.14 void post_compiled_method_load_event(); 51.15 + jmethodID get_and_cache_jmethod_id(); 51.16 51.17 // verify operations 51.18 void verify();
52.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Jun 30 18:57:35 2010 -0700 52.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Jul 02 01:36:15 2010 -0700 52.3 @@ -3972,6 +3972,10 @@ 52.4 52.5 void work(int i) { 52.6 if (i >= _n_workers) return; // no work needed this round 52.7 + 52.8 + double start_time_ms = os::elapsedTime() * 1000.0; 52.9 + _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms); 52.10 + 52.11 ResourceMark rm; 52.12 HandleMark hm; 52.13 52.14 @@ -4019,7 +4023,7 @@ 52.15 double elapsed_ms = (os::elapsedTime()-start)*1000.0; 52.16 double term_ms = pss.term_time()*1000.0; 52.17 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms); 52.18 - _g1h->g1_policy()->record_termination_time(i, term_ms); 52.19 + _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts()); 52.20 } 52.21 _g1h->g1_policy()->record_thread_age_table(pss.age_table()); 52.22 _g1h->update_surviving_young_words(pss.surviving_young_words()+1); 52.23 @@ -4043,7 +4047,8 @@ 52.24 double term = pss.term_time(); 52.25 gclog_or_tty->print(" Elapsed: %7.2f ms.\n" 52.26 " Strong roots: %7.2f ms (%6.2f%%)\n" 52.27 - " Termination: %7.2f ms (%6.2f%%) (in %d entries)\n", 52.28 + " Termination: %7.2f ms (%6.2f%%) " 52.29 + "(in "SIZE_FORMAT" entries)\n", 52.30 elapsed * 1000.0, 52.31 strong_roots * 1000.0, (strong_roots*100.0/elapsed), 52.32 term * 1000.0, (term*100.0/elapsed), 52.33 @@ -4059,6 +4064,8 @@ 52.34 52.35 assert(pss.refs_to_scan() == 0, "Task queue should be empty"); 52.36 assert(pss.overflowed_refs_to_scan() == 0, "Overflow queue should be empty"); 52.37 + double end_time_ms = os::elapsedTime() * 1000.0; 52.38 + _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms); 52.39 } 52.40 }; 52.41
53.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Jun 30 18:57:35 2010 -0700 53.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Jul 02 01:36:15 2010 -0700 53.3 @@ -1549,7 +1549,7 @@ 53.4 int _hash_seed; 53.5 int _queue_num; 53.6 53.7 - int _term_attempts; 53.8 + size_t _term_attempts; 53.9 #if G1_DETAILED_STATS 53.10 int _pushes, _pops, _steals, _steal_attempts; 53.11 int _overflow_pushes; 53.12 @@ -1727,8 +1727,8 @@ 53.13 int* hash_seed() { return &_hash_seed; } 53.14 int queue_num() { return _queue_num; } 53.15 53.16 - int term_attempts() { return _term_attempts; } 53.17 - void note_term_attempt() { _term_attempts++; } 53.18 + size_t term_attempts() { return _term_attempts; } 53.19 + void note_term_attempt() { _term_attempts++; } 53.20 53.21 #if G1_DETAILED_STATS 53.22 int pushes() { return _pushes; }
54.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Wed Jun 30 18:57:35 2010 -0700 54.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Fri Jul 02 01:36:15 2010 -0700 54.3 @@ -231,20 +231,21 @@ 54.4 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime()); 54.5 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0; 54.6 54.7 + _par_last_gc_worker_start_times_ms = new double[_parallel_gc_threads]; 54.8 _par_last_ext_root_scan_times_ms = new double[_parallel_gc_threads]; 54.9 _par_last_mark_stack_scan_times_ms = new double[_parallel_gc_threads]; 54.10 54.11 - _par_last_update_rs_start_times_ms = new double[_parallel_gc_threads]; 54.12 _par_last_update_rs_times_ms = new double[_parallel_gc_threads]; 54.13 _par_last_update_rs_processed_buffers = new double[_parallel_gc_threads]; 54.14 54.15 - _par_last_scan_rs_start_times_ms = new double[_parallel_gc_threads]; 54.16 _par_last_scan_rs_times_ms = new double[_parallel_gc_threads]; 54.17 _par_last_scan_new_refs_times_ms = new double[_parallel_gc_threads]; 54.18 54.19 _par_last_obj_copy_times_ms = new double[_parallel_gc_threads]; 54.20 54.21 _par_last_termination_times_ms = new double[_parallel_gc_threads]; 54.22 + _par_last_termination_attempts = new double[_parallel_gc_threads]; 54.23 + _par_last_gc_worker_end_times_ms = new double[_parallel_gc_threads]; 54.24 54.25 // start conservatively 54.26 _expensive_region_limit_ms = 0.5 * (double) MaxGCPauseMillis; 54.27 @@ -274,10 +275,64 @@ 54.28 54.29 // </NEW PREDICTION> 54.30 54.31 + // Below, we might need to calculate the pause time target based on 54.32 + // the pause interval. When we do so we are going to give G1 maximum 54.33 + // flexibility and allow it to do pauses when it needs to. 
So, we'll 54.34 + // arrange that the pause interval to be pause time target + 1 to 54.35 + // ensure that a) the pause time target is maximized with respect to 54.36 + // the pause interval and b) we maintain the invariant that pause 54.37 + // time target < pause interval. If the user does not want this 54.38 + // maximum flexibility, they will have to set the pause interval 54.39 + // explicitly. 54.40 + 54.41 + // First make sure that, if either parameter is set, its value is 54.42 + // reasonable. 54.43 + if (!FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 54.44 + if (MaxGCPauseMillis < 1) { 54.45 + vm_exit_during_initialization("MaxGCPauseMillis should be " 54.46 + "greater than 0"); 54.47 + } 54.48 + } 54.49 + if (!FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 54.50 + if (GCPauseIntervalMillis < 1) { 54.51 + vm_exit_during_initialization("GCPauseIntervalMillis should be " 54.52 + "greater than 0"); 54.53 + } 54.54 + } 54.55 + 54.56 + // Then, if the pause time target parameter was not set, set it to 54.57 + // the default value. 54.58 + if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 54.59 + if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 54.60 + // The default pause time target in G1 is 200ms 54.61 + FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 54.62 + } else { 54.63 + // We do not allow the pause interval to be set without the 54.64 + // pause time target 54.65 + vm_exit_during_initialization("GCPauseIntervalMillis cannot be set " 54.66 + "without setting MaxGCPauseMillis"); 54.67 + } 54.68 + } 54.69 + 54.70 + // Then, if the interval parameter was not set, set it according to 54.71 + // the pause time target (this will also deal with the case when the 54.72 + // pause time target is the default value). 54.73 + if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) { 54.74 + FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1); 54.75 + } 54.76 + 54.77 + // Finally, make sure that the two parameters are consistent. 
54.78 + if (MaxGCPauseMillis >= GCPauseIntervalMillis) { 54.79 + char buffer[256]; 54.80 + jio_snprintf(buffer, 256, 54.81 + "MaxGCPauseMillis (%u) should be less than " 54.82 + "GCPauseIntervalMillis (%u)", 54.83 + MaxGCPauseMillis, GCPauseIntervalMillis); 54.84 + vm_exit_during_initialization(buffer); 54.85 + } 54.86 + 54.87 + double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 54.88 double time_slice = (double) GCPauseIntervalMillis / 1000.0; 54.89 - double max_gc_time = (double) MaxGCPauseMillis / 1000.0; 54.90 - guarantee(max_gc_time < time_slice, 54.91 - "Max GC time should not be greater than the time slice"); 54.92 _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time); 54.93 _sigma = (double) G1ConfidencePercent / 100.0; 54.94 54.95 @@ -782,16 +837,17 @@ 54.96 // if they are not set properly 54.97 54.98 for (int i = 0; i < _parallel_gc_threads; ++i) { 54.99 - _par_last_ext_root_scan_times_ms[i] = -666.0; 54.100 - _par_last_mark_stack_scan_times_ms[i] = -666.0; 54.101 - _par_last_update_rs_start_times_ms[i] = -666.0; 54.102 - _par_last_update_rs_times_ms[i] = -666.0; 54.103 - _par_last_update_rs_processed_buffers[i] = -666.0; 54.104 - _par_last_scan_rs_start_times_ms[i] = -666.0; 54.105 - _par_last_scan_rs_times_ms[i] = -666.0; 54.106 - _par_last_scan_new_refs_times_ms[i] = -666.0; 54.107 - _par_last_obj_copy_times_ms[i] = -666.0; 54.108 - _par_last_termination_times_ms[i] = -666.0; 54.109 + _par_last_gc_worker_start_times_ms[i] = -1234.0; 54.110 + _par_last_ext_root_scan_times_ms[i] = -1234.0; 54.111 + _par_last_mark_stack_scan_times_ms[i] = -1234.0; 54.112 + _par_last_update_rs_times_ms[i] = -1234.0; 54.113 + _par_last_update_rs_processed_buffers[i] = -1234.0; 54.114 + _par_last_scan_rs_times_ms[i] = -1234.0; 54.115 + _par_last_scan_new_refs_times_ms[i] = -1234.0; 54.116 + _par_last_obj_copy_times_ms[i] = -1234.0; 54.117 + _par_last_termination_times_ms[i] = -1234.0; 54.118 + _par_last_termination_attempts[i] = -1234.0; 54.119 + 
_par_last_gc_worker_end_times_ms[i] = -1234.0; 54.120 } 54.121 #endif 54.122 54.123 @@ -942,9 +998,9 @@ 54.124 return sum; 54.125 } 54.126 54.127 -void G1CollectorPolicy::print_par_stats (int level, 54.128 - const char* str, 54.129 - double* data, 54.130 +void G1CollectorPolicy::print_par_stats(int level, 54.131 + const char* str, 54.132 + double* data, 54.133 bool summary) { 54.134 double min = data[0], max = data[0]; 54.135 double total = 0.0; 54.136 @@ -973,10 +1029,10 @@ 54.137 gclog_or_tty->print_cr("]"); 54.138 } 54.139 54.140 -void G1CollectorPolicy::print_par_buffers (int level, 54.141 - const char* str, 54.142 - double* data, 54.143 - bool summary) { 54.144 +void G1CollectorPolicy::print_par_sizes(int level, 54.145 + const char* str, 54.146 + double* data, 54.147 + bool summary) { 54.148 double min = data[0], max = data[0]; 54.149 double total = 0.0; 54.150 int j; 54.151 @@ -1321,15 +1377,22 @@ 54.152 } 54.153 if (parallel) { 54.154 print_stats(1, "Parallel Time", _cur_collection_par_time_ms); 54.155 - print_par_stats(2, "Update RS (Start)", _par_last_update_rs_start_times_ms, false); 54.156 + print_par_stats(2, "GC Worker Start Time", 54.157 + _par_last_gc_worker_start_times_ms, false); 54.158 print_par_stats(2, "Update RS", _par_last_update_rs_times_ms); 54.159 - print_par_buffers(3, "Processed Buffers", 54.160 - _par_last_update_rs_processed_buffers, true); 54.161 - print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms); 54.162 - print_par_stats(2, "Mark Stack Scanning", _par_last_mark_stack_scan_times_ms); 54.163 + print_par_sizes(3, "Processed Buffers", 54.164 + _par_last_update_rs_processed_buffers, true); 54.165 + print_par_stats(2, "Ext Root Scanning", 54.166 + _par_last_ext_root_scan_times_ms); 54.167 + print_par_stats(2, "Mark Stack Scanning", 54.168 + _par_last_mark_stack_scan_times_ms); 54.169 print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms); 54.170 print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms); 
54.171 print_par_stats(2, "Termination", _par_last_termination_times_ms); 54.172 + print_par_sizes(3, "Termination Attempts", 54.173 + _par_last_termination_attempts, true); 54.174 + print_par_stats(2, "GC Worker End Time", 54.175 + _par_last_gc_worker_end_times_ms, false); 54.176 print_stats(2, "Other", parallel_other_time); 54.177 print_stats(1, "Clear CT", _cur_clear_ct_time_ms); 54.178 } else {
55.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Wed Jun 30 18:57:35 2010 -0700 55.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Fri Jul 02 01:36:15 2010 -0700 55.3 @@ -171,16 +171,17 @@ 55.4 double* _cur_aux_times_ms; 55.5 bool* _cur_aux_times_set; 55.6 55.7 + double* _par_last_gc_worker_start_times_ms; 55.8 double* _par_last_ext_root_scan_times_ms; 55.9 double* _par_last_mark_stack_scan_times_ms; 55.10 - double* _par_last_update_rs_start_times_ms; 55.11 double* _par_last_update_rs_times_ms; 55.12 double* _par_last_update_rs_processed_buffers; 55.13 - double* _par_last_scan_rs_start_times_ms; 55.14 double* _par_last_scan_rs_times_ms; 55.15 double* _par_last_scan_new_refs_times_ms; 55.16 double* _par_last_obj_copy_times_ms; 55.17 double* _par_last_termination_times_ms; 55.18 + double* _par_last_termination_attempts; 55.19 + double* _par_last_gc_worker_end_times_ms; 55.20 55.21 // indicates that we are in young GC mode 55.22 bool _in_young_gc_mode; 55.23 @@ -559,13 +560,14 @@ 55.24 } 55.25 55.26 protected: 55.27 - void print_stats (int level, const char* str, double value); 55.28 - void print_stats (int level, const char* str, int value); 55.29 - void print_par_stats (int level, const char* str, double* data) { 55.30 + void print_stats(int level, const char* str, double value); 55.31 + void print_stats(int level, const char* str, int value); 55.32 + 55.33 + void print_par_stats(int level, const char* str, double* data) { 55.34 print_par_stats(level, str, data, true); 55.35 } 55.36 - void print_par_stats (int level, const char* str, double* data, bool summary); 55.37 - void print_par_buffers (int level, const char* str, double* data, bool summary); 55.38 + void print_par_stats(int level, const char* str, double* data, bool summary); 55.39 + void print_par_sizes(int level, const char* str, double* data, bool summary); 55.40 55.41 void check_other_times(int level, 55.42 NumberSeq* other_times_ms, 55.43 @@ -891,6 +893,10 @@ 55.44 
virtual void record_full_collection_start(); 55.45 virtual void record_full_collection_end(); 55.46 55.47 + void record_gc_worker_start_time(int worker_i, double ms) { 55.48 + _par_last_gc_worker_start_times_ms[worker_i] = ms; 55.49 + } 55.50 + 55.51 void record_ext_root_scan_time(int worker_i, double ms) { 55.52 _par_last_ext_root_scan_times_ms[worker_i] = ms; 55.53 } 55.54 @@ -912,10 +918,6 @@ 55.55 _all_mod_union_times_ms->add(ms); 55.56 } 55.57 55.58 - void record_update_rs_start_time(int thread, double ms) { 55.59 - _par_last_update_rs_start_times_ms[thread] = ms; 55.60 - } 55.61 - 55.62 void record_update_rs_time(int thread, double ms) { 55.63 _par_last_update_rs_times_ms[thread] = ms; 55.64 } 55.65 @@ -925,10 +927,6 @@ 55.66 _par_last_update_rs_processed_buffers[thread] = processed_buffers; 55.67 } 55.68 55.69 - void record_scan_rs_start_time(int thread, double ms) { 55.70 - _par_last_scan_rs_start_times_ms[thread] = ms; 55.71 - } 55.72 - 55.73 void record_scan_rs_time(int thread, double ms) { 55.74 _par_last_scan_rs_times_ms[thread] = ms; 55.75 } 55.76 @@ -953,16 +951,13 @@ 55.77 _par_last_obj_copy_times_ms[thread] += ms; 55.78 } 55.79 55.80 - void record_obj_copy_time(double ms) { 55.81 - record_obj_copy_time(0, ms); 55.82 + void record_termination(int thread, double ms, size_t attempts) { 55.83 + _par_last_termination_times_ms[thread] = ms; 55.84 + _par_last_termination_attempts[thread] = (double) attempts; 55.85 } 55.86 55.87 - void record_termination_time(int thread, double ms) { 55.88 - _par_last_termination_times_ms[thread] = ms; 55.89 - } 55.90 - 55.91 - void record_termination_time(double ms) { 55.92 - record_termination_time(0, ms); 55.93 + void record_gc_worker_end_time(int worker_i, double ms) { 55.94 + _par_last_gc_worker_end_times_ms[worker_i] = ms; 55.95 } 55.96 55.97 void record_pause_time_ms(double ms) {
56.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Jun 30 18:57:35 2010 -0700 56.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Fri Jul 02 01:36:15 2010 -0700 56.3 @@ -303,7 +303,6 @@ 56.4 assert( _cards_scanned != NULL, "invariant" ); 56.5 _cards_scanned[worker_i] = scanRScl.cards_done(); 56.6 56.7 - _g1p->record_scan_rs_start_time(worker_i, rs_time_start * 1000.0); 56.8 _g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0); 56.9 } 56.10 56.11 @@ -311,8 +310,6 @@ 56.12 ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); 56.13 56.14 double start = os::elapsedTime(); 56.15 - _g1p->record_update_rs_start_time(worker_i, start * 1000.0); 56.16 - 56.17 // Apply the appropriate closure to all remaining log entries. 56.18 _g1->iterate_dirty_card_closure(false, worker_i); 56.19 // Now there should be no dirty cards. 56.20 @@ -471,7 +468,6 @@ 56.21 updateRS(worker_i); 56.22 scanNewRefsRS(oc, worker_i); 56.23 } else { 56.24 - _g1p->record_update_rs_start_time(worker_i, os::elapsedTime() * 1000.0); 56.25 _g1p->record_update_rs_processed_buffers(worker_i, 0.0); 56.26 _g1p->record_update_rs_time(worker_i, 0.0); 56.27 _g1p->record_scan_new_refs_time(worker_i, 0.0); 56.28 @@ -479,7 +475,6 @@ 56.29 if (G1UseParallelRSetScanning || (worker_i == 0)) { 56.30 scanRS(oc, worker_i); 56.31 } else { 56.32 - _g1p->record_scan_rs_start_time(worker_i, os::elapsedTime() * 1000.0); 56.33 _g1p->record_scan_rs_time(worker_i, 0.0); 56.34 } 56.35 } else {
57.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Wed Jun 30 18:57:35 2010 -0700 57.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Jul 02 01:36:15 2010 -0700 57.3 @@ -1,5 +1,5 @@ 57.4 /* 57.5 - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. 57.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 57.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 57.8 * 57.9 * This code is free software; you can redistribute it and/or modify it 57.10 @@ -566,14 +566,14 @@ 57.11 #endif 57.12 57.13 // Commit new or uncommit old pages, if necessary. 57.14 - resize_commit_uncommit(changed_region, new_region); 57.15 + if (resize_commit_uncommit(changed_region, new_region)) { 57.16 + // Set the new start of the committed region 57.17 + resize_update_committed_table(changed_region, new_region); 57.18 + } 57.19 57.20 // Update card table entries 57.21 resize_update_card_table_entries(changed_region, new_region); 57.22 57.23 - // Set the new start of the committed region 57.24 - resize_update_committed_table(changed_region, new_region); 57.25 - 57.26 // Update the covered region 57.27 resize_update_covered_table(changed_region, new_region); 57.28 57.29 @@ -604,8 +604,9 @@ 57.30 debug_only(verify_guard();) 57.31 } 57.32 57.33 -void CardTableExtension::resize_commit_uncommit(int changed_region, 57.34 +bool CardTableExtension::resize_commit_uncommit(int changed_region, 57.35 MemRegion new_region) { 57.36 + bool result = false; 57.37 // Commit new or uncommit old pages, if necessary. 
57.38 MemRegion cur_committed = _committed[changed_region]; 57.39 assert(_covered[changed_region].end() == new_region.end(), 57.40 @@ -675,20 +676,31 @@ 57.41 "card table expansion"); 57.42 } 57.43 } 57.44 + result = true; 57.45 } else if (new_start_aligned > cur_committed.start()) { 57.46 // Shrink the committed region 57.47 +#if 0 // uncommitting space is currently unsafe because of the interactions 57.48 + // of growing and shrinking regions. One region A can uncommit space 57.49 + // that it owns but which is being used by another region B (maybe). 57.50 + // Region B has not committed the space because it was already 57.51 + // committed by region A. 57.52 MemRegion uncommit_region = committed_unique_to_self(changed_region, 57.53 MemRegion(cur_committed.start(), new_start_aligned)); 57.54 if (!uncommit_region.is_empty()) { 57.55 if (!os::uncommit_memory((char*)uncommit_region.start(), 57.56 uncommit_region.byte_size())) { 57.57 - vm_exit_out_of_memory(uncommit_region.byte_size(), 57.58 - "card table contraction"); 57.59 + // If the uncommit fails, ignore it. Let the 57.60 + // committed table resizing go even though the committed 57.61 + // table will overstate the committed space. 57.62 } 57.63 } 57.64 +#else 57.65 + assert(!result, "Should be false with current workaround"); 57.66 +#endif 57.67 } 57.68 assert(_committed[changed_region].end() == cur_committed.end(), 57.69 "end should not change"); 57.70 + return result; 57.71 } 57.72 57.73 void CardTableExtension::resize_update_committed_table(int changed_region,
58.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Wed Jun 30 18:57:35 2010 -0700 58.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.hpp Fri Jul 02 01:36:15 2010 -0700 58.3 @@ -1,5 +1,5 @@ 58.4 /* 58.5 - * Copyright (c) 2001, 2008, Oracle and/or its affiliates. All rights reserved. 58.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 58.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 58.8 * 58.9 * This code is free software; you can redistribute it and/or modify it 58.10 @@ -30,7 +30,9 @@ 58.11 class CardTableExtension : public CardTableModRefBS { 58.12 private: 58.13 // Support methods for resizing the card table. 58.14 - void resize_commit_uncommit(int changed_region, MemRegion new_region); 58.15 + // resize_commit_uncommit() returns true if the pages were committed or 58.16 + // uncommitted 58.17 + bool resize_commit_uncommit(int changed_region, MemRegion new_region); 58.18 void resize_update_card_table_entries(int changed_region, 58.19 MemRegion new_region); 58.20 void resize_update_committed_table(int changed_region, MemRegion new_region);
59.1 --- a/src/share/vm/includeDB_core Wed Jun 30 18:57:35 2010 -0700 59.2 +++ b/src/share/vm/includeDB_core Fri Jul 02 01:36:15 2010 -0700 59.3 @@ -545,6 +545,7 @@ 59.4 59.5 ciCPCache.hpp ciClassList.hpp 59.6 ciCPCache.hpp ciObject.hpp 59.7 +ciCPCache.hpp cpCacheOop.hpp 59.8 59.9 ciEnv.cpp allocation.inline.hpp 59.10 ciEnv.cpp ciConstant.hpp 59.11 @@ -823,6 +824,7 @@ 59.12 59.13 ciStreams.cpp ciCallSite.hpp 59.14 ciStreams.cpp ciConstant.hpp 59.15 +ciStreams.cpp ciCPCache.hpp 59.16 ciStreams.cpp ciField.hpp 59.17 ciStreams.cpp ciStreams.hpp 59.18 ciStreams.cpp ciUtilities.hpp
60.1 --- a/src/share/vm/interpreter/bytecode.cpp Wed Jun 30 18:57:35 2010 -0700 60.2 +++ b/src/share/vm/interpreter/bytecode.cpp Fri Jul 02 01:36:15 2010 -0700 60.3 @@ -136,25 +136,24 @@ 60.4 // Implementation of Bytecode_invoke 60.5 60.6 void Bytecode_invoke::verify() const { 60.7 - Bytecodes::Code bc = adjusted_invoke_code(); 60.8 assert(is_valid(), "check invoke"); 60.9 assert(method()->constants()->cache() != NULL, "do not call this from verifier or rewriter"); 60.10 } 60.11 60.12 60.13 -symbolOop Bytecode_invoke::signature() const { 60.14 +symbolOop Bytecode_member_ref::signature() const { 60.15 constantPoolOop constants = method()->constants(); 60.16 return constants->signature_ref_at(index()); 60.17 } 60.18 60.19 60.20 -symbolOop Bytecode_invoke::name() const { 60.21 +symbolOop Bytecode_member_ref::name() const { 60.22 constantPoolOop constants = method()->constants(); 60.23 return constants->name_ref_at(index()); 60.24 } 60.25 60.26 60.27 -BasicType Bytecode_invoke::result_type(Thread *thread) const { 60.28 +BasicType Bytecode_member_ref::result_type(Thread *thread) const { 60.29 symbolHandle sh(thread, signature()); 60.30 ResultTypeFinder rts(sh); 60.31 rts.iterate(); 60.32 @@ -167,9 +166,9 @@ 60.33 KlassHandle resolved_klass; 60.34 constantPoolHandle constants(THREAD, _method->constants()); 60.35 60.36 - if (adjusted_invoke_code() == Bytecodes::_invokedynamic) { 60.37 + if (java_code() == Bytecodes::_invokedynamic) { 60.38 LinkResolver::resolve_dynamic_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); 60.39 - } else if (adjusted_invoke_code() != Bytecodes::_invokeinterface) { 60.40 + } else if (java_code() != Bytecodes::_invokeinterface) { 60.41 LinkResolver::resolve_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); 60.42 } else { 60.43 LinkResolver::resolve_interface_method(m, resolved_klass, constants, index(), CHECK_(methodHandle())); 60.44 @@ -178,51 +177,68 @@ 60.45 } 60.46 60.47 60.48 -int 
Bytecode_invoke::index() const { 60.49 +int Bytecode_member_ref::index() const { 60.50 // Note: Rewriter::rewrite changes the Java_u2 of an invokedynamic to a native_u4, 60.51 // at the same time it allocates per-call-site CP cache entries. 60.52 - Bytecodes::Code stdc = Bytecodes::java_code(code()); 60.53 - Bytecode* invoke = Bytecode_at(bcp()); 60.54 - if (invoke->has_index_u4(stdc)) 60.55 - return invoke->get_index_u4(stdc); 60.56 + Bytecodes::Code rawc = code(); 60.57 + Bytecode* invoke = bytecode(); 60.58 + if (invoke->has_index_u4(rawc)) 60.59 + return invoke->get_index_u4(rawc); 60.60 else 60.61 - return invoke->get_index_u2_cpcache(stdc); 60.62 + return invoke->get_index_u2_cpcache(rawc); 60.63 } 60.64 60.65 +int Bytecode_member_ref::pool_index() const { 60.66 + int index = this->index(); 60.67 + DEBUG_ONLY({ 60.68 + if (!bytecode()->has_index_u4(code())) 60.69 + index -= constantPoolOopDesc::CPCACHE_INDEX_TAG; 60.70 + }); 60.71 + return _method->constants()->cache()->entry_at(index)->constant_pool_index(); 60.72 +} 60.73 60.74 // Implementation of Bytecode_field 60.75 60.76 void Bytecode_field::verify() const { 60.77 - Bytecodes::Code stdc = Bytecodes::java_code(code()); 60.78 - assert(stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic || 60.79 - stdc == Bytecodes::_putfield || stdc == Bytecodes::_getfield, "check field"); 60.80 + assert(is_valid(), "check field"); 60.81 } 60.82 60.83 60.84 -bool Bytecode_field::is_static() const { 60.85 - Bytecodes::Code stdc = Bytecodes::java_code(code()); 60.86 - return stdc == Bytecodes::_putstatic || stdc == Bytecodes::_getstatic; 60.87 +// Implementation of Bytecode_loadconstant 60.88 + 60.89 +int Bytecode_loadconstant::raw_index() const { 60.90 + Bytecode* bcp = bytecode(); 60.91 + Bytecodes::Code rawc = bcp->code(); 60.92 + assert(rawc != Bytecodes::_wide, "verifier prevents this"); 60.93 + if (Bytecodes::java_code(rawc) == Bytecodes::_ldc) 60.94 + return bcp->get_index_u1(rawc); 60.95 + else 60.96 + 
return bcp->get_index_u2(rawc, false); 60.97 } 60.98 60.99 - 60.100 -int Bytecode_field::index() const { 60.101 - Bytecode* invoke = Bytecode_at(bcp()); 60.102 - return invoke->get_index_u2_cpcache(Bytecodes::_getfield); 60.103 +int Bytecode_loadconstant::pool_index() const { 60.104 + int index = raw_index(); 60.105 + if (has_cache_index()) { 60.106 + return _method->constants()->cache()->entry_at(index)->constant_pool_index(); 60.107 + } 60.108 + return index; 60.109 } 60.110 60.111 +BasicType Bytecode_loadconstant::result_type() const { 60.112 + int index = pool_index(); 60.113 + constantTag tag = _method->constants()->tag_at(index); 60.114 + return tag.basic_type(); 60.115 +} 60.116 60.117 -// Implementation of Bytecodes loac constant 60.118 - 60.119 -int Bytecode_loadconstant::index() const { 60.120 - Bytecodes::Code stdc = Bytecodes::java_code(code()); 60.121 - if (stdc != Bytecodes::_wide) { 60.122 - if (Bytecodes::java_code(stdc) == Bytecodes::_ldc) 60.123 - return get_index_u1(stdc); 60.124 - else 60.125 - return get_index_u2(stdc, false); 60.126 +oop Bytecode_loadconstant::resolve_constant(TRAPS) const { 60.127 + assert(_method.not_null(), "must supply method to resolve constant"); 60.128 + int index = raw_index(); 60.129 + constantPoolOop constants = _method->constants(); 60.130 + if (has_cache_index()) { 60.131 + return constants->resolve_cached_constant_at(index, THREAD); 60.132 + } else { 60.133 + return constants->resolve_constant_at(index, THREAD); 60.134 } 60.135 - stdc = Bytecodes::code_at(addr_at(1)); 60.136 - return get_index_u2(stdc, true); 60.137 } 60.138 60.139 //------------------------------------------------------------------------------
61.1 --- a/src/share/vm/interpreter/bytecode.hpp Wed Jun 30 18:57:35 2010 -0700 61.2 +++ b/src/share/vm/interpreter/bytecode.hpp Fri Jul 02 01:36:15 2010 -0700 61.3 @@ -76,9 +76,13 @@ 61.4 return Bytes::get_native_u2(p); 61.5 else return Bytes::get_Java_u2(p); 61.6 } 61.7 + int get_index_u1_cpcache(Bytecodes::Code bc) const { 61.8 + assert_same_format_as(bc); assert_index_size(1, bc); 61.9 + return *(jubyte*)addr_at(1) + constantPoolOopDesc::CPCACHE_INDEX_TAG; 61.10 + } 61.11 int get_index_u2_cpcache(Bytecodes::Code bc) const { 61.12 assert_same_format_as(bc); assert_index_size(2, bc); assert_native_index(bc); 61.13 - return Bytes::get_native_u2(addr_at(1)) DEBUG_ONLY(+ constantPoolOopDesc::CPCACHE_INDEX_TAG); 61.14 + return Bytes::get_native_u2(addr_at(1)) + constantPoolOopDesc::CPCACHE_INDEX_TAG; 61.15 } 61.16 int get_index_u4(Bytecodes::Code bc) const { 61.17 assert_same_format_as(bc); assert_index_size(4, bc); 61.18 @@ -152,7 +156,7 @@ 61.19 61.20 inline Bytecode_lookupswitch* Bytecode_lookupswitch_at(address bcp) { 61.21 Bytecode_lookupswitch* b = (Bytecode_lookupswitch*)bcp; 61.22 - debug_only(b->verify()); 61.23 + DEBUG_ONLY(b->verify()); 61.24 return b; 61.25 } 61.26 61.27 @@ -174,44 +178,56 @@ 61.28 61.29 inline Bytecode_tableswitch* Bytecode_tableswitch_at(address bcp) { 61.30 Bytecode_tableswitch* b = (Bytecode_tableswitch*)bcp; 61.31 - debug_only(b->verify()); 61.32 + DEBUG_ONLY(b->verify()); 61.33 return b; 61.34 } 61.35 61.36 61.37 -// Abstraction for invoke_{virtual, static, interface, special} 61.38 +// Common code for decoding invokes and field references. 
61.39 61.40 -class Bytecode_invoke: public ResourceObj { 61.41 +class Bytecode_member_ref: public ResourceObj { 61.42 protected: 61.43 methodHandle _method; // method containing the bytecode 61.44 int _bci; // position of the bytecode 61.45 61.46 - Bytecode_invoke(methodHandle method, int bci) : _method(method), _bci(bci) {} 61.47 + Bytecode_member_ref(methodHandle method, int bci) : _method(method), _bci(bci) {} 61.48 + 61.49 + public: 61.50 + // Attributes 61.51 + methodHandle method() const { return _method; } 61.52 + int bci() const { return _bci; } 61.53 + address bcp() const { return _method->bcp_from(bci()); } 61.54 + Bytecode* bytecode() const { return Bytecode_at(bcp()); } 61.55 + 61.56 + int index() const; // cache index (loaded from instruction) 61.57 + int pool_index() const; // constant pool index 61.58 + symbolOop name() const; // returns the name of the method or field 61.59 + symbolOop signature() const; // returns the signature of the method or field 61.60 + 61.61 + BasicType result_type(Thread* thread) const; // returns the result type of the getfield or invoke 61.62 + 61.63 + Bytecodes::Code code() const { return Bytecodes::code_at(bcp(), _method()); } 61.64 + Bytecodes::Code java_code() const { return Bytecodes::java_code(code()); } 61.65 +}; 61.66 + 61.67 +// Abstraction for invoke_{virtual, static, interface, special} 61.68 + 61.69 +class Bytecode_invoke: public Bytecode_member_ref { 61.70 + protected: 61.71 + Bytecode_invoke(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {} 61.72 61.73 public: 61.74 void verify() const; 61.75 61.76 // Attributes 61.77 - methodHandle method() const { return _method; } 61.78 - int bci() const { return _bci; } 61.79 - address bcp() const { return _method->bcp_from(bci()); } 61.80 - 61.81 - int index() const; // the constant pool index for the invoke 61.82 - symbolOop name() const; // returns the name of the invoked method 61.83 - symbolOop signature() const; // returns the signature of the 
invoked method 61.84 - BasicType result_type(Thread *thread) const; // returns the result type of the invoke 61.85 - 61.86 - Bytecodes::Code code() const { return Bytecodes::code_at(bcp(), _method()); } 61.87 - Bytecodes::Code adjusted_invoke_code() const { return Bytecodes::java_code(code()); } 61.88 - 61.89 methodHandle static_target(TRAPS); // "specified" method (from constant pool) 61.90 61.91 // Testers 61.92 - bool is_invokeinterface() const { return adjusted_invoke_code() == Bytecodes::_invokeinterface; } 61.93 - bool is_invokevirtual() const { return adjusted_invoke_code() == Bytecodes::_invokevirtual; } 61.94 - bool is_invokestatic() const { return adjusted_invoke_code() == Bytecodes::_invokestatic; } 61.95 - bool is_invokespecial() const { return adjusted_invoke_code() == Bytecodes::_invokespecial; } 61.96 - bool is_invokedynamic() const { return adjusted_invoke_code() == Bytecodes::_invokedynamic; } 61.97 + bool is_invokeinterface() const { return java_code() == Bytecodes::_invokeinterface; } 61.98 + bool is_invokevirtual() const { return java_code() == Bytecodes::_invokevirtual; } 61.99 + bool is_invokestatic() const { return java_code() == Bytecodes::_invokestatic; } 61.100 + bool is_invokespecial() const { return java_code() == Bytecodes::_invokespecial; } 61.101 + bool is_invokedynamic() const { return java_code() == Bytecodes::_invokedynamic; } 61.102 61.103 bool has_receiver() const { return !is_invokestatic() && !is_invokedynamic(); } 61.104 61.105 @@ -230,7 +246,7 @@ 61.106 61.107 inline Bytecode_invoke* Bytecode_invoke_at(methodHandle method, int bci) { 61.108 Bytecode_invoke* b = new Bytecode_invoke(method, bci); 61.109 - debug_only(b->verify()); 61.110 + DEBUG_ONLY(b->verify()); 61.111 return b; 61.112 } 61.113 61.114 @@ -240,21 +256,34 @@ 61.115 } 61.116 61.117 61.118 -// Abstraction for all field accesses (put/get field/static_ 61.119 -class Bytecode_field: public Bytecode { 61.120 -public: 61.121 +// Abstraction for all field accesses 
(put/get field/static) 61.122 +class Bytecode_field: public Bytecode_member_ref { 61.123 + protected: 61.124 + Bytecode_field(methodHandle method, int bci) : Bytecode_member_ref(method, bci) {} 61.125 + 61.126 + public: 61.127 + // Testers 61.128 + bool is_getfield() const { return java_code() == Bytecodes::_getfield; } 61.129 + bool is_putfield() const { return java_code() == Bytecodes::_putfield; } 61.130 + bool is_getstatic() const { return java_code() == Bytecodes::_getstatic; } 61.131 + bool is_putstatic() const { return java_code() == Bytecodes::_putstatic; } 61.132 + 61.133 + bool is_getter() const { return is_getfield() || is_getstatic(); } 61.134 + bool is_static() const { return is_getstatic() || is_putstatic(); } 61.135 + 61.136 + bool is_valid() const { return is_getfield() || 61.137 + is_putfield() || 61.138 + is_getstatic() || 61.139 + is_putstatic(); } 61.140 void verify() const; 61.141 61.142 - int index() const; 61.143 - bool is_static() const; 61.144 - 61.145 // Creation 61.146 - inline friend Bytecode_field* Bytecode_field_at(const methodOop method, address bcp); 61.147 + inline friend Bytecode_field* Bytecode_field_at(methodHandle method, int bci); 61.148 }; 61.149 61.150 -inline Bytecode_field* Bytecode_field_at(const methodOop method, address bcp) { 61.151 - Bytecode_field* b = (Bytecode_field*)bcp; 61.152 - debug_only(b->verify()); 61.153 +inline Bytecode_field* Bytecode_field_at(methodHandle method, int bci) { 61.154 + Bytecode_field* b = new Bytecode_field(method, bci); 61.155 + DEBUG_ONLY(b->verify()); 61.156 return b; 61.157 } 61.158 61.159 @@ -274,7 +303,7 @@ 61.160 61.161 inline Bytecode_checkcast* Bytecode_checkcast_at(address bcp) { 61.162 Bytecode_checkcast* b = (Bytecode_checkcast*)bcp; 61.163 - debug_only(b->verify()); 61.164 + DEBUG_ONLY(b->verify()); 61.165 return b; 61.166 } 61.167 61.168 @@ -294,7 +323,7 @@ 61.169 61.170 inline Bytecode_instanceof* Bytecode_instanceof_at(address bcp) { 61.171 Bytecode_instanceof* b = 
(Bytecode_instanceof*)bcp; 61.172 - debug_only(b->verify()); 61.173 + DEBUG_ONLY(b->verify()); 61.174 return b; 61.175 } 61.176 61.177 @@ -312,7 +341,7 @@ 61.178 61.179 inline Bytecode_new* Bytecode_new_at(address bcp) { 61.180 Bytecode_new* b = (Bytecode_new*)bcp; 61.181 - debug_only(b->verify()); 61.182 + DEBUG_ONLY(b->verify()); 61.183 return b; 61.184 } 61.185 61.186 @@ -330,7 +359,7 @@ 61.187 61.188 inline Bytecode_multianewarray* Bytecode_multianewarray_at(address bcp) { 61.189 Bytecode_multianewarray* b = (Bytecode_multianewarray*)bcp; 61.190 - debug_only(b->verify()); 61.191 + DEBUG_ONLY(b->verify()); 61.192 return b; 61.193 } 61.194 61.195 @@ -348,29 +377,57 @@ 61.196 61.197 inline Bytecode_anewarray* Bytecode_anewarray_at(address bcp) { 61.198 Bytecode_anewarray* b = (Bytecode_anewarray*)bcp; 61.199 - debug_only(b->verify()); 61.200 + DEBUG_ONLY(b->verify()); 61.201 return b; 61.202 } 61.203 61.204 61.205 // Abstraction for ldc, ldc_w and ldc2_w 61.206 61.207 -class Bytecode_loadconstant: public Bytecode { 61.208 +class Bytecode_loadconstant: public ResourceObj { 61.209 + private: 61.210 + int _bci; 61.211 + methodHandle _method; 61.212 + 61.213 + Bytecodes::Code code() const { return bytecode()->code(); } 61.214 + 61.215 + int raw_index() const; 61.216 + 61.217 + Bytecode_loadconstant(methodHandle method, int bci) : _method(method), _bci(bci) {} 61.218 + 61.219 public: 61.220 + // Attributes 61.221 + methodHandle method() const { return _method; } 61.222 + int bci() const { return _bci; } 61.223 + address bcp() const { return _method->bcp_from(bci()); } 61.224 + Bytecode* bytecode() const { return Bytecode_at(bcp()); } 61.225 + 61.226 void verify() const { 61.227 + assert(_method.not_null(), "must supply method"); 61.228 Bytecodes::Code stdc = Bytecodes::java_code(code()); 61.229 assert(stdc == Bytecodes::_ldc || 61.230 stdc == Bytecodes::_ldc_w || 61.231 stdc == Bytecodes::_ldc2_w, "load constant"); 61.232 } 61.233 61.234 - int index() const; 61.235 + 
// Only non-standard bytecodes (fast_aldc) have CP cache indexes. 61.236 + bool has_cache_index() const { return code() >= Bytecodes::number_of_java_codes; } 61.237 61.238 - inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp); 61.239 + int pool_index() const; // index into constant pool 61.240 + int cache_index() const { // index into CP cache (or -1 if none) 61.241 + return has_cache_index() ? raw_index() : -1; 61.242 + } 61.243 + 61.244 + BasicType result_type() const; // returns the result type of the ldc 61.245 + 61.246 + oop resolve_constant(TRAPS) const; 61.247 + 61.248 + // Creation 61.249 + inline friend Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci); 61.250 }; 61.251 61.252 -inline Bytecode_loadconstant* Bytecode_loadconstant_at(const methodOop method, address bcp) { 61.253 - Bytecode_loadconstant* b = (Bytecode_loadconstant*)bcp; 61.254 - debug_only(b->verify()); 61.255 +inline Bytecode_loadconstant* Bytecode_loadconstant_at(methodHandle method, int bci) { 61.256 + Bytecode_loadconstant* b = new Bytecode_loadconstant(method, bci); 61.257 + DEBUG_ONLY(b->verify()); 61.258 return b; 61.259 }
62.1 --- a/src/share/vm/interpreter/bytecodeTracer.cpp Wed Jun 30 18:57:35 2010 -0700 62.2 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Fri Jul 02 01:36:15 2010 -0700 62.3 @@ -49,6 +49,7 @@ 62.4 62.5 int get_index_u1() { return *(address)_next_pc++; } 62.6 int get_index_u2() { int i=Bytes::get_Java_u2(_next_pc); _next_pc+=2; return i; } 62.7 + int get_index_u1_cpcache() { return get_index_u1() + constantPoolOopDesc::CPCACHE_INDEX_TAG; } 62.8 int get_index_u2_cpcache() { int i=Bytes::get_native_u2(_next_pc); _next_pc+=2; return i + constantPoolOopDesc::CPCACHE_INDEX_TAG; } 62.9 int get_index_u4() { int i=Bytes::get_native_u4(_next_pc); _next_pc+=4; return i; } 62.10 int get_index_special() { return (is_wide()) ? get_index_u2() : get_index_u1(); } 62.11 @@ -60,6 +61,7 @@ 62.12 bool check_index(int i, int& cp_index, outputStream* st = tty); 62.13 void print_constant(int i, outputStream* st = tty); 62.14 void print_field_or_method(int i, outputStream* st = tty); 62.15 + void print_field_or_method(int orig_i, int i, outputStream* st = tty); 62.16 void print_attributes(int bci, outputStream* st = tty); 62.17 void bytecode_epilog(int bci, outputStream* st = tty); 62.18 62.19 @@ -177,18 +179,29 @@ 62.20 _closure->trace(method, bcp, st); 62.21 } 62.22 62.23 +void print_symbol(symbolOop sym, outputStream* st) { 62.24 + char buf[40]; 62.25 + int len = sym->utf8_length(); 62.26 + if (len >= (int)sizeof(buf)) { 62.27 + st->print_cr(" %s...[%d]", sym->as_C_string(buf, sizeof(buf)), len); 62.28 + } else { 62.29 + st->print(" "); 62.30 + sym->print_on(st); st->cr(); 62.31 + } 62.32 +} 62.33 + 62.34 void print_oop(oop value, outputStream* st) { 62.35 if (value == NULL) { 62.36 st->print_cr(" NULL"); 62.37 - } else { 62.38 + } else if (java_lang_String::is_instance(value)) { 62.39 EXCEPTION_MARK; 62.40 Handle h_value (THREAD, value); 62.41 symbolHandle sym = java_lang_String::as_symbol(h_value, CATCH); 62.42 - if (sym->utf8_length() > 32) { 62.43 - st->print_cr(" ...."); 62.44 
- } else { 62.45 - sym->print_on(st); st->cr(); 62.46 - } 62.47 + print_symbol(sym(), st); 62.48 + } else if (value->is_symbol()) { 62.49 + print_symbol(symbolOop(value), st); 62.50 + } else { 62.51 + st->print_cr(" " PTR_FORMAT, (intptr_t) value); 62.52 } 62.53 } 62.54 62.55 @@ -279,16 +292,27 @@ 62.56 } else if (tag.is_double()) { 62.57 st->print_cr(" %f", constants->double_at(i)); 62.58 } else if (tag.is_string()) { 62.59 - oop string = constants->resolved_string_at(i); 62.60 + oop string = constants->pseudo_string_at(i); 62.61 print_oop(string, st); 62.62 } else if (tag.is_unresolved_string()) { 62.63 - st->print_cr(" <unresolved string at %d>", i); 62.64 + const char* string = constants->string_at_noresolve(i); 62.65 + st->print_cr(" %s", string); 62.66 } else if (tag.is_klass()) { 62.67 st->print_cr(" %s", constants->resolved_klass_at(i)->klass_part()->external_name()); 62.68 } else if (tag.is_unresolved_klass()) { 62.69 st->print_cr(" <unresolved klass at %d>", i); 62.70 } else if (tag.is_object()) { 62.71 - st->print_cr(" " PTR_FORMAT, constants->object_at(i)); 62.72 + st->print(" <Object>"); 62.73 + print_oop(constants->object_at(i), st); 62.74 + } else if (tag.is_method_type()) { 62.75 + int i2 = constants->method_type_index_at(i); 62.76 + st->print(" <MethodType> %d", i2); 62.77 + print_oop(constants->symbol_at(i2), st); 62.78 + } else if (tag.is_method_handle()) { 62.79 + int kind = constants->method_handle_ref_kind_at(i); 62.80 + int i2 = constants->method_handle_index_at(i); 62.81 + st->print(" <MethodHandle of kind %d index %d>", kind, i2); 62.82 + print_field_or_method(-i, i2, st); 62.83 } else { 62.84 st->print_cr(" bad tag=%d at %d", tag.value(), i); 62.85 } 62.86 @@ -297,7 +321,10 @@ 62.87 void BytecodePrinter::print_field_or_method(int i, outputStream* st) { 62.88 int orig_i = i; 62.89 if (!check_index(orig_i, i, st)) return; 62.90 + print_field_or_method(orig_i, i, st); 62.91 +} 62.92 62.93 +void BytecodePrinter::print_field_or_method(int orig_i, int i, 
outputStream* st) { 62.94 constantPoolOop constants = method()->constants(); 62.95 constantTag tag = constants->tag_at(i); 62.96 62.97 @@ -314,9 +341,11 @@ 62.98 return; 62.99 } 62.100 62.101 + symbolOop klass = constants->klass_name_at(constants->uncached_klass_ref_index_at(i)); 62.102 symbolOop name = constants->uncached_name_ref_at(i); 62.103 symbolOop signature = constants->uncached_signature_ref_at(i); 62.104 - st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string()); 62.105 + const char* sep = (tag.is_field() ? "/" : ""); 62.106 + st->print_cr(" %d <%s.%s%s%s> ", i, klass->as_C_string(), name->as_C_string(), sep, signature->as_C_string()); 62.107 } 62.108 62.109 62.110 @@ -340,12 +369,20 @@ 62.111 st->print_cr(" " INT32_FORMAT, get_short()); 62.112 break; 62.113 case Bytecodes::_ldc: 62.114 - print_constant(get_index_u1(), st); 62.115 + if (Bytecodes::uses_cp_cache(raw_code())) { 62.116 + print_constant(get_index_u1_cpcache(), st); 62.117 + } else { 62.118 + print_constant(get_index_u1(), st); 62.119 + } 62.120 break; 62.121 62.122 case Bytecodes::_ldc_w: 62.123 case Bytecodes::_ldc2_w: 62.124 - print_constant(get_index_u2(), st); 62.125 + if (Bytecodes::uses_cp_cache(raw_code())) { 62.126 + print_constant(get_index_u2_cpcache(), st); 62.127 + } else { 62.128 + print_constant(get_index_u2(), st); 62.129 + } 62.130 break; 62.131 62.132 case Bytecodes::_iload:
63.1 --- a/src/share/vm/interpreter/bytecodes.cpp Wed Jun 30 18:57:35 2010 -0700 63.2 +++ b/src/share/vm/interpreter/bytecodes.cpp Fri Jul 02 01:36:15 2010 -0700 63.3 @@ -489,6 +489,9 @@ 63.4 63.5 def(_return_register_finalizer , "return_register_finalizer" , "b" , NULL , T_VOID , 0, true, _return); 63.6 63.7 + def(_fast_aldc , "fast_aldc" , "bj" , NULL , T_OBJECT, 1, true, _ldc ); 63.8 + def(_fast_aldc_w , "fast_aldc_w" , "bJJ" , NULL , T_OBJECT, 1, true, _ldc_w ); 63.9 + 63.10 def(_shouldnotreachhere , "_shouldnotreachhere" , "b" , NULL , T_VOID , 0, false); 63.11 63.12 // platform specific JVM bytecodes
64.1 --- a/src/share/vm/interpreter/bytecodes.hpp Wed Jun 30 18:57:35 2010 -0700 64.2 +++ b/src/share/vm/interpreter/bytecodes.hpp Fri Jul 02 01:36:15 2010 -0700 64.3 @@ -270,6 +270,10 @@ 64.4 _fast_linearswitch , 64.5 _fast_binaryswitch , 64.6 64.7 + // special handling of oop constants: 64.8 + _fast_aldc , 64.9 + _fast_aldc_w , 64.10 + 64.11 _return_register_finalizer , 64.12 64.13 _shouldnotreachhere, // For debugging
65.1 --- a/src/share/vm/interpreter/interpreter.cpp Wed Jun 30 18:57:35 2010 -0700 65.2 +++ b/src/share/vm/interpreter/interpreter.cpp Fri Jul 02 01:36:15 2010 -0700 65.3 @@ -267,20 +267,6 @@ 65.4 } 65.5 #endif // PRODUCT 65.6 65.7 -static BasicType constant_pool_type(methodOop method, int index) { 65.8 - constantTag tag = method->constants()->tag_at(index); 65.9 - if (tag.is_int ()) return T_INT; 65.10 - else if (tag.is_float ()) return T_FLOAT; 65.11 - else if (tag.is_long ()) return T_LONG; 65.12 - else if (tag.is_double ()) return T_DOUBLE; 65.13 - else if (tag.is_string ()) return T_OBJECT; 65.14 - else if (tag.is_unresolved_string()) return T_OBJECT; 65.15 - else if (tag.is_klass ()) return T_OBJECT; 65.16 - else if (tag.is_unresolved_klass ()) return T_OBJECT; 65.17 - ShouldNotReachHere(); 65.18 - return T_ILLEGAL; 65.19 -} 65.20 - 65.21 65.22 //------------------------------------------------------------------------------------------------------------------------ 65.23 // Deoptimization support 65.24 @@ -330,13 +316,15 @@ 65.25 } 65.26 65.27 case Bytecodes::_ldc : 65.28 - type = constant_pool_type( method, *(bcp+1) ); 65.29 - break; 65.30 - 65.31 case Bytecodes::_ldc_w : // fall through 65.32 case Bytecodes::_ldc2_w: 65.33 - type = constant_pool_type( method, Bytes::get_Java_u2(bcp+1) ); 65.34 - break; 65.35 + { 65.36 + Thread *thread = Thread::current(); 65.37 + ResourceMark rm(thread); 65.38 + methodHandle mh(thread, method); 65.39 + type = Bytecode_loadconstant_at(mh, bci)->result_type(); 65.40 + break; 65.41 + } 65.42 65.43 default: 65.44 type = Bytecodes::result_type(code);
66.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp Wed Jun 30 18:57:35 2010 -0700 66.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Fri Jul 02 01:36:15 2010 -0700 66.3 @@ -83,6 +83,18 @@ 66.4 } 66.5 IRT_END 66.6 66.7 +IRT_ENTRY(void, InterpreterRuntime::resolve_ldc(JavaThread* thread, Bytecodes::Code bytecode)) { 66.8 + assert(bytecode == Bytecodes::_fast_aldc || 66.9 + bytecode == Bytecodes::_fast_aldc_w, "wrong bc"); 66.10 + ResourceMark rm(thread); 66.11 + methodHandle m (thread, method(thread)); 66.12 + Bytecode_loadconstant* ldc = Bytecode_loadconstant_at(m, bci(thread)); 66.13 + oop result = ldc->resolve_constant(THREAD); 66.14 + DEBUG_ONLY(ConstantPoolCacheEntry* cpce = m->constants()->cache()->entry_at(ldc->cache_index())); 66.15 + assert(result == cpce->f1(), "expected result for assembly code"); 66.16 +} 66.17 +IRT_END 66.18 + 66.19 66.20 //------------------------------------------------------------------------------------------------------------------------ 66.21 // Allocation 66.22 @@ -328,7 +340,7 @@ 66.23 typeArrayHandle h_extable (thread, h_method->exception_table()); 66.24 bool should_repeat; 66.25 int handler_bci; 66.26 - int current_bci = bcp(thread) - h_method->code_base(); 66.27 + int current_bci = bci(thread); 66.28 66.29 // Need to do this check first since when _do_not_unlock_if_synchronized 66.30 // is set, we don't want to trigger any classloading which may make calls 66.31 @@ -615,8 +627,7 @@ 66.32 if (bytecode == Bytecodes::_invokevirtual || bytecode == Bytecodes::_invokeinterface) { 66.33 ResourceMark rm(thread); 66.34 methodHandle m (thread, method(thread)); 66.35 - int bci = m->bci_from(bcp(thread)); 66.36 - Bytecode_invoke* call = Bytecode_invoke_at(m, bci); 66.37 + Bytecode_invoke* call = Bytecode_invoke_at(m, bci(thread)); 66.38 symbolHandle signature (thread, call->signature()); 66.39 receiver = Handle(thread, 66.40 thread->last_frame().interpreter_callee_receiver(signature)); 66.41 @@ -1257,7 +1268,7 @@ 66.42 
Bytecode_invoke* invoke = Bytecode_invoke_at(mh, bci); 66.43 ArgumentSizeComputer asc(invoke->signature()); 66.44 int size_of_arguments = (asc.size() + (invoke->has_receiver() ? 1 : 0)); // receiver 66.45 - Copy::conjoint_bytes(src_address, dest_address, 66.46 + Copy::conjoint_jbytes(src_address, dest_address, 66.47 size_of_arguments * Interpreter::stackElementSize); 66.48 IRT_END 66.49 #endif
67.1 --- a/src/share/vm/interpreter/interpreterRuntime.hpp Wed Jun 30 18:57:35 2010 -0700 67.2 +++ b/src/share/vm/interpreter/interpreterRuntime.hpp Fri Jul 02 01:36:15 2010 -0700 67.3 @@ -34,6 +34,7 @@ 67.4 static frame last_frame(JavaThread *thread) { return thread->last_frame(); } 67.5 static methodOop method(JavaThread *thread) { return last_frame(thread).interpreter_frame_method(); } 67.6 static address bcp(JavaThread *thread) { return last_frame(thread).interpreter_frame_bcp(); } 67.7 + static int bci(JavaThread *thread) { return last_frame(thread).interpreter_frame_bci(); } 67.8 static void set_bcp_and_mdp(address bcp, JavaThread*thread); 67.9 static Bytecodes::Code code(JavaThread *thread) { 67.10 // pass method to avoid calling unsafe bcp_to_method (partial fix 4926272) 67.11 @@ -59,6 +60,7 @@ 67.12 public: 67.13 // Constants 67.14 static void ldc (JavaThread* thread, bool wide); 67.15 + static void resolve_ldc (JavaThread* thread, Bytecodes::Code bytecode); 67.16 67.17 // Allocation 67.18 static void _new (JavaThread* thread, constantPoolOopDesc* pool, int index);
68.1 --- a/src/share/vm/interpreter/rewriter.cpp Wed Jun 30 18:57:35 2010 -0700 68.2 +++ b/src/share/vm/interpreter/rewriter.cpp Fri Jul 02 01:36:15 2010 -0700 68.3 @@ -38,6 +38,8 @@ 68.4 case JVM_CONSTANT_InterfaceMethodref: 68.5 case JVM_CONSTANT_Fieldref : // fall through 68.6 case JVM_CONSTANT_Methodref : // fall through 68.7 + case JVM_CONSTANT_MethodHandle : // fall through 68.8 + case JVM_CONSTANT_MethodType : // fall through 68.9 add_cp_cache_entry(i); 68.10 break; 68.11 } 68.12 @@ -131,6 +133,27 @@ 68.13 } 68.14 68.15 68.16 +// Rewrite some ldc bytecodes to _fast_aldc 68.17 +void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) { 68.18 + assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), ""); 68.19 + address p = bcp + offset; 68.20 + int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p); 68.21 + constantTag tag = _pool->tag_at(cp_index).value(); 68.22 + if (tag.is_method_handle() || tag.is_method_type()) { 68.23 + int cache_index = cp_entry_to_cp_cache(cp_index); 68.24 + if (is_wide) { 68.25 + (*bcp) = Bytecodes::_fast_aldc_w; 68.26 + assert(cache_index == (u2)cache_index, ""); 68.27 + Bytes::put_native_u2(p, cache_index); 68.28 + } else { 68.29 + (*bcp) = Bytecodes::_fast_aldc; 68.30 + assert(cache_index == (u1)cache_index, ""); 68.31 + (*p) = (u1)cache_index; 68.32 + } 68.33 + } 68.34 +} 68.35 + 68.36 + 68.37 // Rewrites a method given the index_map information 68.38 void Rewriter::scan_method(methodOop method) { 68.39 68.40 @@ -198,6 +221,12 @@ 68.41 case Bytecodes::_invokedynamic: 68.42 rewrite_invokedynamic(bcp, prefix_length+1); 68.43 break; 68.44 + case Bytecodes::_ldc: 68.45 + maybe_rewrite_ldc(bcp, prefix_length+1, false); 68.46 + break; 68.47 + case Bytecodes::_ldc_w: 68.48 + maybe_rewrite_ldc(bcp, prefix_length+1, true); 68.49 + break; 68.50 case Bytecodes::_jsr : // fall through 68.51 case Bytecodes::_jsr_w : nof_jsrs++; break; 68.52 case Bytecodes::_monitorenter : // fall through
69.1 --- a/src/share/vm/interpreter/rewriter.hpp Wed Jun 30 18:57:35 2010 -0700 69.2 +++ b/src/share/vm/interpreter/rewriter.hpp Fri Jul 02 01:36:15 2010 -0700 69.3 @@ -66,6 +66,7 @@ 69.4 void rewrite_Object_init(methodHandle m, TRAPS); 69.5 void rewrite_member_reference(address bcp, int offset); 69.6 void rewrite_invokedynamic(address bcp, int offset); 69.7 + void maybe_rewrite_ldc(address bcp, int offset, bool is_wide); 69.8 69.9 public: 69.10 // Driver routine:
70.1 --- a/src/share/vm/interpreter/templateTable.cpp Wed Jun 30 18:57:35 2010 -0700 70.2 +++ b/src/share/vm/interpreter/templateTable.cpp Fri Jul 02 01:36:15 2010 -0700 70.3 @@ -507,6 +507,9 @@ 70.4 def(Bytecodes::_fast_linearswitch , ubcp|disp|____|____, itos, vtos, fast_linearswitch , _ ); 70.5 def(Bytecodes::_fast_binaryswitch , ubcp|disp|____|____, itos, vtos, fast_binaryswitch , _ ); 70.6 70.7 + def(Bytecodes::_fast_aldc , ubcp|____|clvm|____, vtos, atos, fast_aldc , false ); 70.8 + def(Bytecodes::_fast_aldc_w , ubcp|____|clvm|____, vtos, atos, fast_aldc , true ); 70.9 + 70.10 def(Bytecodes::_return_register_finalizer , ____|disp|clvm|____, vtos, vtos, _return , vtos ); 70.11 70.12 def(Bytecodes::_shouldnotreachhere , ____|____|____|____, vtos, vtos, shouldnotreachhere , _ );
71.1 --- a/src/share/vm/interpreter/templateTable.hpp Wed Jun 30 18:57:35 2010 -0700 71.2 +++ b/src/share/vm/interpreter/templateTable.hpp Fri Jul 02 01:36:15 2010 -0700 71.3 @@ -123,6 +123,7 @@ 71.4 static void sipush(); 71.5 static void ldc(bool wide); 71.6 static void ldc2_w(); 71.7 + static void fast_aldc(bool wide); 71.8 71.9 static void locals_index(Register reg, int offset = 1); 71.10 static void iload();
72.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp Wed Jun 30 18:57:35 2010 -0700 72.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Fri Jul 02 01:36:15 2010 -0700 72.3 @@ -1,5 +1,5 @@ 72.4 /* 72.5 - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. 72.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 72.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 72.8 * 72.9 * This code is free software; you can redistribute it and/or modify it 72.10 @@ -284,12 +284,19 @@ 72.11 committed_unique_to_self(ind, MemRegion(new_end_aligned, 72.12 cur_committed.end())); 72.13 if (!uncommit_region.is_empty()) { 72.14 - if (!os::uncommit_memory((char*)uncommit_region.start(), 72.15 - uncommit_region.byte_size())) { 72.16 - assert(false, "Card table contraction failed"); 72.17 - // The call failed so don't change the end of the 72.18 - // committed region. This is better than taking the 72.19 - // VM down. 72.20 + // It is not safe to uncommit cards if the boundary between 72.21 + // the generations is moving. A shrink can uncommit cards 72.22 + // owned by generation A but being used by generation B. 72.23 + if (!UseAdaptiveGCBoundary) { 72.24 + if (!os::uncommit_memory((char*)uncommit_region.start(), 72.25 + uncommit_region.byte_size())) { 72.26 + assert(false, "Card table contraction failed"); 72.27 + // The call failed so don't change the end of the 72.28 + // committed region. This is better than taking the 72.29 + // VM down. 72.30 + new_end_aligned = _committed[ind].end(); 72.31 + } 72.32 + } else { 72.33 new_end_aligned = _committed[ind].end(); 72.34 } 72.35 } 72.36 @@ -297,6 +304,19 @@ 72.37 // In any case, we can reset the end of the current committed entry. 72.38 _committed[ind].set_end(new_end_aligned); 72.39 72.40 +#ifdef ASSERT 72.41 + // Check that the last card in the new region is committed according 72.42 + // to the tables. 
72.43 + bool covered = false; 72.44 + for (int cr = 0; cr < _cur_covered_regions; cr++) { 72.45 + if (_committed[cr].contains(new_end - 1)) { 72.46 + covered = true; 72.47 + break; 72.48 + } 72.49 + } 72.50 + assert(covered, "Card for end of new region not committed"); 72.51 +#endif 72.52 + 72.53 // The default of 0 is not necessarily clean cards. 72.54 jbyte* entry; 72.55 if (old_region.last() < _whole_heap.start()) { 72.56 @@ -354,6 +374,9 @@ 72.57 addr_for((jbyte*) _committed[ind].start()), 72.58 addr_for((jbyte*) _committed[ind].last())); 72.59 } 72.60 + // Touch the last card of the covered region to show that it 72.61 + // is committed (or SEGV). 72.62 + debug_only(*byte_for(_covered[ind].last());) 72.63 debug_only(verify_guard();) 72.64 } 72.65
73.1 --- a/src/share/vm/memory/genCollectedHeap.cpp Wed Jun 30 18:57:35 2010 -0700 73.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp Fri Jul 02 01:36:15 2010 -0700 73.3 @@ -179,9 +179,14 @@ 73.4 } 73.5 n_covered_regions += _gen_specs[i]->n_covered_regions(); 73.6 } 73.7 - assert(total_reserved % pageSize == 0, "Gen size"); 73.8 + assert(total_reserved % pageSize == 0, 73.9 + err_msg("Gen size; total_reserved=" SIZE_FORMAT ", pageSize=" 73.10 + SIZE_FORMAT, total_reserved, pageSize)); 73.11 total_reserved += perm_gen_spec->max_size(); 73.12 - assert(total_reserved % pageSize == 0, "Perm Gen size"); 73.13 + assert(total_reserved % pageSize == 0, 73.14 + err_msg("Perm size; total_reserved=" SIZE_FORMAT ", pageSize=" 73.15 + SIZE_FORMAT ", perm gen max=" SIZE_FORMAT, total_reserved, 73.16 + pageSize, perm_gen_spec->max_size())); 73.17 73.18 if (total_reserved < perm_gen_spec->max_size()) { 73.19 vm_exit_during_initialization(overflow_msg);
74.1 --- a/src/share/vm/oops/constantPoolKlass.cpp Wed Jun 30 18:57:35 2010 -0700 74.2 +++ b/src/share/vm/oops/constantPoolKlass.cpp Fri Jul 02 01:36:15 2010 -0700 74.3 @@ -372,6 +372,13 @@ 74.4 entry->print_value_on(st); 74.5 } 74.6 break; 74.7 + case JVM_CONSTANT_MethodHandle : 74.8 + st->print("ref_kind=%d", cp->method_handle_ref_kind_at(index)); 74.9 + st->print(" ref_index=%d", cp->method_handle_index_at(index)); 74.10 + break; 74.11 + case JVM_CONSTANT_MethodType : 74.12 + st->print("signature_index=%d", cp->method_type_index_at(index)); 74.13 + break; 74.14 default: 74.15 ShouldNotReachHere(); 74.16 break; 74.17 @@ -437,6 +444,7 @@ 74.18 // can be non-perm, can be non-instance (array) 74.19 } 74.20 } 74.21 + // FIXME: verify JSR 292 tags JVM_CONSTANT_MethodHandle, etc. 74.22 base++; 74.23 } 74.24 guarantee(cp->tags()->is_perm(), "should be in permspace");
75.1 --- a/src/share/vm/oops/constantPoolOop.cpp Wed Jun 30 18:57:35 2010 -0700 75.2 +++ b/src/share/vm/oops/constantPoolOop.cpp Fri Jul 02 01:36:15 2010 -0700 75.3 @@ -358,6 +358,11 @@ 75.4 return klass_at_noresolve(ref_index); 75.5 } 75.6 75.7 +symbolOop constantPoolOopDesc::uncached_klass_ref_at_noresolve(int which) { 75.8 + jint ref_index = uncached_klass_ref_index_at(which); 75.9 + return klass_at_noresolve(ref_index); 75.10 +} 75.11 + 75.12 char* constantPoolOopDesc::string_at_noresolve(int which) { 75.13 // Test entry type in case string is resolved while in here. 75.14 oop entry = *(obj_at_addr(which)); 75.15 @@ -384,6 +389,119 @@ 75.16 } 75.17 } 75.18 75.19 +oop constantPoolOopDesc::resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS) { 75.20 + oop result_oop = NULL; 75.21 + if (cache_index >= 0) { 75.22 + assert(index < 0, "only one kind of index at a time"); 75.23 + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); 75.24 + result_oop = cpc_entry->f1(); 75.25 + if (result_oop != NULL) { 75.26 + return result_oop; // that was easy... 75.27 + } 75.28 + index = cpc_entry->constant_pool_index(); 75.29 + } 75.30 + 75.31 + int tag_value = this_oop->tag_at(index).value(); 75.32 + switch (tag_value) { 75.33 + 75.34 + case JVM_CONSTANT_UnresolvedClass: 75.35 + case JVM_CONSTANT_UnresolvedClassInError: 75.36 + case JVM_CONSTANT_Class: 75.37 + { 75.38 + klassOop resolved = klass_at_impl(this_oop, index, CHECK_NULL); 75.39 + // ldc wants the java mirror. 
75.40 + result_oop = resolved->klass_part()->java_mirror(); 75.41 + break; 75.42 + } 75.43 + 75.44 + case JVM_CONSTANT_String: 75.45 + case JVM_CONSTANT_UnresolvedString: 75.46 + if (this_oop->is_pseudo_string_at(index)) { 75.47 + result_oop = this_oop->pseudo_string_at(index); 75.48 + break; 75.49 + } 75.50 + result_oop = string_at_impl(this_oop, index, CHECK_NULL); 75.51 + break; 75.52 + 75.53 + case JVM_CONSTANT_Object: 75.54 + result_oop = this_oop->object_at(index); 75.55 + break; 75.56 + 75.57 + case JVM_CONSTANT_MethodHandle: 75.58 + { 75.59 + int ref_kind = this_oop->method_handle_ref_kind_at(index); 75.60 + int callee_index = this_oop->method_handle_klass_index_at(index); 75.61 + symbolHandle name(THREAD, this_oop->method_handle_name_ref_at(index)); 75.62 + symbolHandle signature(THREAD, this_oop->method_handle_signature_ref_at(index)); 75.63 + if (PrintMiscellaneous) 75.64 + tty->print_cr("resolve JVM_CONSTANT_MethodHandle:%d [%d/%d/%d] %s.%s", 75.65 + ref_kind, index, this_oop->method_handle_index_at(index), 75.66 + callee_index, name->as_C_string(), signature->as_C_string()); 75.67 + KlassHandle callee; 75.68 + { klassOop k = klass_at_impl(this_oop, callee_index, CHECK_NULL); 75.69 + callee = KlassHandle(THREAD, k); 75.70 + } 75.71 + KlassHandle klass(THREAD, this_oop->pool_holder()); 75.72 + Handle value = SystemDictionary::link_method_handle_constant(klass, ref_kind, 75.73 + callee, name, signature, 75.74 + CHECK_NULL); 75.75 + result_oop = value(); 75.76 + // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error. 
75.77 + break; 75.78 + } 75.79 + 75.80 + case JVM_CONSTANT_MethodType: 75.81 + { 75.82 + symbolHandle signature(THREAD, this_oop->method_type_signature_at(index)); 75.83 + if (PrintMiscellaneous) 75.84 + tty->print_cr("resolve JVM_CONSTANT_MethodType [%d/%d] %s", 75.85 + index, this_oop->method_type_index_at(index), 75.86 + signature->as_C_string()); 75.87 + KlassHandle klass(THREAD, this_oop->pool_holder()); 75.88 + bool ignore_is_on_bcp = false; 75.89 + Handle value = SystemDictionary::find_method_handle_type(signature, 75.90 + klass, 75.91 + ignore_is_on_bcp, 75.92 + CHECK_NULL); 75.93 + result_oop = value(); 75.94 + // FIXME: Uniquify errors, using SystemDictionary::find_resolution_error. 75.95 + break; 75.96 + } 75.97 + 75.98 + /* maybe some day 75.99 + case JVM_CONSTANT_Integer: 75.100 + case JVM_CONSTANT_Float: 75.101 + case JVM_CONSTANT_Long: 75.102 + case JVM_CONSTANT_Double: 75.103 + result_oop = java_lang_boxing_object::create(...); 75.104 + break; 75.105 + */ 75.106 + 75.107 + default: 75.108 + DEBUG_ONLY( tty->print_cr("*** %p: tag at CP[%d/%d] = %d", 75.109 + this_oop(), index, cache_index, tag_value) ); 75.110 + assert(false, "unexpected constant tag"); 75.111 + break; 75.112 + } 75.113 + 75.114 + if (cache_index >= 0) { 75.115 + // Cache the oop here also. 75.116 + Handle result(THREAD, result_oop); 75.117 + result_oop = NULL; // safety 75.118 + ObjectLocker ol(this_oop, THREAD); 75.119 + ConstantPoolCacheEntry* cpc_entry = this_oop->cache()->entry_at(cache_index); 75.120 + oop result_oop2 = cpc_entry->f1(); 75.121 + if (result_oop2 != NULL) { 75.122 + // Race condition: May already be filled in while we were trying to lock. 
75.123 + return result_oop2; 75.124 + } 75.125 + cpc_entry->set_f1(result()); 75.126 + return result(); 75.127 + } else { 75.128 + return result_oop; 75.129 + } 75.130 +} 75.131 + 75.132 oop constantPoolOopDesc::string_at_impl(constantPoolHandle this_oop, int which, TRAPS) { 75.133 oop entry = *(this_oop->obj_at_addr(which)); 75.134 if (entry->is_symbol()) { 75.135 @@ -690,6 +808,28 @@ 75.136 } 75.137 } break; 75.138 75.139 + case JVM_CONSTANT_MethodType: 75.140 + { 75.141 + int k1 = method_type_index_at(index1); 75.142 + int k2 = cp2->method_type_index_at(index2); 75.143 + if (k1 == k2) { 75.144 + return true; 75.145 + } 75.146 + } break; 75.147 + 75.148 + case JVM_CONSTANT_MethodHandle: 75.149 + { 75.150 + int k1 = method_handle_ref_kind_at(index1); 75.151 + int k2 = cp2->method_handle_ref_kind_at(index2); 75.152 + if (k1 == k2) { 75.153 + int i1 = method_handle_index_at(index1); 75.154 + int i2 = cp2->method_handle_index_at(index2); 75.155 + if (i1 == i2) { 75.156 + return true; 75.157 + } 75.158 + } 75.159 + } break; 75.160 + 75.161 case JVM_CONSTANT_UnresolvedString: 75.162 { 75.163 symbolOop s1 = unresolved_string_at(index1); 75.164 @@ -863,6 +1003,19 @@ 75.165 to_cp->symbol_at_put(to_i, s); 75.166 } break; 75.167 75.168 + case JVM_CONSTANT_MethodType: 75.169 + { 75.170 + jint k = method_type_index_at(from_i); 75.171 + to_cp->method_type_index_at_put(to_i, k); 75.172 + } break; 75.173 + 75.174 + case JVM_CONSTANT_MethodHandle: 75.175 + { 75.176 + int k1 = method_handle_ref_kind_at(from_i); 75.177 + int k2 = method_handle_index_at(from_i); 75.178 + to_cp->method_handle_index_at_put(to_i, k1, k2); 75.179 + } break; 75.180 + 75.181 // Invalid is used as the tag for the second constant pool entry 75.182 // occupied by JVM_CONSTANT_Double or JVM_CONSTANT_Long. It should 75.183 // not be seen by itself. 
75.184 @@ -1066,8 +1219,12 @@ 75.185 case JVM_CONSTANT_UnresolvedClassInError: 75.186 case JVM_CONSTANT_StringIndex: 75.187 case JVM_CONSTANT_UnresolvedString: 75.188 + case JVM_CONSTANT_MethodType: 75.189 return 3; 75.190 75.191 + case JVM_CONSTANT_MethodHandle: 75.192 + return 4; //tag, ref_kind, ref_index 75.193 + 75.194 case JVM_CONSTANT_Integer: 75.195 case JVM_CONSTANT_Float: 75.196 case JVM_CONSTANT_Fieldref: 75.197 @@ -1271,6 +1428,22 @@ 75.198 DBG(printf("JVM_CONSTANT_StringIndex: %hd", idx1)); 75.199 break; 75.200 } 75.201 + case JVM_CONSTANT_MethodHandle: { 75.202 + *bytes = JVM_CONSTANT_MethodHandle; 75.203 + int kind = method_handle_ref_kind_at(idx); 75.204 + idx1 = method_handle_index_at(idx); 75.205 + *(bytes+1) = (unsigned char) kind; 75.206 + Bytes::put_Java_u2((address) (bytes+2), idx1); 75.207 + DBG(printf("JVM_CONSTANT_MethodHandle: %d %hd", kind, idx1)); 75.208 + break; 75.209 + } 75.210 + case JVM_CONSTANT_MethodType: { 75.211 + *bytes = JVM_CONSTANT_MethodType; 75.212 + idx1 = method_type_index_at(idx); 75.213 + Bytes::put_Java_u2((address) (bytes+1), idx1); 75.214 + DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1)); 75.215 + break; 75.216 + } 75.217 } 75.218 DBG(printf("\n")); 75.219 bytes += ent_size;
76.1 --- a/src/share/vm/oops/constantPoolOop.hpp Wed Jun 30 18:57:35 2010 -0700 76.2 +++ b/src/share/vm/oops/constantPoolOop.hpp Fri Jul 02 01:36:15 2010 -0700 76.3 @@ -146,6 +146,16 @@ 76.4 oop_store_without_check(obj_at_addr(which), oop(s)); 76.5 } 76.6 76.7 + void method_handle_index_at_put(int which, int ref_kind, int ref_index) { 76.8 + tag_at_put(which, JVM_CONSTANT_MethodHandle); 76.9 + *int_at_addr(which) = ((jint) ref_index<<16) | ref_kind; 76.10 + } 76.11 + 76.12 + void method_type_index_at_put(int which, int ref_index) { 76.13 + tag_at_put(which, JVM_CONSTANT_MethodType); 76.14 + *int_at_addr(which) = ref_index; 76.15 + } 76.16 + 76.17 // Temporary until actual use 76.18 void unresolved_string_at_put(int which, symbolOop s) { 76.19 *obj_at_addr(which) = NULL; 76.20 @@ -357,6 +367,36 @@ 76.21 return *int_at_addr(which); 76.22 } 76.23 76.24 + int method_handle_ref_kind_at(int which) { 76.25 + assert(tag_at(which).is_method_handle(), "Corrupted constant pool"); 76.26 + return extract_low_short_from_int(*int_at_addr(which)); // mask out unwanted ref_index bits 76.27 + } 76.28 + int method_handle_index_at(int which) { 76.29 + assert(tag_at(which).is_method_handle(), "Corrupted constant pool"); 76.30 + return extract_high_short_from_int(*int_at_addr(which)); // shift out unwanted ref_kind bits 76.31 + } 76.32 + int method_type_index_at(int which) { 76.33 + assert(tag_at(which).is_method_type(), "Corrupted constant pool"); 76.34 + return *int_at_addr(which); 76.35 + } 76.36 + // Derived queries: 76.37 + symbolOop method_handle_name_ref_at(int which) { 76.38 + int member = method_handle_index_at(which); 76.39 + return impl_name_ref_at(member, true); 76.40 + } 76.41 + symbolOop method_handle_signature_ref_at(int which) { 76.42 + int member = method_handle_index_at(which); 76.43 + return impl_signature_ref_at(member, true); 76.44 + } 76.45 + int method_handle_klass_index_at(int which) { 76.46 + int member = method_handle_index_at(which); 76.47 + return 
impl_klass_ref_index_at(member, true); 76.48 + } 76.49 + symbolOop method_type_signature_at(int which) { 76.50 + int sym = method_type_index_at(which); 76.51 + return symbol_at(sym); 76.52 + } 76.53 + 76.54 // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve, 76.55 // name_and_type_ref_index_at) all expect to be passed indices obtained 76.56 // directly from the bytecode, and extracted according to java byte order. 76.57 @@ -388,6 +428,17 @@ 76.58 resolve_string_constants_impl(h_this, CHECK); 76.59 } 76.60 76.61 + // Resolve late bound constants. 76.62 + oop resolve_constant_at(int index, TRAPS) { 76.63 + constantPoolHandle h_this(THREAD, this); 76.64 + return resolve_constant_at_impl(h_this, index, -1, THREAD); 76.65 + } 76.66 + 76.67 + oop resolve_cached_constant_at(int cache_index, TRAPS) { 76.68 + constantPoolHandle h_this(THREAD, this); 76.69 + return resolve_constant_at_impl(h_this, -1, cache_index, THREAD); 76.70 + } 76.71 + 76.72 // Klass name matches name at offset 76.73 bool klass_name_at_matches(instanceKlassHandle k, int which); 76.74 76.75 @@ -420,6 +471,7 @@ 76.76 // Routines currently used for annotations (only called by jvm.cpp) but which might be used in the 76.77 // future by other Java code. These take constant pool indices rather than possibly-byte-swapped 76.78 // constant pool cache indices as do the peer methods above. 
76.79 + symbolOop uncached_klass_ref_at_noresolve(int which); 76.80 symbolOop uncached_name_ref_at(int which) { return impl_name_ref_at(which, true); } 76.81 symbolOop uncached_signature_ref_at(int which) { return impl_signature_ref_at(which, true); } 76.82 int uncached_klass_ref_index_at(int which) { return impl_klass_ref_index_at(which, true); } 76.83 @@ -436,6 +488,8 @@ 76.84 76.85 #ifdef ASSERT 76.86 enum { CPCACHE_INDEX_TAG = 0x10000 }; // helps keep CP cache indices distinct from CP indices 76.87 +#else 76.88 + enum { CPCACHE_INDEX_TAG = 0 }; // in product mode, this zero value is a no-op 76.89 #endif //ASSERT 76.90 76.91 private: 76.92 @@ -469,6 +523,8 @@ 76.93 // Resolve string constants (to prevent allocation during compilation) 76.94 static void resolve_string_constants_impl(constantPoolHandle this_oop, TRAPS); 76.95 76.96 + static oop resolve_constant_at_impl(constantPoolHandle this_oop, int index, int cache_index, TRAPS); 76.97 + 76.98 public: 76.99 // Merging constantPoolOop support: 76.100 bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
77.1 --- a/src/share/vm/oops/cpCacheOop.hpp Wed Jun 30 18:57:35 2010 -0700 77.2 +++ b/src/share/vm/oops/cpCacheOop.hpp Fri Jul 02 01:36:15 2010 -0700 77.3 @@ -110,6 +110,7 @@ 77.4 class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC { 77.5 friend class VMStructs; 77.6 friend class constantPoolCacheKlass; 77.7 + friend class constantPoolOopDesc; //resolve_constant_at_impl => set_f1 77.8 77.9 private: 77.10 volatile intx _indices; // constant pool index & rewrite bytecodes
78.1 --- a/src/share/vm/opto/cfgnode.cpp Wed Jun 30 18:57:35 2010 -0700 78.2 +++ b/src/share/vm/opto/cfgnode.cpp Fri Jul 02 01:36:15 2010 -0700 78.3 @@ -472,9 +472,7 @@ 78.4 assert( n->req() == 2 && n->in(1) != NULL, "Only one data input expected" ); 78.5 // Break dead loop data path. 78.6 // Eagerly replace phis with top to avoid phis copies generation. 78.7 - igvn->add_users_to_worklist(n); 78.8 - igvn->hash_delete(n); // Yank from hash before hacking edges 78.9 - igvn->subsume_node(n, top); 78.10 + igvn->replace_node(n, top); 78.11 if( max != outcnt() ) { 78.12 progress = true; 78.13 j = refresh_out_pos(j); 78.14 @@ -518,18 +516,17 @@ 78.15 igvn->hash_delete(n); // Remove from worklist before modifying edges 78.16 if( n->is_Phi() ) { // Collapse all Phis 78.17 // Eagerly replace phis to avoid copies generation. 78.18 - igvn->add_users_to_worklist(n); 78.19 - igvn->hash_delete(n); // Yank from hash before hacking edges 78.20 + Node* in; 78.21 if( cnt == 0 ) { 78.22 assert( n->req() == 1, "No data inputs expected" ); 78.23 - igvn->subsume_node(n, parent_ctrl); // replaced by top 78.24 + in = parent_ctrl; // replaced by top 78.25 } else { 78.26 assert( n->req() == 2 && n->in(1) != NULL, "Only one data input expected" ); 78.27 - Node* in1 = n->in(1); // replaced by unique input 78.28 - if( n->as_Phi()->is_unsafe_data_reference(in1) ) 78.29 - in1 = phase->C->top(); // replaced by top 78.30 - igvn->subsume_node(n, in1); 78.31 + in = n->in(1); // replaced by unique input 78.32 + if( n->as_Phi()->is_unsafe_data_reference(in) ) 78.33 + in = phase->C->top(); // replaced by top 78.34 } 78.35 + igvn->replace_node(n, in); 78.36 } 78.37 else if( n->is_Region() ) { // Update all incoming edges 78.38 assert( !igvn->eqv(n, this), "Must be removed from DefUse edges"); 78.39 @@ -2127,7 +2124,7 @@ 78.40 // if it's not there, there's nothing to do. 
78.41 Node* fallthru = proj_out(0); 78.42 if (fallthru != NULL) { 78.43 - phase->is_IterGVN()->subsume_node(fallthru, in(0)); 78.44 + phase->is_IterGVN()->replace_node(fallthru, in(0)); 78.45 } 78.46 return phase->C->top(); 78.47 }
79.1 --- a/src/share/vm/opto/compile.cpp Wed Jun 30 18:57:35 2010 -0700 79.2 +++ b/src/share/vm/opto/compile.cpp Fri Jul 02 01:36:15 2010 -0700 79.3 @@ -2000,6 +2000,17 @@ 79.4 } 79.5 } 79.6 79.7 +#ifdef ASSERT 79.8 + if( n->is_Mem() ) { 79.9 + Compile* C = Compile::current(); 79.10 + int alias_idx = C->get_alias_index(n->as_Mem()->adr_type()); 79.11 + assert( n->in(0) != NULL || alias_idx != Compile::AliasIdxRaw || 79.12 + // oop will be recorded in oop map if load crosses safepoint 79.13 + n->is_Load() && (n->as_Load()->bottom_type()->isa_oopptr() || 79.14 + LoadNode::is_immutable_value(n->in(MemNode::Address))), 79.15 + "raw memory operations should have control edge"); 79.16 + } 79.17 +#endif 79.18 // Count FPU ops and common calls, implements item (3) 79.19 switch( nop ) { 79.20 // Count all float operations that may use FPU
80.1 --- a/src/share/vm/opto/graphKit.cpp Wed Jun 30 18:57:35 2010 -0700 80.2 +++ b/src/share/vm/opto/graphKit.cpp Fri Jul 02 01:36:15 2010 -0700 80.3 @@ -1789,9 +1789,10 @@ 80.4 80.5 void GraphKit::increment_counter(Node* counter_addr) { 80.6 int adr_type = Compile::AliasIdxRaw; 80.7 - Node* cnt = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type); 80.8 + Node* ctrl = control(); 80.9 + Node* cnt = make_load(ctrl, counter_addr, TypeInt::INT, T_INT, adr_type); 80.10 Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1))); 80.11 - store_to_memory( NULL, counter_addr, incr, T_INT, adr_type ); 80.12 + store_to_memory( ctrl, counter_addr, incr, T_INT, adr_type ); 80.13 } 80.14 80.15 80.16 @@ -2771,11 +2772,7 @@ 80.17 // Update the counter for this lock. Don't bother using an atomic 80.18 // operation since we don't require absolute accuracy. 80.19 lock->create_lock_counter(map()->jvms()); 80.20 - int adr_type = Compile::AliasIdxRaw; 80.21 - Node* counter_addr = makecon(TypeRawPtr::make(lock->counter()->addr())); 80.22 - Node* cnt = make_load(NULL, counter_addr, TypeInt::INT, T_INT, adr_type); 80.23 - Node* incr = _gvn.transform(new (C, 3) AddINode(cnt, _gvn.intcon(1))); 80.24 - store_to_memory(control(), counter_addr, incr, T_INT, adr_type); 80.25 + increment_counter(lock->counter()->addr()); 80.26 } 80.27 #endif 80.28
81.1 --- a/src/share/vm/opto/ifnode.cpp Wed Jun 30 18:57:35 2010 -0700 81.2 +++ b/src/share/vm/opto/ifnode.cpp Fri Jul 02 01:36:15 2010 -0700 81.3 @@ -1081,11 +1081,9 @@ 81.4 81.5 igvn->register_new_node_with_optimizer(new_if_f); 81.6 igvn->register_new_node_with_optimizer(new_if_t); 81.7 - igvn->hash_delete(old_if_f); 81.8 - igvn->hash_delete(old_if_t); 81.9 // Flip test, so flip trailing control 81.10 - igvn->subsume_node(old_if_f, new_if_t); 81.11 - igvn->subsume_node(old_if_t, new_if_f); 81.12 + igvn->replace_node(old_if_f, new_if_t); 81.13 + igvn->replace_node(old_if_t, new_if_f); 81.14 81.15 // Progress 81.16 return iff;
82.1 --- a/src/share/vm/opto/library_call.cpp Wed Jun 30 18:57:35 2010 -0700 82.2 +++ b/src/share/vm/opto/library_call.cpp Fri Jul 02 01:36:15 2010 -0700 82.3 @@ -3512,8 +3512,7 @@ 82.4 82.5 // Get the header out of the object, use LoadMarkNode when available 82.6 Node* header_addr = basic_plus_adr(obj, oopDesc::mark_offset_in_bytes()); 82.7 - Node* header = make_load(NULL, header_addr, TypeRawPtr::BOTTOM, T_ADDRESS); 82.8 - header = _gvn.transform( new (C, 2) CastP2XNode(NULL, header) ); 82.9 + Node* header = make_load(control(), header_addr, TypeX_X, TypeX_X->basic_type()); 82.10 82.11 // Test the header to see if it is unlocked. 82.12 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); 82.13 @@ -5202,7 +5201,7 @@ 82.14 // super_check_offset, for the desired klass. 82.15 int sco_offset = Klass::super_check_offset_offset_in_bytes() + sizeof(oopDesc); 82.16 Node* p3 = basic_plus_adr(dest_elem_klass, sco_offset); 82.17 - Node* n3 = new(C, 3) LoadINode(NULL, immutable_memory(), p3, TypeRawPtr::BOTTOM); 82.18 + Node* n3 = new(C, 3) LoadINode(NULL, memory(p3), p3, _gvn.type(p3)->is_ptr()); 82.19 Node* check_offset = _gvn.transform(n3); 82.20 Node* check_value = dest_elem_klass; 82.21
83.1 --- a/src/share/vm/opto/loopTransform.cpp Wed Jun 30 18:57:35 2010 -0700 83.2 +++ b/src/share/vm/opto/loopTransform.cpp Fri Jul 02 01:36:15 2010 -0700 83.3 @@ -194,8 +194,7 @@ 83.4 addx = new (phase->C, 3) AddINode(x, inv); 83.5 } 83.6 phase->register_new_node(addx, phase->get_ctrl(x)); 83.7 - phase->_igvn.hash_delete(n1); 83.8 - phase->_igvn.subsume_node(n1, addx); 83.9 + phase->_igvn.replace_node(n1, addx); 83.10 return addx; 83.11 } 83.12 83.13 @@ -1586,8 +1585,7 @@ 83.14 Node *phi = cl->phi(); 83.15 Node *final = new (phase->C, 3) SubINode( cl->limit(), cl->stride() ); 83.16 phase->register_new_node(final,cl->in(LoopNode::EntryControl)); 83.17 - phase->_igvn.hash_delete(phi); 83.18 - phase->_igvn.subsume_node(phi,final); 83.19 + phase->_igvn.replace_node(phi,final); 83.20 phase->C->set_major_progress(); 83.21 return true; 83.22 }
84.1 --- a/src/share/vm/opto/loopnode.cpp Wed Jun 30 18:57:35 2010 -0700 84.2 +++ b/src/share/vm/opto/loopnode.cpp Fri Jul 02 01:36:15 2010 -0700 84.3 @@ -400,7 +400,7 @@ 84.4 nphi->set_req(LoopNode::LoopBackControl, phi->in(LoopNode::LoopBackControl)); 84.5 nphi = _igvn.register_new_node_with_optimizer(nphi); 84.6 set_ctrl(nphi, get_ctrl(phi)); 84.7 - _igvn.subsume_node(phi, nphi); 84.8 + _igvn.replace_node(phi, nphi); 84.9 phi = nphi->as_Phi(); 84.10 } 84.11 cmp = cmp->clone(); 84.12 @@ -760,7 +760,7 @@ 84.13 // which in turn prevents removing an empty loop. 84.14 Node *id_old_phi = old_phi->Identity( &igvn ); 84.15 if( id_old_phi != old_phi ) { // Found a simple identity? 84.16 - // Note that I cannot call 'subsume_node' here, because 84.17 + // Note that I cannot call 'replace_node' here, because 84.18 // that will yank the edge from old_phi to the Region and 84.19 // I'm mid-iteration over the Region's uses. 84.20 for (DUIterator_Last imin, i = old_phi->last_outs(imin); i >= imin; ) { 84.21 @@ -1065,11 +1065,9 @@ 84.22 l = igvn.register_new_node_with_optimizer(l, _head); 84.23 phase->set_created_loop_node(); 84.24 // Go ahead and replace _head 84.25 - phase->_igvn.subsume_node( _head, l ); 84.26 + phase->_igvn.replace_node( _head, l ); 84.27 _head = l; 84.28 phase->set_loop(_head, this); 84.29 - for (DUIterator_Fast imax, i = l->fast_outs(imax); i < imax; i++) 84.30 - phase->_igvn.add_users_to_worklist(l->fast_out(i)); 84.31 } 84.32 84.33 // Now recursively beautify nested loops 84.34 @@ -1329,8 +1327,7 @@ 84.35 Node* add = new (C, 3) AddINode(ratio_idx, diff); 84.36 phase->_igvn.register_new_node_with_optimizer(add); 84.37 phase->set_ctrl(add, cl); 84.38 - phase->_igvn.hash_delete( phi2 ); 84.39 - phase->_igvn.subsume_node( phi2, add ); 84.40 + phase->_igvn.replace_node( phi2, add ); 84.41 // Sometimes an induction variable is unused 84.42 if (add->outcnt() == 0) { 84.43 phase->_igvn.remove_dead_node(add);
85.1 --- a/src/share/vm/opto/loopnode.hpp Wed Jun 30 18:57:35 2010 -0700 85.2 +++ b/src/share/vm/opto/loopnode.hpp Fri Jul 02 01:36:15 2010 -0700 85.3 @@ -626,8 +626,7 @@ 85.4 _nodes.map( old_node->_idx, (Node*)((intptr_t)new_node + 1) ); 85.5 } 85.6 void lazy_replace( Node *old_node, Node *new_node ) { 85.7 - _igvn.hash_delete(old_node); 85.8 - _igvn.subsume_node( old_node, new_node ); 85.9 + _igvn.replace_node( old_node, new_node ); 85.10 lazy_update( old_node, new_node ); 85.11 } 85.12 void lazy_replace_proj( Node *old_node, Node *new_node ) {
86.1 --- a/src/share/vm/opto/loopopts.cpp Wed Jun 30 18:57:35 2010 -0700 86.2 +++ b/src/share/vm/opto/loopopts.cpp Fri Jul 02 01:36:15 2010 -0700 86.3 @@ -354,8 +354,7 @@ 86.4 register_new_node( var_scale, n_ctrl ); 86.5 Node *var_add = new (C, 3) AddINode( var_scale, inv_scale ); 86.6 register_new_node( var_add, n_ctrl ); 86.7 - _igvn.hash_delete( n ); 86.8 - _igvn.subsume_node( n, var_add ); 86.9 + _igvn.replace_node( n, var_add ); 86.10 return var_add; 86.11 } 86.12 86.13 @@ -390,8 +389,7 @@ 86.14 register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) ); 86.15 Node *add2 = new (C, 4) AddPNode( n->in(1), add1, n->in(2)->in(3) ); 86.16 register_new_node( add2, n_ctrl ); 86.17 - _igvn.hash_delete( n ); 86.18 - _igvn.subsume_node( n, add2 ); 86.19 + _igvn.replace_node( n, add2 ); 86.20 return add2; 86.21 } 86.22 } 86.23 @@ -412,8 +410,7 @@ 86.24 register_new_node( add1, n_loop->_head->in(LoopNode::EntryControl) ); 86.25 Node *add2 = new (C, 4) AddPNode( n->in(1), add1, V ); 86.26 register_new_node( add2, n_ctrl ); 86.27 - _igvn.hash_delete( n ); 86.28 - _igvn.subsume_node( n, add2 ); 86.29 + _igvn.replace_node( n, add2 ); 86.30 return add2; 86.31 } 86.32 } 86.33 @@ -555,8 +552,7 @@ 86.34 } 86.35 Node *cmov = CMoveNode::make( C, cmov_ctrl, iff->in(1), phi->in(1+flip), phi->in(2-flip), _igvn.type(phi) ); 86.36 register_new_node( cmov, cmov_ctrl ); 86.37 - _igvn.hash_delete(phi); 86.38 - _igvn.subsume_node( phi, cmov ); 86.39 + _igvn.replace_node( phi, cmov ); 86.40 #ifndef PRODUCT 86.41 if( VerifyLoopOptimizations ) verify(); 86.42 #endif 86.43 @@ -642,8 +638,7 @@ 86.44 86.45 // Found a Phi to split thru! 86.46 // Replace 'n' with the new phi 86.47 - _igvn.hash_delete(n); 86.48 - _igvn.subsume_node( n, phi ); 86.49 + _igvn.replace_node( n, phi ); 86.50 // Moved a load around the loop, 'en-registering' something. 
86.51 if( n_blk->Opcode() == Op_Loop && n->is_Load() && 86.52 !phi->in(LoopNode::LoopBackControl)->is_Load() ) 86.53 @@ -789,13 +784,11 @@ 86.54 86.55 // Found a Phi to split thru! 86.56 // Replace 'n' with the new phi 86.57 - _igvn.hash_delete(n); 86.58 - _igvn.subsume_node( n, phi ); 86.59 + _igvn.replace_node( n, phi ); 86.60 86.61 // Now split the bool up thru the phi 86.62 Node *bolphi = split_thru_phi( bol, n_ctrl, -1 ); 86.63 - _igvn.hash_delete(bol); 86.64 - _igvn.subsume_node( bol, bolphi ); 86.65 + _igvn.replace_node( bol, bolphi ); 86.66 assert( iff->in(1) == bolphi, "" ); 86.67 if( bolphi->Value(&_igvn)->singleton() ) 86.68 return; 86.69 @@ -803,8 +796,7 @@ 86.70 // Conditional-move? Must split up now 86.71 if( !iff->is_If() ) { 86.72 Node *cmovphi = split_thru_phi( iff, n_ctrl, -1 ); 86.73 - _igvn.hash_delete(iff); 86.74 - _igvn.subsume_node( iff, cmovphi ); 86.75 + _igvn.replace_node( iff, cmovphi ); 86.76 return; 86.77 } 86.78 86.79 @@ -950,9 +942,7 @@ 86.80 if( n_op == Op_Opaque2 && 86.81 n->in(1) != NULL && 86.82 get_loop(get_ctrl(n)) == get_loop(get_ctrl(n->in(1))) ) { 86.83 - _igvn.add_users_to_worklist(n); 86.84 - _igvn.hash_delete(n); 86.85 - _igvn.subsume_node( n, n->in(1) ); 86.86 + _igvn.replace_node( n, n->in(1) ); 86.87 } 86.88 } 86.89 86.90 @@ -1425,7 +1415,7 @@ 86.91 // IGVN does CSE). 86.92 Node *hit = _igvn.hash_find_insert(use); 86.93 if( hit ) // Go ahead and re-hash for hits. 86.94 - _igvn.subsume_node( use, hit ); 86.95 + _igvn.replace_node( use, hit ); 86.96 } 86.97 86.98 // If 'use' was in the loop-exit block, it now needs to be sunk
87.1 --- a/src/share/vm/opto/macro.cpp Wed Jun 30 18:57:35 2010 -0700 87.2 +++ b/src/share/vm/opto/macro.cpp Fri Jul 02 01:36:15 2010 -0700 87.3 @@ -135,8 +135,7 @@ 87.4 if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); 87.5 copy_call_debug_info(oldcall, call); 87.6 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. 87.7 - _igvn.hash_delete(oldcall); 87.8 - _igvn.subsume_node(oldcall, call); 87.9 + _igvn.replace_node(oldcall, call); 87.10 transform_later(call); 87.11 87.12 return call; 87.13 @@ -523,8 +522,7 @@ 87.14 // Kill all new Phis 87.15 while(value_phis.is_nonempty()) { 87.16 Node* n = value_phis.node(); 87.17 - _igvn.hash_delete(n); 87.18 - _igvn.subsume_node(n, C->top()); 87.19 + _igvn.replace_node(n, C->top()); 87.20 value_phis.pop(); 87.21 } 87.22 } 87.23 @@ -1311,8 +1309,7 @@ 87.24 if (!always_slow) { 87.25 call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. 87.26 } 87.27 - _igvn.hash_delete(alloc); 87.28 - _igvn.subsume_node(alloc, call); 87.29 + _igvn.replace_node(alloc, call); 87.30 transform_later(call); 87.31 87.32 // Identify the output projections from the allocate node and 87.33 @@ -1431,7 +1428,7 @@ 87.34 Node* mark_node = NULL; 87.35 // For now only enable fast locking for non-array types 87.36 if (UseBiasedLocking && (length == NULL)) { 87.37 - mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS); 87.38 + mark_node = make_load(control, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS); 87.39 } else { 87.40 mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); 87.41 }
88.1 --- a/src/share/vm/opto/memnode.cpp Wed Jun 30 18:57:35 2010 -0700 88.2 +++ b/src/share/vm/opto/memnode.cpp Fri Jul 02 01:36:15 2010 -0700 88.3 @@ -815,6 +815,16 @@ 88.4 } 88.5 #endif 88.6 88.7 +#ifdef ASSERT 88.8 +//----------------------------is_immutable_value------------------------------- 88.9 +// Helper function to allow a raw load without control edge for some cases 88.10 +bool LoadNode::is_immutable_value(Node* adr) { 88.11 + return (adr->is_AddP() && adr->in(AddPNode::Base)->is_top() && 88.12 + adr->in(AddPNode::Address)->Opcode() == Op_ThreadLocal && 88.13 + (adr->in(AddPNode::Offset)->find_intptr_t_con(-1) == 88.14 + in_bytes(JavaThread::osthread_offset()))); 88.15 +} 88.16 +#endif 88.17 88.18 //----------------------------LoadNode::make----------------------------------- 88.19 // Polymorphic factory method: 88.20 @@ -828,6 +838,11 @@ 88.21 assert(!(adr_type->isa_aryptr() && 88.22 adr_type->offset() == arrayOopDesc::length_offset_in_bytes()), 88.23 "use LoadRangeNode instead"); 88.24 + // Check control edge of raw loads 88.25 + assert( ctl != NULL || C->get_alias_index(adr_type) != Compile::AliasIdxRaw || 88.26 + // oop will be recorded in oop map if load crosses safepoint 88.27 + rt->isa_oopptr() || is_immutable_value(adr), 88.28 + "raw memory operations should have control edge"); 88.29 switch (bt) { 88.30 case T_BOOLEAN: return new (C, 3) LoadUBNode(ctl, mem, adr, adr_type, rt->is_int() ); 88.31 case T_BYTE: return new (C, 3) LoadBNode (ctl, mem, adr, adr_type, rt->is_int() ); 88.32 @@ -2064,6 +2079,8 @@ 88.33 // Polymorphic factory method: 88.34 StoreNode* StoreNode::make( PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, BasicType bt ) { 88.35 Compile* C = gvn.C; 88.36 + assert( C->get_alias_index(adr_type) != Compile::AliasIdxRaw || 88.37 + ctl != NULL, "raw memory operations should have control edge"); 88.38 88.39 switch (bt) { 88.40 case T_BOOLEAN:
89.1 --- a/src/share/vm/opto/memnode.hpp Wed Jun 30 18:57:35 2010 -0700 89.2 +++ b/src/share/vm/opto/memnode.hpp Fri Jul 02 01:36:15 2010 -0700 89.3 @@ -189,6 +189,10 @@ 89.4 #ifndef PRODUCT 89.5 virtual void dump_spec(outputStream *st) const; 89.6 #endif 89.7 +#ifdef ASSERT 89.8 + // Helper function to allow a raw load without control edge for some cases 89.9 + static bool is_immutable_value(Node* adr); 89.10 +#endif 89.11 protected: 89.12 const Type* load_array_final_field(const TypeKlassPtr *tkls, 89.13 ciKlass* klass) const;
90.1 --- a/src/share/vm/opto/parse1.cpp Wed Jun 30 18:57:35 2010 -0700 90.2 +++ b/src/share/vm/opto/parse1.cpp Fri Jul 02 01:36:15 2010 -0700 90.3 @@ -88,15 +88,16 @@ 90.4 Node *local_addrs_base) { 90.5 Node *mem = memory(Compile::AliasIdxRaw); 90.6 Node *adr = basic_plus_adr( local_addrs_base, local_addrs, -index*wordSize ); 90.7 + Node *ctl = control(); 90.8 90.9 // Very similar to LoadNode::make, except we handle un-aligned longs and 90.10 // doubles on Sparc. Intel can handle them just fine directly. 90.11 Node *l; 90.12 switch( bt ) { // Signature is flattened 90.13 - case T_INT: l = new (C, 3) LoadINode( 0, mem, adr, TypeRawPtr::BOTTOM ); break; 90.14 - case T_FLOAT: l = new (C, 3) LoadFNode( 0, mem, adr, TypeRawPtr::BOTTOM ); break; 90.15 - case T_ADDRESS: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break; 90.16 - case T_OBJECT: l = new (C, 3) LoadPNode( 0, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break; 90.17 + case T_INT: l = new (C, 3) LoadINode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break; 90.18 + case T_FLOAT: l = new (C, 3) LoadFNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); break; 90.19 + case T_ADDRESS: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); break; 90.20 + case T_OBJECT: l = new (C, 3) LoadPNode( ctl, mem, adr, TypeRawPtr::BOTTOM, TypeInstPtr::BOTTOM ); break; 90.21 case T_LONG: 90.22 case T_DOUBLE: { 90.23 // Since arguments are in reverse order, the argument address 'adr' 90.24 @@ -104,12 +105,12 @@ 90.25 adr = basic_plus_adr( local_addrs_base, local_addrs, -(index+1)*wordSize ); 90.26 if( Matcher::misaligned_doubles_ok ) { 90.27 l = (bt == T_DOUBLE) 90.28 - ? (Node*)new (C, 3) LoadDNode( 0, mem, adr, TypeRawPtr::BOTTOM ) 90.29 - : (Node*)new (C, 3) LoadLNode( 0, mem, adr, TypeRawPtr::BOTTOM ); 90.30 + ? 
(Node*)new (C, 3) LoadDNode( ctl, mem, adr, TypeRawPtr::BOTTOM ) 90.31 + : (Node*)new (C, 3) LoadLNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); 90.32 } else { 90.33 l = (bt == T_DOUBLE) 90.34 - ? (Node*)new (C, 3) LoadD_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM ) 90.35 - : (Node*)new (C, 3) LoadL_unalignedNode( 0, mem, adr, TypeRawPtr::BOTTOM ); 90.36 + ? (Node*)new (C, 3) LoadD_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM ) 90.37 + : (Node*)new (C, 3) LoadL_unalignedNode( ctl, mem, adr, TypeRawPtr::BOTTOM ); 90.38 } 90.39 break; 90.40 }
91.1 --- a/src/share/vm/opto/parse2.cpp Wed Jun 30 18:57:35 2010 -0700 91.2 +++ b/src/share/vm/opto/parse2.cpp Fri Jul 02 01:36:15 2010 -0700 91.3 @@ -1324,33 +1324,21 @@ 91.4 case Bytecodes::_ldc_w: 91.5 case Bytecodes::_ldc2_w: 91.6 // If the constant is unresolved, run this BC once in the interpreter. 91.7 - if (iter().is_unresolved_string()) { 91.8 - uncommon_trap(Deoptimization::make_trap_request 91.9 - (Deoptimization::Reason_unloaded, 91.10 - Deoptimization::Action_reinterpret, 91.11 - iter().get_constant_index()), 91.12 - NULL, "unresolved_string"); 91.13 - break; 91.14 - } else { 91.15 + { 91.16 ciConstant constant = iter().get_constant(); 91.17 - if (constant.basic_type() == T_OBJECT) { 91.18 - ciObject* c = constant.as_object(); 91.19 - if (c->is_klass()) { 91.20 - // The constant returned for a klass is the ciKlass for the 91.21 - // entry. We want the java_mirror so get it. 91.22 - ciKlass* klass = c->as_klass(); 91.23 - if (klass->is_loaded()) { 91.24 - constant = ciConstant(T_OBJECT, klass->java_mirror()); 91.25 - } else { 91.26 - uncommon_trap(Deoptimization::make_trap_request 91.27 - (Deoptimization::Reason_unloaded, 91.28 - Deoptimization::Action_reinterpret, 91.29 - iter().get_constant_index()), 91.30 - NULL, "unresolved_klass"); 91.31 - break; 91.32 - } 91.33 - } 91.34 + if (constant.basic_type() == T_OBJECT && 91.35 + !constant.as_object()->is_loaded()) { 91.36 + int index = iter().get_constant_pool_index(); 91.37 + constantTag tag = iter().get_constant_pool_tag(index); 91.38 + uncommon_trap(Deoptimization::make_trap_request 91.39 + (Deoptimization::Reason_unloaded, 91.40 + Deoptimization::Action_reinterpret, 91.41 + index), 91.42 + NULL, tag.internal_name()); 91.43 + break; 91.44 } 91.45 + assert(constant.basic_type() != T_OBJECT || !constant.as_object()->is_klass(), 91.46 + "must be java_mirror of klass"); 91.47 bool pushed = push_constant(constant, true); 91.48 guarantee(pushed, "must be possible to push this constant"); 91.49 }
92.1 --- a/src/share/vm/opto/phaseX.cpp Wed Jun 30 18:57:35 2010 -0700 92.2 +++ b/src/share/vm/opto/phaseX.cpp Fri Jul 02 01:36:15 2010 -0700 92.3 @@ -1447,16 +1447,12 @@ 92.4 Node* m = n->out(i); 92.5 if( m->is_Phi() ) { 92.6 assert(type(m) == Type::TOP, "Unreachable region should not have live phis."); 92.7 - add_users_to_worklist(m); 92.8 - hash_delete(m); // Yank from hash before hacking edges 92.9 - subsume_node(m, nn); 92.10 + replace_node(m, nn); 92.11 --i; // deleted this phi; rescan starting with next position 92.12 } 92.13 } 92.14 } 92.15 - add_users_to_worklist(n); // Users of about-to-be-constant 'n' 92.16 - hash_delete(n); // Removed 'n' from table before subsuming it 92.17 - subsume_node(n,nn); // Update DefUse edges for new constant 92.18 + replace_node(n,nn); // Update DefUse edges for new constant 92.19 } 92.20 return nn; 92.21 }
93.1 --- a/src/share/vm/opto/phaseX.hpp Wed Jun 30 18:57:35 2010 -0700 93.2 +++ b/src/share/vm/opto/phaseX.hpp Fri Jul 02 01:36:15 2010 -0700 93.3 @@ -393,6 +393,10 @@ 93.4 93.5 // Idealize old Node 'n' with respect to its inputs and its value 93.6 virtual Node *transform_old( Node *a_node ); 93.7 + 93.8 + // Subsume users of node 'old' into node 'nn' 93.9 + void subsume_node( Node *old, Node *nn ); 93.10 + 93.11 protected: 93.12 93.13 // Idealize new Node 'n' with respect to its inputs and its value 93.14 @@ -439,10 +443,6 @@ 93.15 remove_globally_dead_node(dead); 93.16 } 93.17 93.18 - // Subsume users of node 'old' into node 'nn' 93.19 - // If no Def-Use info existed for 'nn' it will after call. 93.20 - void subsume_node( Node *old, Node *nn ); 93.21 - 93.22 // Add users of 'n' to worklist 93.23 void add_users_to_worklist0( Node *n ); 93.24 void add_users_to_worklist ( Node *n ); 93.25 @@ -450,7 +450,7 @@ 93.26 // Replace old node with new one. 93.27 void replace_node( Node *old, Node *nn ) { 93.28 add_users_to_worklist(old); 93.29 - hash_delete(old); 93.30 + hash_delete(old); // Yank from hash before hacking edges 93.31 subsume_node(old, nn); 93.32 } 93.33
94.1 --- a/src/share/vm/opto/split_if.cpp Wed Jun 30 18:57:35 2010 -0700 94.2 +++ b/src/share/vm/opto/split_if.cpp Fri Jul 02 01:36:15 2010 -0700 94.3 @@ -217,8 +217,7 @@ 94.4 register_new_node(phi, blk1); 94.5 94.6 // Remove cloned-up value from optimizer; use phi instead 94.7 - _igvn.hash_delete(n); 94.8 - _igvn.subsume_node( n, phi ); 94.9 + _igvn.replace_node( n, phi ); 94.10 94.11 // (There used to be a self-recursive call to split_up() here, 94.12 // but it is not needed. All necessary forward walking is done 94.13 @@ -352,8 +351,7 @@ 94.14 } 94.15 94.16 if (use_blk == NULL) { // He's dead, Jim 94.17 - _igvn.hash_delete(use); 94.18 - _igvn.subsume_node(use, C->top()); 94.19 + _igvn.replace_node(use, C->top()); 94.20 } 94.21 94.22 return use_blk;
95.1 --- a/src/share/vm/opto/superword.cpp Wed Jun 30 18:57:35 2010 -0700 95.2 +++ b/src/share/vm/opto/superword.cpp Fri Jul 02 01:36:15 2010 -0700 95.3 @@ -1172,8 +1172,7 @@ 95.4 _phase->set_ctrl(vn, _phase->get_ctrl(p->at(0))); 95.5 for (uint j = 0; j < p->size(); j++) { 95.6 Node* pm = p->at(j); 95.7 - _igvn.hash_delete(pm); 95.8 - _igvn.subsume_node(pm, vn); 95.9 + _igvn.replace_node(pm, vn); 95.10 } 95.11 _igvn._worklist.push(vn); 95.12 }
96.1 --- a/src/share/vm/opto/type.cpp Wed Jun 30 18:57:35 2010 -0700 96.2 +++ b/src/share/vm/opto/type.cpp Fri Jul 02 01:36:15 2010 -0700 96.3 @@ -182,6 +182,8 @@ 96.4 return t->hash(); 96.5 } 96.6 96.7 +#define SMALLINT ((juint)3) // a value too insignificant to consider widening 96.8 + 96.9 //--------------------------Initialize_shared---------------------------------- 96.10 void Type::Initialize_shared(Compile* current) { 96.11 // This method does not need to be locked because the first system 96.12 @@ -240,6 +242,7 @@ 96.13 assert( TypeInt::CC_GT == TypeInt::ONE, "types must match for CmpL to work" ); 96.14 assert( TypeInt::CC_EQ == TypeInt::ZERO, "types must match for CmpL to work" ); 96.15 assert( TypeInt::CC_GE == TypeInt::BOOL, "types must match for CmpL to work" ); 96.16 + assert( (juint)(TypeInt::CC->_hi - TypeInt::CC->_lo) <= SMALLINT, "CC is truly small"); 96.17 96.18 TypeLong::MINUS_1 = TypeLong::make(-1); // -1 96.19 TypeLong::ZERO = TypeLong::make( 0); // 0 96.20 @@ -1054,16 +1057,21 @@ 96.21 return (TypeInt*)(new TypeInt(lo,lo,WidenMin))->hashcons(); 96.22 } 96.23 96.24 -#define SMALLINT ((juint)3) // a value too insignificant to consider widening 96.25 - 96.26 -const TypeInt *TypeInt::make( jint lo, jint hi, int w ) { 96.27 +static int normalize_int_widen( jint lo, jint hi, int w ) { 96.28 // Certain normalizations keep us sane when comparing types. 96.29 // The 'SMALLINT' covers constants and also CC and its relatives. 
96.30 - assert(CC == NULL || (juint)(CC->_hi - CC->_lo) <= SMALLINT, "CC is truly small"); 96.31 if (lo <= hi) { 96.32 - if ((juint)(hi - lo) <= SMALLINT) w = Type::WidenMin; 96.33 - if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // plain int 96.34 + if ((juint)(hi - lo) <= SMALLINT) w = Type::WidenMin; 96.35 + if ((juint)(hi - lo) >= max_juint) w = Type::WidenMax; // TypeInt::INT 96.36 + } else { 96.37 + if ((juint)(lo - hi) <= SMALLINT) w = Type::WidenMin; 96.38 + if ((juint)(lo - hi) >= max_juint) w = Type::WidenMin; // dual TypeInt::INT 96.39 } 96.40 + return w; 96.41 +} 96.42 + 96.43 +const TypeInt *TypeInt::make( jint lo, jint hi, int w ) { 96.44 + w = normalize_int_widen(lo, hi, w); 96.45 return (TypeInt*)(new TypeInt(lo,hi,w))->hashcons(); 96.46 } 96.47 96.48 @@ -1103,14 +1111,14 @@ 96.49 96.50 // Expand covered set 96.51 const TypeInt *r = t->is_int(); 96.52 - // (Avoid TypeInt::make, to avoid the argument normalizations it enforces.) 96.53 - return (new TypeInt( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons(); 96.54 + return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ); 96.55 } 96.56 96.57 //------------------------------xdual------------------------------------------ 96.58 // Dual: reverse hi & lo; flip widen 96.59 const Type *TypeInt::xdual() const { 96.60 - return new TypeInt(_hi,_lo,WidenMax-_widen); 96.61 + int w = normalize_int_widen(_hi,_lo, WidenMax-_widen); 96.62 + return new TypeInt(_hi,_lo,w); 96.63 } 96.64 96.65 //------------------------------widen------------------------------------------ 96.66 @@ -1202,7 +1210,7 @@ 96.67 //-----------------------------filter------------------------------------------ 96.68 const Type *TypeInt::filter( const Type *kills ) const { 96.69 const TypeInt* ft = join(kills)->isa_int(); 96.70 - if (ft == NULL || ft->_lo > ft->_hi) 96.71 + if (ft == NULL || ft->empty()) 96.72 return Type::TOP; // Canonical empty value 96.73 if (ft->_widen < this->_widen) { 96.74 
// Do not allow the value of kill->_widen to affect the outcome. 96.75 @@ -1304,13 +1312,21 @@ 96.76 return (TypeLong*)(new TypeLong(lo,lo,WidenMin))->hashcons(); 96.77 } 96.78 96.79 +static int normalize_long_widen( jlong lo, jlong hi, int w ) { 96.80 + // Certain normalizations keep us sane when comparing types. 96.81 + // The 'SMALLINT' covers constants. 96.82 + if (lo <= hi) { 96.83 + if ((julong)(hi - lo) <= SMALLINT) w = Type::WidenMin; 96.84 + if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // TypeLong::LONG 96.85 + } else { 96.86 + if ((julong)(lo - hi) <= SMALLINT) w = Type::WidenMin; 96.87 + if ((julong)(lo - hi) >= max_julong) w = Type::WidenMin; // dual TypeLong::LONG 96.88 + } 96.89 + return w; 96.90 +} 96.91 + 96.92 const TypeLong *TypeLong::make( jlong lo, jlong hi, int w ) { 96.93 - // Certain normalizations keep us sane when comparing types. 96.94 - // The '1' covers constants. 96.95 - if (lo <= hi) { 96.96 - if ((julong)(hi - lo) <= SMALLINT) w = Type::WidenMin; 96.97 - if ((julong)(hi - lo) >= max_julong) w = Type::WidenMax; // plain long 96.98 - } 96.99 + w = normalize_long_widen(lo, hi, w); 96.100 return (TypeLong*)(new TypeLong(lo,hi,w))->hashcons(); 96.101 } 96.102 96.103 @@ -1351,14 +1367,14 @@ 96.104 96.105 // Expand covered set 96.106 const TypeLong *r = t->is_long(); // Turn into a TypeLong 96.107 - // (Avoid TypeLong::make, to avoid the argument normalizations it enforces.) 
96.108 - return (new TypeLong( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ))->hashcons(); 96.109 + return make( MIN2(_lo,r->_lo), MAX2(_hi,r->_hi), MAX2(_widen,r->_widen) ); 96.110 } 96.111 96.112 //------------------------------xdual------------------------------------------ 96.113 // Dual: reverse hi & lo; flip widen 96.114 const Type *TypeLong::xdual() const { 96.115 - return new TypeLong(_hi,_lo,WidenMax-_widen); 96.116 + int w = normalize_long_widen(_hi,_lo, WidenMax-_widen); 96.117 + return new TypeLong(_hi,_lo,w); 96.118 } 96.119 96.120 //------------------------------widen------------------------------------------ 96.121 @@ -1453,7 +1469,7 @@ 96.122 //-----------------------------filter------------------------------------------ 96.123 const Type *TypeLong::filter( const Type *kills ) const { 96.124 const TypeLong* ft = join(kills)->isa_long(); 96.125 - if (ft == NULL || ft->_lo > ft->_hi) 96.126 + if (ft == NULL || ft->empty()) 96.127 return Type::TOP; // Canonical empty value 96.128 if (ft->_widen < this->_widen) { 96.129 // Do not allow the value of kill->_widen to affect the outcome.
97.1 --- a/src/share/vm/prims/jvm.h Wed Jun 30 18:57:35 2010 -0700 97.2 +++ b/src/share/vm/prims/jvm.h Fri Jul 02 01:36:15 2010 -0700 97.3 @@ -1044,7 +1044,22 @@ 97.4 JVM_CONSTANT_Fieldref, 97.5 JVM_CONSTANT_Methodref, 97.6 JVM_CONSTANT_InterfaceMethodref, 97.7 - JVM_CONSTANT_NameAndType 97.8 + JVM_CONSTANT_NameAndType, 97.9 + JVM_CONSTANT_MethodHandle = 15, // JSR 292 97.10 + JVM_CONSTANT_MethodType = 16 // JSR 292 97.11 +}; 97.12 + 97.13 +/* JVM_CONSTANT_MethodHandle subtypes */ 97.14 +enum { 97.15 + JVM_REF_getField = 1, 97.16 + JVM_REF_getStatic = 2, 97.17 + JVM_REF_putField = 3, 97.18 + JVM_REF_putStatic = 4, 97.19 + JVM_REF_invokeVirtual = 5, 97.20 + JVM_REF_invokeStatic = 6, 97.21 + JVM_REF_invokeSpecial = 7, 97.22 + JVM_REF_newInvokeSpecial = 8, 97.23 + JVM_REF_invokeInterface = 9 97.24 }; 97.25 97.26 /* Used in the newarray instruction. */
98.1 --- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Wed Jun 30 18:57:35 2010 -0700 98.2 +++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Fri Jul 02 01:36:15 2010 -0700 98.3 @@ -217,21 +217,21 @@ 98.4 98.5 class nmethodDesc: public CHeapObj { 98.6 private: 98.7 - methodHandle _method; 98.8 + jmethodID _jmethod_id; 98.9 address _code_begin; 98.10 address _code_end; 98.11 jvmtiAddrLocationMap* _map; 98.12 jint _map_length; 98.13 public: 98.14 - nmethodDesc(methodHandle method, address code_begin, address code_end, 98.15 + nmethodDesc(jmethodID jmethod_id, address code_begin, address code_end, 98.16 jvmtiAddrLocationMap* map, jint map_length) { 98.17 - _method = method; 98.18 + _jmethod_id = jmethod_id; 98.19 _code_begin = code_begin; 98.20 _code_end = code_end; 98.21 _map = map; 98.22 _map_length = map_length; 98.23 } 98.24 - methodHandle method() const { return _method; } 98.25 + jmethodID jmethod_id() const { return _jmethod_id; } 98.26 address code_begin() const { return _code_begin; } 98.27 address code_end() const { return _code_end; } 98.28 jvmtiAddrLocationMap* map() const { return _map; } 98.29 @@ -323,8 +323,7 @@ 98.30 JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length); 98.31 98.32 // record the nmethod details 98.33 - methodHandle mh(nm->method()); 98.34 - nmethodDesc* snm = new nmethodDesc(mh, 98.35 + nmethodDesc* snm = new nmethodDesc(nm->get_and_cache_jmethod_id(), 98.36 nm->code_begin(), 98.37 nm->code_end(), 98.38 map, 98.39 @@ -367,8 +366,7 @@ 98.40 // iterate over the list and post an event for each nmethod 98.41 nmethodDesc* nm_desc = collector.first(); 98.42 while (nm_desc != NULL) { 98.43 - methodOop method = nm_desc->method()(); 98.44 - jmethodID mid = method->jmethod_id(); 98.45 + jmethodID mid = nm_desc->jmethod_id(); 98.46 assert(mid != NULL, "checking"); 98.47 JvmtiExport::post_compiled_method_load(env, mid, 98.48 (jint)(nm_desc->code_end() - nm_desc->code_begin()),
99.1 --- a/src/share/vm/prims/methodComparator.cpp Wed Jun 30 18:57:35 2010 -0700 99.2 +++ b/src/share/vm/prims/methodComparator.cpp Fri Jul 02 01:36:15 2010 -0700 99.3 @@ -163,10 +163,10 @@ 99.4 99.5 case Bytecodes::_ldc : // fall through 99.6 case Bytecodes::_ldc_w : { 99.7 - Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method()(), _s_old->bcp()); 99.8 - Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method()(), _s_new->bcp()); 99.9 - int cpi_old = ldc_old->index(); 99.10 - int cpi_new = ldc_new->index(); 99.11 + Bytecode_loadconstant* ldc_old = Bytecode_loadconstant_at(_s_old->method(), _s_old->bci()); 99.12 + Bytecode_loadconstant* ldc_new = Bytecode_loadconstant_at(_s_new->method(), _s_new->bci()); 99.13 + int cpi_old = ldc_old->pool_index(); 99.14 + int cpi_new = ldc_new->pool_index(); 99.15 constantTag tag_old = _old_cp->tag_at(cpi_old); 99.16 constantTag tag_new = _new_cp->tag_at(cpi_new); 99.17 if (tag_old.is_int() || tag_old.is_float()) { 99.18 @@ -187,12 +187,30 @@ 99.19 if (strcmp(_old_cp->string_at_noresolve(cpi_old), 99.20 _new_cp->string_at_noresolve(cpi_new)) != 0) 99.21 return false; 99.22 - } else { // tag_old should be klass - 4881222 99.23 + } else if (tag_old.is_klass() || tag_old.is_unresolved_klass()) { 99.24 + // tag_old should be klass - 4881222 99.25 if (! 
(tag_new.is_unresolved_klass() || tag_new.is_klass())) 99.26 return false; 99.27 if (_old_cp->klass_at_noresolve(cpi_old) != 99.28 _new_cp->klass_at_noresolve(cpi_new)) 99.29 return false; 99.30 + } else if (tag_old.is_method_type() && tag_new.is_method_type()) { 99.31 + int mti_old = _old_cp->method_type_index_at(cpi_old); 99.32 + int mti_new = _new_cp->method_type_index_at(cpi_new); 99.33 + if ((_old_cp->symbol_at(mti_old) != _new_cp->symbol_at(mti_new))) 99.34 + return false; 99.35 + } else if (tag_old.is_method_handle() && tag_new.is_method_handle()) { 99.36 + if (_old_cp->method_handle_ref_kind_at(cpi_old) != 99.37 + _new_cp->method_handle_ref_kind_at(cpi_new)) 99.38 + return false; 99.39 + int mhi_old = _old_cp->method_handle_index_at(cpi_old); 99.40 + int mhi_new = _new_cp->method_handle_index_at(cpi_new); 99.41 + if ((_old_cp->uncached_klass_ref_at_noresolve(mhi_old) != _new_cp->uncached_klass_ref_at_noresolve(mhi_new)) || 99.42 + (_old_cp->uncached_name_ref_at(mhi_old) != _new_cp->uncached_name_ref_at(mhi_new)) || 99.43 + (_old_cp->uncached_signature_ref_at(mhi_old) != _new_cp->uncached_signature_ref_at(mhi_new))) 99.44 + return false; 99.45 + } else { 99.46 + return false; // unknown tag 99.47 } 99.48 break; 99.49 }
100.1 --- a/src/share/vm/runtime/arguments.cpp Wed Jun 30 18:57:35 2010 -0700 100.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Jul 02 01:36:15 2010 -0700 100.3 @@ -1376,11 +1376,6 @@ 100.4 } 100.5 no_shared_spaces(); 100.6 100.7 - // Set the maximum pause time goal to be a reasonable default. 100.8 - if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) { 100.9 - FLAG_SET_DEFAULT(MaxGCPauseMillis, 200); 100.10 - } 100.11 - 100.12 if (FLAG_IS_DEFAULT(MarkStackSize)) { 100.13 FLAG_SET_DEFAULT(MarkStackSize, 128 * TASKQUEUE_SIZE); 100.14 } 100.15 @@ -1513,6 +1508,9 @@ 100.16 if (AggressiveOpts && FLAG_IS_DEFAULT(BiasedLockingStartupDelay)) { 100.17 FLAG_SET_DEFAULT(BiasedLockingStartupDelay, 500); 100.18 } 100.19 + if (AggressiveOpts && FLAG_IS_DEFAULT(OptimizeStringConcat)) { 100.20 + FLAG_SET_DEFAULT(OptimizeStringConcat, true); 100.21 + } 100.22 #endif 100.23 100.24 if (AggressiveOpts) { 100.25 @@ -1697,20 +1695,21 @@ 100.26 100.27 status = status && verify_percentage(GCHeapFreeLimit, "GCHeapFreeLimit"); 100.28 100.29 - // Check user specified sharing option conflict with Parallel GC 100.30 - bool cannot_share = ((UseConcMarkSweepGC || CMSIncrementalMode) || UseG1GC || UseParNewGC || 100.31 - UseParallelGC || UseParallelOldGC || 100.32 - SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages)); 100.33 - 100.34 + // Check whether user-specified sharing option conflicts with GC or page size. 100.35 + // Both sharing and large pages are enabled by default on some platforms; 100.36 + // large pages override sharing only if explicitly set on the command line. 100.37 + const bool cannot_share = UseConcMarkSweepGC || CMSIncrementalMode || 100.38 + UseG1GC || UseParNewGC || UseParallelGC || UseParallelOldGC || 100.39 + UseLargePages && FLAG_IS_CMDLINE(UseLargePages); 100.40 if (cannot_share) { 100.41 // Either force sharing on by forcing the other options off, or 100.42 // force sharing off. 
100.43 if (DumpSharedSpaces || ForceSharedSpaces) { 100.44 jio_fprintf(defaultStream::error_stream(), 100.45 - "Reverting to Serial GC because of %s\n", 100.46 - ForceSharedSpaces ? " -Xshare:on" : "-Xshare:dump"); 100.47 + "Using Serial GC and default page size because of %s\n", 100.48 + ForceSharedSpaces ? "-Xshare:on" : "-Xshare:dump"); 100.49 force_serial_gc(); 100.50 - FLAG_SET_DEFAULT(SOLARIS_ONLY(UseISM) NOT_SOLARIS(UseLargePages), false); 100.51 + FLAG_SET_DEFAULT(UseLargePages, false); 100.52 } else { 100.53 if (UseSharedSpaces && Verbose) { 100.54 jio_fprintf(defaultStream::error_stream(), 100.55 @@ -1719,6 +1718,8 @@ 100.56 } 100.57 no_shared_spaces(); 100.58 } 100.59 + } else if (UseLargePages && (UseSharedSpaces || DumpSharedSpaces)) { 100.60 + FLAG_SET_DEFAULT(UseLargePages, false); 100.61 } 100.62 100.63 status = status && check_gc_consistency();
101.1 --- a/src/share/vm/runtime/globals.hpp Wed Jun 30 18:57:35 2010 -0700 101.2 +++ b/src/share/vm/runtime/globals.hpp Fri Jul 02 01:36:15 2010 -0700 101.3 @@ -1975,7 +1975,7 @@ 101.4 "Adaptive size policy maximum GC pause time goal in msec, " \ 101.5 "or (G1 Only) the max. GC time per MMU time slice") \ 101.6 \ 101.7 - product(intx, GCPauseIntervalMillis, 500, \ 101.8 + product(uintx, GCPauseIntervalMillis, 0, \ 101.9 "Time slice for MMU specification") \ 101.10 \ 101.11 product(uintx, MaxGCMinorPauseMillis, max_uintx, \
102.1 --- a/src/share/vm/runtime/jniHandles.cpp Wed Jun 30 18:57:35 2010 -0700 102.2 +++ b/src/share/vm/runtime/jniHandles.cpp Fri Jul 02 01:36:15 2010 -0700 102.3 @@ -66,6 +66,7 @@ 102.4 102.5 102.6 jobject JNIHandles::make_global(Handle obj) { 102.7 + assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); 102.8 jobject res = NULL; 102.9 if (!obj.is_null()) { 102.10 // ignore null handles 102.11 @@ -81,6 +82,7 @@ 102.12 102.13 102.14 jobject JNIHandles::make_weak_global(Handle obj) { 102.15 + assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC"); 102.16 jobject res = NULL; 102.17 if (!obj.is_null()) { 102.18 // ignore null handles
103.1 --- a/src/share/vm/runtime/sharedRuntime.cpp Wed Jun 30 18:57:35 2010 -0700 103.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp Fri Jul 02 01:36:15 2010 -0700 103.3 @@ -779,7 +779,7 @@ 103.4 103.5 // Find bytecode 103.6 Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci); 103.7 - bc = bytecode->adjusted_invoke_code(); 103.8 + bc = bytecode->java_code(); 103.9 int bytecode_index = bytecode->index(); 103.10 103.11 // Find receiver for non-static call
104.1 --- a/src/share/vm/runtime/stubRoutines.cpp Wed Jun 30 18:57:35 2010 -0700 104.2 +++ b/src/share/vm/runtime/stubRoutines.cpp Fri Jul 02 01:36:15 2010 -0700 104.3 @@ -135,28 +135,32 @@ 104.4 static void test_arraycopy_func(address func, int alignment) { 104.5 int v = 0xcc; 104.6 int v2 = 0x11; 104.7 - jlong lbuffer[2]; 104.8 - jlong lbuffer2[2]; 104.9 - address buffer = (address) lbuffer; 104.10 - address buffer2 = (address) lbuffer2; 104.11 + jlong lbuffer[8]; 104.12 + jlong lbuffer2[8]; 104.13 + address fbuffer = (address) lbuffer; 104.14 + address fbuffer2 = (address) lbuffer2; 104.15 unsigned int i; 104.16 for (i = 0; i < sizeof(lbuffer); i++) { 104.17 - buffer[i] = v; buffer2[i] = v2; 104.18 + fbuffer[i] = v; fbuffer2[i] = v2; 104.19 } 104.20 + // C++ does not guarantee jlong[] array alignment to 8 bytes. 104.21 + // Use middle of array to check that memory before it is not modified. 104.22 + address buffer = (address) round_to((intptr_t)&lbuffer[4], BytesPerLong); 104.23 + address buffer2 = (address) round_to((intptr_t)&lbuffer2[4], BytesPerLong); 104.24 // do an aligned copy 104.25 ((arraycopy_fn)func)(buffer, buffer2, 0); 104.26 for (i = 0; i < sizeof(lbuffer); i++) { 104.27 - assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything"); 104.28 + assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything"); 104.29 } 104.30 // adjust destination alignment 104.31 ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0); 104.32 for (i = 0; i < sizeof(lbuffer); i++) { 104.33 - assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything"); 104.34 + assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything"); 104.35 } 104.36 // adjust source alignment 104.37 ((arraycopy_fn)func)(buffer + alignment, buffer2, 0); 104.38 for (i = 0; i < sizeof(lbuffer); i++) { 104.39 - assert(buffer[i] == v && buffer2[i] == v2, "shouldn't have copied anything"); 104.40 + assert(fbuffer[i] == v && fbuffer2[i] == v2, 
"shouldn't have copied anything"); 104.41 } 104.42 } 104.43 #endif 104.44 @@ -183,7 +187,7 @@ 104.45 test_arraycopy_func(arrayof_##type##_arraycopy(), sizeof(HeapWord)); \ 104.46 test_arraycopy_func(arrayof_##type##_disjoint_arraycopy(), sizeof(HeapWord)) 104.47 104.48 - // Make sure all the arraycopy stubs properly handle zeros 104.49 + // Make sure all the arraycopy stubs properly handle zero count 104.50 TEST_ARRAYCOPY(jbyte); 104.51 TEST_ARRAYCOPY(jshort); 104.52 TEST_ARRAYCOPY(jint); 104.53 @@ -191,6 +195,25 @@ 104.54 104.55 #undef TEST_ARRAYCOPY 104.56 104.57 +#define TEST_COPYRTN(type) \ 104.58 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_##type##s_atomic), sizeof(type)); \ 104.59 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::arrayof_conjoint_##type##s), (int)MAX2(sizeof(HeapWord), sizeof(type))) 104.60 + 104.61 + // Make sure all the copy runtime routines properly handle zero count 104.62 + TEST_COPYRTN(jbyte); 104.63 + TEST_COPYRTN(jshort); 104.64 + TEST_COPYRTN(jint); 104.65 + TEST_COPYRTN(jlong); 104.66 + 104.67 +#undef TEST_COPYRTN 104.68 + 104.69 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::conjoint_words), sizeof(HeapWord)); 104.70 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words), sizeof(HeapWord)); 104.71 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::disjoint_words_atomic), sizeof(HeapWord)); 104.72 + // Aligned to BytesPerLong 104.73 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_conjoint_words), sizeof(jlong)); 104.74 + test_arraycopy_func(CAST_FROM_FN_PTR(address, Copy::aligned_disjoint_words), sizeof(jlong)); 104.75 + 104.76 #endif 104.77 } 104.78 104.79 @@ -221,15 +244,13 @@ 104.80 #ifndef PRODUCT 104.81 SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy 104.82 #endif // !PRODUCT 104.83 - assert(count != 0, "count should be non-zero"); 104.84 - Copy::conjoint_bytes_atomic(src, dest, count); 104.85 + Copy::conjoint_jbytes_atomic(src, dest, count); 
104.86 JRT_END 104.87 104.88 JRT_LEAF(void, StubRoutines::jshort_copy(jshort* src, jshort* dest, size_t count)) 104.89 #ifndef PRODUCT 104.90 SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy 104.91 #endif // !PRODUCT 104.92 - assert(count != 0, "count should be non-zero"); 104.93 Copy::conjoint_jshorts_atomic(src, dest, count); 104.94 JRT_END 104.95 104.96 @@ -237,7 +258,6 @@ 104.97 #ifndef PRODUCT 104.98 SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy 104.99 #endif // !PRODUCT 104.100 - assert(count != 0, "count should be non-zero"); 104.101 Copy::conjoint_jints_atomic(src, dest, count); 104.102 JRT_END 104.103 104.104 @@ -245,7 +265,6 @@ 104.105 #ifndef PRODUCT 104.106 SharedRuntime::_jlong_array_copy_ctr++; // Slow-path long/double array copy 104.107 #endif // !PRODUCT 104.108 - assert(count != 0, "count should be non-zero"); 104.109 Copy::conjoint_jlongs_atomic(src, dest, count); 104.110 JRT_END 104.111 104.112 @@ -263,15 +282,13 @@ 104.113 #ifndef PRODUCT 104.114 SharedRuntime::_jbyte_array_copy_ctr++; // Slow-path byte array copy 104.115 #endif // !PRODUCT 104.116 - assert(count != 0, "count should be non-zero"); 104.117 - Copy::arrayof_conjoint_bytes(src, dest, count); 104.118 + Copy::arrayof_conjoint_jbytes(src, dest, count); 104.119 JRT_END 104.120 104.121 JRT_LEAF(void, StubRoutines::arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count)) 104.122 #ifndef PRODUCT 104.123 SharedRuntime::_jshort_array_copy_ctr++; // Slow-path short/char array copy 104.124 #endif // !PRODUCT 104.125 - assert(count != 0, "count should be non-zero"); 104.126 Copy::arrayof_conjoint_jshorts(src, dest, count); 104.127 JRT_END 104.128 104.129 @@ -279,7 +296,6 @@ 104.130 #ifndef PRODUCT 104.131 SharedRuntime::_jint_array_copy_ctr++; // Slow-path int/float array copy 104.132 #endif // !PRODUCT 104.133 - assert(count != 0, "count should be non-zero"); 104.134 Copy::arrayof_conjoint_jints(src, dest, count); 104.135 JRT_END 
104.136 104.137 @@ -287,7 +303,6 @@ 104.138 #ifndef PRODUCT 104.139 SharedRuntime::_jlong_array_copy_ctr++; // Slow-path int/float array copy 104.140 #endif // !PRODUCT 104.141 - assert(count != 0, "count should be non-zero"); 104.142 Copy::arrayof_conjoint_jlongs(src, dest, count); 104.143 JRT_END 104.144
105.1 --- a/src/share/vm/runtime/sweeper.cpp Wed Jun 30 18:57:35 2010 -0700 105.2 +++ b/src/share/vm/runtime/sweeper.cpp Fri Jul 02 01:36:15 2010 -0700 105.3 @@ -26,7 +26,7 @@ 105.4 # include "incls/_sweeper.cpp.incl" 105.5 105.6 long NMethodSweeper::_traversals = 0; // No. of stack traversals performed 105.7 -CodeBlob* NMethodSweeper::_current = NULL; // Current nmethod 105.8 +nmethod* NMethodSweeper::_current = NULL; // Current nmethod 105.9 int NMethodSweeper::_seen = 0 ; // No. of blobs we have currently processed in current pass of CodeCache 105.10 int NMethodSweeper::_invocations = 0; // No. of invocations left until we are completed with this pass 105.11 105.12 @@ -171,20 +171,16 @@ 105.13 // Since we will give up the CodeCache_lock, always skip ahead to an nmethod. 105.14 // Other blobs can be deleted by other threads 105.15 // Read next before we potentially delete current 105.16 - CodeBlob* next = CodeCache::next_nmethod(_current); 105.17 + nmethod* next = CodeCache::next_nmethod(_current); 105.18 105.19 // Now ready to process nmethod and give up CodeCache_lock 105.20 { 105.21 MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); 105.22 - process_nmethod((nmethod *)_current); 105.23 + process_nmethod(_current); 105.24 } 105.25 _seen++; 105.26 _current = next; 105.27 } 105.28 - 105.29 - // Skip forward to the next nmethod (if any). Code blobs other than nmethods 105.30 - // can be freed async to us and make _current invalid while we sleep. 105.31 - _current = CodeCache::next_nmethod(_current); 105.32 } 105.33 105.34 if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
106.1 --- a/src/share/vm/runtime/sweeper.hpp Wed Jun 30 18:57:35 2010 -0700 106.2 +++ b/src/share/vm/runtime/sweeper.hpp Fri Jul 02 01:36:15 2010 -0700 106.3 @@ -29,7 +29,7 @@ 106.4 106.5 class NMethodSweeper : public AllStatic { 106.6 static long _traversals; // Stack traversal count 106.7 - static CodeBlob* _current; // Current nmethod 106.8 + static nmethod* _current; // Current nmethod 106.9 static int _seen; // Nof. nmethod we have currently processed in current pass of CodeCache 106.10 static int _invocations; // No. of invocations left until we are completed with this pass 106.11
107.1 --- a/src/share/vm/runtime/thread.cpp Wed Jun 30 18:57:35 2010 -0700 107.2 +++ b/src/share/vm/runtime/thread.cpp Fri Jul 02 01:36:15 2010 -0700 107.3 @@ -2700,7 +2700,7 @@ 107.4 if (in_bytes(size_in_bytes) != 0) { 107.5 _popframe_preserved_args = NEW_C_HEAP_ARRAY(char, in_bytes(size_in_bytes)); 107.6 _popframe_preserved_args_size = in_bytes(size_in_bytes); 107.7 - Copy::conjoint_bytes(start, _popframe_preserved_args, _popframe_preserved_args_size); 107.8 + Copy::conjoint_jbytes(start, _popframe_preserved_args, _popframe_preserved_args_size); 107.9 } 107.10 } 107.11
108.1 --- a/src/share/vm/runtime/vframeArray.cpp Wed Jun 30 18:57:35 2010 -0700 108.2 +++ b/src/share/vm/runtime/vframeArray.cpp Fri Jul 02 01:36:15 2010 -0700 108.3 @@ -355,9 +355,9 @@ 108.4 } else { 108.5 base = iframe()->interpreter_frame_expression_stack(); 108.6 } 108.7 - Copy::conjoint_bytes(saved_args, 108.8 - base, 108.9 - popframe_preserved_args_size_in_bytes); 108.10 + Copy::conjoint_jbytes(saved_args, 108.11 + base, 108.12 + popframe_preserved_args_size_in_bytes); 108.13 thread->popframe_free_preserved_args(); 108.14 } 108.15 }
109.1 --- a/src/share/vm/runtime/virtualspace.cpp Wed Jun 30 18:57:35 2010 -0700 109.2 +++ b/src/share/vm/runtime/virtualspace.cpp Fri Jul 02 01:36:15 2010 -0700 109.3 @@ -111,6 +111,35 @@ 109.4 return result; 109.5 } 109.6 109.7 +// Helper method. 109.8 +static bool failed_to_reserve_as_requested(char* base, char* requested_address, 109.9 + const size_t size, bool special) 109.10 +{ 109.11 + if (base == requested_address || requested_address == NULL) 109.12 + return false; // did not fail 109.13 + 109.14 + if (base != NULL) { 109.15 + // Different reserve address may be acceptable in other cases 109.16 + // but for compressed oops heap should be at requested address. 109.17 + assert(UseCompressedOops, "currently requested address used only for compressed oops"); 109.18 + if (PrintCompressedOopsMode) { 109.19 + tty->cr(); 109.20 + tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address); 109.21 + } 109.22 + // OS ignored requested address. Try different address. 109.23 + if (special) { 109.24 + if (!os::release_memory_special(base, size)) { 109.25 + fatal("os::release_memory_special failed"); 109.26 + } 109.27 + } else { 109.28 + if (!os::release_memory(base, size)) { 109.29 + fatal("os::release_memory failed"); 109.30 + } 109.31 + } 109.32 + } 109.33 + return true; 109.34 +} 109.35 + 109.36 ReservedSpace::ReservedSpace(const size_t prefix_size, 109.37 const size_t prefix_align, 109.38 const size_t suffix_size, 109.39 @@ -129,6 +158,10 @@ 109.40 assert((suffix_align & prefix_align - 1) == 0, 109.41 "suffix_align not divisible by prefix_align"); 109.42 109.43 + // Assert that if noaccess_prefix is used, it is the same as prefix_align. 
109.44 + assert(noaccess_prefix == 0 || 109.45 + noaccess_prefix == prefix_align, "noaccess prefix wrong"); 109.46 + 109.47 // Add in noaccess_prefix to prefix_size; 109.48 const size_t adjusted_prefix_size = prefix_size + noaccess_prefix; 109.49 const size_t size = adjusted_prefix_size + suffix_size; 109.50 @@ -150,15 +183,16 @@ 109.51 _noaccess_prefix = 0; 109.52 _executable = false; 109.53 109.54 - // Assert that if noaccess_prefix is used, it is the same as prefix_align. 109.55 - assert(noaccess_prefix == 0 || 109.56 - noaccess_prefix == prefix_align, "noaccess prefix wrong"); 109.57 - 109.58 // Optimistically try to reserve the exact size needed. 109.59 char* addr; 109.60 if (requested_address != 0) { 109.61 - addr = os::attempt_reserve_memory_at(size, 109.62 - requested_address-noaccess_prefix); 109.63 + requested_address -= noaccess_prefix; // adjust address 109.64 + assert(requested_address != NULL, "huge noaccess prefix?"); 109.65 + addr = os::attempt_reserve_memory_at(size, requested_address); 109.66 + if (failed_to_reserve_as_requested(addr, requested_address, size, false)) { 109.67 + // OS ignored requested address. Try different address. 109.68 + addr = NULL; 109.69 + } 109.70 } else { 109.71 addr = os::reserve_memory(size, NULL, prefix_align); 109.72 } 109.73 @@ -222,11 +256,20 @@ 109.74 bool special = large && !os::can_commit_large_page_memory(); 109.75 char* base = NULL; 109.76 109.77 + if (requested_address != 0) { 109.78 + requested_address -= noaccess_prefix; // adjust requested address 109.79 + assert(requested_address != NULL, "huge noaccess prefix?"); 109.80 + } 109.81 + 109.82 if (special) { 109.83 109.84 base = os::reserve_memory_special(size, requested_address, executable); 109.85 109.86 if (base != NULL) { 109.87 + if (failed_to_reserve_as_requested(base, requested_address, size, true)) { 109.88 + // OS ignored requested address. Try different address. 
109.89 + return; 109.90 + } 109.91 // Check alignment constraints 109.92 if (alignment > 0) { 109.93 assert((uintptr_t) base % alignment == 0, 109.94 @@ -235,6 +278,13 @@ 109.95 _special = true; 109.96 } else { 109.97 // failed; try to reserve regular memory below 109.98 + if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) || 109.99 + !FLAG_IS_DEFAULT(LargePageSizeInBytes))) { 109.100 + if (PrintCompressedOopsMode) { 109.101 + tty->cr(); 109.102 + tty->print_cr("Reserve regular memory without large pages."); 109.103 + } 109.104 + } 109.105 } 109.106 } 109.107 109.108 @@ -248,8 +298,11 @@ 109.109 // important. If available space is not detected, return NULL. 109.110 109.111 if (requested_address != 0) { 109.112 - base = os::attempt_reserve_memory_at(size, 109.113 - requested_address-noaccess_prefix); 109.114 + base = os::attempt_reserve_memory_at(size, requested_address); 109.115 + if (failed_to_reserve_as_requested(base, requested_address, size, false)) { 109.116 + // OS ignored requested address. Try different address. 109.117 + base = NULL; 109.118 + } 109.119 } else { 109.120 base = os::reserve_memory(size, NULL, alignment); 109.121 } 109.122 @@ -365,7 +418,12 @@ 109.123 } 109.124 109.125 void ReservedSpace::protect_noaccess_prefix(const size_t size) { 109.126 - // If there is noaccess prefix, return. 109.127 + assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL && 109.128 + (size_t(_base + _size) > OopEncodingHeapMax) && 109.129 + Universe::narrow_oop_use_implicit_null_checks()), 109.130 + "noaccess_prefix should be used only with non zero based compressed oops"); 109.131 + 109.132 + // If there is no noaccess prefix, return. 
109.133 if (_noaccess_prefix == 0) return; 109.134 109.135 assert(_noaccess_prefix >= (size_t)os::vm_page_size(), 109.136 @@ -377,6 +435,10 @@ 109.137 _special)) { 109.138 fatal("cannot protect protection page"); 109.139 } 109.140 + if (PrintCompressedOopsMode) { 109.141 + tty->cr(); 109.142 + tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix); 109.143 + } 109.144 109.145 _base += _noaccess_prefix; 109.146 _size -= _noaccess_prefix;
110.1 --- a/src/share/vm/utilities/constantTag.cpp Wed Jun 30 18:57:35 2010 -0700 110.2 +++ b/src/share/vm/utilities/constantTag.cpp Fri Jul 02 01:36:15 2010 -0700 110.3 @@ -28,56 +28,85 @@ 110.4 #ifndef PRODUCT 110.5 110.6 void constantTag::print_on(outputStream* st) const { 110.7 + st->print(internal_name()); 110.8 +} 110.9 + 110.10 +#endif // PRODUCT 110.11 + 110.12 +BasicType constantTag::basic_type() const { 110.13 switch (_tag) { 110.14 + case JVM_CONSTANT_Integer : 110.15 + return T_INT; 110.16 + case JVM_CONSTANT_Float : 110.17 + return T_FLOAT; 110.18 + case JVM_CONSTANT_Long : 110.19 + return T_LONG; 110.20 + case JVM_CONSTANT_Double : 110.21 + return T_DOUBLE; 110.22 + 110.23 case JVM_CONSTANT_Class : 110.24 - st->print("Class"); 110.25 - break; 110.26 - case JVM_CONSTANT_Fieldref : 110.27 - st->print("Field"); 110.28 - break; 110.29 - case JVM_CONSTANT_Methodref : 110.30 - st->print("Method"); 110.31 - break; 110.32 - case JVM_CONSTANT_InterfaceMethodref : 110.33 - st->print("InterfaceMethod"); 110.34 - break; 110.35 case JVM_CONSTANT_String : 110.36 - st->print("String"); 110.37 - break; 110.38 - case JVM_CONSTANT_Integer : 110.39 - st->print("Integer"); 110.40 - break; 110.41 - case JVM_CONSTANT_Float : 110.42 - st->print("Float"); 110.43 - break; 110.44 - case JVM_CONSTANT_Long : 110.45 - st->print("Long"); 110.46 - break; 110.47 - case JVM_CONSTANT_Double : 110.48 - st->print("Double"); 110.49 - break; 110.50 - case JVM_CONSTANT_NameAndType : 110.51 - st->print("NameAndType"); 110.52 - break; 110.53 - case JVM_CONSTANT_Utf8 : 110.54 - st->print("Utf8"); 110.55 - break; 110.56 case JVM_CONSTANT_UnresolvedClass : 110.57 - st->print("Unresolved class"); 110.58 - break; 110.59 + case JVM_CONSTANT_UnresolvedClassInError : 110.60 case JVM_CONSTANT_ClassIndex : 110.61 - st->print("Unresolved class index"); 110.62 - break; 110.63 case JVM_CONSTANT_UnresolvedString : 110.64 - st->print("Unresolved string"); 110.65 - break; 110.66 case 
JVM_CONSTANT_StringIndex : 110.67 - st->print("Unresolved string index"); 110.68 - break; 110.69 + case JVM_CONSTANT_MethodHandle : 110.70 + case JVM_CONSTANT_MethodType : 110.71 + case JVM_CONSTANT_Object : 110.72 + return T_OBJECT; 110.73 default: 110.74 ShouldNotReachHere(); 110.75 - break; 110.76 + return T_ILLEGAL; 110.77 } 110.78 } 110.79 110.80 -#endif // PRODUCT 110.81 + 110.82 + 110.83 +const char* constantTag::internal_name() const { 110.84 + switch (_tag) { 110.85 + case JVM_CONSTANT_Invalid : 110.86 + return "Invalid index"; 110.87 + case JVM_CONSTANT_Class : 110.88 + return "Class"; 110.89 + case JVM_CONSTANT_Fieldref : 110.90 + return "Field"; 110.91 + case JVM_CONSTANT_Methodref : 110.92 + return "Method"; 110.93 + case JVM_CONSTANT_InterfaceMethodref : 110.94 + return "InterfaceMethod"; 110.95 + case JVM_CONSTANT_String : 110.96 + return "String"; 110.97 + case JVM_CONSTANT_Integer : 110.98 + return "Integer"; 110.99 + case JVM_CONSTANT_Float : 110.100 + return "Float"; 110.101 + case JVM_CONSTANT_Long : 110.102 + return "Long"; 110.103 + case JVM_CONSTANT_Double : 110.104 + return "Double"; 110.105 + case JVM_CONSTANT_NameAndType : 110.106 + return "NameAndType"; 110.107 + case JVM_CONSTANT_MethodHandle : 110.108 + return "MethodHandle"; 110.109 + case JVM_CONSTANT_MethodType : 110.110 + return "MethodType"; 110.111 + case JVM_CONSTANT_Object : 110.112 + return "Object"; 110.113 + case JVM_CONSTANT_Utf8 : 110.114 + return "Utf8"; 110.115 + case JVM_CONSTANT_UnresolvedClass : 110.116 + return "Unresolved Class"; 110.117 + case JVM_CONSTANT_UnresolvedClassInError : 110.118 + return "Unresolved Class Error"; 110.119 + case JVM_CONSTANT_ClassIndex : 110.120 + return "Unresolved Class Index"; 110.121 + case JVM_CONSTANT_UnresolvedString : 110.122 + return "Unresolved String"; 110.123 + case JVM_CONSTANT_StringIndex : 110.124 + return "Unresolved String Index"; 110.125 + default: 110.126 + ShouldNotReachHere(); 110.127 + return "Illegal"; 110.128 + } 
110.129 +}
111.1 --- a/src/share/vm/utilities/constantTag.hpp Wed Jun 30 18:57:35 2010 -0700 111.2 +++ b/src/share/vm/utilities/constantTag.hpp Fri Jul 02 01:36:15 2010 -0700 111.3 @@ -78,13 +78,24 @@ 111.4 bool is_field_or_method() const { return is_field() || is_method() || is_interface_method(); } 111.5 bool is_symbol() const { return is_utf8(); } 111.6 111.7 + bool is_method_type() const { return _tag == JVM_CONSTANT_MethodType; } 111.8 + bool is_method_handle() const { return _tag == JVM_CONSTANT_MethodHandle; } 111.9 + 111.10 + constantTag() { 111.11 + _tag = JVM_CONSTANT_Invalid; 111.12 + } 111.13 constantTag(jbyte tag) { 111.14 assert((tag >= 0 && tag <= JVM_CONSTANT_NameAndType) || 111.15 + (tag >= JVM_CONSTANT_MethodHandle && tag <= JVM_CONSTANT_MethodType) || 111.16 (tag >= JVM_CONSTANT_InternalMin && tag <= JVM_CONSTANT_InternalMax), "Invalid constant tag"); 111.17 _tag = tag; 111.18 } 111.19 111.20 jbyte value() { return _tag; } 111.21 111.22 + BasicType basic_type() const; // if used with ldc, what kind of value gets pushed? 111.23 + 111.24 + const char* internal_name() const; // for error reporting 111.25 + 111.26 void print_on(outputStream* st) const PRODUCT_RETURN; 111.27 };
112.1 --- a/src/share/vm/utilities/copy.cpp Wed Jun 30 18:57:35 2010 -0700 112.2 +++ b/src/share/vm/utilities/copy.cpp Fri Jul 02 01:36:15 2010 -0700 112.3 @@ -48,7 +48,7 @@ 112.4 Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort)); 112.5 } else { 112.6 // Not aligned, so no need to be atomic. 112.7 - Copy::conjoint_bytes((void*) src, (void*) dst, size); 112.8 + Copy::conjoint_jbytes((void*) src, (void*) dst, size); 112.9 } 112.10 } 112.11
113.1 --- a/src/share/vm/utilities/copy.hpp Wed Jun 30 18:57:35 2010 -0700 113.2 +++ b/src/share/vm/utilities/copy.hpp Fri Jul 02 01:36:15 2010 -0700 113.3 @@ -73,6 +73,9 @@ 113.4 // whole alignment units. E.g., if BytesPerLong is 2x word alignment, an odd 113.5 // count may copy an extra word. In the arrayof case, we are allowed to copy 113.6 // only the number of copy units specified. 113.7 + // 113.8 + // All callees check count for 0. 113.9 + // 113.10 113.11 // HeapWords 113.12 113.13 @@ -99,7 +102,6 @@ 113.14 // Object-aligned words, conjoint, not atomic on each word 113.15 static void aligned_conjoint_words(HeapWord* from, HeapWord* to, size_t count) { 113.16 assert_params_aligned(from, to); 113.17 - assert_non_zero(count); 113.18 pd_aligned_conjoint_words(from, to, count); 113.19 } 113.20 113.21 @@ -107,49 +109,42 @@ 113.22 static void aligned_disjoint_words(HeapWord* from, HeapWord* to, size_t count) { 113.23 assert_params_aligned(from, to); 113.24 assert_disjoint(from, to, count); 113.25 - assert_non_zero(count); 113.26 pd_aligned_disjoint_words(from, to, count); 113.27 } 113.28 113.29 // bytes, jshorts, jints, jlongs, oops 113.30 113.31 // bytes, conjoint, not atomic on each byte (not that it matters) 113.32 - static void conjoint_bytes(void* from, void* to, size_t count) { 113.33 - assert_non_zero(count); 113.34 + static void conjoint_jbytes(void* from, void* to, size_t count) { 113.35 pd_conjoint_bytes(from, to, count); 113.36 } 113.37 113.38 // bytes, conjoint, atomic on each byte (not that it matters) 113.39 - static void conjoint_bytes_atomic(void* from, void* to, size_t count) { 113.40 - assert_non_zero(count); 113.41 + static void conjoint_jbytes_atomic(void* from, void* to, size_t count) { 113.42 pd_conjoint_bytes(from, to, count); 113.43 } 113.44 113.45 // jshorts, conjoint, atomic on each jshort 113.46 static void conjoint_jshorts_atomic(jshort* from, jshort* to, size_t count) { 113.47 assert_params_ok(from, to, LogBytesPerShort); 113.48 - 
assert_non_zero(count); 113.49 pd_conjoint_jshorts_atomic(from, to, count); 113.50 } 113.51 113.52 // jints, conjoint, atomic on each jint 113.53 static void conjoint_jints_atomic(jint* from, jint* to, size_t count) { 113.54 assert_params_ok(from, to, LogBytesPerInt); 113.55 - assert_non_zero(count); 113.56 pd_conjoint_jints_atomic(from, to, count); 113.57 } 113.58 113.59 // jlongs, conjoint, atomic on each jlong 113.60 static void conjoint_jlongs_atomic(jlong* from, jlong* to, size_t count) { 113.61 assert_params_ok(from, to, LogBytesPerLong); 113.62 - assert_non_zero(count); 113.63 pd_conjoint_jlongs_atomic(from, to, count); 113.64 } 113.65 113.66 // oops, conjoint, atomic on each oop 113.67 static void conjoint_oops_atomic(oop* from, oop* to, size_t count) { 113.68 assert_params_ok(from, to, LogBytesPerHeapOop); 113.69 - assert_non_zero(count); 113.70 pd_conjoint_oops_atomic(from, to, count); 113.71 } 113.72 113.73 @@ -157,7 +152,6 @@ 113.74 static void conjoint_oops_atomic(narrowOop* from, narrowOop* to, size_t count) { 113.75 assert(sizeof(narrowOop) == sizeof(jint), "this cast is wrong"); 113.76 assert_params_ok(from, to, LogBytesPerInt); 113.77 - assert_non_zero(count); 113.78 pd_conjoint_jints_atomic((jint*)from, (jint*)to, count); 113.79 } 113.80 113.81 @@ -168,36 +162,31 @@ 113.82 static void conjoint_memory_atomic(void* from, void* to, size_t size); 113.83 113.84 // bytes, conjoint array, atomic on each byte (not that it matters) 113.85 - static void arrayof_conjoint_bytes(HeapWord* from, HeapWord* to, size_t count) { 113.86 - assert_non_zero(count); 113.87 + static void arrayof_conjoint_jbytes(HeapWord* from, HeapWord* to, size_t count) { 113.88 pd_arrayof_conjoint_bytes(from, to, count); 113.89 } 113.90 113.91 // jshorts, conjoint array, atomic on each jshort 113.92 static void arrayof_conjoint_jshorts(HeapWord* from, HeapWord* to, size_t count) { 113.93 assert_params_ok(from, to, LogBytesPerShort); 113.94 - assert_non_zero(count); 113.95 
pd_arrayof_conjoint_jshorts(from, to, count); 113.96 } 113.97 113.98 // jints, conjoint array, atomic on each jint 113.99 static void arrayof_conjoint_jints(HeapWord* from, HeapWord* to, size_t count) { 113.100 assert_params_ok(from, to, LogBytesPerInt); 113.101 - assert_non_zero(count); 113.102 pd_arrayof_conjoint_jints(from, to, count); 113.103 } 113.104 113.105 // jlongs, conjoint array, atomic on each jlong 113.106 static void arrayof_conjoint_jlongs(HeapWord* from, HeapWord* to, size_t count) { 113.107 assert_params_ok(from, to, LogBytesPerLong); 113.108 - assert_non_zero(count); 113.109 pd_arrayof_conjoint_jlongs(from, to, count); 113.110 } 113.111 113.112 // oops, conjoint array, atomic on each oop 113.113 static void arrayof_conjoint_oops(HeapWord* from, HeapWord* to, size_t count) { 113.114 assert_params_ok(from, to, LogBytesPerHeapOop); 113.115 - assert_non_zero(count); 113.116 pd_arrayof_conjoint_oops(from, to, count); 113.117 } 113.118 113.119 @@ -319,14 +308,6 @@ 113.120 #endif 113.121 } 113.122 113.123 - static void assert_non_zero(size_t count) { 113.124 -#ifdef ASSERT 113.125 - if (count == 0) { 113.126 - basic_fatal("count must be non-zero"); 113.127 - } 113.128 -#endif 113.129 - } 113.130 - 113.131 static void assert_byte_count_ok(size_t byte_count, size_t unit_size) { 113.132 #ifdef ASSERT 113.133 if ((size_t)round_to(byte_count, unit_size) != byte_count) {