Merge

author     kvn
date       Tue, 25 Feb 2014 15:11:18 -0800
changeset  6507:752ba2e5f6d0
parent     6506:f040cf9fc9c0
parent     6319:51e1bb81df86
child      6508:c4178a748df9

src/cpu/sparc/vm/sparc.ad
src/share/vm/memory/allocation.cpp
src/share/vm/memory/allocation.hpp
src/share/vm/memory/metaspace.hpp
src/share/vm/oops/instanceKlass.cpp
src/share/vm/opto/c2_globals.hpp
src/share/vm/opto/callGenerator.cpp
src/share/vm/opto/compile.cpp
src/share/vm/opto/graphKit.cpp
src/share/vm/opto/library_call.cpp
src/share/vm/opto/matcher.hpp
src/share/vm/opto/memnode.cpp
src/share/vm/opto/node.cpp
src/share/vm/opto/parse1.cpp
src/share/vm/opto/parse2.cpp
src/share/vm/opto/parse3.cpp
src/share/vm/opto/runtime.cpp
src/share/vm/opto/type.cpp
src/share/vm/opto/type.hpp
src/share/vm/runtime/arguments.cpp
src/share/vm/runtime/globals.hpp
src/share/vm/runtime/os.cpp
src/share/vm/utilities/bitMap.cpp
     1.1 --- a/.hgtags	Wed Feb 19 20:12:43 2014 -0800
     1.2 +++ b/.hgtags	Tue Feb 25 15:11:18 2014 -0800
     1.3 @@ -424,3 +424,5 @@
     1.4  4638c4d7ff106db0f29ef7f18b128dd7e69bc470 hs25.20-b02
     1.5  e56d11f8cc2158d4280f80e56d196193349c150a hs25.20-b03
     1.6  757fe22ae90681e2b6cff50699c5abbe2563dd2c jdk8u20-b01
     1.7 +9c2ddd17626e375554044a3082a6dc5e68184ed9 jdk8u20-b02
     1.8 +ecf3678d5736a645aea893b525a9eb5fa1a8e072 hs25.20-b04
     2.1 --- a/make/hotspot_version	Wed Feb 19 20:12:43 2014 -0800
     2.2 +++ b/make/hotspot_version	Tue Feb 25 15:11:18 2014 -0800
     2.3 @@ -35,7 +35,7 @@
     2.4  
     2.5  HS_MAJOR_VER=25
     2.6  HS_MINOR_VER=20
     2.7 -HS_BUILD_NUMBER=03
     2.8 +HS_BUILD_NUMBER=04
     2.9  
    2.10  JDK_MAJOR_VER=1
    2.11  JDK_MINOR_VER=8
     3.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp	Wed Feb 19 20:12:43 2014 -0800
     3.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Feb 25 15:11:18 2014 -0800
     3.3 @@ -88,6 +88,7 @@
     3.4      orncc_op3    = 0x16,
     3.5      xnorcc_op3   = 0x17,
     3.6      addccc_op3   = 0x18,
     3.7 +    aes4_op3     = 0x19,
     3.8      umulcc_op3   = 0x1a,
     3.9      smulcc_op3   = 0x1b,
    3.10      subccc_op3   = 0x1c,
    3.11 @@ -121,6 +122,8 @@
    3.12      fpop1_op3    = 0x34,
    3.13      fpop2_op3    = 0x35,
    3.14      impdep1_op3  = 0x36,
    3.15 +    aes3_op3     = 0x36,
    3.16 +    flog3_op3    = 0x36,
    3.17      impdep2_op3  = 0x37,
    3.18      jmpl_op3     = 0x38,
    3.19      rett_op3     = 0x39,
    3.20 @@ -172,41 +175,56 @@
    3.21  
    3.22    enum opfs {
    3.23      // selected opfs
    3.24 -    fmovs_opf   = 0x01,
    3.25 -    fmovd_opf   = 0x02,
    3.26 +    fmovs_opf          = 0x01,
    3.27 +    fmovd_opf          = 0x02,
    3.28  
    3.29 -    fnegs_opf   = 0x05,
    3.30 -    fnegd_opf   = 0x06,
    3.31 +    fnegs_opf          = 0x05,
    3.32 +    fnegd_opf          = 0x06,
    3.33  
    3.34 -    fadds_opf   = 0x41,
    3.35 -    faddd_opf   = 0x42,
    3.36 -    fsubs_opf   = 0x45,
    3.37 -    fsubd_opf   = 0x46,
    3.38 +    fadds_opf          = 0x41,
    3.39 +    faddd_opf          = 0x42,
    3.40 +    fsubs_opf          = 0x45,
    3.41 +    fsubd_opf          = 0x46,
    3.42  
    3.43 -    fmuls_opf   = 0x49,
    3.44 -    fmuld_opf   = 0x4a,
    3.45 -    fdivs_opf   = 0x4d,
    3.46 -    fdivd_opf   = 0x4e,
    3.47 +    fmuls_opf          = 0x49,
    3.48 +    fmuld_opf          = 0x4a,
    3.49 +    fdivs_opf          = 0x4d,
    3.50 +    fdivd_opf          = 0x4e,
    3.51  
    3.52 -    fcmps_opf   = 0x51,
    3.53 -    fcmpd_opf   = 0x52,
    3.54 +    fcmps_opf          = 0x51,
    3.55 +    fcmpd_opf          = 0x52,
    3.56  
    3.57 -    fstox_opf   = 0x81,
    3.58 -    fdtox_opf   = 0x82,
    3.59 -    fxtos_opf   = 0x84,
    3.60 -    fxtod_opf   = 0x88,
    3.61 -    fitos_opf   = 0xc4,
    3.62 -    fdtos_opf   = 0xc6,
    3.63 -    fitod_opf   = 0xc8,
    3.64 -    fstod_opf   = 0xc9,
    3.65 -    fstoi_opf   = 0xd1,
    3.66 -    fdtoi_opf   = 0xd2,
    3.67 +    fstox_opf          = 0x81,
    3.68 +    fdtox_opf          = 0x82,
    3.69 +    fxtos_opf          = 0x84,
    3.70 +    fxtod_opf          = 0x88,
    3.71 +    fitos_opf          = 0xc4,
    3.72 +    fdtos_opf          = 0xc6,
    3.73 +    fitod_opf          = 0xc8,
    3.74 +    fstod_opf          = 0xc9,
    3.75 +    fstoi_opf          = 0xd1,
    3.76 +    fdtoi_opf          = 0xd2,
    3.77  
    3.78 -    mdtox_opf   = 0x110,
    3.79 -    mstouw_opf  = 0x111,
    3.80 -    mstosw_opf  = 0x113,
    3.81 -    mxtod_opf   = 0x118,
    3.82 -    mwtos_opf   = 0x119
    3.83 +    mdtox_opf          = 0x110,
    3.84 +    mstouw_opf         = 0x111,
    3.85 +    mstosw_opf         = 0x113,
    3.86 +    mxtod_opf          = 0x118,
    3.87 +    mwtos_opf          = 0x119,
    3.88 +
    3.89 +    aes_kexpand0_opf   = 0x130,
    3.90 +    aes_kexpand2_opf   = 0x131
    3.91 +  };
    3.92 +
    3.93 +  enum op5s {
    3.94 +    aes_eround01_op5     = 0x00,
    3.95 +    aes_eround23_op5     = 0x01,
    3.96 +    aes_dround01_op5     = 0x02,
    3.97 +    aes_dround23_op5     = 0x03,
    3.98 +    aes_eround01_l_op5   = 0x04,
    3.99 +    aes_eround23_l_op5   = 0x05,
   3.100 +    aes_dround01_l_op5   = 0x06,
   3.101 +    aes_dround23_l_op5   = 0x07,
   3.102 +    aes_kexpand1_op5     = 0x08
   3.103    };
   3.104  
   3.105    enum RCondition {  rc_z = 1,  rc_lez = 2,  rc_lz = 3, rc_nz = 5, rc_gz = 6, rc_gez = 7, rc_last = rc_gez  };
   3.106 @@ -427,6 +445,7 @@
   3.107    static int immed(    bool        i)  { return  u_field(i ? 1 : 0,     13, 13); }
   3.108    static int opf_low6( int         w)  { return  u_field(w,             10,  5); }
   3.109    static int opf_low5( int         w)  { return  u_field(w,              9,  5); }
   3.110 +  static int op5(      int         x)  { return  u_field(x,              8,  5); }
   3.111    static int trapcc(   CC         cc)  { return  u_field(cc,            12, 11); }
   3.112    static int sx(       int         i)  { return  u_field(i,             12, 12); } // shift x=1 means 64-bit
   3.113    static int opf(      int         x)  { return  u_field(x,             13,  5); }
   3.114 @@ -451,6 +470,7 @@
   3.115    static int fd( FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 29, 25); };
   3.116    static int fs1(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 18, 14); };
   3.117    static int fs2(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa),  4,  0); };
   3.118 +  static int fs3(FloatRegister r,  FloatRegisterImpl::Width fwa) { return u_field(r->encoding(fwa), 13,  9); };
   3.119  
   3.120    // some float instructions use this encoding on the op3 field
   3.121    static int alt_op3(int op, FloatRegisterImpl::Width w) {
   3.122 @@ -559,6 +579,12 @@
   3.123      return x & ((1 << 10) - 1);
   3.124    }
   3.125  
   3.126 +  // AES crypto instructions supported only on certain processors
   3.127 +  static void aes_only() { assert( VM_Version::has_aes(), "This instruction only works on SPARC with AES instructions support"); }
   3.128 +
   3.129 +  // instruction only in VIS1
   3.130 +  static void vis1_only() { assert( VM_Version::has_vis1(), "This instruction only works on SPARC with VIS1"); }
   3.131 +
   3.132    // instruction only in VIS3
   3.133    static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
   3.134  
   3.135 @@ -682,6 +708,24 @@
   3.136    void addccc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(addc_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   3.137  
   3.138  
   3.139 +  // 4-operand AES instructions
   3.140 +
   3.141 +  void aes_eround01(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.142 +  void aes_eround23(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.143 +  void aes_dround01(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.144 +  void aes_dround23(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.145 +  void aes_eround01_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.146 +  void aes_eround23_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_eround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.147 +  void aes_dround01_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround01_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.148 +  void aes_dround23_l(  FloatRegister s1, FloatRegister s2, FloatRegister s3, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | fs3(s3, FloatRegisterImpl::D) | op5(aes_dround23_l_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.149 +  void aes_kexpand1(  FloatRegister s1, FloatRegister s2, int imm5a, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes4_op3) | fs1(s1, FloatRegisterImpl::D) | u_field(imm5a, 13, 9) | op5(aes_kexpand1_op5) | fs2(s2, FloatRegisterImpl::D) ); }
   3.150 +
   3.151 +
   3.152 +  // 3-operand AES instructions
   3.153 +
   3.154 +  void aes_kexpand0(  FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand0_opf) | fs2(s2, FloatRegisterImpl::D) ); }
   3.155 +  void aes_kexpand2(  FloatRegister s1, FloatRegister s2, FloatRegister d ) { aes_only(); emit_int32( op(arith_op) | fd(d, FloatRegisterImpl::D) | op3(aes3_op3) | fs1(s1, FloatRegisterImpl::D) | opf(aes_kexpand2_opf) | fs2(s2, FloatRegisterImpl::D) ); }
   3.156 +
   3.157    // pp 136
   3.158  
   3.159    inline void bpr(RCondition c, bool a, Predict p, Register s1, address d, relocInfo::relocType rt = relocInfo::none);
   3.160 @@ -784,6 +828,10 @@
   3.161    void fmul( FloatRegisterImpl::Width sw, FloatRegisterImpl::Width dw,  FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, dw) | op3(fpop1_op3) | fs1(s1, sw) | opf(0x60 + sw + dw*4) | fs2(s2, sw)); }
   3.162    void fdiv( FloatRegisterImpl::Width w,                            FloatRegister s1, FloatRegister s2, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w)  | op3(fpop1_op3) | fs1(s1, w)  | opf(0x4c + w)         | fs2(s2, w)); }
   3.163  
   3.164 +  // FXORs/FXORd instructions
   3.165 +
   3.166 +  void fxor( FloatRegisterImpl::Width w, FloatRegister s1, FloatRegister s2, FloatRegister d ) { vis1_only(); emit_int32( op(arith_op) | fd(d, w) | op3(flog3_op3) | fs1(s1, w) | opf(0x6E - w) | fs2(s2, w)); }
   3.167 +
   3.168    // pp 164
   3.169  
   3.170    void fsqrt( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x28 + w) | fs2(s, w)); }
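
The assembler changes above add the SPARC AES opcodes (aes4_op3, aes3_op3, the op5/opf selectors) plus the new fs3 and op5 field helpers, and each emitter simply ORs those fixed-position fields into one 32-bit instruction word. The following is an illustrative sketch of that composition for aes_eround01, not the HotSpot code: the op/op3 bit positions and the arith_op value are assumed from the standard SPARC format-3 layout, and register operands are shown as raw 5-bit encodings.

    #include <cstdint>

    // Place value x into bits [hi:lo] of the word (simplified u_field()).
    static uint32_t u_field(uint32_t x, int hi, int lo) {
      return (x & ((1u << (hi - lo + 1)) - 1)) << lo;
    }

    // Sketch of the aes_eround01 rd, rs1, rs2, rs3 encoding.
    uint32_t encode_aes_eround01(uint32_t rd, uint32_t rs1,
                                 uint32_t rs2, uint32_t rs3) {
      const uint32_t arith_op         = 2;     // op  [31:30] (assumed, format 3)
      const uint32_t aes4_op3         = 0x19;  // op3 [24:19] (added above)
      const uint32_t aes_eround01_op5 = 0x00;  // op5 [ 8: 5] (added above)
      return u_field(arith_op,         31, 30)
           | u_field(rd,               29, 25)   // fd
           | u_field(aes4_op3,         24, 19)
           | u_field(rs1,              18, 14)   // fs1
           | u_field(rs3,              13,  9)   // fs3 (new helper above)
           | u_field(aes_eround01_op5,  8,  5)   // op5 (new helper above)
           | u_field(rs2,               4,  0);  // fs2
    }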
     4.1 --- a/src/cpu/sparc/vm/sparc.ad	Wed Feb 19 20:12:43 2014 -0800
     4.2 +++ b/src/cpu/sparc/vm/sparc.ad	Tue Feb 25 15:11:18 2014 -0800
     4.3 @@ -1853,6 +1853,12 @@
     4.4    return false;
     4.5  }
     4.6  
     4.7 +// Current (2013) SPARC platforms need to read original key
     4.8 +// to construct decryption expanded key 
     4.9 +const bool Matcher::pass_original_key_for_aes() {
    4.10 +  return true;
    4.11 +}
    4.12 +
    4.13  // USII supports fxtof through the whole range of number, USIII doesn't
    4.14  const bool Matcher::convL2FSupported(void) {
    4.15    return VM_Version::has_fast_fxtof();
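
The sparc.ad hunk above adds Matcher::pass_original_key_for_aes(), which tells the platform-independent intrinsic code that on current SPARC parts the AES stubs also need the caller's original key: the SunJCE expanded decryption key is not usable by the SPARC AES instructions, so generate_aescrypt_decryptBlock below re-runs key expansion itself (hence its original_key = O3 argument). The sketch below is illustrative only, with made-up helper and type names, of how a caller might act on the predicate; it is not HotSpot's actual intrinsic-expander code.

    #include <cstddef>

    struct AesState {                      // stand-in for the Java cipher object
      const int*           expanded_key;   // SunJCE session key
      const unsigned char* original_key;   // raw user-supplied key bytes
    };

    // Stand-in for Matcher::pass_original_key_for_aes(); true on SPARC per the
    // predicate added above.
    static bool pass_original_key_for_aes() { return true; }

    // Decide which key material to hand to the platform AES stub.
    void collect_stub_key_args(const AesState& s,
                               const void*& key_arg,
                               const void*& original_key_arg) {
      key_arg          = s.expanded_key;
      original_key_arg = pass_original_key_for_aes() ? s.original_key : NULL;
    }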
     5.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Wed Feb 19 20:12:43 2014 -0800
     5.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Feb 25 15:11:18 2014 -0800
     5.3 @@ -3304,6 +3304,775 @@
     5.4      }
     5.5    }
     5.6  
     5.7 +  address generate_aescrypt_encryptBlock() {
     5.8 +    __ align(CodeEntryAlignment);
     5.9 +    StubCodeMark mark(this, "StubRoutines", "aesencryptBlock");
    5.10 +    Label L_doLast128bit, L_storeOutput;
    5.11 +    address start = __ pc();
    5.12 +    Register from = O0; // source byte array
    5.13 +    Register to = O1;   // destination byte array
    5.14 +    Register key = O2;  // expanded key array
    5.15 +    const Register keylen = O4; //reg for storing expanded key array length
    5.16 +
    5.17 +    // read expanded key length
    5.18 +    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
    5.19 +
    5.20 +    // load input into F54-F56; F30-F31 used as temp
    5.21 +    __ ldf(FloatRegisterImpl::S, from, 0, F30);
    5.22 +    __ ldf(FloatRegisterImpl::S, from, 4, F31);
    5.23 +    __ fmov(FloatRegisterImpl::D, F30, F54);
    5.24 +    __ ldf(FloatRegisterImpl::S, from, 8, F30);
    5.25 +    __ ldf(FloatRegisterImpl::S, from, 12, F31);
    5.26 +    __ fmov(FloatRegisterImpl::D, F30, F56);
    5.27 +
    5.28 +    // load expanded key
    5.29 +    for ( int i = 0;  i <= 38; i += 2 ) {
    5.30 +      __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i));
    5.31 +    }
    5.32 +
    5.33 +    // perform cipher transformation
    5.34 +    __ fxor(FloatRegisterImpl::D, F0, F54, F54);
    5.35 +    __ fxor(FloatRegisterImpl::D, F2, F56, F56);
    5.36 +    // rounds 1 through 8
    5.37 +    for ( int i = 4;  i <= 28; i += 8 ) {
    5.38 +      __ aes_eround01(as_FloatRegister(i), F54, F56, F58);
    5.39 +      __ aes_eround23(as_FloatRegister(i+2), F54, F56, F60);
    5.40 +      __ aes_eround01(as_FloatRegister(i+4), F58, F60, F54);
    5.41 +      __ aes_eround23(as_FloatRegister(i+6), F58, F60, F56);
    5.42 +    }
    5.43 +    __ aes_eround01(F36, F54, F56, F58); //round 9
    5.44 +    __ aes_eround23(F38, F54, F56, F60);
    5.45 +
    5.46 +    // 128-bit original key size
    5.47 +    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_doLast128bit);
    5.48 +
    5.49 +    for ( int i = 40;  i <= 50; i += 2 ) {
    5.50 +      __ ldf(FloatRegisterImpl::D, key, i*4, as_FloatRegister(i) );
    5.51 +    }
    5.52 +    __ aes_eround01(F40, F58, F60, F54); //round 10
    5.53 +    __ aes_eround23(F42, F58, F60, F56);
    5.54 +    __ aes_eround01(F44, F54, F56, F58); //round 11
    5.55 +    __ aes_eround23(F46, F54, F56, F60);
    5.56 +
    5.57 +    // 192-bit original key size
    5.58 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_storeOutput);
    5.59 +
    5.60 +    __ ldf(FloatRegisterImpl::D, key, 208, F52);
    5.61 +    __ aes_eround01(F48, F58, F60, F54); //round 12
    5.62 +    __ aes_eround23(F50, F58, F60, F56);
    5.63 +    __ ldf(FloatRegisterImpl::D, key, 216, F46);
    5.64 +    __ ldf(FloatRegisterImpl::D, key, 224, F48);
    5.65 +    __ ldf(FloatRegisterImpl::D, key, 232, F50);
    5.66 +    __ aes_eround01(F52, F54, F56, F58); //round 13
    5.67 +    __ aes_eround23(F46, F54, F56, F60);
    5.68 +    __ br(Assembler::always, false, Assembler::pt, L_storeOutput);
    5.69 +    __ delayed()->nop();
    5.70 +
    5.71 +    __ BIND(L_doLast128bit);
    5.72 +    __ ldf(FloatRegisterImpl::D, key, 160, F48);
    5.73 +    __ ldf(FloatRegisterImpl::D, key, 168, F50);
    5.74 +
    5.75 +    __ BIND(L_storeOutput);
    5.76 +    // perform last round of encryption common for all key sizes
    5.77 +    __ aes_eround01_l(F48, F58, F60, F54); //last round
    5.78 +    __ aes_eround23_l(F50, F58, F60, F56);
    5.79 +
    5.80 +    // store output into the destination array, F0-F1 used as temp
    5.81 +    __ fmov(FloatRegisterImpl::D, F54, F0);
    5.82 +    __ stf(FloatRegisterImpl::S, F0, to, 0);
    5.83 +    __ stf(FloatRegisterImpl::S, F1, to, 4);
    5.84 +    __ fmov(FloatRegisterImpl::D, F56, F0);
    5.85 +    __ stf(FloatRegisterImpl::S, F0, to, 8);
    5.86 +    __ retl();
    5.87 +    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
    5.88 +
    5.89 +    return start;
    5.90 +  }
    5.91 +
    5.92 +  address generate_aescrypt_decryptBlock() {
    5.93 +    __ align(CodeEntryAlignment);
    5.94 +    StubCodeMark mark(this, "StubRoutines", "aesdecryptBlock");
    5.95 +    address start = __ pc();
    5.96 +    Label L_expand192bit, L_expand256bit, L_common_transform;
    5.97 +    Register from = O0; // source byte array
    5.98 +    Register to = O1;   // destination byte array
    5.99 +    Register key = O2;  // expanded key array
   5.100 +    Register original_key = O3;  // original key array only required during decryption
   5.101 +    const Register keylen = O4;  // reg for storing expanded key array length
   5.102 +
   5.103 +    // read expanded key array length
   5.104 +    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
   5.105 +
   5.106 +    // load input into F52-F54; F30,F31 used as temp
   5.107 +    __ ldf(FloatRegisterImpl::S, from, 0, F30);
   5.108 +    __ ldf(FloatRegisterImpl::S, from, 4, F31);
   5.109 +    __ fmov(FloatRegisterImpl::D, F30, F52);
   5.110 +    __ ldf(FloatRegisterImpl::S, from, 8, F30);
   5.111 +    __ ldf(FloatRegisterImpl::S, from, 12, F31);
   5.112 +    __ fmov(FloatRegisterImpl::D, F30, F54);
   5.113 +
   5.114 +    // load original key from SunJCE expanded decryption key
   5.115 +    for ( int i = 0;  i <= 3; i++ ) {
   5.116 +      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
   5.117 +    }
   5.118 +
   5.119 +    // 256-bit original key size
   5.120 +    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
   5.121 +
   5.122 +    // 192-bit original key size
   5.123 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
   5.124 +
   5.125 +    // 128-bit original key size
   5.126 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.127 +    for ( int i = 0;  i <= 36; i += 4 ) {
   5.128 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
   5.129 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
   5.130 +    }
   5.131 +
   5.132 +    // perform 128-bit key specific inverse cipher transformation
   5.133 +    __ fxor(FloatRegisterImpl::D, F42, F54, F54);
   5.134 +    __ fxor(FloatRegisterImpl::D, F40, F52, F52);
   5.135 +    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
   5.136 +    __ delayed()->nop();
   5.137 +
   5.138 +    __ BIND(L_expand192bit);
   5.139 +
   5.140 +    // start loading rest of the 192-bit key
   5.141 +    __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
   5.142 +    __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
   5.143 +
   5.144 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.145 +    for ( int i = 0;  i <= 36; i += 6 ) {
   5.146 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
   5.147 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
   5.148 +      __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
   5.149 +    }
   5.150 +    __ aes_kexpand1(F42, F46, 7, F48);
   5.151 +    __ aes_kexpand2(F44, F48, F50);
   5.152 +
   5.153 +    // perform 192-bit key specific inverse cipher transformation
   5.154 +    __ fxor(FloatRegisterImpl::D, F50, F54, F54);
   5.155 +    __ fxor(FloatRegisterImpl::D, F48, F52, F52);
   5.156 +    __ aes_dround23(F46, F52, F54, F58);
   5.157 +    __ aes_dround01(F44, F52, F54, F56);
   5.158 +    __ aes_dround23(F42, F56, F58, F54);
   5.159 +    __ aes_dround01(F40, F56, F58, F52);
   5.160 +    __ br(Assembler::always, false, Assembler::pt, L_common_transform);
   5.161 +    __ delayed()->nop();
   5.162 +
   5.163 +    __ BIND(L_expand256bit);
   5.164 +
   5.165 +    // load rest of the 256-bit key
   5.166 +    for ( int i = 4;  i <= 7; i++ ) {
   5.167 +      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
   5.168 +    }
   5.169 +
   5.170 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.171 +    for ( int i = 0;  i <= 40; i += 8 ) {
   5.172 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
   5.173 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
   5.174 +      __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
   5.175 +      __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
   5.176 +    }
   5.177 +    __ aes_kexpand1(F48, F54, 6, F56);
   5.178 +    __ aes_kexpand2(F50, F56, F58);
   5.179 +
   5.180 +    for ( int i = 0;  i <= 6; i += 2 ) {
   5.181 +      __ fmov(FloatRegisterImpl::D, as_FloatRegister(58-i), as_FloatRegister(i));
   5.182 +    }
   5.183 +
   5.184 +    // load input into F52-F54
   5.185 +    __ ldf(FloatRegisterImpl::D, from, 0, F52);
   5.186 +    __ ldf(FloatRegisterImpl::D, from, 8, F54);
   5.187 +
   5.188 +    // perform 256-bit key specific inverse cipher transformation
   5.189 +    __ fxor(FloatRegisterImpl::D, F0, F54, F54);
   5.190 +    __ fxor(FloatRegisterImpl::D, F2, F52, F52);
   5.191 +    __ aes_dround23(F4, F52, F54, F58);
   5.192 +    __ aes_dround01(F6, F52, F54, F56);
   5.193 +    __ aes_dround23(F50, F56, F58, F54);
   5.194 +    __ aes_dround01(F48, F56, F58, F52);
   5.195 +    __ aes_dround23(F46, F52, F54, F58);
   5.196 +    __ aes_dround01(F44, F52, F54, F56);
   5.197 +    __ aes_dround23(F42, F56, F58, F54);
   5.198 +    __ aes_dround01(F40, F56, F58, F52);
   5.199 +
   5.200 +    for ( int i = 0;  i <= 7; i++ ) {
   5.201 +      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
   5.202 +    }
   5.203 +
   5.204 +    // perform inverse cipher transformations common for all key sizes
   5.205 +    __ BIND(L_common_transform);
   5.206 +    for ( int i = 38;  i >= 6; i -= 8 ) {
   5.207 +      __ aes_dround23(as_FloatRegister(i), F52, F54, F58);
   5.208 +      __ aes_dround01(as_FloatRegister(i-2), F52, F54, F56);
   5.209 +      if ( i != 6) {
   5.210 +        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F54);
   5.211 +        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F52);
   5.212 +      } else {
   5.213 +        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F54);
   5.214 +        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F52);
   5.215 +      }
   5.216 +    }
   5.217 +
   5.218 +    // store output to destination array, F0-F1 used as temp
   5.219 +    __ fmov(FloatRegisterImpl::D, F52, F0);
   5.220 +    __ stf(FloatRegisterImpl::S, F0, to, 0);
   5.221 +    __ stf(FloatRegisterImpl::S, F1, to, 4);
   5.222 +    __ fmov(FloatRegisterImpl::D, F54, F0);
   5.223 +    __ stf(FloatRegisterImpl::S, F0, to, 8);
   5.224 +    __ retl();
   5.225 +    __ delayed()->stf(FloatRegisterImpl::S, F1, to, 12);
   5.226 +
   5.227 +    return start;
   5.228 +  }
   5.229 +
   5.230 +  address generate_cipherBlockChaining_encryptAESCrypt() {
   5.231 +    __ align(CodeEntryAlignment);
   5.232 +    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_encryptAESCrypt");
   5.233 +    Label L_cbcenc128, L_cbcenc192, L_cbcenc256;
   5.234 +    address start = __ pc();
   5.235 +    Register from = O0; // source byte array
   5.236 +    Register to = O1;   // destination byte array
   5.237 +    Register key = O2;  // expanded key array
   5.238 +    Register rvec = O3; // init vector
   5.239 +    const Register len_reg = O4; // cipher length
   5.240 +    const Register keylen = O5;  // reg for storing expanded key array length
   5.241 +
   5.242 +    // save cipher len to return in the end
   5.243 +    __ mov(len_reg, L1);
   5.244 +
   5.245 +    // read expanded key length
   5.246 +    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
   5.247 +
   5.248 +    // load init vector
   5.249 +    __ ldf(FloatRegisterImpl::D, rvec, 0, F60);
   5.250 +    __ ldf(FloatRegisterImpl::D, rvec, 8, F62);
   5.251 +    __ ldx(key,0,G1);
   5.252 +    __ ldx(key,8,G2);
   5.253 +
   5.254 +    // start loading expanded key
   5.255 +    for ( int i = 0, j = 16;  i <= 38; i += 2, j += 8 ) {
   5.256 +      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
   5.257 +    }
   5.258 +
   5.259 +    // 128-bit original key size
   5.260 +    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pt, L_cbcenc128);
   5.261 +
   5.262 +    for ( int i = 40, j = 176;  i <= 46; i += 2, j += 8 ) {
   5.263 +      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
   5.264 +    }
   5.265 +
   5.266 +    // 192-bit original key size
   5.267 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pt, L_cbcenc192);
   5.268 +
   5.269 +    for ( int i = 48, j = 208;  i <= 54; i += 2, j += 8 ) {
   5.270 +      __ ldf(FloatRegisterImpl::D, key, j, as_FloatRegister(i));
   5.271 +    }
   5.272 +
   5.273 +    // 256-bit original key size
   5.274 +    __ br(Assembler::always, false, Assembler::pt, L_cbcenc256);
   5.275 +    __ delayed()->nop();
   5.276 +
   5.277 +    __ align(OptoLoopAlignment);
   5.278 +    __ BIND(L_cbcenc128);
   5.279 +    __ ldx(from,0,G3);
   5.280 +    __ ldx(from,8,G4);
   5.281 +    __ xor3(G1,G3,G3);
   5.282 +    __ xor3(G2,G4,G4);
   5.283 +    __ movxtod(G3,F56);
   5.284 +    __ movxtod(G4,F58);
   5.285 +    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
   5.286 +    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
   5.287 +
   5.288 +    // TEN_EROUNDS
   5.289 +    for ( int i = 0;  i <= 32; i += 8 ) {
   5.290 +      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
   5.291 +      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
   5.292 +      if (i != 32 ) {
   5.293 +        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
   5.294 +        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
   5.295 +      } else {
   5.296 +        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
   5.297 +        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
   5.298 +      }
   5.299 +    }
   5.300 +
   5.301 +    __ stf(FloatRegisterImpl::D, F60, to, 0);
   5.302 +    __ stf(FloatRegisterImpl::D, F62, to, 8);
   5.303 +    __ add(from, 16, from);
   5.304 +    __ add(to, 16, to);
   5.305 +    __ subcc(len_reg, 16, len_reg);
   5.306 +    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc128);
   5.307 +    __ delayed()->nop();
   5.308 +    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
   5.309 +    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
   5.310 +    __ retl();
   5.311 +    __ delayed()->mov(L1, O0);
   5.312 +
   5.313 +    __ align(OptoLoopAlignment);
   5.314 +    __ BIND(L_cbcenc192);
   5.315 +    __ ldx(from,0,G3);
   5.316 +    __ ldx(from,8,G4);
   5.317 +    __ xor3(G1,G3,G3);
   5.318 +    __ xor3(G2,G4,G4);
   5.319 +    __ movxtod(G3,F56);
   5.320 +    __ movxtod(G4,F58);
   5.321 +    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
   5.322 +    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
   5.323 +
    5.324 +    // TWELVE_EROUNDS
   5.325 +    for ( int i = 0;  i <= 40; i += 8 ) {
   5.326 +      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
   5.327 +      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
   5.328 +      if (i != 40 ) {
   5.329 +        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
   5.330 +        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
   5.331 +      } else {
   5.332 +        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
   5.333 +        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
   5.334 +      }
   5.335 +    }
   5.336 +
   5.337 +    __ stf(FloatRegisterImpl::D, F60, to, 0);
   5.338 +    __ stf(FloatRegisterImpl::D, F62, to, 8);
   5.339 +    __ add(from, 16, from);
   5.340 +    __ subcc(len_reg, 16, len_reg);
   5.341 +    __ add(to, 16, to);
   5.342 +    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc192);
   5.343 +    __ delayed()->nop();
   5.344 +    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
   5.345 +    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
   5.346 +    __ retl();
   5.347 +    __ delayed()->mov(L1, O0);
   5.348 +
   5.349 +    __ align(OptoLoopAlignment);
   5.350 +    __ BIND(L_cbcenc256);
   5.351 +    __ ldx(from,0,G3);
   5.352 +    __ ldx(from,8,G4);
   5.353 +    __ xor3(G1,G3,G3);
   5.354 +    __ xor3(G2,G4,G4);
   5.355 +    __ movxtod(G3,F56);
   5.356 +    __ movxtod(G4,F58);
   5.357 +    __ fxor(FloatRegisterImpl::D, F60, F56, F60);
   5.358 +    __ fxor(FloatRegisterImpl::D, F62, F58, F62);
   5.359 +
   5.360 +    // FOURTEEN_EROUNDS
   5.361 +    for ( int i = 0;  i <= 48; i += 8 ) {
   5.362 +      __ aes_eround01(as_FloatRegister(i), F60, F62, F56);
   5.363 +      __ aes_eround23(as_FloatRegister(i+2), F60, F62, F58);
   5.364 +      if (i != 48 ) {
   5.365 +        __ aes_eround01(as_FloatRegister(i+4), F56, F58, F60);
   5.366 +        __ aes_eround23(as_FloatRegister(i+6), F56, F58, F62);
   5.367 +      } else {
   5.368 +        __ aes_eround01_l(as_FloatRegister(i+4), F56, F58, F60);
   5.369 +        __ aes_eround23_l(as_FloatRegister(i+6), F56, F58, F62);
   5.370 +      }
   5.371 +    }
   5.372 +
   5.373 +    __ stf(FloatRegisterImpl::D, F60, to, 0);
   5.374 +    __ stf(FloatRegisterImpl::D, F62, to, 8);
   5.375 +    __ add(from, 16, from);
   5.376 +    __ subcc(len_reg, 16, len_reg);
   5.377 +    __ add(to, 16, to);
   5.378 +    __ br(Assembler::notEqual, false, Assembler::pt, L_cbcenc256);
   5.379 +    __ delayed()->nop();
   5.380 +    __ stf(FloatRegisterImpl::D, F60, rvec, 0);
   5.381 +    __ stf(FloatRegisterImpl::D, F62, rvec, 8);
   5.382 +    __ retl();
   5.383 +    __ delayed()->mov(L1, O0);
   5.384 +
   5.385 +    return start;
   5.386 +  }
   5.387 +
   5.388 +  address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
   5.389 +    __ align(CodeEntryAlignment);
   5.390 +    StubCodeMark mark(this, "StubRoutines", "cipherBlockChaining_decryptAESCrypt");
   5.391 +    Label L_cbcdec_end, L_expand192bit, L_expand256bit, L_dec_first_block_start;
   5.392 +    Label L_dec_first_block128, L_dec_first_block192, L_dec_next2_blocks128, L_dec_next2_blocks192, L_dec_next2_blocks256;
   5.393 +    address start = __ pc();
   5.394 +    Register from = I0; // source byte array
   5.395 +    Register to = I1;   // destination byte array
   5.396 +    Register key = I2;  // expanded key array
   5.397 +    Register rvec = I3; // init vector
   5.398 +    const Register len_reg = I4; // cipher length
   5.399 +    const Register original_key = I5;  // original key array only required during decryption
   5.400 +    const Register keylen = L6;  // reg for storing expanded key array length
   5.401 +
   5.402 +    // save cipher len before save_frame, to return in the end
   5.403 +    __ mov(O4, L0);
   5.404 +    __ save_frame(0); //args are read from I* registers since we save the frame in the beginning
   5.405 +
   5.406 +    // load original key from SunJCE expanded decryption key
   5.407 +    for ( int i = 0;  i <= 3; i++ ) {
   5.408 +      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
   5.409 +    }
   5.410 +
   5.411 +    // load initial vector
   5.412 +    __ ldx(rvec,0,L0);
   5.413 +    __ ldx(rvec,8,L1);
   5.414 +
   5.415 +    // read expanded key array length
   5.416 +    __ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
   5.417 +
   5.418 +    // 256-bit original key size
   5.419 +    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_expand256bit);
   5.420 +
   5.421 +    // 192-bit original key size
   5.422 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_expand192bit);
   5.423 +
   5.424 +    // 128-bit original key size
   5.425 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.426 +    for ( int i = 0;  i <= 36; i += 4 ) {
   5.427 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+2), i/4, as_FloatRegister(i+4));
   5.428 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+4), as_FloatRegister(i+6));
   5.429 +    }
   5.430 +
   5.431 +    // load expanded key[last-1] and key[last] elements
   5.432 +    __ movdtox(F40,L2);
   5.433 +    __ movdtox(F42,L3);
   5.434 +
   5.435 +    __ and3(len_reg, 16, L4);
   5.436 +    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks128);
   5.437 +    __ delayed()->nop();
   5.438 +
   5.439 +    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
   5.440 +    __ delayed()->nop();
   5.441 +
   5.442 +    __ BIND(L_expand192bit);
   5.443 +    // load rest of the 192-bit key
   5.444 +    __ ldf(FloatRegisterImpl::S, original_key, 16, F4);
   5.445 +    __ ldf(FloatRegisterImpl::S, original_key, 20, F5);
   5.446 +
   5.447 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.448 +    for ( int i = 0;  i <= 36; i += 6 ) {
   5.449 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+4), i/6, as_FloatRegister(i+6));
   5.450 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+6), as_FloatRegister(i+8));
   5.451 +      __ aes_kexpand2(as_FloatRegister(i+4), as_FloatRegister(i+8), as_FloatRegister(i+10));
   5.452 +    }
   5.453 +    __ aes_kexpand1(F42, F46, 7, F48);
   5.454 +    __ aes_kexpand2(F44, F48, F50);
   5.455 +
   5.456 +    // load expanded key[last-1] and key[last] elements
   5.457 +    __ movdtox(F48,L2);
   5.458 +    __ movdtox(F50,L3);
   5.459 +
   5.460 +    __ and3(len_reg, 16, L4);
   5.461 +    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks192);
   5.462 +    __ delayed()->nop();
   5.463 +
   5.464 +    __ br(Assembler::always, false, Assembler::pt, L_dec_first_block_start);
   5.465 +    __ delayed()->nop();
   5.466 +
   5.467 +    __ BIND(L_expand256bit);
   5.468 +    // load rest of the 256-bit key
   5.469 +    for ( int i = 4;  i <= 7; i++ ) {
   5.470 +      __ ldf(FloatRegisterImpl::S, original_key, i*4, as_FloatRegister(i));
   5.471 +    }
   5.472 +
   5.473 +    // perform key expansion since SunJCE decryption-key expansion is not compatible with SPARC crypto instructions
   5.474 +    for ( int i = 0;  i <= 40; i += 8 ) {
   5.475 +      __ aes_kexpand1(as_FloatRegister(i), as_FloatRegister(i+6), i/8, as_FloatRegister(i+8));
   5.476 +      __ aes_kexpand2(as_FloatRegister(i+2), as_FloatRegister(i+8), as_FloatRegister(i+10));
   5.477 +      __ aes_kexpand0(as_FloatRegister(i+4), as_FloatRegister(i+10), as_FloatRegister(i+12));
   5.478 +      __ aes_kexpand2(as_FloatRegister(i+6), as_FloatRegister(i+12), as_FloatRegister(i+14));
   5.479 +    }
   5.480 +    __ aes_kexpand1(F48, F54, 6, F56);
   5.481 +    __ aes_kexpand2(F50, F56, F58);
   5.482 +
   5.483 +    // load expanded key[last-1] and key[last] elements
   5.484 +    __ movdtox(F56,L2);
   5.485 +    __ movdtox(F58,L3);
   5.486 +
   5.487 +    __ and3(len_reg, 16, L4);
   5.488 +    __ br_null(L4, false, Assembler::pt, L_dec_next2_blocks256);
   5.489 +    __ delayed()->nop();
   5.490 +
   5.491 +    __ BIND(L_dec_first_block_start);
   5.492 +    __ ldx(from,0,L4);
   5.493 +    __ ldx(from,8,L5);
   5.494 +    __ xor3(L2,L4,G1);
   5.495 +    __ movxtod(G1,F60);
   5.496 +    __ xor3(L3,L5,G1);
   5.497 +    __ movxtod(G1,F62);
   5.498 +
   5.499 +    // 128-bit original key size
   5.500 +    __ cmp_and_brx_short(keylen, 44, Assembler::equal, Assembler::pn, L_dec_first_block128);
   5.501 +
   5.502 +    // 192-bit original key size
   5.503 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_first_block192);
   5.504 +
   5.505 +    __ aes_dround23(F54, F60, F62, F58);
   5.506 +    __ aes_dround01(F52, F60, F62, F56);
   5.507 +    __ aes_dround23(F50, F56, F58, F62);
   5.508 +    __ aes_dround01(F48, F56, F58, F60);
   5.509 +
   5.510 +    __ BIND(L_dec_first_block192);
   5.511 +    __ aes_dround23(F46, F60, F62, F58);
   5.512 +    __ aes_dround01(F44, F60, F62, F56);
   5.513 +    __ aes_dround23(F42, F56, F58, F62);
   5.514 +    __ aes_dround01(F40, F56, F58, F60);
   5.515 +
   5.516 +    __ BIND(L_dec_first_block128);
   5.517 +    for ( int i = 38;  i >= 6; i -= 8 ) {
   5.518 +      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
   5.519 +      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
   5.520 +      if ( i != 6) {
   5.521 +        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
   5.522 +        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
   5.523 +      } else {
   5.524 +        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
   5.525 +        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
   5.526 +      }
   5.527 +    }
   5.528 +
   5.529 +    __ movxtod(L0,F56);
   5.530 +    __ movxtod(L1,F58);
   5.531 +    __ mov(L4,L0);
   5.532 +    __ mov(L5,L1);
   5.533 +    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
   5.534 +    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
   5.535 +
   5.536 +    __ stf(FloatRegisterImpl::D, F60, to, 0);
   5.537 +    __ stf(FloatRegisterImpl::D, F62, to, 8);
   5.538 +
   5.539 +    __ add(from, 16, from);
   5.540 +    __ add(to, 16, to);
   5.541 +    __ subcc(len_reg, 16, len_reg);
   5.542 +    __ br(Assembler::equal, false, Assembler::pt, L_cbcdec_end);
   5.543 +    __ delayed()->nop();
   5.544 +
   5.545 +    // 256-bit original key size
   5.546 +    __ cmp_and_brx_short(keylen, 60, Assembler::equal, Assembler::pn, L_dec_next2_blocks256);
   5.547 +
   5.548 +    // 192-bit original key size
   5.549 +    __ cmp_and_brx_short(keylen, 52, Assembler::equal, Assembler::pn, L_dec_next2_blocks192);
   5.550 +
   5.551 +    __ align(OptoLoopAlignment);
   5.552 +    __ BIND(L_dec_next2_blocks128);
   5.553 +    __ nop();
   5.554 +
   5.555 +    // F40:F42 used for first 16-bytes
   5.556 +    __ ldx(from,0,G4);
   5.557 +    __ ldx(from,8,G5);
   5.558 +    __ xor3(L2,G4,G1);
   5.559 +    __ movxtod(G1,F40);
   5.560 +    __ xor3(L3,G5,G1);
   5.561 +    __ movxtod(G1,F42);
   5.562 +
   5.563 +    // F60:F62 used for next 16-bytes
   5.564 +    __ ldx(from,16,L4);
   5.565 +    __ ldx(from,24,L5);
   5.566 +    __ xor3(L2,L4,G1);
   5.567 +    __ movxtod(G1,F60);
   5.568 +    __ xor3(L3,L5,G1);
   5.569 +    __ movxtod(G1,F62);
   5.570 +
   5.571 +    for ( int i = 38;  i >= 6; i -= 8 ) {
   5.572 +      __ aes_dround23(as_FloatRegister(i), F40, F42, F44);
   5.573 +      __ aes_dround01(as_FloatRegister(i-2), F40, F42, F46);
   5.574 +      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
   5.575 +      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
   5.576 +      if (i != 6 ) {
   5.577 +        __ aes_dround23(as_FloatRegister(i-4), F46, F44, F42);
   5.578 +        __ aes_dround01(as_FloatRegister(i-6), F46, F44, F40);
   5.579 +        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
   5.580 +        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
   5.581 +      } else {
   5.582 +        __ aes_dround23_l(as_FloatRegister(i-4), F46, F44, F42);
   5.583 +        __ aes_dround01_l(as_FloatRegister(i-6), F46, F44, F40);
   5.584 +        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
   5.585 +        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
   5.586 +      }
   5.587 +    }
   5.588 +
   5.589 +    __ movxtod(L0,F46);
   5.590 +    __ movxtod(L1,F44);
   5.591 +    __ fxor(FloatRegisterImpl::D, F46, F40, F40);
   5.592 +    __ fxor(FloatRegisterImpl::D, F44, F42, F42);
   5.593 +
   5.594 +    __ stf(FloatRegisterImpl::D, F40, to, 0);
   5.595 +    __ stf(FloatRegisterImpl::D, F42, to, 8);
   5.596 +
   5.597 +    __ movxtod(G4,F56);
   5.598 +    __ movxtod(G5,F58);
   5.599 +    __ mov(L4,L0);
   5.600 +    __ mov(L5,L1);
   5.601 +    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
   5.602 +    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
   5.603 +
   5.604 +    __ stf(FloatRegisterImpl::D, F60, to, 16);
   5.605 +    __ stf(FloatRegisterImpl::D, F62, to, 24);
   5.606 +
   5.607 +    __ add(from, 32, from);
   5.608 +    __ add(to, 32, to);
   5.609 +    __ subcc(len_reg, 32, len_reg);
   5.610 +    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks128);
   5.611 +    __ delayed()->nop();
   5.612 +    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
   5.613 +    __ delayed()->nop();
   5.614 +
   5.615 +    __ align(OptoLoopAlignment);
   5.616 +    __ BIND(L_dec_next2_blocks192);
   5.617 +    __ nop();
   5.618 +
   5.619 +    // F48:F50 used for first 16-bytes
   5.620 +    __ ldx(from,0,G4);
   5.621 +    __ ldx(from,8,G5);
   5.622 +    __ xor3(L2,G4,G1);
   5.623 +    __ movxtod(G1,F48);
   5.624 +    __ xor3(L3,G5,G1);
   5.625 +    __ movxtod(G1,F50);
   5.626 +
   5.627 +    // F60:F62 used for next 16-bytes
   5.628 +    __ ldx(from,16,L4);
   5.629 +    __ ldx(from,24,L5);
   5.630 +    __ xor3(L2,L4,G1);
   5.631 +    __ movxtod(G1,F60);
   5.632 +    __ xor3(L3,L5,G1);
   5.633 +    __ movxtod(G1,F62);
   5.634 +
   5.635 +    for ( int i = 46;  i >= 6; i -= 8 ) {
   5.636 +      __ aes_dround23(as_FloatRegister(i), F48, F50, F52);
   5.637 +      __ aes_dround01(as_FloatRegister(i-2), F48, F50, F54);
   5.638 +      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
   5.639 +      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
   5.640 +      if (i != 6 ) {
   5.641 +        __ aes_dround23(as_FloatRegister(i-4), F54, F52, F50);
   5.642 +        __ aes_dround01(as_FloatRegister(i-6), F54, F52, F48);
   5.643 +        __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
   5.644 +        __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
   5.645 +      } else {
   5.646 +        __ aes_dround23_l(as_FloatRegister(i-4), F54, F52, F50);
   5.647 +        __ aes_dround01_l(as_FloatRegister(i-6), F54, F52, F48);
   5.648 +        __ aes_dround23_l(as_FloatRegister(i-4), F56, F58, F62);
   5.649 +        __ aes_dround01_l(as_FloatRegister(i-6), F56, F58, F60);
   5.650 +      }
   5.651 +    }
   5.652 +
   5.653 +    __ movxtod(L0,F54);
   5.654 +    __ movxtod(L1,F52);
   5.655 +    __ fxor(FloatRegisterImpl::D, F54, F48, F48);
   5.656 +    __ fxor(FloatRegisterImpl::D, F52, F50, F50);
   5.657 +
   5.658 +    __ stf(FloatRegisterImpl::D, F48, to, 0);
   5.659 +    __ stf(FloatRegisterImpl::D, F50, to, 8);
   5.660 +
   5.661 +    __ movxtod(G4,F56);
   5.662 +    __ movxtod(G5,F58);
   5.663 +    __ mov(L4,L0);
   5.664 +    __ mov(L5,L1);
   5.665 +    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
   5.666 +    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
   5.667 +
   5.668 +    __ stf(FloatRegisterImpl::D, F60, to, 16);
   5.669 +    __ stf(FloatRegisterImpl::D, F62, to, 24);
   5.670 +
   5.671 +    __ add(from, 32, from);
   5.672 +    __ add(to, 32, to);
   5.673 +    __ subcc(len_reg, 32, len_reg);
   5.674 +    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks192);
   5.675 +    __ delayed()->nop();
   5.676 +    __ br(Assembler::always, false, Assembler::pt, L_cbcdec_end);
   5.677 +    __ delayed()->nop();
   5.678 +
   5.679 +    __ align(OptoLoopAlignment);
   5.680 +    __ BIND(L_dec_next2_blocks256);
   5.681 +    __ nop();
   5.682 +
   5.683 +    // F0:F2 used for first 16-bytes
   5.684 +    __ ldx(from,0,G4);
   5.685 +    __ ldx(from,8,G5);
   5.686 +    __ xor3(L2,G4,G1);
   5.687 +    __ movxtod(G1,F0);
   5.688 +    __ xor3(L3,G5,G1);
   5.689 +    __ movxtod(G1,F2);
   5.690 +
   5.691 +    // F60:F62 used for next 16-bytes
   5.692 +    __ ldx(from,16,L4);
   5.693 +    __ ldx(from,24,L5);
   5.694 +    __ xor3(L2,L4,G1);
   5.695 +    __ movxtod(G1,F60);
   5.696 +    __ xor3(L3,L5,G1);
   5.697 +    __ movxtod(G1,F62);
   5.698 +
   5.699 +    __ aes_dround23(F54, F0, F2, F4);
   5.700 +    __ aes_dround01(F52, F0, F2, F6);
   5.701 +    __ aes_dround23(F54, F60, F62, F58);
   5.702 +    __ aes_dround01(F52, F60, F62, F56);
   5.703 +    __ aes_dround23(F50, F6, F4, F2);
   5.704 +    __ aes_dround01(F48, F6, F4, F0);
   5.705 +    __ aes_dround23(F50, F56, F58, F62);
   5.706 +    __ aes_dround01(F48, F56, F58, F60);
   5.707 +    // save F48:F54 in temp registers
   5.708 +    __ movdtox(F54,G2);
   5.709 +    __ movdtox(F52,G3);
   5.710 +    __ movdtox(F50,G6);
   5.711 +    __ movdtox(F48,G1);
   5.712 +    for ( int i = 46;  i >= 14; i -= 8 ) {
   5.713 +      __ aes_dround23(as_FloatRegister(i), F0, F2, F4);
   5.714 +      __ aes_dround01(as_FloatRegister(i-2), F0, F2, F6);
   5.715 +      __ aes_dround23(as_FloatRegister(i), F60, F62, F58);
   5.716 +      __ aes_dround01(as_FloatRegister(i-2), F60, F62, F56);
   5.717 +      __ aes_dround23(as_FloatRegister(i-4), F6, F4, F2);
   5.718 +      __ aes_dround01(as_FloatRegister(i-6), F6, F4, F0);
   5.719 +      __ aes_dround23(as_FloatRegister(i-4), F56, F58, F62);
   5.720 +      __ aes_dround01(as_FloatRegister(i-6), F56, F58, F60);
   5.721 +    }
   5.722 +    // init F48:F54 with F0:F6 values (original key)
   5.723 +    __ ldf(FloatRegisterImpl::D, original_key, 0, F48);
   5.724 +    __ ldf(FloatRegisterImpl::D, original_key, 8, F50);
   5.725 +    __ ldf(FloatRegisterImpl::D, original_key, 16, F52);
   5.726 +    __ ldf(FloatRegisterImpl::D, original_key, 24, F54);
   5.727 +    __ aes_dround23(F54, F0, F2, F4);
   5.728 +    __ aes_dround01(F52, F0, F2, F6);
   5.729 +    __ aes_dround23(F54, F60, F62, F58);
   5.730 +    __ aes_dround01(F52, F60, F62, F56);
   5.731 +    __ aes_dround23_l(F50, F6, F4, F2);
   5.732 +    __ aes_dround01_l(F48, F6, F4, F0);
   5.733 +    __ aes_dround23_l(F50, F56, F58, F62);
   5.734 +    __ aes_dround01_l(F48, F56, F58, F60);
   5.735 +    // re-init F48:F54 with their original values
   5.736 +    __ movxtod(G2,F54);
   5.737 +    __ movxtod(G3,F52);
   5.738 +    __ movxtod(G6,F50);
   5.739 +    __ movxtod(G1,F48);
   5.740 +
   5.741 +    __ movxtod(L0,F6);
   5.742 +    __ movxtod(L1,F4);
   5.743 +    __ fxor(FloatRegisterImpl::D, F6, F0, F0);
   5.744 +    __ fxor(FloatRegisterImpl::D, F4, F2, F2);
   5.745 +
   5.746 +    __ stf(FloatRegisterImpl::D, F0, to, 0);
   5.747 +    __ stf(FloatRegisterImpl::D, F2, to, 8);
   5.748 +
   5.749 +    __ movxtod(G4,F56);
   5.750 +    __ movxtod(G5,F58);
   5.751 +    __ mov(L4,L0);
   5.752 +    __ mov(L5,L1);
   5.753 +    __ fxor(FloatRegisterImpl::D, F56, F60, F60);
   5.754 +    __ fxor(FloatRegisterImpl::D, F58, F62, F62);
   5.755 +
   5.756 +    __ stf(FloatRegisterImpl::D, F60, to, 16);
   5.757 +    __ stf(FloatRegisterImpl::D, F62, to, 24);
   5.758 +
   5.759 +    __ add(from, 32, from);
   5.760 +    __ add(to, 32, to);
   5.761 +    __ subcc(len_reg, 32, len_reg);
   5.762 +    __ br(Assembler::notEqual, false, Assembler::pt, L_dec_next2_blocks256);
   5.763 +    __ delayed()->nop();
   5.764 +
   5.765 +    __ BIND(L_cbcdec_end);
   5.766 +    __ stx(L0, rvec, 0);
   5.767 +    __ stx(L1, rvec, 8);
   5.768 +    __ restore();
   5.769 +    __ mov(L0, O0);
   5.770 +    __ retl();
   5.771 +    __ delayed()->nop();
   5.772 +
   5.773 +    return start;
   5.774 +  }
   5.775 +
   5.776    void generate_initial() {
   5.777      // Generates all stubs and initializes the entry points
   5.778  
   5.779 @@ -3368,6 +4137,14 @@
   5.780      generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
   5.781                                                         &StubRoutines::_safefetchN_fault_pc,
   5.782                                                         &StubRoutines::_safefetchN_continuation_pc);
   5.783 +
   5.784 +    // generate AES intrinsics code
   5.785 +    if (UseAESIntrinsics) {
   5.786 +      StubRoutines::_aescrypt_encryptBlock = generate_aescrypt_encryptBlock();
   5.787 +      StubRoutines::_aescrypt_decryptBlock = generate_aescrypt_decryptBlock();
   5.788 +      StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
   5.789 +      StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
   5.790 +    }
   5.791    }
   5.792  
   5.793  
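
All four stubs above implement AES in CBC mode. CBC encryption is inherently serial, since each block is XORed with the previous ciphertext before it is enciphered, so the L_cbcenc128/192/256 loops process one 16-byte block per iteration; CBC decryption has no such dependency apart from the final XOR, which is what the _Parallel decrypt stub exploits by handling two blocks per loop (the L_dec_next2_blocks* paths). A minimal software reference of that chaining structure, using a toy XOR "block cipher" purely so the sketch is self-contained, is:

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    static void xor16(uint8_t* d, const uint8_t* a, const uint8_t* b) {
      for (int i = 0; i < 16; i++) d[i] = a[i] ^ b[i];
    }

    // Toy stand-in for the 16-byte AES block transform that the stubs build
    // out of aes_eround*/aes_dround*; XOR is used only to keep this runnable.
    static void toy_block(const uint8_t in[16], uint8_t out[16], const uint8_t key[16]) {
      xor16(out, in, key);
    }

    // CBC encrypt: block i needs ciphertext i-1, so blocks cannot be overlapped.
    void cbc_encrypt(const uint8_t* in, uint8_t* out, size_t len,
                     const uint8_t key[16], uint8_t iv[16]) {
      uint8_t tmp[16];
      for (size_t off = 0; off < len; off += 16) {
        xor16(tmp, in + off, iv);            // chain in the previous ciphertext
        toy_block(tmp, out + off, key);
        std::memcpy(iv, out + off, 16);      // becomes the next block's IV
      }
    }

    // CBC decrypt: each block is deciphered independently; only the trailing
    // XOR uses the previous ciphertext, so two (or more) blocks can be in
    // flight at once, as in L_dec_next2_blocks128/192/256.
    void cbc_decrypt(const uint8_t* in, uint8_t* out, size_t len,
                     const uint8_t key[16], uint8_t iv[16]) {
      uint8_t plain[16];
      for (size_t off = 0; off < len; off += 16) {
        toy_block(in + off, plain, key);     // independent per block
        xor16(out + off, plain, iv);
        std::memcpy(iv, in + off, 16);
      }
    }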
     6.1 --- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Wed Feb 19 20:12:43 2014 -0800
     6.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Feb 25 15:11:18 2014 -0800
     6.3 @@ -234,7 +234,7 @@
     6.4    assert((OptoLoopAlignment % relocInfo::addr_unit()) == 0, "alignment is not a multiple of NOP size");
     6.5  
     6.6    char buf[512];
     6.7 -  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
     6.8 +  jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
     6.9                 (has_v9() ? ", v9" : (has_v8() ? ", v8" : "")),
    6.10                 (has_hardware_popc() ? ", popc" : ""),
    6.11                 (has_vis1() ? ", vis1" : ""),
    6.12 @@ -242,6 +242,7 @@
    6.13                 (has_vis3() ? ", vis3" : ""),
    6.14                 (has_blk_init() ? ", blk_init" : ""),
    6.15                 (has_cbcond() ? ", cbcond" : ""),
    6.16 +               (has_aes() ? ", aes" : ""),
    6.17                 (is_ultra3() ? ", ultra3" : ""),
    6.18                 (is_sun4v() ? ", sun4v" : ""),
    6.19                 (is_niagara_plus() ? ", niagara_plus" : (is_niagara() ? ", niagara" : "")),
    6.20 @@ -265,6 +266,41 @@
    6.21    if (!has_vis1()) // Drop to 0 if no VIS1 support
    6.22      UseVIS = 0;
    6.23  
    6.24 +  // T2 and above should have support for AES instructions
    6.25 +  if (has_aes()) {
    6.26 +    if (UseVIS > 0) { // AES intrinsics use FXOR instruction which is VIS1
    6.27 +      if (FLAG_IS_DEFAULT(UseAES)) {
    6.28 +        FLAG_SET_DEFAULT(UseAES, true);
    6.29 +      }
    6.30 +      if (FLAG_IS_DEFAULT(UseAESIntrinsics)) {
    6.31 +        FLAG_SET_DEFAULT(UseAESIntrinsics, true);
    6.32 +      }
    6.33 +      // we disable both the AES flags if either of them is disabled on the command line
    6.34 +      if (!UseAES || !UseAESIntrinsics) {
    6.35 +        FLAG_SET_DEFAULT(UseAES, false);
    6.36 +        FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    6.37 +      }
    6.38 +    } else {
    6.39 +        if (UseAES || UseAESIntrinsics) {
    6.40 +          warning("SPARC AES intrinsics require VIS1 instruction support. Intrinsics will be disabled.");
    6.41 +          if (UseAES) {
    6.42 +            FLAG_SET_DEFAULT(UseAES, false);
    6.43 +          }
    6.44 +          if (UseAESIntrinsics) {
    6.45 +            FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    6.46 +          }
    6.47 +        }
    6.48 +    }
    6.49 +  } else if (UseAES || UseAESIntrinsics) {
    6.50 +    warning("AES instructions are not available on this CPU");
    6.51 +    if (UseAES) {
    6.52 +      FLAG_SET_DEFAULT(UseAES, false);
    6.53 +    }
    6.54 +    if (UseAESIntrinsics) {
    6.55 +      FLAG_SET_DEFAULT(UseAESIntrinsics, false);
    6.56 +    }
    6.57 +  }
    6.58 +
    6.59    if (FLAG_IS_DEFAULT(ContendedPaddingWidth) &&
    6.60      (cache_line_size > ContendedPaddingWidth))
    6.61      ContendedPaddingWidth = cache_line_size;
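
The net effect of the flag logic above is that UseAES and UseAESIntrinsics always end up with the same value on SPARC: both default to on when the CPU reports AES and VIS1 (needed for FXOR) is usable, both are forced off with a warning otherwise, and explicitly disabling either one on the command line disables both. An illustrative distillation of that decision, not the VM code itself, is:

    // Final value shared by UseAES and UseAESIntrinsics on SPARC.
    // "either_explicitly_off" means -XX:-UseAES or -XX:-UseAESIntrinsics was
    // given on the command line.
    bool sparc_aes_enabled(bool has_aes, int use_vis, bool either_explicitly_off) {
      if (!has_aes)     return false;       // no AES opcodes: warn, force off
      if (use_vis <= 0) return false;       // FXOR needs VIS1: warn, force off
      return !either_explicitly_off;        // otherwise defaults turn both on
    }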
     7.1 --- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Wed Feb 19 20:12:43 2014 -0800
     7.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Tue Feb 25 15:11:18 2014 -0800
     7.3 @@ -48,7 +48,8 @@
     7.4      sparc64_family       = 14,
     7.5      M_family             = 15,
     7.6      T_family             = 16,
     7.7 -    T1_model             = 17
     7.8 +    T1_model             = 17,
     7.9 +    aes_instructions     = 18
    7.10    };
    7.11  
    7.12    enum Feature_Flag_Set {
    7.13 @@ -73,6 +74,7 @@
    7.14      M_family_m              = 1 << M_family,
    7.15      T_family_m              = 1 << T_family,
    7.16      T1_model_m              = 1 << T1_model,
    7.17 +    aes_instructions_m      = 1 << aes_instructions,
    7.18  
    7.19      generic_v8_m        = v8_instructions_m | hardware_mul32_m | hardware_div32_m | hardware_fsmuld_m,
    7.20      generic_v9_m        = generic_v8_m | v9_instructions_m,
    7.21 @@ -123,6 +125,7 @@
    7.22    static bool has_vis3()                { return (_features & vis3_instructions_m) != 0; }
    7.23    static bool has_blk_init()            { return (_features & blk_init_instructions_m) != 0; }
    7.24    static bool has_cbcond()              { return (_features & cbcond_instructions_m) != 0; }
    7.25 +  static bool has_aes()                 { return (_features & aes_instructions_m) != 0; }
    7.26  
    7.27    static bool supports_compare_and_exchange()
    7.28                                          { return has_v9(); }
     8.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Wed Feb 19 20:12:43 2014 -0800
     8.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Feb 25 15:11:18 2014 -0800
     8.3 @@ -2403,6 +2403,9 @@
     8.4    //   c_rarg3   - r vector byte array address
     8.5    //   c_rarg4   - input length
     8.6    //
     8.7 +  // Output:
     8.8 +  //   rax       - input length
     8.9 +  //
    8.10    address generate_cipherBlockChaining_encryptAESCrypt() {
    8.11      assert(UseAES, "need AES instructions and misaligned SSE support");
    8.12      __ align(CodeEntryAlignment);
    8.13 @@ -2483,7 +2486,7 @@
    8.14      __ movdqu(Address(rvec, 0), xmm_result);     // final value of r stored in rvec of CipherBlockChaining object
    8.15  
    8.16      handleSOERegisters(false /*restoring*/);
    8.17 -    __ movl(rax, 0);                             // return 0 (why?)
    8.18 +    __ movptr(rax, len_param); // return length
    8.19      __ leave();                                  // required for proper stackwalking of RuntimeStub frame
    8.20      __ ret(0);
    8.21  
    8.22 @@ -2557,6 +2560,9 @@
    8.23    //   c_rarg3   - r vector byte array address
    8.24    //   c_rarg4   - input length
    8.25    //
    8.26 +  // Output:
    8.27 +  //   rax       - input length
    8.28 +  //
    8.29  
    8.30    address generate_cipherBlockChaining_decryptAESCrypt() {
    8.31      assert(UseAES, "need AES instructions and misaligned SSE support");
    8.32 @@ -2650,7 +2656,7 @@
    8.33      __ movptr(rvec , rvec_param);                                     // restore this since used in loop
    8.34      __ movdqu(Address(rvec, 0), xmm_temp);                            // final value of r stored in rvec of CipherBlockChaining object
    8.35      handleSOERegisters(false /*restoring*/);
    8.36 -    __ movl(rax, 0);                                                  // return 0 (why?)
    8.37 +    __ movptr(rax, len_param); // return length
    8.38      __ leave();                                                       // required for proper stackwalking of RuntimeStub frame
    8.39      __ ret(0);
    8.40  
     9.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Wed Feb 19 20:12:43 2014 -0800
     9.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Feb 25 15:11:18 2014 -0800
     9.3 @@ -3217,6 +3217,9 @@
     9.4    //   c_rarg3   - r vector byte array address
     9.5    //   c_rarg4   - input length
     9.6    //
     9.7 +  // Output:
     9.8 +  //   rax       - input length
     9.9 +  //
    9.10    address generate_cipherBlockChaining_encryptAESCrypt() {
    9.11      assert(UseAES, "need AES instructions and misaligned SSE support");
    9.12      __ align(CodeEntryAlignment);
    9.13 @@ -3232,7 +3235,7 @@
    9.14  #ifndef _WIN64
    9.15      const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
    9.16  #else
    9.17 -    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
    9.18 +    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    9.19      const Register len_reg     = r10;      // pick the first volatile windows register
    9.20  #endif
    9.21      const Register pos         = rax;
    9.22 @@ -3259,6 +3262,8 @@
    9.23      for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
    9.24        __ movdqu(xmm_save(i), as_XMMRegister(i));
    9.25      }
    9.26 +#else
    9.27 +    __ push(len_reg); // Save
    9.28  #endif
    9.29  
    9.30      const XMMRegister xmm_key_shuf_mask = xmm_temp;  // used temporarily to swap key bytes up front
    9.31 @@ -3301,8 +3306,10 @@
    9.32      for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
    9.33        __ movdqu(as_XMMRegister(i), xmm_save(i));
    9.34      }
    9.35 +    __ movl(rax, len_mem);
    9.36 +#else
    9.37 +    __ pop(rax); // return length
    9.38  #endif
    9.39 -    __ movl(rax, 0); // return 0 (why?)
    9.40      __ leave(); // required for proper stackwalking of RuntimeStub frame
    9.41      __ ret(0);
    9.42  
    9.43 @@ -3409,6 +3416,9 @@
    9.44    //   c_rarg3   - r vector byte array address
    9.45    //   c_rarg4   - input length
    9.46    //
    9.47 +  // Output:
    9.48 +  //   rax       - input length
    9.49 +  //
    9.50  
    9.51    address generate_cipherBlockChaining_decryptAESCrypt_Parallel() {
    9.52      assert(UseAES, "need AES instructions and misaligned SSE support");
    9.53 @@ -3427,7 +3437,7 @@
    9.54  #ifndef _WIN64
    9.55      const Register len_reg     = c_rarg4;  // src len (must be multiple of blocksize 16)
    9.56  #else
    9.57 -    const Address  len_mem(rsp, 6 * wordSize);  // length is on stack on Win64
    9.58 +    const Address  len_mem(rbp, 6 * wordSize);  // length is on stack on Win64
    9.59      const Register len_reg     = r10;      // pick the first volatile windows register
    9.60  #endif
    9.61      const Register pos         = rax;
    9.62 @@ -3448,7 +3458,10 @@
    9.63      for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
    9.64        __ movdqu(xmm_save(i), as_XMMRegister(i));
    9.65      }
    9.66 +#else
    9.67 +    __ push(len_reg); // Save
    9.68  #endif
    9.69 +
    9.70      // the java expanded key ordering is rotated one position from what we want
    9.71      // so we start from 0x10 here and hit 0x00 last
    9.72      const XMMRegister xmm_key_shuf_mask = xmm1;  // used temporarily to swap key bytes up front
    9.73 @@ -3554,8 +3567,10 @@
    9.74      for (int i = 6; i <= XMM_REG_NUM_KEY_LAST; i++) {
    9.75        __ movdqu(as_XMMRegister(i), xmm_save(i));
    9.76      }
    9.77 +    __ movl(rax, len_mem);
    9.78 +#else
    9.79 +    __ pop(rax); // return length
    9.80  #endif
    9.81 -    __ movl(rax, 0); // return 0 (why?)
    9.82      __ leave(); // required for proper stackwalking of RuntimeStub frame
    9.83      __ ret(0);
    9.84  
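Note on the stubGenerator_x86_32.cpp and stubGenerator_x86_64.cpp hunks above: both CBC stubs used to clear rax ("return 0 (why?)") and now return the input length, matching the intrinsic descriptor change to "...)I" in vmSymbols.hpp further down. The sketch below only illustrates that calling convention; the body copies bytes as a placeholder and is not AES-CBC.

#include <cassert>
#include <cstdint>
#include <cstring>

// Stub-shaped entry point: process 'len' input bytes and report the number of
// bytes consumed through the return value (what the stubs now leave in rax).
static int cbc_like_stub(const uint8_t* in, uint8_t* out, int len) {
  std::memcpy(out, in, static_cast<size_t>(len));   // placeholder, not a cipher
  return len;
}

int main() {
  uint8_t in[32] = {1, 2, 3};
  uint8_t out[32];
  int processed = cbc_like_stub(in, out, static_cast<int>(sizeof(in)));
  assert(processed == 32);          // callers can now rely on the processed length
  return 0;
}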
    10.1 --- a/src/cpu/x86/vm/x86.ad	Wed Feb 19 20:12:43 2014 -0800
    10.2 +++ b/src/cpu/x86/vm/x86.ad	Tue Feb 25 15:11:18 2014 -0800
    10.3 @@ -581,6 +581,12 @@
    10.4    return !AlignVector; // can be changed by flag
    10.5  }
    10.6  
    10.7 +// x86 AES instructions are compatible with SunJCE expanded
    10.8 +// keys, hence we do not need to pass the original key to stubs
    10.9 +const bool Matcher::pass_original_key_for_aes() {
   10.10 +  return false;
   10.11 +}
   10.12 +
   10.13  // Helper methods for MachSpillCopyNode::implementation().
   10.14  static int vec_mov_helper(CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
   10.15                            int src_hi, int dst_hi, uint ireg, outputStream* st) {
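Note on the x86.ad hunk above: the new Matcher::pass_original_key_for_aes() hook records that x86 AES instructions consume the SunJCE-expanded key schedule directly, so compiled intrinsics do not pass the original key to the stubs. The sketch below is a hypothetical illustration of how such a predicate could select which key representation a stub receives; the AesSession type and helper are made up for the example.

#include <cstdint>
#include <vector>

// Hypothetical session object holding both key representations.
struct AesSession {
  std::vector<uint8_t>  original_key;   // raw cipher key
  std::vector<uint32_t> expanded_key;   // SunJCE-style round-key schedule
};

// x86-style answer: hardware works directly on the expanded schedule.
constexpr bool pass_original_key_for_aes() { return false; }

// Choose the key argument a (hypothetical) stub call would receive.
static const void* key_argument_for_stub(const AesSession& s) {
  return pass_original_key_for_aes()
      ? static_cast<const void*>(s.original_key.data())
      : static_cast<const void*>(s.expanded_key.data());
}

int main() {
  AesSession s{std::vector<uint8_t>(16, 0), std::vector<uint32_t>(44, 0)};
  return key_argument_for_stub(s) == s.expanded_key.data() ? 0 : 1;
}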
    11.1 --- a/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Wed Feb 19 20:12:43 2014 -0800
    11.2 +++ b/src/os_cpu/solaris_sparc/vm/vm_version_solaris_sparc.cpp	Tue Feb 25 15:11:18 2014 -0800
    11.3 @@ -119,6 +119,11 @@
    11.4  #endif
    11.5      if (av & AV_SPARC_CBCOND)       features |= cbcond_instructions_m;
    11.6  
    11.7 +#ifndef AV_SPARC_AES
    11.8 +#define AV_SPARC_AES 0x00020000  /* aes instrs supported */
    11.9 +#endif
   11.10 +    if (av & AV_SPARC_AES)       features |= aes_instructions_m;
   11.11 +
   11.12    } else {
   11.13      // getisax(2) failed, use the old legacy code.
   11.14  #ifndef PRODUCT
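Note on the vm_version_solaris_sparc.cpp hunk above: AV_SPARC_AES is defined locally in case the build headers are too old to provide it, and the capability word returned by getisax(2) is tested against that mask to set the VM's aes_instructions_m feature bit (bit 18, per the vm_version_sparc.hpp hunk). The same mask-and-map pattern in isolation, with a fake capability word instead of a getisax(2) call:

#include <cstdint>
#include <cstdio>

#ifndef AV_SPARC_AES                    // older headers may not define the bit
#define AV_SPARC_AES 0x00020000         /* aes instrs supported */
#endif

static const uint32_t aes_instructions_m = 1u << 18;   // matches aes_instructions = 18 above

// Map a hardware-capability word onto the VM-internal feature mask.
static uint32_t features_from_av(uint32_t av) {
  uint32_t features = 0;
  if (av & AV_SPARC_AES) features |= aes_instructions_m;
  return features;
}

int main() {
  uint32_t av = AV_SPARC_AES;           // pretend getisax(2) reported AES support
  std::printf("has_aes=%d\n", (features_from_av(av) & aes_instructions_m) != 0);
  return 0;
}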
    12.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Wed Feb 19 20:12:43 2014 -0800
    12.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Feb 25 15:11:18 2014 -0800
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -520,6 +520,13 @@
   12.11    }
   12.12  }
   12.13  
   12.14 +bool ClassLoaderData::contains_klass(Klass* klass) {
   12.15 +  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
   12.16 +    if (k == klass) return true;
   12.17 +  }
   12.18 +  return false;
   12.19 +}
   12.20 +
   12.21  
   12.22  // GC root of class loader data created.
   12.23  ClassLoaderData* ClassLoaderDataGraph::_head = NULL;
   12.24 @@ -648,12 +655,12 @@
   12.25    return array;
   12.26  }
   12.27  
   12.28 -#ifndef PRODUCT
   12.29 -// for debugging and hsfind(x)
   12.30 -bool ClassLoaderDataGraph::contains(address x) {
   12.31 -  // I think we need the _metaspace_lock taken here because the class loader
   12.32 -  // data graph could be changing while we are walking it (new entries added,
   12.33 -  // new entries being unloaded, etc).
   12.34 +// For profiling and hsfind() only.  Otherwise, this is unsafe (and slow).  This
   12.35 +// is done lock free to avoid lock inversion problems.  It is safe because
   12.36 +// new ClassLoaderData are added to the end of the CLDG, and only removed at
   12.37 +// safepoint.  The _unloading list can be deallocated concurrently with CMS so
   12.38 +// this doesn't look in metaspace for classes that have been unloaded.
   12.39 +bool ClassLoaderDataGraph::contains(const void* x) {
   12.40    if (DumpSharedSpaces) {
   12.41      // There are only two metaspaces to worry about.
   12.42      ClassLoaderData* ncld = ClassLoaderData::the_null_class_loader_data();
   12.43 @@ -670,16 +677,11 @@
   12.44      }
   12.45    }
   12.46  
   12.47 -  // Could also be on an unloading list which is okay, ie. still allocated
   12.48 -  // for a little while.
   12.49 -  for (ClassLoaderData* ucld = _unloading; ucld != NULL; ucld = ucld->next()) {
   12.50 -    if (ucld->metaspace_or_null() != NULL && ucld->metaspace_or_null()->contains(x)) {
   12.51 -      return true;
   12.52 -    }
   12.53 -  }
   12.54 +  // Do not check unloading list because deallocation can be concurrent.
   12.55    return false;
   12.56  }
   12.57  
   12.58 +#ifndef PRODUCT
   12.59  bool ClassLoaderDataGraph::contains_loader_data(ClassLoaderData* loader_data) {
   12.60    for (ClassLoaderData* data = _head; data != NULL; data = data->next()) {
   12.61      if (loader_data == data) {
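Note on the classLoaderData.cpp hunk above: contains() is now lock free, and the new comment gives the safety argument - class loader data is only appended to the graph, removal happens at a safepoint, and the concurrently-deallocated _unloading list is deliberately skipped. The sketch below shows the general publish-then-traverse idea with std::atomic (insertion is at the head here just to keep it short; HotSpot itself uses OrderAccess barriers such as the storestore() added in metaspace.cpp below, not std::atomic):

#include <atomic>
#include <cstdio>

struct Node {
  int value;
  std::atomic<Node*> next;
  explicit Node(int v) : value(v), next(nullptr) {}
};

static std::atomic<Node*> head{nullptr};

// Writer: fully initialize the node, then publish it with a release CAS so a
// lock-free reader that sees the pointer also sees the initialized contents.
static void push_front(Node* n) {
  Node* old_head = head.load(std::memory_order_relaxed);
  do {
    n->next.store(old_head, std::memory_order_relaxed);
  } while (!head.compare_exchange_weak(old_head, n, std::memory_order_release,
                                       std::memory_order_relaxed));
}

// Reader: traverse without a lock; acquire loads pair with the release publish.
static bool contains(int v) {
  for (Node* n = head.load(std::memory_order_acquire); n != nullptr;
       n = n->next.load(std::memory_order_acquire)) {
    if (n->value == v) return true;
  }
  return false;
}

int main() {
  static Node a(1), b(2);
  push_front(&a);
  push_front(&b);
  std::printf("%d %d\n", contains(2), contains(3));   // prints "1 0"
  return 0;
}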
    13.1 --- a/src/share/vm/classfile/classLoaderData.hpp	Wed Feb 19 20:12:43 2014 -0800
    13.2 +++ b/src/share/vm/classfile/classLoaderData.hpp	Tue Feb 25 15:11:18 2014 -0800
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    13.6 + * Copyright (c) 2012, 2014, Oracle and/or its affiliates. All rights reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -90,9 +90,9 @@
   13.11    static void dump() { dump_on(tty); }
   13.12    static void verify();
   13.13  
   13.14 +  // expensive test for pointer in metaspace for debugging
   13.15 +  static bool contains(const void* x);
   13.16  #ifndef PRODUCT
   13.17 -  // expensive test for pointer in metaspace for debugging
   13.18 -  static bool contains(address x);
   13.19    static bool contains_loader_data(ClassLoaderData* loader_data);
   13.20  #endif
   13.21  
   13.22 @@ -260,6 +260,7 @@
   13.23    jobject add_handle(Handle h);
   13.24    void add_class(Klass* k);
   13.25    void remove_class(Klass* k);
   13.26 +  bool contains_klass(Klass* k);
   13.27    void record_dependency(Klass* to, TRAPS);
   13.28    void init_dependencies(TRAPS);
   13.29  
    14.1 --- a/src/share/vm/classfile/dictionary.cpp	Wed Feb 19 20:12:43 2014 -0800
    14.2 +++ b/src/share/vm/classfile/dictionary.cpp	Tue Feb 25 15:11:18 2014 -0800
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -707,7 +707,7 @@
   14.11                  loader_data->class_loader() == NULL ||
   14.12                  loader_data->class_loader()->is_instance(),
   14.13                  "checking type of class_loader");
   14.14 -      e->verify(/*check_dictionary*/false);
   14.15 +      e->verify();
   14.16        probe->verify_protection_domain_set();
   14.17        element_count++;
   14.18      }
    15.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Wed Feb 19 20:12:43 2014 -0800
    15.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Tue Feb 25 15:11:18 2014 -0800
    15.3 @@ -1,5 +1,5 @@
    15.4  /*
    15.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    15.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    15.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    15.8   *
    15.9   * This code is free software; you can redistribute it and/or modify it
   15.10 @@ -2650,23 +2650,6 @@
   15.11    constraints()->verify(dictionary(), placeholders());
   15.12  }
   15.13  
   15.14 -
   15.15 -void SystemDictionary::verify_obj_klass_present(Symbol* class_name,
   15.16 -                                                ClassLoaderData* loader_data) {
   15.17 -  GCMutexLocker mu(SystemDictionary_lock);
   15.18 -  Symbol* name;
   15.19 -
   15.20 -  Klass* probe = find_class(class_name, loader_data);
   15.21 -  if (probe == NULL) {
   15.22 -    probe = SystemDictionary::find_shared_class(class_name);
   15.23 -    if (probe == NULL) {
   15.24 -      name = find_placeholder(class_name, loader_data);
   15.25 -    }
   15.26 -  }
   15.27 -  guarantee(probe != NULL || name != NULL,
   15.28 -            "Loaded klasses should be in SystemDictionary");
   15.29 -}
   15.30 -
   15.31  // utility function for class load event
   15.32  void SystemDictionary::post_class_load_event(const Ticks& start_time,
   15.33                                               instanceKlassHandle k,
    16.1 --- a/src/share/vm/classfile/systemDictionary.hpp	Wed Feb 19 20:12:43 2014 -0800
    16.2 +++ b/src/share/vm/classfile/systemDictionary.hpp	Tue Feb 25 15:11:18 2014 -0800
    16.3 @@ -1,5 +1,5 @@
    16.4  /*
    16.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    16.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    16.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    16.8   *
    16.9   * This code is free software; you can redistribute it and/or modify it
   16.10 @@ -375,10 +375,6 @@
   16.11    static bool is_internal_format(Symbol* class_name);
   16.12  #endif
   16.13  
   16.14 -  // Verify class is in dictionary
   16.15 -  static void verify_obj_klass_present(Symbol* class_name,
   16.16 -                                       ClassLoaderData* loader_data);
   16.17 -
   16.18    // Initialization
   16.19    static void initialize(TRAPS);
   16.20  
    17.1 --- a/src/share/vm/classfile/vmSymbols.hpp	Wed Feb 19 20:12:43 2014 -0800
    17.2 +++ b/src/share/vm/classfile/vmSymbols.hpp	Tue Feb 25 15:11:18 2014 -0800
    17.3 @@ -787,7 +787,7 @@
    17.4     do_intrinsic(_cipherBlockChaining_decryptAESCrypt, com_sun_crypto_provider_cipherBlockChaining, decrypt_name, byteArray_int_int_byteArray_int_signature, F_R)   \
    17.5     do_name(     encrypt_name,                                      "encrypt")                                           \
    17.6     do_name(     decrypt_name,                                      "decrypt")                                           \
    17.7 -   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)V")                                        \
    17.8 +   do_signature(byteArray_int_int_byteArray_int_signature,         "([BII[BI)I")                                        \
    17.9                                                                                                                          \
   17.10    /* support for java.util.zip */                                                                                       \
   17.11    do_class(java_util_zip_CRC32,           "java/util/zip/CRC32")                                                        \
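Note on the vmSymbols.hpp hunk above: the CBC encrypt/decrypt intrinsic descriptor changes from "([BII[BI)V" to "([BII[BI)I", i.e. the intrinsified method now returns an int (the processed length the stubs above place in rax) instead of void. A tiny decoder for just the tokens this descriptor uses:

#include <cstdio>
#include <string>

// Decodes a flat JVM method descriptor that only uses 'I' (int), 'V' (void) and '[B' (byte[]).
static void print_descriptor(const std::string& desc) {
  std::printf("params:");
  for (size_t i = 0; i < desc.size(); ++i) {
    char c = desc[i];
    if (c == '(') continue;
    if (c == ')') { std::printf("\nreturns:"); continue; }
    if (c == 'I') std::printf(" int");
    else if (c == 'V') std::printf(" void");
    else if (c == '[' && i + 1 < desc.size() && desc[i + 1] == 'B') { std::printf(" byte[]"); ++i; }
    else std::printf(" ?");
  }
  std::printf("\n");
}

int main() {
  print_descriptor("([BII[BI)I");   // params: byte[] int int byte[] int   returns: int
  return 0;
}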
    18.1 --- a/src/share/vm/code/dependencies.cpp	Wed Feb 19 20:12:43 2014 -0800
    18.2 +++ b/src/share/vm/code/dependencies.cpp	Tue Feb 25 15:11:18 2014 -0800
    18.3 @@ -655,8 +655,6 @@
    18.4    } else {
    18.5      o = _deps->oop_recorder()->metadata_at(i);
    18.6    }
    18.7 -  assert(o == NULL || o->is_metaspace_object(),
    18.8 -         err_msg("Should be metadata " PTR_FORMAT, o));
    18.9    return o;
   18.10  }
   18.11  
    19.1 --- a/src/share/vm/code/vtableStubs.cpp	Wed Feb 19 20:12:43 2014 -0800
    19.2 +++ b/src/share/vm/code/vtableStubs.cpp	Tue Feb 25 15:11:18 2014 -0800
    19.3 @@ -55,6 +55,9 @@
    19.4    const int chunk_factor = 32;
    19.5    if (_chunk == NULL || _chunk + real_size > _chunk_end) {
    19.6      const int bytes = chunk_factor * real_size + pd_code_alignment();
    19.7 +
     19.8 +    // There is a dependency on the name of the blob in src/share/vm/prims/jvmtiCodeBlobEvents.cpp
     19.9 +    // If changing the name, update the other file accordingly.
   19.10      BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
   19.11      if (blob == NULL) {
   19.12        return NULL;
   19.13 @@ -62,12 +65,6 @@
   19.14      _chunk = blob->content_begin();
   19.15      _chunk_end = _chunk + bytes;
   19.16      Forte::register_stub("vtable stub", _chunk, _chunk_end);
   19.17 -    // Notify JVMTI about this stub. The event will be recorded by the enclosing
   19.18 -    // JvmtiDynamicCodeEventCollector and posted when this thread has released
   19.19 -    // all locks.
   19.20 -    if (JvmtiExport::should_post_dynamic_code_generated()) {
   19.21 -      JvmtiExport::post_dynamic_code_generated_while_holding_locks("vtable stub", _chunk, _chunk_end);
   19.22 -    }
   19.23      align_chunk();
   19.24    }
   19.25    assert(_chunk + real_size <= _chunk_end, "bad allocation");
   19.26 @@ -130,6 +127,13 @@
   19.27                      is_vtable_stub? "vtbl": "itbl", vtable_index, VtableStub::receiver_location());
   19.28        Disassembler::decode(s->code_begin(), s->code_end());
   19.29      }
   19.30 +    // Notify JVMTI about this stub. The event will be recorded by the enclosing
   19.31 +    // JvmtiDynamicCodeEventCollector and posted when this thread has released
   19.32 +    // all locks.
   19.33 +    if (JvmtiExport::should_post_dynamic_code_generated()) {
   19.34 +      JvmtiExport::post_dynamic_code_generated_while_holding_locks(is_vtable_stub? "vtable stub": "itable stub",
   19.35 +                                                                   s->code_begin(), s->code_end());
   19.36 +    }
   19.37    }
   19.38    return s->entry_point();
   19.39  }
   19.40 @@ -195,6 +199,14 @@
   19.41    VtableStubs::initialize();
   19.42  }
   19.43  
   19.44 +void VtableStubs::vtable_stub_do(void f(VtableStub*)) {
    19.45 +  for (int i = 0; i < N; i++) {
    19.46 +    for (VtableStub* s = _table[i]; s != NULL; s = s->next()) {
    19.47 +      f(s);
    19.48 +    }
    19.49 +  }
   19.50 +}
   19.51 +
   19.52  
   19.53  //-----------------------------------------------------------------------------------------------------
   19.54  // Non-product code
    20.1 --- a/src/share/vm/code/vtableStubs.hpp	Wed Feb 19 20:12:43 2014 -0800
    20.2 +++ b/src/share/vm/code/vtableStubs.hpp	Tue Feb 25 15:11:18 2014 -0800
    20.3 @@ -131,6 +131,7 @@
    20.4    static VtableStub* stub_containing(address pc);                    // stub containing pc or NULL
    20.5    static int         number_of_vtable_stubs() { return _number_of_vtable_stubs; }
    20.6    static void        initialize();
    20.7 +  static void        vtable_stub_do(void f(VtableStub*));            // iterates over all vtable stubs
    20.8  };
    20.9  
   20.10  #endif // SHARE_VM_CODE_VTABLESTUBS_HPP
    21.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Wed Feb 19 20:12:43 2014 -0800
    21.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Tue Feb 25 15:11:18 2014 -0800
    21.3 @@ -564,16 +564,7 @@
    21.4      }
    21.5    }
    21.6  
    21.7 -  // 5. check if method is concrete
    21.8 -  if (resolved_method->is_abstract() && !resolved_klass->is_abstract()) {
    21.9 -    ResourceMark rm(THREAD);
   21.10 -    THROW_MSG(vmSymbols::java_lang_AbstractMethodError(),
   21.11 -              Method::name_and_sig_as_C_string(resolved_klass(),
   21.12 -                                                      method_name,
   21.13 -                                                      method_signature));
   21.14 -  }
   21.15 -
   21.16 -  // 6. access checks, access checking may be turned off when calling from within the VM.
   21.17 +  // 5. access checks, access checking may be turned off when calling from within the VM.
   21.18    if (check_access) {
   21.19      assert(current_klass.not_null() , "current_klass should not be null");
   21.20  
    22.1 --- a/src/share/vm/interpreter/rewriter.cpp	Wed Feb 19 20:12:43 2014 -0800
    22.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Tue Feb 25 15:11:18 2014 -0800
    22.3 @@ -1,5 +1,5 @@
    22.4  /*
    22.5 - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    22.6 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
    22.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    22.8   *
    22.9   * This code is free software; you can redistribute it and/or modify it
   22.10 @@ -250,8 +250,8 @@
   22.11      // We will reverse the bytecode rewriting _after_ adjusting them.
   22.12      // Adjust the cache index by offset to the invokedynamic entries in the
   22.13      // cpCache plus the delta if the invokedynamic bytecodes were adjusted.
   22.14 -    cache_index = cp_cache_delta() + _first_iteration_cp_cache_limit;
   22.15 -    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index);
   22.16 +    int adjustment = cp_cache_delta() + _first_iteration_cp_cache_limit;
   22.17 +    int cp_index = invokedynamic_cp_cache_entry_pool_index(cache_index - adjustment);
   22.18      assert(_pool->tag_at(cp_index).is_invoke_dynamic(), "wrong index");
   22.19      // zero out 4 bytes
   22.20      Bytes::put_Java_u4(p, 0);
   22.21 @@ -453,18 +453,7 @@
   22.22    return method;
   22.23  }
   22.24  
   22.25 -void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   22.26 -  ResourceMark rm(THREAD);
   22.27 -  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
   22.28 -  // (That's all, folks.)
   22.29 -}
   22.30 -
   22.31 -
   22.32 -Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
   22.33 -  : _klass(klass),
   22.34 -    _pool(cpool),
   22.35 -    _methods(methods)
   22.36 -{
   22.37 +void Rewriter::rewrite_bytecodes(TRAPS) {
   22.38    assert(_pool->cache() == NULL, "constant pool cache must not be set yet");
   22.39  
   22.40    // determine index maps for Method* rewriting
   22.41 @@ -508,6 +497,29 @@
   22.42    // May have to fix invokedynamic bytecodes if invokestatic/InterfaceMethodref
   22.43    // entries had to be added.
   22.44    patch_invokedynamic_bytecodes();
   22.45 +}
   22.46 +
   22.47 +void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   22.48 +  ResourceMark rm(THREAD);
   22.49 +  Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
   22.50 +  // (That's all, folks.)
   22.51 +}
   22.52 +
   22.53 +
   22.54 +Rewriter::Rewriter(instanceKlassHandle klass, constantPoolHandle cpool, Array<Method*>* methods, TRAPS)
   22.55 +  : _klass(klass),
   22.56 +    _pool(cpool),
   22.57 +    _methods(methods)
   22.58 +{
   22.59 +
    22.60 +  // Rewrite bytecodes - an exception here exits the constructor.
   22.61 +  rewrite_bytecodes(CHECK);
   22.62 +
   22.63 +  // Stress restoring bytecodes
   22.64 +  if (StressRewriter) {
   22.65 +    restore_bytecodes();
   22.66 +    rewrite_bytecodes(CHECK);
   22.67 +  }
   22.68  
   22.69    // allocate constant pool cache, now that we've seen all the bytecodes
   22.70    make_constant_pool_cache(THREAD);
   22.71 @@ -523,6 +535,7 @@
    22.72    // so methods with jsrs in custom class lists aren't attempted to be
   22.73    // rewritten in the RO section of the shared archive.
   22.74    // Relocated bytecodes don't have to be restored, only the cp cache entries
   22.75 +  int len = _methods->length();
   22.76    for (int i = len-1; i >= 0; i--) {
   22.77      methodHandle m(THREAD, _methods->at(i));
   22.78  
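Note on the rewriter.cpp hunk above: the constructor's body is factored into rewrite_bytecodes(), and under the StressRewriter flag the constructor deliberately rewrites, restores, and rewrites again before the constant pool cache is built, exercising the restore path. The toy below illustrates only that round-trip stress pattern on an unrelated string transform; it has nothing to do with bytecodes.

#include <cassert>
#include <string>

// Toy "rewrite": uppercase every 'a'; "restore" is its exact inverse here.
static void rewrite(std::string& s) { for (char& c : s) if (c == 'a') c = 'A'; }
static void restore(std::string& s) { for (char& c : s) if (c == 'A') c = 'a'; }

int main() {
  const bool stress = true;            // stand-in for a StressRewriter-style flag
  std::string data = "banana";

  rewrite(data);
  if (stress) {                        // exercise restore plus a second rewrite
    restore(data);
    rewrite(data);
  }
  assert(data == "bAnAnA");            // the stressed path must reach the same state
  return 0;
}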
    23.1 --- a/src/share/vm/interpreter/rewriter.hpp	Wed Feb 19 20:12:43 2014 -0800
    23.2 +++ b/src/share/vm/interpreter/rewriter.hpp	Tue Feb 25 15:11:18 2014 -0800
    23.3 @@ -1,5 +1,5 @@
    23.4  /*
    23.5 - * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    23.6 + * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
    23.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    23.8   *
    23.9   * This code is free software; you can redistribute it and/or modify it
   23.10 @@ -199,6 +199,9 @@
   23.11  
   23.12    void patch_invokedynamic_bytecodes();
   23.13  
    23.14 +  // Do all the bytecode rewriting work; the cp cache is built afterwards.
   23.15 +  void rewrite_bytecodes(TRAPS);
   23.16 +
   23.17    // Revert bytecodes in case of an exception.
   23.18    void restore_bytecodes();
   23.19  
    24.1 --- a/src/share/vm/memory/allocation.cpp	Wed Feb 19 20:12:43 2014 -0800
    24.2 +++ b/src/share/vm/memory/allocation.cpp	Tue Feb 25 15:11:18 2014 -0800
    24.3 @@ -74,9 +74,8 @@
    24.4    return MetaspaceShared::is_in_shared_space(this);
    24.5  }
    24.6  
    24.7 -
    24.8  bool MetaspaceObj::is_metaspace_object() const {
    24.9 -  return Metaspace::contains((void*)this);
   24.10 +  return ClassLoaderDataGraph::contains((void*)this);
   24.11  }
   24.12  
   24.13  void MetaspaceObj::print_address_on(outputStream* st) const {
    25.1 --- a/src/share/vm/memory/allocation.hpp	Wed Feb 19 20:12:43 2014 -0800
    25.2 +++ b/src/share/vm/memory/allocation.hpp	Tue Feb 25 15:11:18 2014 -0800
    25.3 @@ -267,7 +267,7 @@
    25.4  
    25.5  class MetaspaceObj {
    25.6   public:
    25.7 -  bool is_metaspace_object() const;  // more specific test but slower
    25.8 +  bool is_metaspace_object() const;
    25.9    bool is_shared() const;
   25.10    void print_address_on(outputStream* st) const;  // nonvirtual address printing
   25.11  
    26.1 --- a/src/share/vm/memory/metachunk.hpp	Wed Feb 19 20:12:43 2014 -0800
    26.2 +++ b/src/share/vm/memory/metachunk.hpp	Tue Feb 25 15:11:18 2014 -0800
    26.3 @@ -143,6 +143,8 @@
    26.4    void set_is_tagged_free(bool v) { _is_tagged_free = v; }
    26.5  #endif
    26.6  
    26.7 +  bool contains(const void* ptr) { return bottom() <= ptr && ptr < _top; }
    26.8 +
    26.9    NOT_PRODUCT(void mangle();)
   26.10  
   26.11    void print_on(outputStream* st) const;
    27.1 --- a/src/share/vm/memory/metaspace.cpp	Wed Feb 19 20:12:43 2014 -0800
    27.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Feb 25 15:11:18 2014 -0800
    27.3 @@ -513,8 +513,6 @@
    27.4    // Unlink empty VirtualSpaceNodes and free it.
    27.5    void purge(ChunkManager* chunk_manager);
    27.6  
    27.7 -  bool contains(const void *ptr);
    27.8 -
    27.9    void print_on(outputStream* st) const;
   27.10  
   27.11    class VirtualSpaceListIterator : public StackObj {
   27.12 @@ -558,7 +556,7 @@
   27.13  
   27.14   private:
   27.15  
   27.16 -  // protects allocations and contains.
   27.17 +  // protects allocations
   27.18    Mutex* const _lock;
   27.19  
   27.20    // Type of metadata allocated.
   27.21 @@ -595,7 +593,11 @@
   27.22   private:
   27.23    // Accessors
   27.24    Metachunk* chunks_in_use(ChunkIndex index) const { return _chunks_in_use[index]; }
   27.25 -  void set_chunks_in_use(ChunkIndex index, Metachunk* v) { _chunks_in_use[index] = v; }
   27.26 +  void set_chunks_in_use(ChunkIndex index, Metachunk* v) {
    27.27 +    // ensure lock-free iteration sees a fully initialized chunk
   27.28 +    OrderAccess::storestore();
   27.29 +    _chunks_in_use[index] = v;
   27.30 +  }
   27.31  
   27.32    BlockFreelist* block_freelists() const {
   27.33      return (BlockFreelist*) &_block_freelists;
   27.34 @@ -708,6 +710,8 @@
   27.35    void print_on(outputStream* st) const;
   27.36    void locked_print_chunks_in_use_on(outputStream* st) const;
   27.37  
   27.38 +  bool contains(const void *ptr);
   27.39 +
   27.40    void verify();
   27.41    void verify_chunk_size(Metachunk* chunk);
   27.42    NOT_PRODUCT(void mangle_freed_chunks();)
   27.43 @@ -1159,8 +1163,6 @@
   27.44    } else {
   27.45      assert(new_entry->reserved_words() == vs_word_size,
   27.46          "Reserved memory size differs from requested memory size");
   27.47 -    // ensure lock-free iteration sees fully initialized node
   27.48 -    OrderAccess::storestore();
   27.49      link_vs(new_entry);
   27.50      return true;
   27.51    }
   27.52 @@ -1287,19 +1289,6 @@
   27.53    }
   27.54  }
   27.55  
   27.56 -bool VirtualSpaceList::contains(const void *ptr) {
   27.57 -  VirtualSpaceNode* list = virtual_space_list();
   27.58 -  VirtualSpaceListIterator iter(list);
   27.59 -  while (iter.repeat()) {
   27.60 -    VirtualSpaceNode* node = iter.get_next();
   27.61 -    if (node->reserved()->contains(ptr)) {
   27.62 -      return true;
   27.63 -    }
   27.64 -  }
   27.65 -  return false;
   27.66 -}
   27.67 -
   27.68 -
   27.69  // MetaspaceGC methods
   27.70  
   27.71  // VM_CollectForMetadataAllocation is the vm operation used to GC.
   27.72 @@ -2392,6 +2381,21 @@
   27.73    return result;
   27.74  }
   27.75  
   27.76 +// This function looks at the chunks in the metaspace without locking.
   27.77 +// The chunks are added with store ordering and not deleted except for at
   27.78 +// unloading time.
   27.79 +bool SpaceManager::contains(const void *ptr) {
   27.80 +  for (ChunkIndex i = ZeroIndex; i < NumberOfInUseLists; i = next_chunk_index(i))
   27.81 +  {
   27.82 +    Metachunk* curr = chunks_in_use(i);
   27.83 +    while (curr != NULL) {
   27.84 +      if (curr->contains(ptr)) return true;
   27.85 +      curr = curr->next();
   27.86 +    }
   27.87 +  }
   27.88 +  return false;
   27.89 +}
   27.90 +
   27.91  void SpaceManager::verify() {
   27.92    // If there are blocks in the dictionary, then
    27.93    // verification of chunks does not work since
   27.94 @@ -3463,17 +3467,12 @@
   27.95    }
   27.96  }
   27.97  
   27.98 -bool Metaspace::contains(const void * ptr) {
   27.99 -  if (MetaspaceShared::is_in_shared_space(ptr)) {
  27.100 -    return true;
  27.101 +bool Metaspace::contains(const void* ptr) {
  27.102 +  if (vsm()->contains(ptr)) return true;
  27.103 +  if (using_class_space()) {
  27.104 +    return class_vsm()->contains(ptr);
  27.105    }
  27.106 -  // This is checked while unlocked.  As long as the virtualspaces are added
  27.107 -  // at the end, the pointer will be in one of them.  The virtual spaces
  27.108 -  // aren't deleted presently.  When they are, some sort of locking might
  27.109 -  // be needed.  Note, locking this can cause inversion problems with the
  27.110 -  // caller in MetaspaceObj::is_metadata() function.
  27.111 -  return space_list()->contains(ptr) ||
  27.112 -         (using_class_space() && class_space_list()->contains(ptr));
  27.113 +  return false;
  27.114  }
  27.115  
  27.116  void Metaspace::verify() {
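Note on the metaspace.cpp hunks above: pointer containment no longer walks the (removed) VirtualSpaceList; instead Metaspace::contains() asks each SpaceManager, which walks its in-use chunk lists without a lock - hence the storestore() barrier in set_chunks_in_use() - and each Metachunk answers a half-open [bottom, top) range test (metachunk.hpp hunk). The same membership logic in a self-contained form:

#include <cstdint>
#include <cstdio>

struct Chunk {
  char   storage[64];
  char*  top;            // allocation high-water mark within 'storage'
  Chunk* next;

  // Half-open range test, like Metachunk::contains: bottom() <= ptr && ptr < top.
  bool contains(const void* p) const {
    uintptr_t q = reinterpret_cast<uintptr_t>(p);
    return reinterpret_cast<uintptr_t>(storage) <= q && q < reinterpret_cast<uintptr_t>(top);
  }
};

// Walk every list of in-use chunks; the first chunk that covers the pointer wins.
static bool space_contains(Chunk* const lists[], int num_lists, const void* p) {
  for (int i = 0; i < num_lists; i++) {
    for (Chunk* c = lists[i]; c != nullptr; c = c->next) {
      if (c->contains(p)) return true;
    }
  }
  return false;
}

int main() {
  static Chunk a{{}, nullptr, nullptr}, b{{}, nullptr, nullptr};
  a.top = a.storage + 32;              // 32 bytes "allocated" in chunk a
  b.top = b.storage + 16;
  a.next = &b;
  Chunk* lists[1] = {&a};
  std::printf("%d %d\n", space_contains(lists, 1, a.storage + 8),
                         space_contains(lists, 1, b.storage + 48));   // prints "1 0"
  return 0;
}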
    28.1 --- a/src/share/vm/memory/metaspace.hpp	Wed Feb 19 20:12:43 2014 -0800
    28.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Feb 25 15:11:18 2014 -0800
    28.3 @@ -226,7 +226,7 @@
    28.4    MetaWord* expand_and_allocate(size_t size,
    28.5                                  MetadataType mdtype);
    28.6  
    28.7 -  static bool contains(const void *ptr);
    28.8 +  bool contains(const void* ptr);
    28.9    void dump(outputStream* const out) const;
   28.10  
   28.11    // Free empty virtualspaces
    29.1 --- a/src/share/vm/oops/arrayKlass.cpp	Wed Feb 19 20:12:43 2014 -0800
    29.2 +++ b/src/share/vm/oops/arrayKlass.cpp	Tue Feb 25 15:11:18 2014 -0800
    29.3 @@ -1,5 +1,5 @@
    29.4  /*
    29.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    29.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    29.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.8   *
    29.9   * This code is free software; you can redistribute it and/or modify it
   29.10 @@ -214,8 +214,8 @@
   29.11  
   29.12  // Verification
   29.13  
   29.14 -void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
   29.15 -  Klass::verify_on(st, check_dictionary);
   29.16 +void ArrayKlass::verify_on(outputStream* st) {
   29.17 +  Klass::verify_on(st);
   29.18  
   29.19    if (component_mirror() != NULL) {
   29.20      guarantee(component_mirror()->klass() != NULL, "should have a class");
    30.1 --- a/src/share/vm/oops/arrayKlass.hpp	Wed Feb 19 20:12:43 2014 -0800
    30.2 +++ b/src/share/vm/oops/arrayKlass.hpp	Tue Feb 25 15:11:18 2014 -0800
    30.3 @@ -1,5 +1,5 @@
    30.4  /*
    30.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    30.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    30.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.8   *
    30.9   * This code is free software; you can redistribute it and/or modify it
   30.10 @@ -146,7 +146,7 @@
   30.11    void oop_print_on(oop obj, outputStream* st);
   30.12  
   30.13    // Verification
   30.14 -  void verify_on(outputStream* st, bool check_dictionary);
   30.15 +  void verify_on(outputStream* st);
   30.16  
   30.17    void oop_verify_on(oop obj, outputStream* st);
   30.18  };
    31.1 --- a/src/share/vm/oops/constantPool.cpp	Wed Feb 19 20:12:43 2014 -0800
    31.2 +++ b/src/share/vm/oops/constantPool.cpp	Tue Feb 25 15:11:18 2014 -0800
    31.3 @@ -82,6 +82,9 @@
    31.4  void ConstantPool::deallocate_contents(ClassLoaderData* loader_data) {
    31.5    MetadataFactory::free_metadata(loader_data, cache());
    31.6    set_cache(NULL);
    31.7 +  MetadataFactory::free_array<u2>(loader_data, reference_map());
    31.8 +  set_reference_map(NULL);
    31.9 +
   31.10    MetadataFactory::free_array<jushort>(loader_data, operands());
   31.11    set_operands(NULL);
   31.12  
    32.1 --- a/src/share/vm/oops/instanceKlass.cpp	Wed Feb 19 20:12:43 2014 -0800
    32.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Feb 25 15:11:18 2014 -0800
    32.3 @@ -1,5 +1,5 @@
    32.4  /*
    32.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    32.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    32.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    32.8   *
    32.9   * This code is free software; you can redistribute it and/or modify it
   32.10 @@ -3184,7 +3184,7 @@
   32.11    virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); }
   32.12  };
   32.13  
   32.14 -void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) {
   32.15 +void InstanceKlass::verify_on(outputStream* st) {
   32.16  #ifndef PRODUCT
   32.17    // Avoid redundant verifies, this really should be in product.
   32.18    if (_verify_count == Universe::verify_count()) return;
   32.19 @@ -3192,14 +3192,11 @@
   32.20  #endif
   32.21  
   32.22    // Verify Klass
   32.23 -  Klass::verify_on(st, check_dictionary);
   32.24 -
   32.25 -  // Verify that klass is present in SystemDictionary if not already
   32.26 -  // verifying the SystemDictionary.
   32.27 -  if (is_loaded() && !is_anonymous() && check_dictionary) {
   32.28 -    Symbol* h_name = name();
   32.29 -    SystemDictionary::verify_obj_klass_present(h_name, class_loader_data());
   32.30 -  }
   32.31 +  Klass::verify_on(st);
   32.32 +
   32.33 +  // Verify that klass is present in ClassLoaderData
   32.34 +  guarantee(class_loader_data()->contains_klass(this),
   32.35 +            "this class isn't found in class loader data");
   32.36  
   32.37    // Verify vtables
   32.38    if (is_linked()) {
    33.1 --- a/src/share/vm/oops/instanceKlass.hpp	Wed Feb 19 20:12:43 2014 -0800
    33.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Tue Feb 25 15:11:18 2014 -0800
    33.3 @@ -1086,7 +1086,7 @@
    33.4    const char* internal_name() const;
    33.5  
    33.6    // Verification
    33.7 -  void verify_on(outputStream* st, bool check_dictionary);
    33.8 +  void verify_on(outputStream* st);
    33.9  
   33.10    void oop_verify_on(oop obj, outputStream* st);
   33.11  };
    34.1 --- a/src/share/vm/oops/klass.cpp	Wed Feb 19 20:12:43 2014 -0800
    34.2 +++ b/src/share/vm/oops/klass.cpp	Tue Feb 25 15:11:18 2014 -0800
    34.3 @@ -1,5 +1,5 @@
    34.4  /*
    34.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    34.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    34.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8   *
    34.9   * This code is free software; you can redistribute it and/or modify it
   34.10 @@ -376,8 +376,6 @@
   34.11  }
   34.12  
   34.13  bool Klass::is_loader_alive(BoolObjectClosure* is_alive) {
   34.14 -  assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace");
   34.15 -
   34.16  #ifdef ASSERT
   34.17    // The class is alive iff the class loader is alive.
   34.18    oop loader = class_loader();
   34.19 @@ -640,7 +638,7 @@
   34.20  
   34.21  // Verification
   34.22  
   34.23 -void Klass::verify_on(outputStream* st, bool check_dictionary) {
   34.24 +void Klass::verify_on(outputStream* st) {
   34.25  
   34.26    // This can be expensive, but it is worth checking that this klass is actually
   34.27    // in the CLD graph but not in production.
    35.1 --- a/src/share/vm/oops/klass.hpp	Wed Feb 19 20:12:43 2014 -0800
    35.2 +++ b/src/share/vm/oops/klass.hpp	Tue Feb 25 15:11:18 2014 -0800
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    35.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -695,8 +695,8 @@
   35.11    virtual const char* internal_name() const = 0;
   35.12  
   35.13    // Verification
   35.14 -  virtual void verify_on(outputStream* st, bool check_dictionary);
   35.15 -  void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); }
   35.16 +  virtual void verify_on(outputStream* st);
   35.17 +  void verify() { verify_on(tty); }
   35.18  
   35.19  #ifndef PRODUCT
   35.20    bool verify_vtable_index(int index);
    36.1 --- a/src/share/vm/oops/objArrayKlass.cpp	Wed Feb 19 20:12:43 2014 -0800
    36.2 +++ b/src/share/vm/oops/objArrayKlass.cpp	Tue Feb 25 15:11:18 2014 -0800
    36.3 @@ -1,5 +1,5 @@
    36.4  /*
    36.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    36.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    36.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    36.8   *
    36.9   * This code is free software; you can redistribute it and/or modify it
   36.10 @@ -674,8 +674,8 @@
   36.11  
   36.12  // Verification
   36.13  
   36.14 -void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) {
   36.15 -  ArrayKlass::verify_on(st, check_dictionary);
   36.16 +void ObjArrayKlass::verify_on(outputStream* st) {
   36.17 +  ArrayKlass::verify_on(st);
   36.18    guarantee(element_klass()->is_klass(), "should be klass");
   36.19    guarantee(bottom_klass()->is_klass(), "should be klass");
   36.20    Klass* bk = bottom_klass();
    37.1 --- a/src/share/vm/oops/objArrayKlass.hpp	Wed Feb 19 20:12:43 2014 -0800
    37.2 +++ b/src/share/vm/oops/objArrayKlass.hpp	Tue Feb 25 15:11:18 2014 -0800
    37.3 @@ -1,5 +1,5 @@
    37.4  /*
    37.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    37.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    37.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    37.8   *
    37.9   * This code is free software; you can redistribute it and/or modify it
   37.10 @@ -151,7 +151,7 @@
   37.11    const char* internal_name() const;
   37.12  
   37.13    // Verification
   37.14 -  void verify_on(outputStream* st, bool check_dictionary);
   37.15 +  void verify_on(outputStream* st);
   37.16  
   37.17    void oop_verify_on(oop obj, outputStream* st);
   37.18  };
    38.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Wed Feb 19 20:12:43 2014 -0800
    38.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Feb 25 15:11:18 2014 -0800
    38.3 @@ -63,34 +63,14 @@
    38.4    assert(_caller_jvms->same_calls_as(caller_jvms), "consistent JVMS");
    38.5    assert((caller_tree == NULL ? 0 : caller_tree->stack_depth() + 1) == stack_depth(), "correct (redundant) depth parameter");
    38.6    assert(caller_bci == this->caller_bci(), "correct (redundant) bci parameter");
    38.7 -  if (UseOldInlining) {
    38.8 -    // Update hierarchical counts, count_inline_bcs() and count_inlines()
    38.9 -    InlineTree *caller = (InlineTree *)caller_tree;
   38.10 -    for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
   38.11 -      caller->_count_inline_bcs += count_inline_bcs();
   38.12 -      NOT_PRODUCT(caller->_count_inlines++;)
   38.13 -    }
   38.14 +  // Update hierarchical counts, count_inline_bcs() and count_inlines()
   38.15 +  InlineTree *caller = (InlineTree *)caller_tree;
   38.16 +  for( ; caller != NULL; caller = ((InlineTree *)(caller->caller_tree())) ) {
   38.17 +    caller->_count_inline_bcs += count_inline_bcs();
   38.18 +    NOT_PRODUCT(caller->_count_inlines++;)
   38.19    }
   38.20  }
   38.21  
   38.22 -InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms,
   38.23 -                       float site_invoke_ratio, int max_inline_level) :
   38.24 -  C(c),
   38.25 -  _caller_jvms(caller_jvms),
   38.26 -  _caller_tree(NULL),
   38.27 -  _method(callee_method),
   38.28 -  _site_invoke_ratio(site_invoke_ratio),
   38.29 -  _max_inline_level(max_inline_level),
   38.30 -  _count_inline_bcs(method()->code_size()),
   38.31 -  _msg(NULL)
   38.32 -{
   38.33 -#ifndef PRODUCT
   38.34 -  _count_inlines = 0;
   38.35 -  _forced_inline = false;
   38.36 -#endif
   38.37 -  assert(!UseOldInlining, "do not use for old stuff");
   38.38 -}
   38.39 -
   38.40  /**
   38.41   *  Return true when EA is ON and a java constructor is called or
   38.42   *  a super constructor is called from an inlined java constructor.
   38.43 @@ -161,11 +141,6 @@
   38.44      return true;
   38.45    }
   38.46  
   38.47 -  if (!UseOldInlining) {
   38.48 -    set_msg("!UseOldInlining");
   38.49 -    return true;  // size and frequency are represented in a new way
   38.50 -  }
   38.51 -
   38.52    int default_max_inline_size = C->max_inline_size();
   38.53    int inline_small_code_size  = InlineSmallCode / 4;
   38.54    int max_inline_size         = default_max_inline_size;
   38.55 @@ -229,35 +204,6 @@
   38.56      fail_msg = "don't inline by annotation";
   38.57    }
   38.58  
   38.59 -  if (!UseOldInlining) {
   38.60 -    if (fail_msg != NULL) {
   38.61 -      *wci_result = *(WarmCallInfo::always_cold());
   38.62 -      set_msg(fail_msg);
   38.63 -      return true;
   38.64 -    }
   38.65 -
   38.66 -    if (callee_method->has_unloaded_classes_in_signature()) {
   38.67 -      wci_result->set_profit(wci_result->profit() * 0.1);
   38.68 -    }
   38.69 -
   38.70 -    // don't inline exception code unless the top method belongs to an
   38.71 -    // exception class
   38.72 -    if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   38.73 -      ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
   38.74 -      if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
   38.75 -        wci_result->set_profit(wci_result->profit() * 0.1);
   38.76 -      }
   38.77 -    }
   38.78 -
   38.79 -    if (callee_method->has_compiled_code() &&
   38.80 -        callee_method->instructions_size() > InlineSmallCode) {
   38.81 -      wci_result->set_profit(wci_result->profit() * 0.1);
   38.82 -      // %%% adjust wci_result->size()?
   38.83 -    }
   38.84 -
   38.85 -    return false;
   38.86 -  }
   38.87 -
   38.88    // one more inlining restriction
   38.89    if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
   38.90      fail_msg = "unloaded signature classes";
   38.91 @@ -360,9 +306,7 @@
   38.92                                 int caller_bci, JVMState* jvms, ciCallProfile& profile,
   38.93                                 WarmCallInfo* wci_result, bool& should_delay) {
   38.94  
   38.95 -   // Old algorithm had funny accumulating BC-size counters
   38.96 -  if (UseOldInlining && ClipInlining
   38.97 -      && (int)count_inline_bcs() >= DesiredMethodLimit) {
   38.98 +  if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
   38.99      if (!callee_method->force_inline() || !IncrementalInline) {
  38.100        set_msg("size > DesiredMethodLimit");
  38.101        return false;
  38.102 @@ -465,8 +409,7 @@
  38.103  
  38.104    int size = callee_method->code_size_for_inlining();
  38.105  
  38.106 -  if (UseOldInlining && ClipInlining
  38.107 -      && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
  38.108 +  if (ClipInlining && (int)count_inline_bcs() + size >= DesiredMethodLimit) {
  38.109      if (!callee_method->force_inline() || !IncrementalInline) {
  38.110        set_msg("size > DesiredMethodLimit");
  38.111        return false;
  38.112 @@ -584,8 +527,7 @@
  38.113                                 jvms, profile, &wci, should_delay);
  38.114  
  38.115  #ifndef PRODUCT
  38.116 -  if (UseOldInlining && InlineWarmCalls
  38.117 -      && (PrintOpto || C->print_inlining())) {
  38.118 +  if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
  38.119      bool cold = wci.is_cold();
  38.120      bool hot  = !cold && wci.is_hot();
  38.121      bool old_cold = !success;
  38.122 @@ -599,13 +541,12 @@
  38.123      }
  38.124    }
  38.125  #endif
  38.126 -  if (UseOldInlining) {
  38.127 -    if (success) {
  38.128 -      wci = *(WarmCallInfo::always_hot());
  38.129 -    } else {
  38.130 -      wci = *(WarmCallInfo::always_cold());
  38.131 -    }
  38.132 +  if (success) {
  38.133 +    wci = *(WarmCallInfo::always_hot());
  38.134 +  } else {
  38.135 +    wci = *(WarmCallInfo::always_cold());
  38.136    }
  38.137 +
  38.138    if (!InlineWarmCalls) {
  38.139      if (!wci.is_cold() && !wci.is_hot()) {
  38.140        // Do not inline the warm calls.
  38.141 @@ -619,8 +560,7 @@
  38.142        set_msg("inline (hot)");
  38.143      }
  38.144      print_inlining(callee_method, caller_bci, true /* success */);
  38.145 -    if (UseOldInlining)
  38.146 -      build_inline_tree_for_callee(callee_method, jvms, caller_bci);
  38.147 +    build_inline_tree_for_callee(callee_method, jvms, caller_bci);
  38.148      if (InlineWarmCalls && !wci.is_hot())
  38.149        return new (C) WarmCallInfo(wci);  // copy to heap
  38.150      return WarmCallInfo::always_hot();
    39.1 --- a/src/share/vm/opto/c2_globals.hpp	Wed Feb 19 20:12:43 2014 -0800
    39.2 +++ b/src/share/vm/opto/c2_globals.hpp	Tue Feb 25 15:11:18 2014 -0800
    39.3 @@ -357,9 +357,6 @@
    39.4            "File to dump ideal graph to.  If set overrides the "             \
    39.5            "use of the network")                                             \
    39.6                                                                              \
    39.7 -  product(bool, UseOldInlining, true,                                       \
    39.8 -          "Enable the 1.3 inlining strategy")                               \
    39.9 -                                                                            \
   39.10    product(bool, UseBimorphicInlining, true,                                 \
   39.11            "Profiling based inlining for two receivers")                     \
   39.12                                                                              \
    40.1 --- a/src/share/vm/opto/callGenerator.cpp	Wed Feb 19 20:12:43 2014 -0800
    40.2 +++ b/src/share/vm/opto/callGenerator.cpp	Tue Feb 25 15:11:18 2014 -0800
    40.3 @@ -722,7 +722,7 @@
    40.4      Node* m = kit.map()->in(i);
    40.5      Node* n = slow_map->in(i);
    40.6      if (m != n) {
    40.7 -      const Type* t = gvn.type(m)->meet(gvn.type(n));
    40.8 +      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
    40.9        Node* phi = PhiNode::make(region, m, t);
   40.10        phi->set_req(2, n);
   40.11        kit.map()->set_req(i, gvn.transform(phi));
   40.12 @@ -975,7 +975,7 @@
   40.13      Node* m = kit.map()->in(i);
   40.14      Node* n = slow_map->in(i);
   40.15      if (m != n) {
   40.16 -      const Type* t = gvn.type(m)->meet(gvn.type(n));
   40.17 +      const Type* t = gvn.type(m)->meet_speculative(gvn.type(n));
   40.18        Node* phi = PhiNode::make(region, m, t);
   40.19        phi->set_req(2, n);
   40.20        kit.map()->set_req(i, gvn.transform(phi));
    41.1 --- a/src/share/vm/opto/cfgnode.cpp	Wed Feb 19 20:12:43 2014 -0800
    41.2 +++ b/src/share/vm/opto/cfgnode.cpp	Tue Feb 25 15:11:18 2014 -0800
    41.3 @@ -951,7 +951,7 @@
    41.4          if (is_intf != ti_is_intf)
    41.5            { t = _type; break; }
    41.6        }
    41.7 -      t = t->meet(ti);
    41.8 +      t = t->meet_speculative(ti);
    41.9      }
   41.10    }
   41.11  
   41.12 @@ -968,11 +968,11 @@
   41.13    //
   41.14    // It is not possible to see Type::BOTTOM values as phi inputs,
   41.15    // because the ciTypeFlow pre-pass produces verifier-quality types.
   41.16 -  const Type* ft = t->filter(_type);  // Worst case type
   41.17 +  const Type* ft = t->filter_speculative(_type);  // Worst case type
   41.18  
   41.19  #ifdef ASSERT
   41.20    // The following logic has been moved into TypeOopPtr::filter.
   41.21 -  const Type* jt = t->join(_type);
   41.22 +  const Type* jt = t->join_speculative(_type);
   41.23    if( jt->empty() ) {           // Emptied out???
   41.24  
   41.25      // Check for evil case of 't' being a class and '_type' expecting an
   41.26 @@ -1757,7 +1757,7 @@
   41.27            break;
   41.28          }
   41.29          // Accumulate type for resulting Phi
   41.30 -        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
   41.31 +        type = type->meet_speculative(in(i)->in(AddPNode::Base)->bottom_type());
   41.32        }
   41.33        Node* base = NULL;
   41.34        if (doit) {
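Note on the type-system hunks here and in the surrounding files (callGenerator.cpp, cfgnode.cpp, connode.cpp, graphKit.cpp): plain meet/join/filter/higher_equal calls become their *_speculative variants so the profile-derived speculative component of a type is handled explicitly, and remove_speculative() strips it where it must not survive (ConNode, and the cleanup pass in compile.cpp below). The toy below only illustrates the shape "proven part plus an optional guess that can be merged or dropped"; it is not C2's type lattice.

#include <cstdio>

// Toy "type": a proven integer interval plus an optional, narrower speculative guess.
struct Ty {
  int  lo, hi;          // proven bounds
  bool has_spec;        // is a speculative guess attached?
  int  spec_lo, spec_hi;
};

static int mn(int a, int b) { return a < b ? a : b; }
static int mx(int a, int b) { return a > b ? a : b; }

// meet_speculative-style operation: widen the proven part; keep a merged guess
// only when both inputs carry one.
static Ty meet_speculative(const Ty& a, const Ty& b) {
  Ty r = { mn(a.lo, b.lo), mx(a.hi, b.hi), false, 0, 0 };
  if (a.has_spec && b.has_spec) {
    r.has_spec = true;
    r.spec_lo  = mn(a.spec_lo, b.spec_lo);
    r.spec_hi  = mx(a.spec_hi, b.spec_hi);
  }
  return r;
}

// remove_speculative-style operation: same proven facts, guess dropped.
static Ty remove_speculative(Ty t) { t.has_spec = false; return t; }

int main() {
  Ty a = { 0, 100, true, 0, 10 };
  Ty b = { -5, 50, true, 1, 3 };
  Ty m = meet_speculative(a, b);
  Ty c = remove_speculative(m);
  std::printf("proven [%d,%d] spec=%d; after remove spec=%d\n", m.lo, m.hi, m.has_spec, c.has_spec);
  return 0;
}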
    42.1 --- a/src/share/vm/opto/compile.cpp	Wed Feb 19 20:12:43 2014 -0800
    42.2 +++ b/src/share/vm/opto/compile.cpp	Tue Feb 25 15:11:18 2014 -0800
    42.3 @@ -705,10 +705,7 @@
    42.4  
    42.5    print_compile_messages();
    42.6  
    42.7 -  if (UseOldInlining || PrintCompilation NOT_PRODUCT( || PrintOpto) )
    42.8 -    _ilt = InlineTree::build_inline_tree_root();
    42.9 -  else
   42.10 -    _ilt = NULL;
   42.11 +  _ilt = InlineTree::build_inline_tree_root();
   42.12  
   42.13    // Even if NO memory addresses are used, MergeMem nodes must have at least 1 slice
   42.14    assert(num_alias_types() >= AliasIdxRaw, "");
   42.15 @@ -3948,16 +3945,18 @@
   42.16      // which may optimize it out.
   42.17      for (uint next = 0; next < worklist.size(); ++next) {
   42.18        Node *n  = worklist.at(next);
   42.19 -      if (n->is_Type() && n->as_Type()->type()->isa_oopptr() != NULL &&
   42.20 -          n->as_Type()->type()->is_oopptr()->speculative() != NULL) {
   42.21 +      if (n->is_Type()) {
   42.22          TypeNode* tn = n->as_Type();
   42.23 -        const TypeOopPtr* t = tn->type()->is_oopptr();
   42.24 -        bool in_hash = igvn.hash_delete(n);
   42.25 -        assert(in_hash, "node should be in igvn hash table");
   42.26 -        tn->set_type(t->remove_speculative());
   42.27 -        igvn.hash_insert(n);
   42.28 -        igvn._worklist.push(n); // give it a chance to go away
   42.29 -        modified++;
   42.30 +        const Type* t = tn->type();
   42.31 +        const Type* t_no_spec = t->remove_speculative();
   42.32 +        if (t_no_spec != t) {
   42.33 +          bool in_hash = igvn.hash_delete(n);
   42.34 +          assert(in_hash, "node should be in igvn hash table");
   42.35 +          tn->set_type(t_no_spec);
   42.36 +          igvn.hash_insert(n);
   42.37 +          igvn._worklist.push(n); // give it a chance to go away
   42.38 +          modified++;
   42.39 +        }
   42.40        }
   42.41        uint max = n->len();
   42.42        for( uint i = 0; i < max; ++i ) {
   42.43 @@ -3971,6 +3970,27 @@
   42.44      if (modified > 0) {
   42.45        igvn.optimize();
   42.46      }
   42.47 +#ifdef ASSERT
   42.48 +    // Verify that after the IGVN is over no speculative type has resurfaced
   42.49 +    worklist.clear();
   42.50 +    worklist.push(root());
   42.51 +    for (uint next = 0; next < worklist.size(); ++next) {
   42.52 +      Node *n  = worklist.at(next);
   42.53 +      const Type* t = igvn.type(n);
   42.54 +      assert(t == t->remove_speculative(), "no more speculative types");
   42.55 +      if (n->is_Type()) {
   42.56 +        t = n->as_Type()->type();
   42.57 +        assert(t == t->remove_speculative(), "no more speculative types");
   42.58 +      }
   42.59 +      uint max = n->len();
   42.60 +      for( uint i = 0; i < max; ++i ) {
   42.61 +        Node *m = n->in(i);
   42.62 +        if (not_a_node(m))  continue;
   42.63 +        worklist.push(m);
   42.64 +      }
   42.65 +    }
   42.66 +    igvn.check_no_speculative_types();
   42.67 +#endif
   42.68    }
   42.69  }
   42.70  
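Note on the compile.cpp hunk above: both the speculative-type stripping loop and the new ASSERT-only verification use the same traversal - seed a worklist with root(), scan it by index while it grows, examine each node, and push its inputs so every reachable node is seen. The pattern in isolation; duplicate visits are avoided here with an explicit visited set:

#include <cstdio>
#include <set>
#include <vector>

struct Node {
  int id;
  std::vector<Node*> in;   // inputs; may contain nulls, like n->in(i) in the hunk
};

// Visit every node reachable from 'root' through input edges, exactly once.
static int visit_all(Node* root) {
  std::vector<Node*> worklist;
  std::set<Node*> visited;
  worklist.push_back(root);
  visited.insert(root);
  int checked = 0;
  for (size_t next = 0; next < worklist.size(); ++next) {   // the list grows while we scan it
    Node* n = worklist[next];
    ++checked;                                              // stand-in for the per-node assert
    for (Node* m : n->in) {
      if (m == nullptr || visited.count(m)) continue;
      visited.insert(m);
      worklist.push_back(m);
    }
  }
  return checked;
}

int main() {
  Node a{0, {}}, b{1, {}}, c{2, {}};
  a.in = {&b, &c, nullptr};
  b.in = {&c};                         // diamond: c is reachable twice, visited once
  std::printf("visited %d nodes\n", visit_all(&a));   // prints "visited 3 nodes"
  return 0;
}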
    43.1 --- a/src/share/vm/opto/connode.cpp	Wed Feb 19 20:12:43 2014 -0800
    43.2 +++ b/src/share/vm/opto/connode.cpp	Tue Feb 25 15:11:18 2014 -0800
    43.3 @@ -188,7 +188,7 @@
    43.4  const Type *CMoveNode::Value( PhaseTransform *phase ) const {
    43.5    if( phase->type(in(Condition)) == Type::TOP )
    43.6      return Type::TOP;
    43.7 -  return phase->type(in(IfFalse))->meet(phase->type(in(IfTrue)));
    43.8 +  return phase->type(in(IfFalse))->meet_speculative(phase->type(in(IfTrue)));
    43.9  }
   43.10  
   43.11  //------------------------------make-------------------------------------------
   43.12 @@ -392,14 +392,14 @@
   43.13  //=============================================================================
   43.14  // If input is already higher or equal to cast type, then this is an identity.
   43.15  Node *ConstraintCastNode::Identity( PhaseTransform *phase ) {
   43.16 -  return phase->type(in(1))->higher_equal(_type) ? in(1) : this;
   43.17 +  return phase->type(in(1))->higher_equal_speculative(_type) ? in(1) : this;
   43.18  }
   43.19  
   43.20  //------------------------------Value------------------------------------------
   43.21  // Take 'join' of input and cast-up type
   43.22  const Type *ConstraintCastNode::Value( PhaseTransform *phase ) const {
   43.23    if( in(0) && phase->type(in(0)) == Type::TOP ) return Type::TOP;
   43.24 -  const Type* ft = phase->type(in(1))->filter(_type);
    43.25 +  const Type* ft = phase->type(in(1))->filter_speculative(_type);
   43.26  
   43.27  #ifdef ASSERT
   43.28    // Previous versions of this function had some special case logic,
   43.29 @@ -409,7 +409,7 @@
   43.30      {
   43.31        const Type* t1 = phase->type(in(1));
   43.32        if( t1 == Type::TOP )  assert(ft == Type::TOP, "special case #1");
   43.33 -      const Type* rt = t1->join(_type);
   43.34 +      const Type* rt = t1->join_speculative(_type);
   43.35        if (rt->empty())       assert(ft == Type::TOP, "special case #2");
   43.36        break;
   43.37      }
    44.1 --- a/src/share/vm/opto/connode.hpp	Wed Feb 19 20:12:43 2014 -0800
    44.2 +++ b/src/share/vm/opto/connode.hpp	Tue Feb 25 15:11:18 2014 -0800
    44.3 @@ -36,7 +36,7 @@
    44.4  // Simple constants
    44.5  class ConNode : public TypeNode {
    44.6  public:
    44.7 -  ConNode( const Type *t ) : TypeNode(t,1) {
    44.8 +  ConNode( const Type *t ) : TypeNode(t->remove_speculative(),1) {
    44.9      init_req(0, (Node*)Compile::current()->root());
   44.10      init_flags(Flag_is_Con);
   44.11    }
    45.1 --- a/src/share/vm/opto/doCall.cpp	Wed Feb 19 20:12:43 2014 -0800
    45.2 +++ b/src/share/vm/opto/doCall.cpp	Tue Feb 25 15:11:18 2014 -0800
    45.3 @@ -161,19 +161,8 @@
    45.4  
    45.5      // Try inlining a bytecoded method:
    45.6      if (!call_does_dispatch) {
    45.7 -      InlineTree* ilt;
    45.8 -      if (UseOldInlining) {
    45.9 -        ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
   45.10 -      } else {
   45.11 -        // Make a disembodied, stateless ILT.
   45.12 -        // TO DO:  When UseOldInlining is removed, copy the ILT code elsewhere.
   45.13 -        float site_invoke_ratio = prof_factor;
   45.14 -        // Note:  ilt is for the root of this parse, not the present call site.
   45.15 -        ilt = new InlineTree(this, jvms->method(), jvms->caller(), site_invoke_ratio, MaxInlineLevel);
   45.16 -      }
   45.17 +      InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
   45.18        WarmCallInfo scratch_ci;
   45.19 -      if (!UseOldInlining)
   45.20 -        scratch_ci.init(jvms, callee, profile, prof_factor);
   45.21        bool should_delay = false;
   45.22        WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
   45.23        assert(ci != &scratch_ci, "do not let this pointer escape");
    46.1 --- a/src/share/vm/opto/graphKit.cpp	Wed Feb 19 20:12:43 2014 -0800
    46.2 +++ b/src/share/vm/opto/graphKit.cpp	Tue Feb 25 15:11:18 2014 -0800
    46.3 @@ -420,7 +420,7 @@
    46.4        }
    46.5        const Type* srctype = _gvn.type(src);
    46.6        if (phi->type() != srctype) {
    46.7 -        const Type* dsttype = phi->type()->meet(srctype);
    46.8 +        const Type* dsttype = phi->type()->meet_speculative(srctype);
    46.9          if (phi->type() != dsttype) {
   46.10            phi->set_type(dsttype);
   46.11            _gvn.set_type(phi, dsttype);
   46.12 @@ -1224,7 +1224,7 @@
   46.13          // See if mixing in the NULL pointer changes type.
   46.14          // If so, then the NULL pointer was not allowed in the original
   46.15          // type.  In other words, "value" was not-null.
   46.16 -        if (t->meet(TypePtr::NULL_PTR) != t) {
   46.17 +        if (t->meet(TypePtr::NULL_PTR) != t->remove_speculative()) {
   46.18            // same as: if (!TypePtr::NULL_PTR->higher_equal(t)) ...
   46.19            explicit_null_checks_elided++;
   46.20            return value;           // Elided null check quickly!
   46.21 @@ -1357,7 +1357,7 @@
   46.22  // Cast obj to not-null on this path
   46.23  Node* GraphKit::cast_not_null(Node* obj, bool do_replace_in_map) {
   46.24    const Type *t = _gvn.type(obj);
   46.25 -  const Type *t_not_null = t->join(TypePtr::NOTNULL);
   46.26 +  const Type *t_not_null = t->join_speculative(TypePtr::NOTNULL);
   46.27    // Object is already not-null?
   46.28    if( t == t_not_null ) return obj;
   46.29  
   46.30 @@ -3014,7 +3014,7 @@
   46.31        if (failure_control != NULL) // failure is now impossible
   46.32          (*failure_control) = top();
   46.33        // adjust the type of the phi to the exact klass:
   46.34 -      phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR));
   46.35 +      phi->raise_bottom_type(_gvn.type(cast_obj)->meet_speculative(TypePtr::NULL_PTR));
   46.36      }
   46.37    }
   46.38  
    47.1 --- a/src/share/vm/opto/library_call.cpp	Wed Feb 19 20:12:43 2014 -0800
    47.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Feb 25 15:11:18 2014 -0800
    47.3 @@ -304,6 +304,7 @@
    47.4    bool inline_cipherBlockChaining_AESCrypt(vmIntrinsics::ID id);
    47.5    Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting);
    47.6    Node* get_key_start_from_aescrypt_object(Node* aescrypt_object);
    47.7 +  Node* get_original_key_start_from_aescrypt_object(Node* aescrypt_object);
    47.8    bool inline_encodeISOArray();
    47.9    bool inline_updateCRC32();
   47.10    bool inline_updateBytesCRC32();
   47.11 @@ -5945,10 +5946,22 @@
   47.12    Node* k_start = get_key_start_from_aescrypt_object(aescrypt_object);
   47.13    if (k_start == NULL) return false;
   47.14  
   47.15 -  // Call the stub.
   47.16 -  make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
   47.17 -                    stubAddr, stubName, TypePtr::BOTTOM,
   47.18 -                    src_start, dest_start, k_start);
   47.19 +  if (Matcher::pass_original_key_for_aes()) {
    47.20 +    // On SPARC we need to pass the original key, since key expansion has to happen in the intrinsic
    47.21 +    // due to compatibility issues between the Java key expansion and the SPARC crypto instructions.
   47.22 +    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
   47.23 +    if (original_k_start == NULL) return false;
   47.24 +
   47.25 +    // Call the stub.
   47.26 +    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
   47.27 +                      stubAddr, stubName, TypePtr::BOTTOM,
   47.28 +                      src_start, dest_start, k_start, original_k_start);
   47.29 +  } else {
   47.30 +    // Call the stub.
   47.31 +    make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::aescrypt_block_Type(),
   47.32 +                      stubAddr, stubName, TypePtr::BOTTOM,
   47.33 +                      src_start, dest_start, k_start);
   47.34 +  }
   47.35  
   47.36    return true;
   47.37  }
   47.38 @@ -6026,14 +6039,29 @@
   47.39    if (objRvec == NULL) return false;
   47.40    Node* r_start = array_element_address(objRvec, intcon(0), T_BYTE);
   47.41  
   47.42 -  // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
   47.43 -  make_runtime_call(RC_LEAF|RC_NO_FP,
   47.44 -                    OptoRuntime::cipherBlockChaining_aescrypt_Type(),
   47.45 -                    stubAddr, stubName, TypePtr::BOTTOM,
   47.46 -                    src_start, dest_start, k_start, r_start, len);
   47.47 -
   47.48 -  // return is void so no result needs to be pushed
   47.49 -
   47.50 +  Node* cbcCrypt;
   47.51 +  if (Matcher::pass_original_key_for_aes()) {
    47.52 +    // On SPARC we need to pass the original key, since key expansion has to happen in the intrinsic
    47.53 +    // due to compatibility issues between the Java key expansion and the SPARC crypto instructions.
   47.54 +    Node* original_k_start = get_original_key_start_from_aescrypt_object(aescrypt_object);
   47.55 +    if (original_k_start == NULL) return false;
   47.56 +
   47.57 +    // Call the stub, passing src_start, dest_start, k_start, r_start, src_len and original_k_start
   47.58 +    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
   47.59 +                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
   47.60 +                                 stubAddr, stubName, TypePtr::BOTTOM,
   47.61 +                                 src_start, dest_start, k_start, r_start, len, original_k_start);
   47.62 +  } else {
   47.63 +    // Call the stub, passing src_start, dest_start, k_start, r_start and src_len
   47.64 +    cbcCrypt = make_runtime_call(RC_LEAF|RC_NO_FP,
   47.65 +                                 OptoRuntime::cipherBlockChaining_aescrypt_Type(),
   47.66 +                                 stubAddr, stubName, TypePtr::BOTTOM,
   47.67 +                                 src_start, dest_start, k_start, r_start, len);
   47.68 +  }
   47.69 +
   47.70 +  // return cipher length (int)
   47.71 +  Node* retvalue = _gvn.transform(new (C) ProjNode(cbcCrypt, TypeFunc::Parms));
   47.72 +  set_result(retvalue);
   47.73    return true;
   47.74  }
   47.75  
   47.76 @@ -6048,6 +6076,17 @@
   47.77    return k_start;
   47.78  }
   47.79  
   47.80 +//------------------------------get_original_key_start_from_aescrypt_object-----------------------
   47.81 +Node * LibraryCallKit::get_original_key_start_from_aescrypt_object(Node *aescrypt_object) {
   47.82 +  Node* objAESCryptKey = load_field_from_object(aescrypt_object, "lastKey", "[B", /*is_exact*/ false);
   47.83 +  assert (objAESCryptKey != NULL, "wrong version of com.sun.crypto.provider.AESCrypt");
   47.84 +  if (objAESCryptKey == NULL) return (Node *) NULL;
   47.85 +
   47.86 +  // now have the array, need to get the start address of the lastKey array
   47.87 +  Node* original_k_start = array_element_address(objAESCryptKey, intcon(0), T_BYTE);
   47.88 +  return original_k_start;
   47.89 +}
   47.90 +
   47.91  //----------------------------inline_cipherBlockChaining_AESCrypt_predicate----------------------------
   47.92  // Return node representing slow path of predicate check.
   47.93  // the pseudo code we want to emulate with this predicate is:
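
The library_call.cpp changes above make the AES intrinsics consult Matcher::pass_original_key_for_aes(): on platforms where it is true (SPARC), the intrinsic also loads the AESCrypt lastKey array and passes its start address to the stub as an extra argument, and the CBC intrinsic now projects an int result (the cipher length) out of the call. Below is a simplified, self-contained illustration of the optional-argument plumbing; pass_original_key_for_aes, aes_stub and the integer "addresses" are stand-ins for the real stub call.

// Sketch of the conditional extra stub argument used by the AES intrinsics.
#include <cstdio>
#include <vector>

static bool pass_original_key_for_aes() {
  return true;                        // true on SPARC in the real code
}

// The stub normally sees the expanded key schedule; SPARC-style platforms
// also get the original key so the stub can do its own key expansion.
static int aes_stub(const std::vector<int>& args) {
  return static_cast<int>(args.size());   // pretend the result is a cipher length
}

static int call_aes_stub(int src, int dst, int k_start, int original_k_start) {
  std::vector<int> args{src, dst, k_start};
  if (pass_original_key_for_aes()) {
    args.push_back(original_k_start);     // extra "original k array" argument
  }
  return aes_stub(args);                  // the CBC variant now returns a length
}

int main() {
  std::printf("stub called with %d arguments\n", call_aes_stub(1, 2, 3, 4));
  return 0;
}
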
    48.1 --- a/src/share/vm/opto/loopopts.cpp	Wed Feb 19 20:12:43 2014 -0800
    48.2 +++ b/src/share/vm/opto/loopopts.cpp	Tue Feb 25 15:11:18 2014 -0800
    48.3 @@ -1115,8 +1115,8 @@
    48.4      Node *n2 = phi->in(i)->in(1)->in(2);
    48.5      phi1->set_req( i, n1 );
    48.6      phi2->set_req( i, n2 );
    48.7 -    phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
    48.8 -    phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
    48.9 +    phi1->set_type( phi1->type()->meet_speculative(n1->bottom_type()));
   48.10 +    phi2->set_type( phi2->type()->meet_speculative(n2->bottom_type()));
   48.11    }
   48.12    // See if these Phis have been made before.
   48.13    // Register with optimizer
   48.14 @@ -1189,8 +1189,8 @@
   48.15      }
   48.16      phi1->set_req( j, n1 );
   48.17      phi2->set_req( j, n2 );
   48.18 -    phi1->set_type( phi1->type()->meet(n1->bottom_type()) );
   48.19 -    phi2->set_type( phi2->type()->meet(n2->bottom_type()) );
   48.20 +    phi1->set_type(phi1->type()->meet_speculative(n1->bottom_type()));
   48.21 +    phi2->set_type(phi2->type()->meet_speculative(n2->bottom_type()));
   48.22    }
   48.23  
   48.24    // See if these Phis have been made before.
    49.1 --- a/src/share/vm/opto/matcher.hpp	Wed Feb 19 20:12:43 2014 -0800
    49.2 +++ b/src/share/vm/opto/matcher.hpp	Tue Feb 25 15:11:18 2014 -0800
    49.3 @@ -286,6 +286,9 @@
    49.4    // CPU supports misaligned vectors store/load.
    49.5    static const bool misaligned_vectors_ok();
    49.6  
    49.7 +  // Should original key array reference be passed to AES stubs
    49.8 +  static const bool pass_original_key_for_aes();
    49.9 +
   49.10    // Used to determine a "low complexity" 64-bit constant.  (Zero is simple.)
   49.11    // The standard of comparison is one (StoreL ConL) vs. two (StoreI ConI).
   49.12    // Depends on the details of 64-bit constant generation on the CPU.
    50.1 --- a/src/share/vm/opto/memnode.cpp	Wed Feb 19 20:12:43 2014 -0800
    50.2 +++ b/src/share/vm/opto/memnode.cpp	Tue Feb 25 15:11:18 2014 -0800
    50.3 @@ -657,7 +657,7 @@
    50.4        // disregarding "null"-ness.
    50.5        // (We make an exception for TypeRawPtr::BOTTOM, which is a bit bucket.)
    50.6        const TypePtr* tp_notnull = tp->join(TypePtr::NOTNULL)->is_ptr();
    50.7 -      assert(cross_check->meet(tp_notnull) == cross_check,
    50.8 +      assert(cross_check->meet(tp_notnull) == cross_check->remove_speculative(),
    50.9               "real address must not escape from expected memory type");
   50.10      }
   50.11      #endif
   50.12 @@ -1685,7 +1685,7 @@
   50.13        // t might actually be lower than _type, if _type is a unique
   50.14        // concrete subclass of abstract class t.
   50.15        if (off_beyond_header) {  // is the offset beyond the header?
   50.16 -        const Type* jt = t->join(_type);
   50.17 +        const Type* jt = t->join_speculative(_type);
   50.18          // In any case, do not allow the join, per se, to empty out the type.
   50.19          if (jt->empty() && !t->empty()) {
   50.20            // This can happen if a interface-typed array narrows to a class type.
    51.1 --- a/src/share/vm/opto/multnode.cpp	Wed Feb 19 20:12:43 2014 -0800
    51.2 +++ b/src/share/vm/opto/multnode.cpp	Tue Feb 25 15:11:18 2014 -0800
    51.3 @@ -94,7 +94,7 @@
    51.4    if ((_con == TypeFunc::Parms) &&
    51.5        n->is_CallStaticJava() && n->as_CallStaticJava()->is_boxing_method()) {
    51.6      // The result of autoboxing is always non-null on normal path.
    51.7 -    t = t->join(TypePtr::NOTNULL);
    51.8 +    t = t->join_speculative(TypePtr::NOTNULL);
    51.9    }
   51.10    return t;
   51.11  }
    52.1 --- a/src/share/vm/opto/node.cpp	Wed Feb 19 20:12:43 2014 -0800
    52.2 +++ b/src/share/vm/opto/node.cpp	Tue Feb 25 15:11:18 2014 -0800
    52.3 @@ -995,13 +995,13 @@
    52.4    if (is_Type()) {
    52.5      TypeNode *n = this->as_Type();
    52.6      if (VerifyAliases) {
    52.7 -      assert(new_type->higher_equal(n->type()), "new type must refine old type");
    52.8 +      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
    52.9      }
   52.10      n->set_type(new_type);
   52.11    } else if (is_Load()) {
   52.12      LoadNode *n = this->as_Load();
   52.13      if (VerifyAliases) {
   52.14 -      assert(new_type->higher_equal(n->type()), "new type must refine old type");
   52.15 +      assert(new_type->higher_equal_speculative(n->type()), "new type must refine old type");
   52.16      }
   52.17      n->set_type(new_type);
   52.18    }
    53.1 --- a/src/share/vm/opto/parse1.cpp	Wed Feb 19 20:12:43 2014 -0800
    53.2 +++ b/src/share/vm/opto/parse1.cpp	Tue Feb 25 15:11:18 2014 -0800
    53.3 @@ -1656,7 +1656,7 @@
    53.4            assert(bt1 != Type::BOTTOM, "should not be building conflict phis");
    53.5            map()->set_req(j, _gvn.transform_no_reclaim(phi));
    53.6            debug_only(const Type* bt2 = phi->bottom_type());
    53.7 -          assert(bt2->higher_equal(bt1), "must be consistent with type-flow");
    53.8 +          assert(bt2->higher_equal_speculative(bt1), "must be consistent with type-flow");
    53.9            record_for_igvn(phi);
   53.10          }
   53.11        }
   53.12 @@ -2029,7 +2029,7 @@
   53.13            !tp->klass()->is_interface()) {
   53.14          // sharpen the type eagerly; this eases certain assert checking
   53.15          if (tp->higher_equal(TypeInstPtr::NOTNULL))
   53.16 -          tr = tr->join(TypeInstPtr::NOTNULL)->is_instptr();
   53.17 +          tr = tr->join_speculative(TypeInstPtr::NOTNULL)->is_instptr();
   53.18          value = _gvn.transform(new (C) CheckCastPPNode(0,value,tr));
   53.19        }
   53.20      }
    54.1 --- a/src/share/vm/opto/parse2.cpp	Wed Feb 19 20:12:43 2014 -0800
    54.2 +++ b/src/share/vm/opto/parse2.cpp	Tue Feb 25 15:11:18 2014 -0800
    54.3 @@ -88,7 +88,7 @@
    54.4        if (toop->klass()->as_instance_klass()->unique_concrete_subklass()) {
    54.5          // If we load from "AbstractClass[]" we must see "ConcreteSubClass".
    54.6          const Type* subklass = Type::get_const_type(toop->klass());
    54.7 -        elemtype = subklass->join(el);
    54.8 +        elemtype = subklass->join_speculative(el);
    54.9        }
   54.10      }
   54.11    }
   54.12 @@ -1278,7 +1278,7 @@
   54.13         //   Bool(CmpP(LoadKlass(obj._klass), ConP(Foo.klass)), [eq])
   54.14         // or the narrowOop equivalent.
   54.15         const Type* obj_type = _gvn.type(obj);
   54.16 -       const TypeOopPtr* tboth = obj_type->join(con_type)->isa_oopptr();
   54.17 +       const TypeOopPtr* tboth = obj_type->join_speculative(con_type)->isa_oopptr();
   54.18         if (tboth != NULL && tboth->klass_is_exact() && tboth != obj_type &&
   54.19             tboth->higher_equal(obj_type)) {
   54.20            // obj has to be of the exact type Foo if the CmpP succeeds.
   54.21 @@ -1288,7 +1288,7 @@
   54.22                (jvms->is_loc(obj_in_map) || jvms->is_stk(obj_in_map))) {
   54.23              TypeNode* ccast = new (C) CheckCastPPNode(control(), obj, tboth);
   54.24              const Type* tcc = ccast->as_Type()->type();
   54.25 -            assert(tcc != obj_type && tcc->higher_equal(obj_type), "must improve");
   54.26 +            assert(tcc != obj_type && tcc->higher_equal_speculative(obj_type), "must improve");
   54.27              // Delay transform() call to allow recovery of pre-cast value
   54.28              // at the control merge.
   54.29              _gvn.set_type_bottom(ccast);
   54.30 @@ -1318,7 +1318,7 @@
   54.31    switch (btest) {
   54.32    case BoolTest::eq:                    // Constant test?
   54.33      {
   54.34 -      const Type* tboth = tcon->join(tval);
   54.35 +      const Type* tboth = tcon->join_speculative(tval);
   54.36        if (tboth == tval)  break;        // Nothing to gain.
   54.37        if (tcon->isa_int()) {
   54.38          ccast = new (C) CastIINode(val, tboth);
   54.39 @@ -1352,7 +1352,7 @@
   54.40  
   54.41    if (ccast != NULL) {
   54.42      const Type* tcc = ccast->as_Type()->type();
   54.43 -    assert(tcc != tval && tcc->higher_equal(tval), "must improve");
   54.44 +    assert(tcc != tval && tcc->higher_equal_speculative(tval), "must improve");
   54.45      // Delay transform() call to allow recovery of pre-cast value
   54.46      // at the control merge.
   54.47      ccast->set_req(0, control());
    55.1 --- a/src/share/vm/opto/parse3.cpp	Wed Feb 19 20:12:43 2014 -0800
    55.2 +++ b/src/share/vm/opto/parse3.cpp	Tue Feb 25 15:11:18 2014 -0800
    55.3 @@ -361,7 +361,7 @@
    55.4      //   should_be_constant = (oop not scavengable || ScavengeRootsInCode >= 2)
    55.5      // An oop is not scavengable if it is in the perm gen.
    55.6      if (stable_type != NULL && con_type != NULL && con_type->isa_oopptr())
    55.7 -      con_type = con_type->join(stable_type);
    55.8 +      con_type = con_type->join_speculative(stable_type);
    55.9      break;
   55.10  
   55.11    case T_ILLEGAL:
    56.1 --- a/src/share/vm/opto/phaseX.cpp	Wed Feb 19 20:12:43 2014 -0800
    56.2 +++ b/src/share/vm/opto/phaseX.cpp	Tue Feb 25 15:11:18 2014 -0800
    56.3 @@ -323,6 +323,23 @@
    56.4    }
    56.5  }
    56.6  
    56.7 +
    56.8 +void NodeHash::check_no_speculative_types() {
    56.9 +#ifdef ASSERT
   56.10 +  uint max = size();
   56.11 +  Node *sentinel_node = sentinel();
   56.12 +  for (uint i = 0; i < max; ++i) {
   56.13 +    Node *n = at(i);
    56.14 +    if (n != NULL && n != sentinel_node && n->is_Type()) {
   56.15 +      TypeNode* tn = n->as_Type();
   56.16 +      const Type* t = tn->type();
   56.17 +      const Type* t_no_spec = t->remove_speculative();
   56.18 +      assert(t == t_no_spec, "dead node in hash table or missed node during speculative cleanup");
   56.19 +    }
   56.20 +  }
   56.21 +#endif
   56.22 +}
   56.23 +
   56.24  #ifndef PRODUCT
   56.25  //------------------------------dump-------------------------------------------
   56.26  // Dump statistics for the hash table
   56.27 @@ -1392,11 +1409,11 @@
   56.28    assert(UseTypeSpeculation, "speculation is off");
   56.29    for (uint i = 0; i < _types.Size(); i++)  {
   56.30      const Type* t = _types.fast_lookup(i);
   56.31 -    if (t != NULL && t->isa_oopptr()) {
   56.32 -      const TypeOopPtr* to = t->is_oopptr();
   56.33 -      _types.map(i, to->remove_speculative());
   56.34 +    if (t != NULL) {
   56.35 +      _types.map(i, t->remove_speculative());
   56.36      }
   56.37    }
   56.38 +  _table.check_no_speculative_types();
   56.39  }
   56.40  
   56.41  //=============================================================================
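
The phaseX.cpp hunks above pair the cleanup with a verification: remove_speculative_types() now maps every cached type (not just oop pointers) through remove_speculative(), and NodeHash::check_no_speculative_types() then scans the value-number table and asserts that no type node still carries a speculative part. A toy version of that strip-then-verify pattern, with Entry and strip_speculative as illustrative names only:

// Strip an optional "speculative" annotation from every cached entry,
// then verify that none remain (mirrors the cleanup plus debug check above).
#include <cassert>
#include <cstdio>
#include <vector>

struct Entry {
  int value;
  bool speculative;                  // stands in for Type::speculative()
};

static Entry strip_speculative(Entry e) {
  e.speculative = false;             // Type::remove_speculative() analogue
  return e;
}

static void remove_speculative_types(std::vector<Entry>& table) {
  for (Entry& e : table) {
    e = strip_speculative(e);        // map every slot, as the new loop does
  }
  for (const Entry& e : table) {     // debug-only sweep over the whole table
    assert(!e.speculative && "missed entry during speculative cleanup");
  }
}

int main() {
  std::vector<Entry> table{{1, true}, {2, false}, {3, true}};
  remove_speculative_types(table);
  std::printf("table verified: %zu entries, none speculative\n", table.size());
  return 0;
}
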
    57.1 --- a/src/share/vm/opto/phaseX.hpp	Wed Feb 19 20:12:43 2014 -0800
    57.2 +++ b/src/share/vm/opto/phaseX.hpp	Tue Feb 25 15:11:18 2014 -0800
    57.3 @@ -92,7 +92,8 @@
    57.4    }
    57.5  
    57.6    void   remove_useless_nodes(VectorSet &useful); // replace with sentinel
    57.7 -  void replace_with(NodeHash* nh);
    57.8 +  void   replace_with(NodeHash* nh);
    57.9 +  void   check_no_speculative_types(); // Check no speculative part for type nodes in table
   57.10  
   57.11    Node  *sentinel() { return _sentinel; }
   57.12  
   57.13 @@ -501,6 +502,9 @@
   57.14                                          Deoptimization::DeoptReason reason);
   57.15  
   57.16    void remove_speculative_types();
   57.17 +  void check_no_speculative_types() {
   57.18 +    _table.check_no_speculative_types();
   57.19 +  }
   57.20  
   57.21  #ifndef PRODUCT
   57.22  protected:
    58.1 --- a/src/share/vm/opto/runtime.cpp	Wed Feb 19 20:12:43 2014 -0800
    58.2 +++ b/src/share/vm/opto/runtime.cpp	Tue Feb 25 15:11:18 2014 -0800
    58.3 @@ -826,12 +826,18 @@
    58.4  const TypeFunc* OptoRuntime::aescrypt_block_Type() {
    58.5    // create input type (domain)
    58.6    int num_args      = 3;
    58.7 +  if (Matcher::pass_original_key_for_aes()) {
    58.8 +    num_args = 4;
    58.9 +  }
   58.10    int argcnt = num_args;
   58.11    const Type** fields = TypeTuple::fields(argcnt);
   58.12    int argp = TypeFunc::Parms;
   58.13    fields[argp++] = TypePtr::NOTNULL;    // src
   58.14    fields[argp++] = TypePtr::NOTNULL;    // dest
   58.15    fields[argp++] = TypePtr::NOTNULL;    // k array
   58.16 +  if (Matcher::pass_original_key_for_aes()) {
   58.17 +    fields[argp++] = TypePtr::NOTNULL;    // original k array
   58.18 +  }
   58.19    assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
   58.20    const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
   58.21  
   58.22 @@ -868,6 +874,9 @@
   58.23  const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() {
   58.24    // create input type (domain)
   58.25    int num_args      = 5;
   58.26 +  if (Matcher::pass_original_key_for_aes()) {
   58.27 +    num_args = 6;
   58.28 +  }
   58.29    int argcnt = num_args;
   58.30    const Type** fields = TypeTuple::fields(argcnt);
   58.31    int argp = TypeFunc::Parms;
   58.32 @@ -876,13 +885,16 @@
   58.33    fields[argp++] = TypePtr::NOTNULL;    // k array
   58.34    fields[argp++] = TypePtr::NOTNULL;    // r array
   58.35    fields[argp++] = TypeInt::INT;        // src len
   58.36 +  if (Matcher::pass_original_key_for_aes()) {
   58.37 +    fields[argp++] = TypePtr::NOTNULL;    // original k array
   58.38 +  }
   58.39    assert(argp == TypeFunc::Parms+argcnt, "correct decoding");
   58.40    const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields);
   58.41  
   58.42 -  // no result type needed
   58.43 +  // returning cipher len (int)
   58.44    fields = TypeTuple::fields(1);
   58.45 -  fields[TypeFunc::Parms+0] = NULL; // void
   58.46 -  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms, fields);
   58.47 +  fields[TypeFunc::Parms+0] = TypeInt::INT;
   58.48 +  const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields);
   58.49    return TypeFunc::make(domain, range);
   58.50  }
   58.51  
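In the runtime.cpp hunks above, both AES stub signatures gain one more NOTNULL pointer argument when the original key is passed, and the CBC signature's result changes from void to TypeInt::INT so the stub can report the cipher length; the assert on argp == TypeFunc::Parms+argcnt keeps the declared count and the pushed fields in sync. A reduced sketch of that signature-building pattern (Signature and the names are stand-ins for the TypeFunc machinery):

// The declared argument count and the parameters actually pushed must agree;
// the assert catches a mismatch when only one side is updated.
#include <cassert>
#include <cstdio>
#include <string>
#include <vector>

static bool pass_original_key_for_aes() { return true; }   // platform hook stand-in

struct Signature {
  std::vector<std::string> params;
  std::string result;
};

static Signature cbc_aescrypt_signature() {
  int num_args = 5;
  if (pass_original_key_for_aes()) {
    num_args = 6;                            // extra "original k array" parameter
  }
  Signature sig;
  sig.params = {"src", "dest", "k array", "r array", "src len"};
  if (pass_original_key_for_aes()) {
    sig.params.push_back("original k array");
  }
  assert(static_cast<int>(sig.params.size()) == num_args && "correct decoding");
  sig.result = "int";                        // was void; now the cipher length
  return sig;
}

int main() {
  Signature sig = cbc_aescrypt_signature();
  std::printf("%zu params, returns %s\n", sig.params.size(), sig.result.c_str());
  return 0;
}
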
    59.1 --- a/src/share/vm/opto/type.cpp	Wed Feb 19 20:12:43 2014 -0800
    59.2 +++ b/src/share/vm/opto/type.cpp	Tue Feb 25 15:11:18 2014 -0800
    59.3 @@ -241,6 +241,13 @@
    59.4    return !t1->eq(t2);           // Return ZERO if equal
    59.5  }
    59.6  
    59.7 +const Type* Type::maybe_remove_speculative(bool include_speculative) const {
    59.8 +  if (!include_speculative) {
    59.9 +    return remove_speculative();
   59.10 +  }
   59.11 +  return this;
   59.12 +}
   59.13 +
   59.14  //------------------------------hash-------------------------------------------
   59.15  int Type::uhash( const Type *const t ) {
   59.16    return t->hash();
   59.17 @@ -633,41 +640,44 @@
   59.18  //------------------------------meet-------------------------------------------
   59.19  // Compute the MEET of two types.  NOT virtual.  It enforces that meet is
   59.20  // commutative and the lattice is symmetric.
   59.21 -const Type *Type::meet( const Type *t ) const {
   59.22 +const Type *Type::meet_helper(const Type *t, bool include_speculative) const {
   59.23    if (isa_narrowoop() && t->isa_narrowoop()) {
   59.24 -    const Type* result = make_ptr()->meet(t->make_ptr());
   59.25 +    const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
   59.26      return result->make_narrowoop();
   59.27    }
   59.28    if (isa_narrowklass() && t->isa_narrowklass()) {
   59.29 -    const Type* result = make_ptr()->meet(t->make_ptr());
   59.30 +    const Type* result = make_ptr()->meet_helper(t->make_ptr(), include_speculative);
   59.31      return result->make_narrowklass();
   59.32    }
   59.33  
   59.34 -  const Type *mt = xmeet(t);
   59.35 +  const Type *this_t = maybe_remove_speculative(include_speculative);
   59.36 +  t = t->maybe_remove_speculative(include_speculative);
   59.37 +
   59.38 +  const Type *mt = this_t->xmeet(t);
   59.39    if (isa_narrowoop() || t->isa_narrowoop()) return mt;
   59.40    if (isa_narrowklass() || t->isa_narrowklass()) return mt;
   59.41  #ifdef ASSERT
   59.42 -  assert( mt == t->xmeet(this), "meet not commutative" );
   59.43 +  assert(mt == t->xmeet(this_t), "meet not commutative");
   59.44    const Type* dual_join = mt->_dual;
   59.45    const Type *t2t    = dual_join->xmeet(t->_dual);
   59.46 -  const Type *t2this = dual_join->xmeet(   _dual);
   59.47 +  const Type *t2this = dual_join->xmeet(this_t->_dual);
   59.48  
   59.49    // Interface meet Oop is Not Symmetric:
   59.50    // Interface:AnyNull meet Oop:AnyNull == Interface:AnyNull
   59.51    // Interface:NotNull meet Oop:NotNull == java/lang/Object:NotNull
   59.52  
   59.53 -  if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != _dual) ) {
   59.54 +  if( !interface_vs_oop(t) && (t2t != t->_dual || t2this != this_t->_dual) ) {
   59.55      tty->print_cr("=== Meet Not Symmetric ===");
   59.56 -    tty->print("t   =                   ");         t->dump(); tty->cr();
   59.57 -    tty->print("this=                   ");            dump(); tty->cr();
   59.58 -    tty->print("mt=(t meet this)=       ");        mt->dump(); tty->cr();
   59.59 -
   59.60 -    tty->print("t_dual=                 ");  t->_dual->dump(); tty->cr();
   59.61 -    tty->print("this_dual=              ");     _dual->dump(); tty->cr();
   59.62 -    tty->print("mt_dual=                "); mt->_dual->dump(); tty->cr();
   59.63 -
   59.64 -    tty->print("mt_dual meet t_dual=    "); t2t      ->dump(); tty->cr();
   59.65 -    tty->print("mt_dual meet this_dual= "); t2this   ->dump(); tty->cr();
   59.66 +    tty->print("t   =                   ");              t->dump(); tty->cr();
   59.67 +    tty->print("this=                   ");         this_t->dump(); tty->cr();
   59.68 +    tty->print("mt=(t meet this)=       ");             mt->dump(); tty->cr();
   59.69 +
   59.70 +    tty->print("t_dual=                 ");       t->_dual->dump(); tty->cr();
   59.71 +    tty->print("this_dual=              ");  this_t->_dual->dump(); tty->cr();
   59.72 +    tty->print("mt_dual=                ");      mt->_dual->dump(); tty->cr();
   59.73 +
   59.74 +    tty->print("mt_dual meet t_dual=    "); t2t           ->dump(); tty->cr();
   59.75 +    tty->print("mt_dual meet this_dual= "); t2this        ->dump(); tty->cr();
   59.76  
   59.77      fatal("meet not symmetric" );
   59.78    }
   59.79 @@ -759,8 +769,8 @@
   59.80  }
   59.81  
   59.82  //-----------------------------filter------------------------------------------
   59.83 -const Type *Type::filter( const Type *kills ) const {
   59.84 -  const Type* ft = join(kills);
   59.85 +const Type *Type::filter_helper(const Type *kills, bool include_speculative) const {
   59.86 +  const Type* ft = join_helper(kills, include_speculative);
   59.87    if (ft->empty())
   59.88      return Type::TOP;           // Canonical empty value
   59.89    return ft;
   59.90 @@ -1314,8 +1324,8 @@
   59.91  }
   59.92  
   59.93  //-----------------------------filter------------------------------------------
   59.94 -const Type *TypeInt::filter( const Type *kills ) const {
   59.95 -  const TypeInt* ft = join(kills)->isa_int();
   59.96 +const Type *TypeInt::filter_helper(const Type *kills, bool include_speculative) const {
   59.97 +  const TypeInt* ft = join_helper(kills, include_speculative)->isa_int();
   59.98    if (ft == NULL || ft->empty())
   59.99      return Type::TOP;           // Canonical empty value
  59.100    if (ft->_widen < this->_widen) {
  59.101 @@ -1575,8 +1585,8 @@
  59.102  }
  59.103  
  59.104  //-----------------------------filter------------------------------------------
  59.105 -const Type *TypeLong::filter( const Type *kills ) const {
  59.106 -  const TypeLong* ft = join(kills)->isa_long();
  59.107 +const Type *TypeLong::filter_helper(const Type *kills, bool include_speculative) const {
  59.108 +  const TypeLong* ft = join_helper(kills, include_speculative)->isa_long();
  59.109    if (ft == NULL || ft->empty())
  59.110      return Type::TOP;           // Canonical empty value
  59.111    if (ft->_widen < this->_widen) {
  59.112 @@ -1731,7 +1741,7 @@
  59.113      total_fields++;
  59.114      field_array = fields(total_fields);
  59.115      // Use get_const_type here because it respects UseUniqueSubclasses:
  59.116 -    field_array[pos++] = get_const_type(recv)->join(TypePtr::NOTNULL);
  59.117 +    field_array[pos++] = get_const_type(recv)->join_speculative(TypePtr::NOTNULL);
  59.118    } else {
  59.119      field_array = fields(total_fields);
  59.120    }
  59.121 @@ -1921,7 +1931,7 @@
  59.122  
  59.123    case Array: {                 // Meeting 2 arrays?
  59.124      const TypeAry *a = t->is_ary();
  59.125 -    return TypeAry::make(_elem->meet(a->_elem),
  59.126 +    return TypeAry::make(_elem->meet_speculative(a->_elem),
  59.127                           _size->xmeet(a->_size)->is_int(),
  59.128                           _stable & a->_stable);
  59.129    }
  59.130 @@ -1954,6 +1964,13 @@
  59.131    return (intptr_t)_elem + (intptr_t)_size + (_stable ? 43 : 0);
  59.132  }
  59.133  
  59.134 +/**
  59.135 + * Return same type without a speculative part in the element
  59.136 + */
  59.137 +const Type* TypeAry::remove_speculative() const {
  59.138 +  return make(_elem->remove_speculative(), _size, _stable);
  59.139 +}
  59.140 +
  59.141  //----------------------interface_vs_oop---------------------------------------
  59.142  #ifdef ASSERT
  59.143  bool TypeAry::interface_vs_oop(const Type *t) const {
  59.144 @@ -2566,14 +2583,14 @@
  59.145      return res;
  59.146    }
  59.147  
  59.148 -  if (res->isa_oopptr() != NULL) {
  59.149 +  const TypeOopPtr* res_oopptr = res->is_oopptr();
  59.150 +  if (res_oopptr->speculative() != NULL) {
  59.151      // type->speculative() == NULL means that speculation is no better
  59.152      // than type, i.e. type->speculative() == type. So there are 2
  59.153      // ways to represent the fact that we have no useful speculative
  59.154      // data and we should use a single one to be able to test for
  59.155      // equality between types. Check whether type->speculative() ==
  59.156      // type and set speculative to NULL if it is the case.
  59.157 -    const TypeOopPtr* res_oopptr = res->is_oopptr();
  59.158      if (res_oopptr->remove_speculative() == res_oopptr->speculative()) {
  59.159        return res_oopptr->remove_speculative();
  59.160      }
  59.161 @@ -2639,7 +2656,7 @@
  59.162    case OopPtr: {                 // Meeting to other OopPtrs
  59.163      const TypeOopPtr *tp = t->is_oopptr();
  59.164      int instance_id = meet_instance_id(tp->instance_id());
  59.165 -    const TypeOopPtr* speculative = meet_speculative(tp);
  59.166 +    const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.167      return make(meet_ptr(tp->ptr()), meet_offset(tp->offset()), instance_id, speculative);
  59.168    }
  59.169  
  59.170 @@ -2793,9 +2810,9 @@
  59.171  
  59.172  //-----------------------------filter------------------------------------------
  59.173  // Do not allow interface-vs.-noninterface joins to collapse to top.
  59.174 -const Type *TypeOopPtr::filter(const Type *kills) const {
  59.175 -
  59.176 -  const Type* ft = join(kills);
  59.177 +const Type *TypeOopPtr::filter_helper(const Type *kills, bool include_speculative) const {
  59.178 +
  59.179 +  const Type* ft = join_helper(kills, include_speculative);
  59.180    const TypeInstPtr* ftip = ft->isa_instptr();
  59.181    const TypeInstPtr* ktip = kills->isa_instptr();
  59.182  
  59.183 @@ -2907,7 +2924,10 @@
  59.184  /**
  59.185   * Return same type without a speculative part
  59.186   */
  59.187 -const TypeOopPtr* TypeOopPtr::remove_speculative() const {
  59.188 +const Type* TypeOopPtr::remove_speculative() const {
  59.189 +  if (_speculative == NULL) {
  59.190 +    return this;
  59.191 +  }
  59.192    return make(_ptr, _offset, _instance_id, NULL);
  59.193  }
  59.194  
  59.195 @@ -2933,7 +2953,7 @@
  59.196   *
  59.197   * @param other  type to meet with
  59.198   */
  59.199 -const TypeOopPtr* TypeOopPtr::meet_speculative(const TypeOopPtr* other) const {
  59.200 +const TypeOopPtr* TypeOopPtr::xmeet_speculative(const TypeOopPtr* other) const {
  59.201    bool this_has_spec = (_speculative != NULL);
  59.202    bool other_has_spec = (other->speculative() != NULL);
  59.203  
  59.204 @@ -2958,7 +2978,7 @@
  59.205      other_spec = other;
  59.206    }
  59.207  
  59.208 -  return this_spec->meet(other_spec)->is_oopptr();
  59.209 +  return this_spec->meet_speculative(other_spec)->is_oopptr();
  59.210  }
  59.211  
  59.212  /**
  59.213 @@ -3117,7 +3137,7 @@
  59.214      int off = meet_offset(tinst->offset());
  59.215      PTR ptr = meet_ptr(tinst->ptr());
  59.216      int instance_id = meet_instance_id(tinst->instance_id());
  59.217 -    const TypeOopPtr* speculative = meet_speculative(tinst);
  59.218 +    const TypeOopPtr* speculative = xmeet_speculative(tinst);
  59.219  
  59.220      const TypeInstPtr *loaded    = is_loaded() ? this  : tinst;
  59.221      const TypeInstPtr *unloaded  = is_loaded() ? tinst : this;
  59.222 @@ -3194,7 +3214,7 @@
  59.223      int offset = meet_offset(tp->offset());
  59.224      PTR ptr = meet_ptr(tp->ptr());
  59.225      int instance_id = meet_instance_id(tp->instance_id());
  59.226 -    const TypeOopPtr* speculative = meet_speculative(tp);
  59.227 +    const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.228      switch (ptr) {
  59.229      case TopPTR:
  59.230      case AnyNull:                // Fall 'down' to dual of object klass
  59.231 @@ -3244,14 +3264,14 @@
  59.232      case TopPTR:
  59.233      case AnyNull: {
  59.234        int instance_id = meet_instance_id(InstanceTop);
  59.235 -      const TypeOopPtr* speculative = meet_speculative(tp);
  59.236 +      const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.237        return make(ptr, klass(), klass_is_exact(),
  59.238                    (ptr == Constant ? const_oop() : NULL), offset, instance_id, speculative);
  59.239      }
  59.240      case NotNull:
  59.241      case BotPTR: {
  59.242        int instance_id = meet_instance_id(tp->instance_id());
  59.243 -      const TypeOopPtr* speculative = meet_speculative(tp);
  59.244 +      const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.245        return TypeOopPtr::make(ptr, offset, instance_id, speculative);
  59.246      }
  59.247      default: typerr(t);
  59.248 @@ -3303,7 +3323,7 @@
  59.249      int off = meet_offset( tinst->offset() );
  59.250      PTR ptr = meet_ptr( tinst->ptr() );
  59.251      int instance_id = meet_instance_id(tinst->instance_id());
  59.252 -    const TypeOopPtr* speculative = meet_speculative(tinst);
  59.253 +    const TypeOopPtr* speculative = xmeet_speculative(tinst);
  59.254  
  59.255      // Check for easy case; klasses are equal (and perhaps not loaded!)
  59.256      // If we have constants, then we created oops so classes are loaded
  59.257 @@ -3552,7 +3572,10 @@
  59.258    return make(_ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id, add_offset_speculative(offset));
  59.259  }
  59.260  
  59.261 -const TypeOopPtr *TypeInstPtr::remove_speculative() const {
  59.262 +const Type *TypeInstPtr::remove_speculative() const {
  59.263 +  if (_speculative == NULL) {
  59.264 +    return this;
  59.265 +  }
  59.266    return make(_ptr, klass(), klass_is_exact(), const_oop(), _offset, _instance_id, NULL);
  59.267  }
  59.268  
  59.269 @@ -3754,14 +3777,14 @@
  59.270      case TopPTR:
  59.271      case AnyNull: {
  59.272        int instance_id = meet_instance_id(InstanceTop);
  59.273 -      const TypeOopPtr* speculative = meet_speculative(tp);
  59.274 +      const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.275        return make(ptr, (ptr == Constant ? const_oop() : NULL),
  59.276                    _ary, _klass, _klass_is_exact, offset, instance_id, speculative);
  59.277      }
  59.278      case BotPTR:
  59.279      case NotNull: {
  59.280        int instance_id = meet_instance_id(tp->instance_id());
  59.281 -      const TypeOopPtr* speculative = meet_speculative(tp);
  59.282 +      const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.283        return TypeOopPtr::make(ptr, offset, instance_id, speculative);
  59.284      }
  59.285      default: ShouldNotReachHere();
  59.286 @@ -3799,10 +3822,10 @@
  59.287    case AryPtr: {                // Meeting 2 references?
  59.288      const TypeAryPtr *tap = t->is_aryptr();
  59.289      int off = meet_offset(tap->offset());
  59.290 -    const TypeAry *tary = _ary->meet(tap->_ary)->is_ary();
  59.291 +    const TypeAry *tary = _ary->meet_speculative(tap->_ary)->is_ary();
  59.292      PTR ptr = meet_ptr(tap->ptr());
  59.293      int instance_id = meet_instance_id(tap->instance_id());
  59.294 -    const TypeOopPtr* speculative = meet_speculative(tap);
  59.295 +    const TypeOopPtr* speculative = xmeet_speculative(tap);
  59.296      ciKlass* lazy_klass = NULL;
  59.297      if (tary->_elem->isa_int()) {
  59.298        // Integral array element types have irrelevant lattice relations.
  59.299 @@ -3882,7 +3905,7 @@
  59.300      int offset = meet_offset(tp->offset());
  59.301      PTR ptr = meet_ptr(tp->ptr());
  59.302      int instance_id = meet_instance_id(tp->instance_id());
  59.303 -    const TypeOopPtr* speculative = meet_speculative(tp);
  59.304 +    const TypeOopPtr* speculative = xmeet_speculative(tp);
  59.305      switch (ptr) {
  59.306      case TopPTR:
  59.307      case AnyNull:                // Fall 'down' to dual of object klass
  59.308 @@ -3996,8 +4019,8 @@
  59.309    return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id, add_offset_speculative(offset));
  59.310  }
  59.311  
  59.312 -const TypeOopPtr *TypeAryPtr::remove_speculative() const {
  59.313 -  return make(_ptr, _const_oop, _ary, _klass, _klass_is_exact, _offset, _instance_id, NULL);
  59.314 +const Type *TypeAryPtr::remove_speculative() const {
  59.315 +  return make(_ptr, _const_oop, _ary->remove_speculative()->is_ary(), _klass, _klass_is_exact, _offset, _instance_id, NULL);
  59.316  }
  59.317  
  59.318  //=============================================================================
  59.319 @@ -4037,9 +4060,9 @@
  59.320  }
  59.321  
  59.322  
  59.323 -const Type *TypeNarrowPtr::filter( const Type *kills ) const {
  59.324 +const Type *TypeNarrowPtr::filter_helper(const Type *kills, bool include_speculative) const {
  59.325    if (isa_same_narrowptr(kills)) {
  59.326 -    const Type* ft =_ptrtype->filter(is_same_narrowptr(kills)->_ptrtype);
   59.327 +    const Type* ft = _ptrtype->filter_helper(is_same_narrowptr(kills)->_ptrtype, include_speculative);
  59.328      if (ft->empty())
  59.329        return Type::TOP;           // Canonical empty value
  59.330      if (ft->isa_ptr()) {
  59.331 @@ -4047,7 +4070,7 @@
  59.332      }
  59.333      return ft;
  59.334    } else if (kills->isa_ptr()) {
  59.335 -    const Type* ft = _ptrtype->join(kills);
  59.336 +    const Type* ft = _ptrtype->join_helper(kills, include_speculative);
  59.337      if (ft->empty())
  59.338        return Type::TOP;           // Canonical empty value
  59.339      return ft;
  59.340 @@ -4177,8 +4200,8 @@
  59.341  
  59.342  //-----------------------------filter------------------------------------------
  59.343  // Do not allow interface-vs.-noninterface joins to collapse to top.
  59.344 -const Type *TypeMetadataPtr::filter( const Type *kills ) const {
  59.345 -  const TypeMetadataPtr* ft = join(kills)->isa_metadataptr();
  59.346 +const Type *TypeMetadataPtr::filter_helper(const Type *kills, bool include_speculative) const {
  59.347 +  const TypeMetadataPtr* ft = join_helper(kills, include_speculative)->isa_metadataptr();
  59.348    if (ft == NULL || ft->empty())
  59.349      return Type::TOP;           // Canonical empty value
  59.350    return ft;
  59.351 @@ -4380,10 +4403,10 @@
  59.352  }
  59.353  
  59.354  // Do not allow interface-vs.-noninterface joins to collapse to top.
  59.355 -const Type *TypeKlassPtr::filter(const Type *kills) const {
  59.356 +const Type *TypeKlassPtr::filter_helper(const Type *kills, bool include_speculative) const {
  59.357    // logic here mirrors the one from TypeOopPtr::filter. See comments
  59.358    // there.
  59.359 -  const Type* ft = join(kills);
  59.360 +  const Type* ft = join_helper(kills, include_speculative);
  59.361    const TypeKlassPtr* ftkp = ft->isa_klassptr();
  59.362    const TypeKlassPtr* ktkp = kills->isa_klassptr();
  59.363  
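The type.cpp changes above funnel meet, join and filter through helpers that take an include_speculative flag: by default both operands are stripped of their speculative part before xmeet runs (which is why callers such as graphKit.cpp now compare results against t->remove_speculative()), the _speculative variants keep it, remove_speculative() returns this when there is nothing to strip, and the internal speculative-merging helper is renamed xmeet_speculative. A reduced model of that pattern; SimpleType and its members are illustrative only, not the HotSpot lattice.

// A type is a base plus an optional speculative refinement. The default meet
// drops the speculative part on both sides; meet_speculative keeps it.
#include <cstdio>
#include <string>

struct SimpleType {
  std::string base;
  std::string speculative;           // empty == no speculative part

  SimpleType remove_speculative() const {
    if (speculative.empty()) return *this;    // nothing to strip: return self
    return SimpleType{base, ""};
  }
  SimpleType maybe_remove_speculative(bool include_speculative) const {
    return include_speculative ? *this : remove_speculative();
  }
  SimpleType meet_helper(const SimpleType& t, bool include_speculative) const {
    SimpleType a = maybe_remove_speculative(include_speculative);
    SimpleType b = t.maybe_remove_speculative(include_speculative);
    // toy meet: keep the common base, keep a speculative hint only if both agree
    return SimpleType{a.base == b.base ? a.base : "bottom",
                      a.speculative == b.speculative ? a.speculative : ""};
  }
  SimpleType meet(const SimpleType& t) const { return meet_helper(t, false); }
  SimpleType meet_speculative(const SimpleType& t) const { return meet_helper(t, true); }
};

int main() {
  SimpleType x{"Object", "String"};
  SimpleType y{"Object", "String"};
  std::printf("meet:             '%s'\n", x.meet(y).speculative.c_str());              // dropped
  std::printf("meet_speculative: '%s'\n", x.meet_speculative(y).speculative.c_str());  // kept
  return 0;
}
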
    60.1 --- a/src/share/vm/opto/type.hpp	Wed Feb 19 20:12:43 2014 -0800
    60.2 +++ b/src/share/vm/opto/type.hpp	Tue Feb 25 15:11:18 2014 -0800
    60.3 @@ -164,6 +164,8 @@
    60.4    virtual bool interface_vs_oop_helper(const Type *t) const;
    60.5  #endif
    60.6  
    60.7 +  const Type *meet_helper(const Type *t, bool include_speculative) const;
    60.8 +
    60.9  protected:
   60.10    // Each class of type is also identified by its base.
   60.11    const TYPES _base;            // Enum of Types type
   60.12 @@ -171,6 +173,10 @@
   60.13    Type( TYPES t ) : _dual(NULL),  _base(t) {} // Simple types
   60.14    // ~Type();                   // Use fast deallocation
   60.15    const Type *hashcons();       // Hash-cons the type
   60.16 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
   60.17 +  const Type *join_helper(const Type *t, bool include_speculative) const {
   60.18 +    return dual()->meet_helper(t->dual(), include_speculative)->dual();
   60.19 +  }
   60.20  
   60.21  public:
   60.22  
   60.23 @@ -202,10 +208,24 @@
   60.24    // Test for equivalence of types
   60.25    static int cmp( const Type *const t1, const Type *const t2 );
   60.26    // Test for higher or equal in lattice
   60.27 -  int higher_equal( const Type *t ) const { return !cmp(meet(t),t); }
   60.28 +  // Variant that drops the speculative part of the types
   60.29 +  int higher_equal(const Type *t) const {
   60.30 +    return !cmp(meet(t),t->remove_speculative());
   60.31 +  }
   60.32 +  // Variant that keeps the speculative part of the types
   60.33 +  int higher_equal_speculative(const Type *t) const {
   60.34 +    return !cmp(meet_speculative(t),t);
   60.35 +  }
   60.36  
   60.37    // MEET operation; lower in lattice.
   60.38 -  const Type *meet( const Type *t ) const;
   60.39 +  // Variant that drops the speculative part of the types
   60.40 +  const Type *meet(const Type *t) const {
   60.41 +    return meet_helper(t, false);
   60.42 +  }
   60.43 +  // Variant that keeps the speculative part of the types
   60.44 +  const Type *meet_speculative(const Type *t) const {
   60.45 +    return meet_helper(t, true);
   60.46 +  }
   60.47    // WIDEN: 'widens' for Ints and other range types
   60.48    virtual const Type *widen( const Type *old, const Type* limit ) const { return this; }
   60.49    // NARROW: complement for widen, used by pessimistic phases
   60.50 @@ -221,13 +241,26 @@
   60.51  
   60.52    // JOIN operation; higher in lattice.  Done by finding the dual of the
   60.53    // meet of the dual of the 2 inputs.
   60.54 -  const Type *join( const Type *t ) const {
   60.55 -    return dual()->meet(t->dual())->dual(); }
   60.56 +  // Variant that drops the speculative part of the types
   60.57 +  const Type *join(const Type *t) const {
   60.58 +    return join_helper(t, false);
   60.59 +  }
   60.60 +  // Variant that keeps the speculative part of the types
   60.61 +  const Type *join_speculative(const Type *t) const {
   60.62 +    return join_helper(t, true);
   60.63 +  }
   60.64  
   60.65    // Modified version of JOIN adapted to the needs Node::Value.
   60.66    // Normalizes all empty values to TOP.  Does not kill _widen bits.
   60.67    // Currently, it also works around limitations involving interface types.
   60.68 -  virtual const Type *filter( const Type *kills ) const;
   60.69 +  // Variant that drops the speculative part of the types
   60.70 +  const Type *filter(const Type *kills) const {
   60.71 +    return filter_helper(kills, false);
   60.72 +  }
   60.73 +  // Variant that keeps the speculative part of the types
   60.74 +  const Type *filter_speculative(const Type *kills) const {
   60.75 +    return filter_helper(kills, true);
   60.76 +  }
   60.77  
   60.78  #ifdef ASSERT
   60.79    // One type is interface, the other is oop
   60.80 @@ -383,6 +416,8 @@
   60.81  
   60.82    // Speculative type. See TypeInstPtr
   60.83    virtual ciKlass* speculative_type() const { return NULL; }
   60.84 +  const Type* maybe_remove_speculative(bool include_speculative) const;
   60.85 +  virtual const Type* remove_speculative() const { return this; }
   60.86  
   60.87  private:
   60.88    // support arrays
   60.89 @@ -450,12 +485,14 @@
   60.90  // upper bound, inclusive.
   60.91  class TypeInt : public Type {
   60.92    TypeInt( jint lo, jint hi, int w );
   60.93 +protected:
   60.94 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
   60.95 +
   60.96  public:
   60.97    virtual bool eq( const Type *t ) const;
   60.98    virtual int  hash() const;             // Type specific hashing
   60.99    virtual bool singleton(void) const;    // TRUE if type is a singleton
  60.100    virtual bool empty(void) const;        // TRUE if type is vacuous
  60.101 -public:
  60.102    const jint _lo, _hi;          // Lower bound, upper bound
  60.103    const short _widen;           // Limit on times we widen this sucker
  60.104  
  60.105 @@ -475,7 +512,6 @@
  60.106    virtual const Type *widen( const Type *t, const Type* limit_type ) const;
  60.107    virtual const Type *narrow( const Type *t ) const;
  60.108    // Do not kill _widen bits.
  60.109 -  virtual const Type *filter( const Type *kills ) const;
  60.110    // Convenience common pre-built types.
  60.111    static const TypeInt *MINUS_1;
  60.112    static const TypeInt *ZERO;
  60.113 @@ -506,6 +542,9 @@
  60.114  // an upper bound, inclusive.
  60.115  class TypeLong : public Type {
  60.116    TypeLong( jlong lo, jlong hi, int w );
  60.117 +protected:
  60.118 +  // Do not kill _widen bits.
  60.119 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  60.120  public:
  60.121    virtual bool eq( const Type *t ) const;
  60.122    virtual int  hash() const;             // Type specific hashing
  60.123 @@ -533,8 +572,6 @@
  60.124    virtual const Type *xdual() const;    // Compute dual right now.
  60.125    virtual const Type *widen( const Type *t, const Type* limit_type ) const;
  60.126    virtual const Type *narrow( const Type *t ) const;
  60.127 -  // Do not kill _widen bits.
  60.128 -  virtual const Type *filter( const Type *kills ) const;
  60.129    // Convenience common pre-built types.
  60.130    static const TypeLong *MINUS_1;
  60.131    static const TypeLong *ZERO;
  60.132 @@ -625,6 +662,7 @@
  60.133    virtual const Type *xmeet( const Type *t ) const;
  60.134    virtual const Type *xdual() const;    // Compute dual right now.
  60.135    bool ary_must_be_exact() const;  // true if arrays of such are never generic
  60.136 +  virtual const Type* remove_speculative() const;
  60.137  #ifdef ASSERT
  60.138    // One type is interface, the other is oop
  60.139    virtual bool interface_vs_oop(const Type *t) const;
  60.140 @@ -835,7 +873,7 @@
  60.141  
  60.142    // utility methods to work on the speculative part of the type
  60.143    const TypeOopPtr* dual_speculative() const;
  60.144 -  const TypeOopPtr* meet_speculative(const TypeOopPtr* other) const;
  60.145 +  const TypeOopPtr* xmeet_speculative(const TypeOopPtr* other) const;
  60.146    bool eq_speculative(const TypeOopPtr* other) const;
  60.147    int hash_speculative() const;
  60.148    const TypeOopPtr* add_offset_speculative(intptr_t offset) const;
  60.149 @@ -843,6 +881,9 @@
  60.150    void dump_speculative(outputStream *st) const;
  60.151  #endif
  60.152  
  60.153 +  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.154 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  60.155 +
  60.156  public:
  60.157    // Creates a type given a klass. Correctly handles multi-dimensional arrays
  60.158    // Respects UseUniqueSubclasses.
  60.159 @@ -898,16 +939,13 @@
  60.160  
  60.161    virtual const TypePtr *add_offset( intptr_t offset ) const;
  60.162    // Return same type without a speculative part
  60.163 -  virtual const TypeOopPtr* remove_speculative() const;
  60.164 +  virtual const Type* remove_speculative() const;
  60.165  
  60.166    virtual const Type *xmeet(const Type *t) const;
  60.167    virtual const Type *xdual() const;    // Compute dual right now.
  60.168    // the core of the computation of the meet for TypeOopPtr and for its subclasses
  60.169    virtual const Type *xmeet_helper(const Type *t) const;
  60.170  
  60.171 -  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.172 -  virtual const Type *filter( const Type *kills ) const;
  60.173 -
  60.174    // Convenience common pre-built type.
  60.175    static const TypeOopPtr *BOTTOM;
  60.176  #ifndef PRODUCT
  60.177 @@ -984,7 +1022,7 @@
  60.178  
  60.179    virtual const TypePtr *add_offset( intptr_t offset ) const;
  60.180    // Return same type without a speculative part
  60.181 -  virtual const TypeOopPtr* remove_speculative() const;
  60.182 +  virtual const Type* remove_speculative() const;
  60.183  
  60.184    // the core of the computation of the meet of 2 types
  60.185    virtual const Type *xmeet_helper(const Type *t) const;
  60.186 @@ -1062,7 +1100,7 @@
  60.187    virtual bool empty(void) const;        // TRUE if type is vacuous
  60.188    virtual const TypePtr *add_offset( intptr_t offset ) const;
  60.189    // Return same type without a speculative part
  60.190 -  virtual const TypeOopPtr* remove_speculative() const;
  60.191 +  virtual const Type* remove_speculative() const;
  60.192  
  60.193    // the core of the computation of the meet of 2 types
  60.194    virtual const Type *xmeet_helper(const Type *t) const;
  60.195 @@ -1103,6 +1141,8 @@
  60.196  class TypeMetadataPtr : public TypePtr {
  60.197  protected:
  60.198    TypeMetadataPtr(PTR ptr, ciMetadata* metadata, int offset);
  60.199 +  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.200 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  60.201  public:
  60.202    virtual bool eq( const Type *t ) const;
  60.203    virtual int  hash() const;             // Type specific hashing
  60.204 @@ -1128,9 +1168,6 @@
  60.205  
  60.206    virtual intptr_t get_con() const;
  60.207  
  60.208 -  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.209 -  virtual const Type *filter( const Type *kills ) const;
  60.210 -
  60.211    // Convenience common pre-built types.
  60.212    static const TypeMetadataPtr *BOTTOM;
  60.213  
  60.214 @@ -1144,6 +1181,8 @@
  60.215  class TypeKlassPtr : public TypePtr {
  60.216    TypeKlassPtr( PTR ptr, ciKlass* klass, int offset );
  60.217  
  60.218 +protected:
  60.219 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  60.220   public:
  60.221    virtual bool eq( const Type *t ) const;
  60.222    virtual int hash() const;             // Type specific hashing
  60.223 @@ -1205,9 +1244,6 @@
  60.224  
  60.225    virtual intptr_t get_con() const;
  60.226  
  60.227 -  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.228 -  virtual const Type *filter( const Type *kills ) const;
  60.229 -
  60.230    // Convenience common pre-built types.
  60.231    static const TypeKlassPtr* OBJECT; // Not-null object klass or below
  60.232    static const TypeKlassPtr* OBJECT_OR_NULL; // Maybe-null version of same
  60.233 @@ -1231,6 +1267,8 @@
  60.234    virtual const TypeNarrowPtr *is_same_narrowptr(const Type *t) const = 0;
  60.235    virtual const TypeNarrowPtr *make_same_narrowptr(const TypePtr *t) const = 0;
  60.236    virtual const TypeNarrowPtr *make_hash_same_narrowptr(const TypePtr *t) const = 0;
  60.237 +  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.238 +  virtual const Type *filter_helper(const Type *kills, bool include_speculative) const;
  60.239  public:
  60.240    virtual bool eq( const Type *t ) const;
  60.241    virtual int  hash() const;             // Type specific hashing
  60.242 @@ -1241,9 +1279,6 @@
  60.243  
  60.244    virtual intptr_t get_con() const;
  60.245  
  60.246 -  // Do not allow interface-vs.-noninterface joins to collapse to top.
  60.247 -  virtual const Type *filter( const Type *kills ) const;
  60.248 -
  60.249    virtual bool empty(void) const;        // TRUE if type is vacuous
  60.250  
  60.251    // returns the equivalent ptr type for this compressed pointer
  60.252 @@ -1294,6 +1329,10 @@
  60.253    static const TypeNarrowOop *BOTTOM;
  60.254    static const TypeNarrowOop *NULL_PTR;
  60.255  
  60.256 +  virtual const Type* remove_speculative() const {
  60.257 +    return make(_ptrtype->remove_speculative()->is_ptr());
  60.258 +  }
  60.259 +
  60.260  #ifndef PRODUCT
  60.261    virtual void dump2( Dict &d, uint depth, outputStream *st ) const;
  60.262  #endif
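
In type.hpp above, higher_equal, meet, join and filter all come in two flavors, and the default higher_equal now compares against t->remove_speculative() because the plain meet discards speculative parts. The definition itself is the classic lattice test: a is higher than or equal to b exactly when meet(a, b) == b. A worked example on an integer-range lattice in the spirit of TypeInt (Range and meet are stand-ins, not HotSpot code):

// higher_equal via meet: meeting with a lower-or-equal type changes nothing.
#include <algorithm>
#include <cstdio>

struct Range {
  int lo, hi;
  bool operator==(const Range& o) const { return lo == o.lo && hi == o.hi; }
};

// Meet moves down the lattice: the smallest range containing both inputs.
static Range meet(const Range& a, const Range& b) {
  return Range{std::min(a.lo, b.lo), std::max(a.hi, b.hi)};
}

// a is higher than or equal to b when meet(a, b) == b,
// mirroring higher_equal(t) == !cmp(meet(t), t).
static bool higher_equal(const Range& a, const Range& b) {
  return meet(a, b) == b;
}

int main() {
  Range narrow{2, 3};
  Range wide{0, 10};
  std::printf("[2,3] higher_equal [0,10]: %d\n", higher_equal(narrow, wide));  // 1
  std::printf("[0,10] higher_equal [2,3]: %d\n", higher_equal(wide, narrow));  // 0
  return 0;
}
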
    61.1 --- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Wed Feb 19 20:12:43 2014 -0800
    61.2 +++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp	Tue Feb 25 15:11:18 2014 -0800
    61.3 @@ -26,6 +26,7 @@
    61.4  #include "code/codeBlob.hpp"
    61.5  #include "code/codeCache.hpp"
    61.6  #include "code/scopeDesc.hpp"
    61.7 +#include "code/vtableStubs.hpp"
    61.8  #include "memory/resourceArea.hpp"
    61.9  #include "oops/oop.inline.hpp"
   61.10  #include "prims/jvmtiCodeBlobEvents.hpp"
   61.11 @@ -63,6 +64,7 @@
   61.12    // used during a collection
   61.13    static GrowableArray<JvmtiCodeBlobDesc*>* _global_code_blobs;
   61.14    static void do_blob(CodeBlob* cb);
   61.15 +  static void do_vtable_stub(VtableStub* vs);
   61.16   public:
   61.17    CodeBlobCollector() {
   61.18      _code_blobs = NULL;
   61.19 @@ -119,6 +121,10 @@
   61.20    if (cb->is_nmethod()) {
   61.21      return;
   61.22    }
   61.23 +  // exclude VtableStubs, which are processed separately
   61.24 +  if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
   61.25 +    return;
   61.26 +  }
   61.27  
   61.28    // check if this starting address has been seen already - the
   61.29    // assumption is that stubs are inserted into the list before the
   61.30 @@ -136,6 +142,13 @@
   61.31    _global_code_blobs->append(scb);
   61.32  }
   61.33  
   61.34 +// called for each VtableStub in VtableStubs
   61.35 +
   61.36 +void CodeBlobCollector::do_vtable_stub(VtableStub* vs) {
   61.37 +    JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(vs->is_vtable_stub() ? "vtable stub" : "itable stub",
   61.38 +                                                   vs->code_begin(), vs->code_end());
   61.39 +    _global_code_blobs->append(scb);
   61.40 +}
   61.41  
   61.42  // collects a list of CodeBlobs in the CodeCache.
   61.43  //
   61.44 @@ -166,6 +179,10 @@
   61.45      _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
   61.46    }
   61.47  
   61.48 +  // Vtable stubs are not described with StubCodeDesc,
   61.49 +  // process them separately
   61.50 +  VtableStubs::vtable_stub_do(do_vtable_stub);
   61.51 +
   61.52    // next iterate over all the non-nmethod code blobs and add them to
   61.53    // the list - as noted above this will filter out duplicates and
   61.54    // enclosing blobs.
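
The jvmtiCodeBlobEvents.cpp change above stops reporting the whole "vtable chunks" buffer blob and instead walks VtableStubs::vtable_stub_do, emitting one descriptor per stub, named "vtable stub" or "itable stub". A simplified model of that collection strategy; Blob, Stub and collect are illustrative stand-ins for the CodeCache and VtableStubs iteration:

// Skip the container blob and report the individual stubs it holds instead.
#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

struct Blob { const char* name; };
struct Stub { bool is_vtable; };

static std::vector<std::string> collect(const std::vector<Blob>& blobs,
                                        const std::vector<Stub>& stubs) {
  std::vector<std::string> out;
  for (const Blob& b : blobs) {
    if (std::strcmp(b.name, "vtable chunks") == 0) {
      continue;                      // processed separately, stub by stub
    }
    out.push_back(b.name);
  }
  for (const Stub& s : stubs) {      // VtableStubs::vtable_stub_do analogue
    out.push_back(s.is_vtable ? "vtable stub" : "itable stub");
  }
  return out;
}

int main() {
  std::vector<Blob> blobs{{"adapters"}, {"vtable chunks"}};
  std::vector<Stub> stubs{{true}, {false}};
  for (const std::string& n : collect(blobs, stubs)) {
    std::printf("%s\n", n.c_str());
  }
  return 0;
}
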
    62.1 --- a/src/share/vm/runtime/arguments.cpp	Wed Feb 19 20:12:43 2014 -0800
    62.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Feb 25 15:11:18 2014 -0800
    62.3 @@ -293,6 +293,7 @@
    62.4    { "UsePermISM",                    JDK_Version::jdk(8), JDK_Version::jdk(9) },
    62.5    { "UseMPSS",                       JDK_Version::jdk(8), JDK_Version::jdk(9) },
    62.6    { "UseStringCache",                JDK_Version::jdk(8), JDK_Version::jdk(9) },
    62.7 +  { "UseOldInlining",                JDK_Version::jdk(9), JDK_Version::jdk(10) },
    62.8  #ifdef PRODUCT
    62.9    { "DesiredMethodLimit",
   62.10                             JDK_Version::jdk_update(7, 2), JDK_Version::jdk(8) },
   62.11 @@ -881,7 +882,7 @@
   62.12      arg_len = equal_sign - argname;
   62.13    }
   62.14  
   62.15 -  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true);
   62.16 +  Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true, true);
   62.17    if (found_flag != NULL) {
   62.18      char locked_message_buf[BUFLEN];
   62.19      found_flag->get_locked_message(locked_message_buf, BUFLEN);
    63.1 --- a/src/share/vm/runtime/globals.cpp	Wed Feb 19 20:12:43 2014 -0800
    63.2 +++ b/src/share/vm/runtime/globals.cpp	Tue Feb 25 15:11:18 2014 -0800
    63.3 @@ -62,6 +62,14 @@
    63.4  MATERIALIZE_FLAGS_EXT
    63.5  
    63.6  
    63.7 +static bool is_product_build() {
    63.8 +#ifdef PRODUCT
    63.9 +  return true;
   63.10 +#else
   63.11 +  return false;
   63.12 +#endif
   63.13 +}
   63.14 +
   63.15  void Flag::check_writable() {
   63.16    if (is_constant_in_binary()) {
   63.17      fatal(err_msg("flag is constant: %s", _name));
   63.18 @@ -235,6 +243,27 @@
   63.19  // Get custom message for this locked flag, or return NULL if
   63.20  // none is available.
   63.21  void Flag::get_locked_message(char* buf, int buflen) const {
   63.22 +  buf[0] = '\0';
   63.23 +  if (is_diagnostic() && !is_unlocked()) {
   63.24 +    jio_snprintf(buf, buflen, "Error: VM option '%s' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.\n",
   63.25 +                 _name);
   63.26 +    return;
   63.27 +  }
   63.28 +  if (is_experimental() && !is_unlocked()) {
   63.29 +    jio_snprintf(buf, buflen, "Error: VM option '%s' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.\n",
   63.30 +                 _name);
   63.31 +    return;
   63.32 +  }
   63.33 +  if (is_develop() && is_product_build()) {
   63.34 +    jio_snprintf(buf, buflen, "Error: VM option '%s' is develop and is available only in debug version of VM.\n",
   63.35 +                 _name);
   63.36 +    return;
   63.37 +  }
   63.38 +  if (is_notproduct() && is_product_build()) {
   63.39 +    jio_snprintf(buf, buflen, "Error: VM option '%s' is notproduct and is available only in debug version of VM.\n",
   63.40 +                 _name);
   63.41 +    return;
   63.42 +  }
   63.43    get_locked_message_ext(buf, buflen);
   63.44  }
   63.45  
   63.46 @@ -464,13 +493,13 @@
   63.47  }
   63.48  
   63.49  // Search the flag table for a named flag
   63.50 -Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) {
   63.51 +Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked, bool return_flag) {
   63.52    for (Flag* current = &flagTable[0]; current->_name != NULL; current++) {
   63.53      if (str_equal(current->_name, name, length)) {
   63.54        // Found a matching entry.
   63.55        // Don't report notproduct and develop flags in product builds.
   63.56        if (current->is_constant_in_binary()) {
   63.57 -        return NULL;
   63.58 +        return (return_flag == true ? current : NULL);
   63.59        }
   63.60        // Report locked flags only if allowed.
   63.61        if (!(current->is_unlocked() || current->is_unlocker())) {
    64.1 --- a/src/share/vm/runtime/globals.hpp	Wed Feb 19 20:12:43 2014 -0800
    64.2 +++ b/src/share/vm/runtime/globals.hpp	Tue Feb 25 15:11:18 2014 -0800
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    64.6 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -255,7 +255,7 @@
   64.11    // number of flags
   64.12    static size_t numFlags;
   64.13  
   64.14 -  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false);
   64.15 +  static Flag* find_flag(const char* name, size_t length, bool allow_locked = false, bool return_flag = false);
   64.16    static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false);
   64.17  
   64.18    void check_writable();
   64.19 @@ -1272,6 +1272,9 @@
   64.20    develop(bool, TraceJNICalls, false,                                       \
   64.21            "Trace JNI calls")                                                \
   64.22                                                                              \
   64.23 +  develop(bool, StressRewriter, false,                                      \
   64.24 +          "Stress linktime bytecode rewriting")                             \
   64.25 +                                                                            \
   64.26    notproduct(bool, TraceJVMCalls, false,                                    \
   64.27            "Trace JVM calls")                                                \
   64.28                                                                              \
    65.1 --- a/src/share/vm/runtime/os.cpp	Wed Feb 19 20:12:43 2014 -0800
    65.2 +++ b/src/share/vm/runtime/os.cpp	Tue Feb 25 15:11:18 2014 -0800
    65.3 @@ -1081,7 +1081,6 @@
    65.4  
    65.5    }
    65.6  
    65.7 -#ifndef PRODUCT
    65.8    // Check if in metaspace.
    65.9    if (ClassLoaderDataGraph::contains((address)addr)) {
   65.10      // Use addr->print() from the debugger instead (not here)
   65.11 @@ -1089,7 +1088,6 @@
   65.12                   " is pointing into metadata", addr);
   65.13      return;
   65.14    }
   65.15 -#endif
   65.16  
   65.17    // Try an OS specific find
   65.18    if (os::find(addr, st)) {
    66.1 --- a/src/share/vm/utilities/array.hpp	Wed Feb 19 20:12:43 2014 -0800
    66.2 +++ b/src/share/vm/utilities/array.hpp	Tue Feb 25 15:11:18 2014 -0800
    66.3 @@ -1,5 +1,5 @@
    66.4  /*
    66.5 - * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    66.6 + * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
    66.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    66.8   *
    66.9   * This code is free software; you can redistribute it and/or modify it
   66.10 @@ -58,7 +58,7 @@
   66.11  
   66.12    void initialize(size_t esize, int length) {
   66.13      assert(length >= 0, "illegal length");
   66.14 -    assert(_data == NULL, "must be new object");
   66.15 +    assert(StressRewriter || _data == NULL, "must be new object");
   66.16      _length  = length;
   66.17      _data    = resource_allocate_bytes(esize * length);
   66.18      DEBUG_ONLY(init_nesting();)
    67.1 --- a/src/share/vm/utilities/bitMap.cpp	Wed Feb 19 20:12:43 2014 -0800
    67.2 +++ b/src/share/vm/utilities/bitMap.cpp	Tue Feb 25 15:11:18 2014 -0800
    67.3 @@ -110,7 +110,7 @@
    67.4      while (true) {
    67.5        intptr_t res = Atomic::cmpxchg_ptr(nw, pw, w);
    67.6        if (res == w) break;
    67.7 -      w  = *pw;
    67.8 +      w  = res;
    67.9        nw = value ? (w | ~mr) : (w & mr);
   67.10      }
   67.11    }
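
In the retry loop above, Atomic::cmpxchg_ptr returns the word it actually found in memory, so a failed exchange already tells the caller the current contents; reusing that witness value (w = res) lets the next attempt start immediately instead of performing another unsynchronized reload of *pw. A minimal sketch of the same pattern with std::atomic follows (illustrative only, not the HotSpot primitive): compare_exchange_weak writes the observed value back into its 'expected' argument on failure, so this loop never re-reads the word either.

    #include <atomic>
    #include <cstdint>

    // Set (value == true) or clear the bits outside 'mask' in 'word' while
    // other threads may be updating the bits inside 'mask' concurrently.
    void par_put_bits(std::atomic<intptr_t>& word, intptr_t mask, bool value) {
      intptr_t expected = word.load(std::memory_order_relaxed);
      intptr_t desired  = value ? (expected | ~mask) : (expected & mask);
      while (!word.compare_exchange_weak(expected, desired)) {
        // The failed exchange has already stored the word's current value in
        // 'expected', playing the same role as 'res' in the patched loop.
        desired = value ? (expected | ~mask) : (expected & mask);
      }
    }
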
    68.1 --- a/test/compiler/6826736/Test.java	Wed Feb 19 20:12:43 2014 -0800
    68.2 +++ b/test/compiler/6826736/Test.java	Tue Feb 25 15:11:18 2014 -0800
    68.3 @@ -27,7 +27,7 @@
    68.4   * @bug 6826736
    68.5   * @summary CMS: core dump with -XX:+UseCompressedOops
    68.6   *
    68.7 - * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 Test
    68.8 + * @run main/othervm/timeout=600 -XX:+IgnoreUnrecognizedVMOptions -Xbatch -XX:+ScavengeALot -XX:+UseCompressedOops -XX:HeapBaseMinAddress=32g -XX:CompileThreshold=100 -XX:CompileOnly=Test.test -XX:-BlockLayoutRotateLoops -XX:LoopUnrollLimit=0 -Xmx256m -XX:ParallelGCThreads=4 Test
    68.9   */
   68.10  
   68.11  public class Test {
    69.1 --- a/test/compiler/7184394/TestAESMain.java	Wed Feb 19 20:12:43 2014 -0800
    69.2 +++ b/test/compiler/7184394/TestAESMain.java	Tue Feb 25 15:11:18 2014 -0800
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 2012, 2014 Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -39,20 +39,32 @@
   69.11      System.out.println(iters + " iterations");
   69.12      TestAESEncode etest = new TestAESEncode();
   69.13      etest.prepare();
   69.14 +    // warm-up for 20K iterations
   69.15 +    System.out.println("Starting encryption warm-up");
   69.16 +    for (int i=0; i<20000; i++) {
   69.17 +      etest.run();
   69.18 +    }
   69.19 +    System.out.println("Finished encryption warm-up");
   69.20      long start = System.nanoTime();
   69.21      for (int i=0; i<iters; i++) {
   69.22        etest.run();
   69.23      }
   69.24      long end = System.nanoTime();
   69.25 -    System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000000.0) + " ms");
   69.26 +    System.out.println("TestAESEncode runtime was " + (double)((end - start)/1000000.0) + " ms");
   69.27  
   69.28      TestAESDecode dtest = new TestAESDecode();
   69.29      dtest.prepare();
   69.30 +    // warm-up for 20K iterations
   69.31 +    System.out.println("Starting decryption warm-up");
   69.32 +    for (int i=0; i<20000; i++) {
   69.33 +      dtest.run();
   69.34 +    }
   69.35 +    System.out.println("Finished decryption warm-up");
   69.36      start = System.nanoTime();
   69.37      for (int i=0; i<iters; i++) {
   69.38        dtest.run();
   69.39      }
   69.40      end = System.nanoTime();
   69.41 -    System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000000.0) + " ms");
   69.42 +    System.out.println("TestAESDecode runtime was " + (double)((end - start)/1000000.0) + " ms");
   69.43    }
   69.44  }
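
Besides adding a 20,000-iteration warm-up before each timed phase, the hunk above corrects the unit conversion: System.nanoTime() reports nanoseconds, so dividing by 1,000,000.0 yields the milliseconds the message claims, whereas the old divisor of 1,000,000,000.0 produced seconds. A small self-contained sketch of the same measure-and-convert pattern, using std::chrono rather than the test's Java API and a made-up workload, purely for illustration:

    #include <chrono>
    #include <cstdio>

    int main() {
      auto start = std::chrono::steady_clock::now();
      long sink = 0;
      for (long i = 0; i < 10000000; i++) sink = sink + i;   // stand-in for the timed workload
      auto end = std::chrono::steady_clock::now();

      long long ns = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
      std::printf("runtime was %.3f ms (sink=%ld)\n", ns / 1000000.0, sink); // ns -> ms: divide by 1e6, not 1e9
      return 0;
    }
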
    70.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    70.2 +++ b/test/compiler/types/TestSpeculationFailedHigherEqual.java	Tue Feb 25 15:11:18 2014 -0800
    70.3 @@ -0,0 +1,63 @@
    70.4 +/*
    70.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    70.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    70.7 + *
    70.8 + * This code is free software; you can redistribute it and/or modify it
    70.9 + * under the terms of the GNU General Public License version 2 only, as
   70.10 + * published by the Free Software Foundation.
   70.11 + *
   70.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   70.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   70.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   70.15 + * version 2 for more details (a copy is included in the LICENSE file that
   70.16 + * accompanied this code).
   70.17 + *
   70.18 + * You should have received a copy of the GNU General Public License version
   70.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   70.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   70.21 + *
   70.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   70.23 + * or visit www.oracle.com if you need additional information or have any
   70.24 + * questions.
   70.25 + */
   70.26 +
   70.27 +/*
   70.28 + * @test
   70.29 + * @bug 8027422
   70.30 + * @summary type methods shouldn't always operate on speculative part
   70.31 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:TypeProfileLevel=222 -XX:+UnlockExperimentalVMOptions -XX:+UseTypeSpeculation -XX:-BackgroundCompilation TestSpeculationFailedHigherEqual
   70.32 + *
   70.33 + */
   70.34 +
   70.35 +public class TestSpeculationFailedHigherEqual {
   70.36 +
   70.37 +    static class A {
   70.38 +        void m() {}
   70.39 +        int i;
   70.40 +    }
   70.41 +
   70.42 +    static class C extends A {
   70.43 +    }
   70.44 +
   70.45 +    static C c;
   70.46 +
   70.47 +    static A m1(A a, boolean cond) {
   70.48 +        // speculative type for a is C not null
   70.49 +        if (cond ) {
   70.50 +            a = c;
   70.51 +        }
   70.52 +        // speculative type for a is C (may be null)
   70.53 +        int i = a.i;
   70.54 +        return a;
   70.55 +    }
   70.56 +
   70.57 +    static public void main(String[] args) {
   70.58 +        C c = new C();
   70.59 +        TestSpeculationFailedHigherEqual.c = c;
   70.60 +        for (int i = 0; i < 20000; i++) {
   70.61 +            m1(c, i%2 == 0);
   70.62 +        }
   70.63 +
   70.64 +        System.out.println("TEST PASSED");
   70.65 +    }
   70.66 +}
    71.1 --- a/test/runtime/7158988/FieldMonitor.java	Wed Feb 19 20:12:43 2014 -0800
    71.2 +++ b/test/runtime/7158988/FieldMonitor.java	Tue Feb 25 15:11:18 2014 -0800
    71.3 @@ -34,10 +34,6 @@
    71.4  import java.io.IOException;
    71.5  import java.io.InputStream;
    71.6  import java.io.InputStreamReader;
    71.7 -import java.io.OutputStream;
    71.8 -import java.io.OutputStreamWriter;
    71.9 -import java.io.Reader;
   71.10 -import java.io.Writer;
   71.11  import java.util.Iterator;
   71.12  import java.util.List;
   71.13  import java.util.Map;
   71.14 @@ -56,6 +52,7 @@
   71.15  import com.sun.jdi.event.EventSet;
   71.16  import com.sun.jdi.event.ModificationWatchpointEvent;
   71.17  import com.sun.jdi.event.VMDeathEvent;
   71.18 +import com.sun.jdi.event.VMStartEvent;
   71.19  import com.sun.jdi.event.VMDisconnectEvent;
   71.20  import com.sun.jdi.request.ClassPrepareRequest;
   71.21  import com.sun.jdi.request.EventRequest;
   71.22 @@ -71,24 +68,10 @@
   71.23    public static void main(String[] args)
   71.24        throws IOException, InterruptedException {
   71.25  
   71.26 -    StringBuffer sb = new StringBuffer();
   71.27 -
   71.28 -    for (int i=0; i < args.length; i++) {
   71.29 -        sb.append(' ');
   71.30 -        sb.append(args[i]);
   71.31 -    }
   71.32      //VirtualMachine vm = launchTarget(sb.toString());
   71.33      VirtualMachine vm = launchTarget(CLASS_NAME);
   71.34  
   71.35      System.out.println("Vm launched");
   71.36 -    // set watch field on already loaded classes
   71.37 -    List<ReferenceType> referenceTypes = vm
   71.38 -        .classesByName(CLASS_NAME);
   71.39 -    for (ReferenceType refType : referenceTypes) {
   71.40 -      addFieldWatch(vm, refType);
   71.41 -    }
   71.42 -    // watch for loaded classes
   71.43 -    addClassWatch(vm);
   71.44  
   71.45      // process events
   71.46      EventQueue eventQueue = vm.eventQueue();
   71.47 @@ -104,13 +87,15 @@
   71.48      errThread.start();
   71.49      outThread.start();
   71.50  
   71.51 -
   71.52 -    vm.resume();
   71.53      boolean connected = true;
   71.54 +    int watched = 0;
   71.55      while (connected) {
   71.56        EventSet eventSet = eventQueue.remove();
   71.57        for (Event event : eventSet) {
   71.58 -        if (event instanceof VMDeathEvent
   71.59 +        System.out.println("FieldMonitor-main receives: "+event);
   71.60 +        if (event instanceof VMStartEvent) {
   71.61 +          addClassWatch(vm);
   71.62 +        } else if (event instanceof VMDeathEvent
   71.63              || event instanceof VMDisconnectEvent) {
   71.64            // exit
   71.65            connected = false;
   71.66 @@ -122,17 +107,17 @@
   71.67                .referenceType();
   71.68            addFieldWatch(vm, refType);
   71.69          } else if (event instanceof ModificationWatchpointEvent) {
   71.70 +          watched++;
   71.71            System.out.println("sleep for 500 ms");
   71.72            Thread.sleep(500);
   71.73 -          System.out.println("resume...");
   71.74  
   71.75            ModificationWatchpointEvent modEvent = (ModificationWatchpointEvent) event;
   71.76            System.out.println("old="
   71.77                + modEvent.valueCurrent());
   71.78            System.out.println("new=" + modEvent.valueToBe());
   71.79 -          System.out.println();
   71.80          }
   71.81        }
   71.82 +      System.out.println("resume...");
   71.83        eventSet.resume();
   71.84      }
   71.85      // Shutdown begins when event thread terminates
   71.86 @@ -142,6 +127,10 @@
   71.87      } catch (InterruptedException exc) {
   71.88          // we don't interrupt
   71.89      }
   71.90 +
   71.91 +    if (watched != 11) { // init + 10 modifications in TestPostFieldModification class
   71.92 +        throw new Error("Expected to receive 11 times ModificationWatchpointEvent, but got "+watched);
   71.93 +    }
   71.94    }
   71.95  
   71.96    /**
    72.1 --- a/test/runtime/CommandLine/CompilerConfigFileWarning.java	Wed Feb 19 20:12:43 2014 -0800
    72.2 +++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java	Tue Feb 25 15:11:18 2014 -0800
    72.3 @@ -1,5 +1,5 @@
    72.4  /*
    72.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    72.6 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    72.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.8   *
    72.9   * This code is free software; you can redistribute it and/or modify it
   72.10 @@ -33,8 +33,7 @@
   72.11  
   72.12  public class CompilerConfigFileWarning {
   72.13      public static void main(String[] args) throws Exception {
   72.14 -        String vmVersion = System.getProperty("java.vm.version");
   72.15 -        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
   72.16 +        if (Platform.isDebugBuild()) {
   72.17              System.out.println("Skip on debug builds since we'll always read the file there");
   72.18              return;
   72.19          }
    73.1 --- a/test/runtime/CommandLine/ConfigFileWarning.java	Wed Feb 19 20:12:43 2014 -0800
    73.2 +++ b/test/runtime/CommandLine/ConfigFileWarning.java	Tue Feb 25 15:11:18 2014 -0800
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -33,8 +33,7 @@
   73.11  
   73.12  public class ConfigFileWarning {
   73.13      public static void main(String[] args) throws Exception {
   73.14 -        String vmVersion = System.getProperty("java.vm.version");
   73.15 -        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
   73.16 +        if (Platform.isDebugBuild()) {
   73.17              System.out.println("Skip on debug builds since we'll always read the file there");
   73.18              return;
   73.19          }
    74.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    74.2 +++ b/test/runtime/CommandLine/VMOptionWarning.java	Tue Feb 25 15:11:18 2014 -0800
    74.3 @@ -0,0 +1,56 @@
    74.4 +/*
    74.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    74.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.7 + *
    74.8 + * This code is free software; you can redistribute it and/or modify it
    74.9 + * under the terms of the GNU General Public License version 2 only, as
   74.10 + * published by the Free Software Foundation.
   74.11 + *
   74.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   74.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   74.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   74.15 + * version 2 for more details (a copy is included in the LICENSE file that
   74.16 + * accompanied this code).
   74.17 + *
   74.18 + * You should have received a copy of the GNU General Public License version
   74.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   74.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   74.21 + *
   74.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   74.23 + * or visit www.oracle.com if you need additional information or have any
   74.24 + * questions.
   74.25 + */
   74.26 +
   74.27 +/*
   74.28 + * @test
   74.29 + * @bug 8027314
   74.30 + * @summary Warn if diagnostic or experimental vm option is used and -XX:+UnlockDiagnosticVMOptions or -XX:+UnlockExperimentalVMOptions, respectively, isn't specified. Warn if develop or notproduct vm option is used with product version of VM.
   74.31 + * @library /testlibrary
   74.32 + */
   74.33 +
   74.34 +import com.oracle.java.testlibrary.*;
   74.35 +
   74.36 +public class VMOptionWarning {
   74.37 +    public static void main(String[] args) throws Exception {
   74.38 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-XX:+PredictedLoadedClassCount", "-version");
   74.39 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
   74.40 +        output.shouldContain("Error: VM option 'PredictedLoadedClassCount' is experimental and must be enabled via -XX:+UnlockExperimentalVMOptions.");
   74.41 +
   74.42 +        if (Platform.isDebugBuild()) {
   74.43 +            System.out.println("Skip the rest of the tests on debug builds since diagnostic, develop, and notproduct options are available on debug builds.");
   74.44 +            return;
   74.45 +        }
   74.46 +
   74.47 +        pb = ProcessTools.createJavaProcessBuilder("-XX:+PrintInlining", "-version");
   74.48 +        output = new OutputAnalyzer(pb.start());
   74.49 +        output.shouldContain("Error: VM option 'PrintInlining' is diagnostic and must be enabled via -XX:+UnlockDiagnosticVMOptions.");
   74.50 +
   74.51 +        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJNICalls", "-version");
   74.52 +        output = new OutputAnalyzer(pb.start());
   74.53 +        output.shouldContain("Error: VM option 'TraceJNICalls' is develop and is available only in debug version of VM.");
   74.54 +
   74.55 +        pb = ProcessTools.createJavaProcessBuilder("-XX:+TraceJVMCalls", "-version");
   74.56 +        output = new OutputAnalyzer(pb.start());
   74.57 +        output.shouldContain("Error: VM option 'TraceJVMCalls' is notproduct and is available only in debug version of VM.");
   74.58 +    }
   74.59 +}
    75.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    75.2 +++ b/test/runtime/lambda-features/InvokespecialInterface.java	Tue Feb 25 15:11:18 2014 -0800
    75.3 @@ -0,0 +1,61 @@
    75.4 +/*
    75.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    75.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.7 + *
    75.8 + * This code is free software; you can redistribute it and/or modify it
    75.9 + * under the terms of the GNU General Public License version 2 only, as
   75.10 + * published by the Free Software Foundation.
   75.11 + *
   75.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   75.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   75.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   75.15 + * version 2 for more details (a copy is included in the LICENSE file that
   75.16 + * accompanied this code).
   75.17 + *
   75.18 + * You should have received a copy of the GNU General Public License version
   75.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   75.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   75.21 + *
   75.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   75.23 + * or visit www.oracle.com if you need additional information or have any
   75.24 + * questions.
   75.25 + *
   75.26 + */
   75.27 +
   75.28 +/*
   75.29 + * @test
   75.30 + * @bug 8032024
   75.31 + * @bug 8025937
   75.32 + * @bug 8033528
   75.33 + * @summary [JDK 8] Test invokespecial and invokeinterface with the same JVM_CONSTANT_InterfaceMethodref
   75.34 + * @run main/othervm -XX:+StressRewriter InvokespecialInterface
   75.35 + */
   75.36 +import java.util.function.*;
   75.37 +import java.util.*;
   75.38 +
   75.39 +interface I {
   75.40 +  default void imethod() { System.out.println("I::imethod"); }
   75.41 +}
   75.42 +
   75.43 +class C implements I {
   75.44 +  public void foo() { I.super.imethod(); }  // invokespecial InterfaceMethod
   75.45 +  public void bar() { I i = this; i.imethod(); } // invokeinterface same
   75.46 +  public void doSomeInvokedynamic() {
   75.47 +      String str = "world";
   75.48 +      Supplier<String> foo = ()->"hello, "+str;
   75.49 +      String res = foo.get();
   75.50 +      System.out.println(res);
   75.51 +  }
   75.52 +}
   75.53 +
   75.54 +public class InvokespecialInterface {
   75.55 +  public static void main(java.lang.String[] unused) {
   75.56 +     // need to create C and call I::foo()
   75.57 +     C c = new C();
   75.58 +     c.foo();
   75.59 +     c.bar();
   75.60 +     c.doSomeInvokedynamic();
   75.61 +  }
   75.62 +};
   75.63 +
   75.64 +
    76.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    76.2 +++ b/test/runtime/lambda-features/TestConcreteClassWithAbstractMethod.java	Tue Feb 25 15:11:18 2014 -0800
    76.3 @@ -0,0 +1,181 @@
    76.4 +/*
    76.5 + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
    76.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.7 + *
    76.8 + * This code is free software; you can redistribute it and/or modify it
    76.9 + * under the terms of the GNU General Public License version 2 only, as
   76.10 + * published by the Free Software Foundation.
   76.11 + *
   76.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   76.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   76.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   76.15 + * version 2 for more details (a copy is included in the LICENSE file that
   76.16 + * accompanied this code).
   76.17 + *
   76.18 + * You should have received a copy of the GNU General Public License version
   76.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   76.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   76.21 + *
   76.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   76.23 + * or visit www.oracle.com if you need additional information or have any
   76.24 + * questions.
   76.25 + *
   76.26 + */
   76.27 +
   76.28 +/*
   76.29 + * @test
   76.30 + * @bug 8032010
   76.31 + * @summary method lookup on an abstract method in a concrete class should be successful
   76.32 + * @run main TestConcreteClassWithAbstractMethod
   76.33 + */
   76.34 +
   76.35 +import jdk.internal.org.objectweb.asm.ClassWriter;
   76.36 +import jdk.internal.org.objectweb.asm.MethodVisitor;
   76.37 +
   76.38 +import static jdk.internal.org.objectweb.asm.Opcodes.*;
   76.39 +
   76.40 +/*
   76.41 + *   class T1 { public int m() {} }
   76.42 + *   class T2 { public abstract int m(); }
   76.43 + *   class T3 { public int m() {} }
   76.44 + *
   76.45 + *   Call site: T3.test() { invokevirtual T2.m() }
   76.46 + *   T3.m() should be invoked
   76.47 + */
   76.48 +public class TestConcreteClassWithAbstractMethod {
   76.49 +    static final String classT1 = "p1.T1";
   76.50 +    static final String classT2 = "p1.T2";
   76.51 +    static final String classT3 = "p1.T3";
   76.52 +
   76.53 +    static final String callerName = classT3;
   76.54 +
   76.55 +    public static void main(String[] args) throws Exception {
   76.56 +        ClassLoader cl = new ClassLoader() {
   76.57 +            public Class<?> loadClass(String name) throws ClassNotFoundException {
   76.58 +                if (findLoadedClass(name) != null) {
   76.59 +                    return findLoadedClass(name);
   76.60 +                }
   76.61 +
   76.62 +                if (classT1.equals(name)) {
   76.63 +                    byte[] classFile = dumpT1();
   76.64 +                    return defineClass(classT1, classFile, 0, classFile.length);
   76.65 +                }
   76.66 +                if (classT2.equals(name)) {
   76.67 +                    byte[] classFile = dumpT2();
   76.68 +                    return defineClass(classT2, classFile, 0, classFile.length);
   76.69 +                }
   76.70 +                if (classT3.equals(name)) {
   76.71 +                    byte[] classFile = dumpT3();
   76.72 +                    return defineClass(classT3, classFile, 0, classFile.length);
   76.73 +                }
   76.74 +
   76.75 +                return super.loadClass(name);
   76.76 +            }
   76.77 +        };
   76.78 +
   76.79 +        cl.loadClass(classT1);
   76.80 +        cl.loadClass(classT2);
   76.81 +        cl.loadClass(classT3);
   76.82 +
   76.83 +        //cl.loadClass(callerName).getDeclaredMethod("m");
   76.84 +        cl.loadClass(callerName).newInstance();
   76.85 +
   76.86 +        int result = (Integer)cl.loadClass(callerName).getDeclaredMethod("test").invoke(null);
   76.87 +        System.out.println(""+result);
   76.88 +    }
   76.89 +
   76.90 +    public static byte[] dumpT1() {
   76.91 +        ClassWriter cw = new ClassWriter(0);
   76.92 +        MethodVisitor mv;
   76.93 +
   76.94 +        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T1", null, "java/lang/Object", null);
   76.95 +        {
   76.96 +            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
   76.97 +            mv.visitCode();
   76.98 +            mv.visitVarInsn(ALOAD, 0);
   76.99 +            mv.visitMethodInsn(INVOKESPECIAL, "java/lang/Object", "<init>", "()V", false);
  76.100 +            mv.visitInsn(RETURN);
  76.101 +            mv.visitMaxs(1, 1);
  76.102 +            mv.visitEnd();
  76.103 +        }
  76.104 +        {
  76.105 +            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
  76.106 +            mv.visitCode();
  76.107 +            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
  76.108 +            mv.visitLdcInsn("p1/T1.m()");
  76.109 +            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
  76.110 +            mv.visitIntInsn(BIPUSH, 3);
  76.111 +            mv.visitInsn(IRETURN);
  76.112 +            mv.visitMaxs(2, 1);
  76.113 +            mv.visitEnd();
  76.114 +        }
  76.115 +        cw.visitEnd();
  76.116 +
  76.117 +        return cw.toByteArray();
  76.118 +    }
  76.119 +
  76.120 +    public static byte[] dumpT2() {
  76.121 +        ClassWriter cw = new ClassWriter(0);
  76.122 +        MethodVisitor mv;
  76.123 +
  76.124 +        cw.visit(52, ACC_PUBLIC | ACC_SUPER, "p1/T2", null, "p1/T1", null);
  76.125 +        {
  76.126 +            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
  76.127 +            mv.visitCode();
  76.128 +            mv.visitVarInsn(ALOAD, 0);
  76.129 +            mv.visitMethodInsn(INVOKESPECIAL, "p1/T1", "<init>", "()V", false);
  76.130 +            mv.visitInsn(RETURN);
  76.131 +            mv.visitMaxs(1, 1);
  76.132 +            mv.visitEnd();
  76.133 +        }
  76.134 +        {
  76.135 +            mv = cw.visitMethod(ACC_PUBLIC + ACC_ABSTRACT, "m", "()I", null, null);
  76.136 +            mv.visitEnd();
  76.137 +        }
  76.138 +        cw.visitEnd();
  76.139 +
  76.140 +        return cw.toByteArray();
  76.141 +    }
  76.142 +
  76.143 +    public static byte[] dumpT3() {
  76.144 +        ClassWriter cw = new ClassWriter(0);
  76.145 +        MethodVisitor mv;
  76.146 +
  76.147 +        cw.visit(52, ACC_PUBLIC + ACC_SUPER, "p1/T3", null, "p1/T2", null);
  76.148 +
  76.149 +        {
  76.150 +            mv = cw.visitMethod(ACC_PUBLIC, "<init>", "()V", null, null);
  76.151 +            mv.visitCode();
  76.152 +            mv.visitVarInsn(ALOAD, 0);
  76.153 +            mv.visitMethodInsn(INVOKESPECIAL, "p1/T2", "<init>", "()V", false);
  76.154 +            mv.visitInsn(RETURN);
  76.155 +            mv.visitMaxs(1, 1);
  76.156 +            mv.visitEnd();
  76.157 +        }
  76.158 +        {
  76.159 +            mv = cw.visitMethod(ACC_PUBLIC, "m", "()I", null, null);
  76.160 +            mv.visitCode();
  76.161 +            mv.visitFieldInsn(GETSTATIC, "java/lang/System", "out", "Ljava/io/PrintStream;");
  76.162 +            mv.visitLdcInsn("p1/T3.m()");
  76.163 +            mv.visitMethodInsn(INVOKEVIRTUAL, "java/io/PrintStream", "print", "(Ljava/lang/String;)V", false);
  76.164 +            mv.visitIntInsn(BIPUSH, 2);
  76.165 +            mv.visitInsn(IRETURN);
  76.166 +            mv.visitMaxs(2, 1);
  76.167 +            mv.visitEnd();
  76.168 +        }
  76.169 +        {
  76.170 +            mv = cw.visitMethod(ACC_PUBLIC + ACC_STATIC, "test", "()I", null, null);
  76.171 +            mv.visitCode();
  76.172 +            mv.visitTypeInsn(NEW, "p1/T3");
  76.173 +            mv.visitInsn(DUP);
  76.174 +            mv.visitMethodInsn(INVOKESPECIAL, "p1/T3", "<init>", "()V", false);
  76.175 +            mv.visitMethodInsn(INVOKEVIRTUAL, "p1/T2", "m", "()I", false);
  76.176 +            mv.visitInsn(IRETURN);
  76.177 +            mv.visitMaxs(3, 2);
  76.178 +            mv.visitEnd();
  76.179 +        }
  76.180 +        cw.visitEnd();
  76.181 +
  76.182 +        return cw.toByteArray();
  76.183 +    }
  76.184 +}
