Merge

author      amurillo
date        Tue, 25 Jun 2013 12:46:21 -0700
changeset   5295:9f3e3245b50f
parent      5253:38e483cb1bcd
parent      5294:3bdeff4a6ca7
child       5296:e6a4b8c71fa6

src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp
src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp
src/share/vm/memory/allocation.hpp
src/share/vm/memory/universe.cpp
src/share/vm/prims/jvm.cpp
     1.1 --- a/.hgtags	Mon Jun 24 14:27:24 2013 -0700
     1.2 +++ b/.hgtags	Tue Jun 25 12:46:21 2013 -0700
     1.3 @@ -352,3 +352,4 @@
     1.4  1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94
     1.5  69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37
     1.6  5d65c078cd0ac455aa5e58a09844c7acce54b487 jdk8-b95
     1.7 +2cc5a9d1ba66dfdff578918b393c727bd9450210 hs25-b38
     2.1 --- a/make/hotspot_version	Mon Jun 24 14:27:24 2013 -0700
     2.2 +++ b/make/hotspot_version	Tue Jun 25 12:46:21 2013 -0700
     2.3 @@ -35,7 +35,7 @@
     2.4  
     2.5  HS_MAJOR_VER=25
     2.6  HS_MINOR_VER=0
     2.7 -HS_BUILD_NUMBER=37
     2.8 +HS_BUILD_NUMBER=38
     2.9  
    2.10  JDK_MAJOR_VER=1
    2.11  JDK_MINOR_VER=8
     3.1 --- a/make/linux/makefiles/gcc.make	Mon Jun 24 14:27:24 2013 -0700
     3.2 +++ b/make/linux/makefiles/gcc.make	Tue Jun 25 12:46:21 2013 -0700
     3.3 @@ -214,7 +214,7 @@
     3.4    WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body
     3.5  endif
     3.6  
     3.7 -WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function
     3.8 +WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value
     3.9  
    3.10  ifeq ($(USE_CLANG),)
    3.11    # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit
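
The one change here adds -Wunused-value to WARNING_FLAGS, which makes GCC warn when an expression's result is computed and then discarded. A minimal illustration of the kind of statement it catches (example code, not from this changeset):

    // g++ -Wunused-value -c example.cpp
    int f(int x) {
      x + 1;        // warning: value computed is not used
      return x + 1; // fine: the result is actually used
    }
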
     4.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
     4.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
     4.3 @@ -57,7 +57,6 @@
     4.4      fbp_op2   = 5,
     4.5      br_op2    = 2,
     4.6      bp_op2    = 1,
     4.7 -    cb_op2    = 7, // V8
     4.8      sethi_op2 = 4
     4.9    };
    4.10  
    4.11 @@ -145,7 +144,6 @@
    4.12      ldsh_op3     = 0x0a,
    4.13      ldx_op3      = 0x0b,
    4.14  
    4.15 -    ldstub_op3   = 0x0d,
    4.16      stx_op3      = 0x0e,
    4.17      swap_op3     = 0x0f,
    4.18  
    4.19 @@ -163,15 +161,6 @@
    4.20  
    4.21      prefetch_op3 = 0x2d,
    4.22  
    4.23 -
    4.24 -    ldc_op3      = 0x30,
    4.25 -    ldcsr_op3    = 0x31,
    4.26 -    lddc_op3     = 0x33,
    4.27 -    stc_op3      = 0x34,
    4.28 -    stcsr_op3    = 0x35,
    4.29 -    stdcq_op3    = 0x36,
    4.30 -    stdc_op3     = 0x37,
    4.31 -
    4.32      casa_op3     = 0x3c,
    4.33      casxa_op3    = 0x3e,
    4.34  
    4.35 @@ -574,17 +563,11 @@
    4.36    static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); }
    4.37  
    4.38    // instruction only in v9
    4.39 -  static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); }
    4.40 -
    4.41 -  // instruction only in v8
    4.42 -  static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); }
    4.43 +  static void v9_only() { } // do nothing
    4.44  
    4.45    // instruction deprecated in v9
    4.46    static void v9_dep()  { } // do nothing for now
    4.47  
    4.48 -  // some float instructions only exist for single prec. on v8
    4.49 -  static void v8_s_only(FloatRegisterImpl::Width w)  { if (w != FloatRegisterImpl::S)  v9_only(); }
    4.50 -
    4.51    // v8 has no CC field
    4.52    static void v8_no_cc(CC cc)  { if (cc)  v9_only(); }
    4.53  
    4.54 @@ -730,11 +713,6 @@
    4.55    inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
    4.56    inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
    4.57  
    4.58 -  // pp 121 (V8)
    4.59 -
    4.60 -  inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none );
    4.61 -  inline void cb( Condition c, bool a, Label& L );
    4.62 -
    4.63    // pp 149
    4.64  
    4.65    inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
    4.66 @@ -775,8 +753,8 @@
    4.67  
    4.68    // pp 157
    4.69  
    4.70 -  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
    4.71 -  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc);  emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
    4.72 +  void fcmp(  FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); }
    4.73 +  void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); }
    4.74  
    4.75    // pp 159
    4.76  
    4.77 @@ -794,21 +772,11 @@
    4.78  
    4.79    // pp 162
    4.80  
    4.81 -  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
    4.82 +  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); }
    4.83  
    4.84 -  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
    4.85 +  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); }
    4.86  
    4.87 -  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available
    4.88 -  // on v8 to do negation of single, double and quad precision floats.
    4.89 -
    4.90 -  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) |  opf(0x05) | fs2(sd, w)); }
    4.91 -
    4.92 -  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w);  emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
    4.93 -
    4.94 -  // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available
    4.95 -  // on v8 to do abs operation on single/double/quad precision floats.
    4.96 -
    4.97 -  void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); }
    4.98 +  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); }
    4.99  
   4.100    // pp 163
   4.101  
   4.102 @@ -839,11 +807,6 @@
   4.103    void impdep1( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); }
   4.104    void impdep2( int id1, int const19a ) { v9_only();  emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); }
   4.105  
   4.106 -  // pp 149 (v8)
   4.107 -
   4.108 -  void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
   4.109 -  void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only();  emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); }
   4.110 -
   4.111    // pp 170
   4.112  
   4.113    void jmpl( Register s1, Register s2, Register d );
   4.114 @@ -860,16 +823,6 @@
   4.115    inline void ldxfsr( Register s1, Register s2 );
   4.116    inline void ldxfsr( Register s1, int simm13a);
   4.117  
   4.118 -  // pp 94 (v8)
   4.119 -
   4.120 -  inline void ldc(   Register s1, Register s2, int crd );
   4.121 -  inline void ldc(   Register s1, int simm13a, int crd);
   4.122 -  inline void lddc(  Register s1, Register s2, int crd );
   4.123 -  inline void lddc(  Register s1, int simm13a, int crd);
   4.124 -  inline void ldcsr( Register s1, Register s2, int crd );
   4.125 -  inline void ldcsr( Register s1, int simm13a, int crd);
   4.126 -
   4.127 -
   4.128    // 173
   4.129  
   4.130    void ldfa(  FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) { v9_only();  emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   4.131 @@ -910,18 +863,6 @@
   4.132    void lduwa(  Register s1, int simm13a,         Register d ) {             emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.133    void ldxa(   Register s1, Register s2, int ia, Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   4.134    void ldxa(   Register s1, int simm13a,         Register d ) { v9_only();  emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.135 -  void ldda(   Register s1, Register s2, int ia, Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   4.136 -  void ldda(   Register s1, int simm13a,         Register d ) { v9_dep();   emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3  | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.137 -
   4.138 -  // pp 179
   4.139 -
   4.140 -  inline void ldstub(  Register s1, Register s2, Register d );
   4.141 -  inline void ldstub(  Register s1, int simm13a, Register d);
   4.142 -
   4.143 -  // pp 180
   4.144 -
   4.145 -  void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   4.146 -  void ldstuba( Register s1, int simm13a,         Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.147  
   4.148    // pp 181
   4.149  
   4.150 @@ -992,11 +933,6 @@
   4.151    void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); }
   4.152    void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.153  
   4.154 -  // pp 199
   4.155 -
   4.156 -  void mulscc(   Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); }
   4.157 -  void mulscc(   Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.158 -
   4.159    // pp 201
   4.160  
   4.161    void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); }
   4.162 @@ -1116,17 +1052,6 @@
   4.163    void stda(  Register d, Register s1, Register s2, int ia ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); }
   4.164    void stda(  Register d, Register s1, int simm13a         ) {             emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.165  
   4.166 -  // pp 97 (v8)
   4.167 -
   4.168 -  inline void stc(   int crd, Register s1, Register s2 );
   4.169 -  inline void stc(   int crd, Register s1, int simm13a);
   4.170 -  inline void stdc(  int crd, Register s1, Register s2 );
   4.171 -  inline void stdc(  int crd, Register s1, int simm13a);
   4.172 -  inline void stcsr( int crd, Register s1, Register s2 );
   4.173 -  inline void stcsr( int crd, Register s1, int simm13a);
   4.174 -  inline void stdcq( int crd, Register s1, Register s2 );
   4.175 -  inline void stdcq( int crd, Register s1, int simm13a);
   4.176 -
   4.177    // pp 230
   4.178  
   4.179    void sub(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3              ) | rs1(s1) | rs2(s2) ); }
   4.180 @@ -1153,20 +1078,16 @@
   4.181  
   4.182    void taddcc(    Register s1, Register s2, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | rs2(s2) ); }
   4.183    void taddcc(    Register s1, int simm13a, Register d ) {            emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.184 -  void taddcctv(  Register s1, Register s2, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | rs2(s2) ); }
   4.185 -  void taddcctv(  Register s1, int simm13a, Register d ) { v9_dep();  emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.186  
   4.187    // pp 235
   4.188  
   4.189    void tsubcc(    Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | rs2(s2) ); }
   4.190    void tsubcc(    Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.191 -  void tsubcctv(  Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); }
   4.192 -  void tsubcctv(  Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
   4.193  
   4.194    // pp 237
   4.195  
   4.196 -  void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
   4.197 -  void trap( Condition c, CC cc, Register s1, int trapa   ) { v8_no_cc(cc);  emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   4.198 +  void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); }
   4.199 +  void trap( Condition c, CC cc, Register s1, int trapa   ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); }
   4.200    // simple uncond. trap
   4.201    void trap( int trapa ) { trap( always, icc, G0, trapa ); }
   4.202  
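
The removals above strip the V8-only opcodes (cb_op2, ldstub_op3, the coprocessor ldc/stc group) and the v8_only()/v8_s_only() guards. These enum values plug into SPARC's fixed-position bit fields: every instruction is one 32-bit word assembled by OR-ing fields together, which is what the op()/op3()/rs1()/simm() helpers used in the emitters do. A self-contained sketch of the format-3 (load/store) encoding, simplified from the V9 manual and not HotSpot's actual helper code:

    #include <cstdint>

    // Format 3: op[31:30]=3, rd[29:25], op3[24:19], rs1[18:14], i[13], simm13[12:0].
    static uint32_t u_field(uint32_t v, int hi, int lo) {
      return (v & ((1u << (hi - lo + 1)) - 1)) << lo;
    }

    static uint32_t ld_simm13(uint32_t op3, int rd, int rs1, int simm13) {
      return u_field(3, 31, 30)               // op = 3: memory instructions
           | u_field(rd, 29, 25)              // destination register
           | u_field(op3, 24, 19)             // e.g. ldx_op3 = 0x0b above
           | u_field(rs1, 18, 14)             // base register
           | u_field(1, 13, 13)               // i = 1: immediate addressing
           | u_field(simm13 & 0x1fff, 12, 0); // 13-bit signed displacement
    }

    // ld_simm13(0x0b, 1, 2, 8) encodes "ldx [%g2 + 8], %g1".
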
     5.1 --- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
     5.2 +++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
     5.3 @@ -63,9 +63,6 @@
     5.4  inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt);  has_delay_slot(); }
     5.5  inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); }
     5.6  
     5.7 -inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only();  cti();  emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
     5.8 -inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); }
     5.9 -
    5.10  inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep();  cti();   emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt);  has_delay_slot(); }
    5.11  inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); }
    5.12  
    5.13 @@ -88,18 +85,9 @@
    5.14  inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); }
    5.15  inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); }
    5.16  
    5.17 -inline void Assembler::ldfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
    5.18 -inline void Assembler::ldfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.19  inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); }
    5.20  inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.21  
    5.22 -inline void Assembler::ldc(   Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | rs2(s2) ); }
    5.23 -inline void Assembler::ldc(   Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3  ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.24 -inline void Assembler::lddc(  Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); }
    5.25 -inline void Assembler::lddc(  Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.26 -inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); }
    5.27 -inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.28 -
    5.29  inline void Assembler::ldsb(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); }
    5.30  inline void Assembler::ldsb(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.31  
    5.32 @@ -119,9 +107,6 @@
    5.33  inline void Assembler::ldd(   Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); }
    5.34  inline void Assembler::ldd(   Register s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.35  
    5.36 -inline void Assembler::ldstub(  Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); }
    5.37 -inline void Assembler::ldstub(  Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.38 -
    5.39  inline void Assembler::rett( Register s1, Register s2                         ) { cti();  emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2));  has_delay_slot(); }
    5.40  inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti();  emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt);  has_delay_slot(); }
    5.41  
    5.42 @@ -132,8 +117,6 @@
    5.43  inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); }
    5.44  inline void Assembler::stf(    FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.45  
    5.46 -inline void Assembler::stfsr(  Register s1, Register s2) { v9_dep();   emit_int32( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
    5.47 -inline void Assembler::stfsr(  Register s1, int simm13a) { v9_dep();   emit_data( op(ldst_op) |             op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.48  inline void Assembler::stxfsr( Register s1, Register s2) { v9_only();  emit_int32( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); }
    5.49  inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only();  emit_data( op(ldst_op) | rd(G1)    | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.50  
    5.51 @@ -152,17 +135,6 @@
    5.52  inline void Assembler::std(  Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); }
    5.53  inline void Assembler::std(  Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.54  
    5.55 -// v8 p 99
    5.56 -
    5.57 -inline void Assembler::stc(    int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); }
    5.58 -inline void Assembler::stc(    int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.59 -inline void Assembler::stdc(   int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); }
    5.60 -inline void Assembler::stdc(   int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.61 -inline void Assembler::stcsr(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); }
    5.62 -inline void Assembler::stcsr(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.63 -inline void Assembler::stdcq(  int crd, Register s1, Register s2) { v8_only();  emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
    5.64 -inline void Assembler::stdcq(  int crd, Register s1, int simm13a) { v8_only();  emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }
    5.65 -
    5.66  // pp 231
    5.67  
    5.68  inline void Assembler::swap(    Register s1, Register s2, Register d) { v9_dep();  emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
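
The deleted ldstub forms were SPARC's classic test-and-set: atomically load a byte and store 0xff back to it. That primitive underpinned the V8 software locks this changeset retires in favor of cas/casx. A rough portable model of the idiom, with std::atomic standing in for the instruction (illustrative only):

    #include <atomic>

    // ldstub-style lock byte: 0x00 = free, 0xff = held.
    std::atomic<unsigned char> lock_byte{0x00};

    void lock()   { while (lock_byte.exchange(0xff) != 0x00) { /* spin */ } }
    void unlock() { lock_byte.store(0x00); }
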
     6.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
     6.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
     6.3 @@ -597,13 +597,6 @@
     6.4  
     6.5    __ sra(Rdividend, 31, Rscratch);
     6.6    __ wry(Rscratch);
     6.7 -  if (!VM_Version::v9_instructions_work()) {
     6.8 -    // v9 doesn't require these nops
     6.9 -    __ nop();
    6.10 -    __ nop();
    6.11 -    __ nop();
    6.12 -    __ nop();
    6.13 -  }
    6.14  
    6.15    add_debug_info_for_div0_here(op->info());
    6.16  
    6.17 @@ -652,10 +645,6 @@
    6.18        case lir_cond_lessEqual:     acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual   : Assembler::f_lessOrEqual);    break;
    6.19        case lir_cond_greaterEqual:  acond = (is_unordered ? Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break;
    6.20        default :                         ShouldNotReachHere();
    6.21 -    };
    6.22 -
    6.23 -    if (!VM_Version::v9_instructions_work()) {
    6.24 -      __ nop();
    6.25      }
    6.26      __ fb( acond, false, Assembler::pn, *(op->label()));
    6.27    } else {
    6.28 @@ -725,9 +714,6 @@
    6.29        Label L;
    6.30        // result must be 0 if value is NaN; test by comparing value to itself
    6.31        __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc);
    6.32 -      if (!VM_Version::v9_instructions_work()) {
    6.33 -        __ nop();
    6.34 -      }
    6.35        __ fb(Assembler::f_unordered, true, Assembler::pn, L);
     6.36        __ delayed()->st(G0, addr); // annulled if contents of rsrc is not NaN
    6.37        __ ftoi(FloatRegisterImpl::S, rsrc, rsrc);
    6.38 @@ -1909,7 +1895,7 @@
    6.39        switch (code) {
    6.40          case lir_add:  __ add  (lreg, rreg, res); break;
    6.41          case lir_sub:  __ sub  (lreg, rreg, res); break;
    6.42 -        case lir_mul:  __ mult (lreg, rreg, res); break;
    6.43 +        case lir_mul:  __ mulx (lreg, rreg, res); break;
    6.44          default: ShouldNotReachHere();
    6.45        }
    6.46      }
    6.47 @@ -1924,7 +1910,7 @@
    6.48        switch (code) {
    6.49          case lir_add:  __ add  (lreg, simm13, res); break;
    6.50          case lir_sub:  __ sub  (lreg, simm13, res); break;
    6.51 -        case lir_mul:  __ mult (lreg, simm13, res); break;
    6.52 +        case lir_mul:  __ mulx (lreg, simm13, res); break;
    6.53          default: ShouldNotReachHere();
    6.54        }
    6.55      } else {
    6.56 @@ -1936,7 +1922,7 @@
    6.57        switch (code) {
    6.58          case lir_add:  __ add  (lreg, (int)con, res); break;
    6.59          case lir_sub:  __ sub  (lreg, (int)con, res); break;
    6.60 -        case lir_mul:  __ mult (lreg, (int)con, res); break;
    6.61 +        case lir_mul:  __ mulx (lreg, (int)con, res); break;
    6.62          default: ShouldNotReachHere();
    6.63        }
    6.64      }
    6.65 @@ -3234,48 +3220,26 @@
    6.66      Register base = mem_addr->base()->as_register();
    6.67      if (src->is_register() && dest->is_address()) {
    6.68        // G4 is high half, G5 is low half
    6.69 -      if (VM_Version::v9_instructions_work()) {
    6.70 -        // clear the top bits of G5, and scale up G4
    6.71 -        __ srl (src->as_register_lo(),  0, G5);
    6.72 -        __ sllx(src->as_register_hi(), 32, G4);
    6.73 -        // combine the two halves into the 64 bits of G4
    6.74 -        __ or3(G4, G5, G4);
    6.75 -        null_check_offset = __ offset();
    6.76 -        if (idx == noreg) {
    6.77 -          __ stx(G4, base, disp);
    6.78 -        } else {
    6.79 -          __ stx(G4, base, idx);
    6.80 -        }
    6.81 +      // clear the top bits of G5, and scale up G4
    6.82 +      __ srl (src->as_register_lo(),  0, G5);
    6.83 +      __ sllx(src->as_register_hi(), 32, G4);
    6.84 +      // combine the two halves into the 64 bits of G4
    6.85 +      __ or3(G4, G5, G4);
    6.86 +      null_check_offset = __ offset();
    6.87 +      if (idx == noreg) {
    6.88 +        __ stx(G4, base, disp);
    6.89        } else {
    6.90 -        __ mov (src->as_register_hi(), G4);
    6.91 -        __ mov (src->as_register_lo(), G5);
    6.92 -        null_check_offset = __ offset();
    6.93 -        if (idx == noreg) {
    6.94 -          __ std(G4, base, disp);
    6.95 -        } else {
    6.96 -          __ std(G4, base, idx);
    6.97 -        }
    6.98 +        __ stx(G4, base, idx);
    6.99        }
   6.100      } else if (src->is_address() && dest->is_register()) {
   6.101        null_check_offset = __ offset();
   6.102 -      if (VM_Version::v9_instructions_work()) {
   6.103 -        if (idx == noreg) {
   6.104 -          __ ldx(base, disp, G5);
   6.105 -        } else {
   6.106 -          __ ldx(base, idx, G5);
   6.107 -        }
   6.108 -        __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
   6.109 -        __ mov (G5, dest->as_register_lo());     // copy low half into lo
   6.110 +      if (idx == noreg) {
   6.111 +        __ ldx(base, disp, G5);
   6.112        } else {
   6.113 -        if (idx == noreg) {
   6.114 -          __ ldd(base, disp, G4);
   6.115 -        } else {
   6.116 -          __ ldd(base, idx, G4);
   6.117 -        }
   6.118 -        // G4 is high half, G5 is low half
   6.119 -        __ mov (G4, dest->as_register_hi());
   6.120 -        __ mov (G5, dest->as_register_lo());
   6.121 +        __ ldx(base, idx, G5);
   6.122        }
   6.123 +      __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi
   6.124 +      __ mov (G5, dest->as_register_lo());     // copy low half into lo
   6.125      } else {
   6.126        Unimplemented();
   6.127      }
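
With the V8 std/ldd fallback gone, the surviving path packs the 32-bit register pair into one 64-bit value (srl/sllx/or3) so a single atomic stx can store a volatile long, and unpacks with srax/mov after a single ldx. The arithmetic those sequences compute, as a plain C++ sketch rather than the emitted code:

    #include <cstdint>

    // srl lo, 0, G5 ; sllx hi, 32, G4 ; or3 G4, G5, G4
    uint64_t pack_halves(uint32_t hi, uint32_t lo) {
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    // ldx [...], G5 ; srax G5, 32, hi ; mov G5, lo
    void unpack_halves(uint64_t v, int32_t& hi, uint32_t& lo) {
      hi = static_cast<int32_t>(v >> 32); // high half, sign-preserving
      lo = static_cast<uint32_t>(v);      // low half
    }
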
     7.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
     7.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
     7.3 @@ -108,7 +108,7 @@
     7.4  
     7.5    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
     7.6    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
     7.7 -  casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
     7.8 +  cas_ptr(mark_addr.base(), Rmark, Rscratch);
     7.9    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    7.10    // hence we are done
    7.11    cmp(Rmark, Rscratch);
    7.12 @@ -149,7 +149,7 @@
    7.13  
     7.14    // Check if it is still a lightweight lock; this is true if we see
    7.15    // the stack address of the basicLock in the markOop of the object
    7.16 -  casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    7.17 +  cas_ptr(mark_addr.base(), Rbox, Rmark);
    7.18    cmp(Rbox, Rmark);
    7.19  
    7.20    brx(Assembler::notEqual, false, Assembler::pn, slow_case);
    7.21 @@ -276,7 +276,7 @@
    7.22      sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body
    7.23      initialize_body(t1, t2);
    7.24  #ifndef _LP64
    7.25 -  } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) {
    7.26 +  } else if (con_size_in_bytes < threshold * 2) {
    7.27      // on v9 we can do double word stores to fill twice as much space.
    7.28      assert(hdr_size_in_bytes % 8 == 0, "double word aligned");
    7.29      assert(con_size_in_bytes % 8 == 0, "double word aligned");
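
cas_ptr does the mark-word exchange with a bare V9 casx, replacing casx_under_lock and its V8 spin-lock fallback. A hedged sketch of the lightweight-locking handshake both call sites implement, with std::atomic standing in for the hardware CAS (names here are illustrative, not HotSpot's):

    #include <atomic>
    #include <cstdint>

    // If the mark word still holds the unlocked mark read earlier,
    // atomically install the stack address of our BasicLock.
    bool try_lightweight_lock(std::atomic<uintptr_t>& mark_word,
                              uintptr_t unlocked_mark,
                              uintptr_t basic_lock_addr) {
      uintptr_t expected = unlocked_mark;
      return mark_word.compare_exchange_strong(expected, basic_lock_addr);
    }
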
     8.1 --- a/src/cpu/sparc/vm/c2_init_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
     8.2 +++ b/src/cpu/sparc/vm/c2_init_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
     8.3 @@ -30,5 +30,4 @@
     8.4  
     8.5  void Compile::pd_compiler2_init() {
     8.6    guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" );
     8.7 -  guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" );
     8.8  }
     9.1 --- a/src/cpu/sparc/vm/disassembler_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
     9.2 +++ b/src/cpu/sparc/vm/disassembler_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
     9.3 @@ -30,8 +30,7 @@
     9.4    }
     9.5  
     9.6    static const char* pd_cpu_opts() {
     9.7 -    return (VM_Version::v9_instructions_work()?
     9.8 -            (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only");
     9.9 +    return "v9only";
    9.10    }
    9.11  
    9.12  #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
    10.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    10.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    10.3 @@ -110,8 +110,5 @@
    10.4                                                                              \
    10.5    product(uintx,  ArraycopyDstPrefetchDistance, 0,                          \
    10.6           "Distance to prefetch destination array in arraycopy")            \
    10.7 -                                                                            \
    10.8 -  develop(intx, V8AtomicOperationUnderLockSpinCount,    50,                 \
    10.9 -          "Number of times to spin wait on a v8 atomic operation lock")     \
   10.10  
   10.11  #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
    11.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    11.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    11.3 @@ -1210,8 +1210,7 @@
    11.4      st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
    11.5      // compare and exchange object_addr, markOop | 1, stack address of basicLock
    11.6      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    11.7 -    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
    11.8 -      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
    11.9 +    cas_ptr(mark_addr.base(), mark_reg, temp_reg);
   11.10  
   11.11      // if the compare and exchange succeeded we are done (we saw an unlocked object)
   11.12      cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
   11.13 @@ -1291,8 +1290,7 @@
   11.14      // we expect to see the stack address of the basicLock in case the
   11.15      // lock is still a light weight lock (lock_reg)
   11.16      assert(mark_addr.disp() == 0, "cas must take a zero displacement");
   11.17 -    casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
   11.18 -      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
   11.19 +    cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg);
   11.20      cmp(lock_reg, displaced_header_reg);
   11.21      brx(Assembler::equal, true, Assembler::pn, done);
   11.22      delayed()->st_ptr(G0, lockobj_addr);  // free entry
    12.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    12.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    12.3 @@ -118,7 +118,6 @@
    12.4        case bp_op2:     m = wdisp(  word_aligned_ones, 0, 19);  v = wdisp(  dest_pos, inst_pos, 19); break;
    12.5        case fb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    12.6        case br_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    12.7 -      case cb_op2:     m = wdisp(  word_aligned_ones, 0, 22);  v = wdisp(  dest_pos, inst_pos, 22); break;
    12.8        case bpr_op2: {
    12.9          if (is_cbcond(inst)) {
   12.10            m = wdisp10(word_aligned_ones, 0);
   12.11 @@ -149,7 +148,6 @@
   12.12        case bp_op2:     r = inv_wdisp(  inst, pos, 19);  break;
   12.13        case fb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   12.14        case br_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   12.15 -      case cb_op2:     r = inv_wdisp(  inst, pos, 22);  break;
   12.16        case bpr_op2: {
   12.17          if (is_cbcond(inst)) {
   12.18            r = inv_wdisp10(inst, pos);
   12.19 @@ -325,12 +323,6 @@
   12.20    trap(ST_RESERVED_FOR_USER_0);
   12.21  }
   12.22  
   12.23 -// flush windows (except current) using flushw instruction if avail.
   12.24 -void MacroAssembler::flush_windows() {
   12.25 -  if (VM_Version::v9_instructions_work())  flushw();
   12.26 -  else                                     flush_windows_trap();
   12.27 -}
   12.28 -
   12.29  // Write serialization page so VM thread can do a pseudo remote membar
   12.30  // We use the current thread pointer to calculate a thread specific
   12.31  // offset to write to within the page. This minimizes bus traffic
   12.32 @@ -358,88 +350,6 @@
   12.33    Unimplemented();
   12.34  }
   12.35  
   12.36 -void MacroAssembler::mult(Register s1, Register s2, Register d) {
   12.37 -  if(VM_Version::v9_instructions_work()) {
   12.38 -    mulx (s1, s2, d);
   12.39 -  } else {
   12.40 -    smul (s1, s2, d);
   12.41 -  }
   12.42 -}
   12.43 -
   12.44 -void MacroAssembler::mult(Register s1, int simm13a, Register d) {
   12.45 -  if(VM_Version::v9_instructions_work()) {
   12.46 -    mulx (s1, simm13a, d);
   12.47 -  } else {
   12.48 -    smul (s1, simm13a, d);
   12.49 -  }
   12.50 -}
   12.51 -
   12.52 -
   12.53 -#ifdef ASSERT
   12.54 -void MacroAssembler::read_ccr_v8_assert(Register ccr_save) {
   12.55 -  const Register s1 = G3_scratch;
   12.56 -  const Register s2 = G4_scratch;
   12.57 -  Label get_psr_test;
   12.58 -  // Get the condition codes the V8 way.
   12.59 -  read_ccr_trap(s1);
   12.60 -  mov(ccr_save, s2);
   12.61 -  // This is a test of V8 which has icc but not xcc
   12.62 -  // so mask off the xcc bits
   12.63 -  and3(s2, 0xf, s2);
   12.64 -  // Compare condition codes from the V8 and V9 ways.
   12.65 -  subcc(s2, s1, G0);
   12.66 -  br(Assembler::notEqual, true, Assembler::pt, get_psr_test);
   12.67 -  delayed()->breakpoint_trap();
   12.68 -  bind(get_psr_test);
   12.69 -}
   12.70 -
   12.71 -void MacroAssembler::write_ccr_v8_assert(Register ccr_save) {
   12.72 -  const Register s1 = G3_scratch;
   12.73 -  const Register s2 = G4_scratch;
   12.74 -  Label set_psr_test;
   12.75 -  // Write out the saved condition codes the V8 way
   12.76 -  write_ccr_trap(ccr_save, s1, s2);
   12.77 -  // Read back the condition codes using the V9 instruction
   12.78 -  rdccr(s1);
   12.79 -  mov(ccr_save, s2);
   12.80 -  // This is a test of V8 which has icc but not xcc
   12.81 -  // so mask off the xcc bits
   12.82 -  and3(s2, 0xf, s2);
   12.83 -  and3(s1, 0xf, s1);
   12.84 -  // Compare the V8 way with the V9 way.
   12.85 -  subcc(s2, s1, G0);
   12.86 -  br(Assembler::notEqual, true, Assembler::pt, set_psr_test);
   12.87 -  delayed()->breakpoint_trap();
   12.88 -  bind(set_psr_test);
   12.89 -}
   12.90 -#else
   12.91 -#define read_ccr_v8_assert(x)
   12.92 -#define write_ccr_v8_assert(x)
   12.93 -#endif // ASSERT
   12.94 -
   12.95 -void MacroAssembler::read_ccr(Register ccr_save) {
   12.96 -  if (VM_Version::v9_instructions_work()) {
   12.97 -    rdccr(ccr_save);
   12.98 -    // Test code sequence used on V8.  Do not move above rdccr.
   12.99 -    read_ccr_v8_assert(ccr_save);
  12.100 -  } else {
  12.101 -    read_ccr_trap(ccr_save);
  12.102 -  }
  12.103 -}
  12.104 -
  12.105 -void MacroAssembler::write_ccr(Register ccr_save) {
  12.106 -  if (VM_Version::v9_instructions_work()) {
  12.107 -    // Test code sequence used on V8.  Do not move below wrccr.
  12.108 -    write_ccr_v8_assert(ccr_save);
  12.109 -    wrccr(ccr_save);
  12.110 -  } else {
  12.111 -    const Register temp_reg1 = G3_scratch;
  12.112 -    const Register temp_reg2 = G4_scratch;
  12.113 -    write_ccr_trap(ccr_save, temp_reg1, temp_reg2);
  12.114 -  }
  12.115 -}
  12.116 -
  12.117 -
  12.118  // Calls to C land
  12.119  
  12.120  #ifdef ASSERT
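
The deleted mult() wrapper picked V9 mulx (full 64-bit multiply) or V8 smul (32x32 -> 64, high half in the Y register) at runtime; with V8 gone, callers such as the C1 arithmetic hunks above emit mulx directly. The semantic difference, modeled in plain C++ (unsigned arithmetic mirrors the hardware's wrap-around):

    #include <cstdint>

    // V9 mulx: low 64 bits of the product.
    int64_t mulx(int64_t a, int64_t b) {
      return static_cast<int64_t>(static_cast<uint64_t>(a) *
                                  static_cast<uint64_t>(b));
    }

    // V8 smul: signed 32x32 -> 64; the high half lands in %y.
    int32_t smul(int32_t a, int32_t b, int32_t& y_reg) {
      int64_t p = static_cast<int64_t>(a) * b;
      y_reg = static_cast<int32_t>(p >> 32); // read back with rd %y
      return static_cast<int32_t>(p);        // destination register
    }
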
   12.121 @@ -465,8 +375,9 @@
  12.122  #ifdef ASSERT
  12.123    AddressLiteral last_get_thread_addrlit(&last_get_thread);
  12.124    set(last_get_thread_addrlit, L3);
  12.125 -  inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call
  12.126 -  st_ptr(L4, L3, 0);
  12.127 +  rdpc(L4);
   12.128 +  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
          +  st_ptr(L4, L3, 0);
  12.129  #endif
  12.130    call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  12.131    delayed()->nop();
  12.132 @@ -1327,7 +1237,7 @@
  12.133  
  12.134  void RegistersForDebugging::save_registers(MacroAssembler* a) {
  12.135    a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  12.136 -  a->flush_windows();
  12.137 +  a->flushw();
  12.138    int i;
  12.139    for (i = 0; i < 8; ++i) {
  12.140      a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr( L1, O0, i_offset(i));
  12.141 @@ -1338,7 +1248,7 @@
  12.142    for (i = 0;  i < 32; ++i) {
  12.143      a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  12.144    }
  12.145 -  for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
  12.146 +  for (i = 0; i < 64; i += 2) {
  12.147      a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  12.148    }
  12.149  }
  12.150 @@ -1350,7 +1260,7 @@
  12.151    for (int j = 0; j < 32; ++j) {
  12.152      a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  12.153    }
  12.154 -  for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) {
  12.155 +  for (int k = 0; k < 64; k += 2) {
  12.156      a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  12.157    }
  12.158  }
  12.159 @@ -1465,8 +1375,6 @@
  12.160  // the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
  12.161  // call.
  12.162  void MacroAssembler::verify_oop_subroutine() {
  12.163 -  assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" );
  12.164 -
  12.165    // Leaf call; no frame.
  12.166    Label succeed, fail, null_or_fail;
  12.167  
  12.168 @@ -1870,26 +1778,17 @@
  12.169    // And the equals case for the high part does not need testing,
  12.170    // since that triplet is reached only after finding the high halves differ.
  12.171  
  12.172 -  if (VM_Version::v9_instructions_work()) {
  12.173 -    mov(-1, Rresult);
  12.174 -    ba(done);  delayed()-> movcc(greater, false, icc,  1, Rresult);
  12.175 -  } else {
  12.176 -    br(less,    true, pt, done); delayed()-> set(-1, Rresult);
  12.177 -    br(greater, true, pt, done); delayed()-> set( 1, Rresult);
  12.178 -  }
  12.179 -
  12.180 -  bind( check_low_parts );
  12.181 -
  12.182 -  if (VM_Version::v9_instructions_work()) {
  12.183 -    mov(                               -1, Rresult);
  12.184 -    movcc(equal,           false, icc,  0, Rresult);
  12.185 -    movcc(greaterUnsigned, false, icc,  1, Rresult);
  12.186 -  } else {
  12.187 -    set(-1, Rresult);
  12.188 -    br(equal,           true, pt, done); delayed()->set( 0, Rresult);
  12.189 -    br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult);
  12.190 -  }
  12.191 -  bind( done );
  12.192 +  mov(-1, Rresult);
  12.193 +  ba(done);
  12.194 +  delayed()->movcc(greater, false, icc,  1, Rresult);
  12.195 +
  12.196 +  bind(check_low_parts);
  12.197 +
  12.198 +  mov(                               -1, Rresult);
  12.199 +  movcc(equal,           false, icc,  0, Rresult);
  12.200 +  movcc(greaterUnsigned, false, icc,  1, Rresult);
  12.201 +
  12.202 +  bind(done);
  12.203  }
  12.204  
  12.205  void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
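
The replacement sequence leans on V9 conditional moves: seed Rresult with -1, then let movcc overwrite it with 0 or 1 according to the condition codes, where the V8 path needed annulled branches. What the two triplets compute, as a C++ model of a 64-bit compare performed in 32-bit halves:

    #include <cstdint>

    // Compare (hi_a:lo_a) against (hi_b:lo_b): signed on the high halves,
    // unsigned on the low halves; yields -1, 0, or 1.
    int lcmp(int32_t hi_a, uint32_t lo_a, int32_t hi_b, uint32_t lo_b) {
      if (hi_a != hi_b) {              // mov -1 ; movcc(greater, 1)
        return hi_a > hi_b ? 1 : -1;
      }
      if (lo_a == lo_b) return 0;      // check_low_parts: movcc(equal, 0)
      return lo_a > lo_b ? 1 : -1;     // movcc(greaterUnsigned, 1)
    }
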
  12.206 @@ -2117,119 +2016,24 @@
  12.207  void MacroAssembler::float_cmp( bool is_float, int unordered_result,
  12.208                                  FloatRegister Fa, FloatRegister Fb,
  12.209                                  Register Rresult) {
  12.210 -
  12.211 -  fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb);
  12.212 -
  12.213 -  Condition lt = unordered_result == -1 ? f_unorderedOrLess    : f_less;
  12.214 -  Condition eq =                          f_equal;
  12.215 -  Condition gt = unordered_result ==  1 ? f_unorderedOrGreater : f_greater;
  12.216 -
  12.217 -  if (VM_Version::v9_instructions_work()) {
  12.218 -
  12.219 -    mov(-1, Rresult);
  12.220 -    movcc(eq, true, fcc0, 0, Rresult);
  12.221 -    movcc(gt, true, fcc0, 1, Rresult);
  12.222 -
  12.223 +  if (is_float) {
  12.224 +    fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
  12.225    } else {
  12.226 -    Label done;
  12.227 -
  12.228 -    set( -1, Rresult );
  12.229 -    //fb(lt, true, pn, done); delayed()->set( -1, Rresult );
  12.230 -    fb( eq, true, pn, done);  delayed()->set(  0, Rresult );
  12.231 -    fb( gt, true, pn, done);  delayed()->set(  1, Rresult );
  12.232 -
  12.233 -    bind (done);
  12.234 +    fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
  12.235 +  }
  12.236 +
  12.237 +  if (unordered_result == 1) {
  12.238 +    mov(                                    -1, Rresult);
  12.239 +    movcc(f_equal,              true, fcc0,  0, Rresult);
  12.240 +    movcc(f_unorderedOrGreater, true, fcc0,  1, Rresult);
  12.241 +  } else {
  12.242 +    mov(                                    -1, Rresult);
  12.243 +    movcc(f_equal,              true, fcc0,  0, Rresult);
  12.244 +    movcc(f_greater,            true, fcc0,  1, Rresult);
  12.245    }
  12.246  }
  12.247  
  12.248  
  12.249 -void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  12.250 -{
  12.251 -  if (VM_Version::v9_instructions_work()) {
  12.252 -    Assembler::fneg(w, s, d);
  12.253 -  } else {
  12.254 -    if (w == FloatRegisterImpl::S) {
  12.255 -      Assembler::fneg(w, s, d);
  12.256 -    } else if (w == FloatRegisterImpl::D) {
  12.257 -      // number() does a sanity check on the alignment.
  12.258 -      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  12.259 -        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  12.260 -
  12.261 -      Assembler::fneg(FloatRegisterImpl::S, s, d);
  12.262 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.263 -    } else {
  12.264 -      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  12.265 -
  12.266 -      // number() does a sanity check on the alignment.
  12.267 -      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  12.268 -        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  12.269 -
  12.270 -      Assembler::fneg(FloatRegisterImpl::S, s, d);
  12.271 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.272 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  12.273 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  12.274 -    }
  12.275 -  }
  12.276 -}
  12.277 -
  12.278 -void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  12.279 -{
  12.280 -  if (VM_Version::v9_instructions_work()) {
  12.281 -    Assembler::fmov(w, s, d);
  12.282 -  } else {
  12.283 -    if (w == FloatRegisterImpl::S) {
  12.284 -      Assembler::fmov(w, s, d);
  12.285 -    } else if (w == FloatRegisterImpl::D) {
  12.286 -      // number() does a sanity check on the alignment.
  12.287 -      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  12.288 -        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  12.289 -
  12.290 -      Assembler::fmov(FloatRegisterImpl::S, s, d);
  12.291 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.292 -    } else {
  12.293 -      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  12.294 -
  12.295 -      // number() does a sanity check on the alignment.
  12.296 -      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  12.297 -        ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  12.298 -
  12.299 -      Assembler::fmov(FloatRegisterImpl::S, s, d);
  12.300 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.301 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  12.302 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  12.303 -    }
  12.304 -  }
  12.305 -}
  12.306 -
  12.307 -void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d)
  12.308 -{
  12.309 -  if (VM_Version::v9_instructions_work()) {
  12.310 -    Assembler::fabs(w, s, d);
  12.311 -  } else {
  12.312 -    if (w == FloatRegisterImpl::S) {
  12.313 -      Assembler::fabs(w, s, d);
  12.314 -    } else if (w == FloatRegisterImpl::D) {
  12.315 -      // number() does a sanity check on the alignment.
  12.316 -      assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) &&
  12.317 -        ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check");
  12.318 -
  12.319 -      Assembler::fabs(FloatRegisterImpl::S, s, d);
  12.320 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.321 -    } else {
  12.322 -      assert(w == FloatRegisterImpl::Q, "Invalid float register width");
  12.323 -
  12.324 -      // number() does a sanity check on the alignment.
  12.325 -      assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) &&
  12.326 -       ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check");
  12.327 -
  12.328 -      Assembler::fabs(FloatRegisterImpl::S, s, d);
  12.329 -      Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor());
  12.330 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor());
  12.331 -      Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor());
  12.332 -    }
  12.333 -  }
  12.334 -}
  12.335 -
  12.336  void MacroAssembler::save_all_globals_into_locals() {
  12.337    mov(G1,L1);
  12.338    mov(G2,L2);
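
float_cmp now always pairs fcmp with movcc on fcc0, and the fneg/fmov/fabs wrappers that widened V8's single-precision-only forms are gone with it. The subtle case is an unordered compare (either operand NaN), which must collapse to -1 or 1 depending on the bytecode (fcmpl vs. fcmpg); that is what unordered_result selects. A model of the value the sequence leaves in Rresult:

    // Three-way float compare; unordered_result is -1 for fcmpl, 1 for fcmpg.
    int float_cmp(float a, float b, int unordered_result) {
      if (a < b)  return -1;
      if (a > b)  return  1;
      if (a == b) return  0;
      return unordered_result; // NaN: every ordered comparison was false
    }
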
  12.339 @@ -2250,135 +2054,6 @@
  12.340    mov(L7,G7);
  12.341  }
  12.342  
  12.343 -// Use for 64 bit operation.
  12.344 -void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
  12.345 -{
  12.346 -  // store ptr_reg as the new top value
  12.347 -#ifdef _LP64
  12.348 -  casx(top_ptr_reg, top_reg, ptr_reg);
  12.349 -#else
  12.350 -  cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm);
  12.351 -#endif // _LP64
  12.352 -}
  12.353 -
  12.354 -// [RGV] This routine does not handle 64 bit operations.
  12.355 -//       use casx_under_lock() or casx directly!!!
  12.356 -void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm)
  12.357 -{
  12.358 -  // store ptr_reg as the new top value
  12.359 -  if (VM_Version::v9_instructions_work()) {
  12.360 -    cas(top_ptr_reg, top_reg, ptr_reg);
  12.361 -  } else {
  12.362 -
  12.363 -    // If the register is not an out nor global, it is not visible
  12.364 -    // after the save.  Allocate a register for it, save its
  12.365 -    // value in the register save area (the save may not flush
  12.366 -    // registers to the save area).
  12.367 -
  12.368 -    Register top_ptr_reg_after_save;
  12.369 -    Register top_reg_after_save;
  12.370 -    Register ptr_reg_after_save;
  12.371 -
  12.372 -    if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) {
  12.373 -      top_ptr_reg_after_save = top_ptr_reg->after_save();
  12.374 -    } else {
  12.375 -      Address reg_save_addr = top_ptr_reg->address_in_saved_window();
  12.376 -      top_ptr_reg_after_save = L0;
  12.377 -      st(top_ptr_reg, reg_save_addr);
  12.378 -    }
  12.379 -
  12.380 -    if (top_reg->is_out() || top_reg->is_global()) {
  12.381 -      top_reg_after_save = top_reg->after_save();
  12.382 -    } else {
  12.383 -      Address reg_save_addr = top_reg->address_in_saved_window();
  12.384 -      top_reg_after_save = L1;
  12.385 -      st(top_reg, reg_save_addr);
  12.386 -    }
  12.387 -
  12.388 -    if (ptr_reg->is_out() || ptr_reg->is_global()) {
  12.389 -      ptr_reg_after_save = ptr_reg->after_save();
  12.390 -    } else {
  12.391 -      Address reg_save_addr = ptr_reg->address_in_saved_window();
  12.392 -      ptr_reg_after_save = L2;
  12.393 -      st(ptr_reg, reg_save_addr);
  12.394 -    }
  12.395 -
  12.396 -    const Register& lock_reg = L3;
  12.397 -    const Register& lock_ptr_reg = L4;
  12.398 -    const Register& value_reg = L5;
  12.399 -    const Register& yield_reg = L6;
  12.400 -    const Register& yieldall_reg = L7;
  12.401 -
  12.402 -    save_frame();
  12.403 -
  12.404 -    if (top_ptr_reg_after_save == L0) {
  12.405 -      ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save);
  12.406 -    }
  12.407 -
  12.408 -    if (top_reg_after_save == L1) {
  12.409 -      ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save);
  12.410 -    }
  12.411 -
  12.412 -    if (ptr_reg_after_save == L2) {
  12.413 -      ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save);
  12.414 -    }
  12.415 -
  12.416 -    Label(retry_get_lock);
  12.417 -    Label(not_same);
  12.418 -    Label(dont_yield);
  12.419 -
  12.420 -    assert(lock_addr, "lock_address should be non null for v8");
  12.421 -    set((intptr_t)lock_addr, lock_ptr_reg);
  12.422 -    // Initialize yield counter
  12.423 -    mov(G0,yield_reg);
  12.424 -    mov(G0, yieldall_reg);
  12.425 -    set(StubRoutines::Sparc::locked, lock_reg);
  12.426 -
  12.427 -    bind(retry_get_lock);
  12.428 -    cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield);
  12.429 -
  12.430 -    if(use_call_vm) {
  12.431 -      Untested("Need to verify global reg consistancy");
  12.432 -      call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg);
  12.433 -    } else {
  12.434 -      // Save the regs and make space for a C call
  12.435 -      save(SP, -96, SP);
  12.436 -      save_all_globals_into_locals();
  12.437 -      call(CAST_FROM_FN_PTR(address,os::yield_all));
  12.438 -      delayed()->mov(yieldall_reg, O0);
  12.439 -      restore_globals_from_locals();
  12.440 -      restore();
  12.441 -    }
  12.442 -
  12.443 -    // reset the counter
  12.444 -    mov(G0,yield_reg);
  12.445 -    add(yieldall_reg, 1, yieldall_reg);
  12.446 -
  12.447 -    bind(dont_yield);
  12.448 -    // try to get lock
  12.449 -    Assembler::swap(lock_ptr_reg, 0, lock_reg);
  12.450 -
  12.451 -    // did we get the lock?
  12.452 -    cmp(lock_reg, StubRoutines::Sparc::unlocked);
  12.453 -    br(Assembler::notEqual, true, Assembler::pn, retry_get_lock);
  12.454 -    delayed()->add(yield_reg,1,yield_reg);
  12.455 -
  12.456 -    // yes, got lock.  do we have the same top?
  12.457 -    ld(top_ptr_reg_after_save, 0, value_reg);
  12.458 -    cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same);
  12.459 -
  12.460 -    // yes, same top.
  12.461 -    st(ptr_reg_after_save, top_ptr_reg_after_save, 0);
  12.462 -    membar(Assembler::StoreStore);
  12.463 -
  12.464 -    bind(not_same);
  12.465 -    mov(value_reg, ptr_reg_after_save);
  12.466 -    st(lock_reg, lock_ptr_reg, 0); // unlock
  12.467 -
  12.468 -    restore();
  12.469 -  }
  12.470 -}
  12.471 -
  12.472  RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
  12.473                                                        Register tmp,
  12.474                                                        int offset) {
  12.475 @@ -2970,7 +2645,7 @@
  12.476                    markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
  12.477                    mark_reg);
  12.478    or3(G2_thread, mark_reg, temp_reg);
  12.479 -  casn(mark_addr.base(), mark_reg, temp_reg);
  12.480 +  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  12.481    // If the biasing toward our thread failed, this means that
  12.482    // another thread succeeded in biasing it toward itself and we
  12.483    // need to revoke that bias. The revocation will occur in the
  12.484 @@ -2998,7 +2673,7 @@
  12.485    load_klass(obj_reg, temp_reg);
  12.486    ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  12.487    or3(G2_thread, temp_reg, temp_reg);
  12.488 -  casn(mark_addr.base(), mark_reg, temp_reg);
  12.489 +  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  12.490    // If the biasing toward our thread failed, this means that
  12.491    // another thread succeeded in biasing it toward itself and we
  12.492    // need to revoke that bias. The revocation will occur in the
  12.493 @@ -3027,7 +2702,7 @@
  12.494    // bits in this situation. Should attempt to preserve them.
  12.495    load_klass(obj_reg, temp_reg);
  12.496    ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
  12.497 -  casn(mark_addr.base(), mark_reg, temp_reg);
  12.498 +  cas_ptr(mark_addr.base(), mark_reg, temp_reg);
  12.499    // Fall through to the normal CAS-based lock, because no matter what
  12.500    // the result of the above CAS, some thread must have succeeded in
  12.501    // removing the bias bit from the object's header.
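
All three biased-locking paths above share one idiom: build the desired
mark word in a temp register, cas_ptr it over the object's current mark,
and treat "the CAS observed something else" as "another thread won the
race; revoke the bias". A compact sketch of one acquisition attempt
(std::atomic stand-in, hypothetical names):

    #include <atomic>
    #include <cstdint>

    // Try to bias the object toward this thread. Returns true on success;
    // on failure the caller must arrange for bias revocation.
    bool try_bias(std::atomic<intptr_t>& mark_word, intptr_t unbiased_mark,
                  intptr_t thread_id) {
      intptr_t expected = unbiased_mark;              // mark_reg
      intptr_t desired  = thread_id | unbiased_mark;  // or3(G2_thread, mark_reg, temp_reg)
      return mark_word.compare_exchange_strong(expected, desired);  // cas_ptr(...)
    }
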
  12.502 @@ -3058,15 +2733,6 @@
  12.503  }
  12.504  
  12.505  
  12.506 -// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by
  12.507 -// Solaris/SPARC's "as".  Another apt name would be cas_ptr()
  12.508 -
  12.509 -void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) {
  12.510 -  casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  12.511 -}
  12.512 -
  12.513 -
  12.514 -
  12.515  // compiler_lock_object() and compiler_unlock_object() are direct transliterations
  12.516  // of i486.ad fast_lock() and fast_unlock().  See those methods for detailed comments.
  12.517  // The code could be tightened up considerably.
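
casn was a pointer-width CAS -- casx on 64-bit, cas on 32-bit -- and the
comment being deleted even suggests the new name. Its contract, like the
SPARC cas/casx instructions, is to leave the value previously in memory
in the "set" register so callers can compare afterwards. In rough C++
terms (a sketch, not the HotSpot API):

    #include <atomic>
    #include <cstdint>

    // Sketch of cas_ptr semantics: returns the old memory value; the
    // exchange happened iff the return value equals 'compare'.
    intptr_t cas_ptr(std::atomic<intptr_t>& word, intptr_t compare, intptr_t set) {
      intptr_t expected = compare;
      // On failure, 'expected' is updated to the value actually found,
      // which is what callers test with cmp(Rmark, Rscratch).
      word.compare_exchange_strong(expected, set);
      return expected;
    }
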
  12.518 @@ -3129,8 +2795,7 @@
  12.519  
  12.520       // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
  12.521       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  12.522 -     casx_under_lock(mark_addr.base(), Rmark, Rscratch,
  12.523 -        (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  12.524 +     cas_ptr(mark_addr.base(), Rmark, Rscratch);
  12.525  
  12.526       // if compare/exchange succeeded we found an unlocked object and we now have locked it
  12.527       // hence we are done
  12.528 @@ -3176,7 +2841,7 @@
  12.529        mov(Rbox,  Rscratch);
  12.530        or3(Rmark, markOopDesc::unlocked_value, Rmark);
  12.531        assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  12.532 -      casn(mark_addr.base(), Rmark, Rscratch);
  12.533 +      cas_ptr(mark_addr.base(), Rmark, Rscratch);
  12.534        cmp(Rmark, Rscratch);
  12.535        brx(Assembler::equal, false, Assembler::pt, done);
  12.536        delayed()->sub(Rscratch, SP, Rscratch);
  12.537 @@ -3207,7 +2872,7 @@
  12.538        // Invariant: if we acquire the lock then _recursions should be 0.
  12.539        add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
  12.540        mov(G2_thread, Rscratch);
  12.541 -      casn(Rmark, G0, Rscratch);
  12.542 +      cas_ptr(Rmark, G0, Rscratch);
  12.543        cmp(Rscratch, G0);
  12.544        // Intentional fall-through into done
  12.545     } else {
  12.546 @@ -3240,7 +2905,7 @@
  12.547        mov(0, Rscratch);
  12.548        or3(Rmark, markOopDesc::unlocked_value, Rmark);
  12.549        assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  12.550 -      casn(mark_addr.base(), Rmark, Rscratch);
  12.551 +      cas_ptr(mark_addr.base(), Rmark, Rscratch);
  12.552  // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
  12.553        cmp(Rscratch, Rmark);
  12.554        brx(Assembler::notZero, false, Assembler::pn, Recursive);
  12.555 @@ -3266,7 +2931,7 @@
  12.556        // the fast-path stack-lock code from the interpreter and always passed
  12.557        // control to the "slow" operators in synchronizer.cpp.
  12.558  
  12.559 -      // RScratch contains the fetched obj->mark value from the failed CASN.
  12.560 +      // RScratch contains the fetched obj->mark value from the failed CAS.
  12.561  #ifdef _LP64
  12.562        sub(Rscratch, STACK_BIAS, Rscratch);
  12.563  #endif
  12.564 @@ -3300,7 +2965,7 @@
  12.565        // Invariant: if we acquire the lock then _recursions should be 0.
  12.566        add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
  12.567        mov(G2_thread, Rscratch);
  12.568 -      casn(Rmark, G0, Rscratch);
  12.569 +      cas_ptr(Rmark, G0, Rscratch);
  12.570        cmp(Rscratch, G0);
  12.571        // ST box->displaced_header = NonZero.
  12.572        // Any non-zero value suffices:
  12.573 @@ -3336,8 +3001,7 @@
  12.574      // Check if it is still a lightweight lock; this is true if we see
  12.575       // the stack address of the basicLock in the markOop of the object
  12.576       assert(mark_addr.disp() == 0, "cas must take a zero displacement");
  12.577 -     casx_under_lock(mark_addr.base(), Rbox, Rmark,
  12.578 -       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  12.579 +     cas_ptr(mark_addr.base(), Rbox, Rmark);
  12.580       ba(done);
  12.581       delayed()->cmp(Rbox, Rmark);
  12.582       bind(done);
  12.583 @@ -3398,7 +3062,7 @@
  12.584        delayed()->andcc(G0, G0, G0);
  12.585        add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
  12.586        mov(G2_thread, Rscratch);
  12.587 -      casn(Rmark, G0, Rscratch);
  12.588 +      cas_ptr(Rmark, G0, Rscratch);
  12.589        // invert icc.zf and goto done
  12.590        br_notnull(Rscratch, false, Assembler::pt, done);
  12.591        delayed()->cmp(G0, G0);
  12.592 @@ -3440,7 +3104,7 @@
  12.593     // A prototype implementation showed excellent results, although
  12.594     // the scavenger and timeout code was rather involved.
  12.595  
  12.596 -   casn(mark_addr.base(), Rbox, Rscratch);
  12.597 +   cas_ptr(mark_addr.base(), Rbox, Rscratch);
  12.598     cmp(Rbox, Rscratch);
  12.599     // Intentional fall through into done ...
  12.600  
  12.601 @@ -3540,7 +3204,8 @@
  12.602  
  12.603    if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
  12.604      // No allocation in the shared eden.
  12.605 -    ba_short(slow_case);
  12.606 +    ba(slow_case);
  12.607 +    delayed()->nop();
  12.608    } else {
  12.609      // get eden boundaries
  12.610      // note: we need both top & top_addr!
  12.611 @@ -3583,7 +3248,7 @@
  12.612      // Compare obj with the value at top_addr; if still equal, swap the value of
  12.613      // end with the value at top_addr. If not equal, read the value at top_addr
  12.614      // into end.
  12.615 -    casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
  12.616 +    cas_ptr(top_addr, obj, end);
  12.617      // if someone beat us on the allocation, try again, otherwise continue
  12.618      cmp(obj, end);
  12.619      brx(Assembler::notEqual, false, Assembler::pn, retry);
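
The hunk above is the shared-eden fast path: read top, compute
end = top + size, CAS the new top in, and retry if another thread got
there first. The same loop in portable C++ (a simplified sketch, heap
bound check included):

    #include <atomic>
    #include <cstddef>

    // Bump-pointer allocation in a shared region. Returns the old top on
    // success, or nullptr when the caller should take the slow path.
    char* eden_allocate(std::atomic<char*>& top, char* heap_end, size_t size) {
      char* obj = top.load(std::memory_order_relaxed);
      for (;;) {
        char* end = obj + size;
        if (end > heap_end) return nullptr;           // no room: slow path
        // cas_ptr(top_addr, obj, end): on failure 'obj' is refreshed with
        // the competing thread's value and the loop retries.
        if (top.compare_exchange_weak(obj, end)) return obj;
      }
    }
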
    13.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    13.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    13.3 @@ -1,5 +1,5 @@
    13.4  /*
    13.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    13.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    13.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    13.8   *
    13.9   * This code is free software; you can redistribute it and/or modify it
   13.10 @@ -963,7 +963,7 @@
   13.11    inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
   13.12  
   13.13    using Assembler::swap;
   13.14 -  inline void swap(Address& a, Register d, int offset = 0);
   13.15 +  inline void swap(const Address& a, Register d, int offset = 0);
   13.16  
   13.17    // address pseudos: make these names unlike instruction names to avoid confusion
   13.18    inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
   13.19 @@ -1056,13 +1056,6 @@
   13.20  
   13.21    void breakpoint_trap();
   13.22    void breakpoint_trap(Condition c, CC cc);
   13.23 -  void flush_windows_trap();
   13.24 -  void clean_windows_trap();
   13.25 -  void get_psr_trap();
   13.26 -  void set_psr_trap();
   13.27 -
   13.28 -  // V8/V9 flush_windows
   13.29 -  void flush_windows();
   13.30  
   13.31    // Support for serializing memory accesses between threads
   13.32    void serialize_memory(Register thread, Register tmp1, Register tmp2);
   13.33 @@ -1071,14 +1064,6 @@
   13.34    void enter();
   13.35    void leave();
   13.36  
   13.37 -  // V8/V9 integer multiply
   13.38 -  void mult(Register s1, Register s2, Register d);
   13.39 -  void mult(Register s1, int simm13a, Register d);
   13.40 -
   13.41 -  // V8/V9 read and write of condition codes.
   13.42 -  void read_ccr(Register d);
   13.43 -  void write_ccr(Register s);
   13.44 -
   13.45    // Manipulation of C++ bools
   13.46    // These are idioms to flag the need for care with accessing bools but on
   13.47    // this platform we assume byte size
   13.48 @@ -1162,21 +1147,6 @@
   13.49    // check_and_forward_exception to handle exceptions when it is safe
   13.50    void check_and_forward_exception(Register scratch_reg);
   13.51  
   13.52 - private:
   13.53 -  // For V8
   13.54 -  void read_ccr_trap(Register ccr_save);
   13.55 -  void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2);
   13.56 -
   13.57 -#ifdef ASSERT
   13.58 -  // For V8 debugging.  Uses V8 instruction sequence and checks
   13.59 -  // result with V9 instructions rdccr and wrccr.
   13.60 -  // Uses Gscatch and Gscatch2
   13.61 -  void read_ccr_v8_assert(Register ccr_save);
   13.62 -  void write_ccr_v8_assert(Register ccr_save);
   13.63 -#endif // ASSERT
   13.64 -
   13.65 - public:
   13.66 -
   13.67    // Write to card table for - register is destroyed afterwards.
   13.68    void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
   13.69  
   13.70 @@ -1314,20 +1284,9 @@
   13.71                    FloatRegister Fa, FloatRegister Fb,
   13.72                    Register Rresult);
   13.73  
   13.74 -  void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
   13.75 -  void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); }
   13.76 -  void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
   13.77 -  void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d);
   13.78 -
   13.79    void save_all_globals_into_locals();
   13.80    void restore_globals_from_locals();
   13.81  
   13.82 -  void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
   13.83 -    address lock_addr=0, bool use_call_vm=false);
   13.84 -  void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg,
   13.85 -    address lock_addr=0, bool use_call_vm=false);
   13.86 -  void casn (Register addr_reg, Register cmp_reg, Register set_reg) ;
   13.87 -
   13.88    // These set the icc condition code to equal if the lock succeeded
   13.89    // and notEqual if it failed and requires a slow case
   13.90    void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
    14.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
    14.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
    14.3 @@ -1,5 +1,5 @@
    14.4  /*
    14.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    14.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    14.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    14.8   *
    14.9   * This code is free software; you can redistribute it and/or modify it
   14.10 @@ -229,10 +229,7 @@
   14.11  // Use the right branch for the platform
   14.12  
   14.13  inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
   14.14 -  if (VM_Version::v9_instructions_work())
   14.15 -    Assembler::bp(c, a, icc, p, d, rt);
   14.16 -  else
   14.17 -    Assembler::br(c, a, d, rt);
   14.18 +  Assembler::bp(c, a, icc, p, d, rt);
   14.19  }
   14.20  
   14.21  inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) {
   14.22 @@ -268,10 +265,7 @@
   14.23  }
   14.24  
   14.25  inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) {
   14.26 -  if (VM_Version::v9_instructions_work())
   14.27 -    fbp(c, a, fcc0, p, d, rt);
   14.28 -  else
   14.29 -    Assembler::fb(c, a, d, rt);
   14.30 +  fbp(c, a, fcc0, p, d, rt);
   14.31  }
   14.32  
   14.33  inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) {
   14.34 @@ -334,7 +328,6 @@
   14.35  
   14.36  // prefetch instruction
   14.37  inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) {
   14.38 -  if (VM_Version::v9_instructions_work())
   14.39 -    Assembler::bp( never, true, xcc, pt, d, rt );
   14.40 +  Assembler::bp( never, true, xcc, pt, d, rt );
   14.41  }
   14.42  inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); }
   14.43 @@ -344,15 +338,7 @@
   14.44  // returns delta from gotten pc to addr after
   14.45  inline int MacroAssembler::get_pc( Register d ) {
   14.46    int x = offset();
   14.47 -  if (VM_Version::v9_instructions_work())
   14.48 -    rdpc(d);
   14.49 -  else {
   14.50 -    Label lbl;
   14.51 -    Assembler::call(lbl, relocInfo::none);  // No relocation as this is call to pc+0x8
   14.52 -    if (d == O7)  delayed()->nop();
   14.53 -    else          delayed()->mov(O7, d);
   14.54 -    bind(lbl);
   14.55 -  }
   14.56 +  rdpc(d);
   14.57    return offset() - x;
   14.58  }
   14.59  
   14.60 @@ -646,41 +632,26 @@
   14.61  // returns whether membar generates anything; obviously this code should mirror
   14.62  // membar below.
   14.63  inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) {
   14.64 -  if( !os::is_MP() ) return false;  // Not needed on single CPU
   14.65 -  if( VM_Version::v9_instructions_work() ) {
   14.66 -    const Membar_mask_bits effective_mask =
   14.67 -        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   14.68 -    return (effective_mask != 0);
   14.69 -  } else {
   14.70 -    return true;
   14.71 -  }
   14.72 +  if (!os::is_MP())
   14.73 +    return false;  // Not needed on single CPU
   14.74 +  const Membar_mask_bits effective_mask =
   14.75 +      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   14.76 +  return (effective_mask != 0);
   14.77  }
   14.78  
   14.79  inline void MacroAssembler::membar( Membar_mask_bits const7a ) {
   14.80    // Uniprocessors do not need memory barriers
   14.81 -  if (!os::is_MP()) return;
   14.82 +  if (!os::is_MP())
   14.83 +    return;
   14.84    // Weakened for current Sparcs and TSO.  See the v9 manual, sections 8.4.3,
   14.85    // 8.4.4.3, a.31 and a.50.
   14.86 -  if( VM_Version::v9_instructions_work() ) {
   14.87 -    // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
   14.88 -    // of the mmask subfield of const7a that does anything that isn't done
   14.89 -    // implicitly is StoreLoad.
   14.90 -    const Membar_mask_bits effective_mask =
   14.91 -        Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
   14.92 -    if ( effective_mask != 0 ) {
   14.93 -      Assembler::membar( effective_mask );
   14.94 -    }
   14.95 -  } else {
   14.96 -    // stbar is the closest there is on v8.  Equivalent to membar(StoreStore).  We
   14.97 -    // do not issue the stbar because to my knowledge all v8 machines implement TSO,
   14.98 -    // which guarantees that all stores behave as if an stbar were issued just after
   14.99 -    // each one of them.  On these machines, stbar ought to be a nop.  There doesn't
  14.100 -    // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it,
  14.101 -    // it can't be specified by stbar, nor have I come up with a way to simulate it.
  14.102 -    //
  14.103 -    // Addendum.  Dave says that ldstub guarantees a write buffer flush to coherent
  14.104 -    // space.  Put one here to be on the safe side.
  14.105 -    Assembler::ldstub(SP, 0, G0);
  14.106 +  // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value
  14.107 +  // of the mmask subfield of const7a that does anything that isn't done
  14.108 +  // implicitly is StoreLoad.
  14.109 +  const Membar_mask_bits effective_mask =
  14.110 +      Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore));
  14.111 +  if (effective_mask != 0) {
  14.112 +    Assembler::membar(effective_mask);
  14.113    }
  14.114  }
  14.115  
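
With the ldstub fallback gone, what remains is purely the TSO filter:
under total store order every ordering except StoreLoad is already
guaranteed, so only that bit ever forces a real fence. The filtering,
restated in plain C++ with stand-in mask constants:

    // Hypothetical stand-ins for Assembler::Membar_mask_bits.
    enum MembarMask { LoadLoad = 1, StoreLoad = 2, LoadStore = 4, StoreStore = 8 };

    // Everything but StoreLoad is implicit under TSO.
    inline int tso_effective_mask(int mask) {
      return mask & ~(LoadLoad | LoadStore | StoreStore);
    }
    // tso_effective_mask(StoreStore)             == 0         -> emit nothing
    // tso_effective_mask(StoreLoad | StoreStore) == StoreLoad -> emit membar
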
  14.116 @@ -748,7 +719,7 @@
  14.117    if (offset != 0)       sub(d,  offset,                    d);
  14.118  }
  14.119  
  14.120 -inline void MacroAssembler::swap(Address& a, Register d, int offset) {
  14.121 +inline void MacroAssembler::swap(const Address& a, Register d, int offset) {
  14.122    relocate(a.rspec(offset));
  14.123    if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d        ); }
  14.124    else               {                          swap(a.base(), a.disp() + offset, d); }
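
Making the parameter const Address& is the usual const-correctness fix:
a non-const lvalue reference cannot bind to a temporary, so a call that
constructs the Address inline only compiles against the const overload.
Minimal illustration (hypothetical names):

    struct Address { int base, disp; };

    void swap_old(Address&) {}        // non-const lvalue reference
    void swap_new(const Address&) {}  // const reference

    int main() {
      // swap_old(Address{1, 2});     // error: cannot bind temporary to Address&
      swap_new(Address{1, 2});        // OK: const ref binds to a temporary
    }
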
    15.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    15.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    15.3 @@ -162,7 +162,7 @@
    15.4     int i1 = ((int*)code_buffer)[1];
    15.5     int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
    15.6     assert(inv_op(*contention_addr) == Assembler::arith_op ||
    15.7 -          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
    15.8 +          *contention_addr == nop_instruction(),
    15.9            "must not interfere with original call");
   15.10     // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   15.11     n_call->set_long_at(1*BytesPerInstWord, i1);
   15.12 @@ -181,7 +181,7 @@
   15.13     // Make sure the first-patched instruction, which may co-exist
   15.14     // briefly with the call, will do something harmless.
   15.15     assert(inv_op(*contention_addr) == Assembler::arith_op ||
   15.16 -          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
   15.17 +          *contention_addr == nop_instruction(),
   15.18            "must not interfere with original call");
   15.19  }
   15.20  
   15.21 @@ -933,11 +933,7 @@
   15.22    int code_size = 1 * BytesPerInstWord;
   15.23    CodeBuffer cb(verified_entry, code_size + 1);
   15.24    MacroAssembler* a = new MacroAssembler(&cb);
   15.25 -  if (VM_Version::v9_instructions_work()) {
   15.26 -    a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
   15.27 -  } else {
   15.28 -    a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler
   15.29 -  }
   15.30 +  a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler
   15.31    ICache::invalidate_range(verified_entry, code_size);
   15.32  }
   15.33  
   15.34 @@ -1024,7 +1020,7 @@
   15.35     int i1 = ((int*)code_buffer)[1];
   15.36     int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord);
   15.37     assert(inv_op(*contention_addr) == Assembler::arith_op ||
   15.38 -          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
   15.39 +          *contention_addr == nop_instruction(),
   15.40            "must not interfere with original call");
   15.41     // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
   15.42     h_jump->set_long_at(1*BytesPerInstWord, i1);
   15.43 @@ -1043,6 +1039,6 @@
   15.44     // Make sure the first-patched instruction, which may co-exist
   15.45     // briefly with the call, will do something harmless.
   15.46     assert(inv_op(*contention_addr) == Assembler::arith_op ||
   15.47 -          *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(),
   15.48 +          *contention_addr == nop_instruction(),
   15.49            "must not interfere with original call");
   15.50  }
    16.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    16.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    16.3 @@ -70,8 +70,7 @@
    16.4    bool is_zombie() {
    16.5      int x = long_at(0);
    16.6      return is_op3(x,
    16.7 -                  VM_Version::v9_instructions_work() ?
    16.8 -                    Assembler::ldsw_op3 : Assembler::lduw_op3,
    16.9 +                  Assembler::ldsw_op3,
   16.10                    Assembler::ldst_op)
   16.11          && Assembler::inv_rs1(x) == G0
   16.12          && Assembler::inv_rd(x) == O7;
    17.1 --- a/src/cpu/sparc/vm/register_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    17.2 +++ b/src/cpu/sparc/vm/register_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    17.3 @@ -249,12 +249,10 @@
    17.4  
    17.5        case D:
    17.6          assert(c < 64  &&  (c & 1) == 0, "bad double float register");
    17.7 -        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
    17.8          return (c & 0x1e) | ((c & 0x20) >> 5);
    17.9  
   17.10        case Q:
   17.11          assert(c < 64  &&  (c & 3) == 0, "bad quad float register");
   17.12 -        assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform");
   17.13          return (c & 0x1c) | ((c & 0x20) >> 5);
   17.14      }
   17.15      ShouldNotReachHere();
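
With the V8 guard removed, only the encoding math remains: V9 reaches
the upper half of the double-precision file (d32..d62) by folding bit 5
of the register number into bit 0 of the 5-bit field, which is free
because doubles are even-numbered. A quick check of the formula:

    #include <cassert>

    // V9 double-register encoding: (c & 0x1e) | ((c & 0x20) >> 5)
    inline int encode_double(int c) { return (c & 0x1e) | ((c & 0x20) >> 5); }

    int main() {
      assert(encode_double(0)  == 0);   // d0  -> 00000
      assert(encode_double(30) == 30);  // d30 -> 11110
      assert(encode_double(32) == 1);   // d32 -> 00001, bit 5 folded into bit 0
      assert(encode_double(62) == 31);  // d62 -> 11111
    }
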
    18.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    18.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    18.3 @@ -2459,7 +2459,7 @@
    18.4  
    18.5    // Finally just about ready to make the JNI call
    18.6  
    18.7 -  __ flush_windows();
    18.8 +  __ flushw();
    18.9    if (inner_frame_created) {
   18.10      __ restore();
   18.11    } else {
    19.1 --- a/src/cpu/sparc/vm/sparc.ad	Mon Jun 24 14:27:24 2013 -0700
    19.2 +++ b/src/cpu/sparc/vm/sparc.ad	Tue Jun 25 12:46:21 2013 -0700
    19.3 @@ -2778,10 +2778,7 @@
    19.4      Register Rold = reg_to_register_object($old$$reg);
    19.5      Register Rnew = reg_to_register_object($new$$reg);
    19.6  
    19.7 -    // casx_under_lock picks 1 of 3 encodings:
    19.8 -    // For 32-bit pointers you get a 32-bit CAS
    19.9 -    // For 64-bit pointers you get a 64-bit CASX
   19.10 -    __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
   19.11 +    __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold
   19.12      __ cmp( Rold, Rnew );
   19.13    %}
   19.14  
   19.15 @@ -3067,7 +3064,7 @@
   19.16      AddressLiteral last_rethrow_addrlit(&last_rethrow);
   19.17      __ sethi(last_rethrow_addrlit, L1);
   19.18      Address addr(L1, last_rethrow_addrlit.low10());
   19.19 -    __ get_pc(L2);
   19.20 +    __ rdpc(L2);
   19.21      __ inc(L2, 3 * BytesPerInstWord);  // skip this & 2 more insns to point at jump_to
   19.22      __ st_ptr(L2, addr);
   19.23      __ restore();
    20.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    20.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    20.3 @@ -566,7 +566,7 @@
    20.4      StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows");
    20.5      address start = __ pc();
    20.6  
    20.7 -    __ flush_windows();
    20.8 +    __ flushw();
    20.9      __ retl(false);
   20.10      __ delayed()->add( FP, STACK_BIAS, O0 );
   20.11      // The returned value must be a stack pointer whose register save area
   20.12 @@ -575,67 +575,9 @@
   20.13      return start;
   20.14    }
   20.15  
   20.16 -  // Helper functions for v8 atomic operations.
   20.17 -  //
   20.18 -  void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) {
   20.19 -    if (mark_oop_reg == noreg) {
   20.20 -      address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr();
   20.21 -      __ set((intptr_t)lock_ptr, lock_ptr_reg);
   20.22 -    } else {
   20.23 -      assert(scratch_reg != noreg, "just checking");
   20.24 -      address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache;
   20.25 -      __ set((intptr_t)lock_ptr, lock_ptr_reg);
   20.26 -      __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg);
   20.27 -      __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg);
   20.28 -    }
   20.29 -  }
   20.30 -
   20.31 -  void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
   20.32 -
   20.33 -    get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg);
   20.34 -    __ set(StubRoutines::Sparc::locked, lock_reg);
   20.35 -    // Initialize yield counter
   20.36 -    __ mov(G0,yield_reg);
   20.37 -
   20.38 -    __ BIND(retry);
   20.39 -    __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield);
   20.40 -
   20.41 -    // This code can only be called from inside the VM, this
   20.42 -    // stub is only invoked from Atomic::add().  We do not
   20.43 -    // want to use call_VM, because _last_java_sp and such
   20.44 -    // must already be set.
   20.45 -    //
   20.46 -    // Save the regs and make space for a C call
   20.47 -    __ save(SP, -96, SP);
   20.48 -    __ save_all_globals_into_locals();
   20.49 -    BLOCK_COMMENT("call os::naked_sleep");
   20.50 -    __ call(CAST_FROM_FN_PTR(address, os::naked_sleep));
   20.51 -    __ delayed()->nop();
   20.52 -    __ restore_globals_from_locals();
   20.53 -    __ restore();
   20.54 -    // reset the counter
   20.55 -    __ mov(G0,yield_reg);
   20.56 -
   20.57 -    __ BIND(dontyield);
   20.58 -
   20.59 -    // try to get lock
   20.60 -    __ swap(lock_ptr_reg, 0, lock_reg);
   20.61 -
   20.62 -    // did we get the lock?
   20.63 -    __ cmp(lock_reg, StubRoutines::Sparc::unlocked);
   20.64 -    __ br(Assembler::notEqual, true, Assembler::pn, retry);
   20.65 -    __ delayed()->add(yield_reg,1,yield_reg);
   20.66 -
   20.67 -    // yes, got lock. do the operation here.
   20.68 -  }
   20.69 -
   20.70 -  void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) {
   20.71 -    __ st(lock_reg, lock_ptr_reg, 0); // unlock
   20.72 -  }
   20.73 -
   20.74    // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest).
   20.75    //
   20.76 -  // Arguments :
   20.77 +  // Arguments:
   20.78    //
   20.79    //      exchange_value: O0
   20.80    //      dest:           O1
   20.81 @@ -656,33 +598,14 @@
   20.82        __ mov(O0, O3);       // scratch copy of exchange value
   20.83        __ ld(O1, 0, O2);     // observe the previous value
   20.84        // try to replace O2 with O3
   20.85 -      __ cas_under_lock(O1, O2, O3,
   20.86 -      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
   20.87 +      __ cas(O1, O2, O3);
   20.88        __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
   20.89  
   20.90        __ retl(false);
   20.91        __ delayed()->mov(O2, O0);  // report previous value to caller
   20.92 -
   20.93      } else {
   20.94 -      if (VM_Version::v9_instructions_work()) {
   20.95 -        __ retl(false);
   20.96 -        __ delayed()->swap(O1, 0, O0);
   20.97 -      } else {
   20.98 -        const Register& lock_reg = O2;
   20.99 -        const Register& lock_ptr_reg = O3;
  20.100 -        const Register& yield_reg = O4;
  20.101 -
  20.102 -        Label retry;
  20.103 -        Label dontyield;
  20.104 -
  20.105 -        generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
  20.106 -        // got the lock, do the swap
  20.107 -        __ swap(O1, 0, O0);
  20.108 -
  20.109 -        generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
  20.110 -        __ retl(false);
  20.111 -        __ delayed()->nop();
  20.112 -      }
  20.113 +      __ retl(false);
  20.114 +      __ delayed()->swap(O1, 0, O0);
  20.115      }
  20.116  
  20.117      return start;
  20.118 @@ -691,7 +614,7 @@
  20.119  
  20.120    // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value)
  20.121    //
  20.122 -  // Arguments :
  20.123 +  // Arguments:
  20.124    //
  20.125    //      exchange_value: O0
  20.126    //      dest:           O1
  20.127 @@ -701,15 +624,12 @@
  20.128    //
  20.129    //     O0: the value previously stored in dest
  20.130    //
  20.131 -  // Overwrites (v8): O3,O4,O5
  20.132 -  //
  20.133    address generate_atomic_cmpxchg() {
  20.134      StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg");
  20.135      address start = __ pc();
  20.136  
  20.137      // cmpxchg(dest, compare_value, exchange_value)
  20.138 -    __ cas_under_lock(O1, O2, O0,
  20.139 -      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false);
  20.140 +    __ cas(O1, O2, O0);
  20.141      __ retl(false);
  20.142      __ delayed()->nop();
  20.143  
  20.144 @@ -718,7 +638,7 @@
  20.145  
  20.146    // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value)
  20.147    //
  20.148 -  // Arguments :
  20.149 +  // Arguments:
  20.150    //
  20.151    //      exchange_value: O1:O0
  20.152    //      dest:           O2
  20.153 @@ -728,17 +648,12 @@
  20.154    //
  20.155    //     O1:O0: the value previously stored in dest
  20.156    //
  20.157 -  // This only works on V9, on V8 we don't generate any
  20.158 -  // code and just return NULL.
  20.159 -  //
  20.160    // Overwrites: G1,G2,G3
  20.161    //
  20.162    address generate_atomic_cmpxchg_long() {
  20.163      StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long");
  20.164      address start = __ pc();
  20.165  
  20.166 -    if (!VM_Version::supports_cx8())
  20.167 -        return NULL;;
  20.168      __ sllx(O0, 32, O0);
  20.169      __ srl(O1, 0, O1);
  20.170      __ or3(O0,O1,O0);      // O0 holds 64-bit value from compare_value
  20.171 @@ -756,7 +671,7 @@
  20.172  
  20.173    // Support for jint Atomic::add(jint add_value, volatile jint* dest).
  20.174    //
  20.175 -  // Arguments :
  20.176 +  // Arguments:
  20.177    //
  20.178    //      add_value: O0   (e.g., +1 or -1)
  20.179    //      dest:      O1
  20.180 @@ -765,47 +680,22 @@
  20.181    //
  20.182    //     O0: the new value stored in dest
  20.183    //
  20.184 -  // Overwrites (v9): O3
  20.185 -  // Overwrites (v8): O3,O4,O5
  20.186 +  // Overwrites: O3
  20.187    //
  20.188    address generate_atomic_add() {
  20.189      StubCodeMark mark(this, "StubRoutines", "atomic_add");
  20.190      address start = __ pc();
  20.191      __ BIND(_atomic_add_stub);
  20.192  
  20.193 -    if (VM_Version::v9_instructions_work()) {
  20.194 -      Label(retry);
  20.195 -      __ BIND(retry);
  20.196 -
  20.197 -      __ lduw(O1, 0, O2);
  20.198 -      __ add(O0, O2, O3);
  20.199 -      __ cas(O1, O2, O3);
  20.200 -      __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
  20.201 -      __ retl(false);
  20.202 -      __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
  20.203 -    } else {
  20.204 -      const Register& lock_reg = O2;
  20.205 -      const Register& lock_ptr_reg = O3;
  20.206 -      const Register& value_reg = O4;
  20.207 -      const Register& yield_reg = O5;
  20.208 -
  20.209 -      Label(retry);
  20.210 -      Label(dontyield);
  20.211 -
  20.212 -      generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
  20.213 -      // got lock, do the increment
  20.214 -      __ ld(O1, 0, value_reg);
  20.215 -      __ add(O0, value_reg, value_reg);
  20.216 -      __ st(value_reg, O1, 0);
  20.217 -
  20.218 -      // %%% only for RMO and PSO
  20.219 -      __ membar(Assembler::StoreStore);
  20.220 -
  20.221 -      generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield);
  20.222 -
  20.223 -      __ retl(false);
  20.224 -      __ delayed()->mov(value_reg, O0);
  20.225 -    }
  20.226 +    Label(retry);
  20.227 +    __ BIND(retry);
  20.228 +
  20.229 +    __ lduw(O1, 0, O2);
  20.230 +    __ add(O0, O2, O3);
  20.231 +    __ cas(O1, O2, O3);
  20.232 +    __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry);
  20.233 +    __ retl(false);
  20.234 +    __ delayed()->add(O0, O2, O0); // note that cas made O2==O3
  20.235  
  20.236      return start;
  20.237    }
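
generate_atomic_add is now unconditionally the CAS loop that used to sit
behind the v9_instructions_work() check: load, add, try to swap the sum
back, retry on contention. The same shape in C++ (a sketch; SPARC of
this era had no fetch-and-add instruction, hence the loop):

    #include <atomic>

    // jint Atomic::add via compare-and-swap, mirroring lduw/add/cas/retry.
    int atomic_add(std::atomic<int>& dest, int add_value) {
      int observed = dest.load(std::memory_order_relaxed);  // lduw(O1, 0, O2)
      for (;;) {
        int desired = observed + add_value;                 // add(O0, O2, O3)
        // cas(O1, O2, O3): on failure 'observed' picks up the competing
        // value and we go around again.
        if (dest.compare_exchange_weak(observed, desired))
          return desired;                                   // new value (O0)
      }
    }
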
  20.238 @@ -841,7 +731,7 @@
  20.239      __ mov(G3, L3);
  20.240      __ mov(G4, L4);
  20.241      __ mov(G5, L5);
  20.242 -    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
  20.243 +    for (i = 0; i < 64; i += 2) {
  20.244        __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize);
  20.245      }
  20.246  
  20.247 @@ -855,7 +745,7 @@
  20.248      __ mov(L3, G3);
  20.249      __ mov(L4, G4);
  20.250      __ mov(L5, G5);
  20.251 -    for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) {
  20.252 +    for (i = 0; i < 64; i += 2) {
  20.253        __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize);
  20.254      }
  20.255  
    21.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    21.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    21.3 @@ -52,7 +52,3 @@
    21.4  address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows);
    21.5  
    21.6  address StubRoutines::Sparc::_partial_subtype_check = NULL;
    21.7 -
    21.8 -int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked;
    21.9 -
   21.10 -int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
    22.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    22.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    22.3 @@ -47,46 +47,14 @@
    22.4  class Sparc {
    22.5   friend class StubGenerator;
    22.6  
    22.7 - public:
    22.8 -  enum { nof_instance_allocators = 10 };
    22.9 -
   22.10 -  // allocator lock values
   22.11 -  enum {
   22.12 -    unlocked = 0,
   22.13 -    locked   = 1
   22.14 -  };
   22.15 -
   22.16 -  enum {
   22.17 -    v8_oop_lock_ignore_bits = 2,
   22.18 -    v8_oop_lock_bits = 4,
   22.19 -    nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits),
   22.20 -    v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits),
   22.21 -    v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits
   22.22 -  };
   22.23 -
   22.24 -  static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries];
   22.25 -
   22.26   private:
   22.27    static address _test_stop_entry;
   22.28    static address _stop_subroutine_entry;
   22.29    static address _flush_callers_register_windows_entry;
   22.30  
   22.31 -  static int _atomic_memory_operation_lock;
   22.32 -
   22.33    static address _partial_subtype_check;
   22.34  
   22.35   public:
   22.36 -  // %%% global lock for everyone who needs to use atomic_compare_and_exchange
   22.37 -  // %%% or atomic_increment -- should probably use more locks for more
   22.38 -  // %%% scalability-- for instance one for each eden space or group of
   22.39 -
   22.40 -  // address of the lock for atomic_compare_and_exchange
   22.41 -  static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; }
   22.42 -
   22.43 -  // accessor and mutator for _atomic_memory_operation_lock
   22.44 -  static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; }
   22.45 -  static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; }
   22.46 -
   22.47    // test assembler stop routine by setting registers
   22.48    static void (*test_stop_entry()) ()                     { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); }
   22.49  
    23.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    23.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    23.3 @@ -1054,7 +1054,7 @@
    23.4    // flush the windows now. We don't care about the current (protection) frame
    23.5    // only the outer frames
    23.6  
    23.7 -  __ flush_windows();
    23.8 +  __ flushw();
    23.9  
   23.10    // mark windows as flushed
   23.11    Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
    24.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    24.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    24.3 @@ -1338,14 +1338,13 @@
    24.4  
    24.5  void TemplateTable::fneg() {
    24.6    transition(ftos, ftos);
    24.7 -  __ fneg(FloatRegisterImpl::S, Ftos_f);
    24.8 +  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
    24.9  }
   24.10  
   24.11  
   24.12  void TemplateTable::dneg() {
   24.13    transition(dtos, dtos);
   24.14 -  // v8 has fnegd if source and dest are the same
   24.15 -  __ fneg(FloatRegisterImpl::D, Ftos_f);
   24.16 +  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
   24.17  }
   24.18  
   24.19  
   24.20 @@ -1470,19 +1469,10 @@
   24.21      __ st_long(Otos_l, __ d_tmp);
   24.22      __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
   24.23  
   24.24 -    if (VM_Version::v9_instructions_work()) {
   24.25 -      if (bytecode() == Bytecodes::_l2f) {
   24.26 -        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
   24.27 -      } else {
   24.28 -        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
   24.29 -      }
   24.30 +    if (bytecode() == Bytecodes::_l2f) {
   24.31 +      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
   24.32      } else {
   24.33 -      __ call_VM_leaf(
   24.34 -        Lscratch,
   24.35 -        bytecode() == Bytecodes::_l2f
   24.36 -          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
   24.37 -          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
   24.38 -      );
   24.39 +      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
   24.40      }
   24.41      break;
   24.42  
   24.43 @@ -1490,11 +1480,6 @@
   24.44        Label isNaN;
   24.45        // result must be 0 if value is NaN; test by comparing value to itself
   24.46        __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
   24.47 -      // According to the v8 manual, you have to have a non-fp instruction
   24.48 -      // between fcmp and fb.
   24.49 -      if (!VM_Version::v9_instructions_work()) {
   24.50 -        __ nop();
   24.51 -      }
   24.52        __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
   24.53        __ delayed()->clr(Otos_i);                                     // NaN
   24.54        __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
   24.55 @@ -1537,16 +1522,7 @@
   24.56      break;
   24.57  
   24.58      case Bytecodes::_d2f:
   24.59 -    if (VM_Version::v9_instructions_work()) {
   24.60        __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
   24.61 -    }
   24.62 -    else {
   24.63 -      // must uncache tos
   24.64 -      __ push_d();
   24.65 -      __ pop_i(O0);
   24.66 -      __ pop_i(O1);
   24.67 -      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
   24.68 -    }
   24.69      break;
   24.70  
   24.71      default: ShouldNotReachHere();
   24.72 @@ -1956,17 +1932,8 @@
   24.73      __ ld( Rarray, Rscratch, Rscratch );
   24.74      // (Rscratch is already in the native byte-ordering.)
   24.75      __ cmp( Rkey, Rscratch );
   24.76 -    if ( VM_Version::v9_instructions_work() ) {
   24.77 -      __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
   24.78 -      __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
   24.79 -    }
   24.80 -    else {
   24.81 -      Label end_of_if;
   24.82 -      __ br( Assembler::less, true, Assembler::pt, end_of_if );
   24.83 -      __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
   24.84 -      __ mov( Rh, Ri );            // else i = h
   24.85 -      __ bind(end_of_if);          // }
   24.86 -    }
   24.87 +    __ movcc( Assembler::less,         false, Assembler::icc, Rh, Rj );  // j = h if (key <  array[h].fast_match())
   24.88 +    __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri );  // i = h if (key >= array[h].fast_match())
   24.89  
   24.90      // while (i+1 < j)
   24.91      __ bind( entry );
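
With V9 guaranteed, the lookupswitch binary search keeps the branch-free
form unconditionally: both interval updates are conditional moves keyed
off the same compare, so the loop body has no data-dependent branch for
the predictor to miss. The equivalent C++ (compilers typically lower the
ternaries to cmov/movcc):

    // One probe per iteration: j = h if key < a[h], else i = h --
    // exactly the two movcc instructions above.
    int lookup(const int* a, int n, int key) {
      int i = 0, j = n;
      while (i + 1 < j) {
        int h = (i + j) >> 1;        // midpoint
        j = (key <  a[h]) ? h : j;   // movcc(less,         Rh, Rj)
        i = (key >= a[h]) ? h : i;   // movcc(greaterEqual, Rh, Ri)
      }
      return i;                      // candidate slot; caller verifies a[i]
    }
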
   24.92 @@ -3418,9 +3385,7 @@
   24.93      // has been allocated.
   24.94      __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
   24.95  
   24.96 -    __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
   24.97 -      VM_Version::v9_instructions_work() ? NULL :
   24.98 -      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
   24.99 +    __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
  24.100  
  24.101      // if someone beat us on the allocation, try again, otherwise continue
  24.102      __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
  24.103 @@ -3701,14 +3666,7 @@
  24.104  
  24.105      __ verify_oop(O4);          // verify each monitor's oop
  24.106      __ tst(O4); // is this entry unused?
  24.107 -    if (VM_Version::v9_instructions_work())
  24.108 -      __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
  24.109 -    else {
  24.110 -      Label L;
  24.111 -      __ br( Assembler::zero, true, Assembler::pn, L );
  24.112 -      __ delayed()->mov(O3, O1); // remember this one if match
  24.113 -      __ bind(L);
  24.114 -    }
  24.115 +    __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
  24.116  
  24.117      __ cmp(O4, O0); // check if current entry is for same object
  24.118      __ brx( Assembler::equal, false, Assembler::pn, exit );
    25.1 --- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    25.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Tue Jun 25 12:46:21 2013 -0700
    25.3 @@ -75,23 +75,14 @@
    25.4      FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1);
    25.5    }
    25.6  
    25.7 -  if (has_v9()) {
    25.8 -    assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
    25.9 -    if (ArraycopySrcPrefetchDistance >= 4096)
   25.10 -      ArraycopySrcPrefetchDistance = 4064;
   25.11 -    assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
   25.12 -    if (ArraycopyDstPrefetchDistance >= 4096)
   25.13 -      ArraycopyDstPrefetchDistance = 4064;
   25.14 -  } else {
   25.15 -    if (ArraycopySrcPrefetchDistance > 0) {
   25.16 -      warning("prefetch instructions are not available on this CPU");
   25.17 -      FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0);
   25.18 -    }
   25.19 -    if (ArraycopyDstPrefetchDistance > 0) {
   25.20 -      warning("prefetch instructions are not available on this CPU");
   25.21 -      FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0);
   25.22 -    }
   25.23 -  }
   25.24 +  guarantee(VM_Version::has_v9(), "only SPARC v9 is supported");
   25.25 +
   25.26 +  assert(ArraycopySrcPrefetchDistance < 4096, "invalid value");
   25.27 +  if (ArraycopySrcPrefetchDistance >= 4096)
   25.28 +    ArraycopySrcPrefetchDistance = 4064;
   25.29 +  assert(ArraycopyDstPrefetchDistance < 4096, "invalid value");
   25.30 +  if (ArraycopyDstPrefetchDistance >= 4096)
   25.31 +    ArraycopyDstPrefetchDistance = 4064;
   25.32  
   25.33    UseSSE = 0; // Only on x86 and x64
   25.34  
    26.1 --- a/src/cpu/sparc/vm/vm_version_sparc.hpp	Mon Jun 24 14:27:24 2013 -0700
    26.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp	Tue Jun 25 12:46:21 2013 -0700
    26.3 @@ -177,10 +177,6 @@
    26.4      return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0;
    26.5    }
    26.6  
    26.7 -  // Legacy
    26.8 -  static bool v8_instructions_work() { return has_v8() && !has_v9(); }
    26.9 -  static bool v9_instructions_work() { return has_v9(); }
   26.10 -
   26.11    // Assembler testing
   26.12    static void allow_all();
   26.13    static void revert();
    27.1 --- a/src/cpu/x86/vm/globals_x86.hpp	Mon Jun 24 14:27:24 2013 -0700
    27.2 +++ b/src/cpu/x86/vm/globals_x86.hpp	Tue Jun 25 12:46:21 2013 -0700
    27.3 @@ -1,5 +1,5 @@
    27.4  /*
    27.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    27.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    27.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    27.8   *
    27.9   * This code is free software; you can redistribute it and/or modify it
   27.10 @@ -55,7 +55,7 @@
   27.11  define_pd_global(intx, InlineFrequencyCount,     100);
   27.12  define_pd_global(intx, InlineSmallCode,          1000);
   27.13  
   27.14 -define_pd_global(intx, StackYellowPages, 2);
   27.15 +define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3));
   27.16  define_pd_global(intx, StackRedPages, 1);
   27.17  #ifdef AMD64
   27.18  // Very large C++ stack frames using solaris-amd64 optimized builds
    28.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Mon Jun 24 14:27:24 2013 -0700
    28.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Tue Jun 25 12:46:21 2013 -0700
    28.3 @@ -1429,6 +1429,8 @@
    28.4    assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg,
    28.5           "possible collision");
    28.6  
    28.7 +  __ block_comment("unpack_array_argument {");
    28.8 +
    28.9    // Pass the length, ptr pair
   28.10    Label is_null, done;
   28.11    VMRegPair tmp;
   28.12 @@ -1453,6 +1455,8 @@
   28.13    move_ptr(masm, tmp, body_arg);
   28.14    move32_64(masm, tmp, length_arg);
   28.15    __ bind(done);
   28.16 +
   28.17 +  __ block_comment("} unpack_array_argument");
   28.18  }
   28.19  
   28.20  
   28.21 @@ -2170,27 +2174,34 @@
   28.22      }
   28.23    }
   28.24  
   28.25 -  // point c_arg at the first arg that is already loaded in case we
   28.26 -  // need to spill before we call out
   28.27 -  int c_arg = total_c_args - total_in_args;
   28.28 +  int c_arg;
   28.29  
   28.30    // Pre-load a static method's oop into r14.  Used both by locking code and
   28.31    // the normal JNI call code.
   28.32 -  if (method->is_static() && !is_critical_native) {
   28.33 -
   28.34 -    //  load oop into a register
   28.35 -    __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
   28.36 -
   28.37 -    // Now handlize the static class mirror it's known not-null.
   28.38 -    __ movptr(Address(rsp, klass_offset), oop_handle_reg);
   28.39 -    map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
   28.40 -
   28.41 -    // Now get the handle
   28.42 -    __ lea(oop_handle_reg, Address(rsp, klass_offset));
   28.43 -    // store the klass handle as second argument
   28.44 -    __ movptr(c_rarg1, oop_handle_reg);
   28.45 -    // and protect the arg if we must spill
   28.46 -    c_arg--;
   28.47 +  if (!is_critical_native) {
   28.48 +    // point c_arg at the first arg that is already loaded in case we
   28.49 +    // need to spill before we call out
   28.50 +    c_arg = total_c_args - total_in_args;
   28.51 +
   28.52 +    if (method->is_static()) {
   28.53 +
   28.54 +      //  load oop into a register
   28.55 +      __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror()));
   28.56 +
   28.57 +      // Now handlize the static class mirror it's known not-null.
   28.58 +      __ movptr(Address(rsp, klass_offset), oop_handle_reg);
   28.59 +      map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
   28.60 +
   28.61 +      // Now get the handle
   28.62 +      __ lea(oop_handle_reg, Address(rsp, klass_offset));
   28.63 +      // store the klass handle as second argument
   28.64 +      __ movptr(c_rarg1, oop_handle_reg);
   28.65 +      // and protect the arg if we must spill
   28.66 +      c_arg--;
   28.67 +    }
   28.68 +  } else {
   28.69 +    // For JNI critical methods we need to save all registers in save_args.
   28.70 +    c_arg = 0;
   28.71    }
   28.72  
   28.73    // Change state to native (we save the return address in the thread, since it might not
    29.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Mon Jun 24 14:27:24 2013 -0700
    29.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Tue Jun 25 12:46:21 2013 -0700
    29.3 @@ -83,7 +83,7 @@
    29.4   private:
    29.5  
    29.6  #ifdef PRODUCT
    29.7 -#define inc_counter_np(counter) (0)
    29.8 +#define inc_counter_np(counter) ((void)0)
    29.9  #else
   29.10    void inc_counter_np_(int& counter) {
   29.11      __ incrementl(ExternalAddress((address)&counter));
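
The PRODUCT definition changes from (0) to ((void)0) because a statement
such as inc_counter_np(foo); that evaluates to an unused int trips GCC's
-Wunused-value, while a void expression cannot. For example:

    // g++ -Wunused-value -c demo.cpp
    #define inc_counter_old(counter) (0)        // expression of type int
    #define inc_counter_new(counter) ((void)0)  // nothing left to be "unused"

    void demo() {
      inc_counter_old(x);  // warning: statement has no effect
      inc_counter_new(x);  // silent: the value is explicitly discarded
    }
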
    30.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Mon Jun 24 14:27:24 2013 -0700
    30.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Tue Jun 25 12:46:21 2013 -0700
    30.3 @@ -81,7 +81,7 @@
    30.4   private:
    30.5  
    30.6  #ifdef PRODUCT
    30.7 -#define inc_counter_np(counter) (0)
    30.8 +#define inc_counter_np(counter) ((void)0)
    30.9  #else
   30.10    void inc_counter_np_(int& counter) {
   30.11      // This can destroy rscratch1 if counter is far from the code cache
    31.1 --- a/src/os/bsd/dtrace/jvm_dtrace.c	Mon Jun 24 14:27:24 2013 -0700
    31.2 +++ b/src/os/bsd/dtrace/jvm_dtrace.c	Tue Jun 25 12:46:21 2013 -0700
    31.3 @@ -122,9 +122,7 @@
    31.4  }
    31.5  
    31.6  static int file_close(int fd) {
    31.7 -    int ret;
    31.8 -    RESTARTABLE(close(fd), ret);
    31.9 -    return ret;
   31.10 +    return close(fd);
   31.11  }
   31.12  
   31.13  static int file_read(int fd, char* buf, int len) {
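
Dropping RESTARTABLE around close() is deliberate, not cleanup: on most
Unix systems the descriptor is released even when close() fails with
EINTR, so retrying can close a descriptor number that another thread has
already been handed -- a hard-to-debug double-close race. The safe
pattern is a single call (sketch; the "treat EINTR as success" policy is
an assumption about what callers need):

    #include <cerrno>
    #include <unistd.h>

    // Call close() exactly once; after EINTR the fd may already be gone,
    // so a retry could hit a recycled descriptor.
    int file_close(int fd) {
      int ret = ::close(fd);
      if (ret == -1 && errno == EINTR) {
        return 0;  // assumption: callers only care that the fd is released
      }
      return ret;
    }
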
    32.1 --- a/src/os/bsd/vm/attachListener_bsd.cpp	Mon Jun 24 14:27:24 2013 -0700
    32.2 +++ b/src/os/bsd/vm/attachListener_bsd.cpp	Tue Jun 25 12:46:21 2013 -0700
    32.3 @@ -199,7 +199,7 @@
    32.4    ::unlink(initial_path);
    32.5    int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
    32.6    if (res == -1) {
    32.7 -    RESTARTABLE(::close(listener), res);
    32.8 +    ::close(listener);
    32.9      return -1;
   32.10    }
   32.11  
   32.12 @@ -217,7 +217,7 @@
   32.13      }
   32.14    }
   32.15    if (res == -1) {
   32.16 -    RESTARTABLE(::close(listener), res);
   32.17 +    ::close(listener);
   32.18      ::unlink(initial_path);
   32.19      return -1;
   32.20    }
   32.21 @@ -345,24 +345,21 @@
   32.22      uid_t puid;
   32.23      gid_t pgid;
   32.24      if (::getpeereid(s, &puid, &pgid) != 0) {
   32.25 -      int res;
   32.26 -      RESTARTABLE(::close(s), res);
   32.27 +      ::close(s);
   32.28        continue;
   32.29      }
   32.30      uid_t euid = geteuid();
   32.31      gid_t egid = getegid();
   32.32  
   32.33      if (puid != euid || pgid != egid) {
   32.34 -      int res;
   32.35 -      RESTARTABLE(::close(s), res);
   32.36 +      ::close(s);
   32.37        continue;
   32.38      }
   32.39  
   32.40      // peer credential look okay so we read the request
   32.41      BsdAttachOperation* op = read_request(s);
   32.42      if (op == NULL) {
   32.43 -      int res;
   32.44 -      RESTARTABLE(::close(s), res);
   32.45 +      ::close(s);
   32.46        continue;
   32.47      } else {
   32.48        return op;
   32.49 @@ -413,7 +410,7 @@
   32.50    }
   32.51  
   32.52    // done
   32.53 -  RESTARTABLE(::close(this->socket()), rc);
   32.54 +  ::close(this->socket());
   32.55  
   32.56    // were we externally suspended while we were waiting?
   32.57    thread->check_and_wait_while_suspended();
    33.1 --- a/src/os/bsd/vm/os_bsd.cpp	Mon Jun 24 14:27:24 2013 -0700
    33.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Tue Jun 25 12:46:21 2013 -0700
    33.3 @@ -2074,6 +2074,13 @@
    33.4    }
    33.5  }
    33.6  
    33.7 +static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
    33.8 +                                    int err) {
    33.9 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   33.10 +          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
   33.11 +          strerror(err), err);
   33.12 +}
   33.13 +
   33.14  // NOTE: Bsd kernel does not really reserve the pages for us.
   33.15  //       All it does is to check if there are enough free pages
   33.16  //       left at the time of mmap(). This could be a potential
   33.17 @@ -2082,18 +2089,45 @@
   33.18    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   33.19  #ifdef __OpenBSD__
   33.20    // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD
   33.21 -  return ::mprotect(addr, size, prot) == 0;
   33.22 +  if (::mprotect(addr, size, prot) == 0) {
   33.23 +    return true;
   33.24 +  }
   33.25  #else
   33.26    uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
   33.27                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   33.28 -  return res != (uintptr_t) MAP_FAILED;
   33.29 +  if (res != (uintptr_t) MAP_FAILED) {
   33.30 +    return true;
   33.31 +  }
   33.32  #endif
   33.33 +
   33.34 +  // Warn about any commit errors we see in non-product builds just
   33.35 +  // in case mmap() doesn't work as described on the man page.
   33.36 +  NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);)
   33.37 +
   33.38 +  return false;
   33.39  }
   33.40  
   33.41 -
   33.42  bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
   33.43                         bool exec) {
   33.44 -  return commit_memory(addr, size, exec);
   33.45 +  // alignment_hint is ignored on this OS
   33.46 +  return pd_commit_memory(addr, size, exec);
   33.47 +}
   33.48 +
   33.49 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
   33.50 +                                  const char* mesg) {
   33.51 +  assert(mesg != NULL, "mesg must be specified");
   33.52 +  if (!pd_commit_memory(addr, size, exec)) {
   33.53 +    // add extra info in product mode for vm_exit_out_of_memory():
   33.54 +    PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);)
   33.55 +    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
   33.56 +  }
   33.57 +}
   33.58 +
   33.59 +void os::pd_commit_memory_or_exit(char* addr, size_t size,
   33.60 +                                  size_t alignment_hint, bool exec,
   33.61 +                                  const char* mesg) {
   33.62 +  // alignment_hint is ignored on this OS
   33.63 +  pd_commit_memory_or_exit(addr, size, exec, mesg);
   33.64  }
   33.65  
   33.66  void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
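
The new pd_commit_memory_or_exit entry points give callers that cannot
survive a commit failure a way to die with a descriptive message (via
vm_exit_out_of_memory) instead of limping on; recoverable callers keep
using the bool-returning pd_commit_memory. A stand-alone sketch of the
two-tier contract (stand-in implementations, not HotSpot's):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <sys/mman.h>

    // Recoverable tier: report failure and let the caller back off.
    static bool commit_memory(char* addr, size_t size, bool exec) {
      int prot = exec ? PROT_READ | PROT_WRITE | PROT_EXEC
                      : PROT_READ | PROT_WRITE;
      return ::mmap(addr, size, prot,
                    MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0) != MAP_FAILED;
    }

    // Fatal tier: commit or abort with a message for the error report.
    static void commit_memory_or_exit(char* addr, size_t size, bool exec,
                                      const char* mesg) {
      if (!commit_memory(addr, size, exec)) {
        std::fprintf(stderr, "out of memory: %s\n", mesg);  // vm_exit_out_of_memory
        std::exit(1);
      }
    }
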
   33.67 @@ -2148,7 +2182,7 @@
   33.68  }
   33.69  
   33.70  bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   33.71 -  return os::commit_memory(addr, size);
   33.72 +  return os::commit_memory(addr, size, !ExecMem);
   33.73  }
   33.74  
   33.75  // If this is a growable mapping, remove the guard pages entirely by
   33.76 @@ -2320,21 +2354,20 @@
   33.77    }
   33.78  
   33.79    // The memory is committed
   33.80 -  address pc = CALLER_PC;
   33.81 -  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
   33.82 -  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
   33.83 +  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
   33.84  
   33.85    return addr;
   33.86  }
   33.87  
   33.88  bool os::release_memory_special(char* base, size_t bytes) {
   33.89 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   33.90    // detaching the SHM segment will also delete it, see reserve_memory_special()
   33.91    int rslt = shmdt(base);
   33.92    if (rslt == 0) {
   33.93 -    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
   33.94 -    MemTracker::record_virtual_memory_release((address)base, bytes);
   33.95 +    tkr.record((address)base, bytes);
   33.96      return true;
   33.97    } else {
   33.98 +    tkr.discard();
   33.99      return false;
  33.100    }
  33.101  
  33.102 @@ -3512,7 +3545,7 @@
  33.103  
  33.104    if (!UseMembar) {
  33.105      address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  33.106 -    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  33.107 +    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
  33.108      os::set_memory_serialize_page( mem_serialize_page );
  33.109  
  33.110  #ifndef PRODUCT
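
The guarantee fix in the hunk above matters because mmap() signals failure with MAP_FAILED, which is (void*) -1, not NULL, so the old NULL comparison passed even when the mapping had failed. A minimal stand-alone illustration of the correct check; map_one_page() is a hypothetical helper, not part of this change:

    #include <sys/mman.h>
    #include <assert.h>
    #include <stddef.h>

    // Hypothetical helper showing the correct mmap() failure check.
    static char* map_one_page(size_t page_size) {
      void* p = ::mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      // mmap() returns MAP_FAILED ((void*) -1) on error; the API never
      // uses NULL to signal failure, so "p != NULL" misses real failures.
      assert(p != MAP_FAILED && "mmap failed");
      return (char*) p;
    }
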
    34.1 --- a/src/os/bsd/vm/os_bsd.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
    34.2 +++ b/src/os/bsd/vm/os_bsd.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
    34.3 @@ -178,11 +178,11 @@
    34.4  }
    34.5  
    34.6  inline int os::close(int fd) {
    34.7 -  RESTARTABLE_RETURN_INT(::close(fd));
    34.8 +  return ::close(fd);
    34.9  }
   34.10  
   34.11  inline int os::socket_close(int fd) {
   34.12 -  RESTARTABLE_RETURN_INT(::close(fd));
   34.13 +  return ::close(fd);
   34.14  }
   34.15  
   34.16  inline int os::socket(int domain, int type, int protocol) {
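
Dropping RESTARTABLE around ::close() follows from how close() behaves on EINTR: on Linux, and in POSIX generally, the state of the descriptor is unspecified after an interrupted close, and the kernel may already have released it. Retrying can then close a descriptor that was just handed to another thread. A sketch of the hazard and the resulting idiom, using a hypothetical close_once() helper:

    #include <unistd.h>
    #include <errno.h>

    // Hypothetical helper: close exactly once. Retrying on EINTR risks
    // this interleaving in a multi-threaded process:
    //   thread A: close(5) -> EINTR, but the kernel already freed fd 5
    //   thread B: open(...) -> kernel reuses fd 5
    //   thread A: close(5) again -> closes B's file out from under it
    static int close_once(int fd) {
      int res = ::close(fd);
      // Treat EINTR as "closed": the fd must not be used or re-closed.
      if (res == -1 && errno == EINTR) {
        return 0;
      }
      return res;
    }
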
    35.1 --- a/src/os/bsd/vm/perfMemory_bsd.cpp	Mon Jun 24 14:27:24 2013 -0700
    35.2 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp	Tue Jun 25 12:46:21 2013 -0700
    35.3 @@ -1,5 +1,5 @@
    35.4  /*
    35.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    35.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    35.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    35.8   *
    35.9   * This code is free software; you can redistribute it and/or modify it
   35.10 @@ -60,7 +60,7 @@
   35.11    }
   35.12  
   35.13    // commit memory
   35.14 -  if (!os::commit_memory(mapAddress, size)) {
   35.15 +  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
   35.16      if (PrintMiscellaneous && Verbose) {
   35.17        warning("Could not commit PerfData memory\n");
   35.18      }
   35.19 @@ -120,7 +120,7 @@
   35.20        addr += result;
   35.21      }
   35.22  
   35.23 -    RESTARTABLE(::close(fd), result);
   35.24 +    result = ::close(fd);
   35.25      if (PrintMiscellaneous && Verbose) {
   35.26        if (result == OS_ERR) {
   35.27          warning("Could not close %s: %s\n", destfile, strerror(errno));
   35.28 @@ -632,7 +632,7 @@
   35.29      if (PrintMiscellaneous && Verbose) {
   35.30        warning("could not set shared memory file size: %s\n", strerror(errno));
   35.31      }
   35.32 -    RESTARTABLE(::close(fd), result);
   35.33 +    ::close(fd);
   35.34      return -1;
   35.35    }
   35.36  
   35.37 @@ -656,7 +656,7 @@
   35.38    if (result != -1) {
   35.39      return fd;
   35.40    } else {
   35.41 -    RESTARTABLE(::close(fd), result);
   35.42 +    ::close(fd);
   35.43      return -1;
   35.44    }
   35.45  }
   35.46 @@ -734,9 +734,7 @@
   35.47  
   35.48    mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
   35.49  
   35.50 -  // attempt to close the file - restart it if it was interrupted,
   35.51 -  // but ignore other failures
   35.52 -  RESTARTABLE(::close(fd), result);
   35.53 +  result = ::close(fd);
   35.54    assert(result != OS_ERR, "could not close file");
   35.55  
   35.56    if (mapAddress == MAP_FAILED) {
   35.57 @@ -755,8 +753,7 @@
   35.58    (void)::memset((void*) mapAddress, 0, size);
   35.59  
    35.60    // this does not go through the os api, so the operation has to be recorded here
   35.61 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   35.62 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   35.63 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   35.64  
   35.65    return mapAddress;
   35.66  }
    35.67 @@ -909,7 +906,5 @@
    35.68  
    35.69 -  // attempt to close the file - restart if it gets interrupted,
    35.70 -  // but ignore other failures
   35.71 -  RESTARTABLE(::close(fd), result);
   35.72 +  result = ::close(fd);
   35.73    assert(result != OS_ERR, "could not close file");
   35.74  
   35.75    if (mapAddress == MAP_FAILED) {
   35.76 @@ -921,8 +918,7 @@
   35.77    }
   35.78  
    35.79    // this does not go through the os api, so the operation has to be recorded here
   35.80 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   35.81 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   35.82 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   35.83  
   35.84    *addr = mapAddress;
   35.85    *sizep = size;
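
The MemTracker call sites in this file also change shape: the old two-step bookkeeping, record the reservation and then tag its memory type in a second call, becomes one call that carries the type along with the PC. A sketch of the call-site difference; record_reserve and MemType here are stand-ins for the MemTracker API, not real HotSpot names:

    #include <cstddef>

    // Stand-ins for the NMT recording API, showing the call-site shape only.
    enum MemType { mtNone, mtInternal };
    static void record_reserve(void* addr, size_t size, MemType type, void* pc) {
      // real bookkeeping lives in MemTracker; this is only a placeholder
      (void) addr; (void) size; (void) type; (void) pc;
    }

    static void register_mapping(void* addr, size_t size, void* pc) {
      // before (two calls, the type tagged separately):
      //   record_reserve(addr, size, pc); record_type(addr, mtInternal);
      // after (one call carries the type with the reservation):
      record_reserve(addr, size, mtInternal, pc);
    }
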
    36.1 --- a/src/os/linux/vm/attachListener_linux.cpp	Mon Jun 24 14:27:24 2013 -0700
    36.2 +++ b/src/os/linux/vm/attachListener_linux.cpp	Tue Jun 25 12:46:21 2013 -0700
    36.3 @@ -199,7 +199,7 @@
    36.4    ::unlink(initial_path);
    36.5    int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
    36.6    if (res == -1) {
    36.7 -    RESTARTABLE(::close(listener), res);
    36.8 +    ::close(listener);
    36.9      return -1;
   36.10    }
   36.11  
   36.12 @@ -212,7 +212,7 @@
   36.13        }
   36.14    }
   36.15    if (res == -1) {
   36.16 -    RESTARTABLE(::close(listener), res);
   36.17 +    ::close(listener);
   36.18      ::unlink(initial_path);
   36.19      return -1;
   36.20    }
   36.21 @@ -340,24 +340,21 @@
   36.22      struct ucred cred_info;
   36.23      socklen_t optlen = sizeof(cred_info);
   36.24      if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) {
   36.25 -      int res;
   36.26 -      RESTARTABLE(::close(s), res);
   36.27 +      ::close(s);
   36.28        continue;
   36.29      }
   36.30      uid_t euid = geteuid();
   36.31      gid_t egid = getegid();
   36.32  
   36.33      if (cred_info.uid != euid || cred_info.gid != egid) {
   36.34 -      int res;
   36.35 -      RESTARTABLE(::close(s), res);
   36.36 +      ::close(s);
   36.37        continue;
   36.38      }
   36.39  
    36.40      // peer credentials look okay, so we read the request
   36.41      LinuxAttachOperation* op = read_request(s);
   36.42      if (op == NULL) {
   36.43 -      int res;
   36.44 -      RESTARTABLE(::close(s), res);
   36.45 +      ::close(s);
   36.46        continue;
   36.47      } else {
   36.48        return op;
   36.49 @@ -408,7 +405,7 @@
   36.50    }
   36.51  
   36.52    // done
   36.53 -  RESTARTABLE(::close(this->socket()), rc);
   36.54 +  ::close(this->socket());
   36.55  
   36.56    // were we externally suspended while we were waiting?
   36.57    thread->check_and_wait_while_suspended();
    37.1 --- a/src/os/linux/vm/os_linux.cpp	Mon Jun 24 14:27:24 2013 -0700
    37.2 +++ b/src/os/linux/vm/os_linux.cpp	Tue Jun 25 12:46:21 2013 -0700
    37.3 @@ -2612,11 +2612,49 @@
    37.4    }
    37.5  }
    37.6  
    37.7 +static bool recoverable_mmap_error(int err) {
    37.8 +  // See if the error is one we can let the caller handle. This
    37.9 +  // list of errno values comes from JBS-6843484. I can't find a
   37.10 +  // Linux man page that documents this specific set of errno
   37.11 +  // values so while this list currently matches Solaris, it may
   37.12 +  // change as we gain experience with this failure mode.
   37.13 +  switch (err) {
   37.14 +  case EBADF:
   37.15 +  case EINVAL:
   37.16 +  case ENOTSUP:
   37.17 +    // let the caller deal with these errors
   37.18 +    return true;
   37.19 +
   37.20 +  default:
   37.21 +    // Any remaining errors on this OS can cause our reserved mapping
   37.22 +    // to be lost. That can cause confusion where different data
   37.23 +    // structures think they have the same memory mapped. The worst
   37.24 +    // scenario is if both the VM and a library think they have the
   37.25 +    // same memory mapped.
   37.26 +    return false;
   37.27 +  }
   37.28 +}
   37.29 +
   37.30 +static void warn_fail_commit_memory(char* addr, size_t size, bool exec,
   37.31 +                                    int err) {
   37.32 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   37.33 +          ", %d) failed; error='%s' (errno=%d)", addr, size, exec,
   37.34 +          strerror(err), err);
   37.35 +}
   37.36 +
   37.37 +static void warn_fail_commit_memory(char* addr, size_t size,
   37.38 +                                    size_t alignment_hint, bool exec,
   37.39 +                                    int err) {
   37.40 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   37.41 +          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size,
   37.42 +          alignment_hint, exec, strerror(err), err);
   37.43 +}
   37.44 +
   37.45  // NOTE: Linux kernel does not really reserve the pages for us.
   37.46  //       All it does is to check if there are enough free pages
   37.47  //       left at the time of mmap(). This could be a potential
   37.48  //       problem.
   37.49 -bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   37.50 +int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) {
   37.51    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   37.52    uintptr_t res = (uintptr_t) ::mmap(addr, size, prot,
   37.53                                     MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0);
   37.54 @@ -2624,9 +2662,32 @@
   37.55      if (UseNUMAInterleaving) {
   37.56        numa_make_global(addr, size);
   37.57      }
   37.58 -    return true;
   37.59 -  }
   37.60 -  return false;
   37.61 +    return 0;
   37.62 +  }
   37.63 +
   37.64 +  int err = errno;  // save errno from mmap() call above
   37.65 +
   37.66 +  if (!recoverable_mmap_error(err)) {
   37.67 +    warn_fail_commit_memory(addr, size, exec, err);
   37.68 +    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory.");
   37.69 +  }
   37.70 +
   37.71 +  return err;
   37.72 +}
   37.73 +
   37.74 +bool os::pd_commit_memory(char* addr, size_t size, bool exec) {
   37.75 +  return os::Linux::commit_memory_impl(addr, size, exec) == 0;
   37.76 +}
   37.77 +
   37.78 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
   37.79 +                                  const char* mesg) {
   37.80 +  assert(mesg != NULL, "mesg must be specified");
   37.81 +  int err = os::Linux::commit_memory_impl(addr, size, exec);
   37.82 +  if (err != 0) {
   37.83 +    // the caller wants all commit errors to exit with the specified mesg:
   37.84 +    warn_fail_commit_memory(addr, size, exec, err);
   37.85 +    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
   37.86 +  }
   37.87  }
   37.88  
   37.89  // Define MAP_HUGETLB here so we can build HotSpot on old systems.
   37.90 @@ -2639,8 +2700,9 @@
   37.91  #define MADV_HUGEPAGE 14
   37.92  #endif
   37.93  
   37.94 -bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
   37.95 -                       bool exec) {
   37.96 +int os::Linux::commit_memory_impl(char* addr, size_t size,
   37.97 +                                  size_t alignment_hint, bool exec) {
   37.98 +  int err;
   37.99    if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) {
  37.100      int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
  37.101      uintptr_t res =
  37.102 @@ -2651,16 +2713,46 @@
  37.103        if (UseNUMAInterleaving) {
  37.104          numa_make_global(addr, size);
  37.105        }
  37.106 -      return true;
  37.107 +      return 0;
  37.108 +    }
  37.109 +
  37.110 +    err = errno;  // save errno from mmap() call above
  37.111 +
  37.112 +    if (!recoverable_mmap_error(err)) {
  37.113 +      // However, it is not clear that this loss of our reserved mapping
  37.114 +      // happens with large pages on Linux or that we cannot recover
  37.115 +      // from the loss. For now, we just issue a warning and we don't
  37.116 +      // call vm_exit_out_of_memory(). This issue is being tracked by
  37.117 +      // JBS-8007074.
  37.118 +      warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
  37.119 +//    vm_exit_out_of_memory(size, OOM_MMAP_ERROR,
  37.120 +//                          "committing reserved memory.");
  37.121      }
  37.122      // Fall through and try to use small pages
  37.123    }
  37.124  
  37.125 -  if (commit_memory(addr, size, exec)) {
  37.126 +  err = os::Linux::commit_memory_impl(addr, size, exec);
  37.127 +  if (err == 0) {
  37.128      realign_memory(addr, size, alignment_hint);
  37.129 -    return true;
  37.130 -  }
  37.131 -  return false;
  37.132 +  }
  37.133 +  return err;
  37.134 +}
  37.135 +
  37.136 +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
  37.137 +                          bool exec) {
  37.138 +  return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0;
  37.139 +}
  37.140 +
  37.141 +void os::pd_commit_memory_or_exit(char* addr, size_t size,
  37.142 +                                  size_t alignment_hint, bool exec,
  37.143 +                                  const char* mesg) {
  37.144 +  assert(mesg != NULL, "mesg must be specified");
  37.145 +  int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec);
  37.146 +  if (err != 0) {
  37.147 +    // the caller wants all commit errors to exit with the specified mesg:
  37.148 +    warn_fail_commit_memory(addr, size, alignment_hint, exec, err);
  37.149 +    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  37.150 +  }
  37.151  }
  37.152  
  37.153  void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) {
  37.154 @@ -2678,7 +2770,7 @@
  37.155    // small pages on top of the SHM segment. This method always works for small pages, so we
  37.156    // allow that in any case.
  37.157    if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) {
  37.158 -    commit_memory(addr, bytes, alignment_hint, false);
  37.159 +    commit_memory(addr, bytes, alignment_hint, !ExecMem);
  37.160    }
  37.161  }
  37.162  
  37.163 @@ -2931,7 +3023,7 @@
  37.164        ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent);
  37.165    }
  37.166  
  37.167 -  return os::commit_memory(addr, size);
  37.168 +  return os::commit_memory(addr, size, !ExecMem);
  37.169  }
  37.170  
  37.171  // If this is a growable mapping, remove the guard pages entirely by
  37.172 @@ -3053,7 +3145,7 @@
  37.173                    MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB,
  37.174                    -1, 0);
  37.175  
  37.176 -  if (p != (void *) -1) {
  37.177 +  if (p != MAP_FAILED) {
  37.178      // We don't know if this really is a huge page or not.
  37.179      FILE *fp = fopen("/proc/self/maps", "r");
  37.180      if (fp) {
  37.181 @@ -3271,22 +3363,21 @@
  37.182    }
  37.183  
  37.184    // The memory is committed
  37.185 -  address pc = CALLER_PC;
  37.186 -  MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc);
  37.187 -  MemTracker::record_virtual_memory_commit((address)addr, bytes, pc);
  37.188 +  MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC);
  37.189  
  37.190    return addr;
  37.191  }
  37.192  
  37.193  bool os::release_memory_special(char* base, size_t bytes) {
  37.194 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  37.195    // detaching the SHM segment will also delete it, see reserve_memory_special()
  37.196    int rslt = shmdt(base);
  37.197    if (rslt == 0) {
  37.198 -    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
  37.199 -    MemTracker::record_virtual_memory_release((address)base, bytes);
  37.200 +    tkr.record((address)base, bytes);
  37.201      return true;
  37.202    } else {
  37.203 -   return false;
  37.204 +    tkr.discard();
  37.205 +    return false;
  37.206    }
  37.207  }
  37.208  
  37.209 @@ -4393,7 +4484,7 @@
  37.210  
  37.211    if (!UseMembar) {
  37.212      address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  37.213 -    guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page");
  37.214 +    guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page");
  37.215      os::set_memory_serialize_page( mem_serialize_page );
  37.216  
  37.217  #ifndef PRODUCT
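
The Linux hunks above establish the pattern the other platforms repeat: a single commit_memory_impl() performs the mmap() and classifies errno; a recoverable error is returned for the caller to handle, while an unrecoverable one, which may have destroyed the reserved mapping, exits immediately; thin wrappers then expose a bool-returning commit and an exit-with-message variant. A condensed sketch of that control flow, with exit_oom() standing in for vm_exit_out_of_memory():

    #include <sys/mman.h>
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    // Placeholder for vm_exit_out_of_memory(); never returns.
    static void exit_oom(size_t size, const char* mesg) {
      fprintf(stderr, "out of memory (%zu bytes): %s\n", size, mesg);
      exit(1);
    }

    static bool recoverable(int err) {
      // errors the caller can handle; anything else may have torn down
      // the reserved mapping, which is not survivable
      return err == EBADF || err == EINVAL || err == ENOTSUP;
    }

    // Returns 0 on success, a recoverable errno otherwise; exits the
    // process on a non-recoverable error.
    static int commit_impl(char* addr, size_t size, int prot) {
      void* res = mmap(addr, size, prot,
                       MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
      if (res != MAP_FAILED) return 0;
      int err = errno;  // save errno before any other library call
      if (!recoverable(err)) {
        exit_oom(size, "committing reserved memory.");
      }
      return err;
    }

    static bool commit(char* addr, size_t size, int prot) {
      return commit_impl(addr, size, prot) == 0;
    }

    static void commit_or_exit(char* addr, size_t size, int prot,
                               const char* mesg) {
      if (commit_impl(addr, size, prot) != 0) {
        exit_oom(size, mesg);  // all failures exit with the caller's message
      }
    }
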
    38.1 --- a/src/os/linux/vm/os_linux.hpp	Mon Jun 24 14:27:24 2013 -0700
    38.2 +++ b/src/os/linux/vm/os_linux.hpp	Tue Jun 25 12:46:21 2013 -0700
    38.3 @@ -76,6 +76,10 @@
    38.4    static julong physical_memory() { return _physical_memory; }
    38.5    static void initialize_system_info();
    38.6  
    38.7 +  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
    38.8 +  static int commit_memory_impl(char* addr, size_t bytes,
    38.9 +                                size_t alignment_hint, bool exec);
   38.10 +
   38.11    static void set_glibc_version(const char *s)      { _glibc_version = s; }
   38.12    static void set_libpthread_version(const char *s) { _libpthread_version = s; }
   38.13  
    39.1 --- a/src/os/linux/vm/perfMemory_linux.cpp	Mon Jun 24 14:27:24 2013 -0700
    39.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp	Tue Jun 25 12:46:21 2013 -0700
    39.3 @@ -1,5 +1,5 @@
    39.4  /*
    39.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    39.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    39.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    39.8   *
    39.9   * This code is free software; you can redistribute it and/or modify it
   39.10 @@ -60,7 +60,7 @@
   39.11    }
   39.12  
   39.13    // commit memory
   39.14 -  if (!os::commit_memory(mapAddress, size)) {
   39.15 +  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
   39.16      if (PrintMiscellaneous && Verbose) {
   39.17        warning("Could not commit PerfData memory\n");
   39.18      }
   39.19 @@ -120,7 +120,7 @@
   39.20        addr += result;
   39.21      }
   39.22  
   39.23 -    RESTARTABLE(::close(fd), result);
   39.24 +    result = ::close(fd);
   39.25      if (PrintMiscellaneous && Verbose) {
   39.26        if (result == OS_ERR) {
   39.27          warning("Could not close %s: %s\n", destfile, strerror(errno));
   39.28 @@ -632,7 +632,7 @@
   39.29      if (PrintMiscellaneous && Verbose) {
   39.30        warning("could not set shared memory file size: %s\n", strerror(errno));
   39.31      }
   39.32 -    RESTARTABLE(::close(fd), result);
   39.33 +    ::close(fd);
   39.34      return -1;
   39.35    }
   39.36  
   39.37 @@ -656,7 +656,7 @@
   39.38    if (result != -1) {
   39.39      return fd;
   39.40    } else {
   39.41 -    RESTARTABLE(::close(fd), result);
   39.42 +    ::close(fd);
   39.43      return -1;
   39.44    }
   39.45  }
   39.46 @@ -734,9 +734,7 @@
   39.47  
   39.48    mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
   39.49  
   39.50 -  // attempt to close the file - restart it if it was interrupted,
   39.51 -  // but ignore other failures
   39.52 -  RESTARTABLE(::close(fd), result);
   39.53 +  result = ::close(fd);
   39.54    assert(result != OS_ERR, "could not close file");
   39.55  
   39.56    if (mapAddress == MAP_FAILED) {
   39.57 @@ -755,8 +753,7 @@
   39.58    (void)::memset((void*) mapAddress, 0, size);
   39.59  
    39.60    // this does not go through the os api, so the operation has to be recorded here
   39.61 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   39.62 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   39.63 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   39.64  
   39.65    return mapAddress;
   39.66  }
   39.67 @@ -907,9 +904,7 @@
   39.68  
   39.69    mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
   39.70  
   39.71 -  // attempt to close the file - restart if it gets interrupted,
   39.72 -  // but ignore other failures
   39.73 -  RESTARTABLE(::close(fd), result);
   39.74 +  result = ::close(fd);
   39.75    assert(result != OS_ERR, "could not close file");
   39.76  
   39.77    if (mapAddress == MAP_FAILED) {
   39.78 @@ -921,8 +916,7 @@
   39.79    }
   39.80  
    39.81    // this does not go through the os api, so the operation has to be recorded here
   39.82 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   39.83 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   39.84 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   39.85  
   39.86    *addr = mapAddress;
   39.87    *sizep = size;
    40.1 --- a/src/os/solaris/dtrace/jvm_dtrace.c	Mon Jun 24 14:27:24 2013 -0700
    40.2 +++ b/src/os/solaris/dtrace/jvm_dtrace.c	Tue Jun 25 12:46:21 2013 -0700
    40.3 @@ -122,9 +122,7 @@
    40.4  }
    40.5  
    40.6  static int file_close(int fd) {
    40.7 -    int ret;
    40.8 -    RESTARTABLE(close(fd), ret);
    40.9 -    return ret;
   40.10 +    return close(fd);
   40.11  }
   40.12  
   40.13  static int file_read(int fd, char* buf, int len) {
    41.1 --- a/src/os/solaris/vm/attachListener_solaris.cpp	Mon Jun 24 14:27:24 2013 -0700
    41.2 +++ b/src/os/solaris/vm/attachListener_solaris.cpp	Tue Jun 25 12:46:21 2013 -0700
    41.3 @@ -392,7 +392,7 @@
    41.4      return -1;
    41.5    }
    41.6    assert(fd >= 0, "bad file descriptor");
    41.7 -  RESTARTABLE(::close(fd), res);
    41.8 +  ::close(fd);
    41.9  
   41.10    // attach the door descriptor to the file
   41.11    if ((res = ::fattach(dd, initial_path)) == -1) {
   41.12 @@ -410,7 +410,7 @@
   41.13    // rename file so that clients can attach
   41.14    if (dd >= 0) {
   41.15      if (::rename(initial_path, door_path) == -1) {
   41.16 -        RESTARTABLE(::close(dd), res);
   41.17 +        ::close(dd);
   41.18          ::fdetach(initial_path);
   41.19          dd = -1;
   41.20      }
   41.21 @@ -549,7 +549,7 @@
   41.22      }
   41.23  
   41.24      // close socket and we're done
   41.25 -    RESTARTABLE(::close(this->socket()), rc);
   41.26 +    ::close(this->socket());
   41.27  
   41.28      // were we externally suspended while we were waiting?
   41.29      thread->check_and_wait_while_suspended();
    42.1 --- a/src/os/solaris/vm/os_solaris.cpp	Mon Jun 24 14:27:24 2013 -0700
    42.2 +++ b/src/os/solaris/vm/os_solaris.cpp	Tue Jun 25 12:46:21 2013 -0700
    42.3 @@ -2784,7 +2784,42 @@
    42.4    return page_size;
    42.5  }
    42.6  
    42.7 -bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
    42.8 +static bool recoverable_mmap_error(int err) {
    42.9 +  // See if the error is one we can let the caller handle. This
   42.10 +  // list of errno values comes from the Solaris mmap(2) man page.
   42.11 +  switch (err) {
   42.12 +  case EBADF:
   42.13 +  case EINVAL:
   42.14 +  case ENOTSUP:
   42.15 +    // let the caller deal with these errors
   42.16 +    return true;
   42.17 +
   42.18 +  default:
   42.19 +    // Any remaining errors on this OS can cause our reserved mapping
   42.20 +    // to be lost. That can cause confusion where different data
   42.21 +    // structures think they have the same memory mapped. The worst
   42.22 +    // scenario is if both the VM and a library think they have the
   42.23 +    // same memory mapped.
   42.24 +    return false;
   42.25 +  }
   42.26 +}
   42.27 +
   42.28 +static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec,
   42.29 +                                    int err) {
   42.30 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   42.31 +          ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec,
   42.32 +          strerror(err), err);
   42.33 +}
   42.34 +
   42.35 +static void warn_fail_commit_memory(char* addr, size_t bytes,
   42.36 +                                    size_t alignment_hint, bool exec,
   42.37 +                                    int err) {
   42.38 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   42.39 +          ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes,
   42.40 +          alignment_hint, exec, strerror(err), err);
   42.41 +}
   42.42 +
   42.43 +int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) {
   42.44    int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE;
   42.45    size_t size = bytes;
   42.46    char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot);
   42.47 @@ -2792,14 +2827,38 @@
   42.48      if (UseNUMAInterleaving) {
   42.49        numa_make_global(addr, bytes);
   42.50      }
   42.51 -    return true;
   42.52 -  }
   42.53 -  return false;
   42.54 -}
   42.55 -
   42.56 -bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
   42.57 -                       bool exec) {
   42.58 -  if (commit_memory(addr, bytes, exec)) {
   42.59 +    return 0;
   42.60 +  }
   42.61 +
   42.62 +  int err = errno;  // save errno from mmap() call in mmap_chunk()
   42.63 +
   42.64 +  if (!recoverable_mmap_error(err)) {
   42.65 +    warn_fail_commit_memory(addr, bytes, exec, err);
   42.66 +    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory.");
   42.67 +  }
   42.68 +
   42.69 +  return err;
   42.70 +}
   42.71 +
   42.72 +bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   42.73 +  return Solaris::commit_memory_impl(addr, bytes, exec) == 0;
   42.74 +}
   42.75 +
   42.76 +void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec,
   42.77 +                                  const char* mesg) {
   42.78 +  assert(mesg != NULL, "mesg must be specified");
   42.79 +  int err = os::Solaris::commit_memory_impl(addr, bytes, exec);
   42.80 +  if (err != 0) {
   42.81 +    // the caller wants all commit errors to exit with the specified mesg:
   42.82 +    warn_fail_commit_memory(addr, bytes, exec, err);
   42.83 +    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
   42.84 +  }
   42.85 +}
   42.86 +
   42.87 +int os::Solaris::commit_memory_impl(char* addr, size_t bytes,
   42.88 +                                    size_t alignment_hint, bool exec) {
   42.89 +  int err = Solaris::commit_memory_impl(addr, bytes, exec);
   42.90 +  if (err == 0) {
   42.91      if (UseMPSS && alignment_hint > (size_t)vm_page_size()) {
   42.92        // If the large page size has been set and the VM
   42.93        // is using large pages, use the large page size
   42.94 @@ -2821,9 +2880,25 @@
   42.95        // Since this is a hint, ignore any failures.
   42.96        (void)Solaris::set_mpss_range(addr, bytes, page_size);
   42.97      }
   42.98 -    return true;
   42.99 -  }
  42.100 -  return false;
  42.101 +  }
  42.102 +  return err;
  42.103 +}
  42.104 +
  42.105 +bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint,
  42.106 +                          bool exec) {
  42.107 +  return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0;
  42.108 +}
  42.109 +
  42.110 +void os::pd_commit_memory_or_exit(char* addr, size_t bytes,
  42.111 +                                  size_t alignment_hint, bool exec,
  42.112 +                                  const char* mesg) {
  42.113 +  assert(mesg != NULL, "mesg must be specified");
  42.114 +  int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec);
  42.115 +  if (err != 0) {
  42.116 +    // the caller wants all commit errors to exit with the specified mesg:
  42.117 +    warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err);
  42.118 +    vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg);
  42.119 +  }
  42.120  }
  42.121  
  42.122  // Uncommit the pages in a specified region.
  42.123 @@ -2835,7 +2910,7 @@
  42.124  }
  42.125  
  42.126  bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  42.127 -  return os::commit_memory(addr, size);
  42.128 +  return os::commit_memory(addr, size, !ExecMem);
  42.129  }
  42.130  
  42.131  bool os::remove_stack_guard_pages(char* addr, size_t size) {
  42.132 @@ -3457,22 +3532,21 @@
  42.133    }
  42.134  
  42.135    // The memory is committed
  42.136 -  address pc = CALLER_PC;
  42.137 -  MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc);
  42.138 -  MemTracker::record_virtual_memory_commit((address)retAddr, size, pc);
  42.139 +  MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC);
  42.140  
  42.141    return retAddr;
  42.142  }
  42.143  
  42.144  bool os::release_memory_special(char* base, size_t bytes) {
  42.145 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  42.146    // detaching the SHM segment will also delete it, see reserve_memory_special()
  42.147    int rslt = shmdt(base);
  42.148    if (rslt == 0) {
  42.149 -    MemTracker::record_virtual_memory_uncommit((address)base, bytes);
  42.150 -    MemTracker::record_virtual_memory_release((address)base, bytes);
  42.151 +    tkr.record((address)base, bytes);
  42.152      return true;
  42.153    } else {
  42.154 -   return false;
  42.155 +    tkr.discard();
  42.156 +    return false;
  42.157    }
  42.158  }
  42.159  
  42.160 @@ -6604,11 +6678,11 @@
  42.161  }
  42.162  
  42.163  int os::close(int fd) {
  42.164 -  RESTARTABLE_RETURN_INT(::close(fd));
  42.165 +  return ::close(fd);
  42.166  }
  42.167  
  42.168  int os::socket_close(int fd) {
  42.169 -  RESTARTABLE_RETURN_INT(::close(fd));
  42.170 +  return ::close(fd);
  42.171  }
  42.172  
  42.173  int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
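
release_memory_special() on all three Unix ports now obtains an NMT release tracker before the shmdt() call and either commits or abandons the record based on the syscall's result, instead of logging the uncommit and release after the fact. A sketch of the guard-style pattern; ReleaseTracker is a stand-in for MemTracker::Tracker:

    #include <sys/shm.h>
    #include <cstddef>

    // Stand-in for MemTracker::Tracker: the record is prepared up front
    // and either committed or discarded once the outcome is known.
    struct ReleaseTracker {
      void record(char* base, size_t bytes) { (void) base; (void) bytes; }
      void discard() { /* syscall failed; log nothing */ }
    };

    static ReleaseTracker get_release_tracker() { return ReleaseTracker(); }

    static bool release_special(char* base, size_t bytes) {
      ReleaseTracker tkr = get_release_tracker();  // before the syscall
      if (shmdt(base) == 0) {       // detaching also deletes the segment
        tkr.record(base, bytes);
        return true;
      } else {
        tkr.discard();
        return false;
      }
    }
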
    43.1 --- a/src/os/solaris/vm/os_solaris.hpp	Mon Jun 24 14:27:24 2013 -0700
    43.2 +++ b/src/os/solaris/vm/os_solaris.hpp	Tue Jun 25 12:46:21 2013 -0700
    43.3 @@ -168,6 +168,9 @@
    43.4    static int _dev_zero_fd;
    43.5    static int get_dev_zero_fd() { return _dev_zero_fd; }
    43.6    static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; }
    43.7 +  static int commit_memory_impl(char* addr, size_t bytes, bool exec);
    43.8 +  static int commit_memory_impl(char* addr, size_t bytes,
    43.9 +                                size_t alignment_hint, bool exec);
   43.10    static char* mmap_chunk(char *addr, size_t size, int flags, int prot);
   43.11    static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed);
   43.12    static bool mpss_sanity_check(bool warn, size_t * page_size);
    44.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp	Mon Jun 24 14:27:24 2013 -0700
    44.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Tue Jun 25 12:46:21 2013 -0700
    44.3 @@ -1,5 +1,5 @@
    44.4  /*
    44.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    44.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    44.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    44.8   *
    44.9   * This code is free software; you can redistribute it and/or modify it
   44.10 @@ -62,7 +62,7 @@
   44.11    }
   44.12  
   44.13    // commit memory
   44.14 -  if (!os::commit_memory(mapAddress, size)) {
   44.15 +  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
   44.16      if (PrintMiscellaneous && Verbose) {
   44.17        warning("Could not commit PerfData memory\n");
   44.18      }
   44.19 @@ -122,7 +122,7 @@
   44.20        addr += result;
   44.21      }
   44.22  
   44.23 -    RESTARTABLE(::close(fd), result);
   44.24 +    result = ::close(fd);
   44.25      if (PrintMiscellaneous && Verbose) {
   44.26        if (result == OS_ERR) {
   44.27          warning("Could not close %s: %s\n", destfile, strerror(errno));
   44.28 @@ -437,7 +437,7 @@
   44.29        addr+=result;
   44.30      }
   44.31  
   44.32 -    RESTARTABLE(::close(fd), result);
   44.33 +    ::close(fd);
   44.34  
   44.35      // get the user name for the effective user id of the process
   44.36      char* user_name = get_user_name(psinfo.pr_euid);
   44.37 @@ -669,7 +669,7 @@
   44.38      if (PrintMiscellaneous && Verbose) {
   44.39        warning("could not set shared memory file size: %s\n", strerror(errno));
   44.40      }
   44.41 -    RESTARTABLE(::close(fd), result);
   44.42 +    ::close(fd);
   44.43      return -1;
   44.44    }
   44.45  
   44.46 @@ -749,9 +749,7 @@
   44.47  
   44.48    mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
   44.49  
   44.50 -  // attempt to close the file - restart it if it was interrupted,
   44.51 -  // but ignore other failures
   44.52 -  RESTARTABLE(::close(fd), result);
   44.53 +  result = ::close(fd);
   44.54    assert(result != OS_ERR, "could not close file");
   44.55  
   44.56    if (mapAddress == MAP_FAILED) {
   44.57 @@ -770,8 +768,7 @@
   44.58    (void)::memset((void*) mapAddress, 0, size);
   44.59  
    44.60    // this does not go through the os api, so the operation has to be recorded here
   44.61 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   44.62 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   44.63 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   44.64  
   44.65    return mapAddress;
   44.66  }
   44.67 @@ -922,9 +919,7 @@
   44.68  
   44.69    mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0);
   44.70  
   44.71 -  // attempt to close the file - restart if it gets interrupted,
   44.72 -  // but ignore other failures
   44.73 -  RESTARTABLE(::close(fd), result);
   44.74 +  result = ::close(fd);
   44.75    assert(result != OS_ERR, "could not close file");
   44.76  
   44.77    if (mapAddress == MAP_FAILED) {
   44.78 @@ -936,8 +931,7 @@
   44.79    }
   44.80  
    44.81    // this does not go through the os api, so the operation has to be recorded here
   44.82 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   44.83 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   44.84 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   44.85  
   44.86    *addr = mapAddress;
   44.87    *sizep = size;
    45.1 --- a/src/os/windows/vm/os_windows.cpp	Mon Jun 24 14:27:24 2013 -0700
    45.2 +++ b/src/os/windows/vm/os_windows.cpp	Tue Jun 25 12:46:21 2013 -0700
    45.3 @@ -2524,7 +2524,7 @@
    45.4                    addr = (address)((uintptr_t)addr &
    45.5                           (~((uintptr_t)os::vm_page_size() - (uintptr_t)1)));
    45.6                    os::commit_memory((char *)addr, thread->stack_base() - addr,
    45.7 -                                    false );
    45.8 +                                    !ExecMem);
    45.9                    return EXCEPTION_CONTINUE_EXECUTION;
   45.10            }
   45.11            else
   45.12 @@ -2875,7 +2875,7 @@
   45.13                                  PAGE_READWRITE);
   45.14    // If reservation failed, return NULL
   45.15    if (p_buf == NULL) return NULL;
   45.16 -  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
   45.17 +  MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC);
   45.18    os::release_memory(p_buf, bytes + chunk_size);
   45.19  
   45.20    // we still need to round up to a page boundary (in case we are using large pages)
   45.21 @@ -2941,7 +2941,7 @@
   45.22          // need to create a dummy 'reserve' record to match
   45.23          // the release.
   45.24          MemTracker::record_virtual_memory_reserve((address)p_buf,
   45.25 -          bytes_to_release, CALLER_PC);
   45.26 +          bytes_to_release, mtNone, CALLER_PC);
   45.27          os::release_memory(p_buf, bytes_to_release);
   45.28        }
   45.29  #ifdef ASSERT
   45.30 @@ -2961,9 +2961,10 @@
   45.31    // Although the memory is allocated individually, it is returned as one.
   45.32    // NMT records it as one block.
   45.33    address pc = CALLER_PC;
   45.34 -  MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc);
   45.35    if ((flags & MEM_COMMIT) != 0) {
   45.36 -    MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc);
   45.37 +    MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc);
   45.38 +  } else {
   45.39 +    MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc);
   45.40    }
   45.41  
   45.42    // made it this far, success
   45.43 @@ -3154,8 +3155,7 @@
   45.44      char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot);
   45.45      if (res != NULL) {
   45.46        address pc = CALLER_PC;
   45.47 -      MemTracker::record_virtual_memory_reserve((address)res, bytes, pc);
   45.48 -      MemTracker::record_virtual_memory_commit((address)res, bytes, pc);
   45.49 +      MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc);
   45.50      }
   45.51  
   45.52      return res;
   45.53 @@ -3164,14 +3164,21 @@
   45.54  
   45.55  bool os::release_memory_special(char* base, size_t bytes) {
   45.56    assert(base != NULL, "Sanity check");
   45.57 -  // Memory allocated via reserve_memory_special() is committed
   45.58 -  MemTracker::record_virtual_memory_uncommit((address)base, bytes);
   45.59    return release_memory(base, bytes);
   45.60  }
   45.61  
   45.62  void os::print_statistics() {
   45.63  }
   45.64  
   45.65 +static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) {
   45.66 +  int err = os::get_last_error();
   45.67 +  char buf[256];
   45.68 +  size_t buf_len = os::lasterror(buf, sizeof(buf));
   45.69 +  warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT
   45.70 +          ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes,
   45.71 +          exec, buf_len != 0 ? buf : "<no_error_string>", err);
   45.72 +}
   45.73 +
   45.74  bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) {
   45.75    if (bytes == 0) {
   45.76      // Don't bother the OS with noops.
   45.77 @@ -3186,11 +3193,17 @@
   45.78    // is always within a reserve covered by a single VirtualAlloc
   45.79    // in that case we can just do a single commit for the requested size
   45.80    if (!UseNUMAInterleaving) {
   45.81 -    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false;
   45.82 +    if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) {
   45.83 +      NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
   45.84 +      return false;
   45.85 +    }
   45.86      if (exec) {
   45.87        DWORD oldprot;
   45.88        // Windows doc says to use VirtualProtect to get execute permissions
   45.89 -      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false;
   45.90 +      if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) {
   45.91 +        NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);)
   45.92 +        return false;
   45.93 +      }
   45.94      }
   45.95      return true;
   45.96    } else {
   45.97 @@ -3205,12 +3218,20 @@
   45.98        MEMORY_BASIC_INFORMATION alloc_info;
   45.99        VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info));
  45.100        size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize);
  45.101 -      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL)
  45.102 +      if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT,
  45.103 +                       PAGE_READWRITE) == NULL) {
  45.104 +        NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
  45.105 +                                            exec);)
  45.106          return false;
  45.107 +      }
  45.108        if (exec) {
  45.109          DWORD oldprot;
  45.110 -        if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot))
  45.111 +        if (!VirtualProtect(next_alloc_addr, bytes_to_rq,
  45.112 +                            PAGE_EXECUTE_READWRITE, &oldprot)) {
  45.113 +          NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq,
  45.114 +                                              exec);)
  45.115            return false;
  45.116 +        }
  45.117        }
  45.118        bytes_remaining -= bytes_to_rq;
  45.119        next_alloc_addr += bytes_to_rq;
  45.120 @@ -3222,7 +3243,24 @@
  45.121  
  45.122  bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
  45.123                         bool exec) {
  45.124 -  return commit_memory(addr, size, exec);
  45.125 +  // alignment_hint is ignored on this OS
  45.126 +  return pd_commit_memory(addr, size, exec);
  45.127 +}
  45.128 +
  45.129 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec,
  45.130 +                                  const char* mesg) {
  45.131 +  assert(mesg != NULL, "mesg must be specified");
  45.132 +  if (!pd_commit_memory(addr, size, exec)) {
  45.133 +    warn_fail_commit_memory(addr, size, exec);
  45.134 +    vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg);
  45.135 +  }
  45.136 +}
  45.137 +
  45.138 +void os::pd_commit_memory_or_exit(char* addr, size_t size,
  45.139 +                                  size_t alignment_hint, bool exec,
  45.140 +                                  const char* mesg) {
  45.141 +  // alignment_hint is ignored on this OS
  45.142 +  pd_commit_memory_or_exit(addr, size, exec, mesg);
  45.143  }
  45.144  
  45.145  bool os::pd_uncommit_memory(char* addr, size_t bytes) {
  45.146 @@ -3240,7 +3278,7 @@
  45.147  }
  45.148  
  45.149  bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
  45.150 -  return os::commit_memory(addr, size);
  45.151 +  return os::commit_memory(addr, size, !ExecMem);
  45.152  }
  45.153  
  45.154  bool os::remove_stack_guard_pages(char* addr, size_t size) {
  45.155 @@ -3264,8 +3302,9 @@
  45.156  
   45.157    // Strangely enough, on Win32 one can change protection only for committed
   45.158    // memory; not a big deal anyway, as bytes is less than or equal to 64K
  45.159 -  if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) {
  45.160 -    fatal("cannot commit protection page");
  45.161 +  if (!is_committed) {
  45.162 +    commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX,
  45.163 +                          "cannot commit protection page");
  45.164    }
  45.165    // One cannot use os::guard_memory() here, as on Win32 guard page
  45.166    // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
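
The Windows commit path has a wrinkle the expanded hunk above makes visible: with UseNUMAInterleaving a single reserved range can consist of several VirtualAlloc allocation regions, and MEM_COMMIT must not straddle a region boundary, so the code walks the range with VirtualQuery and commits one region-sized chunk at a time. A condensed sketch of that loop; commit_chunked() is a hypothetical name:

    #include <windows.h>

    // Hypothetical sketch of region-by-region commit. VirtualQuery()
    // reports how much of the range starting at 'next' lies within one
    // allocation region, so each VirtualAlloc stays inside a region.
    static bool commit_chunked(char* addr, size_t bytes) {
      char* next = addr;
      size_t remaining = bytes;
      while (remaining > 0) {
        MEMORY_BASIC_INFORMATION info;
        VirtualQuery(next, &info, sizeof(info));
        size_t chunk = remaining < (size_t) info.RegionSize
                           ? remaining : (size_t) info.RegionSize;
        if (VirtualAlloc(next, chunk, MEM_COMMIT, PAGE_READWRITE) == NULL) {
          return false;  // caller decides whether this is fatal
        }
        remaining -= chunk;
        next += chunk;
      }
      return true;
    }
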
    46.1 --- a/src/os/windows/vm/perfMemory_windows.cpp	Mon Jun 24 14:27:24 2013 -0700
    46.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp	Tue Jun 25 12:46:21 2013 -0700
    46.3 @@ -1,5 +1,5 @@
    46.4  /*
    46.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    46.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    46.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.8   *
    46.9   * This code is free software; you can redistribute it and/or modify it
   46.10 @@ -58,7 +58,7 @@
   46.11    }
   46.12  
   46.13    // commit memory
   46.14 -  if (!os::commit_memory(mapAddress, size)) {
   46.15 +  if (!os::commit_memory(mapAddress, size, !ExecMem)) {
   46.16      if (PrintMiscellaneous && Verbose) {
   46.17        warning("Could not commit PerfData memory\n");
   46.18      }
   46.19 @@ -1498,8 +1498,7 @@
   46.20    (void)memset(mapAddress, '\0', size);
   46.21  
    46.22    // this does not go through the os api, so the operation has to be recorded here
   46.23 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   46.24 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   46.25 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   46.26  
   46.27    return (char*) mapAddress;
   46.28  }
   46.29 @@ -1681,8 +1680,7 @@
   46.30    }
   46.31  
    46.32    // this does not go through the os api, so the operation has to be recorded here
   46.33 -  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC);
   46.34 -  MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal);
   46.35 +  MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC);
   46.36  
   46.37  
   46.38    *addrp = (char*)mapAddress;
   46.39 @@ -1836,9 +1834,10 @@
   46.40      return;
   46.41    }
   46.42  
   46.43 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   46.44    remove_file_mapping(addr);
    46.45    // this does not go through the os api, so the operation has to be recorded here
   46.46 -  MemTracker::record_virtual_memory_release((address)addr, bytes);
   46.47 +  tkr.record((address)addr, bytes);
   46.48  }
   46.49  
   46.50  char* PerfMemory::backing_store_filename() {
    47.1 --- a/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    47.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    47.3 @@ -1,47 +0,0 @@
    47.4 -/*
    47.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    47.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    47.7 - *
    47.8 - * This code is free software; you can redistribute it and/or modify it
    47.9 - * under the terms of the GNU General Public License version 2 only, as
   47.10 - * published by the Free Software Foundation.
   47.11 - *
   47.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   47.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   47.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   47.15 - * version 2 for more details (a copy is included in the LICENSE file that
   47.16 - * accompanied this code).
   47.17 - *
   47.18 - * You should have received a copy of the GNU General Public License version
   47.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   47.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   47.21 - *
   47.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   47.23 - * or visit www.oracle.com if you need additional information or have any
   47.24 - * questions.
   47.25 - *
   47.26 - */
   47.27 -
   47.28 -#include "precompiled.hpp"
   47.29 -#include "asm/macroAssembler.hpp"
   47.30 -#include "runtime/os.hpp"
   47.31 -#include "runtime/threadLocalStorage.hpp"
   47.32 -
   47.33 -#include <asm-sparc/traps.h>
   47.34 -
   47.35 -void MacroAssembler::read_ccr_trap(Register ccr_save) {
   47.36 -  // No implementation
   47.37 -  breakpoint_trap();
   47.38 -}
   47.39 -
   47.40 -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
   47.41 -  // No implementation
   47.42 -  breakpoint_trap();
   47.43 -}
   47.44 -
   47.45 -void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); }
   47.46 -void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); }
   47.47 -
   47.48 -// Use software breakpoint trap until we figure out how to do this on Linux
   47.49 -void MacroAssembler::get_psr_trap()       { trap(SP_TRAP_SBPT); }
   47.50 -void MacroAssembler::set_psr_trap()       { trap(SP_TRAP_SBPT); }
    48.1 --- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
    48.2 +++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
    48.3 @@ -169,7 +169,6 @@
    48.4      : "memory");
    48.5    return rv;
    48.6  #else
    48.7 -  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
    48.8    volatile jlong_accessor evl, cvl, rv;
    48.9    evl.long_value = exchange_value;
   48.10    cvl.long_value = compare_value;
    49.1 --- a/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp	Mon Jun 24 14:27:24 2013 -0700
    49.2 +++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
    49.3 @@ -1,61 +0,0 @@
    49.4 -/*
    49.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
    49.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    49.7 - *
    49.8 - * This code is free software; you can redistribute it and/or modify it
    49.9 - * under the terms of the GNU General Public License version 2 only, as
   49.10 - * published by the Free Software Foundation.
   49.11 - *
   49.12 - * This code is distributed in the hope that it will be useful, but WITHOUT
   49.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   49.14 - * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   49.15 - * version 2 for more details (a copy is included in the LICENSE file that
   49.16 - * accompanied this code).
   49.17 - *
   49.18 - * You should have received a copy of the GNU General Public License version
   49.19 - * 2 along with this work; if not, write to the Free Software Foundation,
   49.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   49.21 - *
   49.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   49.23 - * or visit www.oracle.com if you need additional information or have any
   49.24 - * questions.
   49.25 - *
   49.26 - */
   49.27 -
   49.28 -#include "precompiled.hpp"
   49.29 -#include "asm/macroAssembler.inline.hpp"
   49.30 -#include "runtime/os.hpp"
   49.31 -#include "runtime/threadLocalStorage.hpp"
   49.32 -
   49.33 -#include <sys/trap.h>          // For trap numbers
   49.34 -#include <v9/sys/psr_compat.h> // For V8 compatibility
   49.35 -
   49.36 -void MacroAssembler::read_ccr_trap(Register ccr_save) {
   49.37 -  // Execute a trap to get the PSR, mask and shift
   49.38 -  // to get the condition codes.
   49.39 -  get_psr_trap();
   49.40 -  nop();
   49.41 -  set(PSR_ICC, ccr_save);
   49.42 -  and3(O0, ccr_save, ccr_save);
   49.43 -  srl(ccr_save, PSR_ICC_SHIFT, ccr_save);
   49.44 -}
   49.45 -
   49.46 -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) {
   49.47 -  // Execute a trap to get the PSR, shift back
   49.48 -  // the condition codes, mask the condition codes
   49.49 -  // back into and PSR and trap to write back the
   49.50 -  // PSR.
   49.51 -  sll(ccr_save, PSR_ICC_SHIFT, scratch2);
   49.52 -  get_psr_trap();
   49.53 -  nop();
   49.54 -  set(~PSR_ICC, scratch1);
   49.55 -  and3(O0, scratch1, O0);
   49.56 -  or3(O0, scratch2, O0);
   49.57 -  set_psr_trap();
   49.58 -  nop();
   49.59 -}
   49.60 -
   49.61 -void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); }
   49.62 -void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); }
   49.63 -void MacroAssembler::get_psr_trap()       { trap(ST_GETPSR); }
   49.64 -void MacroAssembler::set_psr_trap()       { trap(ST_SETPSR); }
    50.1 --- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
    50.2 +++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
    50.3 @@ -60,21 +60,10 @@
    50.4  
    50.5  #else
    50.6  
    50.7 -extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst);
    50.8  extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst);
    50.9  
   50.10  inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) {
   50.11 -#ifdef COMPILER2
   50.12 -  // Compiler2 does not support v8, it is used only for v9.
   50.13    _Atomic_move_long_v9(src, dst);
   50.14 -#else
   50.15 -  // The branch is cheaper then emulated LDD.
   50.16 -  if (VM_Version::v9_instructions_work()) {
   50.17 -    _Atomic_move_long_v9(src, dst);
   50.18 -  } else {
   50.19 -    _Atomic_move_long_v8(src, dst);
   50.20 -  }
   50.21 -#endif
   50.22  }
   50.23  
   50.24  inline jlong Atomic::load(volatile jlong* src) {
   50.25 @@ -209,7 +198,6 @@
   50.26      : "memory");
   50.27    return rv;
   50.28  #else  //_LP64
   50.29 -  assert(VM_Version::v9_instructions_work(), "cas only supported on v9");
   50.30    volatile jlong_accessor evl, cvl, rv;
   50.31    evl.long_value = exchange_value;
   50.32    cvl.long_value = compare_value;
   50.33 @@ -318,7 +306,6 @@
   50.34    // Return 64 bit value in %o0
   50.35    return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value);
   50.36  #else  // _LP64
   50.37 -  assert (VM_Version::v9_instructions_work(), "only supported on v9");
   50.38    // Return 64 bit value in %o0,%o1 by hand
   50.39    return _Atomic_casl(exchange_value, dest, compare_value);
   50.40  #endif // _LP64
    51.1 --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Mon Jun 24 14:27:24 2013 -0700
    51.2 +++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il	Tue Jun 25 12:46:21 2013 -0700
    51.3 @@ -152,23 +152,6 @@
    51.4          .nonvolatile
    51.5          .end
    51.6  
    51.7 -  // Support for jlong Atomic::load and Atomic::store on v8.
    51.8 -  //
    51.9 -  // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst)
   51.10 -  //
   51.11 -  // Arguments:
   51.12 -  //      src:  O0
   51.13 -  //      dest: O1
   51.14 -  //
   51.15 -  // Overwrites O2 and O3
   51.16 -
   51.17 -        .inline _Atomic_move_long_v8,2
   51.18 -        .volatile
   51.19 -        ldd     [%o0], %o2
   51.20 -        std     %o2, [%o1]
   51.21 -        .nonvolatile
   51.22 -        .end
   51.23 -
   51.24    // Support for jlong Atomic::load and Atomic::store on v9.
   51.25    //
   51.26    // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
    52.1 --- a/src/share/vm/adlc/formssel.cpp	Mon Jun 24 14:27:24 2013 -0700
    52.2 +++ b/src/share/vm/adlc/formssel.cpp	Tue Jun 25 12:46:21 2013 -0700
    52.3 @@ -235,6 +235,9 @@
    52.4    return false;
    52.5  }
    52.6  
    52.7 +bool InstructForm::is_ideal_negD() const {
    52.8 +  return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0);
    52.9 +}
   52.10  
   52.11  // Return 'true' if this instruction matches an ideal 'Copy*' node
   52.12  int InstructForm::is_ideal_copy() const {
   52.13 @@ -533,6 +536,12 @@
   52.14    if( data_type != Form::none )
   52.15      rematerialize = true;
   52.16  
   52.17 +  // Ugly: until a better fix is implemented, disable rematerialization for
   52.18 +  // negD nodes because they are proved to be problematic.
   52.19 +  if (is_ideal_negD()) {
   52.20 +    return false;
   52.21 +  }
   52.22 +
   52.23    // Constants
   52.24    if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) )
   52.25      rematerialize = true;
    53.1 --- a/src/share/vm/adlc/formssel.hpp	Mon Jun 24 14:27:24 2013 -0700
    53.2 +++ b/src/share/vm/adlc/formssel.hpp	Tue Jun 25 12:46:21 2013 -0700
    53.3 @@ -147,6 +147,7 @@
    53.4    virtual int         is_empty_encoding() const; // _size=0 and/or _insencode empty
    53.5    virtual int         is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal
    53.6    virtual int         is_ideal_copy() const;    // node matches ideal 'Copy*'
    53.7 +  virtual bool        is_ideal_negD() const;    // node matches ideal 'NegD'
    53.8    virtual bool        is_ideal_if()   const;    // node matches ideal 'If'
    53.9    virtual bool        is_ideal_fastlock() const; // node matches 'FastLock'
   53.10    virtual bool        is_ideal_membar() const;  // node matches ideal 'MemBarXXX'
    54.1 --- a/src/share/vm/c1/c1_IR.cpp	Mon Jun 24 14:27:24 2013 -0700
    54.2 +++ b/src/share/vm/c1/c1_IR.cpp	Tue Jun 25 12:46:21 2013 -0700
    54.3 @@ -506,7 +506,7 @@
    54.4    _loop_map(0, 0),          // initialized later with correct size
    54.5    _compilation(c)
    54.6  {
    54.7 -  TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order");
    54.8 +  TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order"));
    54.9  
   54.10    init_visited();
   54.11    count_edges(start_block, NULL);
   54.12 @@ -683,7 +683,7 @@
   54.13  }
   54.14  
   54.15  void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) {
   54.16 -  TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight");
   54.17 +  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight"));
   54.18    init_visited();
   54.19  
   54.20    assert(_work_list.is_empty(), "work list must be empty before processing");
   54.21 @@ -868,7 +868,7 @@
   54.22  }
   54.23  
   54.24  void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) {
   54.25 -  TRACE_LINEAR_SCAN(3, "----- computing final block order");
   54.26 +  TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order"));
   54.27  
   54.28    // the start block is always the first block in the linear scan order
   54.29    _linear_scan_order = new BlockList(_num_blocks);
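
Why the three TRACE_LINEAR_SCAN fixes above matter: the macro's second argument is a statement to execute at the given trace level, so the old calls that passed a bare string literal compiled cleanly (a string literal is a valid expression) but printed nothing. Assuming the usual non-PRODUCT definition in c1_LinearScan.hpp, the shape is roughly:

    // Assumed (simplified) macro definition:
    //   #define TRACE_LINEAR_SCAN(level, code) \
    //     if (TraceLinearScanLevel >= level) { code; }
    //
    // TRACE_LINEAR_SCAN(2, "***** ...");                  // expanded to the no-op statement  "***** ...";
    // TRACE_LINEAR_SCAN(2, tty->print_cr("***** ..."));   // actually prints

The no-op form is also exactly the kind of expression the newly enabled -Wunused-value warning flags.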
    55.1 --- a/src/share/vm/c1/c1_LIR.cpp	Mon Jun 24 14:27:24 2013 -0700
    55.2 +++ b/src/share/vm/c1/c1_LIR.cpp	Tue Jun 25 12:46:21 2013 -0700
    55.3 @@ -201,23 +201,24 @@
    55.4  
    55.5  #ifdef ASSERT
    55.6    if (!is_pointer() && !is_illegal()) {
    55.7 +    OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160
    55.8      switch (as_BasicType(type_field())) {
    55.9      case T_LONG:
   55.10 -      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
   55.11 +      assert((kindfield == cpu_register || kindfield == stack_value) &&
   55.12               size_field() == double_size, "must match");
   55.13        break;
   55.14      case T_FLOAT:
   55.15        // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
   55.16 -      assert((kind_field() == fpu_register || kind_field() == stack_value
   55.17 -             ARM_ONLY(|| kind_field() == cpu_register)
   55.18 -             PPC_ONLY(|| kind_field() == cpu_register) ) &&
   55.19 +      assert((kindfield == fpu_register || kindfield == stack_value
   55.20 +             ARM_ONLY(|| kindfield == cpu_register)
   55.21 +             PPC_ONLY(|| kindfield == cpu_register) ) &&
   55.22               size_field() == single_size, "must match");
   55.23        break;
   55.24      case T_DOUBLE:
   55.25        // FP return values can be also in CPU registers on ARM and PPC (softfp ABI)
   55.26 -      assert((kind_field() == fpu_register || kind_field() == stack_value
   55.27 -             ARM_ONLY(|| kind_field() == cpu_register)
   55.28 -             PPC_ONLY(|| kind_field() == cpu_register) ) &&
   55.29 +      assert((kindfield == fpu_register || kindfield == stack_value
   55.30 +             ARM_ONLY(|| kindfield == cpu_register)
   55.31 +             PPC_ONLY(|| kindfield == cpu_register) ) &&
   55.32               size_field() == double_size, "must match");
   55.33        break;
   55.34      case T_BOOLEAN:
   55.35 @@ -229,7 +230,7 @@
   55.36      case T_OBJECT:
   55.37      case T_METADATA:
   55.38      case T_ARRAY:
   55.39 -      assert((kind_field() == cpu_register || kind_field() == stack_value) &&
   55.40 +      assert((kindfield == cpu_register || kindfield == stack_value) &&
   55.41               size_field() == single_size, "must match");
   55.42        break;
   55.43  
    56.1 --- a/src/share/vm/ci/ciUtilities.hpp	Mon Jun 24 14:27:24 2013 -0700
    56.2 +++ b/src/share/vm/ci/ciUtilities.hpp	Tue Jun 25 12:46:21 2013 -0700
    56.3 @@ -96,7 +96,7 @@
    56.4      CLEAR_PENDING_EXCEPTION;                     \
    56.5      return (result);                             \
    56.6    }                                              \
    56.7 -  (0
    56.8 +  (void)(0
    56.9  
   56.10  #define KILL_COMPILE_ON_ANY                      \
   56.11    THREAD);                                       \
   56.12 @@ -104,7 +104,7 @@
   56.13      fatal("unhandled ci exception");             \
   56.14      CLEAR_PENDING_EXCEPTION;                     \
   56.15    }                                              \
   56.16 -(0
   56.17 +(void)(0
   56.18  
   56.19  
   56.20  inline const char* bool_to_str(bool b) {
    57.1 --- a/src/share/vm/classfile/genericSignatures.cpp	Mon Jun 24 14:27:24 2013 -0700
    57.2 +++ b/src/share/vm/classfile/genericSignatures.cpp	Tue Jun 25 12:46:21 2013 -0700
    57.3 @@ -124,7 +124,7 @@
    57.4        fatal(STREAM->parse_error());      \
    57.5      }                                   \
    57.6      return NULL;                        \
    57.7 -  } 0
    57.8 +  } (void)0
    57.9  
   57.10  #define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR()
   57.11  #define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR()
   57.12 @@ -133,7 +133,7 @@
   57.13  #define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR()
   57.14  #define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR()
   57.15  
   57.16 -#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0
   57.17 +#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0
   57.18  
   57.19  #ifndef PRODUCT
   57.20  void Identifier::print_on(outputStream* str) const {
    58.1 --- a/src/share/vm/classfile/symbolTable.cpp	Mon Jun 24 14:27:24 2013 -0700
    58.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Tue Jun 25 12:46:21 2013 -0700
    58.3 @@ -598,6 +598,8 @@
    58.4  
    58.5  bool StringTable::_needs_rehashing = false;
    58.6  
    58.7 +volatile int StringTable::_parallel_claimed_idx = 0;
    58.8 +
    58.9  // Pick hashing algorithm
   58.10  unsigned int StringTable::hash_string(const jchar* s, int len) {
   58.11    return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) :
   58.12 @@ -761,8 +763,18 @@
   58.13    }
   58.14  }
   58.15  
   58.16 -void StringTable::oops_do(OopClosure* f) {
   58.17 -  for (int i = 0; i < the_table()->table_size(); ++i) {
   58.18 +void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) {
   58.19 +  const int limit = the_table()->table_size();
   58.20 +
   58.21 +  assert(0 <= start_idx && start_idx <= limit,
   58.22 +         err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx));
   58.23 +  assert(0 <= end_idx && end_idx <= limit,
   58.24 +         err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx));
   58.25 +  assert(start_idx <= end_idx,
   58.26 +         err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT,
   58.27 +                 start_idx, end_idx));
   58.28 +
   58.29 +  for (int i = start_idx; i < end_idx; i += 1) {
   58.30      HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i);
   58.31      while (entry != NULL) {
   58.32        assert(!entry->is_shared(), "CDS not used for the StringTable");
   58.33 @@ -774,6 +786,27 @@
   58.34    }
   58.35  }
   58.36  
   58.37 +void StringTable::oops_do(OopClosure* f) {
   58.38 +  buckets_do(f, 0, the_table()->table_size());
   58.39 +}
   58.40 +
   58.41 +void StringTable::possibly_parallel_oops_do(OopClosure* f) {
   58.42 +  const int ClaimChunkSize = 32;
   58.43 +  const int limit = the_table()->table_size();
   58.44 +
   58.45 +  for (;;) {
   58.46 +    // Grab next set of buckets to scan
   58.47 +    int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize;
   58.48 +    if (start_idx >= limit) {
   58.49 +      // End of table
   58.50 +      break;
   58.51 +    }
   58.52 +
   58.53 +    int end_idx = MIN2(limit, start_idx + ClaimChunkSize);
   58.54 +    buckets_do(f, start_idx, end_idx);
   58.55 +  }
   58.56 +}
   58.57 +
   58.58  void StringTable::verify() {
   58.59    for (int i = 0; i < the_table()->table_size(); ++i) {
   58.60      HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
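
The claiming loop added to symbolTable.cpp above is the standard chunked-claiming idiom: each GC worker atomically advances a shared cursor by ClaimChunkSize and thereby owns one disjoint chunk of buckets. A minimal standalone sketch of the same idiom, using std::atomic in place of HotSpot's Atomic::add:

    #include <algorithm>
    #include <atomic>

    static std::atomic<int> parallel_claimed_idx(0);  // shared cursor, reset to 0 before each scan

    template <typename Visitor>
    void possibly_parallel_do(Visitor visit, int limit) {
      const int ClaimChunkSize = 32;
      for (;;) {
        // fetch_add returns the value *before* the add, i.e. the start of the
        // chunk this worker now owns. (HotSpot's Atomic::add returns the value
        // *after* the add, hence the "- ClaimChunkSize" in the patch.)
        int start_idx = parallel_claimed_idx.fetch_add(ClaimChunkSize);
        if (start_idx >= limit) {
          break;                       // whole table already claimed
        }
        int end_idx = std::min(limit, start_idx + ClaimChunkSize);
        for (int i = start_idx; i < end_idx; i++) {
          visit(i);                    // scan bucket i
        }
      }
    }

Workers that claim a start index past the end simply drop out, so the loop needs no barrier of its own.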
    59.1 --- a/src/share/vm/classfile/symbolTable.hpp	Mon Jun 24 14:27:24 2013 -0700
    59.2 +++ b/src/share/vm/classfile/symbolTable.hpp	Tue Jun 25 12:46:21 2013 -0700
    59.3 @@ -1,5 +1,5 @@
    59.4  /*
    59.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    59.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    59.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.8   *
    59.9   * This code is free software; you can redistribute it and/or modify it
   59.10 @@ -246,12 +246,19 @@
   59.11    // Set if one bucket is out of balance due to hash algorithm deficiency
   59.12    static bool _needs_rehashing;
   59.13  
   59.14 +  // Claimed high water mark for parallel chunked scanning
   59.15 +  static volatile int _parallel_claimed_idx;
   59.16 +
   59.17    static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS);
   59.18    oop basic_add(int index, Handle string_or_null, jchar* name, int len,
   59.19                  unsigned int hashValue, TRAPS);
   59.20  
   59.21    oop lookup(int index, jchar* chars, int length, unsigned int hashValue);
   59.22  
   59.23 +  // Apply the given oop closure to the entries in the buckets
   59.24 +  // in the range [start_idx, end_idx).
   59.25 +  static void buckets_do(OopClosure* f, int start_idx, int end_idx);
   59.26 +
   59.27    StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
   59.28                                sizeof (HashtableEntry<oop, mtSymbol>)) {}
   59.29  
   59.30 @@ -277,9 +284,12 @@
   59.31      unlink_or_oops_do(cl, NULL);
   59.32    }
   59.33  
   59.34 -  // Invoke "f->do_oop" on the locations of all oops in the table.
   59.35 +  // Serially invoke "f->do_oop" on the locations of all oops in the table.
   59.36    static void oops_do(OopClosure* f);
   59.37  
   59.38 +  // Possibly parallel version of the above
   59.39 +  static void possibly_parallel_oops_do(OopClosure* f);
   59.40 +
   59.41    // Hashing algorithm, used as the hash value used by the
   59.42    //     StringTable for bucket selection and comparison (stored in the
   59.43    //     HashtableEntry structures).  This is used in the String.intern() method.
   59.44 @@ -315,5 +325,8 @@
   59.45    // Rehash the symbol table if it gets out of balance
   59.46    static void rehash_table();
   59.47    static bool needs_rehashing() { return _needs_rehashing; }
   59.48 +
   59.49 +  // Parallel chunked scanning
   59.50 +  static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; }
   59.51  };
   59.52  #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
    60.1 --- a/src/share/vm/classfile/verifier.hpp	Mon Jun 24 14:27:24 2013 -0700
    60.2 +++ b/src/share/vm/classfile/verifier.hpp	Tue Jun 25 12:46:21 2013 -0700
    60.3 @@ -86,9 +86,9 @@
    60.4  // These macros are used similarly to CHECK macros but also check
    60.5  // the status of the verifier and return if that has an error.
    60.6  #define CHECK_VERIFY(verifier) \
    60.7 -  CHECK); if ((verifier)->has_error()) return; (0
    60.8 +  CHECK); if ((verifier)->has_error()) return; ((void)0
    60.9  #define CHECK_VERIFY_(verifier, result) \
   60.10 -  CHECK_(result)); if ((verifier)->has_error()) return (result); (0
   60.11 +  CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0
   60.12  
   60.13  class TypeOrigin VALUE_OBJ_CLASS_SPEC {
   60.14   private:
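
The unbalanced parentheses in CHECK_VERIFY are deliberate: the macro is written as the final argument of a call, so its expansion closes that call's argument list, and the trailing "((void)0" pairs with the ")" and ";" already present at the use site. Roughly (CHECK itself expands further; shown schematically):

    // Source:
    //   parse_field(field, CHECK_VERIFY(this));
    // After expansion, approximately:
    //   parse_field(field, THREAD);
    //   if (HAS_PENDING_EXCEPTION) { return; } (void)(0);
    //   if ((this)->has_error()) return;
    //   ((void)0);   // <- completed by the caller's ')' and ';'
    //
    // The old tail "(0" completed to the statement "(0);" -- a discarded int
    // value that -Wunused-value flags; "((void)0" keeps the paren balance
    // identical while silencing the warning.

The ciUtilities.hpp and genericSignatures.cpp hunks earlier in this change apply the same (void) treatment to their own paired macros.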
    61.1 --- a/src/share/vm/code/dependencies.cpp	Mon Jun 24 14:27:24 2013 -0700
    61.2 +++ b/src/share/vm/code/dependencies.cpp	Tue Jun 25 12:46:21 2013 -0700
    61.3 @@ -989,7 +989,7 @@
    61.4    assert(changes.involves_context(context_type), "irrelevant dependency");
    61.5    Klass* new_type = changes.new_type();
    61.6  
    61.7 -  count_find_witness_calls();
    61.8 +  (void)count_find_witness_calls();
    61.9    NOT_PRODUCT(deps_find_witness_singles++);
   61.10  
   61.11    // Current thread must be in VM (not native mode, as in CI):
    62.1 --- a/src/share/vm/code/nmethod.cpp	Mon Jun 24 14:27:24 2013 -0700
    62.2 +++ b/src/share/vm/code/nmethod.cpp	Tue Jun 25 12:46:21 2013 -0700
    62.3 @@ -2615,7 +2615,8 @@
    62.4                        relocation_begin()-1+ip[1]);
    62.5        for (; ip < index_end; ip++)
    62.6          tty->print_cr("  (%d ?)", ip[0]);
    62.7 -      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
    62.8 +      tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
    62.9 +      ip++;
   62.10        tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip);
   62.11      }
   62.12    }
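
The nmethod.cpp hunk above fixes an evaluation-order bug, not just style:

    // tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip++);
    //   'ip' is read as one argument and modified in another; function
    //   arguments are unsequenced relative to each other in C++, so which
    //   value of 'ip' gets printed is unspecified (and GCC warns).
    // Splitting the increment into its own statement pins down the order:
    //   tty->print_cr("          @" INTPTR_FORMAT ": index_size=%d", ip, *ip);
    //   ip++;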
    63.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Mon Jun 24 14:27:24 2013 -0700
    63.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp	Tue Jun 25 12:46:21 2013 -0700
    63.3 @@ -1,5 +1,5 @@
    63.4  /*
    63.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    63.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    63.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    63.8   *
    63.9   * This code is free software; you can redistribute it and/or modify it
   63.10 @@ -565,11 +565,9 @@
   63.11      if(new_start_aligned < new_end_for_commit) {
   63.12        MemRegion new_committed =
   63.13          MemRegion(new_start_aligned, new_end_for_commit);
   63.14 -      if (!os::commit_memory((char*)new_committed.start(),
   63.15 -                             new_committed.byte_size())) {
   63.16 -        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
   63.17 -                              "card table expansion");
   63.18 -      }
   63.19 +      os::commit_memory_or_exit((char*)new_committed.start(),
   63.20 +                                new_committed.byte_size(), !ExecMem,
   63.21 +                                "card table expansion");
   63.22      }
   63.23      result = true;
   63.24    } else if (new_start_aligned > cur_committed.start()) {
    64.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Mon Jun 24 14:27:24 2013 -0700
    64.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp	Tue Jun 25 12:46:21 2013 -0700
    64.3 @@ -1250,14 +1250,13 @@
    64.4                    avg_promoted()->deviation());
    64.5      }
    64.6  
    64.7 -    gclog_or_tty->print( "  avg_promoted_padded_avg: %f"
    64.8 +    gclog_or_tty->print_cr( "  avg_promoted_padded_avg: %f"
    64.9                  "  avg_pretenured_padded_avg: %f"
   64.10                  "  tenuring_thresh: %d"
   64.11                  "  target_size: " SIZE_FORMAT,
   64.12                  avg_promoted()->padded_average(),
   64.13                  _avg_pretenured->padded_average(),
   64.14                  tenuring_threshold, target_size);
   64.15 -    tty->cr();
   64.16    }
   64.17  
   64.18    set_survivor_size(target_size);
   64.19 @@ -1279,7 +1278,7 @@
   64.20    avg_promoted()->sample(promoted + _avg_pretenured->padded_average());
   64.21  
   64.22    if (PrintAdaptiveSizePolicy) {
   64.23 -    gclog_or_tty->print(
   64.24 +    gclog_or_tty->print_cr(
   64.25                    "AdaptiveSizePolicy::update_averages:"
   64.26                    "  survived: "  SIZE_FORMAT
   64.27                    "  promoted: "  SIZE_FORMAT
    65.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Mon Jun 24 14:27:24 2013 -0700
    65.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp	Tue Jun 25 12:46:21 2013 -0700
    65.3 @@ -1,5 +1,5 @@
    65.4  /*
    65.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
    65.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    65.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    65.8   *
    65.9   * This code is free software; you can redistribute it and/or modify it
   65.10 @@ -101,7 +101,8 @@
   65.11    }
   65.12  
   65.13    char* const base_addr = committed_high_addr();
   65.14 -  bool result = special() || os::commit_memory(base_addr, bytes, alignment());
   65.15 +  bool result = special() ||
   65.16 +         os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
   65.17    if (result) {
   65.18      _committed_high_addr += bytes;
   65.19    }
   65.20 @@ -154,7 +155,7 @@
   65.21    if (tmp_bytes > 0) {
   65.22      char* const commit_base = committed_high_addr();
   65.23      if (other_space->special() ||
   65.24 -        os::commit_memory(commit_base, tmp_bytes, alignment())) {
   65.25 +        os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
   65.26        // Reduce the reserved region in the other space.
   65.27        other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes,
   65.28                                  other_space->reserved_high_addr(),
   65.29 @@ -269,7 +270,8 @@
   65.30    }
   65.31  
   65.32    char* const base_addr = committed_low_addr() - bytes;
   65.33 -  bool result = special() || os::commit_memory(base_addr, bytes, alignment());
   65.34 +  bool result = special() ||
   65.35 +         os::commit_memory(base_addr, bytes, alignment(), !ExecMem);
   65.36    if (result) {
   65.37      _committed_low_addr -= bytes;
   65.38    }
   65.39 @@ -322,7 +324,7 @@
   65.40    if (tmp_bytes > 0) {
   65.41      char* const commit_base = committed_low_addr() - tmp_bytes;
   65.42      if (other_space->special() ||
   65.43 -        os::commit_memory(commit_base, tmp_bytes, alignment())) {
   65.44 +        os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) {
   65.45        // Reduce the reserved region in the other space.
   65.46        other_space->set_reserved(other_space->reserved_low_addr(),
   65.47                                  other_space->reserved_high_addr() - tmp_bytes,
    66.1 --- a/src/share/vm/memory/allocation.hpp	Mon Jun 24 14:27:24 2013 -0700
    66.2 +++ b/src/share/vm/memory/allocation.hpp	Tue Jun 25 12:46:21 2013 -0700
    66.3 @@ -643,8 +643,15 @@
    66.4  #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\
    66.5    (type*) resource_allocate_bytes(thread, (size) * sizeof(type))
    66.6  
    66.7 +#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\
    66.8 +  (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
    66.9 +
   66.10  #define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\
   66.11 -  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) )
   66.12 +  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type))
   66.13 +
   66.14 +#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\
   66.15 +  (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\
   66.16 +                                    (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL)
   66.17  
   66.18  #define FREE_RESOURCE_ARRAY(type, old, size)\
   66.19    resource_free_bytes((char*)(old), (size) * sizeof(type))
   66.20 @@ -655,28 +662,40 @@
   66.21  #define NEW_RESOURCE_OBJ(type)\
   66.22    NEW_RESOURCE_ARRAY(type, 1)
   66.23  
   66.24 +#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
   66.25 +  NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
   66.26 +
   66.27 +#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
   66.28 +  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
   66.29 +
   66.30 +#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   66.31 +  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
   66.32 +
   66.33  #define NEW_C_HEAP_ARRAY(type, size, memflags)\
   66.34    (type*) (AllocateHeap((size) * sizeof(type), memflags))
   66.35  
   66.36 +#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\
   66.37 +  NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL)
   66.38 +
   66.39 +#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\
   66.40 +  NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL)
   66.41 +
   66.42  #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\
   66.43    (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags))
   66.44  
   66.45 +#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\
   66.46 +   (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL))
   66.47 +
   66.48  #define FREE_C_HEAP_ARRAY(type, old, memflags) \
   66.49    FreeHeap((char*)(old), memflags)
   66.50  
   66.51 -#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\
   66.52 -  (type*) (AllocateHeap((size) * sizeof(type), memflags, pc))
   66.53 -
   66.54 -#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\
   66.55 -  (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc))
   66.56 -
   66.57 -#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)         \
   66.58 -  (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail)
   66.59 -
   66.60  // allocate type in heap without calling ctor
   66.61  #define NEW_C_HEAP_OBJ(type, memflags)\
   66.62    NEW_C_HEAP_ARRAY(type, 1, memflags)
   66.63  
   66.64 +#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\
   66.65 +  NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags)
   66.66 +
   66.67  // deallocate obj of type in heap without calling dtor
   66.68  #define FREE_C_HEAP_OBJ(objname, memflags)\
   66.69    FreeHeap((char*)objname, memflags);
   66.70 @@ -721,13 +740,21 @@
   66.71  // is set so that we always use malloc except for Solaris where we set the
   66.72  // limit to get mapped memory.
   66.73  template <class E, MEMFLAGS F>
   66.74 -class ArrayAllocator : StackObj {
   66.75 +class ArrayAllocator VALUE_OBJ_CLASS_SPEC {
   66.76    char* _addr;
   66.77    bool _use_malloc;
   66.78    size_t _size;
   66.79 +  bool _free_in_destructor;
   66.80   public:
   66.81 -  ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { }
   66.82 -  ~ArrayAllocator() { free(); }
   66.83 +  ArrayAllocator(bool free_in_destructor = true) :
   66.84 +    _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { }
   66.85 +
   66.86 +  ~ArrayAllocator() {
   66.87 +    if (_free_in_destructor) {
   66.88 +      free();
   66.89 +    }
   66.90 +  }
   66.91 +
   66.92    E* allocate(size_t length);
   66.93    void free();
   66.94  };
    67.1 --- a/src/share/vm/memory/allocation.inline.hpp	Mon Jun 24 14:27:24 2013 -0700
    67.2 +++ b/src/share/vm/memory/allocation.inline.hpp	Tue Jun 25 12:46:21 2013 -0700
    67.3 @@ -146,10 +146,7 @@
    67.4      vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)");
    67.5    }
    67.6  
    67.7 -  bool success = os::commit_memory(_addr, _size, false /* executable */);
    67.8 -  if (!success) {
    67.9 -    vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)");
   67.10 -  }
   67.11 +  os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)");
   67.12  
   67.13    return (E*)_addr;
   67.14  }
    68.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp	Mon Jun 24 14:27:24 2013 -0700
    68.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp	Tue Jun 25 12:46:21 2013 -0700
    68.3 @@ -110,11 +110,8 @@
    68.4    jbyte* guard_card = &_byte_map[_guard_index];
    68.5    uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size);
    68.6    _guard_region = MemRegion((HeapWord*)guard_page, _page_size);
    68.7 -  if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) {
    68.8 -    // Do better than this for Merlin
    68.9 -    vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card");
   68.10 -  }
   68.11 -
   68.12 +  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
   68.13 +                            !ExecMem, "card table last card");
   68.14    *guard_card = last_card;
   68.15  
   68.16     _lowest_non_clean =
   68.17 @@ -312,12 +309,9 @@
   68.18          MemRegion(cur_committed.end(), new_end_for_commit);
   68.19  
   68.20        assert(!new_committed.is_empty(), "Region should not be empty here");
   68.21 -      if (!os::commit_memory((char*)new_committed.start(),
   68.22 -                             new_committed.byte_size(), _page_size)) {
   68.23 -        // Do better than this for Merlin
   68.24 -        vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR,
   68.25 -                "card table expansion");
   68.26 -      }
   68.27 +      os::commit_memory_or_exit((char*)new_committed.start(),
   68.28 +                                new_committed.byte_size(), _page_size,
   68.29 +                                !ExecMem, "card table expansion");
   68.30      // Use new_end_aligned (as opposed to new_end_for_commit) because
   68.31      // the cur_committed region may include the guard region.
   68.32      } else if (new_end_aligned < cur_committed.end()) {
   68.33 @@ -418,7 +412,7 @@
   68.34    }
   68.35    // Touch the last card of the covered region to show that it
   68.36    // is committed (or SEGV).
   68.37 -  debug_only(*byte_for(_covered[ind].last());)
   68.38 +  debug_only((void) (*byte_for(_covered[ind].last()));)
   68.39    debug_only(verify_guard();)
   68.40  }
   68.41  
    69.1 --- a/src/share/vm/memory/sharedHeap.cpp	Mon Jun 24 14:27:24 2013 -0700
    69.2 +++ b/src/share/vm/memory/sharedHeap.cpp	Tue Jun 25 12:46:21 2013 -0700
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -47,7 +47,6 @@
   69.11    SH_PS_SystemDictionary_oops_do,
   69.12    SH_PS_ClassLoaderDataGraph_oops_do,
   69.13    SH_PS_jvmti_oops_do,
   69.14 -  SH_PS_StringTable_oops_do,
   69.15    SH_PS_CodeCache_oops_do,
   69.16    // Leave this one last.
   69.17    SH_PS_NumElements
   69.18 @@ -127,6 +126,8 @@
   69.19  {
   69.20    if (_active) {
   69.21      outer->change_strong_roots_parity();
   69.22 +    // Zero the claimed high water mark in the StringTable
   69.23 +    StringTable::clear_parallel_claimed_index();
   69.24    }
   69.25  }
   69.26  
   69.27 @@ -154,14 +155,16 @@
   69.28    // Global (strong) JNI handles
   69.29    if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do))
   69.30      JNIHandles::oops_do(roots);
   69.31 +
   69.32    // All threads execute this; the individual threads are task groups.
   69.33    CLDToOopClosure roots_from_clds(roots);
   69.34    CLDToOopClosure* roots_from_clds_p = (is_scavenging ? NULL : &roots_from_clds);
   69.35 -  if (ParallelGCThreads > 0) {
   69.36 -    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots);
   69.37 +  if (CollectedHeap::use_parallel_gc_threads()) {
   69.38 +    Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots);
   69.39    } else {
   69.40      Threads::oops_do(roots, roots_from_clds_p, code_roots);
   69.41    }
   69.42 +
   69.43    if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do))
   69.44      ObjectSynchronizer::oops_do(roots);
   69.45    if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do))
   69.46 @@ -189,8 +192,12 @@
   69.47      }
   69.48    }
   69.49  
   69.50 -  if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) {
   69.51 -    if (so & SO_Strings) {
   69.52 +  // All threads execute the following. Specific chunks of buckets
   69.53 +  // from the StringTable are the individual tasks.
   69.54 +  if (so & SO_Strings) {
   69.55 +    if (CollectedHeap::use_parallel_gc_threads()) {
   69.56 +      StringTable::possibly_parallel_oops_do(roots);
   69.57 +    } else {
   69.58        StringTable::oops_do(roots);
   69.59      }
   69.60    }
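
Taken with the symbolTable changes, the hunks above establish an ordering contract: the shared claimed index is zeroed exactly once, at the strong-roots parity change, before any worker starts scanning. A sketch of the intended sequence (the driver code around it is assumed):

    // Coordinating thread, when strong-roots parity changes:
    StringTable::clear_parallel_claimed_index();    // reset shared cursor to 0
    // ... GC worker tasks are then started; each worker calls:
    StringTable::possibly_parallel_oops_do(roots);  // claims disjoint chunks

Note also that the StringTable entry was dropped from the strong-roots task enum above: the table is no longer one claimable task but a range of chunk-sized tasks shared by all workers.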
    70.1 --- a/src/share/vm/memory/universe.cpp	Mon Jun 24 14:27:24 2013 -0700
    70.2 +++ b/src/share/vm/memory/universe.cpp	Tue Jun 25 12:46:21 2013 -0700
    70.3 @@ -531,7 +531,9 @@
    70.4    if (vt) vt->initialize_vtable(false, CHECK);
    70.5    if (ko->oop_is_instance()) {
    70.6      InstanceKlass* ik = (InstanceKlass*)ko;
    70.7 -    for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) {
    70.8 +    for (KlassHandle s_h(THREAD, ik->subklass());
    70.9 +         s_h() != NULL;
   70.10 +         s_h = KlassHandle(THREAD, s_h()->next_sibling())) {
   70.11        reinitialize_vtable_of(s_h, CHECK);
   70.12      }
   70.13    }
    71.1 --- a/src/share/vm/opto/c2_globals.hpp	Mon Jun 24 14:27:24 2013 -0700
    71.2 +++ b/src/share/vm/opto/c2_globals.hpp	Tue Jun 25 12:46:21 2013 -0700
    71.3 @@ -1,5 +1,5 @@
    71.4  /*
    71.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    71.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    71.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    71.8   *
    71.9   * This code is free software; you can redistribute it and/or modify it
   71.10 @@ -406,10 +406,10 @@
   71.11    develop(intx, WarmCallMaxSize, 999999,                                    \
   71.12            "size of the largest inlinable method")                           \
   71.13                                                                              \
   71.14 -  product(intx, MaxNodeLimit, 65000,                                        \
   71.15 +  product(intx, MaxNodeLimit, 80000,                                        \
   71.16            "Maximum number of nodes")                                        \
   71.17                                                                              \
   71.18 -  product(intx, NodeLimitFudgeFactor, 1000,                                 \
   71.19 +  product(intx, NodeLimitFudgeFactor, 2000,                                 \
   71.20            "Fudge Factor for certain optimizations")                         \
   71.21                                                                              \
   71.22    product(bool, UseJumpTables, true,                                        \
    72.1 --- a/src/share/vm/opto/chaitin.cpp	Mon Jun 24 14:27:24 2013 -0700
    72.2 +++ b/src/share/vm/opto/chaitin.cpp	Tue Jun 25 12:46:21 2013 -0700
    72.3 @@ -1,5 +1,5 @@
    72.4  /*
    72.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
    72.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
    72.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    72.8   *
    72.9   * This code is free software; you can redistribute it and/or modify it
   72.10 @@ -435,6 +435,9 @@
   72.11      // Insert un-coalesced copies.  Visit all Phis.  Where inputs to a Phi do
   72.12      // not match the Phi itself, insert a copy.
   72.13      coalesce.insert_copies(_matcher);
   72.14 +    if (C->failing()) {
   72.15 +      return;
   72.16 +    }
   72.17    }
   72.18  
   72.19    // After aggressive coalesce, attempt a first cut at coloring.
    73.1 --- a/src/share/vm/opto/coalesce.cpp	Mon Jun 24 14:27:24 2013 -0700
    73.2 +++ b/src/share/vm/opto/coalesce.cpp	Tue Jun 25 12:46:21 2013 -0700
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -240,6 +240,8 @@
   73.11    _unique = C->unique();
   73.12  
   73.13    for( uint i=0; i<_phc._cfg._num_blocks; i++ ) {
   73.14 +    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
   73.15 +    if (C->failing()) return;
   73.16      Block *b = _phc._cfg._blocks[i];
   73.17      uint cnt = b->num_preds();  // Number of inputs to the Phi
   73.18  
    74.1 --- a/src/share/vm/opto/matcher.cpp	Mon Jun 24 14:27:24 2013 -0700
    74.2 +++ b/src/share/vm/opto/matcher.cpp	Tue Jun 25 12:46:21 2013 -0700
    74.3 @@ -985,6 +985,8 @@
    74.4    mstack.push(n, Visit, NULL, -1);  // set NULL as parent to indicate root
    74.5  
    74.6    while (mstack.is_nonempty()) {
    74.7 +    C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions");
    74.8 +    if (C->failing()) return NULL;
    74.9      n = mstack.node();          // Leave node on stack
   74.10      Node_State nstate = mstack.state();
   74.11      if (nstate == Visit) {
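
The chaitin.cpp, coalesce.cpp and matcher.cpp hunks above all install the same bailout idiom so that, even with the raised MaxNodeLimit, C2 degrades gracefully instead of aborting when node creation runs away:

    // check_node_count(margin, reason) marks the compile as failed when the
    // live node count plus 'margin' would exceed MaxNodeLimit; callers then
    // poll failing() and unwind immediately:
    C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce");
    if (C->failing()) {
      return;   // abandon this compilation cleanly (execution continues interpreted)
    }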
    75.1 --- a/src/share/vm/opto/memnode.cpp	Mon Jun 24 14:27:24 2013 -0700
    75.2 +++ b/src/share/vm/opto/memnode.cpp	Tue Jun 25 12:46:21 2013 -0700
    75.3 @@ -2930,7 +2930,9 @@
    75.4  Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
    75.5    if (remove_dead_region(phase, can_reshape)) return this;
    75.6    // Don't bother trying to transform a dead node
    75.7 -  if (in(0) && in(0)->is_top())  return NULL;
    75.8 +  if (in(0) && in(0)->is_top()) {
    75.9 +    return NULL;
   75.10 +  }
   75.11  
   75.12    // Eliminate volatile MemBars for scalar replaced objects.
   75.13    if (can_reshape && req() == (Precedent+1)) {
   75.14 @@ -2939,6 +2941,14 @@
   75.15      if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) {
   75.16        // Volatile field loads and stores.
   75.17        Node* my_mem = in(MemBarNode::Precedent);
   75.18 +      // The MemBarAcquire may keep an unused LoadNode alive through the Precedent edge
   75.19 +      if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) {
   75.20 +        assert(my_mem->unique_out() == this, "sanity");
   75.21 +        phase->hash_delete(this);
   75.22 +        del_req(Precedent);
   75.23 +        phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later
   75.24 +        my_mem = NULL;
   75.25 +      }
   75.26        if (my_mem != NULL && my_mem->is_Mem()) {
   75.27          const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr();
   75.28          // Check for scalar replaced object reference.
   75.29 @@ -4384,7 +4394,7 @@
   75.30    }
   75.31  }
   75.32  #else // !ASSERT
   75.33 -#define verify_memory_slice(m,i,n) (0)  // PRODUCT version is no-op
   75.34 +#define verify_memory_slice(m,i,n) (void)(0)  // PRODUCT version is no-op
   75.35  #endif
   75.36  
   75.37  
    76.1 --- a/src/share/vm/prims/forte.cpp	Mon Jun 24 14:27:24 2013 -0700
    76.2 +++ b/src/share/vm/prims/forte.cpp	Tue Jun 25 12:46:21 2013 -0700
    76.3 @@ -619,7 +619,7 @@
    76.4                              void* null_argument_3);
    76.5  #pragma weak collector_func_load
    76.6  #define collector_func_load(x0,x1,x2,x3,x4,x5,x6) \
    76.7 -        ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),0 : 0 )
    76.8 +        ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),(void)0 : (void)0 )
    76.9  #endif // __APPLE__
   76.10  #endif // !_WINDOWS
   76.11  
    77.1 --- a/src/share/vm/prims/jvm.cpp	Mon Jun 24 14:27:24 2013 -0700
    77.2 +++ b/src/share/vm/prims/jvm.cpp	Tue Jun 25 12:46:21 2013 -0700
    77.3 @@ -3310,24 +3310,10 @@
    77.4  JVM_END
    77.5  
    77.6  
    77.7 -// Utility object for collecting method holders walking down the stack
    77.8 -class KlassLink: public ResourceObj {
    77.9 - public:
   77.10 -  KlassHandle klass;
   77.11 -  KlassLink*  next;
   77.12 -
   77.13 -  KlassLink(KlassHandle k) { klass = k; next = NULL; }
   77.14 -};
   77.15 -
   77.16 -
   77.17  JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env))
   77.18    JVMWrapper("JVM_GetClassContext");
   77.19    ResourceMark rm(THREAD);
   77.20    JvmtiVMObjectAllocEventCollector oam;
   77.21 -  // Collect linked list of (handles to) method holders
   77.22 -  KlassLink* first = NULL;
   77.23 -  KlassLink* last  = NULL;
   77.24 -  int depth = 0;
   77.25    vframeStream vfst(thread);
   77.26  
   77.27    if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) {
   77.28 @@ -3341,32 +3327,23 @@
   77.29    }
   77.30  
   77.31    // Collect method holders
   77.32 +  GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>();
   77.33    for (; !vfst.at_end(); vfst.security_next()) {
   77.34      Method* m = vfst.method();
   77.35      // Native frames are not returned
   77.36      if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) {
   77.37        Klass* holder = m->method_holder();
   77.38        assert(holder->is_klass(), "just checking");
   77.39 -      depth++;
   77.40 -      KlassLink* l = new KlassLink(KlassHandle(thread, holder));
   77.41 -      if (first == NULL) {
   77.42 -        first = last = l;
   77.43 -      } else {
   77.44 -        last->next = l;
   77.45 -        last = l;
   77.46 -      }
   77.47 +      klass_array->append(holder);
   77.48      }
   77.49    }
   77.50  
   77.51    // Create result array of type [Ljava/lang/Class;
   77.52 -  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL);
   77.53 +  objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL);
   77.54    // Fill in mirrors corresponding to method holders
   77.55 -  int index = 0;
   77.56 -  while (first != NULL) {
   77.57 -    result->obj_at_put(index++, first->klass()->java_mirror());
   77.58 -    first = first->next;
   77.59 +  for (int i = 0; i < klass_array->length(); i++) {
   77.60 +    result->obj_at_put(i, klass_array->at(i)->java_mirror());
   77.61    }
   77.62 -  assert(index == depth, "just checking");
   77.63  
   77.64    return (jobjectArray) JNIHandles::make_local(env, result);
   77.65  JVM_END
    78.1 --- a/src/share/vm/prims/jvmti.xml	Mon Jun 24 14:27:24 2013 -0700
    78.2 +++ b/src/share/vm/prims/jvmti.xml	Tue Jun 25 12:46:21 2013 -0700
    78.3 @@ -1897,7 +1897,7 @@
    78.4  	  </description>
    78.5  	</param>
    78.6  	<param id="monitor_info_ptr">
    78.7 -	  <allocbuf outcount="owned_monitor_depth_count_ptr">
    78.8 +	  <allocbuf outcount="monitor_info_count_ptr">
    78.9              <struct>jvmtiMonitorStackDepthInfo</struct>
   78.10            </allocbuf>
   78.11  	  <description>
    79.1 --- a/src/share/vm/prims/whitebox.cpp	Mon Jun 24 14:27:24 2013 -0700
    79.2 +++ b/src/share/vm/prims/whitebox.cpp	Tue Jun 25 12:46:21 2013 -0700
    79.3 @@ -159,7 +159,7 @@
    79.4  
    79.5  
    79.6  WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
    79.7 -  os::commit_memory((char *)(uintptr_t)addr, size);
    79.8 +  os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem);
    79.9    MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest);
   79.10  WB_END
   79.11  
    80.1 --- a/src/share/vm/runtime/arguments.cpp	Mon Jun 24 14:27:24 2013 -0700
    80.2 +++ b/src/share/vm/runtime/arguments.cpp	Tue Jun 25 12:46:21 2013 -0700
    80.3 @@ -1566,6 +1566,15 @@
    80.4    return result;
    80.5  }
    80.6  
    80.7 +void Arguments::set_heap_base_min_address() {
    80.8 +  if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) {
    80.9 +    // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86.
   80.10 +    // G1 currently needs a lot of C-heap, so on Solaris we have to give G1
   80.11 +    // some extra space for the C-heap compared to other collectors.
   80.12 +    FLAG_SET_ERGO(uintx, HeapBaseMinAddress, 1*G);
   80.13 +  }
   80.14 +}
   80.15 +
   80.16  void Arguments::set_heap_size() {
   80.17    if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) {
   80.18      // Deprecated flag
   80.19 @@ -1885,21 +1894,6 @@
   80.20    // Note: Needs platform-dependent factoring.
   80.21    bool status = true;
   80.22  
   80.23 -#if ( (defined(COMPILER2) && defined(SPARC)))
   80.24 -  // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init
   80.25 -  // on sparc doesn't require generation of a stub as is the case on, e.g.,
   80.26 -  // x86.  Normally, VM_Version_init must be called from init_globals in
   80.27 -  // init.cpp, which is called by the initial java thread *after* arguments
   80.28 -  // have been parsed.  VM_Version_init gets called twice on sparc.
   80.29 -  extern void VM_Version_init();
   80.30 -  VM_Version_init();
   80.31 -  if (!VM_Version::has_v9()) {
   80.32 -    jio_fprintf(defaultStream::error_stream(),
   80.33 -                "V8 Machine detected, Server requires V9\n");
   80.34 -    status = false;
   80.35 -  }
   80.36 -#endif /* COMPILER2 && SPARC */
   80.37 -
   80.38    // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product
   80.39    // builds so the cost of stack banging can be measured.
   80.40  #if (defined(PRODUCT) && defined(SOLARIS))
   80.41 @@ -3525,6 +3519,8 @@
   80.42      }
   80.43    }
   80.44  
   80.45 +  set_heap_base_min_address();
   80.46 +
   80.47    // Set heap size based on available physical memory
   80.48    set_heap_size();
   80.49  
    81.1 --- a/src/share/vm/runtime/arguments.hpp	Mon Jun 24 14:27:24 2013 -0700
    81.2 +++ b/src/share/vm/runtime/arguments.hpp	Tue Jun 25 12:46:21 2013 -0700
    81.3 @@ -315,6 +315,8 @@
    81.4    // limits the given memory size by the maximum amount of memory this process is
    81.5    // currently allowed to allocate or reserve.
    81.6    static julong limit_by_allocatable_memory(julong size);
    81.7 +  // Setup HeapBaseMinAddress
    81.8 +  static void set_heap_base_min_address();
    81.9    // Setup heap size
   81.10    static void set_heap_size();
   81.11    // Based on automatic selection criteria, should the
    82.1 --- a/src/share/vm/runtime/os.cpp	Mon Jun 24 14:27:24 2013 -0700
    82.2 +++ b/src/share/vm/runtime/os.cpp	Tue Jun 25 12:46:21 2013 -0700
    82.3 @@ -647,10 +647,13 @@
    82.4  #ifndef ASSERT
    82.5    NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1));
    82.6    NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size));
    82.7 +  MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
    82.8    void* ptr = ::realloc(memblock, size);
    82.9    if (ptr != NULL) {
   82.10 -    MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags,
   82.11 +    tkr.record((address)memblock, (address)ptr, size, memflags,
   82.12       caller == 0 ? CALLER_PC : caller);
   82.13 +  } else {
   82.14 +    tkr.discard();
   82.15    }
   82.16    return ptr;
   82.17  #else
   82.18 @@ -1456,7 +1459,7 @@
   82.19  char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
   82.20    char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   82.21    if (result != NULL) {
   82.22 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   82.23 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
   82.24    }
   82.25  
   82.26    return result;
   82.27 @@ -1466,7 +1469,7 @@
   82.28     MEMFLAGS flags) {
   82.29    char* result = pd_reserve_memory(bytes, addr, alignment_hint);
   82.30    if (result != NULL) {
   82.31 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   82.32 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
   82.33      MemTracker::record_virtual_memory_type((address)result, flags);
   82.34    }
   82.35  
   82.36 @@ -1476,7 +1479,7 @@
   82.37  char* os::attempt_reserve_memory_at(size_t bytes, char* addr) {
   82.38    char* result = pd_attempt_reserve_memory_at(bytes, addr);
   82.39    if (result != NULL) {
   82.40 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   82.41 +    MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC);
   82.42    }
   82.43    return result;
   82.44  }
   82.45 @@ -1503,18 +1506,36 @@
   82.46    return res;
   82.47  }
   82.48  
   82.49 +void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable,
   82.50 +                               const char* mesg) {
   82.51 +  pd_commit_memory_or_exit(addr, bytes, executable, mesg);
   82.52 +  MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC);
   82.53 +}
   82.54 +
   82.55 +void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
   82.56 +                               bool executable, const char* mesg) {
   82.57 +  os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
   82.58 +  MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
   82.59 +}
   82.60 +
   82.61  bool os::uncommit_memory(char* addr, size_t bytes) {
   82.62 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
   82.63    bool res = pd_uncommit_memory(addr, bytes);
   82.64    if (res) {
   82.65 -    MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
   82.66 +    tkr.record((address)addr, bytes);
   82.67 +  } else {
   82.68 +    tkr.discard();
   82.69    }
   82.70    return res;
   82.71  }
   82.72  
   82.73  bool os::release_memory(char* addr, size_t bytes) {
   82.74 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   82.75    bool res = pd_release_memory(addr, bytes);
   82.76    if (res) {
   82.77 -    MemTracker::record_virtual_memory_release((address)addr, bytes);
   82.78 +    tkr.record((address)addr, bytes);
   82.79 +  } else {
   82.80 +    tkr.discard();
   82.81    }
   82.82    return res;
   82.83  }
   82.84 @@ -1525,8 +1546,7 @@
   82.85                             bool allow_exec) {
   82.86    char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
   82.87    if (result != NULL) {
   82.88 -    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   82.89 -    MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC);
   82.90 +    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC);
   82.91    }
   82.92    return result;
   82.93  }
   82.94 @@ -1539,10 +1559,12 @@
   82.95  }
   82.96  
   82.97  bool os::unmap_memory(char *addr, size_t bytes) {
   82.98 +  MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
   82.99    bool result = pd_unmap_memory(addr, bytes);
  82.100    if (result) {
  82.101 -    MemTracker::record_virtual_memory_uncommit((address)addr, bytes);
  82.102 -    MemTracker::record_virtual_memory_release((address)addr, bytes);
  82.103 +    tkr.record((address)addr, bytes);
  82.104 +  } else {
  82.105 +    tkr.discard();
  82.106    }
  82.107    return result;
  82.108  }
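
The recurring shape in the os.cpp hunks above is a record-or-discard tracker: a tracking slot is taken out before the memory operation, then committed on success or explicitly discarded on failure, so NMT never logs an operation that did not actually happen. A minimal standalone sketch of the pattern (hypothetical stand-in types, not the MemTracker API):

    #include <cstddef>

    // Assumed stand-in for the platform primitive.
    static bool pd_uncommit(void* addr, std::size_t bytes) {
      (void)addr; (void)bytes;
      return true;  // pretend success for the sketch
    }

    struct Tracker {  // stand-in for MemTracker::Tracker
      void record(void* addr, std::size_t bytes) { (void)addr; (void)bytes; /* append log entry */ }
      void discard() { /* release the reserved slot without logging */ }
    };

    bool uncommit_memory(void* addr, std::size_t bytes) {
      Tracker tkr;                          // reserve the tracking slot up front
      bool res = pd_uncommit(addr, bytes);  // the operation that may fail
      if (res) {
        tkr.record(addr, bytes);            // success: commit the log entry
      } else {
        tkr.discard();                      // failure: leave no trace
      }
      return res;
    }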
    83.1 --- a/src/share/vm/runtime/os.hpp	Mon Jun 24 14:27:24 2013 -0700
    83.2 +++ b/src/share/vm/runtime/os.hpp	Tue Jun 25 12:46:21 2013 -0700
    83.3 @@ -78,6 +78,10 @@
    83.4    CriticalPriority = 11      // Critical thread priority
    83.5  };
    83.6  
    83.7 +// Executable parameter flag for os::commit_memory() and
    83.8 +// os::commit_memory_or_exit().
    83.9 +const bool ExecMem = true;
   83.10 +
   83.11  // Typedef for structured exception handling support
   83.12  typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread);
   83.13  
   83.14 @@ -104,9 +108,16 @@
   83.15    static char*  pd_attempt_reserve_memory_at(size_t bytes, char* addr);
   83.16    static void   pd_split_reserved_memory(char *base, size_t size,
   83.17                                        size_t split, bool realloc);
   83.18 -  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable = false);
   83.19 +  static bool   pd_commit_memory(char* addr, size_t bytes, bool executable);
   83.20    static bool   pd_commit_memory(char* addr, size_t size, size_t alignment_hint,
   83.21 -                              bool executable = false);
   83.22 +                                 bool executable);
   83.23 +  // A variant of pd_commit_memory() that either succeeds or calls
   83.24 +  // vm_exit_out_of_memory() with the specified mesg.
   83.25 +  static void   pd_commit_memory_or_exit(char* addr, size_t bytes,
   83.26 +                                         bool executable, const char* mesg);
   83.27 +  static void   pd_commit_memory_or_exit(char* addr, size_t size,
   83.28 +                                         size_t alignment_hint,
   83.29 +                                         bool executable, const char* mesg);
   83.30    static bool   pd_uncommit_memory(char* addr, size_t bytes);
   83.31    static bool   pd_release_memory(char* addr, size_t bytes);
   83.32  
   83.33 @@ -261,9 +272,16 @@
   83.34    static char*  attempt_reserve_memory_at(size_t bytes, char* addr);
   83.35    static void   split_reserved_memory(char *base, size_t size,
   83.36                                        size_t split, bool realloc);
   83.37 -  static bool   commit_memory(char* addr, size_t bytes, bool executable = false);
   83.38 +  static bool   commit_memory(char* addr, size_t bytes, bool executable);
   83.39    static bool   commit_memory(char* addr, size_t size, size_t alignment_hint,
   83.40 -                              bool executable = false);
   83.41 +                              bool executable);
   83.42 +  // A variant of commit_memory() that either succeeds or calls
   83.43 +  // vm_exit_out_of_memory() with the specified mesg.
   83.44 +  static void   commit_memory_or_exit(char* addr, size_t bytes,
   83.45 +                                      bool executable, const char* mesg);
   83.46 +  static void   commit_memory_or_exit(char* addr, size_t size,
   83.47 +                                      size_t alignment_hint,
   83.48 +                                      bool executable, const char* mesg);
   83.49    static bool   uncommit_memory(char* addr, size_t bytes);
   83.50    static bool   release_memory(char* addr, size_t bytes);
   83.51  
    84.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Mon Jun 24 14:27:24 2013 -0700
    84.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Jun 25 12:46:21 2013 -0700
    84.3 @@ -2731,7 +2731,7 @@
    84.4    // ResourceObject, so do not put any ResourceMarks in here.
    84.5    char *s = sig->as_C_string();
    84.6    int len = (int)strlen(s);
    84.7 -  *s++; len--;                  // Skip opening paren
    84.8 +  s++; len--;                   // Skip opening paren
    84.9    char *t = s+len;
   84.10    while( *(--t) != ')' ) ;      // Find close paren
   84.11  
    85.1 --- a/src/share/vm/runtime/virtualspace.cpp	Mon Jun 24 14:27:24 2013 -0700
    85.2 +++ b/src/share/vm/runtime/virtualspace.cpp	Tue Jun 25 12:46:21 2013 -0700
    85.3 @@ -533,11 +533,13 @@
    85.4             lower_high() + lower_needs <= lower_high_boundary(),
    85.5             "must not expand beyond region");
    85.6      if (!os::commit_memory(lower_high(), lower_needs, _executable)) {
    85.7 -      debug_only(warning("os::commit_memory failed"));
    85.8 +      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
    85.9 +                         ", lower_needs=" SIZE_FORMAT ", %d) failed",
   85.10 +                         lower_high(), lower_needs, _executable);)
   85.11        return false;
   85.12      } else {
   85.13        _lower_high += lower_needs;
   85.14 -     }
   85.15 +    }
   85.16    }
   85.17    if (middle_needs > 0) {
   85.18      assert(lower_high_boundary() <= middle_high() &&
   85.19 @@ -545,7 +547,10 @@
   85.20             "must not expand beyond region");
   85.21      if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(),
   85.22                             _executable)) {
   85.23 -      debug_only(warning("os::commit_memory failed"));
   85.24 +      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
   85.25 +                         ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT
   85.26 +                         ", %d) failed", middle_high(), middle_needs,
   85.27 +                         middle_alignment(), _executable);)
   85.28        return false;
   85.29      }
   85.30      _middle_high += middle_needs;
   85.31 @@ -555,7 +560,9 @@
   85.32             upper_high() + upper_needs <= upper_high_boundary(),
   85.33             "must not expand beyond region");
   85.34      if (!os::commit_memory(upper_high(), upper_needs, _executable)) {
   85.35 -      debug_only(warning("os::commit_memory failed"));
   85.36 +      debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT
   85.37 +                         ", upper_needs=" SIZE_FORMAT ", %d) failed",
   85.38 +                         upper_high(), upper_needs, _executable);)
   85.39        return false;
   85.40      } else {
   85.41        _upper_high += upper_needs;
    86.1 --- a/src/share/vm/services/diagnosticArgument.cpp	Mon Jun 24 14:27:24 2013 -0700
    86.2 +++ b/src/share/vm/services/diagnosticArgument.cpp	Tue Jun 25 12:46:21 2013 -0700
    86.3 @@ -247,7 +247,7 @@
    86.4    } else {
    86.5      _value._time = 0;
    86.6      _value._nanotime = 0;
    86.7 -    strcmp(_value._unit, "ns");
    86.8 +    strcpy(_value._unit, "ns");
    86.9    }
   86.10  }
   86.11  
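The one-character-family fix above is a real bug of exactly the class -Wunused-value exists to catch:

    // strcmp(_value._unit, "ns");  // compares against "ns" and discards the result
    // strcpy(_value._unit, "ns");  // actually stores the default unit "ns"
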
    87.1 --- a/src/share/vm/services/memBaseline.cpp	Mon Jun 24 14:27:24 2013 -0700
    87.2 +++ b/src/share/vm/services/memBaseline.cpp	Tue Jun 25 12:46:21 2013 -0700
    87.3 @@ -130,7 +130,7 @@
    87.4        if (malloc_ptr->is_arena_record()) {
    87.5          // see if arena memory record present
    87.6          MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
    87.7 -        if (next_malloc_ptr->is_arena_memory_record()) {
    87.8 +        if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) {
    87.9            assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
   87.10               "Arena records do not match");
   87.11            size = next_malloc_ptr->size();
    88.1 --- a/src/share/vm/services/memPtr.hpp	Mon Jun 24 14:27:24 2013 -0700
    88.2 +++ b/src/share/vm/services/memPtr.hpp	Tue Jun 25 12:46:21 2013 -0700
    88.3 @@ -1,5 +1,5 @@
    88.4  /*
    88.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    88.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    88.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    88.8   *
    88.9   * This code is free software; you can redistribute it and/or modify it
   88.10 @@ -457,9 +457,8 @@
   88.11   public:
   88.12    SeqMemPointerRecord(): _seq(0){ }
   88.13  
   88.14 -  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size)
   88.15 -    : MemPointerRecord(addr, flags, size) {
   88.16 -    _seq = SequenceGenerator::next();
   88.17 +  SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq)
   88.18 +    : MemPointerRecord(addr, flags, size), _seq(seq)  {
   88.19    }
   88.20  
   88.21    SeqMemPointerRecord(const SeqMemPointerRecord& copy_from)
   88.22 @@ -488,8 +487,8 @@
   88.23    SeqMemPointerRecordEx(): _seq(0) { }
   88.24  
   88.25    SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size,
   88.26 -    address pc): MemPointerRecordEx(addr, flags, size, pc) {
   88.27 -    _seq = SequenceGenerator::next();
   88.28 +    jint seq, address pc):
   88.29 +    MemPointerRecordEx(addr, flags, size, pc), _seq(seq)  {
   88.30    }
   88.31  
   88.32    SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
    89.1 --- a/src/share/vm/services/memRecorder.cpp	Mon Jun 24 14:27:24 2013 -0700
    89.2 +++ b/src/share/vm/services/memRecorder.cpp	Tue Jun 25 12:46:21 2013 -0700
    89.3 @@ -1,5 +1,5 @@
    89.4  /*
    89.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    89.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    89.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    89.8   *
    89.9   * This code is free software; you can redistribute it and/or modify it
   89.10 @@ -69,10 +69,11 @@
   89.11  
   89.12    if (_pointer_records != NULL) {
   89.13      // record itself
   89.14 +    address pc = CURRENT_PC;
   89.15      record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
   89.16 -        sizeof(MemRecorder), CALLER_PC);
   89.17 +        sizeof(MemRecorder), SequenceGenerator::next(), pc);
   89.18      record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder),
   89.19 -        _pointer_records->instance_size(),CURRENT_PC);
   89.20 +        _pointer_records->instance_size(), SequenceGenerator::next(), pc);
   89.21    }
   89.22  }
   89.23  
   89.24 @@ -116,7 +117,8 @@
   89.25    }
   89.26  }
   89.27  
   89.28 -bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) {
   89.29 +bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) {
   89.30 +  assert(seq > 0, "No sequence number");
   89.31  #ifdef ASSERT
   89.32    if (MemPointerRecord::is_virtual_memory_record(flags)) {
   89.33      assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record");
   89.34 @@ -133,11 +135,11 @@
   89.35  #endif
   89.36  
   89.37    if (MemTracker::track_callsite()) {
   89.38 -    SeqMemPointerRecordEx ap(p, flags, size, pc);
   89.39 +    SeqMemPointerRecordEx ap(p, flags, size, seq, pc);
   89.40      debug_only(check_dup_seq(ap.seq());)
   89.41      return _pointer_records->append(&ap);
   89.42    } else {
   89.43 -    SeqMemPointerRecord ap(p, flags, size);
   89.44 +    SeqMemPointerRecord ap(p, flags, size, seq);
   89.45      debug_only(check_dup_seq(ap.seq());)
   89.46      return _pointer_records->append(&ap);
   89.47    }
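
With the jint seq parameter threaded through record(), the recorder no longer decides ordering; the caller does. That is what lets the recorder's self-registration above capture one pc and draw two consecutive sequence numbers, and it is the hook the Tracker in memTracker.cpp below uses to reserve a number before a memory operation happens. A condensed sketch of the calling convention (simplified; the real calls run under the locking described in memTracker.cpp):

    address pc = CURRENT_PC;                    // capture once, share across records
    jint seq1 = SequenceGenerator::next();      // drawn first ...
    jint seq2 = SequenceGenerator::next();      // ... so seq2 always sorts after seq1
    recorder->record(addr1, flags1, size1, seq1, pc);
    recorder->record(addr2, flags2, size2, seq2, pc);
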
    90.1 --- a/src/share/vm/services/memRecorder.hpp	Mon Jun 24 14:27:24 2013 -0700
    90.2 +++ b/src/share/vm/services/memRecorder.hpp	Tue Jun 25 12:46:21 2013 -0700
    90.3 @@ -1,5 +1,5 @@
    90.4  /*
    90.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    90.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    90.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    90.8   *
    90.9   * This code is free software; you can redistribute it and/or modify it
   90.10 @@ -220,7 +220,7 @@
   90.11    ~MemRecorder();
   90.12  
   90.13    // record a memory operation
   90.14 -  bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0);
   90.15 +  bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0);
   90.16  
   90.17    // linked list support
   90.18    inline void set_next(MemRecorder* rec) {
    91.1 --- a/src/share/vm/services/memReporter.cpp	Mon Jun 24 14:27:24 2013 -0700
    91.2 +++ b/src/share/vm/services/memReporter.cpp	Tue Jun 25 12:46:21 2013 -0700
    91.3 @@ -190,17 +190,18 @@
    91.4    while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) {
    91.5      if (prev_malloc_callsite == NULL ||
    91.6          cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) {
    91.7 +      // this is a new callsite
    91.8        _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
    91.9          amount_in_current_scale(cur_malloc_callsite->amount()),
   91.10          cur_malloc_callsite->count(),
   91.11          diff_in_current_scale(cur_malloc_callsite->amount(), 0),
   91.12          diff(cur_malloc_callsite->count(), 0));
   91.13        cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next();
   91.14 -    } else if (prev_malloc_callsite == NULL ||
   91.15 +    } else if (cur_malloc_callsite == NULL ||
   91.16                 cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) {
   91.17 -      _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(),
   91.18 -        amount_in_current_scale(prev_malloc_callsite->amount()),
   91.19 -        prev_malloc_callsite->count(),
   91.20 +      // this callsite is already gone
   91.21 +      _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(),
   91.22 +        amount_in_current_scale(0), 0,
   91.23          diff_in_current_scale(0, prev_malloc_callsite->amount()),
   91.24          diff(0, prev_malloc_callsite->count()));
   91.25        prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next();
   91.26 @@ -222,6 +223,7 @@
   91.27    VMCallsitePointer*          prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current();
   91.28    while (cur_vm_callsite != NULL || prev_vm_callsite != NULL) {
   91.29      if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) {
   91.30 +      // this is a new callsite
   91.31        _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(),
   91.32          amount_in_current_scale(cur_vm_callsite->reserved_amount()),
   91.33          amount_in_current_scale(cur_vm_callsite->committed_amount()),
   91.34 @@ -229,9 +231,10 @@
   91.35          diff_in_current_scale(cur_vm_callsite->committed_amount(), 0));
   91.36        cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next();
   91.37      } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) {
   91.38 +      // this callsite is already gone
   91.39        _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(),
   91.40 -        amount_in_current_scale(prev_vm_callsite->reserved_amount()),
   91.41 -        amount_in_current_scale(prev_vm_callsite->committed_amount()),
   91.42 +        amount_in_current_scale(0),
   91.43 +        amount_in_current_scale(0),
   91.44          diff_in_current_scale(0, prev_vm_callsite->reserved_amount()),
   91.45          diff_in_current_scale(0, prev_vm_callsite->committed_amount()));
   91.46        prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
    92.1 --- a/src/share/vm/services/memTracker.cpp	Mon Jun 24 14:27:24 2013 -0700
    92.2 +++ b/src/share/vm/services/memTracker.cpp	Tue Jun 25 12:46:21 2013 -0700
    92.3 @@ -69,6 +69,7 @@
    92.4  volatile jint                   MemTracker::_pooled_recorder_count = 0;
    92.5  volatile unsigned long          MemTracker::_processing_generation = 0;
    92.6  volatile bool                   MemTracker::_worker_thread_idle = false;
    92.7 +volatile jint                   MemTracker::_pending_op_count = 0;
    92.8  volatile bool                   MemTracker::_slowdown_calling_thread = false;
    92.9  debug_only(intx                 MemTracker::_main_thread_tid = 0;)
   92.10  NOT_PRODUCT(volatile jint       MemTracker::_pending_recorder_count = 0;)
   92.11 @@ -337,92 +338,14 @@
   92.12    Atomic::inc(&_pooled_recorder_count);
   92.13  }
   92.14  
   92.15 -/*
   92.16 - * This is the most important method in whole nmt implementation.
   92.17 - *
   92.18 - * Create a memory record.
   92.19 - * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM
   92.20 - *    still in single thread mode.
   92.21 - * 2. For all threads other than JavaThread, ThreadCritical is needed
   92.22 - *    to write to recorders to global recorder.
   92.23 - * 3. For JavaThreads that are not longer visible by safepoint, also
   92.24 - *    need to take ThreadCritical and records are written to global
   92.25 - *    recorders, since these threads are NOT walked by Threads.do_thread().
   92.26 - * 4. JavaThreads that are running in native state, have to transition
   92.27 - *    to VM state before writing to per-thread recorders.
   92.28 - * 5. JavaThreads that are running in VM state do not need any lock and
   92.29 - *    records are written to per-thread recorders.
   92.30 - * 6. For a thread has yet to attach VM 'Thread', they need to take
   92.31 - *    ThreadCritical to write to global recorder.
   92.32 - *
   92.33 - *    Important note:
   92.34 - *    NO LOCK should be taken inside ThreadCritical lock !!!
   92.35 - */
   92.36 -void MemTracker::create_memory_record(address addr, MEMFLAGS flags,
   92.37 -    size_t size, address pc, Thread* thread) {
   92.38 -  assert(addr != NULL, "Sanity check");
   92.39 -  if (!shutdown_in_progress()) {
   92.40 -    // single thread, we just write records direct to global recorder,'
   92.41 -    // with any lock
   92.42 -    if (_state == NMT_bootstrapping_single_thread) {
   92.43 -      assert(_main_thread_tid == os::current_thread_id(), "wrong thread");
   92.44 -      thread = NULL;
   92.45 -    } else {
   92.46 -      if (thread == NULL) {
   92.47 -          // don't use Thread::current(), since it is possible that
   92.48 -          // the calling thread has yet to attach to VM 'Thread',
   92.49 -          // which will result assertion failure
   92.50 -          thread = ThreadLocalStorage::thread();
   92.51 -      }
   92.52 -    }
   92.53 -
   92.54 -    if (thread != NULL) {
   92.55 -      // slow down all calling threads except NMT worker thread, so it
   92.56 -      // can catch up.
   92.57 -      if (_slowdown_calling_thread && thread != _worker_thread) {
   92.58 -        os::yield_all();
   92.59 -      }
   92.60 -
   92.61 -      if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) {
   92.62 -        JavaThread*      java_thread = (JavaThread*)thread;
   92.63 -        JavaThreadState  state = java_thread->thread_state();
   92.64 -        if (SafepointSynchronize::safepoint_safe(java_thread, state)) {
   92.65 -          // JavaThreads that are safepoint safe, can run through safepoint,
   92.66 -          // so ThreadCritical is needed to ensure no threads at safepoint create
   92.67 -          // new records while the records are being gathered and the sequence number is changing
   92.68 -          ThreadCritical tc;
   92.69 -          create_record_in_recorder(addr, flags, size, pc, java_thread);
   92.70 -        } else {
   92.71 -          create_record_in_recorder(addr, flags, size, pc, java_thread);
   92.72 -        }
   92.73 -      } else {
   92.74 -        // other threads, such as worker and watcher threads, etc. need to
   92.75 -        // take ThreadCritical to write to global recorder
   92.76 -        ThreadCritical tc;
   92.77 -        create_record_in_recorder(addr, flags, size, pc, NULL);
   92.78 -      }
   92.79 -    } else {
   92.80 -      if (_state == NMT_bootstrapping_single_thread) {
   92.81 -        // single thread, no lock needed
   92.82 -        create_record_in_recorder(addr, flags, size, pc, NULL);
   92.83 -      } else {
   92.84 -        // for thread has yet to attach VM 'Thread', we can not use VM mutex.
   92.85 -        // use native thread critical instead
   92.86 -        ThreadCritical tc;
   92.87 -        create_record_in_recorder(addr, flags, size, pc, NULL);
   92.88 -      }
   92.89 -    }
   92.90 -  }
   92.91 -}
   92.92 -
   92.93  // write a record to proper recorder. No lock can be taken from this method
   92.94  // down.
   92.95 -void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags,
   92.96 -    size_t size, address pc, JavaThread* thread) {
   92.97 +void MemTracker::write_tracking_record(address addr, MEMFLAGS flags,
   92.98 +    size_t size, jint seq, address pc, JavaThread* thread) {
   92.99  
  92.100      MemRecorder* rc = get_thread_recorder(thread);
  92.101      if (rc != NULL) {
  92.102 -      rc->record(addr, flags, size, pc);
  92.103 +      rc->record(addr, flags, size, seq, pc);
  92.104      }
  92.105  }
  92.106  
  92.107 @@ -487,39 +410,43 @@
  92.108          return;
  92.109        }
  92.110      }
  92.111 -    _sync_point_skip_count = 0;
  92.112      {
  92.113        // This method is running at safepoint, with ThreadCritical lock,
  92.114        // it should guarantee that NMT is fully sync-ed.
  92.115        ThreadCritical tc;
  92.116  
  92.117 -      SequenceGenerator::reset();
  92.118 +      // We can NOT execute NMT sync-point if there are pending tracking ops.
  92.119 +      if (_pending_op_count == 0) {
  92.120 +        SequenceGenerator::reset();
  92.121 +        _sync_point_skip_count = 0;
  92.122  
  92.123 -      // walk all JavaThreads to collect recorders
  92.124 -      SyncThreadRecorderClosure stc;
  92.125 -      Threads::threads_do(&stc);
  92.126 +        // walk all JavaThreads to collect recorders
  92.127 +        SyncThreadRecorderClosure stc;
  92.128 +        Threads::threads_do(&stc);
  92.129  
  92.130 -      _thread_count = stc.get_thread_count();
  92.131 -      MemRecorder* pending_recorders = get_pending_recorders();
  92.132 +        _thread_count = stc.get_thread_count();
  92.133 +        MemRecorder* pending_recorders = get_pending_recorders();
  92.134  
  92.135 -      if (_global_recorder != NULL) {
  92.136 -        _global_recorder->set_next(pending_recorders);
  92.137 -        pending_recorders = _global_recorder;
  92.138 -        _global_recorder = NULL;
  92.139 +        if (_global_recorder != NULL) {
  92.140 +          _global_recorder->set_next(pending_recorders);
  92.141 +          pending_recorders = _global_recorder;
  92.142 +          _global_recorder = NULL;
  92.143 +        }
  92.144 +
  92.145 +        // see if NMT has too many outstanding recorder instances, it usually
  92.146 +        // means that worker thread is lagging behind in processing them.
  92.147 +        if (!AutoShutdownNMT) {
  92.148 +          _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
  92.149 +        }
  92.150 +
  92.151 +        // check _worker_thread under lock to avoid race condition
  92.152 +        if (_worker_thread != NULL) {
  92.153 +          _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
  92.154 +        }
  92.155 +        assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
  92.156 +      } else {
  92.157 +        _sync_point_skip_count ++;
  92.158        }
  92.159 -
  92.160 -      // see if NMT has too many outstanding recorder instances, it usually
  92.161 -      // means that worker thread is lagging behind in processing them.
  92.162 -      if (!AutoShutdownNMT) {
  92.163 -        _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count);
  92.164 -      }
  92.165 -
  92.166 -      // check _worker_thread with lock to avoid racing condition
  92.167 -      if (_worker_thread != NULL) {
  92.168 -        _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes());
  92.169 -      }
  92.170 -
  92.171 -      assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point");
  92.172      }
  92.173    }
  92.174  
  92.175 @@ -708,3 +635,243 @@
  92.176  }
  92.177  #endif
  92.178  
  92.179 +
  92.180 +// Tracker Implementation
  92.181 +
  92.182 +/*
  92.183 + * Create a tracker.
  92.184 + * This is a fairly complicated constructor, as it has to make two important decisions:
  92.185 + *   1) Does it need to take ThreadCritical lock to write tracking record
  92.186 + *   2) Does it need to pre-reserve a sequence number for the tracking record
  92.187 + *
  92.188 + * The rules to determine if ThreadCritical is needed:
  92.189 + *   1. When NMT is in single-threaded bootstrapping mode, no lock is needed as the VM
  92.190 + *      is still in single-threaded mode.
  92.191 + *   2. For all threads other than JavaThread, ThreadCritical is needed
  92.192 + *      to write records to the global recorder.
  92.193 + *   3. For JavaThreads that are no longer visible to safepoints, ThreadCritical
  92.194 + *      also needs to be taken and records are written to the global
  92.195 + *      recorder, since these threads are NOT walked by Threads.do_thread().
  92.196 + *   4. JavaThreads that are running in safepoint-safe states do not stop
  92.197 + *      for safepoints, so the ThreadCritical lock should be taken to write
  92.198 + *      memory records.
  92.199 + *   5. JavaThreads that are running in VM state do not need any lock and
  92.200 + *      records are written to per-thread recorders.
  92.201 + *   6. Threads that have yet to attach to a VM 'Thread' need to take
  92.202 + *      ThreadCritical to write to the global recorder.
  92.203 + *
  92.204 + *  The memory operations that need to pre-reserve sequence numbers:
  92.205 + *    The memory operations that "release" memory blocks and that
  92.206 + *    can fail need to pre-reserve a sequence number. They
  92.207 + *    are realloc, uncommit and release.
  92.208 + *
  92.209 + *  The reason for pre-reserving a sequence number is to prevent a race condition:
  92.210 + *    Thread 1                      Thread 2
  92.211 + *    <release>
  92.212 + *                                  <allocate>
  92.213 + *                                  <write allocate record>
  92.214 + *   <write release record>
  92.215 + *   if Thread 2 happens to obtain the memory address Thread 1 just released,
  92.216 + *   then NMT can mistakenly report the memory is free.
  92.217 + *
  92.218 + *  Notably, free() does not need to pre-reserve a sequence number, because the call
  92.219 + *  does not fail, so we can always write the "release" record before the memory is
  92.220 + *  actually freed.
  92.221 + *
  92.222 + *  For realloc, uncommit and release, the following coding pattern should be used:
  92.223 + *
  92.224 + *     MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
  92.225 + *     ptr = ::realloc(...);
  92.226 + *     if (ptr != NULL) {
  92.227 + *       tkr.record(...);
  92.228 + *     } else {
  92.229 + *       tkr.discard();
  92.230 + *     }
  92.231 + *
  92.232 + *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker();
  92.233 + *     if (uncommit(...)) {
  92.234 + *       tkr.record(...);
  92.235 + *     } else {
  92.236 + *       tkr.discard();
  92.237 + *     }
  92.238 + *
  92.239 + *     MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker();
  92.240 + *     if (release(...)) {
  92.241 + *       tkr.record(...);
  92.242 + *     } else {
  92.243 + *       tkr.discard();
  92.244 + *     }
  92.245 + *
  92.246 + * Since a pre-reserved sequence number is only good for the generation in which it is
  92.247 + * acquired, when there is a pending Tracker holding a reserved sequence number, the NMT
  92.248 + * sync-point has to be skipped to prevent the generation from advancing. This is done by
  92.249 + * incrementing and decrementing MemTracker::_pending_op_count; when it is > 0, the NMT sync-point is skipped.
  92.250 + * Not all pre-reservations of sequence numbers increment the pending op count. For JavaThreads
  92.251 + * that honor safepoints, a safepoint can not occur during the memory operation, so the
  92.252 + * pre-reserved sequence number won't cross the generation boundary.
  92.253 + */
  92.254 +MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) {
  92.255 +  _op = NoOp;
  92.256 +  _seq = 0;
  92.257 +  if (MemTracker::is_on()) {
  92.258 +    _java_thread = NULL;
  92.259 +    _op = op;
  92.260 +
  92.261 +    // figure out if ThreadCritical lock is needed to write this operation
  92.262 +    // to MemTracker
  92.263 +    if (MemTracker::is_single_threaded_bootstrap()) {
  92.264 +      thr = NULL;
  92.265 +    } else if (thr == NULL) {
  92.266 +      // don't use Thread::current(), since it is possible that
  92.267 +      // the calling thread has yet to attach to VM 'Thread',
  92.268 +      // which would result in an assertion failure
  92.269 +      thr = ThreadLocalStorage::thread();
  92.270 +    }
  92.271 +
  92.272 +    if (thr != NULL) {
  92.273 +      // Check NMT load
  92.274 +      MemTracker::check_NMT_load(thr);
  92.275 +
  92.276 +      if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) {
  92.277 +        _java_thread = (JavaThread*)thr;
  92.278 +        JavaThreadState  state = _java_thread->thread_state();
  92.279 +        // JavaThreads that are safepoint safe, can run through safepoint,
  92.280 +        // so ThreadCritical is needed to ensure no threads at safepoint create
  92.281 +        // new records while the records are being gathered and the sequence number is changing
  92.282 +        _need_thread_critical_lock =
  92.283 +          SafepointSynchronize::safepoint_safe(_java_thread, state);
  92.284 +      } else {
  92.285 +        _need_thread_critical_lock = true;
  92.286 +      }
  92.287 +    } else {
  92.288 +       _need_thread_critical_lock
  92.289 +         = !MemTracker::is_single_threaded_bootstrap();
  92.290 +    }
  92.291 +
  92.292 +    // see if we need to pre-reserve sequence number for this operation
  92.293 +    if (_op == Realloc || _op == Uncommit || _op == Release) {
  92.294 +      if (_need_thread_critical_lock) {
  92.295 +        ThreadCritical tc;
  92.296 +        MemTracker::inc_pending_op_count();
  92.297 +        _seq = SequenceGenerator::next();
  92.298 +      } else {
  92.299 +        // for the threads that honor safepoints, no safepoint can occur
  92.300 +        // during the lifespan of the tracker, so we don't need to increase
  92.301 +        // pending op count.
  92.302 +        _seq = SequenceGenerator::next();
  92.303 +      }
  92.304 +    }
  92.305 +  }
  92.306 +}
  92.307 +
  92.308 +void MemTracker::Tracker::discard() {
  92.309 +  if (MemTracker::is_on() && _seq != 0) {
  92.310 +    if (_need_thread_critical_lock) {
  92.311 +      ThreadCritical tc;
  92.312 +      MemTracker::dec_pending_op_count();
  92.313 +    }
  92.314 +    _seq = 0;
  92.315 +  }
  92.316 +}
  92.317 +
  92.318 +
  92.319 +void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size,
  92.320 +  MEMFLAGS flags, address pc) {
  92.321 +  assert(old_addr != NULL && new_addr != NULL, "Sanity check");
  92.322 +  assert(_op == Realloc || _op == NoOp, "Wrong call");
  92.323 +  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
  92.324 +    assert(_seq > 0, "Need pre-reserve sequence number");
  92.325 +    if (_need_thread_critical_lock) {
  92.326 +      ThreadCritical tc;
  92.327 +      // free old address, use pre-reserved sequence number
  92.328 +      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
  92.329 +        0, _seq, pc, _java_thread);
  92.330 +      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
  92.331 +        size, SequenceGenerator::next(), pc, _java_thread);
  92.332 +      // decrement MemTracker pending_op_count
  92.333 +      MemTracker::dec_pending_op_count();
  92.334 +    } else {
  92.335 +      // free old address, use pre-reserved sequence number
  92.336 +      MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(),
  92.337 +        0, _seq, pc, _java_thread);
  92.338 +      MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(),
  92.339 +        size, SequenceGenerator::next(), pc, _java_thread);
  92.340 +    }
  92.341 +    _seq = 0;
  92.342 +  }
  92.343 +}
  92.344 +
  92.345 +void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) {
  92.346 +  // OOM already?
  92.347 +  if (addr == NULL) return;
  92.348 +
  92.349 +  if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) {
  92.350 +    bool pre_reserved_seq = (_seq != 0);
  92.351 +    address  pc = CALLER_CALLER_PC;
  92.352 +    MEMFLAGS orig_flags = flags;
  92.353 +
  92.354 +    // OR in the tagging flags
  92.355 +    switch(_op) {
  92.356 +      case Malloc:
  92.357 +        flags |= MemPointerRecord::malloc_tag();
  92.358 +        break;
  92.359 +      case Free:
  92.360 +        flags = MemPointerRecord::free_tag();
  92.361 +        break;
  92.362 +      case Realloc:
  92.363 +        fatal("Use the other Tracker::record()");
  92.364 +        break;
  92.365 +      case Reserve:
  92.366 +      case ReserveAndCommit:
  92.367 +        flags |= MemPointerRecord::virtual_memory_reserve_tag();
  92.368 +        break;
  92.369 +      case Commit:
  92.370 +        flags = MemPointerRecord::virtual_memory_commit_tag();
  92.371 +        break;
  92.372 +      case Type:
  92.373 +        flags |= MemPointerRecord::virtual_memory_type_tag();
  92.374 +        break;
  92.375 +      case Uncommit:
  92.376 +        assert(pre_reserved_seq, "Need pre-reserve sequence number");
  92.377 +        flags = MemPointerRecord::virtual_memory_uncommit_tag();
  92.378 +        break;
  92.379 +      case Release:
  92.380 +        assert(pre_reserved_seq, "Need pre-reserve sequence number");
  92.381 +        flags = MemPointerRecord::virtual_memory_release_tag();
  92.382 +        break;
  92.383 +      case ArenaSize:
  92.384 +        // a bit of a hack here: add a small positive offset to the arena
  92.385 +        // address for its size record, so the size record is sorted
  92.386 +        // right after the arena record.
  92.387 +        flags = MemPointerRecord::arena_size_tag();
  92.388 +        addr += sizeof(void*);
  92.389 +        break;
  92.390 +      case StackRelease:
  92.391 +        flags = MemPointerRecord::virtual_memory_release_tag();
  92.392 +        break;
  92.393 +      default:
  92.394 +        ShouldNotReachHere();
  92.395 +    }
  92.396 +
  92.397 +    // write memory tracking record
  92.398 +    if (_need_thread_critical_lock) {
  92.399 +      ThreadCritical tc;
  92.400 +      if (_seq == 0) _seq = SequenceGenerator::next();
  92.401 +      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
  92.402 +      if (_op == ReserveAndCommit) {
  92.403 +        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
  92.404 +          size, SequenceGenerator::next(), pc, _java_thread);
  92.405 +      }
  92.406 +      if (pre_reserved_seq) MemTracker::dec_pending_op_count();
  92.407 +    } else {
  92.408 +      if (_seq == 0) _seq = SequenceGenerator::next();
  92.409 +      MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread);
  92.410 +      if (_op == ReserveAndCommit) {
  92.411 +        MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(),
  92.412 +          size, SequenceGenerator::next(), pc, _java_thread);
  92.413 +      }
  92.414 +    }
  92.415 +    _seq = 0;
  92.416 +  }
  92.417 +}
  92.418 +
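
Taken together, the Tracker turns the old create_memory_record() into a two-phase protocol at the call site: construct the tracker (possibly reserving a sequence number) before the memory operation, then record() on success or discard() on failure. A hedged sketch of a realloc wrapper driving it, following the pattern documented in the comment block above (simplified; the real call sites live in the allocation code):

    void* tracked_realloc(void* old_ptr, size_t size, MEMFLAGS flags) {
      // Reserve the sequence number before touching memory, so a racing
      // allocation that reuses old_ptr cannot sort ahead of our free record.
      MemTracker::Tracker tkr = MemTracker::get_realloc_tracker();
      void* new_ptr = ::realloc(old_ptr, size);
      if (new_ptr != NULL) {
        tkr.record((address)old_ptr, (address)new_ptr, size, flags);
      } else {
        tkr.discard();   // hand back the reserved sequence number
      }
      return new_ptr;
    }
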
    93.1 --- a/src/share/vm/services/memTracker.hpp	Mon Jun 24 14:27:24 2013 -0700
    93.2 +++ b/src/share/vm/services/memTracker.hpp	Tue Jun 25 12:46:21 2013 -0700
    93.3 @@ -1,5 +1,5 @@
    93.4  /*
    93.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
    93.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
    93.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    93.8   *
    93.9   * This code is free software; you can redistribute it and/or modify it
   93.10 @@ -54,6 +54,18 @@
   93.11        NMT_sequence_overflow  // overflow the sequence number
   93.12     };
   93.13  
   93.14 +  class Tracker {
   93.15 +   public:
   93.16 +    void discard() { }
   93.17 +
   93.18 +    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { }
   93.19 +    void record(address old_addr, address new_addr, size_t size,
   93.20 +      MEMFLAGS flags, address pc = NULL) { }
   93.21 +  };
   93.22 +
   93.23 +  private:
   93.24 +   static Tracker  _tkr;
   93.25 +
   93.26  
   93.27    public:
   93.28     static inline void init_tracking_options(const char* option_line) { }
   93.29 @@ -68,19 +80,18 @@
   93.30     static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
   93.31          address pc = 0, Thread* thread = NULL) { }
   93.32     static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { }
   93.33 -   static inline void record_realloc(address old_addr, address new_addr, size_t size,
   93.34 -        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   93.35     static inline void record_arena_size(address addr, size_t size) { }
   93.36     static inline void record_virtual_memory_reserve(address addr, size_t size,
   93.37 -        address pc = 0, Thread* thread = NULL) { }
   93.38 +        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   93.39 +   static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
   93.40 +        MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { }
   93.41     static inline void record_virtual_memory_commit(address addr, size_t size,
   93.42          address pc = 0, Thread* thread = NULL) { }
   93.43 -   static inline void record_virtual_memory_uncommit(address addr, size_t size,
   93.44 -        Thread* thread = NULL) { }
   93.45 -   static inline void record_virtual_memory_release(address addr, size_t size,
   93.46 -        Thread* thread = NULL) { }
   93.47     static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
   93.48          Thread* thread = NULL) { }
   93.49 +   static inline Tracker get_realloc_tracker() { return _tkr; }
   93.50 +   static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; }
   93.51 +   static inline Tracker get_virtual_memory_release_tracker()  { return _tkr; }
   93.52     static inline bool baseline() { return false; }
   93.53     static inline bool has_baseline() { return false; }
   93.54  
   93.55 @@ -165,6 +176,45 @@
   93.56    };
   93.57  
   93.58   public:
   93.59 +  class Tracker : public StackObj {
   93.60 +    friend class MemTracker;
   93.61 +   public:
   93.62 +    enum MemoryOperation {
   93.63 +      NoOp,                   // no op
   93.64 +      Malloc,                 // malloc
   93.65 +      Realloc,                // realloc
   93.66 +      Free,                   // free
   93.67 +      Reserve,                // virtual memory reserve
   93.68 +      Commit,                 // virtual memory commit
   93.69 +      ReserveAndCommit,       // virtual memory reserve and commit
   93.70 +      StackAlloc = ReserveAndCommit, // allocate thread stack
   93.71 +      Type,                   // assign virtual memory type
   93.72 +      Uncommit,               // virtual memory uncommit
   93.73 +      Release,                // virtual memory release
   93.74 +      ArenaSize,              // set arena size
   93.75 +      StackRelease            // release thread stack
   93.76 +    };
   93.77 +
   93.78 +
   93.79 +   protected:
   93.80 +    Tracker(MemoryOperation op, Thread* thr = NULL);
   93.81 +
   93.82 +   public:
   93.83 +    void discard();
   93.84 +
   93.85 +    void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL);
   93.86 +    void record(address old_addr, address new_addr, size_t size,
   93.87 +      MEMFLAGS flags, address pc = NULL);
   93.88 +
   93.89 +   private:
   93.90 +    bool            _need_thread_critical_lock;
   93.91 +    JavaThread*     _java_thread;
   93.92 +    MemoryOperation _op;          // memory operation
   93.93 +    jint            _seq;         // reserved sequence number
   93.94 +  };
   93.95 +
   93.96 +
   93.97 + public:
   93.98    // native memory tracking level
   93.99    enum NMTLevel {
  93.100      NMT_off,              // native memory tracking is off
  93.101 @@ -276,109 +326,74 @@
  93.102    // record a 'malloc' call
  93.103    static inline void record_malloc(address addr, size_t size, MEMFLAGS flags,
  93.104                              address pc = 0, Thread* thread = NULL) {
  93.105 -    if (is_on() && NMT_CAN_TRACK(flags)) {
  93.106 -      assert(size > 0, "Sanity check");
  93.107 -      create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread);
  93.108 -    }
  93.109 +    Tracker tkr(Tracker::Malloc, thread);
  93.110 +    tkr.record(addr, size, flags, pc);
  93.111    }
  93.112    // record a 'free' call
  93.113    static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) {
  93.114 -    if (is_on() && NMT_CAN_TRACK(flags)) {
  93.115 -      create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread);
  93.116 -    }
  93.117 -  }
  93.118 -  // record a 'realloc' call
  93.119 -  static inline void record_realloc(address old_addr, address new_addr, size_t size,
  93.120 -       MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
  93.121 -    if (is_on() && NMT_CAN_TRACK(flags)) {
  93.122 -      assert(size > 0, "Sanity check");
  93.123 -      record_free(old_addr, flags, thread);
  93.124 -      record_malloc(new_addr, size, flags, pc, thread);
  93.125 -    }
  93.126 +    Tracker tkr(Tracker::Free, thread);
  93.127 +    tkr.record(addr, 0, flags, DEBUG_CALLER_PC);
  93.128    }
  93.129  
  93.130 -  // record arena memory size
  93.131    static inline void record_arena_size(address addr, size_t size) {
  93.132 -    // we add a positive offset to arena address, so we can have arena memory record
  93.133 -    // sorted after arena record
  93.134 -    if (is_on() && !UseMallocOnly) {
  93.135 -      assert(addr != NULL, "Sanity check");
  93.136 -      create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
  93.137 -        DEBUG_CALLER_PC, NULL);
  93.138 -    }
  93.139 +    Tracker tkr(Tracker::ArenaSize);
  93.140 +    tkr.record(addr, size);
  93.141    }
  93.142  
  93.143    // record a virtual memory 'reserve' call
  93.144    static inline void record_virtual_memory_reserve(address addr, size_t size,
  93.145 -                            address pc = 0, Thread* thread = NULL) {
  93.146 -    if (is_on()) {
  93.147 -      assert(size > 0, "Sanity check");
  93.148 -      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(),
  93.149 -                           size, pc, thread);
  93.150 -    }
  93.151 +                     MEMFLAGS flags, address pc = 0, Thread* thread = NULL) {
  93.152 +    assert(size > 0, "Sanity check");
  93.153 +    Tracker tkr(Tracker::Reserve, thread);
  93.154 +    tkr.record(addr, size, flags, pc);
  93.155    }
  93.156  
  93.157    static inline void record_thread_stack(address addr, size_t size, Thread* thr,
  93.158                             address pc = 0) {
  93.159 -    if (is_on()) {
  93.160 -      assert(size > 0 && thr != NULL, "Sanity check");
  93.161 -      create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack,
  93.162 -                          size, pc, thr);
  93.163 -      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack,
  93.164 -                          size, pc, thr);
  93.165 -    }
  93.166 +    Tracker tkr(Tracker::StackAlloc, thr);
  93.167 +    tkr.record(addr, size, mtThreadStack, pc);
  93.168    }
  93.169  
  93.170    static inline void release_thread_stack(address addr, size_t size, Thread* thr) {
  93.171 -    if (is_on()) {
  93.172 -      assert(size > 0 && thr != NULL, "Sanity check");
  93.173 -      assert(!thr->is_Java_thread(), "too early");
  93.174 -      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack,
  93.175 -                          size, DEBUG_CALLER_PC, thr);
  93.176 -      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack,
  93.177 -                          size, DEBUG_CALLER_PC, thr);
  93.178 -    }
  93.179 +    Tracker tkr(Tracker::StackRelease, thr);
  93.180 +    tkr.record(addr, size, mtThreadStack, DEBUG_CALLER_PC);
  93.181    }
  93.182  
  93.183    // record a virtual memory 'commit' call
  93.184    static inline void record_virtual_memory_commit(address addr, size_t size,
  93.185                              address pc, Thread* thread = NULL) {
  93.186 -    if (is_on()) {
  93.187 -      assert(size > 0, "Sanity check");
  93.188 -      create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(),
  93.189 -                           size, pc, thread);
  93.190 -    }
  93.191 +    Tracker tkr(Tracker::Commit, thread);
  93.192 +    tkr.record(addr, size, mtNone, pc);
  93.193    }
  93.194  
  93.195 -  // record a virtual memory 'uncommit' call
  93.196 -  static inline void record_virtual_memory_uncommit(address addr, size_t size,
  93.197 -                            Thread* thread = NULL) {
  93.198 -    if (is_on()) {
  93.199 -      assert(size > 0, "Sanity check");
  93.200 -      create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(),
  93.201 -                           size, DEBUG_CALLER_PC, thread);
  93.202 -    }
  93.203 +  static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size,
  93.204 +    MEMFLAGS flags, address pc, Thread* thread = NULL) {
  93.205 +    Tracker tkr(Tracker::ReserveAndCommit, thread);
  93.206 +    tkr.record(addr, size, flags, pc);
  93.207    }
  93.208  
  93.209 -  // record a virtual memory 'release' call
  93.210 -  static inline void record_virtual_memory_release(address addr, size_t size,
  93.211 -                            Thread* thread = NULL) {
  93.212 -    if (is_on()) {
  93.213 -      assert(size > 0, "Sanity check");
  93.214 -      create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(),
  93.215 -                           size, DEBUG_CALLER_PC, thread);
  93.216 -    }
  93.217 -  }
  93.218  
  93.219    // record memory type on virtual memory base address
  93.220    static inline void record_virtual_memory_type(address base, MEMFLAGS flags,
  93.221                              Thread* thread = NULL) {
  93.222 -    if (is_on()) {
  93.223 -      assert(base > 0, "wrong base address");
  93.224 -      assert((flags & (~mt_masks)) == 0, "memory type only");
  93.225 -      create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()),
  93.226 -                           0, DEBUG_CALLER_PC, thread);
  93.227 -    }
  93.228 +    Tracker tkr(Tracker::Type);
  93.229 +    tkr.record(base, 0, flags);
  93.230 +  }
  93.231 +
  93.232 +  // Get memory trackers for memory operations that can result in race conditions.
  93.233 +  // The memory tracker has to be obtained before realloc, virtual memory uncommit
  93.234 +  // and virtual memory release; call the tracker.record() method if the operation
  93.235 +  // succeeded, or tracker.discard() to abort the tracking.
  93.236 +  static inline Tracker get_realloc_tracker() {
  93.237 +    return Tracker(Tracker::Realloc);
  93.238 +  }
  93.239 +
  93.240 +  static inline Tracker get_virtual_memory_uncommit_tracker() {
  93.241 +    return Tracker(Tracker::Uncommit);
  93.242 +  }
  93.243 +
  93.244 +  static inline Tracker get_virtual_memory_release_tracker() {
  93.245 +    return Tracker(Tracker::Release);
  93.246    }
  93.247  
  93.248  
  93.249 @@ -444,6 +459,31 @@
  93.250    static MemRecorder* get_pending_recorders();
  93.251    static void delete_all_pending_recorders();
  93.252  
  93.253 +  // write a memory tracking record in recorder
  93.254 +  static void write_tracking_record(address addr, MEMFLAGS type,
  93.255 +    size_t size, jint seq, address pc, JavaThread* thread);
  93.256 +
  93.257 +  static bool is_single_threaded_bootstrap() {
  93.258 +    return _state == NMT_bootstrapping_single_thread;
  93.259 +  }
  93.260 +
  93.261 +  static void check_NMT_load(Thread* thr) {
  93.262 +    assert(thr != NULL, "Sanity check");
  93.263 +    if (_slowdown_calling_thread && thr != _worker_thread) {
  93.264 +      os::yield_all();
  93.265 +    }
  93.266 +  }
  93.267 +
  93.268 +  static void inc_pending_op_count() {
  93.269 +    Atomic::inc(&_pending_op_count);
  93.270 +  }
  93.271 +
  93.272 +  static void dec_pending_op_count() {
  93.273 +    Atomic::dec(&_pending_op_count);
  93.274 +    assert(_pending_op_count >= 0, "Sanity check");
  93.275 +  }
  93.276 +
  93.277 +
  93.278   private:
  93.279    // retrieve a pooled memory record or create new one if there is not
  93.280    // one available
  93.281 @@ -522,6 +562,12 @@
  93.282    // if NMT should slow down calling thread to allow
  93.283    // worker thread to catch up
  93.284    static volatile bool             _slowdown_calling_thread;
  93.285 +
  93.286 +  // pending memory op count.
  93.287 +  // Certain memory ops need to pre-reserve sequence number
  93.288 +  // before memory operation can happen to avoid race condition.
  93.289 +  // See MemTracker::Tracker for detail
  93.290 +  static volatile jint             _pending_op_count;
  93.291  };
  93.292  
  93.293  #endif // !INCLUDE_NMT
    94.1 --- a/src/share/vm/services/threadService.cpp	Mon Jun 24 14:27:24 2013 -0700
    94.2 +++ b/src/share/vm/services/threadService.cpp	Tue Jun 25 12:46:21 2013 -0700
    94.3 @@ -327,27 +327,30 @@
    94.4      while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) {
    94.5        cycle->add_thread(currentThread);
    94.6        if (waitingToLockMonitor != NULL) {
    94.7 -        currentThread = Threads::owning_thread_from_monitor_owner(
    94.8 -                          (address)waitingToLockMonitor->owner(),
    94.9 -                          false /* no locking needed */);
   94.10 -        if (currentThread == NULL) {
   94.11 -          // This function is called at a safepoint so the JavaThread
   94.12 -          // that owns waitingToLockMonitor should be findable, but
   94.13 -          // if it is not findable, then the previous currentThread is
   94.14 -          // blocked permanently. We record this as a deadlock.
   94.15 -          num_deadlocks++;
   94.16 +        address currentOwner = (address)waitingToLockMonitor->owner();
   94.17 +        if (currentOwner != NULL) {
   94.18 +          currentThread = Threads::owning_thread_from_monitor_owner(
   94.19 +                            currentOwner,
   94.20 +                            false /* no locking needed */);
   94.21 +          if (currentThread == NULL) {
   94.22 +            // This function is called at a safepoint so the JavaThread
   94.23 +            // that owns waitingToLockMonitor should be findable, but
   94.24 +            // if it is not findable, then the previous currentThread is
   94.25 +            // blocked permanently. We record this as a deadlock.
   94.26 +            num_deadlocks++;
   94.27  
   94.28 -          cycle->set_deadlock(true);
   94.29 +            cycle->set_deadlock(true);
   94.30  
   94.31 -          // add this cycle to the deadlocks list
   94.32 -          if (deadlocks == NULL) {
   94.33 -            deadlocks = cycle;
   94.34 -          } else {
   94.35 -            last->set_next(cycle);
   94.36 +            // add this cycle to the deadlocks list
   94.37 +            if (deadlocks == NULL) {
   94.38 +              deadlocks = cycle;
   94.39 +            } else {
   94.40 +              last->set_next(cycle);
   94.41 +            }
   94.42 +            last = cycle;
   94.43 +            cycle = new DeadlockCycle();
   94.44 +            break;
   94.45            }
   94.46 -          last = cycle;
   94.47 -          cycle = new DeadlockCycle();
   94.48 -          break;
   94.49          }
   94.50        } else {
   94.51          if (concurrent_locks) {
    95.1 --- a/src/share/vm/utilities/bitMap.cpp	Mon Jun 24 14:27:24 2013 -0700
    95.2 +++ b/src/share/vm/utilities/bitMap.cpp	Tue Jun 25 12:46:21 2013 -0700
    95.3 @@ -41,7 +41,7 @@
    95.4  
    95.5  
    95.6  BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) :
    95.7 -  _map(map), _size(size_in_bits)
    95.8 +  _map(map), _size(size_in_bits), _map_allocator(false)
    95.9  {
   95.10    assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
   95.11    assert(size_in_bits >= 0, "just checking");
   95.12 @@ -49,7 +49,7 @@
   95.13  
   95.14  
   95.15  BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) :
   95.16 -  _map(NULL), _size(0)
   95.17 +  _map(NULL), _size(0), _map_allocator(false)
   95.18  {
   95.19    assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption.");
   95.20    resize(size_in_bits, in_resource_area);
   95.21 @@ -65,8 +65,10 @@
   95.22    if (in_resource_area) {
   95.23      _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words);
   95.24    } else {
   95.25 -    if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map, mtInternal);
   95.26 -    _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words, mtInternal);
   95.27 +    if (old_map != NULL) {
   95.28 +      _map_allocator.free();
   95.29 +    }
   95.30 +    _map = _map_allocator.allocate(new_size_in_words);
   95.31    }
   95.32    Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map,
   95.33                         MIN2(old_size_in_words, new_size_in_words));
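
The resize path now funnels C-heap storage through the embedded _map_allocator instead of pairing NEW_C_HEAP_ARRAY/FREE_C_HEAP_ARRAY by hand, so one object owns the backing array and the allocation policy. A rough stand-in for the idea (illustrative only; the real ArrayAllocator is declared in allocation.hpp and differs in detail):

    #include <stdlib.h>

    // Owns one array; freeing and ownership live in the allocator, not the user.
    template <typename E>
    class SimpleArrayAllocator {
      E* _addr;
     public:
      SimpleArrayAllocator() : _addr(NULL) { }
      E* allocate(size_t length) {
        _addr = (E*)::malloc(length * sizeof(E));  // the real allocator can also choose mmap
        return _addr;
      }
      void free() {
        if (_addr != NULL) { ::free(_addr); _addr = NULL; }
      }
      ~SimpleArrayAllocator() { free(); }
    };
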
    96.1 --- a/src/share/vm/utilities/bitMap.hpp	Mon Jun 24 14:27:24 2013 -0700
    96.2 +++ b/src/share/vm/utilities/bitMap.hpp	Tue Jun 25 12:46:21 2013 -0700
    96.3 @@ -48,6 +48,7 @@
    96.4    } RangeSizeHint;
    96.5  
    96.6   private:
    96.7 +  ArrayAllocator<bm_word_t, mtInternal> _map_allocator;
    96.8    bm_word_t* _map;     // First word in bitmap
    96.9    idx_t      _size;    // Size of bitmap (in bits)
   96.10  
   96.11 @@ -113,7 +114,7 @@
   96.12   public:
   96.13  
   96.14    // Constructs a bitmap with no map, and size 0.
   96.15 -  BitMap() : _map(NULL), _size(0) {}
   96.16 +  BitMap() : _map(NULL), _size(0), _map_allocator(false) {}
   96.17  
   96.18    // Constructs a bitmap with the given map and size.
   96.19    BitMap(bm_word_t* map, idx_t size_in_bits);
    97.1 --- a/src/share/vm/utilities/exceptions.hpp	Mon Jun 24 14:27:24 2013 -0700
    97.2 +++ b/src/share/vm/utilities/exceptions.hpp	Tue Jun 25 12:46:21 2013 -0700
    97.3 @@ -194,15 +194,15 @@
    97.4  #define HAS_PENDING_EXCEPTION                    (((ThreadShadow*)THREAD)->has_pending_exception())
    97.5  #define CLEAR_PENDING_EXCEPTION                  (((ThreadShadow*)THREAD)->clear_pending_exception())
    97.6  
    97.7 -#define CHECK                                    THREAD); if (HAS_PENDING_EXCEPTION) return       ; (0
    97.8 -#define CHECK_(result)                           THREAD); if (HAS_PENDING_EXCEPTION) return result; (0
    97.9 +#define CHECK                                    THREAD); if (HAS_PENDING_EXCEPTION) return       ; (void)(0
   97.10 +#define CHECK_(result)                           THREAD); if (HAS_PENDING_EXCEPTION) return result; (void)(0
   97.11  #define CHECK_0                                  CHECK_(0)
   97.12  #define CHECK_NH                                 CHECK_(Handle())
   97.13  #define CHECK_NULL                               CHECK_(NULL)
   97.14  #define CHECK_false                              CHECK_(false)
   97.15  
   97.16 -#define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (0
   97.17 -#define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0
   97.18 +#define CHECK_AND_CLEAR                         THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return;        } (void)(0
   97.19 +#define CHECK_AND_CLEAR_(result)                THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0
   97.20  #define CHECK_AND_CLEAR_0                       CHECK_AND_CLEAR_(0)
   97.21  #define CHECK_AND_CLEAR_NH                      CHECK_AND_CLEAR_(Handle())
   97.22  #define CHECK_AND_CLEAR_NULL                    CHECK_AND_CLEAR_(NULL)
   97.23 @@ -282,7 +282,7 @@
   97.24      CLEAR_PENDING_EXCEPTION;               \
   97.25      ex->print();                           \
   97.26      ShouldNotReachHere();                  \
   97.27 -  } (0
   97.28 +  } (void)(0
   97.29  
   97.30  // ExceptionMark is a stack-allocated helper class for local exception handling.
   97.31  // It is used with the EXCEPTION_MARK macro.
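
These macros deliberately end with an unbalanced parenthesis: the call site supplies the closing ')', so each expansion ends in a throwaway expression. Under gcc's -Wunused-value a bare trailing (0) is flagged as a statement with no effect, while casting it to void marks the discard as intentional -- the same concern the taskqueue.hpp comments below address. An illustration of the expansion and the warning (hypothetical foo/arg):

    // Before this change, foo(arg, CHECK); expanded roughly to:
    //   foo(arg, THREAD); if (HAS_PENDING_EXCEPTION) return; (0);
    // Compile with: g++ -Wunused-value -Werror
    int main() {
      // (0);      // would warn: statement has no effect [-Wunused-value]
      (void)(0);   // OK: the value is explicitly discarded
      return 0;
    }
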
    98.1 --- a/src/share/vm/utilities/taskqueue.hpp	Mon Jun 24 14:27:24 2013 -0700
    98.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Tue Jun 25 12:46:21 2013 -0700
    98.3 @@ -340,8 +340,12 @@
    98.4    if (dirty_n_elems == N - 1) {
    98.5      // Actually means 0, so do the push.
    98.6      uint localBot = _bottom;
    98.7 -    // g++ complains if the volatile result of the assignment is unused.
    98.8 -    const_cast<E&>(_elems[localBot] = t);
    98.9 +    // g++ complains if the volatile result of the assignment is
   98.10 +    // unused, so we cast the volatile away.  We cannot cast directly
   98.11 +    // to void, because gcc treats that as not using the result of the
   98.12 +    // assignment.  However, casting to E& means that we trigger an
   98.13 +    // unused-value warning.  So, we cast the E& to void.
   98.14 +    (void)const_cast<E&>(_elems[localBot] = t);
   98.15      OrderAccess::release_store(&_bottom, increment_index(localBot));
   98.16      TASKQUEUE_STATS_ONLY(stats.record_push());
   98.17      return true;
   98.18 @@ -397,7 +401,12 @@
   98.19      return false;
   98.20    }
   98.21  
   98.22 -  const_cast<E&>(t = _elems[oldAge.top()]);
   98.23 +  // g++ complains if the volatile result of the assignment is
   98.24 +  // unused, so we cast the volatile away.  We cannot cast directly
   98.25 +  // to void, because gcc treats that as not using the result of the
   98.26 +  // assignment.  However, casting to E& means that we trigger an
   98.27 +  // unused-value warning.  So, we cast the E& to void.
   98.28 +  (void) const_cast<E&>(t = _elems[oldAge.top()]);
   98.29    Age newAge(oldAge);
   98.30    newAge.increment();
   98.31    Age resAge = _age.cmpxchg(newAge, oldAge);
   98.32 @@ -640,8 +649,12 @@
   98.33    uint dirty_n_elems = dirty_size(localBot, top);
   98.34    assert(dirty_n_elems < N, "n_elems out of range.");
   98.35    if (dirty_n_elems < max_elems()) {
   98.36 -    // g++ complains if the volatile result of the assignment is unused.
   98.37 -    const_cast<E&>(_elems[localBot] = t);
   98.38 +    // g++ complains if the volatile result of the assignment is
   98.39 +    // unused, so we cast the volatile away.  We cannot cast directly
   98.40 +    // to void, because gcc treats that as not using the result of the
   98.41 +    // assignment.  However, casting to E& means that we trigger an
   98.42 +    // unused-value warning.  So, we cast the E& to void.
   98.43 +    (void) const_cast<E&>(_elems[localBot] = t);
   98.44      OrderAccess::release_store(&_bottom, increment_index(localBot));
   98.45      TASKQUEUE_STATS_ONLY(stats.record_push());
   98.46      return true;
   98.47 @@ -665,7 +678,12 @@
   98.48    // This is necessary to prevent any read below from being reordered
   98.49    // before the store just above.
   98.50    OrderAccess::fence();
   98.51 -  const_cast<E&>(t = _elems[localBot]);
   98.52 +  // g++ complains if the volatile result of the assignment is
   98.53 +  // unused, so we cast the volatile away.  We cannot cast directly
   98.54 +  // to void, because gcc treats that as not using the result of the
   98.55 +  // assignment.  However, casting to E& means that we trigger an
   98.56 +  // unused-value warning.  So, we cast the E& to void.
   98.57 +  (void) const_cast<E&>(t = _elems[localBot]);
   98.58    // This is a second read of "age"; the "size()" above is the first.
   98.59    // If there's still at least one element in the queue, based on the
   98.60    // "_bottom" and "age" we've read, then there can be no interference with
    99.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    99.2 +++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java	Tue Jun 25 12:46:21 2013 -0700
    99.3 @@ -0,0 +1,50 @@
    99.4 +/*
    99.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    99.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    99.7 + *
    99.8 + * This code is free software; you can redistribute it and/or modify it
    99.9 + * under the terms of the GNU General Public License version 2 only, as
   99.10 + * published by the Free Software Foundation.
   99.11 + *
   99.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   99.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   99.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   99.15 + * version 2 for more details (a copy is included in the LICENSE file that
   99.16 + * accompanied this code).
   99.17 + *
   99.18 + * You should have received a copy of the GNU General Public License version
   99.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   99.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   99.21 + *
   99.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   99.23 + * or visit www.oracle.com if you need additional information or have any
   99.24 + * questions.
   99.25 + */
   99.26 +
   99.27 +/*
   99.28 + * @test
   99.29 + * @bug 7167142
   99.30 + * @summary Warn if unused .hotspot_compiler file is present
   99.31 + * @library /testlibrary
   99.32 + */
   99.33 +
   99.34 +import java.io.PrintWriter;
   99.35 +import com.oracle.java.testlibrary.*;
   99.36 +
   99.37 +public class CompilerConfigFileWarning {
   99.38 +    public static void main(String[] args) throws Exception {
   99.39 +        String vmVersion = System.getProperty("java.vm.version");
   99.40 +        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
   99.41 +            System.out.println("Skip on debug builds since we'll always read the file there");
   99.42 +            return;
   99.43 +        }
   99.44 +
   99.45 +        PrintWriter pw = new PrintWriter(".hotspot_compiler");
   99.46 +        pw.println("aa");
   99.47 +        pw.close();
   99.48 +
   99.49 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
   99.50 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
   99.51 +        output.shouldContain("warning: .hotspot_compiler file is present but has been ignored.  Run with -XX:CompileCommandFile=.hotspot_compiler to load the file.");
   99.52 +    }
   99.53 +}
   100.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   100.2 +++ b/test/runtime/CommandLine/ConfigFileWarning.java	Tue Jun 25 12:46:21 2013 -0700
   100.3 @@ -0,0 +1,50 @@
   100.4 +/*
   100.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   100.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   100.7 + *
   100.8 + * This code is free software; you can redistribute it and/or modify it
   100.9 + * under the terms of the GNU General Public License version 2 only, as
  100.10 + * published by the Free Software Foundation.
  100.11 + *
  100.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  100.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  100.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  100.15 + * version 2 for more details (a copy is included in the LICENSE file that
  100.16 + * accompanied this code).
  100.17 + *
  100.18 + * You should have received a copy of the GNU General Public License version
  100.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  100.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  100.21 + *
  100.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  100.23 + * or visit www.oracle.com if you need additional information or have any
  100.24 + * questions.
  100.25 + */
  100.26 +
  100.27 +/*
  100.28 + * @test
  100.29 + * @bug 7167142
  100.30 + * @summary Warn if unused .hotspot_rc file is present
  100.31 + * @library /testlibrary
  100.32 + */
  100.33 +
  100.34 +import java.io.PrintWriter;
  100.35 +import com.oracle.java.testlibrary.*;
  100.36 +
  100.37 +public class ConfigFileWarning {
  100.38 +    public static void main(String[] args) throws Exception {
  100.39 +        String vmVersion = System.getProperty("java.vm.version");
  100.40 +        if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) {
  100.41 +            System.out.println("Skip on debug builds since we'll always read the file there");
  100.42 +            return;
  100.43 +        }
  100.44 +
  100.45 +        PrintWriter pw = new PrintWriter(".hotspotrc");
  100.46 +        pw.println("aa");
  100.47 +        pw.close();
  100.48 +
  100.49 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version");
  100.50 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  100.51 +        output.shouldContain("warning: .hotspotrc file is present but has been ignored.  Run with -XX:Flags=.hotspotrc to load the file.");
  100.52 +    }
  100.53 +}
   101.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   101.2 +++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java	Tue Jun 25 12:46:21 2013 -0700
   101.3 @@ -0,0 +1,88 @@
   101.4 +/*
   101.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   101.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   101.7 + *
   101.8 + * This code is free software; you can redistribute it and/or modify it
   101.9 + * under the terms of the GNU General Public License version 2 only, as
  101.10 + * published by the Free Software Foundation.
  101.11 + *
  101.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  101.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  101.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  101.15 + * version 2 for more details (a copy is included in the LICENSE file that
  101.16 + * accompanied this code).
  101.17 + *
  101.18 + * You should have received a copy of the GNU General Public License version
  101.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  101.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  101.21 + *
  101.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  101.23 + * or visit www.oracle.com if you need additional information or have any
  101.24 + * questions.
  101.25 + */
  101.26 +
  101.27 +/*
  101.28 + * @test CdsDifferentObjectAlignment
  101.29 + * @summary Testing CDS (class data sharing) using varying object alignment.
  101.30 + *          Using different object alignment for each dump/load pair.
  101.31 + *          This is a negative test; using object alignment for loading that
  101.32 + *          is different from object alignment for creating a CDS file
  101.33 + *          should fail when loading.
  101.34 + * @library /testlibrary
  101.35 + */
  101.36 +
  101.37 +import com.oracle.java.testlibrary.*;
  101.38 +
  101.39 +public class CdsDifferentObjectAlignment {
  101.40 +    public static void main(String[] args) throws Exception {
  101.41 +        String nativeWordSize = System.getProperty("sun.arch.data.model");
  101.42 +        if (!Platform.is64bit()) {
  101.43 +            System.out.println("ObjectAlignmentInBytes for CDS is only " +
  101.44 +                "supported on 64bit platforms; this platform is " +
  101.45 +                nativeWordSize);
  101.46 +            System.out.println("Skipping the test");
  101.47 +        } else {
  101.48 +            createAndLoadSharedArchive(16, 64);
  101.49 +            createAndLoadSharedArchive(64, 32);
  101.50 +        }
  101.51 +    }
  101.52 +
  101.53 +
  101.54 +    // Parameters are object alignment expressed in bytes
  101.55 +    private static void
  101.56 +    createAndLoadSharedArchive(int createAlignment, int loadAlignment)
  101.57 +    throws Exception {
  101.58 +        String createAlignmentArgument = "-XX:ObjectAlignmentInBytes=" +
  101.59 +            createAlignment;
  101.60 +        String loadAlignmentArgument = "-XX:ObjectAlignmentInBytes=" +
  101.61 +            loadAlignment;
  101.62 +
  101.63 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  101.64 +            "-XX:+UnlockDiagnosticVMOptions",
  101.65 +            "-XX:SharedArchiveFile=./sample.jsa",
  101.66 +            "-Xshare:dump",
  101.67 +            createAlignmentArgument);
  101.68 +
  101.69 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  101.70 +        output.shouldContain("Loading classes to share");
  101.71 +        output.shouldHaveExitValue(0);
  101.72 +
  101.73 +        pb = ProcessTools.createJavaProcessBuilder(
  101.74 +            "-XX:+UnlockDiagnosticVMOptions",
  101.75 +            "-XX:SharedArchiveFile=./sample.jsa",
  101.76 +            "-Xshare:on",
  101.77 +            loadAlignmentArgument,
  101.78 +            "-version");
  101.79 +
  101.80 +        output = new OutputAnalyzer(pb.start());
  101.81 +        String expectedErrorMsg =
  101.82 +            String.format(
  101.83 +            "The shared archive file's ObjectAlignmentInBytes of %d " +
  101.84 +            "does not equal the current ObjectAlignmentInBytes of %d",
  101.85 +            createAlignment,
  101.86 +            loadAlignment);
  101.87 +
  101.88 +        output.shouldContain(expectedErrorMsg);
  101.89 +        output.shouldHaveExitValue(1);
  101.90 +    }
  101.91 +}
   102.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   102.2 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java	Tue Jun 25 12:46:21 2013 -0700
   102.3 @@ -0,0 +1,92 @@
   102.4 +/*
   102.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   102.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   102.7 + *
   102.8 + * This code is free software; you can redistribute it and/or modify it
   102.9 + * under the terms of the GNU General Public License version 2 only, as
  102.10 + * published by the Free Software Foundation.
  102.11 + *
  102.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  102.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  102.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  102.15 + * version 2 for more details (a copy is included in the LICENSE file that
  102.16 + * accompanied this code).
  102.17 + *
  102.18 + * You should have received a copy of the GNU General Public License version
  102.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  102.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  102.21 + *
  102.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  102.23 + * or visit www.oracle.com if you need additional information or have any
  102.24 + * questions.
  102.25 + */
  102.26 +
  102.27 +/*
  102.28 + * @test CdsSameObjectAlignment
  102.29 + * @summary Testing CDS (class data sharing) using varying object alignment.
  102.30 + *          Using same object alignment for each dump/load pair
  102.31 + * @library /testlibrary
  102.32 + */
  102.33 +
  102.34 +import com.oracle.java.testlibrary.*;
  102.35 +
  102.36 +public class CdsSameObjectAlignment {
  102.37 +    public static void main(String[] args) throws Exception {
  102.38 +        String nativeWordSize = System.getProperty("sun.arch.data.model");
  102.39 +        if (!Platform.is64bit()) {
  102.40 +            System.out.println("ObjectAlignmentInBytes for CDS is only " +
  102.41 +                "supported on 64bit platforms; this plaform is " +
  102.42 +                nativeWordSize);
  102.43 +            System.out.println("Skipping the test");
  102.44 +        } else {
  102.45 +            dumpAndLoadSharedArchive(8);
  102.46 +            dumpAndLoadSharedArchive(16);
  102.47 +            dumpAndLoadSharedArchive(32);
  102.48 +            dumpAndLoadSharedArchive(64);
  102.49 +        }
  102.50 +    }
  102.51 +
  102.52 +    private static void
  102.53 +    dumpAndLoadSharedArchive(int objectAlignmentInBytes) throws Exception {
  102.54 +        String objectAlignmentArg = "-XX:ObjectAlignmentInBytes="
  102.55 +            + objectAlignmentInBytes;
  102.56 +        System.out.println("dumpAndLoadSharedArchive(): objectAlignmentInBytes = "
  102.57 +            + objectAlignmentInBytes);
  102.58 +
  102.59 +        // create shared archive
  102.60 +        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
  102.61 +            "-XX:+UnlockDiagnosticVMOptions",
  102.62 +            "-XX:SharedArchiveFile=./sample.jsa",
  102.63 +            "-Xshare:dump",
  102.64 +            objectAlignmentArg);
  102.65 +
  102.66 +        OutputAnalyzer output = new OutputAnalyzer(pb.start());
  102.67 +        output.shouldContain("Loading classes to share");
  102.68 +        output.shouldHaveExitValue(0);
  102.69 +
  102.70 +
  102.71 +        // run using the shared archive
  102.72 +        pb = ProcessTools.createJavaProcessBuilder(
  102.73 +            "-XX:+UnlockDiagnosticVMOptions",
  102.74 +            "-XX:SharedArchiveFile=./sample.jsa",
  102.75 +            "-Xshare:on",
  102.76 +            objectAlignmentArg,
  102.77 +            "-version");
  102.78 +
  102.79 +        output = new OutputAnalyzer(pb.start());
  102.80 +
  102.81 +        try {
  102.82 +            output.shouldContain("sharing");
  102.83 +            output.shouldHaveExitValue(0);
  102.84 +        } catch (RuntimeException e) {
  102.85 +            // CDS uses absolute addresses for performance.
  102.86 +            // It will try to reserve memory at a specific address;
  102.87 +            // there is a chance such a reservation will fail.
  102.88 +            // If it does, it is NOT considered a failure of the feature,
  102.89 +            // but rather a possible, if unlikely, expected outcome.
  102.90 +            output.shouldContain(
  102.91 +                "Unable to reserve shared space at required address");
  102.92 +            output.shouldHaveExitValue(1);
  102.93 +        }
  102.94 +    }
  102.95 +}
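
The two SharedArchiveFile tests above drive the same dump-then-run CDS handshake; a condensed sketch follows, assuming only the flags and testlibrary calls shown in the diffs. The class name CdsRoundTripSketch and the alignment value 16 are illustrative. If the alignment in the run phase differed from the dump phase, the run would be expected to fail with the ObjectAlignmentInBytes mismatch message checked by CdsDifferentObjectAlignment; and, as the comment above notes, even matching alignments can fail if the shared address space cannot be reserved.

    import com.oracle.java.testlibrary.OutputAnalyzer;
    import com.oracle.java.testlibrary.ProcessTools;

    public class CdsRoundTripSketch {
        public static void main(String[] args) throws Exception {
            // Phase 1: dump a shared archive at a chosen object alignment.
            ProcessBuilder dump = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:SharedArchiveFile=./sample.jsa",
                "-Xshare:dump",
                "-XX:ObjectAlignmentInBytes=16");
            new OutputAnalyzer(dump.start()).shouldHaveExitValue(0);

            // Phase 2: boot against the archive; the alignment must match the dump.
            ProcessBuilder run = ProcessTools.createJavaProcessBuilder(
                "-XX:+UnlockDiagnosticVMOptions",
                "-XX:SharedArchiveFile=./sample.jsa",
                "-Xshare:on",
                "-XX:ObjectAlignmentInBytes=16",
                "-version");
            new OutputAnalyzer(run.start()).shouldContain("sharing");
        }
    }
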
   103.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   103.2 +++ b/test/serviceability/threads/TestFalseDeadLock.java	Tue Jun 25 12:46:21 2013 -0700
   103.3 @@ -0,0 +1,95 @@
   103.4 +/*
   103.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   103.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   103.7 + *
   103.8 + * This code is free software; you can redistribute it and/or modify it
   103.9 + * under the terms of the GNU General Public License version 2 only, as
  103.10 + * published by the Free Software Foundation.
  103.11 + *
  103.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  103.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  103.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  103.15 + * version 2 for more details (a copy is included in the LICENSE file that
  103.16 + * accompanied this code).
  103.17 + *
  103.18 + * You should have received a copy of the GNU General Public License version
  103.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  103.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  103.21 + *
  103.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  103.23 + * or visit www.oracle.com if you need additional information or have any
  103.24 + * questions.
  103.25 + */
  103.26 +
  103.27 +import java.lang.management.ManagementFactory;
  103.28 +import java.lang.management.ThreadMXBean;
  103.29 +import java.util.Random;
  103.30 +
  103.31 +/*
  103.32 + * @test
  103.33 + * @bug 8016304
  103.34 + * @summary Make sure no deadlock is reported for this program which has no deadlocks.
  103.35 + * @run main/othervm TestFalseDeadLock
  103.36 + */
  103.37 +
  103.38 +/*
  103.39 + * This test will not provoke the bug every time it is run since the bug is intermittent.
  103.40 + * The test has a fixed running time of 5 seconds.
  103.41 + */
  103.42 +
  103.43 +public class TestFalseDeadLock {
  103.44 +    private static ThreadMXBean bean;
  103.45 +    private static volatile boolean running = true;
  103.46 +    private static volatile boolean found = false;
  103.47 +
  103.48 +    public static void main(String[] args) throws Exception {
  103.49 +        bean = ManagementFactory.getThreadMXBean();
  103.50 +        Thread[] threads = new Thread[500];
  103.51 +        for (int i = 0; i < threads.length; i++) {
  103.52 +            Test t = new Test();
  103.53 +            threads[i] = new Thread(t);
  103.54 +            threads[i].start();
  103.55 +        }
  103.56 +        try {
  103.57 +            Thread.sleep(5000);
  103.58 +        } catch (InterruptedException ex) {
  103.59 +        }
  103.60 +        running = false;
  103.61 +        for (Thread t : threads) {
  103.62 +            t.join();
  103.63 +        }
  103.64 +        if (found) {
  103.65 +            throw new Exception("Deadlock reported, but there is no deadlock.");
  103.66 +        }
  103.67 +    }
  103.68 +
  103.69 +    public static class Test implements Runnable {
  103.70 +        public void run() {
  103.71 +            Random r = new Random();
  103.72 +            while (running) {
  103.73 +                try {
  103.74 +                    synchronized (this) {
  103.75 +                        wait(r.nextInt(1000) + 1);
  103.76 +                    }
  103.77 +                } catch (InterruptedException ex) {
  103.78 +                }
  103.79 +                recurse(2000);
  103.80 +            }
  103.81 +            if (bean.findDeadlockedThreads() != null) {
  103.82 +                System.out.println("FOUND!");
  103.83 +                found = true;
  103.84 +            }
  103.85 +        }
  103.86 +
  103.87 +        private void recurse(int i) {
  103.88 +            if (!running) {
  103.89 +                // It is important for the test to call println here
  103.90 +                // since there are locks inside that path.
  103.91 +                System.out.println("Hullo");
  103.92 +            }
  103.93 +            else if (i > 0) {
  103.94 +                recurse(i - 1);
  103.95 +            }
  103.96 +        }
  103.97 +    }
  103.98 +}
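
For contrast with the false positive that TestFalseDeadLock guards against, here is a minimal, self-contained illustration (not part of the changeset) of what ThreadMXBean.findDeadlockedThreads reports for a genuine monitor deadlock; the class name RealDeadLockDemo is hypothetical, and the sleeps only make the deadlock likely, not guaranteed.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    public class RealDeadLockDemo {
        private static final Object lockA = new Object();
        private static final Object lockB = new Object();

        private static void locker(final Object first, final Object second) {
            Thread t = new Thread(new Runnable() {
                public void run() {
                    synchronized (first) {
                        try { Thread.sleep(100); } catch (InterruptedException e) {}
                        synchronized (second) { } // blocks forever once each thread holds its first lock
                    }
                }
            });
            t.setDaemon(true); // let the JVM exit even though the threads never finish
            t.start();
        }

        public static void main(String[] args) throws Exception {
            locker(lockA, lockB); // acquires A, then wants B
            locker(lockB, lockA); // acquires B, then wants A
            Thread.sleep(500);    // give both threads time to reach the deadlock
            ThreadMXBean bean = ManagementFactory.getThreadMXBean();
            long[] ids = bean.findDeadlockedThreads();
            System.out.println(ids == null
                ? "no deadlock detected"
                : ids.length + " deadlocked threads detected");
        }
    }
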
   104.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
   104.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java	Tue Jun 25 12:46:21 2013 -0700
   104.3 @@ -0,0 +1,62 @@
   104.4 +/*
   104.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
   104.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   104.7 + *
   104.8 + * This code is free software; you can redistribute it and/or modify it
   104.9 + * under the terms of the GNU General Public License version 2 only, as
  104.10 + * published by the Free Software Foundation.
  104.11 + *
  104.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
  104.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  104.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  104.15 + * version 2 for more details (a copy is included in the LICENSE file that
  104.16 + * accompanied this code).
  104.17 + *
  104.18 + * You should have received a copy of the GNU General Public License version
  104.19 + * 2 along with this work; if not, write to the Free Software Foundation,
  104.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  104.21 + *
  104.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  104.23 + * or visit www.oracle.com if you need additional information or have any
  104.24 + * questions.
  104.25 + */
  104.26 +
  104.27 +package com.oracle.java.testlibrary;
  104.28 +
  104.29 +public class Platform {
  104.30 +  private static final String osName = System.getProperty("os.name");
  104.31 +  private static final String dataModel = System.getProperty("sun.arch.data.model");
  104.32 +  private static final String vmVersion = System.getProperty("java.vm.version");
  104.33 +
  104.34 +  public static boolean is64bit() {
  104.35 +    return dataModel.equals("64");
  104.36 +  }
  104.37 +
  104.38 +  public static boolean isSolaris() {
  104.39 +    return osName.toLowerCase().startsWith("sunos");
  104.40 +  }
  104.41 +
  104.42 +  public static boolean isWindows() {
  104.43 +    return osName.toLowerCase().startsWith("win");
  104.44 +  }
  104.45 +
  104.46 +  public static boolean isOSX() {
  104.47 +    return osName.toLowerCase().startsWith("mac");
  104.48 +  }
  104.49 +
  104.50 +  public static boolean isLinux() {
  104.51 +    return osName.toLowerCase().startsWith("linux");
  104.52 +  }
  104.53 +
  104.54 +  public static String getOsName() {
  104.55 +    return osName;
  104.56 +  }
  104.57 +
  104.58 +  public static boolean isDebugBuild() {
  104.59 +    return vmVersion.toLowerCase().contains("debug");
  104.60 +  }
  104.61 +
  104.62 +  public static String getVMVersion() {
  104.63 +    return vmVersion;
  104.64 +  }
  104.65 +}
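
A hypothetical usage sketch of the Platform helper added above, mirroring the ProcessTools change that follows; the class name PlatformUsageExample is illustrative, and only methods defined in the diff are used.

    import com.oracle.java.testlibrary.Platform;

    public class PlatformUsageExample {
        public static void main(String[] args) {
            // Replaces raw System.getProperty checks, as in getPlatformSpecificVMArgs below.
            if (Platform.is64bit() && Platform.isSolaris()) {
                System.out.println("64-bit Solaris: would pass -d64 to child JVMs");
            } else {
                System.out.println("running on " + Platform.getOsName()
                    + "; no platform-specific VM args needed");
            }
        }
    }
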
   105.1 --- a/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Mon Jun 24 14:27:24 2013 -0700
   105.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java	Tue Jun 25 12:46:21 2013 -0700
   105.3 @@ -112,10 +112,8 @@
   105.4     * @return String[] with platform specific arguments, empty if there are none
   105.5     */
   105.6    public static String[] getPlatformSpecificVMArgs() {
   105.7 -    String osName = System.getProperty("os.name");
   105.8 -    String dataModel = System.getProperty("sun.arch.data.model");
   105.9  
  105.10 -    if (osName.equals("SunOS") && dataModel.equals("64")) {
  105.11 +    if (Platform.is64bit() && Platform.isSolaris()) {
  105.12        return new String[] { "-d64" };
  105.13      }
  105.14  
