Thu, 15 Apr 2010 19:08:48 -0700
Merge
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1728,9 +1728,13 @@
       ShouldNotReachHere();
     }
   } else if (code == lir_cmp_l2i) {
+#ifdef _LP64
+    __ lcmp(left->as_register_lo(), right->as_register_lo(), dst->as_register());
+#else
     __ lcmp(left->as_register_hi(),  left->as_register_lo(),
             right->as_register_hi(), right->as_register_lo(),
             dst->as_register());
+#endif
   } else {
     ShouldNotReachHere();
   }
@@ -2849,7 +2853,7 @@


 void LIR_Assembler::align_backward_branch_target() {
-  __ align(16);
+  __ align(OptoLoopAlignment);
 }

--- a/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -60,9 +60,6 @@
 define_pd_global(intx, INTPRESSURE, 48); // large register set
 define_pd_global(intx, InteriorEntryAlignment, 16); // = CodeEntryAlignment
 define_pd_global(intx, NewSizeThreadIncrease, ScaleForWordSize(4*K));
-// The default setting 16/16 seems to work best.
-// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
-define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
 define_pd_global(intx, RegisterCostAreaRatio, 12000);
 define_pd_global(bool, UseTLAB, true);
 define_pd_global(bool, ResizeTLAB, true);
--- a/src/cpu/sparc/vm/globals_sparc.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/globals_sparc.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -40,6 +40,9 @@
 define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast

 define_pd_global(intx, CodeEntryAlignment, 32);
+// The default setting 16/16 seems to work best.
+// (For _228_jack 16/16 is 2% better than 4/4, 16/4, 32/32, 32/16, or 16/32.)
+define_pd_global(intx, OptoLoopAlignment, 16); // = 4*wordSize
 define_pd_global(intx, InlineFrequencyCount, 50); // we can use more inlining on the SPARC
 define_pd_global(intx, InlineSmallCode, 1500);
 #ifdef _LP64
--- a/src/cpu/sparc/vm/sparc.ad	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/sparc.ad	Thu Apr 15 19:08:48 2010 -0700
@@ -471,6 +471,9 @@
 source %{
 #define __ _masm.

+// Block initializing store
+#define ASI_BLK_INIT_QUAD_LDD_P 0xE2
+
 // tertiary op of a LoadP or StoreP encoding
 #define REGP_OP true

@@ -6147,6 +6150,7 @@
 %}

 instruct prefetchw( memory mem ) %{
+  predicate(AllocatePrefetchStyle != 3 );
   match( PrefetchWrite mem );
   ins_cost(MEMORY_REF_COST);

@@ -6156,6 +6160,23 @@
   ins_pipe(iload_mem);
 %}

+// Use BIS instruction to prefetch.
+instruct prefetchw_bis( memory mem ) %{
+  predicate(AllocatePrefetchStyle == 3);
+  match( PrefetchWrite mem );
+  ins_cost(MEMORY_REF_COST);
+
+  format %{ "STXA   G0,$mem\t! // Block initializing store" %}
+  ins_encode %{
+    Register base = as_Register($mem$$base);
+    int disp = $mem$$disp;
+    if (disp != 0) {
+      __ add(base, AllocatePrefetchStepSize, base);
+    }
+    __ stxa(G0, base, G0, ASI_BLK_INIT_QUAD_LDD_P);
+  %}
+  ins_pipe(istore_mem_reg);
+%}

 //----------Store Instructions-------------------------------------------------
 // Store Byte
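Note: the block-initializing store emitted above is not a classic prefetch. A rough standalone sketch (not HotSpot code, and only an approximation of the SPARC BIS semantics): storing into the line that allocation will reach next claims it in a writable state without the usual read-for-ownership, which is what makes it usable as a write prefetch for the TLAB bump pointer.

    #include <cstdint>

    // Hypothetical illustration only. The plain store stands in for
    //   stxa %g0, [base] ASI_BLK_INIT_QUAD_LDD_P
    // which initializes the cache line instead of fetching it.
    inline void prefetch_write_by_store(char* alloc_top, intptr_t step) {
      volatile int64_t* line = reinterpret_cast<volatile int64_t*>(alloc_top + step);
      *line = 0;
    }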
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1148,7 +1148,7 @@
     __ andn(from, 7, from);     // Align address
     __ ldx(from, 0, O3);
     __ inc(from, 8);
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_loop);
     __ ldx(from, 0, O4);
     __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1220,7 +1220,7 @@
     //
     __ andn(end_from, 7, end_from);     // Align address
     __ ldx(end_from, 0, O3);
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_loop);
     __ ldx(end_from, -8, O4);
     __ deccc(count, count_dec); // Can we do next iteration after this one?
@@ -1349,7 +1349,7 @@
     __ BIND(L_copy_byte);
     __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
     __ delayed()->nop();
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
     __ ldub(from, offset, O3);
     __ deccc(count);
@@ -1445,7 +1445,7 @@
                                         L_aligned_copy, L_copy_byte);
     }
     // copy 4 elements (16 bytes) at a time
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
     __ dec(end_from, 16);
     __ ldx(end_from, 8, O3);
@@ -1461,7 +1461,7 @@
     __ BIND(L_copy_byte);
     __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
     __ delayed()->nop();
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_byte_loop);
     __ dec(end_from);
     __ dec(end_to);
@@ -1577,7 +1577,7 @@
     __ BIND(L_copy_2_bytes);
     __ br_zero(Assembler::zero, false, Assembler::pt, count, L_exit);
     __ delayed()->nop();
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_2_bytes_loop);
     __ lduh(from, offset, O3);
     __ deccc(count);
@@ -1684,7 +1684,7 @@
                                         L_aligned_copy, L_copy_2_bytes);
     }
     // copy 4 elements (16 bytes) at a time
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
     __ dec(end_from, 16);
     __ ldx(end_from, 8, O3);
@@ -1781,7 +1781,7 @@
     // copy with shift 4 elements (16 bytes) at a time
     __ dec(count, 4);   // The cmp at the beginning guaranty count >= 4

-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
     __ ldx(from, 4, O4);
     __ deccc(count, 4); // Can we do next iteration after this one?
@@ -1907,7 +1907,7 @@
     // to form 2 aligned 8-bytes chunks to store.
     //
     __ ldx(end_from, -4, O3);
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
     __ ldx(end_from, -12, O4);
     __ deccc(count, 4);
@@ -1929,7 +1929,7 @@
     __ delayed()->inc(count, 4);

     // copy 4 elements (16 bytes) at a time
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_aligned_copy);
     __ dec(end_from, 16);
     __ ldx(end_from, 8, O3);
@@ -2000,6 +2000,27 @@
   //      to:    O1
   //      count: O2 treated as signed
   //
+  // count -= 2;
+  // if ( count >= 0 ) { // >= 2 elements
+  //   if ( count > 6) { // >= 8 elements
+  //     count -= 6; // original count - 8
+  //     do {
+  //       copy_8_elements;
+  //       count -= 8;
+  //     } while ( count >= 0 );
+  //     count += 6;
+  //   }
+  //   if ( count >= 0 ) { // >= 2 elements
+  //     do {
+  //       copy_2_elements;
+  //     } while ( (count=count-2) >= 0 );
+  //   }
+  // }
+  // count += 2;
+  // if ( count != 0 ) { // 1 element left
+  //   copy_1_element;
+  // }
+  //
   void generate_disjoint_long_copy_core(bool aligned) {
     Label L_copy_8_bytes, L_copy_16_bytes, L_exit;
     const Register from = O0;  // source array address
@@ -2012,7 +2033,39 @@
     __ mov(G0, offset0);   // offset from start of arrays (0)
     __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
     __ delayed()->add(offset0, 8, offset8);
-    __ align(16);
+
+    // Copy by 64 bytes chunks
+    Label L_copy_64_bytes;
+    const Register from64 = O3;  // source address
+    const Register to64   = G3;  // destination address
+    __ subcc(count, 6, O3);
+    __ brx(Assembler::negative, false, Assembler::pt, L_copy_16_bytes );
+    __ delayed()->mov(to, to64);
+    // Now we can use O4(offset0), O5(offset8) as temps
+    __ mov(O3, count);
+    __ mov(from, from64);
+
+    __ align(OptoLoopAlignment);
+    __ BIND(L_copy_64_bytes);
+    for( int off = 0; off < 64; off += 16 ) {
+      __ ldx(from64, off+0, O4);
+      __ ldx(from64, off+8, O5);
+      __ stx(O4, to64, off+0);
+      __ stx(O5, to64, off+8);
+    }
+    __ deccc(count, 8);
+    __ inc(from64, 64);
+    __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_64_bytes);
+    __ delayed()->inc(to64, 64);
+
+    // Restore O4(offset0), O5(offset8)
+    __ sub(from64, from, offset0);
+    __ inccc(count, 6);
+    __ brx(Assembler::negative, false, Assembler::pn, L_copy_8_bytes );
+    __ delayed()->add(offset0, 8, offset8);
+
+    // Copy by 16 bytes chunks
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
     __ ldx(from, offset0, O3);
     __ ldx(from, offset8, G3);
@@ -2023,6 +2076,7 @@
     __ brx(Assembler::greaterEqual, false, Assembler::pt, L_copy_16_bytes);
     __ delayed()->inc(offset8, 16);

+    // Copy last 8 bytes
     __ BIND(L_copy_8_bytes);
     __ inccc(count, 2);
     __ brx(Assembler::zero, true, Assembler::pn, L_exit );
@@ -2085,7 +2139,7 @@
     __ brx(Assembler::lessEqual, false, Assembler::pn, L_copy_8_bytes );
     __ delayed()->sllx(count, LogBytesPerLong, offset8);
     __ sub(offset8, 8, offset0);
-    __ align(16);
+    __ align(OptoLoopAlignment);
     __ BIND(L_copy_16_bytes);
     __ ldx(from, offset8, O2);
     __ ldx(from, offset0, O3);
@@ -2351,7 +2405,7 @@
     // (O5 = 0; ; O5 += wordSize) --- offset from src, dest arrays
     // (O2 = len; O2 != 0; O2--) --- number of oops *remaining*
     // G3, G4, G5 --- current oop, oop.klass, oop.klass.super
-    __ align(16);
+    __ align(OptoLoopAlignment);

     __ BIND(store_element);
     __ deccc(G1_remain);       // decrement the count
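The pseudocode comment added above maps to C++ roughly as follows; this is an illustrative model of the control flow the stub emits (64-byte chunks, then 16-byte chunks, then the odd element), not HotSpot code:

    #include <cstddef>
    #include <cstdint>

    void disjoint_long_copy(const int64_t* from, int64_t* to, ptrdiff_t count) {
      count -= 2;
      if (count >= 0) {                     // >= 2 elements
        if (count > 6) {                    // >= 8 elements
          count -= 6;                       // original count - 8
          do {                              // copy_8_elements (64 bytes)
            for (int i = 0; i < 8; i++) { *to++ = *from++; }
            count -= 8;
          } while (count >= 0);
          count += 6;
        }
        if (count >= 0) {                   // >= 2 elements
          do {                              // copy_2_elements (16 bytes)
            *to++ = *from++;
            *to++ = *from++;
          } while ((count -= 2) >= 0);
        }
      }
      count += 2;
      if (count != 0) {                     // 1 element left
        *to = *from;
      }
    }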
--- a/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/sparc/vm/vm_version_sparc.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -86,14 +86,24 @@
   if (FLAG_IS_DEFAULT(InteriorEntryAlignment)) {
     FLAG_SET_DEFAULT(InteriorEntryAlignment, 4);
   }
+  if (is_niagara1_plus()) {
+    if (AllocatePrefetchStyle > 0 && FLAG_IS_DEFAULT(AllocatePrefetchStyle)) {
+      // Use BIS instruction for allocation prefetch.
+      FLAG_SET_DEFAULT(AllocatePrefetchStyle, 3);
+      if (FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+        // Use smaller prefetch distance on N2 with BIS
+        FLAG_SET_DEFAULT(AllocatePrefetchDistance, 64);
+      }
+    }
+    if (AllocatePrefetchStyle != 3 && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
+      // Use different prefetch distance without BIS
+      FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
+    }
+  }
+#endif
   if (FLAG_IS_DEFAULT(OptoLoopAlignment)) {
     FLAG_SET_DEFAULT(OptoLoopAlignment, 4);
   }
-  if (is_niagara1_plus() && FLAG_IS_DEFAULT(AllocatePrefetchDistance)) {
-    // Use smaller prefetch distance on N2
-    FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
-  }
-#endif
 }

 // Use hardware population count instruction if available.
--- a/src/cpu/x86/vm/assembler_x86.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/assembler_x86.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -3365,6 +3365,13 @@

 #else // LP64

+void Assembler::set_byte_if_not_zero(Register dst) {
+  int enc = prefix_and_encode(dst->encoding(), true);
+  emit_byte(0x0F);
+  emit_byte(0x95);
+  emit_byte(0xE0 | enc);
+}
+
 // 64bit only pieces of the assembler
 // This should only be used by 64bit instructions that can use rip-relative
 // it cannot be used by instructions that want an immediate value.
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -2690,19 +2690,14 @@
   } else {
     assert(code == lir_cmp_l2i, "check");
 #ifdef _LP64
-    Register dest = dst->as_register();
-    __ xorptr(dest, dest);
-    Label high, done;
-    __ cmpptr(left->as_register_lo(), right->as_register_lo());
-    __ jcc(Assembler::equal, done);
-    __ jcc(Assembler::greater, high);
-    __ decrement(dest);
-    __ jmp(done);
-    __ bind(high);
-    __ increment(dest);
-
-    __ bind(done);
-
+    Label done;
+    Register dest = dst->as_register();
+    __ cmpptr(left->as_register_lo(), right->as_register_lo());
+    __ movl(dest, -1);
+    __ jccb(Assembler::less, done);
+    __ set_byte_if_not_zero(dest);
+    __ movzbl(dest, dest);
+    __ bind(done);
 #else
     __ lcmp2int(left->as_register_hi(),
                 left->as_register_lo(),
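The rewritten LP64 sequence replaces three jumps with a single conditional short jump plus setnz. A minimal C++ model (not HotSpot code) of the -1/0/1 value it computes:

    #include <cstdint>

    // -1 if left < right  (movl dest,-1; jccb(less, done));
    // otherwise 1 if left != right, else 0  (set_byte_if_not_zero + movzbl).
    int cmp_l2i(int64_t left, int64_t right) {
      if (left < right) return -1;
      return (left != right) ? 1 : 0;
    }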
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -781,7 +781,7 @@

   // Restore SP from BP if the exception PC is a MethodHandle call site.
   NOT_LP64(__ get_thread(thread);)
-  __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+  __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);

   // continue at exception handler (return address removed)
--- a/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/c2_globals_x86.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -80,7 +80,6 @@
 // Ergonomics related flags
 define_pd_global(uint64_t,MaxRAM, 4ULL*G);
 #endif // AMD64
-define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, RegisterCostAreaRatio, 16000);

 // Peephole and CISC spilling both break the graph, and so makes the
--- a/src/cpu/x86/vm/globals_x86.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/globals_x86.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -45,6 +45,7 @@
 #else
 define_pd_global(intx, CodeEntryAlignment, 16);
 #endif // COMPILER2
+define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);
 define_pd_global(intx, InlineSmallCode, 1000);

--- a/src/cpu/x86/vm/runtime_x86_32.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -115,8 +115,8 @@

   // rax: exception handler for given <exception oop/exception pc>

-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(rcx, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);

   // We have a handler in rax, (could be deopt blob)
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -3328,8 +3328,8 @@

   // rax: exception handler

-  // Restore SP from BP if the exception PC is a MethodHandle call.
-  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_exception_offset()), 0);
+  // Restore SP from BP if the exception PC is a MethodHandle call site.
+  __ cmpl(Address(r15_thread, JavaThread::is_method_handle_return_offset()), 0);
   __ cmovptr(Assembler::notEqual, rsp, rbp);

   // We have a handler in rax (could be deopt blob).
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -430,7 +430,7 @@
     __ verify_oop(exception_oop);

     // Restore SP from BP if the exception PC is a MethodHandle call site.
-    __ cmpl(Address(thread, JavaThread::is_method_handle_exception_offset()), 0);
+    __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0);
     __ cmovptr(Assembler::notEqual, rsp, rbp);

     // continue at exception handler (return address removed)
@@ -812,7 +812,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);

     if(UseUnalignedLoadStores) {
@@ -874,7 +874,7 @@
     Label L_copy_64_bytes_loop, L_copy_64_bytes, L_copy_8_bytes, L_exit;
     // Copy 64-byte chunks
     __ jmpb(L_copy_64_bytes);
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_64_bytes_loop);
     __ movq(mmx0, Address(from, 0));
     __ movq(mmx1, Address(from, 8));
@@ -1144,7 +1144,7 @@
     __ movl(Address(to, count, sf, 0), rdx);
     __ jmpb(L_copy_8_bytes);

-    __ align(16);
+    __ align(OptoLoopAlignment);
     // Move 8 bytes
   __ BIND(L_copy_8_bytes_loop);
     if (UseXMMForArrayCopy) {
@@ -1235,7 +1235,7 @@
       }
     } else {
       __ jmpb(L_copy_8_bytes);
-      __ align(16);
+      __ align(OptoLoopAlignment);
     __ BIND(L_copy_8_bytes_loop);
       __ fild_d(Address(from, 0));
       __ fistp_d(Address(from, to_from, Address::times_1));
@@ -1282,7 +1282,7 @@

     __ jmpb(L_copy_8_bytes);

-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_copy_8_bytes_loop);
     if (VM_Version::supports_mmx()) {
       if (UseXMMForArrayCopy) {
@@ -1454,7 +1454,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*count,to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);

   __ BIND(L_store_element);
     __ movptr(to_element_addr, elem);     // store the oop
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -871,9 +871,8 @@
   }

   address generate_fp_mask(const char *stub_name, int64_t mask) {
+    __ align(CodeEntryAlignment);
     StubCodeMark mark(this, "StubRoutines", stub_name);
-
-    __ align(16);
     address start = __ pc();

     __ emit_data64( mask, relocInfo::none );
@@ -1268,7 +1267,7 @@
                              Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
       __ movdqu(xmm0, Address(end_from, qword_count, Address::times_8, -24));
@@ -1309,7 +1308,7 @@
                               Label& L_copy_32_bytes, Label& L_copy_8_bytes) {
     DEBUG_ONLY(__ stop("enter at entry label, not here"));
     Label L_loop;
-    __ align(16);
+    __ align(OptoLoopAlignment);
   __ BIND(L_loop);
     if(UseUnalignedLoadStores) {
      __ movdqu(xmm0, Address(from, qword_count, Address::times_8, 16));
@@ -2229,7 +2228,7 @@
     // Loop control:
     //   for (count = -count; count != 0; count++)
     // Base pointers src, dst are biased by 8*(count-1),to last element.
-    __ align(16);
+    __ align(OptoLoopAlignment);

   __ BIND(L_store_element);
     __ store_heap_oop(to_element_addr, rax_oop);  // store the oop
--- a/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/zero/vm/cppInterpreter_zero.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -206,7 +206,6 @@

   // Update the invocation counter
   if ((UseCompiler || CountCompiledCalls) && !method->is_synchronized()) {
-    thread->set_do_not_unlock();
     InvocationCounter *counter = method->invocation_counter();
     counter->increment();
     if (counter->reached_InvocationLimit()) {
@@ -215,7 +214,6 @@
       if (HAS_PENDING_EXCEPTION)
         goto unwind_and_return;
     }
-    thread->clr_do_not_unlock();
   }

   // Lock if necessary
--- a/src/cpu/zero/vm/methodHandles_zero.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/zero/vm/methodHandles_zero.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2009 Red Hat, Inc.
+ * Copyright 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,4 +23,10 @@
  *
  */

-// This file is intentionally empty
+#include "incls/_precompiled.incl"
+#include "incls/_methodHandles_zero.cpp.incl"
+
+void MethodHandles::generate_method_handle_stub(MacroAssembler* masm,
+                                                MethodHandles::EntryKind ek) {
+  ShouldNotCallThis();
+}
--- a/src/cpu/zero/vm/stubRoutines_zero.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/cpu/zero/vm/stubRoutines_zero.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1,6 +1,6 @@
 /*
  * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
- * Copyright 2007, 2008, 2009 Red Hat, Inc.
+ * Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,6 +41,10 @@
     code_size2 = 0      // if these are too small.  Simply increase
   };                    // them if that happens.

+  enum method_handles_platform_dependent_constants {
+    method_handles_adapters_code_size = 0
+  };
+
 #ifdef IA32
   class x86 {
     friend class VMStructs;
--- a/src/os/linux/vm/attachListener_linux.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/linux/vm/attachListener_linux.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -192,7 +192,8 @@
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
   }
   if (res == -1) {
-    sprintf(path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+    snprintf(path, PATH_MAX+1, "%s/.java_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     strcpy(addr.sun_path, path);
     ::unlink(path);
     res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr));
@@ -460,13 +461,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[128];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
--- a/src/os/linux/vm/os_linux.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/linux/vm/os_linux.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1522,7 +1522,10 @@

 const char* os::dll_file_extension() { return ".so"; }

-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}

 static bool file_exists(const char* filename) {
   struct stat statbuf;
@@ -2305,7 +2308,8 @@
   char buf[40];
   int num = Atomic::add(1, &cnt);

-  sprintf(buf, "/tmp/hs-vm-%d-%d", os::current_process_id(), num);
+  snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d",
+           os::get_temp_directory(), os::current_process_id(), num);
   unlink(buf);

   int fd = open(buf, O_CREAT | O_RDWR, S_IRWXU);
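Since get_temp_directory() no longer returns a trailing slash, callers join paths explicitly. A standalone sketch of the new convention (getenv stands in for the java.io.tmpdir property; not HotSpot code):

    #include <cstdio>
    #include <cstdlib>

    static const char* get_temp_directory() {
      const char* prop = std::getenv("TMPDIR");  // stand-in for java.io.tmpdir
      return prop == NULL ? "/tmp" : prop;
    }

    int main() {
      char buf[64];
      // Callers now supply the separator themselves:
      std::snprintf(buf, sizeof(buf), "%s/hs-vm-%d-%d", get_temp_directory(), 1234, 1);
      std::puts(buf);  // e.g. /tmp/hs-vm-1234-1
      return 0;
    }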
--- a/src/os/linux/vm/perfMemory_linux.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/linux/vm/perfMemory_linux.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -145,11 +145,11 @@

   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

   return dirname;
 }
@@ -331,8 +331,9 @@
     }

     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);

     DIR* subdirp = os::opendir(usrdir_name);
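The size bump from +2 to +3 accounts for the newly inserted '/' separator: the buffer must hold tmpdir, '/', perfdir, '_', user, and the terminating NUL. Worked out with illustrative values (tmpdir "/tmp", perfdir "hsperfdata", user "alice"):

    // strlen("/tmp") + strlen("hsperfdata") + strlen("alice") + 3
    //   = 4 + 10 + 5 + 3 = 22 bytes
    // strlen("/tmp/hsperfdata_alice") + 1 (NUL)
    //   = 21 + 1 = 22 bytes -- an exact fit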
--- a/src/os/solaris/vm/attachListener_solaris.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/solaris/vm/attachListener_solaris.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -375,7 +375,8 @@
     return -1;
   }

-  sprintf(door_path, "%s/.java_pid%d", os::get_temp_directory(), os::current_process_id());
+  snprintf(door_path, sizeof(door_path), "%s/.java_pid%d",
+           os::get_temp_directory(), os::current_process_id());
   RESTARTABLE(::creat(door_path, S_IRUSR | S_IWUSR), fd);

   if (fd == -1) {
@@ -591,13 +592,14 @@
   if (init_at_startup() || is_initialized()) {
     return false;               // initialized at startup or already initialized
   }
-  char fn[32];
+  char fn[128];
   sprintf(fn, ".attach_pid%d", os::current_process_id());
   int ret;
   struct stat64 st;
   RESTARTABLE(::stat64(fn, &st), ret);
   if (ret == -1) {
-    sprintf(fn, "/tmp/.attach_pid%d", os::current_process_id());
+    snprintf(fn, sizeof(fn), "%s/.attach_pid%d",
+             os::get_temp_directory(), os::current_process_id());
     RESTARTABLE(::stat64(fn, &st), ret);
   }
   if (ret == 0) {
--- a/src/os/solaris/vm/os_solaris.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/solaris/vm/os_solaris.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -676,15 +676,6 @@
 }


-static char* get_property(char* name, char* buffer, int buffer_size) {
-  if (os::getenv(name, buffer, buffer_size)) {
-    return buffer;
-  }
-  static char empty[] = "";
-  return empty;
-}
-
-
 void os::init_system_properties_values() {
   char arch[12];
   sysinfo(SI_ARCHITECTURE, arch, sizeof(arch));
@@ -1826,7 +1817,10 @@

 const char* os::dll_file_extension() { return ".so"; }

-const char* os::get_temp_directory() { return "/tmp/"; }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  return prop == NULL ? "/tmp" : prop;
+}

 static bool file_exists(const char* filename) {
   struct stat statbuf;
--- a/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/solaris/vm/perfMemory_solaris.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -147,11 +147,11 @@

   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

   // construct the path name to user specific tmp directory
-  snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  snprintf(dirname, nbytes, "%s/%s_%s", tmpdir, perfdir, user);

   return dirname;
 }
@@ -322,8 +322,9 @@
     }

     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "/");
     strcat(usrdir_name, dentry->d_name);

     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/os/windows/vm/os_windows.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/windows/vm/os_windows.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -998,15 +998,16 @@

 const char* os::dll_file_extension() { return ".dll"; }

-const char * os::get_temp_directory()
-{
-  static char path_buf[MAX_PATH];
-  if (GetTempPath(MAX_PATH, path_buf)>0)
-    return path_buf;
-  else{
-    path_buf[0]='\0';
-    return path_buf;
-  }
+const char* os::get_temp_directory() {
+  const char *prop = Arguments::get_property("java.io.tmpdir");
+  if (prop != 0) return prop;
+  static char path_buf[MAX_PATH];
+  if (GetTempPath(MAX_PATH, path_buf)>0)
+    return path_buf;
+  else{
+    path_buf[0]='\0';
+    return path_buf;
+  }
 }

 static bool file_exists(const char* filename) {
--- a/src/os/windows/vm/perfMemory_windows.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/os/windows/vm/perfMemory_windows.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -149,11 +149,11 @@

   const char* tmpdir = os::get_temp_directory();
   const char* perfdir = PERFDATA_NAME;
-  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 2;
+  size_t nbytes = strlen(tmpdir) + strlen(perfdir) + strlen(user) + 3;
   char* dirname = NEW_C_HEAP_ARRAY(char, nbytes);

   // construct the path name to user specific tmp directory
-  _snprintf(dirname, nbytes, "%s%s_%s", tmpdir, perfdir, user);
+  _snprintf(dirname, nbytes, "%s\\%s_%s", tmpdir, perfdir, user);

   return dirname;
 }
@@ -318,8 +318,9 @@
     }

     char* usrdir_name = NEW_C_HEAP_ARRAY(char,
-                              strlen(tmpdirname) + strlen(dentry->d_name) + 1);
+                              strlen(tmpdirname) + strlen(dentry->d_name) + 2);
     strcpy(usrdir_name, tmpdirname);
+    strcat(usrdir_name, "\\");
     strcat(usrdir_name, dentry->d_name);

     DIR* subdirp = os::opendir(usrdir_name);
--- a/src/share/vm/c1/c1_LinearScan.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/c1/c1_LinearScan.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -2608,6 +2608,46 @@
     } else if (opr->is_double_xmm()) {
       assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
       VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
+#  ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#  else
+      first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
+      // %%% This is probably a waste but we'll keep things as they were for now
+      if (true) {
+        VMReg rname_second = rname_first->next();
+        second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
+      }
+#  endif
+#endif
+
+    } else if (opr->is_double_fpu()) {
+      // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
+      // the double as float registers in the native ordering. On X86,
+      // fpu_regnrLo is a FPU stack slot whose VMReg represents
+      // the low-order word of the double and fpu_regnrLo + 1 is the
+      // name for the other half.  *first and *second must represent the
+      // least and most significant words, respectively.
+
+#ifdef X86
+      // the exact location of fpu stack values is only known
+      // during fpu stack allocation, so the stack allocator object
+      // must be present
+      assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
+      assert(_fpu_stack_allocator != NULL, "must be present");
+      opr = _fpu_stack_allocator->to_fpu_stack(opr);
+
+      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
+#endif
+#ifdef SPARC
+      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
+#endif
+
+      VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
+#ifdef _LP64
+      first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
+      second = &_int_0_scope_value;
+#else
       first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
       // %%% This is probably a waste but we'll keep things as they were for now
       if (true) {
@@ -2616,37 +2656,6 @@
       }
 #endif

-    } else if (opr->is_double_fpu()) {
-      // On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
-      // the double as float registers in the native ordering. On X86,
-      // fpu_regnrLo is a FPU stack slot whose VMReg represents
-      // the low-order word of the double and fpu_regnrLo + 1 is the
-      // name for the other half.  *first and *second must represent the
-      // least and most significant words, respectively.
-
-#ifdef X86
-      // the exact location of fpu stack values is only known
-      // during fpu stack allocation, so the stack allocator object
-      // must be present
-      assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
-      assert(_fpu_stack_allocator != NULL, "must be present");
-      opr = _fpu_stack_allocator->to_fpu_stack(opr);
-
-      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
-#endif
-#ifdef SPARC
-      assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
-#endif
-
-      VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
-
-      first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
-      // %%% This is probably a waste but we'll keep things as they were for now
-      if (true) {
-        VMReg rname_second = rname_first->next();
-        second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
-      }
-
     } else {
       ShouldNotReachHere();
       first = NULL;
--- a/src/share/vm/ci/ciConstant.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/ci/ciConstant.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -36,7 +36,7 @@
              basictype_to_str(basic_type()));
   switch (basic_type()) {
   case T_BOOLEAN:
-    tty->print("%s", bool_to_str(_value._int == 0));
+    tty->print("%s", bool_to_str(_value._int != 0));
     break;
   case T_CHAR:
   case T_BYTE:
--- a/src/share/vm/classfile/classFileParser.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/classfile/classFileParser.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -2956,8 +2956,8 @@
 #endif
   bool compact_fields   = CompactFields;
   int  allocation_style = FieldsAllocationStyle;
-  if( allocation_style < 0 || allocation_style > 1 ) { // Out of range?
-    assert(false, "0 <= FieldsAllocationStyle <= 1");
+  if( allocation_style < 0 || allocation_style > 2 ) { // Out of range?
+    assert(false, "0 <= FieldsAllocationStyle <= 2");
     allocation_style = 1; // Optimistic
   }

@@ -2993,6 +2993,25 @@
   } else if( allocation_style == 1 ) {
     // Fields order: longs/doubles, ints, shorts/chars, bytes, oops
     next_nonstatic_double_offset = next_nonstatic_field_offset;
+  } else if( allocation_style == 2 ) {
+    // Fields allocation: oops fields in super and sub classes are together.
+    if( nonstatic_field_size > 0 && super_klass() != NULL &&
+        super_klass->nonstatic_oop_map_size() > 0 ) {
+      int map_size = super_klass->nonstatic_oop_map_size();
+      OopMapBlock* first_map = super_klass->start_of_nonstatic_oop_maps();
+      OopMapBlock* last_map = first_map + map_size - 1;
+      int next_offset = last_map->offset() + (last_map->count() * heapOopSize);
+      if (next_offset == next_nonstatic_field_offset) {
+        allocation_style = 0;   // allocate oops first
+        next_nonstatic_oop_offset    = next_nonstatic_field_offset;
+        next_nonstatic_double_offset = next_nonstatic_oop_offset +
+                                       (nonstatic_oop_count * heapOopSize);
+      }
+    }
+    if( allocation_style == 2 ) {
+      allocation_style = 1;     // allocate oops last
+      next_nonstatic_double_offset = next_nonstatic_field_offset;
+    }
   } else {
     ShouldNotReachHere();
   }
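A minimal model (not HotSpot code) of the style-2 decision above: oops are laid out first only when the superclass's last oop-map block ends exactly where this class's fields begin, so the two oop runs abut and can be described by one contiguous block; otherwise the style degrades to 1 (oops last):

    struct OopMapBlock { int offset; int count; };

    // Returns the effective style: 0 = oops first, 1 = oops last.
    int choose_effective_style(const OopMapBlock* last_super_map,
                               int heap_oop_size,
                               int next_nonstatic_field_offset) {
      int next_offset = last_super_map->offset +
                        last_super_map->count * heap_oop_size;
      return (next_offset == next_nonstatic_field_offset) ? 0 : 1;
    }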
--- a/src/share/vm/code/codeCache.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/code/codeCache.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1,5 +1,5 @@
 /*
- * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -284,9 +284,11 @@
       cur->print_on(tty, is_live ? "scavenge root" : "dead scavenge root"); tty->cr();
     }
 #endif //PRODUCT
-    if (is_live)
+    if (is_live) {
       // Perform cur->oops_do(f), maybe just once per nmethod.
       f->do_code_blob(cur);
+      cur->fix_oop_relocations();
+    }
   }

   // Check for stray marks.
--- a/src/share/vm/compiler/compileBroker.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/compiler/compileBroker.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -1414,9 +1414,14 @@
   intx thread_id = os::current_thread_id();
   for (int try_temp_dir = 1; try_temp_dir >= 0; try_temp_dir--) {
     const char* dir = (try_temp_dir ? os::get_temp_directory() : NULL);
-    if (dir == NULL)  dir = "";
-    sprintf(fileBuf, "%shs_c" UINTX_FORMAT "_pid%u.log",
-            dir, thread_id, os::current_process_id());
+    if (dir == NULL) {
+      jio_snprintf(fileBuf, sizeof(fileBuf), "hs_c" UINTX_FORMAT "_pid%u.log",
+                   thread_id, os::current_process_id());
+    } else {
+      jio_snprintf(fileBuf, sizeof(fileBuf),
+                   "%s%shs_c" UINTX_FORMAT "_pid%u.log", dir,
+                   os::file_separator(), thread_id, os::current_process_id());
+    }
     fp = fopen(fileBuf, "at");
     if (fp != NULL) {
       file = NEW_C_HEAP_ARRAY(char, strlen(fileBuf)+1);
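With the new code the compiler log gets an OS-appropriate separator between directory and file name. A standalone sketch of the resulting name (plain snprintf standing in for jio_snprintf; all values illustrative):

    #include <cstdio>

    int main() {
      char fileBuf[128];
      const char* dir = "/tmp";      // os::get_temp_directory()
      const char* sep = "/";         // os::file_separator()
      unsigned long thread_id = 7;   // os::current_thread_id()
      unsigned pid = 1234;           // os::current_process_id()
      std::snprintf(fileBuf, sizeof(fileBuf), "%s%shs_c%lu_pid%u.log",
                    dir, sep, thread_id, pid);
      std::puts(fileBuf);            // prints: /tmp/hs_c7_pid1234.log
      return 0;
    }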
--- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -297,6 +297,11 @@
   }
 }

+// Currently we do not call this at all. Normally we would call it
+// during the concurrent marking / remark phases but we now call
+// the lock-based version instead. But we might want to resurrect this
+// code in the future. So, we'll leave it here commented out.
+#if 0
 MemRegion CMRegionStack::pop() {
   while (true) {
     // Otherwise...
@@ -321,6 +326,41 @@
     // Otherwise, we need to try again.
   }
 }
+#endif // 0
+
+void CMRegionStack::push_with_lock(MemRegion mr) {
+  assert(mr.word_size() > 0, "Precondition");
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  if (isFull()) {
+    _overflow = true;
+    return;
+  }
+
+  _base[_index] = mr;
+  _index += 1;
+}
+
+MemRegion CMRegionStack::pop_with_lock() {
+  MutexLockerEx x(CMRegionStack_lock, Mutex::_no_safepoint_check_flag);
+
+  while (true) {
+    if (_index == 0) {
+      return MemRegion();
+    }
+    _index -= 1;
+
+    MemRegion mr = _base[_index];
+    if (mr.start() != NULL) {
+      assert(mr.end() != NULL, "invariant");
+      assert(mr.word_size() > 0, "invariant");
+      return mr;
+    } else {
+      // that entry was invalidated... let's skip it
+      assert(mr.end() == NULL, "invariant");
+    }
+  }
+}

 bool CMRegionStack::invalidate_entries_into_cset() {
   bool result = false;
@@ -668,24 +708,46 @@
 //

 void ConcurrentMark::clearNextBitmap() {
-  guarantee(!G1CollectedHeap::heap()->mark_in_progress(), "Precondition.");
-
-  // clear the mark bitmap (no grey objects to start with).
-  // We need to do this in chunks and offer to yield in between
-  // each chunk.
-  HeapWord* start = _nextMarkBitMap->startWord();
-  HeapWord* end   = _nextMarkBitMap->endWord();
-  HeapWord* cur   = start;
-  size_t chunkSize = M;
-  while (cur < end) {
-    HeapWord* next = cur + chunkSize;
-    if (next > end)
-      next = end;
-    MemRegion mr(cur,next);
-    _nextMarkBitMap->clearRange(mr);
-    cur = next;
-    do_yield_check();
-  }
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+  G1CollectorPolicy* g1p = g1h->g1_policy();
+
+  // Make sure that the concurrent mark thread looks to still be in
+  // the current cycle.
+  guarantee(cmThread()->during_cycle(), "invariant");
+
+  // We are finishing up the current cycle by clearing the next
+  // marking bitmap and getting it ready for the next cycle. During
+  // this time no other cycle can start. So, let's make sure that this
+  // is the case.
+  guarantee(!g1h->mark_in_progress(), "invariant");
+
+  // clear the mark bitmap (no grey objects to start with).
+  // We need to do this in chunks and offer to yield in between
+  // each chunk.
+  HeapWord* start = _nextMarkBitMap->startWord();
+  HeapWord* end   = _nextMarkBitMap->endWord();
+  HeapWord* cur   = start;
+  size_t chunkSize = M;
+  while (cur < end) {
+    HeapWord* next = cur + chunkSize;
+    if (next > end)
+      next = end;
+    MemRegion mr(cur,next);
+    _nextMarkBitMap->clearRange(mr);
+    cur = next;
+    do_yield_check();
+
+    // Repeat the asserts from above. We'll do them as asserts here to
+    // minimize their overhead on the product. However, we'll have
+    // them as guarantees at the beginning / end of the bitmap
+    // clearing to get some checking in the product.
+    assert(cmThread()->during_cycle(), "invariant");
+    assert(!g1h->mark_in_progress(), "invariant");
+  }
+
+  // Repeat the asserts from above.
+  guarantee(cmThread()->during_cycle(), "invariant");
+  guarantee(!g1h->mark_in_progress(), "invariant");
 }

 class NoteStartOfMarkHRClosure: public HeapRegionClosure {
@@ -3363,7 +3425,7 @@
       gclog_or_tty->print_cr("[%d] draining region stack, size = %d",
                              _task_id, _cm->region_stack_size());

-    MemRegion mr = _cm->region_stack_pop();
+    MemRegion mr = _cm->region_stack_pop_with_lock();
     // it returns MemRegion() if the pop fails
     statsOnly(if (mr.start() != NULL) ++_region_stack_pops );

@@ -3384,7 +3446,7 @@
       if (has_aborted())
         mr = MemRegion();
       else {
-        mr = _cm->region_stack_pop();
+        mr = _cm->region_stack_pop_with_lock();
         // it returns MemRegion() if the pop fails
         statsOnly(if (mr.start() != NULL) ++_region_stack_pops );
       }
@@ -3417,7 +3479,7 @@
         }
         // Now push the part of the region we didn't scan on the
         // region stack to make sure a task scans it later.
-        _cm->region_stack_push(newRegion);
+        _cm->region_stack_push_with_lock(newRegion);
       }
       // break from while
       mr = MemRegion();
--- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -252,9 +252,19 @@
   // with other "push" operations (no pops).
   void push(MemRegion mr);

+#if 0
+  // This is currently not used. See the comment in the .cpp file.
+
   // Lock-free; assumes that it will only be called in parallel
   // with other "pop" operations (no pushes).
   MemRegion pop();
+#endif // 0
+
+  // These two are the implementations that use a lock. They can be
+  // called concurrently with each other but they should not be called
+  // concurrently with the lock-free versions (push() / pop()).
+  void push_with_lock(MemRegion mr);
+  MemRegion pop_with_lock();

   bool isEmpty()    { return _index == 0; }
   bool isFull()     { return _index == _capacity; }
@@ -540,6 +550,10 @@

   // Manipulation of the region stack
   bool region_stack_push(MemRegion mr) {
+    // Currently we only call the lock-free version during evacuation
+    // pauses.
+    assert(SafepointSynchronize::is_at_safepoint(), "world should be stopped");
+
     _regionStack.push(mr);
     if (_regionStack.overflow()) {
       set_has_overflown();
@@ -547,7 +561,33 @@
     }
     return true;
   }
-  MemRegion region_stack_pop() { return _regionStack.pop(); }
+#if 0
+  // Currently this is not used. See the comment in the .cpp file.
+  MemRegion region_stack_pop() { return _regionStack.pop(); }
+#endif // 0
+
+  bool region_stack_push_with_lock(MemRegion mr) {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    _regionStack.push_with_lock(mr);
+    if (_regionStack.overflow()) {
+      set_has_overflown();
+      return false;
+    }
+    return true;
+  }
+  MemRegion region_stack_pop_with_lock() {
+    // Currently we only call the lock-based version during either
+    // concurrent marking or remark.
+    assert(!SafepointSynchronize::is_at_safepoint() || !concurrent(),
+           "if we are at a safepoint it should be the remark safepoint");
+
+    return _regionStack.pop_with_lock();
+  }
+
   int region_stack_size() { return _regionStack.size(); }
   bool region_stack_overflow() { return _regionStack.overflow(); }
   bool region_stack_empty() { return _regionStack.isEmpty(); }
--- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.hpp	Thu Apr 15 19:08:48 2010 -0700
@@ -42,8 +42,8 @@

  private:
   ConcurrentMark* _cm;
-  bool _started;
-  bool _in_progress;
+  volatile bool _started;
+  volatile bool _in_progress;

   void sleepBeforeNextCycle();

@@ -67,15 +67,25 @@
   // Counting virtual time so far.
   double vtime_count_accum() { return _vtime_count_accum; }

-  ConcurrentMark* cm()                      { return _cm; }
+  ConcurrentMark* cm()     { return _cm;           }

-  void            set_started()             { _started = true; }
-  void            clear_started()           { _started = false; }
-  bool            started()                 { return _started; }
+  void set_started()       { _started = true;      }
+  void clear_started()     { _started = false;     }
+  bool started()           { return _started;      }

-  void            set_in_progress()         { _in_progress = true; }
-  void            clear_in_progress()       { _in_progress = false; }
-  bool            in_progress()             { return _in_progress; }
+  void set_in_progress()   { _in_progress = true;  }
+  void clear_in_progress() { _in_progress = false; }
+  bool in_progress()       { return _in_progress;  }
+
+  // This flag returns true from the moment a marking cycle is
+  // initiated (during the initial-mark pause when started() is set)
+  // to the moment when the cycle completes (just after the next
+  // marking bitmap has been cleared and in_progress() is
+  // cleared). While this flag is true we will not start another cycle
+  // so that cycles do not overlap. We cannot use just in_progress()
+  // as the CM thread might take some time to wake up before noticing
+  // that started() is set and set in_progress().
+  bool during_cycle()      { return started() || in_progress(); }

   // Yield for GC
   void yield();
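A minimal model (not HotSpot code) of the overlap guard the new flag provides: started is set at the initial-mark pause, in_progress while the marking thread runs, and during_cycle() covers the whole window, including the gap where the CM thread has not yet woken up to convert started into in_progress:

    struct CMThreadModel {
      volatile bool started = false;      // set at the initial-mark pause
      volatile bool in_progress = false;  // set once the CM thread wakes up
      bool during_cycle() const { return started || in_progress; }
    };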
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 15 19:08:18 2010 -0700
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Apr 15 19:08:48 2010 -0700
@@ -902,6 +902,10 @@

 void G1CollectedHeap::do_collection(bool full, bool clear_all_soft_refs,
                                     size_t word_size) {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   ResourceMark rm;

   if (PrintHeapAtGC) {
@@ -916,10 +920,6 @@
   assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
   assert(Thread::current() == VMThread::vm_thread(), "should be in vm thread");

-  if (GC_locker::is_active()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-  }
-
   {
     IsGCActiveMark x;

@@ -2658,6 +2658,10 @@

 void
 G1CollectedHeap::do_collection_pause_at_safepoint() {
+  if (GC_locker::check_active_before_gc()) {
+    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+  }
+
   if (PrintHeapAtGC) {
     Universe::print_heap_before_gc();
   }
@@ -2665,6 +2669,11 @@
   {
     ResourceMark rm;

+    // This call will decide whether this pause is an initial-mark
+    // pause. If it is, during_initial_mark_pause() will return true
+    // for the duration of this pause.
+    g1_policy()->decide_on_conc_mark_initiation();
+
     char verbose_str[128];
     sprintf(verbose_str, "GC pause ");
     if (g1_policy()->in_young_gc_mode()) {
@@ -2673,7 +2682,7 @@
       else
         strcat(verbose_str, "(partial)");
     }
-    if (g1_policy()->should_initiate_conc_mark())
+    if (g1_policy()->during_initial_mark_pause())
       strcat(verbose_str, " (initial-mark)");

     // if PrintGCDetails is on, we'll print long statistics information
@@ -2697,10 +2706,6 @@
              "young list should be well formed");
     }

-    if (GC_locker::is_active()) {
-      return; // GC is disabled (e.g. JNI GetXXXCritical operation)
-    }
-
     bool abandoned = false;
     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
       IsGCActiveMark x;
@@ -2756,7 +2761,7 @@
       _young_list->print();
 #endif // SCAN_ONLY_VERBOSE

-      if (g1_policy()->should_initiate_conc_mark()) {
+      if (g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPre();
       }
       save_marks();
@@ -2858,7 +2863,7 @@
       }

       if (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->should_initiate_conc_mark()) {
+          g1_policy()->during_initial_mark_pause()) {
         concurrent_mark()->checkpointRootsInitialPost();
         set_marking_started();
         // CAUTION: after the doConcurrentMark() call below,
@@ -2937,6 +2942,9 @@
   // the same region
   assert(r == NULL || !r->is_gc_alloc_region(),
          "shouldn't already be a GC alloc region");
+  assert(r == NULL || !r->isHumongous(),
+         "humongous regions shouldn't be used as GC alloc regions");
+
   HeapWord* original_top = NULL;
   if (r != NULL)
     original_top = r->top();
@@ -3079,12 +3087,17 @@

     if (alloc_region->in_collection_set() ||
         alloc_region->top() == alloc_region->end() ||
-        alloc_region->top() == alloc_region->bottom()) {
-      // we will discard the current GC alloc region if it's in the
-      // collection set (it can happen!), if it's already full (no
-      // point in using it), or if it's empty (this means that it
-      // was emptied during a cleanup and it should be on the free
-      // list now).
+        alloc_region->top() == alloc_region->bottom() ||
+        alloc_region->isHumongous()) {
+      // we will discard the current GC alloc region if
+      // * it's in the collection set (it can happen!),
+      // * it's already full (no point in using it),
+      // * it's empty (this means that it was emptied during
+      // a cleanup and it should be on the free list now), or
+      // * it's humongous (this means that it was emptied
+      // during a cleanup and was added to the free list, but
+      // has been subseqently used to allocate a humongous
+      // object that may be less than the region size).

       alloc_region = NULL;
     }
@@ -3977,7 +3990,7 @@
     OopsInHeapRegionClosure *scan_root_cl;
     OopsInHeapRegionClosure *scan_perm_cl;
     OopsInHeapRegionClosure *scan_so_cl;

-    if (_g1h->g1_policy()->should_initiate_conc_mark()) {
+    if (_g1h->g1_policy()->during_initial_mark_pause()) {
       scan_root_cl = &scan_mark_root_cl;
       scan_perm_cl = &scan_mark_perm_cl;
       scan_so_cl = &scan_mark_heap_rs_cl;
@@ -4140,7 +4153,7 @@
   FilterAndMarkInHeapRegionAndIntoCSClosure scan_and_mark(this, &boc, concurrent_mark());

   OopsInHeapRegionClosure *foc;
-  if (g1_policy()->should_initiate_conc_mark())
+  if (g1_policy()->during_initial_mark_pause())
     foc = &scan_and_mark;
   else
     foc = &scan_only;
36.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Apr 15 19:08:18 2010 -0700 36.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp Thu Apr 15 19:08:48 2010 -0700 36.3 @@ -178,8 +178,8 @@ 36.4 // so the hack is to do the cast QQQ FIXME 36.5 _pauses_btwn_concurrent_mark((size_t)G1PausesBtwnConcMark), 36.6 _n_marks_since_last_pause(0), 36.7 - _conc_mark_initiated(false), 36.8 - _should_initiate_conc_mark(false), 36.9 + _initiate_conc_mark_if_possible(false), 36.10 + _during_initial_mark_pause(false), 36.11 _should_revert_to_full_young_gcs(false), 36.12 _last_full_young_gc(false), 36.13 36.14 @@ -198,7 +198,9 @@ 36.15 _recorded_survivor_regions(0), 36.16 _recorded_survivor_head(NULL), 36.17 _recorded_survivor_tail(NULL), 36.18 - _survivors_age_table(true) 36.19 + _survivors_age_table(true), 36.20 + 36.21 + _gc_overhead_perc(0.0) 36.22 36.23 { 36.24 // Set up the region size and associated fields. Given that the 36.25 @@ -275,6 +277,11 @@ 36.26 // calculate_young_list_target_config during initialization 36.27 _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes; 36.28 36.29 + assert(GCTimeRatio > 0, 36.30 + "we should have set it to a default value set_g1_gc_flags() " 36.31 + "if a user set it to 0"); 36.32 + _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio)); 36.33 + 36.34 initialize_all(); 36.35 } 36.36 36.37 @@ -786,7 +793,7 @@ 36.38 elapsed_time_ms, 36.39 calculations, 36.40 full_young_gcs() ? "full" : "partial", 36.41 - should_initiate_conc_mark() ? " i-m" : "", 36.42 + during_initial_mark_pause() ? " i-m" : "", 36.43 _in_marking_window, 36.44 _in_marking_window_im); 36.45 #endif // TRACE_CALC_YOUNG_CONFIG 36.46 @@ -1033,7 +1040,8 @@ 36.47 set_full_young_gcs(true); 36.48 _last_full_young_gc = false; 36.49 _should_revert_to_full_young_gcs = false; 36.50 - _should_initiate_conc_mark = false; 36.51 + clear_initiate_conc_mark_if_possible(); 36.52 + clear_during_initial_mark_pause(); 36.53 _known_garbage_bytes = 0; 36.54 _known_garbage_ratio = 0.0; 36.55 _in_marking_window = false; 36.56 @@ -1179,7 +1187,8 @@ 36.57 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double 36.58 mark_init_elapsed_time_ms) { 36.59 _during_marking = true; 36.60 - _should_initiate_conc_mark = false; 36.61 + assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now"); 36.62 + clear_during_initial_mark_pause(); 36.63 _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms; 36.64 } 36.65 36.66 @@ -1250,7 +1259,6 @@ 36.67 } 36.68 _n_pauses_at_mark_end = _n_pauses; 36.69 _n_marks_since_last_pause++; 36.70 - _conc_mark_initiated = false; 36.71 } 36.72 36.73 void 36.74 @@ -1446,17 +1454,24 @@ 36.75 #endif // PRODUCT 36.76 36.77 if (in_young_gc_mode()) { 36.78 - last_pause_included_initial_mark = _should_initiate_conc_mark; 36.79 + last_pause_included_initial_mark = during_initial_mark_pause(); 36.80 if (last_pause_included_initial_mark) 36.81 record_concurrent_mark_init_end_pre(0.0); 36.82 36.83 size_t min_used_targ = 36.84 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent; 36.85 36.86 - if (cur_used_bytes > min_used_targ) { 36.87 - if (cur_used_bytes <= _prev_collection_pause_used_at_end_bytes) { 36.88 - } else if (!_g1->mark_in_progress() && !_last_full_young_gc) { 36.89 - _should_initiate_conc_mark = true; 36.90 + 36.91 + if (!_g1->mark_in_progress() && !_last_full_young_gc) { 36.92 + assert(!last_pause_included_initial_mark, "invariant"); 36.93 + if (cur_used_bytes > min_used_targ && 36.94 + cur_used_bytes > 
_prev_collection_pause_used_at_end_bytes) { 36.95 + assert(!during_initial_mark_pause(), "we should not see this here"); 36.96 + 36.97 + // Note: this might have already been set, if during the last 36.98 + // pause we decided to start a cycle but at the beginning of 36.99 + // this pause we decided to postpone it. That's OK. 36.100 + set_initiate_conc_mark_if_possible(); 36.101 } 36.102 } 36.103 36.104 @@ -1747,7 +1762,7 @@ 36.105 36.106 bool new_in_marking_window = _in_marking_window; 36.107 bool new_in_marking_window_im = false; 36.108 - if (_should_initiate_conc_mark) { 36.109 + if (during_initial_mark_pause()) { 36.110 new_in_marking_window = true; 36.111 new_in_marking_window_im = true; 36.112 } 36.113 @@ -2166,7 +2181,13 @@ 36.114 if (predicted_time_ms > _expensive_region_limit_ms) { 36.115 if (!in_young_gc_mode()) { 36.116 set_full_young_gcs(true); 36.117 - _should_initiate_conc_mark = true; 36.118 + // We might want to do something different here. However, 36.119 + // right now we don't support the non-generational G1 mode 36.120 + // (and in fact we are planning to remove the associated code, 36.121 + // see CR 6814390). So, let's leave it as is and this will be 36.122 + // removed some time in the future 36.123 + ShouldNotReachHere(); 36.124 + set_during_initial_mark_pause(); 36.125 } else 36.126 // no point in doing another partial one 36.127 _should_revert_to_full_young_gcs = true; 36.128 @@ -2288,7 +2309,7 @@ 36.129 } 36.130 36.131 size_t G1CollectorPolicy::expansion_amount() { 36.132 - if ((int)(recent_avg_pause_time_ratio() * 100.0) > G1GCPercent) { 36.133 + if ((recent_avg_pause_time_ratio() * 100.0) > _gc_overhead_perc) { 36.134 // We will double the existing space, or take 36.135 // G1ExpandByPercentOfAvailable % of the available expansion 36.136 // space, whichever is smaller, bounded below by a minimum 36.137 @@ -2690,6 +2711,50 @@ 36.138 #endif 36.139 36.140 void 36.141 +G1CollectorPolicy::decide_on_conc_mark_initiation() { 36.142 + // We are about to decide on whether this pause will be an 36.143 + // initial-mark pause. 36.144 + 36.145 + // First, during_initial_mark_pause() should not be already set. We 36.146 + // will set it here if we have to. However, it should be cleared by 36.147 + // the end of the pause (it's only set for the duration of an 36.148 + // initial-mark pause). 36.149 + assert(!during_initial_mark_pause(), "pre-condition"); 36.150 + 36.151 + if (initiate_conc_mark_if_possible()) { 36.152 + // We had noticed on a previous pause that the heap occupancy has 36.153 + // gone over the initiating threshold and we should start a 36.154 + // concurrent marking cycle. So we might initiate one. 36.155 + 36.156 + bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle(); 36.157 + if (!during_cycle) { 36.158 + // The concurrent marking thread is not "during a cycle", i.e., 36.159 + // it has completed the last one. So we can go ahead and 36.160 + // initiate a new cycle. 36.161 + 36.162 + set_during_initial_mark_pause(); 36.163 + 36.164 + // And we can now clear initiate_conc_mark_if_possible() as 36.165 + // we've already acted on it. 36.166 + clear_initiate_conc_mark_if_possible(); 36.167 + } else { 36.168 + // The concurrent marking thread is still finishing up the 36.169 + // previous cycle. If we start one right now the two cycles 36.170 + // overlap. In particular, the concurrent marking thread might 36.171 + // be in the process of clearing the next marking bitmap (which 36.172 + // we will use for the next cycle if we start one). 
Starting a 36.173 + // cycle now will be bad given that parts of the marking 36.174 + // information might get cleared by the marking thread. And we 36.175 + // cannot wait for the marking thread to finish the cycle as it 36.176 + // periodically yields while clearing the next marking bitmap 36.177 + // and, if it's in a yield point, it's waiting for us to 36.178 + // finish. So, at this point we will not start a cycle and we'll 36.179 + // let the concurrent marking thread complete the last one. 36.180 + } 36.181 + } 36.182 +} 36.183 + 36.184 +void 36.185 G1CollectorPolicy_BestRegionsFirst:: 36.186 record_collection_pause_start(double start_time_sec, size_t start_used) { 36.187 G1CollectorPolicy::record_collection_pause_start(start_time_sec, start_used);
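Aside: the change above splits the old _should_initiate_conc_mark into two flags, one that latches a request for a cycle at the end of a pause and one that marks the pause actually doing the initial-mark work. A minimal standalone sketch of that two-flag protocol (illustrative C++ only, with simplified names; not HotSpot code):

    #include <cstddef>
    #include <cstdio>

    // Illustrative model: one flag latches the request at pause end, the
    // other is set only for the pause that does the initial-mark work.
    struct PolicySketch {
      bool initiate_conc_mark_if_possible;
      bool during_initial_mark_pause;
      PolicySketch() : initiate_conc_mark_if_possible(false),
                       during_initial_mark_pause(false) {}

      // End of a pause: latch a request once occupancy crosses the threshold.
      void record_pause_end(size_t used, size_t threshold, size_t prev_used) {
        if (used > threshold && used > prev_used) {
          initiate_conc_mark_if_possible = true;  // may already be set; that's OK
        }
      }

      // Start of the next pause: act on the request only if the marker is idle.
      void decide_on_conc_mark_initiation(bool marker_during_cycle) {
        if (initiate_conc_mark_if_possible && !marker_during_cycle) {
          during_initial_mark_pause = true;       // this pause does initial-mark
          initiate_conc_mark_if_possible = false; // request has been consumed
        }
        // otherwise leave the request latched and retry at a later pause
      }
    };

    int main() {
      PolicySketch p;
      p.record_pause_end(70, 45, 60);          // over threshold: request a cycle
      p.decide_on_conc_mark_initiation(true);  // marker still busy: postpone
      std::printf("request=%d init-mark=%d\n",
                  p.initiate_conc_mark_if_possible, p.during_initial_mark_pause);
      p.decide_on_conc_mark_initiation(false); // marker idle: start the cycle
      std::printf("request=%d init-mark=%d\n",
                  p.initiate_conc_mark_if_possible, p.during_initial_mark_pause);
      return 0;
    }

The point of the latch is visible in the run: a busy marker postpones the cycle without dropping the request.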
37.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Apr 15 19:08:18 2010 -0700 37.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp Thu Apr 15 19:08:48 2010 -0700 37.3 @@ -215,6 +215,8 @@ 37.4 SurvRateGroup* _survivor_surv_rate_group; 37.5 // add here any more surv rate groups 37.6 37.7 + double _gc_overhead_perc; 37.8 + 37.9 bool during_marking() { 37.10 return _during_marking; 37.11 } 37.12 @@ -722,11 +724,31 @@ 37.13 37.14 size_t _n_marks_since_last_pause; 37.15 37.16 - // True iff CM has been initiated. 37.17 - bool _conc_mark_initiated; 37.18 + // At the end of a pause we check the heap occupancy and we decide 37.19 + // whether we will start a marking cycle during the next pause. If 37.20 + // we decide that we want to do that, we will set this parameter to 37.21 + // true. So, this parameter will stay true between the end of a 37.22 + // pause and the beginning of a subsequent pause (not necessarily 37.23 + // the next one, see the comments on the next field) when we decide 37.24 + // that we will indeed start a marking cycle and do the initial-mark 37.25 + // work. 37.26 + volatile bool _initiate_conc_mark_if_possible; 37.27 37.28 - // True iff CM should be initiated 37.29 - bool _should_initiate_conc_mark; 37.30 + // If initiate_conc_mark_if_possible() is set at the beginning of a 37.31 + // pause, it is a suggestion that the pause should start a marking 37.32 + // cycle by doing the initial-mark work. However, it is possible 37.33 + // that the concurrent marking thread is still finishing up the 37.34 + // previous marking cycle (e.g., clearing the next marking 37.35 + // bitmap). If that is the case we cannot start a new cycle and 37.36 + // we'll have to wait for the concurrent marking thread to finish 37.37 + // what it is doing. In this case we will postpone the marking cycle 37.38 + // initiation decision for the next pause. When we eventually decide 37.39 + // to start a cycle, we will set _during_initial_mark_pause which 37.40 + // will stay true until the end of the initial-mark pause and it's 37.41 + // the condition that indicates that a pause is doing the 37.42 + // initial-mark work. 37.43 + volatile bool _during_initial_mark_pause; 37.44 + 37.45 bool _should_revert_to_full_young_gcs; 37.46 bool _last_full_young_gc; 37.47 37.48 @@ -979,9 +1001,21 @@ 37.49 // Add "hr" to the CS. 37.50 void add_to_collection_set(HeapRegion* hr); 37.51 37.52 - bool should_initiate_conc_mark() { return _should_initiate_conc_mark; } 37.53 - void set_should_initiate_conc_mark() { _should_initiate_conc_mark = true; } 37.54 - void unset_should_initiate_conc_mark(){ _should_initiate_conc_mark = false; } 37.55 + bool initiate_conc_mark_if_possible() { return _initiate_conc_mark_if_possible; } 37.56 + void set_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = true; } 37.57 + void clear_initiate_conc_mark_if_possible() { _initiate_conc_mark_if_possible = false; } 37.58 + 37.59 + bool during_initial_mark_pause() { return _during_initial_mark_pause; } 37.60 + void set_during_initial_mark_pause() { _during_initial_mark_pause = true; } 37.61 + void clear_during_initial_mark_pause(){ _during_initial_mark_pause = false; } 37.62 + 37.63 + // This is called at the very beginning of an evacuation pause (it 37.64 + // has to be the first thing that the pause does). 
If
37.65 +  // initiate_conc_mark_if_possible() is true, and the concurrent
37.66 +  // marking thread has completed its work during the previous cycle,
37.67 +  // it will set during_initial_mark_pause() so that the pause does
37.68 +  // the initial-mark work and starts a marking cycle.
37.69 +  void decide_on_conc_mark_initiation();
37.70
37.71    // If an expansion would be appropriate, because recent GC overhead had
37.72    // exceeded the desired limit, return an amount to expand by.
38.1 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Apr 15 19:08:18 2010 -0700 38.2 +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp Thu Apr 15 19:08:48 2010 -0700 38.3 @@ -40,9 +40,6 @@ 38.4 develop(bool, G1Gen, true, \ 38.5 "If true, it will enable the generational G1") \ 38.6 \ 38.7 - develop(intx, G1GCPercent, 10, \ 38.8 - "The desired percent time spent on GC") \ 38.9 - \ 38.10 develop(intx, G1PolicyVerbose, 0, \ 38.11 "The verbosity level on G1 policy decisions") \ 38.12 \ 38.13 @@ -270,11 +267,11 @@ 38.14 product(uintx, G1HeapRegionSize, 0, \ 38.15 "Size of the G1 regions.") \ 38.16 \ 38.17 - experimental(bool, G1UseParallelRSetUpdating, false, \ 38.18 + experimental(bool, G1UseParallelRSetUpdating, true, \ 38.19 "Enables the parallelization of remembered set updating " \ 38.20 "during evacuation pauses") \ 38.21 \ 38.22 - experimental(bool, G1UseParallelRSetScanning, false, \ 38.23 + experimental(bool, G1UseParallelRSetScanning, true, \ 38.24 "Enables the parallelization of remembered set scanning " \ 38.25 "during evacuation pauses") \ 38.26 \
39.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Apr 15 19:08:18 2010 -0700 39.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Apr 15 19:08:48 2010 -0700 39.3 @@ -2328,6 +2328,17 @@ 39.4 } 39.5 39.6 DEFAULT: 39.7 +#ifdef ZERO 39.8 + // Some zero configurations use the C++ interpreter as a 39.9 + // fallback interpreter and have support for platform 39.10 + // specific fast bytecodes which aren't supported here, so 39.11 + // redispatch to the equivalent non-fast bytecode when they 39.12 + // are encountered. 39.13 + if (Bytecodes::is_defined((Bytecodes::Code)opcode)) { 39.14 + opcode = (jubyte)Bytecodes::java_code((Bytecodes::Code)opcode); 39.15 + goto opcode_switch; 39.16 + } 39.17 +#endif 39.18 fatal2("\t*** Unimplemented opcode: %d = %s\n", 39.19 opcode, Bytecodes::name((Bytecodes::Code)opcode)); 39.20 goto finish;
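The redispatch above relies on Bytecodes::java_code() mapping a platform "fast" bytecode back to its standard form before re-entering the dispatch switch. A self-contained illustration of the same goto-based pattern (toy opcodes and made-up values; only the control flow mirrors the diff):

    #include <cstdio>

    // Toy opcode set: one standard bytecode plus a platform 'fast' variant.
    enum Opcode { OP_GETFIELD = 0xB4, OP_FAST_GETFIELD = 0xCC, OP_BAD = 0xFF };

    // Stand-in for Bytecodes::java_code(): map fast variants to their base form.
    static int java_code(int op) { return op == OP_FAST_GETFIELD ? OP_GETFIELD : op; }
    static bool is_defined(int op) { return op != OP_BAD; }

    static void dispatch(int opcode) {
     opcode_switch:
      switch (opcode) {
      case OP_GETFIELD:
        std::printf("getfield handled (opcode 0x%X)\n", opcode);
        return;
      default:
        if (is_defined(opcode)) {
          // Fast bytecode we don't implement here: redo dispatch on the base form.
          opcode = java_code(opcode);
          goto opcode_switch;
        }
        std::printf("unimplemented opcode 0x%X\n", opcode);
      }
    }

    int main() { dispatch(OP_FAST_GETFIELD); return 0; }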
40.1 --- a/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu Apr 15 19:08:18 2010 -0700
40.2 +++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp	Thu Apr 15 19:08:48 2010 -0700
40.3 @@ -111,7 +111,22 @@
40.4
40.5    // Allocate size HeapWords. The memory is NOT initialized to zero.
40.6    inline HeapWord* allocate(size_t size);
40.7 -  static size_t alignment_reserve() { return align_object_size(typeArrayOopDesc::header_size(T_INT)); }
40.8 +
40.9 +  // Reserve space at the end of the TLAB
40.10 +  static size_t end_reserve() {
40.11 +    int reserve_size = typeArrayOopDesc::header_size(T_INT);
40.12 +    if (AllocatePrefetchStyle == 3) {
40.13 +      // BIS is used to prefetch - we need space for it.
40.14 +      // +1 for rounding up to the next cache line, +1 to be safe
40.15 +      int lines = AllocatePrefetchLines + 2;
40.16 +      int step_size = AllocatePrefetchStepSize;
40.17 +      int distance = AllocatePrefetchDistance;
40.18 +      int prefetch_end = (distance + step_size*lines)/(int)HeapWordSize;
40.19 +      reserve_size = MAX2(reserve_size, prefetch_end);
40.20 +    }
40.21 +    return reserve_size;
40.22 +  }
40.23 +  static size_t alignment_reserve() { return align_object_size(end_reserve()); }
40.24    static size_t alignment_reserve_in_bytes() { return alignment_reserve() * HeapWordSize; }
40.25
40.26    // Return tlab size or remaining space in eden such that the
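A worked example of the end_reserve() arithmetic above, using illustrative flag values rather than any platform's defaults: with AllocatePrefetchLines = 4, a 64-byte step, a 512-byte distance and 8-byte heap words, lines = 4 + 2 = 6 and prefetch_end = (512 + 64*6) / 8 = 112 words, which then dominates the int-array-header reserve:

    #include <algorithm>
    #include <cstdio>

    int main() {
      // Illustrative values only; the real ones come from the Allocate* flags.
      const int header_words = 2;    // stand-in for typeArrayOopDesc::header_size(T_INT)
      const int lines     = 4 + 2;   // AllocatePrefetchLines + 2 (round-up + safety)
      const int step_size = 64;      // AllocatePrefetchStepSize, bytes
      const int distance  = 512;     // AllocatePrefetchDistance, bytes
      const int word_size = 8;       // HeapWordSize on a 64-bit VM

      const int prefetch_end = (distance + step_size * lines) / word_size;
      const int reserve = std::max(header_words, prefetch_end);
      std::printf("prefetch_end = %d words, reserve = %d words\n",
                  prefetch_end, reserve);  // 112 and 112
      return 0;
    }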
41.1 --- a/src/share/vm/opto/c2_globals.hpp Thu Apr 15 19:08:18 2010 -0700 41.2 +++ b/src/share/vm/opto/c2_globals.hpp Thu Apr 15 19:08:48 2010 -0700 41.3 @@ -52,9 +52,6 @@ 41.4 "Code alignment for interior entry points " \ 41.5 "in generated code (in bytes)") \ 41.6 \ 41.7 - product_pd(intx, OptoLoopAlignment, \ 41.8 - "Align inner loops to zero relative to this modulus") \ 41.9 - \ 41.10 product(intx, MaxLoopPad, (OptoLoopAlignment-1), \ 41.11 "Align a loop if padding size in bytes is less or equal to this value") \ 41.12 \
42.1 --- a/src/share/vm/opto/doCall.cpp Thu Apr 15 19:08:18 2010 -0700 42.2 +++ b/src/share/vm/opto/doCall.cpp Thu Apr 15 19:08:48 2010 -0700 42.3 @@ -714,8 +714,6 @@ 42.4 42.5 // iterate through all entries sequentially 42.6 for (;!handlers.is_done(); handlers.next()) { 42.7 - // Do nothing if turned off 42.8 - if( !DeutschShiffmanExceptions ) break; 42.9 ciExceptionHandler* handler = handlers.handler(); 42.10 42.11 if (handler->is_rethrow()) { 42.12 @@ -741,46 +739,26 @@ 42.13 return; // No more handling to be done here! 42.14 } 42.15 42.16 - // %%% The following logic replicates make_from_klass_unique. 42.17 - // TO DO: Replace by a subroutine call. Then generalize 42.18 - // the type check, as noted in the next "%%%" comment. 42.19 + // Get the handler's klass 42.20 + ciInstanceKlass* klass = handler->catch_klass(); 42.21 42.22 - ciInstanceKlass* klass = handler->catch_klass(); 42.23 - if (UseUniqueSubclasses) { 42.24 - // (We use make_from_klass because it respects UseUniqueSubclasses.) 42.25 - const TypeOopPtr* tp = TypeOopPtr::make_from_klass(klass); 42.26 - klass = tp->klass()->as_instance_klass(); 42.27 + if (!klass->is_loaded()) { // klass is not loaded? 42.28 + // fall through into catch_call_exceptions which will emit a 42.29 + // handler with an uncommon trap. 42.30 + break; 42.31 } 42.32 42.33 - // Get the handler's klass 42.34 - if (!klass->is_loaded()) // klass is not loaded? 42.35 - break; // Must call Rethrow! 42.36 if (klass->is_interface()) // should not happen, but... 42.37 break; // bail out 42.38 - // See if the loaded exception klass has no subtypes 42.39 - if (klass->has_subklass()) 42.40 - break; // Cannot easily do precise test ==> Rethrow 42.41 42.42 - // %%% Now that subclass checking is very fast, we need to rewrite 42.43 - // this section and remove the option "DeutschShiffmanExceptions". 42.44 - // The exception processing chain should be a normal typecase pattern, 42.45 - // with a bailout to the interpreter only in the case of unloaded 42.46 - // classes. (The bailout should mark the method non-entrant.) 42.47 - // This rewrite should be placed in GraphKit::, not Parse::. 
42.48 -
42.49 -    // Add a dependence; if any subclass added we need to recompile
42.50 -    // %%% should use stronger assert_unique_concrete_subtype instead
42.51 -    if (!klass->is_final()) {
42.52 -      C->dependencies()->assert_leaf_type(klass);
42.53 -    }
42.54 -
42.55 -    // Implement precise test
42.56 +    // Check the type of the exception against the catch type
42.57      const TypeKlassPtr *tk = TypeKlassPtr::make(klass);
42.58      Node* con = _gvn.makecon(tk);
42.59 -    Node* cmp = _gvn.transform( new (C, 3) CmpPNode(ex_klass_node, con) );
42.60 -    Node* bol = _gvn.transform( new (C, 2) BoolNode(cmp, BoolTest::ne) );
42.61 -    { BuildCutout unless(this, bol, PROB_LIKELY(0.7f));
42.62 -      const TypeInstPtr* tinst = TypeInstPtr::make_exact(TypePtr::NotNull, klass);
42.63 +    Node* not_subtype_ctrl = gen_subtype_check(ex_klass_node, con);
42.64 +    if (!stopped()) {
42.65 +      PreserveJVMState pjvms(this);
42.66 +      const TypeInstPtr* tinst = TypeOopPtr::make_from_klass_unique(klass)->cast_to_ptr_type(TypePtr::NotNull)->is_instptr();
42.67 +      assert(klass->has_subklass() || tinst->klass_is_exact(), "lost exactness");
42.68        Node* ex_oop = _gvn.transform(new (C, 2) CheckCastPPNode(control(), ex_node, tinst));
42.69        push_ex_oop(ex_oop);      // Push exception oop for handler
42.70  #ifndef PRODUCT
42.71 @@ -792,6 +770,7 @@
42.72  #endif
42.73        merge_exception(handler_bci);
42.74      }
42.75 +    set_control(not_subtype_ctrl);
42.76
42.77      // Come here if exception does not match handler.
42.78      // Carry on with more handler checks.
42.79 @@ -800,21 +779,6 @@
42.80
42.81    assert(!stopped(), "you should return if you finish the chain");
42.82
42.83 -  if (remaining == 1) {
42.84 -    // Further checks do not matter.
42.85 -  }
42.86 -
42.87 -  if (can_rerun_bytecode()) {
42.88 -    // Do not push_ex_oop here!
42.89 -    // Re-executing the bytecode will reproduce the throwing condition.
42.90 -    bool must_throw = true;
42.91 -    uncommon_trap(Deoptimization::Reason_unhandled,
42.92 -                  Deoptimization::Action_none,
42.93 -                  (ciKlass*)NULL, (const char*)NULL, // default args
42.94 -                  must_throw);
42.95 -    return;
42.96 -  }
42.97 -
42.98    // Oops, need to call into the VM to resolve the klasses at runtime.
42.99    // Note: This call must not deoptimize, since it is not a real call at this bci!
42.100    kill_dead_locals();
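Semantically, the rewritten chain above is now a plain typecase: each handler performs a full subtype check via gen_subtype_check(), the matching path casts the exception and merges into the handler, and the failing path falls through to the next handler (or to the runtime rethrow at the end of the chain). In rough C++ terms (purely illustrative, not compiler IR):

    #include <cstdio>

    struct Throwable { virtual ~Throwable() {} };
    struct IOException : Throwable {};
    struct RuntimeError : Throwable {};

    // Each handler is tried in order; a failed subtype check falls through
    // to the next one, mirroring gen_subtype_check's not-subtype control path.
    static void dispatch(Throwable* ex) {
      if (dynamic_cast<IOException*>(ex)) {                  // handler 1
        std::printf("IOException handler\n");
        return;
      }
      if (dynamic_cast<RuntimeError*>(ex)) {                 // handler 2
        std::printf("RuntimeError handler\n");
        return;
      }
      std::printf("no local handler: rethrow to caller\n");  // end of chain
    }

    int main() {
      RuntimeError e;
      dispatch(&e);
      return 0;
    }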
43.1 --- a/src/share/vm/opto/macro.cpp Thu Apr 15 19:08:18 2010 -0700 43.2 +++ b/src/share/vm/opto/macro.cpp Thu Apr 15 19:08:48 2010 -0700 43.3 @@ -1487,11 +1487,11 @@ 43.4 Node*& contended_phi_rawmem, 43.5 Node* old_eden_top, Node* new_eden_top, 43.6 Node* length) { 43.7 + enum { fall_in_path = 1, pf_path = 2 }; 43.8 if( UseTLAB && AllocatePrefetchStyle == 2 ) { 43.9 // Generate prefetch allocation with watermark check. 43.10 // As an allocation hits the watermark, we will prefetch starting 43.11 // at a "distance" away from watermark. 43.12 - enum { fall_in_path = 1, pf_path = 2 }; 43.13 43.14 Node *pf_region = new (C, 3) RegionNode(3); 43.15 Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, 43.16 @@ -1570,6 +1570,45 @@ 43.17 needgc_false = pf_region; 43.18 contended_phi_rawmem = pf_phi_rawmem; 43.19 i_o = pf_phi_abio; 43.20 + } else if( UseTLAB && AllocatePrefetchStyle == 3 ) { 43.21 + // Insert a prefetch for each allocation only on the fast-path 43.22 + Node *pf_region = new (C, 3) RegionNode(3); 43.23 + Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, 43.24 + TypeRawPtr::BOTTOM ); 43.25 + 43.26 + // Generate several prefetch instructions only for arrays. 43.27 + uint lines = (length != NULL) ? AllocatePrefetchLines : 1; 43.28 + uint step_size = AllocatePrefetchStepSize; 43.29 + uint distance = AllocatePrefetchDistance; 43.30 + 43.31 + // Next cache address. 43.32 + Node *cache_adr = new (C, 4) AddPNode(old_eden_top, old_eden_top, 43.33 + _igvn.MakeConX(distance)); 43.34 + transform_later(cache_adr); 43.35 + cache_adr = new (C, 2) CastP2XNode(needgc_false, cache_adr); 43.36 + transform_later(cache_adr); 43.37 + Node* mask = _igvn.MakeConX(~(intptr_t)(step_size-1)); 43.38 + cache_adr = new (C, 3) AndXNode(cache_adr, mask); 43.39 + transform_later(cache_adr); 43.40 + cache_adr = new (C, 2) CastX2PNode(cache_adr); 43.41 + transform_later(cache_adr); 43.42 + 43.43 + // Prefetch 43.44 + Node *prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, cache_adr ); 43.45 + prefetch->set_req(0, needgc_false); 43.46 + transform_later(prefetch); 43.47 + contended_phi_rawmem = prefetch; 43.48 + Node *prefetch_adr; 43.49 + distance = step_size; 43.50 + for ( uint i = 1; i < lines; i++ ) { 43.51 + prefetch_adr = new (C, 4) AddPNode( cache_adr, cache_adr, 43.52 + _igvn.MakeConX(distance) ); 43.53 + transform_later(prefetch_adr); 43.54 + prefetch = new (C, 3) PrefetchWriteNode( contended_phi_rawmem, prefetch_adr ); 43.55 + transform_later(prefetch); 43.56 + distance += step_size; 43.57 + contended_phi_rawmem = prefetch; 43.58 + } 43.59 } else if( AllocatePrefetchStyle > 0 ) { 43.60 // Insert a prefetch for each allocation only on the fast-path 43.61 Node *prefetch_adr;
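The style-3 node chain above computes the first cache line to touch as (new allocation top + AllocatePrefetchDistance) rounded down to a cache-line boundary, then steps forward one line at a time. A quick standalone check of that mask arithmetic (addresses and flag values are made up):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uintptr_t eden_top  = 0x10012345;  // hypothetical allocation top
      const uintptr_t distance  = 512;         // AllocatePrefetchDistance
      const uintptr_t step_size = 64;          // AllocatePrefetchStepSize (cache line)
      const unsigned  lines     = 3;           // AllocatePrefetchLines

      // AddPNode + AndXNode from the diff: advance, then align down to a line.
      uintptr_t cache_adr = (eden_top + distance) & ~(step_size - 1);
      for (unsigned i = 0; i < lines; i++) {
        std::printf("prefetch line %u at 0x%lx\n", i, (unsigned long)cache_adr);
        cache_adr += step_size;  // the loop in the diff emits one node per line
      }
      return 0;
    }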
44.1 --- a/src/share/vm/opto/memnode.hpp Thu Apr 15 19:08:18 2010 -0700 44.2 +++ b/src/share/vm/opto/memnode.hpp Thu Apr 15 19:08:48 2010 -0700 44.3 @@ -1244,5 +1244,5 @@ 44.4 virtual int Opcode() const; 44.5 virtual uint ideal_reg() const { return NotAMachineReg; } 44.6 virtual uint match_edge(uint idx) const { return idx==2; } 44.7 - virtual const Type *bottom_type() const { return Type::ABIO; } 44.8 + virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } 44.9 };
45.1 --- a/src/share/vm/opto/parse.hpp Thu Apr 15 19:08:18 2010 -0700 45.2 +++ b/src/share/vm/opto/parse.hpp Thu Apr 15 19:08:48 2010 -0700 45.3 @@ -551,9 +551,6 @@ 45.4 // Also handles exceptions for individual bytecodes. 45.5 void catch_inline_exceptions(SafePointNode* ex_map); 45.6 45.7 - // Bytecode classifier, helps decide to use uncommon_trap vs. rethrow_C. 45.8 - bool can_rerun_bytecode(); 45.9 - 45.10 // Merge the given map into correct exceptional exit state. 45.11 // Assumes that there is no applicable local handler. 45.12 void throw_to_exit(SafePointNode* ex_map);
46.1 --- a/src/share/vm/opto/parse1.cpp Thu Apr 15 19:08:18 2010 -0700 46.2 +++ b/src/share/vm/opto/parse1.cpp Thu Apr 15 19:08:48 2010 -0700 46.3 @@ -798,67 +798,6 @@ 46.4 initial_gvn()->transform_no_reclaim(exit); 46.5 } 46.6 46.7 -bool Parse::can_rerun_bytecode() { 46.8 - switch (bc()) { 46.9 - case Bytecodes::_ldc: 46.10 - case Bytecodes::_ldc_w: 46.11 - case Bytecodes::_ldc2_w: 46.12 - case Bytecodes::_getfield: 46.13 - case Bytecodes::_putfield: 46.14 - case Bytecodes::_getstatic: 46.15 - case Bytecodes::_putstatic: 46.16 - case Bytecodes::_arraylength: 46.17 - case Bytecodes::_baload: 46.18 - case Bytecodes::_caload: 46.19 - case Bytecodes::_iaload: 46.20 - case Bytecodes::_saload: 46.21 - case Bytecodes::_faload: 46.22 - case Bytecodes::_aaload: 46.23 - case Bytecodes::_laload: 46.24 - case Bytecodes::_daload: 46.25 - case Bytecodes::_bastore: 46.26 - case Bytecodes::_castore: 46.27 - case Bytecodes::_iastore: 46.28 - case Bytecodes::_sastore: 46.29 - case Bytecodes::_fastore: 46.30 - case Bytecodes::_aastore: 46.31 - case Bytecodes::_lastore: 46.32 - case Bytecodes::_dastore: 46.33 - case Bytecodes::_irem: 46.34 - case Bytecodes::_idiv: 46.35 - case Bytecodes::_lrem: 46.36 - case Bytecodes::_ldiv: 46.37 - case Bytecodes::_frem: 46.38 - case Bytecodes::_fdiv: 46.39 - case Bytecodes::_drem: 46.40 - case Bytecodes::_ddiv: 46.41 - case Bytecodes::_checkcast: 46.42 - case Bytecodes::_instanceof: 46.43 - case Bytecodes::_anewarray: 46.44 - case Bytecodes::_newarray: 46.45 - case Bytecodes::_multianewarray: 46.46 - case Bytecodes::_new: 46.47 - case Bytecodes::_monitorenter: // can re-run initial null check, only 46.48 - case Bytecodes::_return: 46.49 - return true; 46.50 - break; 46.51 - 46.52 - // Don't rerun athrow since it's part of the exception path. 46.53 - case Bytecodes::_athrow: 46.54 - case Bytecodes::_invokestatic: 46.55 - case Bytecodes::_invokedynamic: 46.56 - case Bytecodes::_invokespecial: 46.57 - case Bytecodes::_invokevirtual: 46.58 - case Bytecodes::_invokeinterface: 46.59 - return false; 46.60 - break; 46.61 - 46.62 - default: 46.63 - assert(false, "unexpected bytecode produced an exception"); 46.64 - return true; 46.65 - } 46.66 -} 46.67 - 46.68 //---------------------------do_exceptions------------------------------------- 46.69 // Process exceptions arising from the current bytecode. 46.70 // Send caught exceptions to the proper handler within this method. 46.71 @@ -872,9 +811,6 @@ 46.72 return; 46.73 } 46.74 46.75 - // Make sure we can classify this bytecode if we need to. 46.76 - debug_only(can_rerun_bytecode()); 46.77 - 46.78 PreserveJVMState pjvms(this, false); 46.79 46.80 SafePointNode* ex_map;
47.1 --- a/src/share/vm/opto/runtime.cpp Thu Apr 15 19:08:18 2010 -0700 47.2 +++ b/src/share/vm/opto/runtime.cpp Thu Apr 15 19:08:48 2010 -0700 47.3 @@ -865,7 +865,7 @@ 47.4 thread->set_exception_stack_size(0); 47.5 47.6 // Check if the exception PC is a MethodHandle call site. 47.7 - thread->set_is_method_handle_exception(nm->is_method_handle_return(pc)); 47.8 + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); 47.9 } 47.10 47.11 // Restore correct return pc. Was saved above.
48.1 --- a/src/share/vm/prims/forte.cpp Thu Apr 15 19:08:18 2010 -0700 48.2 +++ b/src/share/vm/prims/forte.cpp Thu Apr 15 19:08:48 2010 -0700 48.3 @@ -55,12 +55,11 @@ 48.4 }; 48.5 48.6 48.7 -static void is_decipherable_compiled_frame(frame* fr, RegisterMap* map, 48.8 - bool* is_compiled_p, bool* is_walkable_p); 48.9 +static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm); 48.10 static bool is_decipherable_interpreted_frame(JavaThread* thread, 48.11 - frame* fr, 48.12 - methodOop* method_p, 48.13 - int* bci_p); 48.14 + frame* fr, 48.15 + methodOop* method_p, 48.16 + int* bci_p); 48.17 48.18 48.19 48.20 @@ -122,41 +121,43 @@ 48.21 // Determine if 'fr' is a decipherable compiled frame. We are already 48.22 // assured that fr is for a java nmethod. 48.23 48.24 -static bool is_decipherable_compiled_frame(frame* fr) { 48.25 - 48.26 - assert(fr->cb() != NULL && fr->cb()->is_nmethod(), "invariant"); 48.27 - nmethod* nm = (nmethod*) fr->cb(); 48.28 +static bool is_decipherable_compiled_frame(JavaThread* thread, frame* fr, nmethod* nm) { 48.29 assert(nm->is_java_method(), "invariant"); 48.30 48.31 - // First try and find an exact PcDesc 48.32 + if (thread->has_last_Java_frame() && thread->last_Java_pc() == fr->pc()) { 48.33 + // We're stopped at a call into the JVM so look for a PcDesc with 48.34 + // the actual pc reported by the frame. 48.35 + PcDesc* pc_desc = nm->pc_desc_at(fr->pc()); 48.36 48.37 - PcDesc* pc_desc = nm->pc_desc_at(fr->pc()); 48.38 - 48.39 - // Did we find a useful PcDesc? 48.40 - if (pc_desc != NULL && 48.41 - pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { 48.42 - 48.43 - address probe_pc = fr->pc() + 1; 48.44 - pc_desc = nm->pc_desc_near(probe_pc); 48.45 - 48.46 - // Now do we have a useful PcDesc? 48.47 - 48.48 + // Did we find a useful PcDesc? 48.49 if (pc_desc != NULL && 48.50 - pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { 48.51 - // No debug information available for this pc 48.52 - // vframeStream would explode if we try and walk the frames. 48.53 - return false; 48.54 + pc_desc->scope_decode_offset() != DebugInformationRecorder::serialized_null) { 48.55 + return true; 48.56 } 48.57 - 48.58 - // This PcDesc is useful however we must adjust the frame's pc 48.59 - // so that the vframeStream lookups will use this same pc 48.60 - 48.61 - fr->set_pc(pc_desc->real_pc(nm)); 48.62 } 48.63 48.64 + // We're at some random pc in the nmethod so search for the PcDesc 48.65 + // whose pc is greater than the current PC. It's done this way 48.66 + // because the extra PcDescs that are recorded for improved debug 48.67 + // info record the end of the region covered by the ScopeDesc 48.68 + // instead of the beginning. 48.69 + PcDesc* pc_desc = nm->pc_desc_near(fr->pc() + 1); 48.70 + 48.71 + // Now do we have a useful PcDesc? 48.72 + if (pc_desc == NULL || 48.73 + pc_desc->scope_decode_offset() == DebugInformationRecorder::serialized_null) { 48.74 + // No debug information available for this pc 48.75 + // vframeStream would explode if we try and walk the frames. 48.76 + return false; 48.77 + } 48.78 + 48.79 + // This PcDesc is useful however we must adjust the frame's pc 48.80 + // so that the vframeStream lookups will use this same pc 48.81 + fr->set_pc(pc_desc->real_pc(nm)); 48.82 return true; 48.83 } 48.84 48.85 + 48.86 // Determine if 'fr' is a walkable interpreted frame. Returns false 48.87 // if it is not. *method_p, and *bci_p are not set when false is 48.88 // returned. 
*method_p is non-NULL if frame was executing a Java 48.89 @@ -166,9 +167,9 @@ 48.90 // even if a valid BCI cannot be found. 48.91 48.92 static bool is_decipherable_interpreted_frame(JavaThread* thread, 48.93 - frame* fr, 48.94 - methodOop* method_p, 48.95 - int* bci_p) { 48.96 + frame* fr, 48.97 + methodOop* method_p, 48.98 + int* bci_p) { 48.99 assert(fr->is_interpreted_frame(), "just checking"); 48.100 48.101 // top frame is an interpreted frame 48.102 @@ -323,13 +324,15 @@ 48.103 // have a PCDesc that can get us a bci however we did find 48.104 // a method 48.105 48.106 - if (!is_decipherable_compiled_frame(&candidate)) { 48.107 + if (!is_decipherable_compiled_frame(thread, &candidate, nm)) { 48.108 return false; 48.109 } 48.110 48.111 // is_decipherable_compiled_frame may modify candidate's pc 48.112 *initial_frame_p = candidate; 48.113 48.114 + assert(nm->pc_desc_at(candidate.pc()) != NULL, "if it's decipherable then pc must be valid"); 48.115 + 48.116 return true; 48.117 } 48.118
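The search above leans on pc_desc_near(pc + 1), i.e. the first PcDesc at or after the probe address, because the extra debug-info PcDescs record where a covered region ends rather than where it begins. A toy model of that lookup (sorted table plus lower_bound; a sketch of the idea, not the real nmethod API):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Toy PcDesc table: each entry covers code *ending* at 'pc' (like the
    // extra debug-info PcDescs described in the diff), sorted by pc.
    struct PcDesc { unsigned pc; int scope; };

    static const PcDesc* pc_desc_near(const std::vector<PcDesc>& t, unsigned pc) {
      // First entry with entry.pc >= pc: the region that contains 'pc'.
      std::vector<PcDesc>::const_iterator it =
          std::lower_bound(t.begin(), t.end(), pc,
                           [](const PcDesc& d, unsigned p) { return d.pc < p; });
      return it == t.end() ? NULL : &*it;
    }

    int main() {
      std::vector<PcDesc> table = {{0x10, 1}, {0x40, 2}, {0x80, 3}};
      unsigned random_pc = 0x44;                             // sampled mid-region
      const PcDesc* d = pc_desc_near(table, random_pc + 1);  // +1 as in the diff
      if (d) std::printf("use scope %d, adjust frame pc to 0x%x\n", d->scope, d->pc);
      return 0;
    }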
49.1 --- a/src/share/vm/runtime/arguments.cpp	Thu Apr 15 19:08:18 2010 -0700
49.2 +++ b/src/share/vm/runtime/arguments.cpp	Thu Apr 15 19:08:48 2010 -0700
49.3 @@ -1353,6 +1353,16 @@
49.4                MarkStackSize / K, MarkStackSizeMax / K);
49.5      tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
49.6    }
49.7 +
49.8 +  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
49.9 +    // In G1, we want the default GC overhead goal to be higher than
49.10 +    // it is in, say, PS. So we set it here to 10%. Otherwise the heap
49.11 +    // might be expanded more aggressively than we would like. In
49.12 +    // fact, even 10% seems not to be high enough in some cases
49.13 +    // (especially small GC stress tests whose main activity is
49.14 +    // allocation). We might consider increasing it further.
49.15 +    FLAG_SET_DEFAULT(GCTimeRatio, 9);
49.16 +  }
49.17  }
49.18
49.19  void Arguments::set_heap_size() {
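For the arithmetic behind the chosen default: _gc_overhead_perc in g1CollectorPolicy.cpp is computed as 100 * (1 / (1 + GCTimeRatio)), so forcing GCTimeRatio to 9 yields the 10% goal that the removed G1GCPercent flag used to supply. A one-line check:

    #include <cstdio>

    int main() {
      const double gc_time_ratio = 9.0;  // default forced for G1 in the diff
      const double overhead_perc = 100.0 * (1.0 / (1.0 + gc_time_ratio));
      std::printf("_gc_overhead_perc = %.1f%%\n", overhead_perc);  // 10.0%
      return 0;
    }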
50.1 --- a/src/share/vm/runtime/globals.hpp Thu Apr 15 19:08:18 2010 -0700 50.2 +++ b/src/share/vm/runtime/globals.hpp Thu Apr 15 19:08:48 2010 -0700 50.3 @@ -1052,7 +1052,8 @@ 50.4 "Use SSE2 MOVDQU instruction for Arraycopy") \ 50.5 \ 50.6 product(intx, FieldsAllocationStyle, 1, \ 50.7 - "0 - type based with oops first, 1 - with oops last") \ 50.8 + "0 - type based with oops first, 1 - with oops last, " \ 50.9 + "2 - oops in super and sub classes are together") \ 50.10 \ 50.11 product(bool, CompactFields, true, \ 50.12 "Allocate nonstatic fields in gaps between previous fields") \ 50.13 @@ -2502,10 +2503,6 @@ 50.14 notproduct(bool, TraceSpilling, false, \ 50.15 "Trace spilling") \ 50.16 \ 50.17 - develop(bool, DeutschShiffmanExceptions, true, \ 50.18 - "Fast check to find exception handler for precisely typed " \ 50.19 - "exceptions") \ 50.20 - \ 50.21 product(bool, SplitIfBlocks, true, \ 50.22 "Clone compares and control flow through merge points to fold " \ 50.23 "some branches") \ 50.24 @@ -2711,7 +2708,8 @@ 50.25 product(intx, AllocatePrefetchStyle, 1, \ 50.26 "0 = no prefetch, " \ 50.27 "1 = prefetch instructions for each allocation, " \ 50.28 - "2 = use TLAB watermark to gate allocation prefetch") \ 50.29 + "2 = use TLAB watermark to gate allocation prefetch, " \ 50.30 + "3 = use BIS instruction on Sparc for allocation prefetch") \ 50.31 \ 50.32 product(intx, AllocatePrefetchDistance, -1, \ 50.33 "Distance to prefetch ahead of allocation pointer") \ 50.34 @@ -3114,6 +3112,9 @@ 50.35 develop_pd(intx, CodeEntryAlignment, \ 50.36 "Code entry alignment for generated code (in bytes)") \ 50.37 \ 50.38 + product_pd(intx, OptoLoopAlignment, \ 50.39 + "Align inner loops to zero relative to this modulus") \ 50.40 + \ 50.41 product_pd(uintx, InitialCodeCacheSize, \ 50.42 "Initial code cache size (in bytes)") \ 50.43 \
51.1 --- a/src/share/vm/runtime/mutexLocker.cpp Thu Apr 15 19:08:18 2010 -0700 51.2 +++ b/src/share/vm/runtime/mutexLocker.cpp Thu Apr 15 19:08:48 2010 -0700 51.3 @@ -70,6 +70,7 @@ 51.4 Monitor* CMark_lock = NULL; 51.5 Monitor* ZF_mon = NULL; 51.6 Monitor* Cleanup_mon = NULL; 51.7 +Mutex* CMRegionStack_lock = NULL; 51.8 Mutex* SATB_Q_FL_lock = NULL; 51.9 Monitor* SATB_Q_CBL_mon = NULL; 51.10 Mutex* Shared_SATB_Q_lock = NULL; 51.11 @@ -167,6 +168,7 @@ 51.12 def(CMark_lock , Monitor, nonleaf, true ); // coordinate concurrent mark thread 51.13 def(ZF_mon , Monitor, leaf, true ); 51.14 def(Cleanup_mon , Monitor, nonleaf, true ); 51.15 + def(CMRegionStack_lock , Mutex, leaf, true ); 51.16 def(SATB_Q_FL_lock , Mutex , special, true ); 51.17 def(SATB_Q_CBL_mon , Monitor, nonleaf, true ); 51.18 def(Shared_SATB_Q_lock , Mutex, nonleaf, true );
52.1 --- a/src/share/vm/runtime/mutexLocker.hpp Thu Apr 15 19:08:18 2010 -0700 52.2 +++ b/src/share/vm/runtime/mutexLocker.hpp Thu Apr 15 19:08:48 2010 -0700 52.3 @@ -63,6 +63,7 @@ 52.4 extern Monitor* CMark_lock; // used for concurrent mark thread coordination 52.5 extern Monitor* ZF_mon; // used for G1 conc zero-fill. 52.6 extern Monitor* Cleanup_mon; // used for G1 conc cleanup. 52.7 +extern Mutex* CMRegionStack_lock; // used for protecting accesses to the CM region stack 52.8 extern Mutex* SATB_Q_FL_lock; // Protects SATB Q 52.9 // buffer free list. 52.10 extern Monitor* SATB_Q_CBL_mon; // Protects SATB Q
53.1 --- a/src/share/vm/runtime/sharedRuntime.cpp Thu Apr 15 19:08:18 2010 -0700 53.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp Thu Apr 15 19:08:48 2010 -0700 53.3 @@ -259,13 +259,16 @@ 53.4 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { 53.5 assert(frame::verify_return_pc(return_address), "must be a return pc"); 53.6 53.7 + // Reset MethodHandle flag. 53.8 + thread->set_is_method_handle_return(false); 53.9 + 53.10 // the fastest case first 53.11 CodeBlob* blob = CodeCache::find_blob(return_address); 53.12 if (blob != NULL && blob->is_nmethod()) { 53.13 nmethod* code = (nmethod*)blob; 53.14 assert(code != NULL, "nmethod must be present"); 53.15 // Check if the return address is a MethodHandle call site. 53.16 - thread->set_is_method_handle_exception(code->is_method_handle_return(return_address)); 53.17 + thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 53.18 // native nmethods don't have exception handlers 53.19 assert(!code->is_native_method(), "no exception handler"); 53.20 assert(code->header_begin() != code->exception_begin(), "no exception handler"); 53.21 @@ -292,7 +295,7 @@ 53.22 nmethod* code = (nmethod*)blob; 53.23 assert(code != NULL, "nmethod must be present"); 53.24 // Check if the return address is a MethodHandle call site. 53.25 - thread->set_is_method_handle_exception(code->is_method_handle_return(return_address)); 53.26 + thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 53.27 assert(code->header_begin() != code->exception_begin(), "no exception handler"); 53.28 return code->exception_begin(); 53.29 }
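The added reset matters because the flag is sticky per-thread: it was previously written only on the nmethod paths, so a stale true could survive into a later, unrelated exception dispatch. Schematically (illustrative types, not the VM's):

    #include <cstdio>

    struct ThreadSketch { int is_method_handle_return = 0; };

    // Before the fix: the flag was updated only on the nmethod path, so other
    // paths kept the previous value. After: reset first, then set when detected.
    static void handler_for_return_address(ThreadSketch* t, bool is_mh_call_site) {
      t->is_method_handle_return = 0;          // reset on every lookup (the fix)
      if (is_mh_call_site) {
        t->is_method_handle_return = 1;
      }
    }

    int main() {
      ThreadSketch t;
      handler_for_return_address(&t, true);
      handler_for_return_address(&t, false);   // stale value would persist without reset
      std::printf("flag = %d\n", t.is_method_handle_return);  // 0
      return 0;
    }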
54.1 --- a/src/share/vm/runtime/thread.hpp	Thu Apr 15 19:08:18 2010 -0700
54.2 +++ b/src/share/vm/runtime/thread.hpp	Thu Apr 15 19:08:48 2010 -0700
54.3 @@ -772,7 +772,7 @@
54.4    volatile address _exception_pc;                // PC where exception happened
54.5    volatile address _exception_handler_pc;        // PC for handler of exception
54.6    volatile int     _exception_stack_size;        // Size of frame where exception happened
54.7 -  volatile int     _is_method_handle_exception;  // True if the current exception PC is at a MethodHandle call.
54.8 +  volatile int     _is_method_handle_return;     // true (== 1) if the current exception PC is a MethodHandle call site.
54.9
54.10    // support for compilation
54.11    bool    _is_compiling;                         // is true if a compilation is active in this thread (one compilation per thread possible)
54.12
54.13 @@ -1108,13 +1108,13 @@
54.14    int      exception_stack_size() const          { return _exception_stack_size; }
54.15    address  exception_pc() const                  { return _exception_pc; }
54.16    address  exception_handler_pc() const          { return _exception_handler_pc; }
54.17 -  int      is_method_handle_exception() const    { return _is_method_handle_exception; }
54.18 +  bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
54.19
54.20    void set_exception_oop(oop o)                  { _exception_oop = o; }
54.21    void set_exception_pc(address a)               { _exception_pc = a; }
54.22    void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
54.23    void set_exception_stack_size(int size)        { _exception_stack_size = size; }
54.24 -  void set_is_method_handle_exception(int value) { _is_method_handle_exception = value; }
54.25 +  void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
54.26
54.27    // Stack overflow support
54.28    inline size_t stack_available(address cur_sp);
54.29 @@ -1188,7 +1188,7 @@
54.30    static ByteSize exception_pc_offset()          { return byte_offset_of(JavaThread, _exception_pc        ); }
54.31    static ByteSize exception_handler_pc_offset()  { return byte_offset_of(JavaThread, _exception_handler_pc); }
54.32    static ByteSize exception_stack_size_offset()  { return byte_offset_of(JavaThread, _exception_stack_size); }
54.33 -  static ByteSize is_method_handle_exception_offset() { return byte_offset_of(JavaThread, _is_method_handle_exception); }
54.34 +  static ByteSize is_method_handle_return_offset() { return byte_offset_of(JavaThread, _is_method_handle_return); }
54.35    static ByteSize stack_guard_state_offset()     { return byte_offset_of(JavaThread, _stack_guard_state   ); }
54.36    static ByteSize suspend_flags_offset()         { return byte_offset_of(JavaThread, _suspend_flags       ); }
55.1 --- a/src/share/vm/utilities/ostream.cpp Thu Apr 15 19:08:18 2010 -0700 55.2 +++ b/src/share/vm/utilities/ostream.cpp Thu Apr 15 19:08:48 2010 -0700 55.3 @@ -363,7 +363,7 @@ 55.4 return _log_file != NULL; 55.5 } 55.6 55.7 -static const char* make_log_name(const char* log_name, const char* force_directory, char* buf) { 55.8 +static const char* make_log_name(const char* log_name, const char* force_directory) { 55.9 const char* basename = log_name; 55.10 char file_sep = os::file_separator()[0]; 55.11 const char* cp; 55.12 @@ -374,6 +374,27 @@ 55.13 } 55.14 const char* nametail = log_name; 55.15 55.16 + // Compute buffer length 55.17 + size_t buffer_length; 55.18 + if (force_directory != NULL) { 55.19 + buffer_length = strlen(force_directory) + strlen(os::file_separator()) + 55.20 + strlen(basename) + 1; 55.21 + } else { 55.22 + buffer_length = strlen(log_name) + 1; 55.23 + } 55.24 + 55.25 + const char* star = strchr(basename, '*'); 55.26 + int star_pos = (star == NULL) ? -1 : (star - nametail); 55.27 + 55.28 + char pid[32]; 55.29 + if (star_pos >= 0) { 55.30 + jio_snprintf(pid, sizeof(pid), "%u", os::current_process_id()); 55.31 + buffer_length += strlen(pid); 55.32 + } 55.33 + 55.34 + // Create big enough buffer. 55.35 + char *buf = NEW_C_HEAP_ARRAY(char, buffer_length); 55.36 + 55.37 strcpy(buf, ""); 55.38 if (force_directory != NULL) { 55.39 strcat(buf, force_directory); 55.40 @@ -381,14 +402,11 @@ 55.41 nametail = basename; // completely skip directory prefix 55.42 } 55.43 55.44 - const char* star = strchr(basename, '*'); 55.45 - int star_pos = (star == NULL) ? -1 : (star - nametail); 55.46 - 55.47 if (star_pos >= 0) { 55.48 // convert foo*bar.log to foo123bar.log 55.49 int buf_pos = (int) strlen(buf); 55.50 strncpy(&buf[buf_pos], nametail, star_pos); 55.51 - sprintf(&buf[buf_pos + star_pos], "%u", os::current_process_id()); 55.52 + strcpy(&buf[buf_pos + star_pos], pid); 55.53 nametail += star_pos + 1; // skip prefix and star 55.54 } 55.55 55.56 @@ -399,20 +417,23 @@ 55.57 void defaultStream::init_log() { 55.58 // %%% Need a MutexLocker? 55.59 const char* log_name = LogFile != NULL ? LogFile : "hotspot.log"; 55.60 - char buf[O_BUFLEN*2]; 55.61 - const char* try_name = make_log_name(log_name, NULL, buf); 55.62 + const char* try_name = make_log_name(log_name, NULL); 55.63 fileStream* file = new(ResourceObj::C_HEAP) fileStream(try_name); 55.64 if (!file->is_open()) { 55.65 // Try again to open the file. 55.66 char warnbuf[O_BUFLEN*2]; 55.67 - sprintf(warnbuf, "Warning: Cannot open log file: %s\n", try_name); 55.68 + jio_snprintf(warnbuf, sizeof(warnbuf), 55.69 + "Warning: Cannot open log file: %s\n", try_name); 55.70 // Note: This feature is for maintainer use only. No need for L10N. 55.71 jio_print(warnbuf); 55.72 - try_name = make_log_name("hs_pid*.log", os::get_temp_directory(), buf); 55.73 - sprintf(warnbuf, "Warning: Forcing option -XX:LogFile=%s\n", try_name); 55.74 + FREE_C_HEAP_ARRAY(char, try_name); 55.75 + try_name = make_log_name("hs_pid*.log", os::get_temp_directory()); 55.76 + jio_snprintf(warnbuf, sizeof(warnbuf), 55.77 + "Warning: Forcing option -XX:LogFile=%s\n", try_name); 55.78 jio_print(warnbuf); 55.79 delete file; 55.80 file = new(ResourceObj::C_HEAP) fileStream(try_name); 55.81 + FREE_C_HEAP_ARRAY(char, try_name); 55.82 } 55.83 if (file->is_open()) { 55.84 _log_file = file;
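The reworked make_log_name() sizes its heap buffer before expanding the '*', so the pid digits can never overflow it. A standalone sketch of the same expand-with-pid logic under that assumption (simplified: no force_directory handling, plain malloc instead of NEW_C_HEAP_ARRAY):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Expand "foo*bar.log" to "foo<pid>bar.log" into an exactly-sized buffer,
    // mirroring the length-first approach the diff gives make_log_name().
    static char* expand_log_name(const char* name, unsigned pid_value) {
      char pid[32];
      const char* star = strchr(name, '*');
      size_t len = strlen(name) + 1;
      if (star != NULL) {
        snprintf(pid, sizeof(pid), "%u", pid_value);
        len += strlen(pid);                 // room for the pid; '*' becomes slack
      }
      char* buf = (char*)malloc(len);
      if (star == NULL) { strcpy(buf, name); return buf; }
      size_t prefix = (size_t)(star - name);
      memcpy(buf, name, prefix);            // "hs_pid"
      strcpy(buf + prefix, pid);            // "hs_pid4242"
      strcat(buf, star + 1);                // "hs_pid4242.log"
      return buf;
    }

    int main() {
      char* s = expand_log_name("hs_pid*.log", 4242);
      std::printf("%s\n", s);               // hs_pid4242.log
      free(s);
      return 0;
    }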
56.1 --- a/src/share/vm/utilities/vmError.cpp Thu Apr 15 19:08:18 2010 -0700 56.2 +++ b/src/share/vm/utilities/vmError.cpp Thu Apr 15 19:08:48 2010 -0700 56.3 @@ -807,8 +807,8 @@ 56.4 if (fd == -1) { 56.5 // try temp directory 56.6 const char * tmpdir = os::get_temp_directory(); 56.7 - jio_snprintf(buffer, sizeof(buffer), "%shs_err_pid%u.log", 56.8 - (tmpdir ? tmpdir : ""), os::current_process_id()); 56.9 + jio_snprintf(buffer, sizeof(buffer), "%s%shs_err_pid%u.log", 56.10 + tmpdir, os::file_separator(), os::current_process_id()); 56.11 fd = open(buffer, O_WRONLY | O_CREAT | O_TRUNC, 0666); 56.12 } 56.13