Wed, 23 Dec 2009 00:47:04 -0800
Merge
--- a/src/cpu/x86/vm/assembler_x86.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/assembler_x86.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -7666,7 +7666,7 @@
 
 #ifdef ASSERT
   Label L;
-  testl(tmp, tmp);
+  testptr(tmp, tmp);
   jccb(Assembler::notZero, L);
   hlt();
   bind(L);
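Why this one-character change matters: testl examines only the low 32 bits of its operands, so on x86_64 a valid pointer whose low dword happens to be zero would wrongly look like NULL; testptr widens to a full pointer-width test (testq) on 64-bit builds. The same concern drives the movl -> movptr fix in methodHandles_x86.cpp below. A minimal standalone illustration of the failure mode (our own example, not HotSpot code):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A 64-bit pointer value whose low 32 bits are all zero: a 32-bit
      // test (testl) sees "zero" here, while a pointer-width test
      // (testptr, i.e. testq on x86_64) correctly sees non-zero.
      uint64_t ptr_bits = UINT64_C(1) << 32;
      assert(static_cast<uint32_t>(ptr_bits) == 0);  // what testl examines
      assert(ptr_bits != 0);                         // what testptr examines
      return 0;
    }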
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -196,6 +196,9 @@
   } else {
     assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
     movl(reg, Address(rsi, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
     assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
     notl(reg);  // convert to plain index
   }
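The new comment documents an invariant worth spelling out: an invokedynamic ("giant") cache index is stored in the bytecode stream as the bitwise complement of the plain index, so the single notl above recovers it, and the assert guards against the encoding ever changing. A standalone sketch of that round trip (the encode helper is ours; only decode_secondary_index exists in HotSpot):

    #include <cassert>
    #include <cstdint>

    // The ~x convention the assert guards: encoding is bitwise complement,
    // so decoding is a single NOT instruction (notl) in the interpreter.
    static inline int32_t encode_secondary_index(int32_t plain) { return ~plain; }
    static inline int32_t decode_secondary_index(int32_t enc)   { return ~enc; }

    int main() {
      // Same sanity check as the HotSpot assert: decode(~123) == 123.
      assert(decode_secondary_index(~123) == 123);
      assert(decode_secondary_index(encode_secondary_index(7)) == 7);
      return 0;
    }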
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -185,12 +185,30 @@
 }
 
 
+void InterpreterMacroAssembler::get_cache_index_at_bcp(Register index,
+                                                       int bcp_offset,
+                                                       bool giant_index) {
+  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+  if (!giant_index) {
+    load_unsigned_short(index, Address(r13, bcp_offset));
+  } else {
+    assert(EnableInvokeDynamic, "giant index used only for EnableInvokeDynamic");
+    movl(index, Address(r13, bcp_offset));
+    // Check if the secondary index definition is still ~x, otherwise
+    // we have to change the following assembler code to calculate the
+    // plain index.
+    assert(constantPoolCacheOopDesc::decode_secondary_index(~123) == 123, "else change next line");
+    notl(index);  // convert to plain index
+  }
+}
+
+
 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
                                                            Register index,
-                                                           int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                           int bcp_offset,
+                                                           bool giant_index) {
   assert(cache != index, "must use different registers");
-  load_unsigned_short(index, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(index, bcp_offset, giant_index);
   movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
@@ -200,10 +218,10 @@
 
 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
                                                                Register tmp,
-                                                               int bcp_offset) {
-  assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
+                                                               int bcp_offset,
+                                                               bool giant_index) {
   assert(cache != tmp, "must use different register");
-  load_unsigned_short(tmp, Address(r13, bcp_offset));
+  get_cache_index_at_bcp(tmp, bcp_offset, giant_index);
   assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
   // convert from field index to ConstantPoolCacheEntry index
   // and from word offset to byte offset
@@ -1236,7 +1254,8 @@
 
 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
                                                      Register mdp,
-                                                     Register reg2) {
+                                                     Register reg2,
+                                                     bool receiver_can_be_null) {
   if (ProfileInterpreter) {
     Label profile_continue;
 
@@ -1246,8 +1265,15 @@
     // We are making a call.  Increment the count.
     increment_mdp_data_at(mdp, in_bytes(CounterData::count_offset()));
 
+    Label skip_receiver_profile;
+    if (receiver_can_be_null) {
+      testptr(receiver, receiver);
+      jcc(Assembler::zero, skip_receiver_profile);
+    }
+
     // Record the receiver type.
     record_klass_in_profile(receiver, mdp, reg2);
+    bind(skip_receiver_profile);
 
     // The method data pointer needs to be updated to reflect the new target.
     update_mdp_by_constant(mdp,
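The new receiver_can_be_null parameter on profile_virtual_call exists because method handle call paths can present a legitimately NULL receiver; the emitted testptr/jcc pair skips only the receiver-type record, while the call counter is still bumped. A plain C++ sketch of that control flow (stand-ins for the macro-assembler calls, not HotSpot code):

    #include <cstddef>

    struct Klass;  // opaque stand-in for a receiver's klass

    static long call_count = 0;
    static void record_klass_in_profile(const Klass*) { /* record type */ }

    // Mirrors the emitted sequence: always increment the count, but skip
    // the type record when a possibly-NULL receiver is actually NULL.
    static void profile_virtual_call(const Klass* receiver,
                                     bool receiver_can_be_null) {
      ++call_count;  // increment_mdp_data_at(CounterData::count_offset())
      if (receiver_can_be_null && receiver == NULL) {
        return;      // jcc(Assembler::zero, skip_receiver_profile)
      }
      record_klass_in_profile(receiver);
    }

    int main() {
      profile_virtual_call(NULL, /*receiver_can_be_null=*/true);  // no crash
      return call_count == 1 ? 0 : 1;
    }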
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp Wed Dec 23 00:47:04 2009 -0800
@@ -95,9 +95,10 @@
 
   void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
   void get_cache_and_index_at_bcp(Register cache, Register index,
-                                  int bcp_offset);
+                                  int bcp_offset, bool giant_index = false);
   void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
-                                      int bcp_offset);
+                                      int bcp_offset, bool giant_index = false);
+  void get_cache_index_at_bcp(Register index, int bcp_offset, bool giant_index = false);
 
 
   void pop_ptr(Register r = rax);
@@ -236,7 +237,8 @@
   void profile_call(Register mdp);
   void profile_final_call(Register mdp);
   void profile_virtual_call(Register receiver, Register mdp,
-                            Register scratch2);
+                            Register scratch2,
+                            bool receiver_can_be_null = false);
   void profile_ret(Register return_bci, Register mdp);
   void profile_null_seen(Register mdp);
   void profile_typecheck(Register mdp, Register klass, Register scratch);
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -277,12 +277,11 @@
   address entry_point = __ pc();
 
   // abstract method entry
-  // remove return address. Not really needed, since exception
-  // handling throws away expression stack
-  __ pop(rbx);
 
-  // adjust stack to what a normal return would do
-  __ mov(rsp, r13);
+  // pop return address, reset last_sp to NULL
+  __ empty_expression_stack();
+  __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
+  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
 
   // throw exception
   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -300,7 +299,10 @@
   if (!EnableMethodHandles) {
     return generate_abstract_entry();
   }
-  return generate_abstract_entry(); //6815692//
+
+  address entry_point = MethodHandles::generate_method_handle_interpreter_entry(_masm);
+
+  return entry_point;
 }
 
--- a/src/cpu/x86/vm/methodHandles_x86.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/methodHandles_x86.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -448,7 +448,7 @@
                              rbx_index, Address::times_ptr,
                              base + vtableEntry::method_offset_in_bytes());
         Register rbx_method = rbx_temp;
-        __ movl(rbx_method, vtable_entry_addr);
+        __ movptr(rbx_method, vtable_entry_addr);
 
         __ verify_oop(rbx_method);
         __ jmp(rbx_method_fie);
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -2935,6 +2935,16 @@
 
     // arraycopy stubs used by compilers
     generate_arraycopy_stubs();
+
+    // generic method handle stubs
+    if (EnableMethodHandles && SystemDictionary::MethodHandle_klass() != NULL) {
+      for (MethodHandles::EntryKind ek = MethodHandles::_EK_FIRST;
+           ek < MethodHandles::_EK_LIMIT;
+           ek = MethodHandles::EntryKind(1 + (int)ek)) {
+        StubCodeMark mark(this, "MethodHandle", MethodHandles::entry_name(ek));
+        MethodHandles::generate_method_handle_stub(_masm, ek);
+      }
+    }
   }
 
  public:
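The stub generator walks every MethodHandles::EntryKind by integer-stepping the enum between the _EK_FIRST and _EK_LIMIT sentinels, the usual pre-C++11 idiom for iterating an enum range. A self-contained sketch of the pattern (the enum values below are illustrative, not the real entry kinds):

    #include <cstdio>

    // Illustrative enum with FIRST/LIMIT sentinels, mirroring
    // MethodHandles::_EK_FIRST and MethodHandles::_EK_LIMIT.
    enum EntryKind { EK_FIRST = 0, EK_check_mtype = EK_FIRST,
                     EK_invokestatic, EK_invokevirtual, EK_LIMIT };

    static void generate_stub(EntryKind ek) {
      std::printf("generating stub for entry kind %d\n", (int) ek);
    }

    int main() {
      // Step the enum through a cast to int, exactly as the stub loop
      // does: ek = EntryKind(1 + (int)ek).
      for (EntryKind ek = EK_FIRST; ek < EK_LIMIT; ek = EntryKind(1 + (int) ek)) {
        generate_stub(ek);
      }
      return 0;
    }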
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -100,21 +100,26 @@
   return entry;
 }
 
-// Arguments are: required type in rarg1, failing object (or NULL) in rarg2
+// Arguments are: required type at TOS+8, failing object (or NULL) at TOS+4.
 address TemplateInterpreterGenerator::generate_WrongMethodType_handler() {
   address entry = __ pc();
 
   __ pop(c_rarg2);              // failing object is at TOS
   __ pop(c_rarg1);              // required type is at TOS+8
 
-  // expression stack must be empty before entering the VM if an
-  // exception happened
+  __ verify_oop(c_rarg1);
+  __ verify_oop(c_rarg2);
+
+  // Various method handle types use interpreter registers as temps.
+  __ restore_bcp();
+  __ restore_locals();
+
+  // Expression stack must be empty before entering the VM for an exception.
   __ empty_expression_stack();
 
   __ call_VM(noreg,
              CAST_FROM_FN_PTR(address,
-                              InterpreterRuntime::
-                              throw_WrongMethodTypeException),
+                              InterpreterRuntime::throw_WrongMethodTypeException),
              // pass required type, failing object (or NULL)
              c_rarg1, c_rarg2);
   return entry;
@@ -182,15 +187,29 @@
   __ restore_bcp();
   __ restore_locals();
 
-  __ get_cache_and_index_at_bcp(rbx, rcx, 1);
+  Label L_got_cache, L_giant_index;
+  if (EnableInvokeDynamic) {
+    __ cmpb(Address(r13, 0), Bytecodes::_invokedynamic);
+    __ jcc(Assembler::equal, L_giant_index);
+  }
+  __ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
+  __ bind(L_got_cache);
   __ movl(rbx, Address(rbx, rcx,
-                       Address::times_8,
+                       Address::times_ptr,
                        in_bytes(constantPoolCacheOopDesc::base_offset()) +
                        3 * wordSize));
   __ andl(rbx, 0xFF);
   if (TaggedStackInterpreter) __ shll(rbx, 1);  // 2 slots per parameter.
   __ lea(rsp, Address(rsp, rbx, Address::times_8));
   __ dispatch_next(state, step);
+
+  // out of the main line of code...
+  if (EnableInvokeDynamic) {
+    __ bind(L_giant_index);
+    __ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
+    __ jmp(L_got_cache);
+  }
+
   return entry;
 }
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -3146,7 +3146,6 @@
     __ profile_call(rsi);
   }
 
-  Label handle_unlinked_site;
   __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
   __ null_check(rcx);
   __ prepare_to_jump_from_interpreted();
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -203,18 +203,15 @@
     __ jcc(Assembler::notEqual, fast_patch);
     __ get_method(scratch);
     // Let breakpoint table handling rewrite to quicker bytecode
-    __ call_VM(noreg,
-               CAST_FROM_FN_PTR(address,
-                                InterpreterRuntime::set_original_bytecode_at),
-               scratch, r13, bc);
+    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), scratch, r13, bc);
 #ifndef ASSERT
     __ jmpb(patch_done);
+#else
+    __ jmp(patch_done);
+#endif
     __ bind(fast_patch);
   }
-#else
-    __ jmp(patch_done);
-    __ bind(fast_patch);
-  }
+#ifdef ASSERT
   Label okay;
   __ load_unsigned_byte(scratch, at_bcp(0));
   __ cmpl(scratch, (int) Bytecodes::java_code(bytecode));
@@ -2054,26 +2051,28 @@
   }
 }
 
-void TemplateTable::resolve_cache_and_index(int byte_no,
-                                            Register Rcache,
-                                            Register index) {
+void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
   assert(byte_no == 1 || byte_no == 2, "byte_no out of range");
+  bool is_invokedynamic = (bytecode() == Bytecodes::_invokedynamic);
 
   const Register temp = rbx;
   assert_different_registers(Rcache, index, temp);
 
   const int shift_count = (1 + byte_no) * BitsPerByte;
   Label resolved;
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
-  __ movl(temp, Address(Rcache,
-                        index, Address::times_8,
-                        constantPoolCacheOopDesc::base_offset() +
-                        ConstantPoolCacheEntry::indices_offset()));
-  __ shrl(temp, shift_count);
-  // have we resolved this bytecode?
-  __ andl(temp, 0xFF);
-  __ cmpl(temp, (int) bytecode());
-  __ jcc(Assembler::equal, resolved);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
+  if (is_invokedynamic) {
+    // we are resolved if the f1 field contains a non-null CallSite object
+    __ cmpptr(Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f1_offset()), (int32_t) NULL_WORD);
+    __ jcc(Assembler::notEqual, resolved);
+  } else {
+    __ movl(temp, Address(Rcache, index, Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+    __ shrl(temp, shift_count);
+    // have we resolved this bytecode?
+    __ andl(temp, 0xFF);
+    __ cmpl(temp, (int) bytecode());
+    __ jcc(Assembler::equal, resolved);
+  }
 
   // resolve first time through
   address entry;
@@ -2090,6 +2089,9 @@
   case Bytecodes::_invokeinterface:
     entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);
     break;
+  case Bytecodes::_invokedynamic:
+    entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);
+    break;
   default:
     ShouldNotReachHere();
     break;
@@ -2098,7 +2100,7 @@
   __ call_VM(noreg, entry, temp);
 
   // Update registers with resolved info
-  __ get_cache_and_index_at_bcp(Rcache, index, 1);
+  __ get_cache_and_index_at_bcp(Rcache, index, 1, is_invokedynamic);
   __ bind(resolved);
 }
 
@@ -2832,15 +2834,14 @@
     ShouldNotReachHere();
   }
 
-void TemplateTable::prepare_invoke(Register method,
-                                   Register index,
-                                   int byte_no,
-                                   Bytecodes::Code code) {
+void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
   // determine flags
+  Bytecodes::Code code = bytecode();
   const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
+  const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
   const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
   const bool is_invokespecial = code == Bytecodes::_invokespecial;
-  const bool load_receiver = code != Bytecodes::_invokestatic;
+  const bool load_receiver = (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic);
   const bool receiver_null_check = is_invokespecial;
   const bool save_flags = is_invokeinterface || is_invokevirtual;
   // setup registers & access constant pool cache
@@ -2858,9 +2859,13 @@
     __ movl(recv, flags);
     __ andl(recv, 0xFF);
     if (TaggedStackInterpreter) __ shll(recv, 1);  // index*2
-    __ movptr(recv, Address(rsp, recv, Address::times_8,
-                            -Interpreter::expr_offset_in_bytes(1)));
-    __ verify_oop(recv);
+    Address recv_addr(rsp, recv, Address::times_8, -Interpreter::expr_offset_in_bytes(1));
+    if (is_invokedynamic) {
+      __ lea(recv, recv_addr);
+    } else {
+      __ movptr(recv, recv_addr);
+      __ verify_oop(recv);
+    }
   }
 
   // do null check if needed
@@ -2878,10 +2883,14 @@
   ConstantPoolCacheEntry::verify_tosBits();
   // load return address
   {
-    ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
-    ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
-    __ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
-    __ movptr(flags, Address(rscratch1, flags, Address::times_8));
+    address table_addr;
+    if (is_invokeinterface || is_invokedynamic)
+      table_addr = (address)Interpreter::return_5_addrs_by_index_table();
+    else
+      table_addr = (address)Interpreter::return_3_addrs_by_index_table();
+    ExternalAddress table(table_addr);
+    __ lea(rscratch1, table);
+    __ movptr(flags, Address(rscratch1, flags, Address::times_ptr));
   }
 
   // push return address
@@ -2947,7 +2956,7 @@
 
 void TemplateTable::invokevirtual(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
 
   // rbx: index
   // rcx: receiver
@@ -2959,7 +2968,7 @@
 
 void TemplateTable::invokespecial(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2969,7 +2978,7 @@
 
 void TemplateTable::invokestatic(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rbx, noreg, byte_no, bytecode());
+  prepare_invoke(rbx, noreg, byte_no);
   // do the call
   __ verify_oop(rbx);
   __ profile_call(rax);
@@ -2983,7 +2992,7 @@
 
 void TemplateTable::invokeinterface(int byte_no) {
   transition(vtos, vtos);
-  prepare_invoke(rax, rbx, byte_no, bytecode());
+  prepare_invoke(rax, rbx, byte_no);
 
   // rax: Interface
   // rbx: index
@@ -3072,7 +3081,24 @@
     return;
   }
 
-  __ stop("invokedynamic NYI");//6815692//
+  prepare_invoke(rax, rbx, byte_no);
+
+  // rax: CallSite object (f1)
+  // rbx: unused (f2)
+  // rcx: receiver address
+  // rdx: flags (unused)
+
+  if (ProfileInterpreter) {
+    Label L;
+    // %%% should make a type profile for any invokedynamic that takes a ref argument
+    // profile this call
+    __ profile_call(r13);
+  }
+
+  __ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
+  __ null_check(rcx);
+  __ prepare_to_jump_from_interpreted();
+  __ jump_to_method_handle_entry(rcx, rdx);
 }
 
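resolve_cache_and_index now uses two different notions of "already resolved": ordinary invokes still compare the per-bytecode tag packed into the indices word, while invokedynamic treats a non-null f1 field (the linked CallSite oop) as the resolved marker. A compact standalone sketch of both checks (the struct and field layout here are illustrative):

    #include <cstddef>
    #include <cstdint>

    // Illustrative constant pool cache entry: f1 holds the CallSite oop
    // once an invokedynamic site is linked; indices packs bytecode tags.
    struct CacheEntry {
      const void* f1;
      uint32_t    indices;
    };

    static bool is_resolved(const CacheEntry& e, int byte_no, int bytecode,
                            bool is_invokedynamic) {
      if (is_invokedynamic) {
        // resolved iff f1 contains a non-null CallSite object (cmpptr/jcc)
        return e.f1 != NULL;
      }
      // shrl/andl/cmpl: shift this byte position's tag down and compare
      const int shift_count = (1 + byte_no) * 8;  // BitsPerByte == 8
      return ((e.indices >> shift_count) & 0xFF) == (uint32_t) bytecode;
    }

    int main() {
      CacheEntry e = { NULL, 0 };
      return is_resolved(e, 1, 0xBA, /*is_invokedynamic=*/true) ? 1 : 0;
    }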
--- a/src/cpu/x86/vm/templateTable_x86_64.hpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/cpu/x86/vm/templateTable_x86_64.hpp Wed Dec 23 00:47:04 2009 -0800
@@ -22,8 +22,7 @@
  *
  */
 
-  static void prepare_invoke(Register method, Register index, int byte_no,
-                             Bytecodes::Code code);
+  static void prepare_invoke(Register method, Register index, int byte_no);
   static void invokevirtual_helper(Register index, Register recv,
                                    Register flags);
   static void volatile_barrier(Assembler::Membar_mask_bits order_constraint);
--- a/src/share/vm/classfile/classFileParser.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/share/vm/classfile/classFileParser.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -2511,23 +2511,12 @@
         fac_ptr->nonstatic_byte_count -= 1;
         (*fields_ptr)->ushort_at_put(i + instanceKlass::signature_index_offset,
                                      word_sig_index);
-        if (wordSize == jintSize) {
-          fac_ptr->nonstatic_word_count += 1;
-        } else {
-          fac_ptr->nonstatic_double_count += 1;
-        }
-
-        FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i+4);
+        fac_ptr->nonstatic_word_count += 1;
+
+        FieldAllocationType atype = (FieldAllocationType) (*fields_ptr)->ushort_at(i + instanceKlass::low_offset);
         assert(atype == NONSTATIC_BYTE, "");
         FieldAllocationType new_atype = NONSTATIC_WORD;
-        if (wordSize > jintSize) {
-          if (Universe::field_type_should_be_aligned(T_LONG)) {
-            atype = NONSTATIC_ALIGNED_DOUBLE;
-          } else {
-            atype = NONSTATIC_DOUBLE;
-          }
-        }
-        (*fields_ptr)->ushort_at_put(i+4, new_atype);
+        (*fields_ptr)->ushort_at_put(i + instanceKlass::low_offset, new_atype);
 
         found_vmentry = true;
         break;
@@ -3085,7 +3074,7 @@
     int len = fields->length();
     for (int i = 0; i < len; i += instanceKlass::next_offset) {
       int real_offset;
-      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i+4);
+      FieldAllocationType atype = (FieldAllocationType) fields->ushort_at(i + instanceKlass::low_offset);
       switch (atype) {
         case STATIC_OOP:
           real_offset = next_static_oop_offset;
@@ -3173,8 +3162,8 @@
       default:
         ShouldNotReachHere();
       }
-      fields->short_at_put(i+4, extract_low_short_from_int(real_offset) );
-      fields->short_at_put(i+5, extract_high_short_from_int(real_offset) );
+      fields->short_at_put(i + instanceKlass::low_offset, extract_low_short_from_int(real_offset));
+      fields->short_at_put(i + instanceKlass::high_offset, extract_high_short_from_int(real_offset));
     }
 
     // Size of instances
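The classFileParser cleanup replaces the magic i+4 / i+5 slot offsets with the named instanceKlass::low_offset / high_offset constants; a field's real offset is still stored split across two 16-bit slots of the fields array. A sketch of the split-and-reassemble arithmetic (modeled on HotSpot's extract_low_short_from_int / extract_high_short_from_int; build_int_from_shorts here is our own helper):

    #include <cassert>
    #include <cstdint>

    // Split a 32-bit field offset across two 16-bit slots, as the fields
    // array does at i + low_offset and i + high_offset.
    static inline uint16_t extract_low_short_from_int(int32_t v)  { return (uint16_t)(v & 0xFFFF); }
    static inline uint16_t extract_high_short_from_int(int32_t v) { return (uint16_t)((v >> 16) & 0xFFFF); }
    static inline int32_t  build_int_from_shorts(uint16_t lo, uint16_t hi) {
      return (int32_t)(((uint32_t) hi << 16) | lo);
    }

    int main() {
      const int32_t real_offset = 0x12345678;
      uint16_t lo = extract_low_short_from_int(real_offset);
      uint16_t hi = extract_high_short_from_int(real_offset);
      assert(build_int_from_shorts(lo, hi) == real_offset);  // round-trips
      return 0;
    }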
--- a/src/share/vm/code/nmethod.cpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/share/vm/code/nmethod.cpp Wed Dec 23 00:47:04 2009 -0800
@@ -414,9 +414,8 @@
 }
 
 const char* nmethod::compile_kind() const {
-  if (method() == NULL)    return "unloaded";
-  if (is_native_method())  return "c2n";
   if (is_osr_method())     return "osr";
+  if (method() != NULL && is_native_method())  return "c2n";
   return NULL;
 }
 
@@ -1127,6 +1126,9 @@
   }
   flags.state = unloaded;
 
+  // Log the unloading.
+  log_state_change();
+
   // The methodOop is gone at this point
   assert(_method == NULL, "Tautology");
 
@@ -1137,8 +1139,6 @@
 
 void nmethod::invalidate_osr_method() {
   assert(_entry_bci != InvocationEntryBci, "wrong kind of nmethod");
-  if (_entry_bci != InvalidOSREntryBci)
-    inc_decompile_count();
   // Remove from list of active nmethods
   if (method() != NULL)
     instanceKlass::cast(method()->method_holder())->remove_osr_nmethod(this);
@@ -1146,59 +1146,63 @@
   _entry_bci = InvalidOSREntryBci;
 }
 
-void nmethod::log_state_change(int state) const {
+void nmethod::log_state_change() const {
   if (LogCompilation) {
     if (xtty != NULL) {
       ttyLocker ttyl;  // keep the following output all in one block
-      xtty->begin_elem("make_not_entrant %sthread='" UINTX_FORMAT "'",
-                       (state == zombie ? "zombie='1' " : ""),
-                       os::current_thread_id());
+      if (flags.state == unloaded) {
+        xtty->begin_elem("make_unloaded thread='" UINTX_FORMAT "'",
+                         os::current_thread_id());
+      } else {
+        xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'%s",
+                         os::current_thread_id(),
+                         (flags.state == zombie ? " zombie='1'" : ""));
+      }
       log_identity(xtty);
       xtty->stamp();
       xtty->end_elem();
     }
   }
-  if (PrintCompilation) {
-    print_on(tty, state == zombie ? "made zombie " : "made not entrant ");
+  if (PrintCompilation && flags.state != unloaded) {
+    print_on(tty, flags.state == zombie ? "made zombie " : "made not entrant ");
     tty->cr();
   }
 }
 
 // Common functionality for both make_not_entrant and make_zombie
-void nmethod::make_not_entrant_or_zombie(int state) {
+bool nmethod::make_not_entrant_or_zombie(int state) {
   assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");
 
-  // Code for an on-stack-replacement nmethod is removed when a class gets unloaded.
-  // They never become zombie/non-entrant, so the nmethod sweeper will never remove
-  // them. Instead the entry_bci is set to InvalidOSREntryBci, so the osr nmethod
-  // will never be used anymore. That the nmethods only gets removed when class unloading
-  // happens, make life much simpler, since the nmethods are not just going to disappear
-  // out of the blue.
-  if (is_osr_method()) {
-    if (osr_entry_bci() != InvalidOSREntryBci) {
-      // only log this once
-      log_state_change(state);
-    }
-    invalidate_osr_method();
-    return;
+  // If the method is already zombie there is nothing to do
+  if (is_zombie()) {
+    return false;
   }
 
-  // If the method is already zombie or set to the state we want, nothing to do
-  if (is_zombie() || (state == not_entrant && is_not_entrant())) {
-    return;
-  }
-
-  log_state_change(state);
-
   // Make sure the nmethod is not flushed in case of a safepoint in code below.
   nmethodLocker nml(this);
 
   {
+    // invalidate osr nmethod before acquiring the patching lock since
+    // they both acquire leaf locks and we don't want a deadlock.
+    // This logic is equivalent to the logic below for patching the
+    // verified entry point of regular methods.
+    if (is_osr_method()) {
+      // this effectively makes the osr nmethod not entrant
+      invalidate_osr_method();
+    }
+
     // Enter critical section.  Does not block for safepoint.
     MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+    if (flags.state == state) {
+      // another thread already performed this transition so nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
     // The caller can be calling the method statically or through an inline
     // cache call.
-    if (!is_not_entrant()) {
+    if (!is_osr_method() && !is_not_entrant()) {
       NativeJump::patch_verified_entry(entry_point(), verified_entry_point(),
                                        SharedRuntime::get_handle_wrong_method_stub());
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
@@ -1217,6 +1221,10 @@
 
     // Change state
     flags.state = state;
+
+    // Log the transition once
+    log_state_change();
+
   }  // leave critical region under Patching_lock
 
   if (state == not_entrant) {
@@ -1240,7 +1248,6 @@
     // It's a true state change, so mark the method as decompiled.
     inc_decompile_count();
 
-
     // zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
     // and it hasn't already been reported for this nmethod then report it now.
     // (the event may have been reported earilier if the GC marked it for unloading).
@@ -1268,7 +1275,7 @@
 
   // Check whether method got unloaded at a safepoint before this,
   // if so we can skip the flushing steps below
-  if (method() == NULL) return;
+  if (method() == NULL) return true;
 
   // Remove nmethod from method.
   // We need to check if both the _code and _from_compiled_code_entry_point
@@ -1282,6 +1289,8 @@
     HandleMark hm;
     method()->clear_code();
   }
+
+  return true;
 }
 
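The reworked make_not_entrant_or_zombie is now an idempotent state machine guarded by the Patching_lock: the thread that actually performs the transition returns true and logs it exactly once, while a racing thread finds flags.state == state under the lock and returns false, which is what the new bool return type in nmethod.hpp below reports. A threadless sketch of that contract (a std::mutex stands in for Patching_lock; the states are simplified):

    #include <cassert>
    #include <mutex>

    enum State { in_use, not_entrant, zombie };

    struct NMethodLike {
      State state;
      std::mutex patching_lock;  // stands in for Patching_lock
      NMethodLike() : state(in_use) {}

      // Returns true if this call changed the state, false if the
      // transition had already been performed (e.g. by another thread).
      bool make_not_entrant_or_zombie(State s) {
        if (state == zombie) return false;         // already terminal
        std::lock_guard<std::mutex> pl(patching_lock);
        if (state == s) return false;              // lost the race
        state = s;                                 // change state...
        // ...and log the transition exactly once, inside the lock
        return true;
      }
    };

    int main() {
      NMethodLike nm;
      assert(nm.make_not_entrant_or_zombie(not_entrant) == true);
      assert(nm.make_not_entrant_or_zombie(not_entrant) == false);  // idempotent
      assert(nm.make_not_entrant_or_zombie(zombie) == true);
      assert(nm.make_not_entrant_or_zombie(zombie) == false);
      return 0;
    }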
--- a/src/share/vm/code/nmethod.hpp Tue Dec 22 22:35:08 2009 -0800
+++ b/src/share/vm/code/nmethod.hpp Wed Dec 23 00:47:04 2009 -0800
@@ -252,7 +252,9 @@
   void* operator new(size_t size, int nmethod_size);
 
   const char* reloc_string_for(u_char* begin, u_char* end);
-  void make_not_entrant_or_zombie(int state);
+  // Returns true if this thread changed the state of the nmethod or
+  // false if another thread performed the transition.
+  bool make_not_entrant_or_zombie(int state);
   void inc_decompile_count();
 
   // used to check that writes to nmFlags are done consistently.
@@ -375,10 +377,12 @@
   bool is_zombie() const { return flags.state == zombie; }
   bool is_unloaded() const { return flags.state == unloaded; }
 
-  // Make the nmethod non entrant. The nmethod will continue to be alive.
-  // It is used when an uncommon trap happens.
-  void make_not_entrant() { make_not_entrant_or_zombie(not_entrant); }
-  void make_zombie() { make_not_entrant_or_zombie(zombie); }
+  // Make the nmethod non entrant. The nmethod will continue to be
+  // alive.  It is used when an uncommon trap happens.  Returns true
+  // if this thread changed the state of the nmethod or false if
+  // another thread performed the transition.
+  bool make_not_entrant() { return make_not_entrant_or_zombie(not_entrant); }
+  bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
 
   // used by jvmti to track if the unload event has been reported
   bool unload_reported() { return _unload_reported; }
@@ -563,7 +567,7 @@
   // Logging
   void log_identity(xmlStream* log) const;
   void log_new_nmethod() const;
-  void log_state_change(int state) const;
+  void log_state_change() const;
 
   // Prints a comment for one native instruction (reloc info, pc desc)
   void print_code_comment_on(outputStream* st, int column, address begin, address end);