Mon, 29 Nov 2010 15:43:36 -0500
6780143: hs203t003 hits SIGSEGV/EXCEPTION_ACCESS_VIOLATION with -XX:+UseCompressedOops
Summary: Using r12 as temporary register around call_VM trashes heapbase because call_VM doesn't always return immediately to following code (eg forward_exception, popframe and early return support).
Reviewed-by: never, kvn
1.1 --- a/src/cpu/x86/vm/assembler_x86.cpp Tue Nov 23 13:22:55 2010 -0800 1.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp Mon Nov 29 15:43:36 2010 -0500 1.3 @@ -5538,17 +5538,14 @@ 1.4 } 1.5 1.6 void MacroAssembler::warn(const char* msg) { 1.7 - push(r12); 1.8 - movq(r12, rsp); 1.9 + push(rsp); 1.10 andq(rsp, -16); // align stack as required by push_CPU_state and call 1.11 1.12 push_CPU_state(); // keeps alignment at 16 bytes 1.13 lea(c_rarg0, ExternalAddress((address) msg)); 1.14 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0); 1.15 pop_CPU_state(); 1.16 - 1.17 - movq(rsp, r12); 1.18 - pop(r12); 1.19 + pop(rsp); 1.20 } 1.21 1.22 #ifndef PRODUCT 1.23 @@ -5860,6 +5857,10 @@ 1.24 // debugging support 1.25 assert(number_of_arguments >= 0 , "cannot have negative number of arguments"); 1.26 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register")); 1.27 +#ifdef ASSERT 1.28 + LP64_ONLY(if (UseCompressedOops) verify_heapbase("call_VM_base");) 1.29 +#endif // ASSERT 1.30 + 1.31 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result"); 1.32 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp"); 1.33
2.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Nov 23 13:22:55 2010 -0800 2.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Mon Nov 29 15:43:36 2010 -0500 2.3 @@ -449,10 +449,9 @@ 2.4 // JVMTI events, such as single-stepping, are implemented partly by avoiding running 2.5 // compiled code in threads for which the event is enabled. Check here for 2.6 // interp_only_mode if these events CAN be enabled. 2.7 - get_thread(temp); 2.8 // interp_only is an int, on little endian it is sufficient to test the byte only 2.9 - // Is a cmpl faster (ce 2.10 - cmpb(Address(temp, JavaThread::interp_only_mode_offset()), 0); 2.11 + // Is a cmpl faster? 2.12 + cmpb(Address(r15_thread, JavaThread::interp_only_mode_offset()), 0); 2.13 jcc(Assembler::zero, run_compiled_code); 2.14 jmp(Address(method, methodOopDesc::interpreter_entry_offset())); 2.15 bind(run_compiled_code);
3.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Tue Nov 23 13:22:55 2010 -0800 3.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Nov 29 15:43:36 2010 -0500 3.3 @@ -1069,7 +1069,7 @@ 3.4 // runtime call by hand. 3.5 // 3.6 __ mov(c_rarg0, r15_thread); 3.7 - __ mov(r12, rsp); // remember sp 3.8 + __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 3.9 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 3.10 __ andptr(rsp, -16); // align stack as required by ABI 3.11 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans))); 3.12 @@ -1116,7 +1116,7 @@ 3.13 __ jcc(Assembler::notEqual, no_reguard); 3.14 3.15 __ pusha(); // XXX only save smashed registers 3.16 - __ mov(r12, rsp); // remember sp 3.17 + __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 3.18 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 3.19 __ andptr(rsp, -16); // align stack as required by ABI 3.20 __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages))); 3.21 @@ -1907,7 +1907,7 @@ 3.22 3.23 assert(Interpreter::trace_code(t->tos_in()) != NULL, 3.24 "entry must have been generated"); 3.25 - __ mov(r12, rsp); // remember sp 3.26 + __ mov(r12, rsp); // remember sp (can only use r12 if not using call_VM) 3.27 __ andptr(rsp, -16); // align stack as required by ABI 3.28 __ call(RuntimeAddress(Interpreter::trace_code(t->tos_in()))); 3.29 __ mov(rsp, r12); // restore sp
4.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Tue Nov 23 13:22:55 2010 -0800 4.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Mon Nov 29 15:43:36 2010 -0500 4.3 @@ -2762,7 +2762,7 @@ 4.4 // access constant pool cache entry 4.5 __ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1); 4.6 __ verify_oop(rax); 4.7 - __ mov(r12, rax); // save object pointer before call_VM() clobbers it 4.8 + __ push_ptr(rax); // save object pointer before call_VM() clobbers it 4.9 __ mov(c_rarg1, rax); 4.10 // c_rarg1: object pointer copied above 4.11 // c_rarg2: cache entry pointer 4.12 @@ -2770,8 +2770,7 @@ 4.13 CAST_FROM_FN_PTR(address, 4.14 InterpreterRuntime::post_field_access), 4.15 c_rarg1, c_rarg2); 4.16 - __ mov(rax, r12); // restore object pointer 4.17 - __ reinit_heapbase(); 4.18 + __ pop_ptr(rax); // restore object pointer 4.19 __ bind(L1); 4.20 } 4.21 4.22 @@ -3365,10 +3364,7 @@ 4.23 JVM_CONSTANT_Class); 4.24 __ jcc(Assembler::equal, quicked); 4.25 __ push(atos); // save receiver for result, and for GC 4.26 - __ mov(r12, rcx); // save rcx XXX 4.27 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4.28 - __ movq(rcx, r12); // restore rcx XXX 4.29 - __ reinit_heapbase(); 4.30 __ pop_ptr(rdx); // restore receiver 4.31 __ jmpb(resolved); 4.32 4.33 @@ -3422,11 +3418,9 @@ 4.34 __ jcc(Assembler::equal, quicked); 4.35 4.36 __ push(atos); // save receiver for result, and for GC 4.37 - __ mov(r12, rcx); // save rcx 4.38 call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc)); 4.39 - __ movq(rcx, r12); // restore rcx 4.40 - __ reinit_heapbase(); 4.41 __ pop_ptr(rdx); // restore receiver 4.42 + __ verify_oop(rdx); 4.43 __ load_klass(rdx, rdx); 4.44 __ jmpb(resolved); 4.45