--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Fri Aug 31 16:39:35 2012 -0700
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp	Sat Sep 01 13:25:18 2012 -0400
@@ -29,7 +29,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
@@ -453,8 +453,7 @@
 // Patch the callers callsite with entry to compiled code if it exists.
 static void patch_callers_callsite(MacroAssembler *masm) {
   Label L;
-  __ verify_oop(rbx);
-  __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+  __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
   __ jcc(Assembler::equal, L);
   // Schedule the branch target address early.
   // Call into the VM to patch the caller, then jump to compiled callee
@@ -486,7 +485,6 @@
   __ push(rax);
   // VM needs target method
   __ push(rbx);
-  __ verify_oop(rbx);
   __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
   __ addptr(rsp, 2*wordSize);
 
@@ -631,7 +629,7 @@
   }
 
   // Schedule the branch target address early.
-  __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+  __ movptr(rcx, Address(rbx, in_bytes(Method::interpreter_entry_offset())));
   // And repush original return address
   __ push(rax);
   __ jmp(rcx);
@@ -746,7 +744,7 @@
 
   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
-  __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+  __ movptr(rdi, Address(rbx, in_bytes(Method::from_compiled_offset())));
 
   // Now generate the shuffle code.  Pick up all register args and move the
   // rest through the floating point stack top.
@@ -859,8 +857,8 @@
   __ get_thread(rax);
   __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
 
-  // move methodOop to rax, in case we end up in an c2i adapter.
-  // the c2i adapters expect methodOop in rax, (c2) because c2's
+  // move Method* to rax, in case we end up in an c2i adapter.
+  // the c2i adapters expect Method* in rax, (c2) because c2's
   // resolve stubs return the result (the method) in rax,.
   // I'd love to fix this.
   __ mov(rax, rbx);
@@ -880,7 +878,7 @@
   gen_i2c_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs);
 
   // -------------------------------------------------------------------------
-  // Generate a C2I adapter.  On entry we know rbx, holds the methodOop during calls
+  // Generate a C2I adapter.  On entry we know rbx, holds the Method* during calls
   // to the interpreter. The args start out packed in the compiled layout.  They
   // need to be unpacked into the interpreter layout.  This will almost always
   // require some stack space.
 // We grow the current (compiled) stack, then repack
@@ -898,18 +896,14 @@
   {
 
     Label missed;
-
-    __ verify_oop(holder);
     __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
-    __ verify_oop(temp);
-
-    __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
-    __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+    __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
+    __ movptr(rbx, Address(holder, CompiledICHolder::holder_method_offset()));
     __ jcc(Assembler::notEqual, missed);
     // Method might have been compiled since the call site was patched to
     // interpreted if that is the case treat it as a miss so we can get
     // the call site corrected.
-    __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
+    __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
     __ jcc(Assembler::equal, skip_fixup);
 
     __ bind(missed);
@@ -1918,7 +1912,7 @@
 
   {
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
-    __ movoop(rax, JNIHandles::make_local(method()));
+    __ mov_metadata(rax, method());
     __ call_VM_leaf(
          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
          thread, rax);
@@ -1926,7 +1920,7 @@
 
   // RedefineClasses() tracing support for obsolete method entry
   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
-    __ movoop(rax, JNIHandles::make_local(method()));
+    __ mov_metadata(rax, method());
     __ call_VM_leaf(
          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
          thread, rax);
@@ -2184,7 +2178,7 @@
     SkipIfEqual skip_if(masm, &DTraceMethodProbes, 0);
     // Tell dtrace about this method exit
     save_native_result(masm, ret_type, stack_slots);
-    __ movoop(rax, JNIHandles::make_local(method()));
+    __ mov_metadata(rax, method());
     __ call_VM_leaf(
          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
          thread, rax);
@@ -3427,8 +3421,8 @@
   __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
   __ jcc(Assembler::notEqual, pending);
 
-  // get the returned methodOop
-  __ movptr(rbx, Address(thread, JavaThread::vm_result_offset()));
+  // get the returned Method*
+  __ get_vm_result_2(rbx, thread);
   __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
 
   __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);