--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Fri Aug 31 16:39:35 2012 -0700
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp	Sat Sep 01 13:25:18 2012 -0400
@@ -29,7 +29,7 @@
 #include "code/icBuffer.hpp"
 #include "code/vtableStubs.hpp"
 #include "interpreter/interpreter.hpp"
-#include "oops/compiledICHolderOop.hpp"
+#include "oops/compiledICHolder.hpp"
 #include "prims/jvmtiRedefineClassesTrace.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/vframeArray.hpp"
@@ -599,10 +599,10 @@
 // Patch the callers callsite with entry to compiled code if it exists.
 void AdapterGenerator::patch_callers_callsite() {
   Label L;
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
   __ br_null(G3_scratch, false, Assembler::pt, L);
   // Schedule the branch target address early.
-  __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
   // Call into the VM to patch the caller, then jump to compiled callee
   __ save_frame(4);     // Args in compiled layout; do not blow them
 
@@ -611,7 +611,7 @@
   // G2: global allocated to TLS
   // G3: used in inline cache check (scratch)
   // G4: 2nd Long arg (32bit build);
-  // G5: used in inline cache check (methodOop)
+  // G5: used in inline cache check (Method*)
 
   // The longs must go to the stack by hand since in the 32 bit build they can be trashed by window ops.
 
@@ -645,7 +645,7 @@
   __ ldx(FP, -8 + STACK_BIAS, G1);
   __ ldx(FP, -16 + STACK_BIAS, G4);
   __ mov(L5, G5_method);
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 #endif /* _LP64 */
 
   __ restore();      // Restore args
@@ -853,7 +853,7 @@
 
 #ifdef _LP64
   // Need to reload G3_scratch, used for temporary displacements.
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+  __ ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
 
   // Pass O5_savedSP as an argument to the interpreter.
   // The interpreter will restore SP to this value before returning.
@@ -1046,7 +1046,7 @@
 
   // Will jump to the compiled code just as if compiled code was doing it.
   // Pre-load the register-jump target early, to schedule it better.
-  __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
+  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3);
 
   // Now generate the shuffle code.  Pick up all register args and move the
   // rest through G1_scratch.
@@ -1163,7 +1163,7 @@
 #ifndef _LP64
   if (g3_crushed) {
     // Rats load was wasted, at least it is in cache...
-    __ ld_ptr(G5_method, methodOopDesc::from_compiled_offset(), G3);
+    __ ld_ptr(G5_method, Method::from_compiled_offset(), G3);
   }
 #endif /* _LP64 */
 
@@ -1212,7 +1212,7 @@
 
 
   // -------------------------------------------------------------------------
-  // Generate a C2I adapter.  On entry we know G5 holds the methodOop.  The
+  // Generate a C2I adapter.  On entry we know G5 holds the Method*.  The
   // args start out packed in the compiled layout.  They need to be unpacked
   // into the interpreter layout.  This will almost always require some stack
   // space.  We grow the current (compiled) stack, then repack the args.  We
@@ -1232,25 +1232,21 @@
     AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
 
     __ verify_oop(O0);
-    __ verify_oop(G5_method);
     __ load_klass(O0, G3_scratch);
-    __ verify_oop(G3_scratch);
 
 #if !defined(_LP64) && defined(COMPILER2)
     __ save(SP, -frame::register_save_words*wordSize, SP);
-    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
-    __ verify_oop(R_temp);
+    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
     __ cmp(G3_scratch, R_temp);
     __ restore();
 #else
-    __ ld_ptr(G5_method, compiledICHolderOopDesc::holder_klass_offset(), R_temp);
-    __ verify_oop(R_temp);
+    __ ld_ptr(G5_method, CompiledICHolder::holder_klass_offset(), R_temp);
     __ cmp(G3_scratch, R_temp);
 #endif
 
     Label ok, ok2;
     __ brx(Assembler::equal, false, Assembler::pt, ok);
-    __ delayed()->ld_ptr(G5_method, compiledICHolderOopDesc::holder_method_offset(), G5_method);
+    __ delayed()->ld_ptr(G5_method, CompiledICHolder::holder_method_offset(), G5_method);
     __ jump_to(ic_miss, G3_scratch);
     __ delayed()->nop();
 
@@ -1258,10 +1254,10 @@
     // Method might have been compiled since the call site was patched to
     // interpreted if that is the case treat it as a miss so we can get
     // the call site corrected.
-    __ ld_ptr(G5_method, in_bytes(methodOopDesc::code_offset()), G3_scratch);
+    __ ld_ptr(G5_method, in_bytes(Method::code_offset()), G3_scratch);
     __ bind(ok2);
     __ br_null(G3_scratch, false, Assembler::pt, skip_fixup);
-    __ delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
+    __ delayed()->ld_ptr(G5_method, in_bytes(Method::interpreter_entry_offset()), G3_scratch);
     __ jump_to(ic_miss, G3_scratch);
     __ delayed()->nop();
 
@@ -2571,7 +2567,7 @@
     // create inner frame
     __ save_frame(0);
     __ mov(G2_thread, L7_thread_cache);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
          G2_thread, O1);
@@ -2583,7 +2579,7 @@
     // create inner frame
     __ save_frame(0);
     __ mov(G2_thread, L7_thread_cache);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
          CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
          G2_thread, O1);
@@ -2869,7 +2865,7 @@
     SkipIfEqual skip_if(
       masm, G3_scratch, &DTraceMethodProbes, Assembler::zero);
     save_native_result(masm, ret_type, stack_slots);
-    __ set_oop_constant(JNIHandles::make_local(method()), O1);
+    __ set_metadata_constant(method(), O1);
     __ call_VM_leaf(L7_thread_cache,
          CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
          G2_thread, O1);
@@ -4081,9 +4077,9 @@
   __ ld_ptr(G2_thread, in_bytes(Thread::pending_exception_offset()), O1);
   __ br_notnull_short(O1, Assembler::pn, pending);
 
-  // get the returned methodOop
-
-  __ get_vm_result(G5_method);
+  // get the returned Method*
+
+  __ get_vm_result_2(G5_method);
   __ stx(G5_method, SP, RegisterSaver::G5_offset()+STACK_BIAS);
 
   // O0 is where we want to jump, overwrite G3 which is saved and scratch