--- a/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 18 14:38:31 2013 +0200
+++ b/src/cpu/sparc/vm/templateTable_sparc.cpp	Thu Apr 18 17:00:16 2013 -0400
@@ -1611,9 +1611,8 @@
   // Normal (non-jsr) branch handling

   // Save the current Lbcp
-  const Register O0_cur_bcp = O0;
-  __ mov( Lbcp, O0_cur_bcp );
-
+  const Register l_cur_bcp = Lscratch;
+  __ mov( Lbcp, l_cur_bcp );

   bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
   if ( increment_invocation_counter_for_backward_branches ) {
@@ -1623,6 +1622,9 @@
     // Bump bytecode pointer by displacement (take the branch)
     __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

+    const Register Rcounters = G3_scratch;
+    __ get_method_counters(Lmethod, Rcounters, Lforward);
+
     if (TieredCompilation) {
       Label Lno_mdo, Loverflow;
       int increment = InvocationCounter::count_increment;
@@ -1635,21 +1637,22 @@
         // Increment backedge counter in the MDO
         Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                  in_bytes(InvocationCounter::counter_offset()));
-        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
+        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                    Assembler::notZero, &Lforward);
         __ ba_short(Loverflow);
       }

-      // If there's no MDO, increment counter in Method*
+      // If there's no MDO, increment counter in MethodCounters*
       __ bind(Lno_mdo);
-      Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) +
-                                        in_bytes(InvocationCounter::counter_offset()));
-      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
+      Address backedge_counter(Rcounters,
+                               in_bytes(MethodCounters::backedge_counter_offset()) +
+                               in_bytes(InvocationCounter::counter_offset()));
+      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                  Assembler::notZero, &Lforward);
       __ bind(Loverflow);

       // notify point for loop, pass branch bytecode
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
+      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

       // Was an OSR adapter generated?
       // O0 = osr nmethod
@@ -1686,15 +1689,15 @@
     } else {
       // Update Backedge branch separately from invocations
       const Register G4_invoke_ctr = G4;
-      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
+      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
       if (ProfileInterpreter) {
         __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
         }
       } else {
         if (UseOnStackReplacement) {
-          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
+          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
         }
       }
     }