--- a/src/share/vm/interpreter/interpreterRuntime.cpp	Thu Sep 02 11:40:02 2010 -0700
+++ b/src/share/vm/interpreter/interpreterRuntime.cpp	Fri Sep 03 17:51:07 2010 -0700
@@ -777,43 +777,6 @@
 // Miscellaneous
 
 
-#ifndef PRODUCT
-static void trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci, address branch_bcp) {
-  if (TraceInvocationCounterOverflow) {
-    InvocationCounter* ic = m->invocation_counter();
-    InvocationCounter* bc = m->backedge_counter();
-    ResourceMark rm;
-    const char* msg =
-      branch_bcp == NULL
-      ? "comp-policy cntr ovfl @ %d in entry of "
-      : "comp-policy cntr ovfl @ %d in loop of ";
-    tty->print(msg, bci);
-    m->print_value();
-    tty->cr();
-    ic->print();
-    bc->print();
-    if (ProfileInterpreter) {
-      if (branch_bcp != NULL) {
-        methodDataOop mdo = m->method_data();
-        if (mdo != NULL) {
-          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
-          tty->print_cr("back branch count = %d", count);
-        }
-      }
-    }
-  }
-}
-
-static void trace_osr_request(methodHandle method, nmethod* osr, int bci) {
-  if (TraceOnStackReplacement) {
-    ResourceMark rm;
-    tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for ");
-    method->print_short_name(tty);
-    tty->print_cr(" at bci %d", bci);
-  }
-}
-#endif // !PRODUCT
-
 nmethod* InterpreterRuntime::frequency_counter_overflow(JavaThread* thread, address branch_bcp) {
   nmethod* nm = frequency_counter_overflow_inner(thread, branch_bcp);
   assert(branch_bcp != NULL || nm == NULL, "always returns null for non OSR requests");
@@ -826,7 +789,7 @@
     frame fr = thread->last_frame();
     methodOop method = fr.interpreter_frame_method();
     int bci = method->bci_from(fr.interpreter_frame_bcp());
-    nm = method->lookup_osr_nmethod_for(bci);
+    nm = method->lookup_osr_nmethod_for(bci, CompLevel_none, false);
   }
   return nm;
 }
@@ -840,74 +803,32 @@
   frame fr = thread->last_frame();
   assert(fr.is_interpreted_frame(), "must come from interpreter");
   methodHandle method(thread, fr.interpreter_frame_method());
-  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : 0;
-  const int bci = method->bci_from(fr.interpreter_frame_bcp());
-  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci, branch_bcp);)
+  const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
+  const int bci = branch_bcp != NULL ? method->bci_from(fr.interpreter_frame_bcp()) : InvocationEntryBci;
 
-  if (JvmtiExport::can_post_interpreter_events()) {
-    if (thread->is_interp_only_mode()) {
-      // If certain JVMTI events (e.g. frame pop event) are requested then the
-      // thread is forced to remain in interpreted code. This is
-      // implemented partly by a check in the run_compiled_code
-      // section of the interpreter whether we should skip running
-      // compiled code, and partly by skipping OSR compiles for
-      // interpreted-only threads.
-      if (branch_bcp != NULL) {
-        CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-        return NULL;
+  nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, thread);
+
+  if (osr_nm != NULL) {
+    // We may need to do on-stack replacement which requires that no
+    // monitors in the activation are biased because their
+    // BasicObjectLocks will need to migrate during OSR. Force
+    // unbiasing of all monitors in the activation now (even though
+    // the OSR nmethod might be invalidated) because we don't have a
+    // safepoint opportunity later once the migration begins.
+    if (UseBiasedLocking) {
+      ResourceMark rm;
+      GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
+      for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
+           kptr < fr.interpreter_frame_monitor_begin();
+           kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
+        if( kptr->obj() != NULL ) {
+          objects_to_revoke->append(Handle(THREAD, kptr->obj()));
+        }
       }
+      BiasedLocking::revoke(objects_to_revoke);
     }
   }
-
-  if (branch_bcp == NULL) {
-    // when code cache is full, compilation gets switched off, UseCompiler
-    // is set to false
-    if (!method->has_compiled_code() && UseCompiler) {
-      CompilationPolicy::policy()->method_invocation_event(method, CHECK_NULL);
-    } else {
-      // Force counter overflow on method entry, even if no compilation
-      // happened. (The method_invocation_event call does this also.)
-      CompilationPolicy::policy()->reset_counter_for_invocation_event(method);
-    }
-    // compilation at an invocation overflow no longer goes and retries test for
-    // compiled method. We always run the loser of the race as interpreted.
-    // so return NULL
-    return NULL;
-  } else {
-    // counter overflow in a loop => try to do on-stack-replacement
-    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci);
-    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci);)
-    // when code cache is full, we should not compile any more...
-    if (osr_nm == NULL && UseCompiler) {
-      const int branch_bci = method->bci_from(branch_bcp);
-      CompilationPolicy::policy()->method_back_branch_event(method, branch_bci, bci, CHECK_NULL);
-      osr_nm = method->lookup_osr_nmethod_for(bci);
-    }
-    if (osr_nm == NULL) {
-      CompilationPolicy::policy()->reset_counter_for_back_branch_event(method);
-      return NULL;
-    } else {
-      // We may need to do on-stack replacement which requires that no
-      // monitors in the activation are biased because their
-      // BasicObjectLocks will need to migrate during OSR. Force
-      // unbiasing of all monitors in the activation now (even though
-      // the OSR nmethod might be invalidated) because we don't have a
-      // safepoint opportunity later once the migration begins.
-      if (UseBiasedLocking) {
-        ResourceMark rm;
-        GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
-        for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
-             kptr < fr.interpreter_frame_monitor_begin();
-             kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
-          if( kptr->obj() != NULL ) {
-            objects_to_revoke->append(Handle(THREAD, kptr->obj()));
-          }
-        }
-        BiasedLocking::revoke(objects_to_revoke);
-      }
-      return osr_nm;
-    }
-  }
+  return osr_nm;
 IRT_END
 
 IRT_LEAF(jint, InterpreterRuntime::bcp_to_di(methodOopDesc* method, address cur_bcp))