81 // but we shouldn't expose the system to this problem in testing |
88 // but we shouldn't expose the system to this problem in testing |
82 // modes. |
89 // modes. |
83 if (!AbstractInterpreter::can_be_compiled(m)) { |
90 if (!AbstractInterpreter::can_be_compiled(m)) { |
84 return false; |
91 return false; |
85 } |
92 } |
86 |
93 if (comp_level == CompLevel_all) { |
87 return !m->is_not_compilable(); |
94 return !m->is_not_compilable(CompLevel_simple) && !m->is_not_compilable(CompLevel_full_optimization); |
|
95 } else { |
|
96 return !m->is_not_compilable(comp_level); |
|
97 } |
|
98 } |
|
99 |
|
100 bool CompilationPolicy::is_compilation_enabled() { |
|
101 // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler |
|
102 return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs(); |
88 } |
103 } |
89 |
104 |
90 #ifndef PRODUCT |
105 #ifndef PRODUCT |
// Report the accumulated time spent inside the compilation policy
// (collected via _accumulated_time) on the tty. Non-product only.
void CompilationPolicy::print_time() {
  tty->print_cr ("Accumulated compilationPolicy times:");
  tty->print_cr ("---------------------------");
  tty->print_cr ("  Total: %3.3f sec.", _accumulated_time.seconds());
}
96 |
111 |
97 static void trace_osr_completion(nmethod* osr_nm) { |
112 void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) { |
98 if (TraceOnStackReplacement) { |
113 if (TraceOnStackReplacement) { |
99 if (osr_nm == NULL) tty->print_cr("compilation failed"); |
114 if (osr_nm == NULL) tty->print_cr("compilation failed"); |
100 else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm); |
115 else tty->print_cr("nmethod " INTPTR_FORMAT, osr_nm); |
101 } |
116 } |
102 } |
117 } |
103 #endif // !PRODUCT |
118 #endif // !PRODUCT |
104 |
119 |
105 void CompilationPolicy::reset_counter_for_invocation_event(methodHandle m) { |
120 void NonTieredCompPolicy::initialize() { |
|
121 // Setup the compiler thread numbers |
|
122 if (CICompilerCountPerCPU) { |
|
123 // Example: if CICompilerCountPerCPU is true, then we get |
|
124 // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine. |
|
125 // May help big-app startup time. |
|
126 _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1); |
|
127 } else { |
|
128 _compiler_count = CICompilerCount; |
|
129 } |
|
130 } |
|
131 |
|
132 int NonTieredCompPolicy::compiler_count(CompLevel comp_level) { |
|
133 #ifdef COMPILER1 |
|
134 if (is_c1_compile(comp_level)) { |
|
135 return _compiler_count; |
|
136 } |
|
137 #endif |
|
138 |
|
139 #ifdef COMPILER2 |
|
140 if (is_c2_compile(comp_level)) { |
|
141 return _compiler_count; |
|
142 } |
|
143 #endif |
|
144 |
|
145 return 0; |
|
146 } |
|
147 |
|
148 void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { |
106 // Make sure invocation and backedge counter doesn't overflow again right away |
149 // Make sure invocation and backedge counter doesn't overflow again right away |
107 // as would be the case for native methods. |
150 // as would be the case for native methods. |
108 |
151 |
109 // BUT also make sure the method doesn't look like it was never executed. |
152 // BUT also make sure the method doesn't look like it was never executed. |
110 // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). |
153 // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). |
126 i->set(i->state(), CompileThreshold); |
169 i->set(i->state(), CompileThreshold); |
127 // Don't reset counter too low - it is used to check if OSR method is ready. |
170 // Don't reset counter too low - it is used to check if OSR method is ready. |
128 b->set(b->state(), CompileThreshold / 2); |
171 b->set(b->state(), CompileThreshold / 2); |
129 } |
172 } |
130 |
173 |
|
174 // |
|
175 // CounterDecay |
|
176 // |
|
177 // Interates through invocation counters and decrements them. This |
|
178 // is done at each safepoint. |
|
179 // |
|
180 class CounterDecay : public AllStatic { |
|
181 static jlong _last_timestamp; |
|
182 static void do_method(methodOop m) { |
|
183 m->invocation_counter()->decay(); |
|
184 } |
|
185 public: |
|
186 static void decay(); |
|
187 static bool is_decay_needed() { |
|
188 return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength; |
|
189 } |
|
190 }; |
|
191 |
|
192 jlong CounterDecay::_last_timestamp = 0; |
|
193 |
|
// Decay the invocation counters of a slice of all loaded classes' methods.
// Only a fraction of the classes is visited per pass, sized so the whole
// set is covered in roughly CounterHalfLifeTime.
void CounterDecay::decay() {
  _last_timestamp = os::javaTimeMillis();

  // This operation is going to be performed only at the end of a safepoint
  // and hence GC's will not be going on, all Java mutators are suspended
  // at this point and hence SystemDictionary_lock is also not needed.
  assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint");
  int nclasses = SystemDictionary::number_of_classes();
  double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 /
                                        CounterHalfLifeTime);
  // Note: int loop counter compared against a double bound — visits
  // ceil(classes_per_tick) classes per pass.
  for (int i = 0; i < classes_per_tick; i++) {
    klassOop k = SystemDictionary::try_get_next_class();
    if (k != NULL && k->klass_part()->oop_is_instance()) {
      instanceKlass::cast(k)->methods_do(do_method);
    }
  }
}
|
211 |
|
212 // Called at the end of the safepoint |
|
213 void NonTieredCompPolicy::do_safepoint_work() { |
|
214 if(UseCounterDecay && CounterDecay::is_decay_needed()) { |
|
215 CounterDecay::decay(); |
|
216 } |
|
217 } |
|
218 |
|
// Deoptimization support: clear the profiling counters of every scope that
// participated in the trapping compilation so the method is not immediately
// recompiled from the same stale profile.
void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) {
  ScopeDesc* sd = trap_scope;
  for (; !sd->is_top(); sd = sd->sender()) {
    // Reset ICs of inlined methods, since they can trigger compilations also.
    sd->method()->invocation_counter()->reset();
  }
  // sd now refers to the outermost (root) scope of the compilation.
  InvocationCounter* c = sd->method()->invocation_counter();
  if (is_osr) {
    // It was an OSR method, so bump the count higher.
    c->set(c->state(), CompileThreshold);
  } else {
    c->reset();
  }
  sd->method()->backedge_counter()->reset();
}
|
234 |
|
235 // This method can be called by any component of the runtime to notify the policy |
|
236 // that it's recommended to delay the complation of this method. |
|
237 void NonTieredCompPolicy::delay_compilation(methodOop method) { |
|
238 method->invocation_counter()->decay(); |
|
239 method->backedge_counter()->decay(); |
|
240 } |
|
241 |
|
242 void NonTieredCompPolicy::disable_compilation(methodOop method) { |
|
243 method->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); |
|
244 method->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); |
|
245 } |
|
246 |
|
// Pick the next task from the compile queue: plain FIFO order.
CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
  return compile_queue->first();
}
|
250 |
|
// Decide whether the method's profile (MDO) has accumulated enough mileage
// since its creation to be trusted by the compiler. The target is either an
// absolute mileage (ProfileMaturityPercentage <= 0) or a percentage of
// CompileThreshold.
bool NonTieredCompPolicy::is_mature(methodOop method) {
  methodDataOop mdo = method->method_data();
  assert(mdo != NULL, "Should be");
  uint current = mdo->mileage_of(method);
  uint initial = mdo->creation_mileage();
  if (current < initial)
    return true;  // some sort of overflow
  uint target;
  if (ProfileMaturityPercentage <= 0)
    target = (uint) -ProfileMaturityPercentage; // absolute value
  else
    target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 );
  return (current >= initial + target);
}
|
265 |
|
// Counter-overflow entry point, called from the interpreter when either the
// invocation counter (bci == InvocationEntryBci) or a backedge counter
// overflows. Returns an OSR nmethod to continue in, or NULL to keep
// interpreting. comp_level is always CompLevel_none here.
nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci, int bci, CompLevel comp_level, TRAPS) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
  if (JvmtiExport::can_post_interpreter_events()) {
    assert(THREAD->is_Java_thread(), "Wrong type of thread");
    if (((JavaThread*)THREAD)->is_interp_only_mode()) {
      // If certain JVMTI events (e.g. frame pop event) are requested then the
      // thread is forced to remain in interpreted code. This is
      // implemented partly by a check in the run_compiled_code
      // section of the interpreter whether we should skip running
      // compiled code, and partly by skipping OSR compiles for
      // interpreted-only threads.
      if (bci != InvocationEntryBci) {
        reset_counter_for_back_branch_event(method);
        return NULL;
      }
    }
  }
  if (bci == InvocationEntryBci) {
    // when code cache is full, compilation gets switched off, UseCompiler
    // is set to false
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, CHECK_NULL);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened. (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // compilation at an invocation overflow no longer goes and retries test for
    // compiled method. We always run the loser of the race as interpreted.
    // so return NULL
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // when code cache is full, we should not compile any more...
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, CHECK_NULL);
      // Look again for an OSR nmethod the event may have produced.
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
    return osr_nm;
  }
  return NULL;  // not reached: both branches above return
}
|
315 |
|
316 #ifndef PRODUCT |
|
// Tracing hook for interpreter counter overflows: prints the method, its
// invocation/backedge counters and, when interpreter profiling is on, the
// taken count of the overflowing branch (identified by branch_bci).
void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    InvocationCounter* ic = m->invocation_counter();
    InvocationCounter* bc = m->backedge_counter();
    ResourceMark rm;
    // Distinguish method-entry overflow from loop (backedge) overflow.
    const char* msg =
      bci == InvocationEntryBci
      ? "comp-policy cntr ovfl @ %d in entry of "
      : "comp-policy cntr ovfl @ %d in loop of ";
    tty->print(msg, bci);
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        methodDataOop mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}
|
342 |
|
343 void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) { |
|
344 if (TraceOnStackReplacement) { |
|
345 ResourceMark rm; |
|
346 tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for "); |
|
347 method->print_short_name(tty); |
|
348 tty->print_cr(" at bci %d", bci); |
|
349 } |
|
350 } |
|
351 #endif // !PRODUCT |
|
352 |
131 // SimpleCompPolicy - compile current method |
353 // SimpleCompPolicy - compile current method |
132 |
354 |
133 void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) { |
355 void SimpleCompPolicy::method_invocation_event( methodHandle m, TRAPS) { |
134 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
356 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
135 |
357 |
136 int hot_count = m->invocation_count(); |
358 int hot_count = m->invocation_count(); |
137 reset_counter_for_invocation_event(m); |
359 reset_counter_for_invocation_event(m); |
138 const char* comment = "count"; |
360 const char* comment = "count"; |
139 |
361 |
140 if (!delayCompilationDuringStartup() && canBeCompiled(m) && UseCompiler && CompileBroker::should_compile_new_jobs()) { |
362 if (is_compilation_enabled() && can_be_compiled(m)) { |
141 nmethod* nm = m->code(); |
363 nmethod* nm = m->code(); |
142 if (nm == NULL ) { |
364 if (nm == NULL ) { |
143 const char* comment = "count"; |
365 const char* comment = "count"; |
144 CompileBroker::compile_method(m, InvocationEntryBci, |
366 CompileBroker::compile_method(m, InvocationEntryBci, CompLevel_highest_tier, |
145 m, hot_count, comment, CHECK); |
367 m, hot_count, comment, CHECK); |
146 } else { |
368 } |
147 #ifdef TIERED |
369 } |
148 |
370 } |
149 if (nm->is_compiled_by_c1()) { |
371 |
150 const char* comment = "tier1 overflow"; |
372 void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) { |
151 CompileBroker::compile_method(m, InvocationEntryBci, |
|
152 m, hot_count, comment, CHECK); |
|
153 } |
|
154 #endif // TIERED |
|
155 } |
|
156 } |
|
157 } |
|
158 |
|
159 void SimpleCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) { |
|
160 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
373 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
161 |
374 |
162 int hot_count = m->backedge_count(); |
375 int hot_count = m->backedge_count(); |
163 const char* comment = "backedge_count"; |
376 const char* comment = "backedge_count"; |
164 |
377 |
165 if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) { |
378 if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) { |
166 CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK); |
379 CompileBroker::compile_method(m, bci, CompLevel_highest_tier, |
167 |
380 m, hot_count, comment, CHECK); |
168 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));) |
381 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));) |
169 } |
382 } |
170 } |
383 } |
171 |
|
172 int SimpleCompPolicy::compilation_level(methodHandle m, int branch_bci) |
|
173 { |
|
174 #ifdef TIERED |
|
175 if (!TieredCompilation) { |
|
176 return CompLevel_highest_tier; |
|
177 } |
|
178 if (/* m()->tier1_compile_done() && */ |
|
179 // QQQ HACK FIX ME set tier1_compile_done!! |
|
180 !m()->is_native()) { |
|
181 // Grab the nmethod so it doesn't go away while it's being queried |
|
182 nmethod* code = m()->code(); |
|
183 if (code != NULL && code->is_compiled_by_c1()) { |
|
184 return CompLevel_highest_tier; |
|
185 } |
|
186 } |
|
187 return CompLevel_fast_compile; |
|
188 #else |
|
189 return CompLevel_highest_tier; |
|
190 #endif // TIERED |
|
191 } |
|
192 |
|
193 // StackWalkCompPolicy - walk up stack to find a suitable method to compile |
384 // StackWalkCompPolicy - walk up stack to find a suitable method to compile |
194 |
385 |
195 #ifdef COMPILER2 |
386 #ifdef COMPILER2 |
196 const char* StackWalkCompPolicy::_msg = NULL; |
387 const char* StackWalkCompPolicy::_msg = NULL; |
197 |
388 |
222 RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m); |
413 RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m); |
223 |
414 |
224 if (first->top_method()->code() != NULL) { |
415 if (first->top_method()->code() != NULL) { |
225 // called obsolete method/nmethod -- no need to recompile |
416 // called obsolete method/nmethod -- no need to recompile |
226 if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code()); |
417 if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, first->top_method()->code()); |
227 } else if (compilation_level(m, InvocationEntryBci) == CompLevel_fast_compile) { |
|
228 // Tier1 compilation policy avaoids stack walking. |
|
229 CompileBroker::compile_method(m, InvocationEntryBci, |
|
230 m, hot_count, comment, CHECK); |
|
231 } else { |
418 } else { |
232 if (TimeCompilationPolicy) accumulated_time()->start(); |
419 if (TimeCompilationPolicy) accumulated_time()->start(); |
233 GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50); |
420 GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50); |
234 stack->push(first); |
421 stack->push(first); |
235 RFrame* top = findTopInlinableFrame(stack); |
422 RFrame* top = findTopInlinableFrame(stack); |
236 if (TimeCompilationPolicy) accumulated_time()->stop(); |
423 if (TimeCompilationPolicy) accumulated_time()->stop(); |
237 assert(top != NULL, "findTopInlinableFrame returned null"); |
424 assert(top != NULL, "findTopInlinableFrame returned null"); |
238 if (TraceCompilationPolicy) top->print(); |
425 if (TraceCompilationPolicy) top->print(); |
239 CompileBroker::compile_method(top->top_method(), InvocationEntryBci, |
426 CompileBroker::compile_method(top->top_method(), InvocationEntryBci, CompLevel_highest_tier, |
240 m, hot_count, comment, CHECK); |
427 m, hot_count, comment, CHECK); |
241 } |
428 } |
242 } |
429 } |
243 } |
430 } |
244 |
431 |
245 void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int branch_bci, int loop_top_bci, TRAPS) { |
432 void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, TRAPS) { |
246 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
433 assert(UseCompiler || CompileTheWorld, "UseCompiler should be set by now."); |
247 |
434 |
248 int hot_count = m->backedge_count(); |
435 int hot_count = m->backedge_count(); |
249 const char* comment = "backedge_count"; |
436 const char* comment = "backedge_count"; |
250 |
437 |
251 if (!m->is_not_osr_compilable() && !delayCompilationDuringStartup() && canBeCompiled(m) && CompileBroker::should_compile_new_jobs()) { |
438 if (is_compilation_enabled() && !m->is_not_osr_compilable() && can_be_compiled(m)) { |
252 CompileBroker::compile_method(m, loop_top_bci, m, hot_count, comment, CHECK); |
439 CompileBroker::compile_method(m, bci, CompLevel_highest_tier, m, hot_count, comment, CHECK); |
253 |
440 |
254 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(loop_top_bci));) |
441 NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true));) |
255 } |
442 } |
256 } |
443 } |
257 |
|
258 int StackWalkCompPolicy::compilation_level(methodHandle m, int osr_bci) |
|
259 { |
|
260 int comp_level = CompLevel_full_optimization; |
|
261 if (TieredCompilation && osr_bci == InvocationEntryBci) { |
|
262 if (CompileTheWorld) { |
|
263 // Under CTW, the first compile is tier1, the second tier2 |
|
264 if (m->highest_tier_compile() == CompLevel_none) { |
|
265 comp_level = CompLevel_fast_compile; |
|
266 } |
|
267 } else if (!m->has_osr_nmethod()) { |
|
268 // Before tier1 is done, use invocation_count + backedge_count to |
|
269 // compare against the threshold. After that, the counters may/will |
|
270 // be reset, so rely on the straight interpreter_invocation_count. |
|
271 if (m->highest_tier_compile() == CompLevel_initial_compile) { |
|
272 if (m->interpreter_invocation_count() < Tier2CompileThreshold) { |
|
273 comp_level = CompLevel_fast_compile; |
|
274 } |
|
275 } else if (m->invocation_count() + m->backedge_count() < |
|
276 Tier2CompileThreshold) { |
|
277 comp_level = CompLevel_fast_compile; |
|
278 } |
|
279 } |
|
280 |
|
281 } |
|
282 return comp_level; |
|
283 } |
|
284 |
|
285 |
444 |
286 RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) { |
445 RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) { |
287 // go up the stack until finding a frame that (probably) won't be inlined |
446 // go up the stack until finding a frame that (probably) won't be inlined |
288 // into its caller |
447 // into its caller |
289 RFrame* current = stack->at(0); // current choice for stopping |
448 RFrame* current = stack->at(0); // current choice for stopping |