/*
 * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
             threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - count / 3, 1));
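  // For example (illustrative numbers, derived from the formula above): on a machine
  // with 32 hardware threads, log_cpu = log2(32) = 5 and loglog_cpu = log2(5) = 2,
  // so count = MAX2(5 * 2, 1) * 3 / 2 = 15, which yields 5 C1 and 10 C2 compiler threads.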

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  JavaThread* THREAD = JavaThread::current();
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0, THREAD);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should have been running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // ... and we must have taken the previous measurement at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t, THREAD);
      m->set_prev_event_count(event_count, THREAD);
      m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond
    } else if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
      // If nothing happened for 25ms, zero the rate. Don't modify prev values.
      m->set_rate(0, THREAD);
    }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}
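
// Weight used to order methods in the compile queue: it combines a method's recent
// event rate with its total invocation and backedge counts, so compare_methods() and
// select_task() prefer the hottest methods.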
double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}

// Apply heuristics and return true if x should be compiled before y.
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
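// That is, do the invocation and backedge count deltas recorded in its MethodData
// already satisfy the (unscaled) predicate for promotion to full optimization?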
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    MethodData* mdo = method->method_data();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        CompileTaskWrapper ctw(task); // Frees the task
        compile_queue->remove(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;
  return k;
}
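
// Worked example (illustrative, assuming Tier3LoadFeedback keeps its usual default of 5):
// with 2 C1 compiler threads and 15 methods queued at the profiled level,
// k = 15 / (5 * 2) + 1 = 2.5, i.e. the level-3 thresholds are effectively scaled up 2.5x.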

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on the compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}

/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition
 *    to level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long
 *    enough that profiling completes at level 0 while the method waits. We then change the
 *    compilation level of the queued method to 2, because it will run much faster without
 *    full profiling while C2 is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    Once a method has been compiled with C1 it can be identified as trivial and be compiled
 *    to level 1. These transitions can also occur if a method can't be compiled with C2 but
 *    can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the
 *    interpreter) or because of a deopt that didn't require reprofiling (compilation won't
 *    happen in this case because the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different
 * loops are possible.
 *
 */

// Common transition function. Given a predicate, determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If the OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method, it means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit a compile.
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Note that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if an MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check whether there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt; we need to modify the enclosing method to avoid deopts.
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an OSR method; just make it not entrant and recompile later if needed.
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED