Sun, 10 Feb 2013 22:35:38 -0800
8006430: TraceTypeProfile is a product flag while it should be a diagnostic flag
Summary: make sure all diagnostic and experimental flag kinds are checked in Flag::is_unlocked()
Reviewed-by: kvn
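As context for the summary line: a minimal sketch of the kind of check it describes. The Flag layout, kind strings, and control flow here are assumptions for illustration only (UnlockDiagnosticVMOptions and UnlockExperimentalVMOptions are real HotSpot flags, but this is not the actual patch):

#include <cstring>

// Illustrative stand-ins for the real globals; plain bools just for the sketch.
static bool UnlockDiagnosticVMOptions   = false;
static bool UnlockExperimentalVMOptions = false;

struct Flag {
  const char* kind;   // e.g. "{product}", "{diagnostic}", "{C2 diagnostic}"
  bool is_unlocked() const {
    // Cover every diagnostic kind, not just the single "{diagnostic}" literal,
    // and likewise for the experimental kinds.
    if (strstr(kind, "diagnostic") != NULL)   return UnlockDiagnosticVMOptions;
    if (strstr(kind, "experimental") != NULL) return UnlockExperimentalVMOptions;
    return true;
  }
};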
/*
 * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}

void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - count / 3, 1));
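  // Illustrative arithmetic (hypothetical machine, not part of the policy): on a
  // 16-way box, log_cpu = log2(16) = 4 and loglog_cpu = log2(4) = 2, so
  // count = MAX2(4 * 2, 1) * 3 / 2 = 12, which the preceding split turns into
  // c1_count = MAX2(12 / 3, 1) = 4 and c2_count = MAX2(12 - 4, 1) = 8.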

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must have taken the previous measurement at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t);
      m->set_prev_event_count(event_count);
      m->set_rate((float)delta_e / (float)delta_t); // Rate is events per millisecond
    } else
    if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
      // If nothing happened for 25ms, zero the rate. Don't modify prev values.
      m->set_rate(0);
    }
  }
}
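
// Illustrative arithmetic (hypothetical counts): if a method accumulated 400 new
// invocation + backedge events over delta_t = 200 ms, the stored rate becomes
// 400 / 200 = 2 events per millisecond; if no events arrive for longer than
// TieredRateUpdateMaxTime, the rate decays straight to 0.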

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}

double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}
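
// Illustrative arithmetic (hypothetical counters): a method with rate = 2 events/ms,
// 1000 invocations and 5000 backedges weighs (2 + 1) * ((1000 + 1) * (5000 + 1)),
// roughly 1.5e7, while a colder method with rate = 0 and counts of 100/100 weighs
// about 1.0e4, so at equal highest_comp_level the hotter one wins in
// compare_methods() below.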

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else
  if (x->highest_comp_level() == y->highest_comp_level()) {
    if (weight(x) > weight(y)) {
      return true;
    }
  }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    MethodData* mdo = method->method_data();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        CompileTaskWrapper ctw(task); // Frees the task
        compile_queue->remove(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;
  return k;
}
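
// Illustrative arithmetic (hypothetical queue state): with 12 tasks queued at the
// target level, a feedback coefficient of 5 (e.g. Tier3LoadFeedback) and 2 compiler
// threads for that level, k = 12 / (5 * 2) + 1 = 2.2, i.e. the tier thresholds are
// scaled up by 2.2x for as long as the queue stays that deep.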

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}
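
// Illustrative arithmetic: with Tier0ProfilingStartPercentage at, say, 200,
// k = 200 / 100.0 = 2.0, so an interpreted method starts building an MDO once its
// counters reach twice the usual tier-3 thresholds, provided the C2 queue is no
// longer than Tier3DelayOn tasks per C2 compiler thread.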

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}

/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue, we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    for the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2, because that allows it to run much faster without full profiling while C2
 *    is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method has been compiled with C1 once, it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */

// Common transition function. Given a predicate determines if a method should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited profiled version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}
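
// Illustrative note on the final clamp: with -XX:TieredStopAtLevel=1, for example,
// any level computed above (including CompLevel_full_optimization) is cut back to
// CompLevel_simple, so only plain C1 code is ever produced by this policy.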

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check if there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt, so we need to modify the enclosing method to avoid deopts
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an osr method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED