Fri, 28 Mar 2014 10:13:37 -0700
8035828: Turn on @Stable support in VM
Reviewed-by: jrose, twisti
/*
 * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "runtime/advancedThresholdPolicy.hpp"
#include "runtime/simpleThresholdPolicy.inline.hpp"

#ifdef TIERED
// Print an event.
void AdvancedThresholdPolicy::print_specific(EventType type, methodHandle mh, methodHandle imh,
                                             int bci, CompLevel level) {
  tty->print(" rate=");
  if (mh->prev_time() == 0) tty->print("n/a");
  else tty->print("%f", mh->rate());

  tty->print(" k=%.2lf,%.2lf", threshold_scale(CompLevel_full_profile, Tier3LoadFeedback),
                               threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback));
}
void AdvancedThresholdPolicy::initialize() {
  // Turn on ergonomic compiler count selection
  if (FLAG_IS_DEFAULT(CICompilerCountPerCPU) && FLAG_IS_DEFAULT(CICompilerCount)) {
    FLAG_SET_DEFAULT(CICompilerCountPerCPU, true);
  }
  int count = CICompilerCount;
  if (CICompilerCountPerCPU) {
    // Simple log n seems to grow too slowly for tiered, try something faster: log n * log log n
    int log_cpu = log2_intptr(os::active_processor_count());
    int loglog_cpu = log2_intptr(MAX2(log_cpu, 1));
    count = MAX2(log_cpu * loglog_cpu, 1) * 3 / 2;
  }

  set_c1_count(MAX2(count / 3, 1));
  set_c2_count(MAX2(count - count / 3, 1));
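  // For example, on an 8-CPU machine log_cpu = 3 and loglog_cpu = 1, so
  // count = 3 * 3 / 2 = 4, which splits into 1 C1 and 3 C2 compiler threads.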

  // Some inlining tuning
#ifdef X86
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2000);
  }
#endif

#ifdef SPARC
  if (FLAG_IS_DEFAULT(InlineSmallCode)) {
    FLAG_SET_DEFAULT(InlineSmallCode, 2500);
  }
#endif

  set_increase_threshold_at_ratio();
  set_start_time(os::javaTimeMillis());
}

// update_rate() is called from select_task() while holding a compile queue lock.
void AdvancedThresholdPolicy::update_rate(jlong t, Method* m) {
  JavaThread* THREAD = JavaThread::current();
  if (is_old(m)) {
    // We don't remove old methods from the queue,
    // so we can just zero the rate.
    m->set_rate(0, THREAD);
    return;
  }

  // We don't update the rate if we've just come out of a safepoint.
  // delta_s is the time since the last safepoint in milliseconds.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - (m->prev_time() != 0 ? m->prev_time() : start_time()); // milliseconds since the last measurement
  // How many events were there since the last time?
  int event_count = m->invocation_count() + m->backedge_count();
  int delta_e = event_count - m->prev_event_count();

  // We should be running for at least 1ms.
  if (delta_s >= TieredRateUpdateMinTime) {
    // And we must've taken the previous point at least 1ms before.
    if (delta_t >= TieredRateUpdateMinTime && delta_e > 0) {
      m->set_prev_time(t, THREAD);
      m->set_prev_event_count(event_count, THREAD);
      m->set_rate((float)delta_e / (float)delta_t, THREAD); // Rate is events per millisecond
    } else
      if (delta_t > TieredRateUpdateMaxTime && delta_e == 0) {
        // If nothing happened for 25ms, zero the rate. Don't modify prev values.
        m->set_rate(0, THREAD);
      }
  }
}

// Check if this method has been stale for a given number of milliseconds.
// See select_task().
bool AdvancedThresholdPolicy::is_stale(jlong t, jlong timeout, Method* m) {
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - m->prev_time();
  if (delta_t > timeout && delta_s > timeout) {
    int event_count = m->invocation_count() + m->backedge_count();
    int delta_e = event_count - m->prev_event_count();
    // Return true if there were no events.
    return delta_e == 0;
  }
  return false;
}

// We don't remove old methods from the compile queue even if they have
// very low activity. See select_task().
bool AdvancedThresholdPolicy::is_old(Method* method) {
  return method->invocation_count() > 50000 || method->backedge_count() > 500000;
}
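
// weight() below is used by compare_methods() to order the compile queue: it combines
// the recent event rate with the total work the method has done so far. For example, a
// method with rate 2.0 events/ms, 1000 invocations and 100 backedges weighs
// (2 + 1) * (1001 * 101) = 303303.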
double AdvancedThresholdPolicy::weight(Method* method) {
  return (method->rate() + 1) * ((method->invocation_count() + 1) * (method->backedge_count() + 1));
}

// Apply heuristics and return true if x should be compiled before y
bool AdvancedThresholdPolicy::compare_methods(Method* x, Method* y) {
  if (x->highest_comp_level() > y->highest_comp_level()) {
    // recompilation after deopt
    return true;
  } else
    if (x->highest_comp_level() == y->highest_comp_level()) {
      if (weight(x) > weight(y)) {
        return true;
      }
    }
  return false;
}

// Is method profiled enough?
bool AdvancedThresholdPolicy::is_method_profiled(Method* method) {
  MethodData* mdo = method->method_data();
  if (mdo != NULL) {
    int i = mdo->invocation_count_delta();
    int b = mdo->backedge_count_delta();
    return call_predicate_helper<CompLevel_full_profile>(i, b, 1);
  }
  return false;
}

// Called with the queue locked and with at least one element
CompileTask* AdvancedThresholdPolicy::select_task(CompileQueue* compile_queue) {
  CompileTask *max_task = NULL;
  Method* max_method = NULL;
  jlong t = os::javaTimeMillis();
  // Iterate through the queue and find a method with a maximum rate.
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    CompileTask* next_task = task->next();
    Method* method = task->method();
    MethodData* mdo = method->method_data();
    update_rate(t, method);
    if (max_task == NULL) {
      max_task = task;
      max_method = method;
    } else {
      // If a method has been stale for some time, remove it from the queue.
      if (is_stale(t, TieredCompileTaskTimeout, method) && !is_old(method)) {
        if (PrintTieredEvents) {
          print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level());
        }
        CompileTaskWrapper ctw(task); // Frees the task
        compile_queue->remove(task);
        method->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Select a method with a higher rate
      if (compare_methods(method, max_method)) {
        max_task = task;
        max_method = method;
      }
    }
    task = next_task;
  }

  if (max_task->comp_level() == CompLevel_full_profile && TieredStopAtLevel > CompLevel_full_profile
      && is_method_profiled(max_method)) {
    max_task->set_comp_level(CompLevel_limited_profile);
    if (PrintTieredEvents) {
      print_event(UPDATE_IN_QUEUE, max_method, max_method, max_task->osr_bci(), (CompLevel)max_task->comp_level());
    }
  }

  return max_task;
}

double AdvancedThresholdPolicy::threshold_scale(CompLevel level, int feedback_k) {
  double queue_size = CompileBroker::queue_size(level);
  int comp_count = compiler_count(level);
  double k = queue_size / (feedback_k * comp_count) + 1;

  // Increase C1 compile threshold when the code cache is filled more
  // than specified by IncreaseFirstTierCompileThresholdAt percentage.
  // The main intention is to keep enough free space for C2 compiled code
  // to achieve peak performance if the code cache is under stress.
  if ((TieredStopAtLevel == CompLevel_full_optimization) && (level != CompLevel_full_optimization)) {
    double current_reverse_free_ratio = CodeCache::reverse_free_ratio();
    if (current_reverse_free_ratio > _increase_threshold_at_ratio) {
      k *= exp(current_reverse_free_ratio - _increase_threshold_at_ratio);
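      // For instance, exceeding _increase_threshold_at_ratio by 1.0 multiplies k
      // by e (about 2.7), and exceeding it by 2.0 multiplies k by about 7.4.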
    }
  }
  return k;
}

// Call and loop predicates determine whether a transition to a higher
// compilation level should be performed (pointers to predicate functions
// are passed to common()).
// Tier?LoadFeedback is basically a coefficient that determines how many
// methods per compiler thread can be in the queue before the threshold
// values double.
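// For example, with Tier3LoadFeedback at 5 and two C1 compiler threads, ten methods
// waiting in the C1 queue give k = 10 / (5 * 2) + 1 = 2, i.e. the level-3 thresholds
// are effectively doubled.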
bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return loop_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level) {
  switch(cur_level) {
  case CompLevel_none:
  case CompLevel_limited_profile: {
    double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback);
    return call_predicate_helper<CompLevel_none>(i, b, k);
  }
  case CompLevel_full_profile: {
    double k = threshold_scale(CompLevel_full_optimization, Tier4LoadFeedback);
    return call_predicate_helper<CompLevel_full_profile>(i, b, k);
  }
  default:
    return true;
  }
}

// If a method is old enough and is still in the interpreter we would want to
// start profiling without waiting for the compiled method to arrive.
// We also take the load on compilers into account.
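// For example, with Tier0ProfilingStartPercentage at 200 the scale factor k is 2.0,
// so (provided the C2 queue is short enough) an interpreted method starts building an
// MDO once its counters reach roughly twice the regular level-3 thresholds.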
bool AdvancedThresholdPolicy::should_create_mdo(Method* method, CompLevel cur_level) {
  if (cur_level == CompLevel_none &&
      CompileBroker::queue_size(CompLevel_full_optimization) <=
      Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
    int i = method->invocation_count();
    int b = method->backedge_count();
    double k = Tier0ProfilingStartPercentage / 100.0;
    return call_predicate_helper<CompLevel_none>(i, b, k) || loop_predicate_helper<CompLevel_none>(i, b, k);
  }
  return false;
}

// Inlining control: if we're compiling a profiled method with C1 and the callee
// is known to have OSRed in a C2 version, don't inline it.
bool AdvancedThresholdPolicy::should_not_inline(ciEnv* env, ciMethod* callee) {
  CompLevel comp_level = (CompLevel)env->comp_level();
  if (comp_level == CompLevel_full_profile ||
      comp_level == CompLevel_limited_profile) {
    return callee->highest_osr_comp_level() == CompLevel_full_optimization;
  }
  return false;
}

// Create MDO if necessary.
void AdvancedThresholdPolicy::create_mdo(methodHandle mh, JavaThread* THREAD) {
  if (mh->is_native() || mh->is_abstract() || mh->is_accessor()) return;
  if (mh->method_data() == NULL) {
    Method::build_interpreter_method_data(mh, CHECK_AND_CLEAR);
  }
}

/*
 * Method states:
 *   0 - interpreter (CompLevel_none)
 *   1 - pure C1 (CompLevel_simple)
 *   2 - C1 with invocation and backedge counting (CompLevel_limited_profile)
 *   3 - C1 with full profiling (CompLevel_full_profile)
 *   4 - C2 (CompLevel_full_optimization)
 *
 * Common state transition patterns:
 * a. 0 -> 3 -> 4.
 *    The most common path. But note that even in this straightforward case
 *    profiling can start at level 0 and finish at level 3.
 *
 * b. 0 -> 2 -> 3 -> 4.
 *    This case occurs when the load on C2 is deemed too high. So, instead of transitioning
 *    into state 3 directly and over-profiling while a method is in the C2 queue we transition to
 *    level 2 and wait until the load on C2 decreases. This path is disabled for OSRs.
 *
 * c. 0 -> (3->2) -> 4.
 *    In this case we enqueue a method for compilation at level 3, but the C1 queue is long enough
 *    to enable the profiling to fully occur at level 0. In this case we change the compilation level
 *    of the method to 2, because it'll allow it to run much faster without full profiling while C2
 *    is compiling.
 *
 * d. 0 -> 3 -> 1 or 0 -> 2 -> 1.
 *    After a method was once compiled with C1 it can be identified as trivial and be compiled to
 *    level 1. These transitions can also occur if a method can't be compiled with C2 but can with C1.
 *
 * e. 0 -> 4.
 *    This can happen if a method fails C1 compilation (it will still be profiled in the interpreter)
 *    or because of a deopt that didn't require reprofiling (compilation won't happen in this case because
 *    the compiled version already exists).
 *
 * Note that since state 0 can be reached from any other state via deoptimization, different loops
 * are possible.
 *
 */
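// (The in-queue 3->2 switch in pattern (c) is performed by select_task() above, which
// lowers a level-3 task to CompLevel_limited_profile once the method has been fully
// profiled in the interpreter.)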

// Common transition function. Given a predicate, determines whether a method
// should transition to another level.
CompLevel AdvancedThresholdPolicy::common(Predicate p, Method* method, CompLevel cur_level, bool disable_feedback) {
  CompLevel next_level = cur_level;
  int i = method->invocation_count();
  int b = method->backedge_count();

  if (is_trivial(method)) {
    next_level = CompLevel_simple;
  } else {
    switch(cur_level) {
    case CompLevel_none:
      // If we were at full profile level, would we switch to full opt?
      if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) {
        next_level = CompLevel_full_optimization;
      } else if ((this->*p)(i, b, cur_level)) {
        // C1-generated fully profiled code is about 30% slower than the limited profile
        // code that has only invocation and backedge counters. The observation is that
        // if the C2 queue is large enough we can spend too much time in the fully profiled code
        // while waiting for C2 to pick the method from the queue. To alleviate this problem
        // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long
        // we choose to compile a limited-profile version and then recompile with full profiling
        // when the load on C2 goes down.
        if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) >
            Tier3DelayOn * compiler_count(CompLevel_full_optimization)) {
          next_level = CompLevel_limited_profile;
        } else {
          next_level = CompLevel_full_profile;
        }
      }
      break;
    case CompLevel_limited_profile:
      if (is_method_profiled(method)) {
        // Special case: we got here because this method was fully profiled in the interpreter.
        next_level = CompLevel_full_optimization;
      } else {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <=
                                     Tier3DelayOff * compiler_count(CompLevel_full_optimization) &&
                                     (this->*p)(i, b, cur_level))) {
              next_level = CompLevel_full_profile;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    case CompLevel_full_profile:
      {
        MethodData* mdo = method->method_data();
        if (mdo != NULL) {
          if (mdo->would_profile()) {
            int mdo_i = mdo->invocation_count_delta();
            int mdo_b = mdo->backedge_count_delta();
            if ((this->*p)(mdo_i, mdo_b, cur_level)) {
              next_level = CompLevel_full_optimization;
            }
          } else {
            next_level = CompLevel_full_optimization;
          }
        }
      }
      break;
    }
  }
  return MIN2(next_level, (CompLevel)TieredStopAtLevel);
}

// Determine if a method should be compiled with a normal entry point at a different level.
CompLevel AdvancedThresholdPolicy::call_event(Method* method, CompLevel cur_level) {
  CompLevel osr_level = MIN2((CompLevel) method->highest_osr_comp_level(),
                             common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true));
  CompLevel next_level = common(&AdvancedThresholdPolicy::call_predicate, method, cur_level);

  // If OSR method level is greater than the regular method level, the levels should be
  // equalized by raising the regular method level in order to avoid OSRs during each
  // invocation of the method.
  if (osr_level == CompLevel_full_optimization && cur_level == CompLevel_full_profile) {
    MethodData* mdo = method->method_data();
    guarantee(mdo != NULL, "MDO should not be NULL");
    if (mdo->invocation_count() >= 1) {
      next_level = CompLevel_full_optimization;
    }
  } else {
    next_level = MAX2(osr_level, next_level);
  }
  return next_level;
}

// Determine if we should do an OSR compilation of a given method.
CompLevel AdvancedThresholdPolicy::loop_event(Method* method, CompLevel cur_level) {
  CompLevel next_level = common(&AdvancedThresholdPolicy::loop_predicate, method, cur_level, true);
  if (cur_level == CompLevel_none) {
    // If there is a live OSR method that means that we deopted to the interpreter
    // for the transition.
    CompLevel osr_level = MIN2((CompLevel)method->highest_osr_comp_level(), next_level);
    if (osr_level > CompLevel_none) {
      return osr_level;
    }
  }
  return next_level;
}

// Update the rate and submit compile
void AdvancedThresholdPolicy::submit_compile(methodHandle mh, int bci, CompLevel level, JavaThread* thread) {
  int hot_count = (bci == InvocationEntryBci) ? mh->invocation_count() : mh->backedge_count();
  update_rate(os::javaTimeMillis(), mh());
  CompileBroker::compile_method(mh, bci, level, mh, hot_count, "tiered", thread);
}

// Handle the invocation event.
void AdvancedThresholdPolicy::method_invocation_event(methodHandle mh, methodHandle imh,
                                                      CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
    CompLevel next_level = call_event(mh(), level);
    if (next_level != level) {
      compile(mh, InvocationEntryBci, next_level, thread);
    }
  }
}

// Handle the back branch event. Notice that we can compile the method
// with a regular entry from here.
void AdvancedThresholdPolicy::method_back_branch_event(methodHandle mh, methodHandle imh,
                                                       int bci, CompLevel level, nmethod* nm, JavaThread* thread) {
  if (should_create_mdo(mh(), level)) {
    create_mdo(mh, thread);
  }
  // Check if MDO should be created for the inlined method
  if (should_create_mdo(imh(), level)) {
    create_mdo(imh, thread);
  }

  if (is_compilation_enabled()) {
    CompLevel next_osr_level = loop_event(imh(), level);
    CompLevel max_osr_level = (CompLevel)imh->highest_osr_comp_level();
    // At the very least compile the OSR version
    if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_osr_level != level) {
      compile(imh, bci, next_osr_level, thread);
    }

    // Use the loop event as an opportunity to also check whether there have been
    // enough calls.
    CompLevel cur_level, next_level;
    if (mh() != imh()) { // If there is an enclosing method
      guarantee(nm != NULL, "Should have nmethod here");
      cur_level = comp_level(mh());
      next_level = call_event(mh(), cur_level);

      if (max_osr_level == CompLevel_full_optimization) {
        // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts
        bool make_not_entrant = false;
        if (nm->is_osr_method()) {
          // This is an osr method, just make it not entrant and recompile later if needed
          make_not_entrant = true;
        } else {
          if (next_level != CompLevel_full_optimization) {
            // next_level is not full opt, so we need to recompile the
            // enclosing method without the inlinee
            cur_level = CompLevel_none;
            make_not_entrant = true;
          }
        }
        if (make_not_entrant) {
          if (PrintTieredEvents) {
            int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci;
            print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level);
          }
          nm->make_not_entrant();
        }
      }
      if (!CompileBroker::compilation_is_in_queue(mh, InvocationEntryBci)) {
        // Fix up next_level if necessary to avoid deopts
        if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) {
          next_level = CompLevel_full_profile;
        }
        if (cur_level != next_level) {
          compile(mh, InvocationEntryBci, next_level, thread);
        }
      }
    } else {
      cur_level = comp_level(imh());
      next_level = call_event(imh(), cur_level);
      if (!CompileBroker::compilation_is_in_queue(imh, bci) && next_level != cur_level) {
        compile(imh, InvocationEntryBci, next_level, thread);
      }
    }
  }
}

#endif // TIERED