Fri, 29 Apr 2016 00:06:10 +0800
Added MIPS 64-bit port.
aoqi@0 | 1 | /* |
aoqi@0 | 2 | * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved. |
aoqi@0 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
aoqi@0 | 4 | * |
aoqi@0 | 5 | * This code is free software; you can redistribute it and/or modify it |
aoqi@0 | 6 | * under the terms of the GNU General Public License version 2 only, as |
aoqi@0 | 7 | * published by the Free Software Foundation. |
aoqi@0 | 8 | * |
aoqi@0 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
aoqi@0 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
aoqi@0 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
aoqi@0 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
aoqi@0 | 13 | * accompanied this code). |
aoqi@0 | 14 | * |
aoqi@0 | 15 | * You should have received a copy of the GNU General Public License version |
aoqi@0 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
aoqi@0 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
aoqi@0 | 18 | * |
aoqi@0 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
aoqi@0 | 20 | * or visit www.oracle.com if you need additional information or have any |
aoqi@0 | 21 | * questions. |
aoqi@0 | 22 | * |
aoqi@0 | 23 | */ |
aoqi@0 | 24 | |
aoqi@1 | 25 | /* |
aoqi@1 | 26 | * This file has been modified by Loongson Technology in 2015. These |
aoqi@1 | 27 | * modifications are Copyright (c) 2015 Loongson Technology, and are made |
aoqi@1 | 28 | * available on the same license terms set forth above. |
aoqi@1 | 29 | */ |
aoqi@1 | 30 | |
aoqi@0 | 31 | #include "precompiled.hpp" |
aoqi@0 | 32 | #include "code/compiledIC.hpp" |
aoqi@0 | 33 | #include "code/nmethod.hpp" |
aoqi@0 | 34 | #include "code/scopeDesc.hpp" |
aoqi@0 | 35 | #include "compiler/compilerOracle.hpp" |
aoqi@0 | 36 | #include "interpreter/interpreter.hpp" |
aoqi@0 | 37 | #include "oops/methodData.hpp" |
aoqi@0 | 38 | #include "oops/method.hpp" |
aoqi@0 | 39 | #include "oops/oop.inline.hpp" |
aoqi@0 | 40 | #include "prims/nativeLookup.hpp" |
aoqi@0 | 41 | #include "runtime/advancedThresholdPolicy.hpp" |
aoqi@0 | 42 | #include "runtime/compilationPolicy.hpp" |
aoqi@0 | 43 | #include "runtime/frame.hpp" |
aoqi@0 | 44 | #include "runtime/handles.inline.hpp" |
aoqi@0 | 45 | #include "runtime/rframe.hpp" |
aoqi@0 | 46 | #include "runtime/simpleThresholdPolicy.hpp" |
aoqi@0 | 47 | #include "runtime/stubRoutines.hpp" |
aoqi@0 | 48 | #include "runtime/thread.hpp" |
aoqi@0 | 49 | #include "runtime/timer.hpp" |
aoqi@0 | 50 | #include "runtime/vframe.hpp" |
aoqi@0 | 51 | #include "runtime/vm_operations.hpp" |
aoqi@0 | 52 | #include "utilities/events.hpp" |
aoqi@0 | 53 | #include "utilities/globalDefinitions.hpp" |
aoqi@0 | 54 | |
// Static state shared by all compilation policy implementations.
CompilationPolicy* CompilationPolicy::_policy;          // the installed policy singleton (set in compilationPolicy_init)
elapsedTimer CompilationPolicy::_accumulated_time;      // time accumulated by policy work (see print_time)
bool CompilationPolicy::_in_vm_startup;                 // true while the startup compilation delay is active
aoqi@0 | 58 | |
aoqi@0 | 59 | // Determine compilation policy based on command line argument |
aoqi@0 | 60 | void compilationPolicy_init() { |
aoqi@0 | 61 | CompilationPolicy::set_in_vm_startup(DelayCompilationDuringStartup); |
aoqi@0 | 62 | |
aoqi@0 | 63 | switch(CompilationPolicyChoice) { |
aoqi@0 | 64 | case 0: |
aoqi@0 | 65 | CompilationPolicy::set_policy(new SimpleCompPolicy()); |
aoqi@0 | 66 | break; |
aoqi@0 | 67 | |
aoqi@0 | 68 | case 1: |
aoqi@0 | 69 | #ifdef COMPILER2 |
aoqi@0 | 70 | CompilationPolicy::set_policy(new StackWalkCompPolicy()); |
aoqi@0 | 71 | #else |
aoqi@0 | 72 | Unimplemented(); |
aoqi@0 | 73 | #endif |
aoqi@0 | 74 | break; |
aoqi@0 | 75 | case 2: |
aoqi@0 | 76 | #ifdef TIERED |
aoqi@0 | 77 | CompilationPolicy::set_policy(new SimpleThresholdPolicy()); |
aoqi@0 | 78 | #else |
aoqi@0 | 79 | Unimplemented(); |
aoqi@0 | 80 | #endif |
aoqi@0 | 81 | break; |
aoqi@0 | 82 | case 3: |
aoqi@0 | 83 | #ifdef TIERED |
aoqi@0 | 84 | CompilationPolicy::set_policy(new AdvancedThresholdPolicy()); |
aoqi@0 | 85 | #else |
aoqi@0 | 86 | Unimplemented(); |
aoqi@0 | 87 | #endif |
aoqi@0 | 88 | break; |
aoqi@0 | 89 | default: |
aoqi@0 | 90 | fatal("CompilationPolicyChoice must be in the range: [0-3]"); |
aoqi@0 | 91 | } |
aoqi@0 | 92 | CompilationPolicy::policy()->initialize(); |
aoqi@0 | 93 | } |
aoqi@0 | 94 | |
aoqi@0 | 95 | void CompilationPolicy::completed_vm_startup() { |
aoqi@0 | 96 | if (TraceCompilationPolicy) { |
aoqi@0 | 97 | tty->print("CompilationPolicy: completed vm startup.\n"); |
aoqi@0 | 98 | } |
aoqi@0 | 99 | _in_vm_startup = false; |
aoqi@0 | 100 | } |
aoqi@0 | 101 | |
aoqi@0 | 102 | // Returns true if m must be compiled before executing it |
aoqi@0 | 103 | // This is intended to force compiles for methods (usually for |
aoqi@0 | 104 | // debugging) that would otherwise be interpreted for some reason. |
aoqi@0 | 105 | bool CompilationPolicy::must_be_compiled(methodHandle m, int comp_level) { |
aoqi@0 | 106 | // Don't allow Xcomp to cause compiles in replay mode |
aoqi@0 | 107 | if (ReplayCompiles) return false; |
aoqi@0 | 108 | |
aoqi@0 | 109 | if (m->has_compiled_code()) return false; // already compiled |
aoqi@0 | 110 | if (!can_be_compiled(m, comp_level)) return false; |
aoqi@0 | 111 | |
aoqi@0 | 112 | return !UseInterpreter || // must compile all methods |
aoqi@0 | 113 | (UseCompiler && AlwaysCompileLoopMethods && m->has_loops() && CompileBroker::should_compile_new_jobs()); // eagerly compile loop methods |
aoqi@0 | 114 | } |
aoqi@0 | 115 | |
aoqi@0 | 116 | // Returns true if m is allowed to be compiled |
aoqi@0 | 117 | bool CompilationPolicy::can_be_compiled(methodHandle m, int comp_level) { |
aoqi@0 | 118 | // allow any levels for WhiteBox |
aoqi@0 | 119 | assert(WhiteBoxAPI || comp_level == CompLevel_all || is_compile(comp_level), "illegal compilation level"); |
aoqi@0 | 120 | |
aoqi@0 | 121 | if (m->is_abstract()) return false; |
aoqi@0 | 122 | if (DontCompileHugeMethods && m->code_size() > HugeMethodLimit) return false; |
aoqi@0 | 123 | |
aoqi@0 | 124 | // Math intrinsics should never be compiled as this can lead to |
aoqi@0 | 125 | // monotonicity problems because the interpreter will prefer the |
aoqi@0 | 126 | // compiled code to the intrinsic version. This can't happen in |
aoqi@0 | 127 | // production because the invocation counter can't be incremented |
aoqi@0 | 128 | // but we shouldn't expose the system to this problem in testing |
aoqi@0 | 129 | // modes. |
aoqi@0 | 130 | if (!AbstractInterpreter::can_be_compiled(m)) { |
aoqi@0 | 131 | return false; |
aoqi@0 | 132 | } |
aoqi@0 | 133 | if (comp_level == CompLevel_all) { |
aoqi@0 | 134 | if (TieredCompilation) { |
aoqi@0 | 135 | // enough to be compilable at any level for tiered |
aoqi@0 | 136 | return !m->is_not_compilable(CompLevel_simple) || !m->is_not_compilable(CompLevel_full_optimization); |
aoqi@0 | 137 | } else { |
aoqi@0 | 138 | // must be compilable at available level for non-tiered |
aoqi@0 | 139 | return !m->is_not_compilable(CompLevel_highest_tier); |
aoqi@0 | 140 | } |
aoqi@0 | 141 | } else if (is_compile(comp_level)) { |
aoqi@0 | 142 | return !m->is_not_compilable(comp_level); |
aoqi@0 | 143 | } |
aoqi@0 | 144 | return false; |
aoqi@0 | 145 | } |
aoqi@0 | 146 | |
aoqi@0 | 147 | // Returns true if m is allowed to be osr compiled |
aoqi@0 | 148 | bool CompilationPolicy::can_be_osr_compiled(methodHandle m, int comp_level) { |
aoqi@0 | 149 | bool result = false; |
aoqi@0 | 150 | if (comp_level == CompLevel_all) { |
aoqi@0 | 151 | if (TieredCompilation) { |
aoqi@0 | 152 | // enough to be osr compilable at any level for tiered |
aoqi@0 | 153 | result = !m->is_not_osr_compilable(CompLevel_simple) || !m->is_not_osr_compilable(CompLevel_full_optimization); |
aoqi@0 | 154 | } else { |
aoqi@0 | 155 | // must be osr compilable at available level for non-tiered |
aoqi@0 | 156 | result = !m->is_not_osr_compilable(CompLevel_highest_tier); |
aoqi@0 | 157 | } |
aoqi@0 | 158 | } else if (is_compile(comp_level)) { |
aoqi@0 | 159 | result = !m->is_not_osr_compilable(comp_level); |
aoqi@0 | 160 | } |
aoqi@0 | 161 | return (result && can_be_compiled(m, comp_level)); |
aoqi@0 | 162 | } |
aoqi@0 | 163 | |
aoqi@0 | 164 | bool CompilationPolicy::is_compilation_enabled() { |
aoqi@0 | 165 | // NOTE: CompileBroker::should_compile_new_jobs() checks for UseCompiler |
aoqi@0 | 166 | return !delay_compilation_during_startup() && CompileBroker::should_compile_new_jobs(); |
aoqi@0 | 167 | } |
aoqi@0 | 168 | |
aoqi@0 | 169 | #ifndef PRODUCT |
// Debug-only report of the total time spent in compilation policy work.
void CompilationPolicy::print_time() {
  tty->print_cr ("Accumulated compilationPolicy times:");
  tty->print_cr ("---------------------------");
  tty->print_cr (" Total: %3.3f sec.", _accumulated_time.seconds());
}
aoqi@0 | 175 | |
aoqi@0 | 176 | void NonTieredCompPolicy::trace_osr_completion(nmethod* osr_nm) { |
aoqi@0 | 177 | if (TraceOnStackReplacement) { |
aoqi@0 | 178 | if (osr_nm == NULL) tty->print_cr("compilation failed"); |
aoqi@0 | 179 | else tty->print_cr("nmethod " INTPTR_FORMAT, p2i(osr_nm)); |
aoqi@0 | 180 | } |
aoqi@0 | 181 | } |
aoqi@0 | 182 | #endif // !PRODUCT |
aoqi@0 | 183 | |
aoqi@0 | 184 | void NonTieredCompPolicy::initialize() { |
aoqi@0 | 185 | // Setup the compiler thread numbers |
aoqi@0 | 186 | if (CICompilerCountPerCPU) { |
aoqi@0 | 187 | // Example: if CICompilerCountPerCPU is true, then we get |
aoqi@0 | 188 | // max(log2(8)-1,1) = 2 compiler threads on an 8-way machine. |
aoqi@0 | 189 | // May help big-app startup time. |
aoqi@0 | 190 | _compiler_count = MAX2(log2_intptr(os::active_processor_count())-1,1); |
aoqi@0 | 191 | FLAG_SET_ERGO(intx, CICompilerCount, _compiler_count); |
aoqi@0 | 192 | } else { |
aoqi@0 | 193 | _compiler_count = CICompilerCount; |
aoqi@0 | 194 | } |
aoqi@0 | 195 | } |
aoqi@0 | 196 | |
aoqi@0 | 197 | // Note: this policy is used ONLY if TieredCompilation is off. |
aoqi@0 | 198 | // compiler_count() behaves the following way: |
aoqi@0 | 199 | // - with TIERED build (with both COMPILER1 and COMPILER2 defined) it should return |
aoqi@0 | 200 | // zero for the c1 compilation levels, hence the particular ordering of the |
aoqi@0 | 201 | // statements. |
aoqi@0 | 202 | // - the same should happen when COMPILER2 is defined and COMPILER1 is not |
aoqi@0 | 203 | // (server build without TIERED defined). |
aoqi@0 | 204 | // - if only COMPILER1 is defined (client build), zero should be returned for |
aoqi@0 | 205 | // the c2 level. |
aoqi@0 | 206 | // - if neither is defined - always return zero. |
int NonTieredCompPolicy::compiler_count(CompLevel comp_level) {
  assert(!TieredCompilation, "This policy should not be used with TieredCompilation");
#ifdef COMPILER2
  // When COMPILER2 is defined this section returns on BOTH paths, so in a
  // TIERED build (both compilers defined) c1 levels fall into the else and
  // report zero threads — see the ordering note in the comment above.
  if (is_c2_compile(comp_level)) {
    return _compiler_count;
  } else {
    return 0;
  }
#endif

#ifdef COMPILER1
  // Reached only in a client (COMPILER1-only) build.
  if (is_c1_compile(comp_level)) {
    return _compiler_count;
  } else {
    return 0;
  }
#endif

  // Core build: neither compiler present.
  return 0;
}
aoqi@0 | 227 | |
aoqi@0 | 228 | void NonTieredCompPolicy::reset_counter_for_invocation_event(methodHandle m) { |
aoqi@0 | 229 | // Make sure invocation and backedge counter doesn't overflow again right away |
aoqi@0 | 230 | // as would be the case for native methods. |
aoqi@0 | 231 | |
aoqi@0 | 232 | // BUT also make sure the method doesn't look like it was never executed. |
aoqi@0 | 233 | // Set carry bit and reduce counter's value to min(count, CompileThreshold/2). |
aoqi@0 | 234 | MethodCounters* mcs = m->method_counters(); |
aoqi@0 | 235 | assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); |
aoqi@0 | 236 | mcs->invocation_counter()->set_carry(); |
aoqi@0 | 237 | mcs->backedge_counter()->set_carry(); |
aoqi@0 | 238 | |
aoqi@0 | 239 | assert(!m->was_never_executed(), "don't reset to 0 -- could be mistaken for never-executed"); |
aoqi@0 | 240 | } |
aoqi@0 | 241 | |
aoqi@0 | 242 | void NonTieredCompPolicy::reset_counter_for_back_branch_event(methodHandle m) { |
aoqi@0 | 243 | // Delay next back-branch event but pump up invocation counter to triger |
aoqi@0 | 244 | // whole method compilation. |
aoqi@0 | 245 | MethodCounters* mcs = m->method_counters(); |
aoqi@0 | 246 | assert(mcs != NULL, "MethodCounters cannot be NULL for profiling"); |
aoqi@0 | 247 | InvocationCounter* i = mcs->invocation_counter(); |
aoqi@0 | 248 | InvocationCounter* b = mcs->backedge_counter(); |
aoqi@0 | 249 | |
aoqi@0 | 250 | // Don't set invocation_counter's value too low otherwise the method will |
aoqi@0 | 251 | // look like immature (ic < ~5300) which prevents the inlining based on |
aoqi@0 | 252 | // the type profiling. |
aoqi@0 | 253 | i->set(i->state(), CompileThreshold); |
aoqi@0 | 254 | // Don't reset counter too low - it is used to check if OSR method is ready. |
aoqi@0 | 255 | b->set(b->state(), CompileThreshold / 2); |
aoqi@0 | 256 | } |
aoqi@0 | 257 | |
//
// CounterDecay
//
// Iterates through invocation counters and decrements them. This
// is done at each safepoint.
//
class CounterDecay : public AllStatic {
  // Wall-clock time (ms) of the most recent decay pass.
  static jlong _last_timestamp;
  // Decays the invocation counter of a single method, if it has counters.
  static void do_method(Method* m) {
    MethodCounters* mcs = m->method_counters();
    if (mcs != NULL) {
      mcs->invocation_counter()->decay();
    }
  }
public:
  // Performs one decay pass over a slice of loaded classes.
  static void decay();
  // True when at least CounterDecayMinIntervalLength ms have elapsed since
  // the last pass.
  static bool is_decay_needed() {
    return (os::javaTimeMillis() - _last_timestamp) > CounterDecayMinIntervalLength;
  }
};
aoqi@0 | 278 | |
aoqi@0 | 279 | jlong CounterDecay::_last_timestamp = 0; |
aoqi@0 | 280 | |
aoqi@0 | 281 | void CounterDecay::decay() { |
aoqi@0 | 282 | _last_timestamp = os::javaTimeMillis(); |
aoqi@0 | 283 | |
aoqi@0 | 284 | // This operation is going to be performed only at the end of a safepoint |
aoqi@0 | 285 | // and hence GC's will not be going on, all Java mutators are suspended |
aoqi@0 | 286 | // at this point and hence SystemDictionary_lock is also not needed. |
aoqi@0 | 287 | assert(SafepointSynchronize::is_at_safepoint(), "can only be executed at a safepoint"); |
aoqi@0 | 288 | int nclasses = SystemDictionary::number_of_classes(); |
aoqi@0 | 289 | double classes_per_tick = nclasses * (CounterDecayMinIntervalLength * 1e-3 / |
aoqi@0 | 290 | CounterHalfLifeTime); |
aoqi@0 | 291 | for (int i = 0; i < classes_per_tick; i++) { |
aoqi@0 | 292 | Klass* k = SystemDictionary::try_get_next_class(); |
aoqi@0 | 293 | if (k != NULL && k->oop_is_instance()) { |
aoqi@0 | 294 | InstanceKlass::cast(k)->methods_do(do_method); |
aoqi@0 | 295 | } |
aoqi@0 | 296 | } |
aoqi@0 | 297 | } |
aoqi@0 | 298 | |
aoqi@0 | 299 | // Called at the end of the safepoint |
aoqi@0 | 300 | void NonTieredCompPolicy::do_safepoint_work() { |
aoqi@0 | 301 | if(UseCounterDecay && CounterDecay::is_decay_needed()) { |
aoqi@0 | 302 | CounterDecay::decay(); |
aoqi@0 | 303 | } |
aoqi@0 | 304 | } |
aoqi@0 | 305 | |
aoqi@0 | 306 | void NonTieredCompPolicy::reprofile(ScopeDesc* trap_scope, bool is_osr) { |
aoqi@0 | 307 | ScopeDesc* sd = trap_scope; |
aoqi@0 | 308 | MethodCounters* mcs; |
aoqi@0 | 309 | InvocationCounter* c; |
aoqi@0 | 310 | for (; !sd->is_top(); sd = sd->sender()) { |
aoqi@0 | 311 | mcs = sd->method()->method_counters(); |
aoqi@0 | 312 | if (mcs != NULL) { |
aoqi@0 | 313 | // Reset ICs of inlined methods, since they can trigger compilations also. |
aoqi@0 | 314 | mcs->invocation_counter()->reset(); |
aoqi@0 | 315 | } |
aoqi@0 | 316 | } |
aoqi@0 | 317 | mcs = sd->method()->method_counters(); |
aoqi@0 | 318 | if (mcs != NULL) { |
aoqi@0 | 319 | c = mcs->invocation_counter(); |
aoqi@0 | 320 | if (is_osr) { |
aoqi@0 | 321 | // It was an OSR method, so bump the count higher. |
aoqi@0 | 322 | c->set(c->state(), CompileThreshold); |
aoqi@0 | 323 | } else { |
aoqi@0 | 324 | c->reset(); |
aoqi@0 | 325 | } |
aoqi@0 | 326 | mcs->backedge_counter()->reset(); |
aoqi@0 | 327 | } |
aoqi@0 | 328 | } |
aoqi@0 | 329 | |
aoqi@0 | 330 | // This method can be called by any component of the runtime to notify the policy |
aoqi@0 | 331 | // that it's recommended to delay the complation of this method. |
aoqi@0 | 332 | void NonTieredCompPolicy::delay_compilation(Method* method) { |
aoqi@0 | 333 | MethodCounters* mcs = method->method_counters(); |
aoqi@0 | 334 | if (mcs != NULL) { |
aoqi@0 | 335 | mcs->invocation_counter()->decay(); |
aoqi@0 | 336 | mcs->backedge_counter()->decay(); |
aoqi@0 | 337 | } |
aoqi@0 | 338 | } |
aoqi@0 | 339 | |
aoqi@0 | 340 | void NonTieredCompPolicy::disable_compilation(Method* method) { |
aoqi@0 | 341 | MethodCounters* mcs = method->method_counters(); |
aoqi@0 | 342 | if (mcs != NULL) { |
aoqi@0 | 343 | mcs->invocation_counter()->set_state(InvocationCounter::wait_for_nothing); |
aoqi@0 | 344 | mcs->backedge_counter()->set_state(InvocationCounter::wait_for_nothing); |
aoqi@0 | 345 | } |
aoqi@0 | 346 | } |
aoqi@0 | 347 | |
aoqi@1 | 348 | #ifdef MIPS64 |
aoqi@1 | 349 | bool NonTieredCompPolicy::compare(CompileTask* task_x, CompileTask* task_y) { |
aoqi@1 | 350 | |
aoqi@1 | 351 | if (task_x->weight() > task_y->weight()) { |
aoqi@1 | 352 | return true; |
aoqi@1 | 353 | } |
aoqi@1 | 354 | |
aoqi@1 | 355 | return false; |
aoqi@1 | 356 | } |
aoqi@1 | 357 | |
// MIPS64 scheduling heuristic: re-estimates how quickly a queued compile
// task is accumulating interpreter events, and refreshes its weight.
// t is the current time in ms (os::javaTimeMillis in the caller).
void NonTieredCompPolicy::update_speed(jlong t, CompileTask* task) {
  // Time since the last safepoint ended, and since this task was sampled.
  jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint();
  jlong delta_t = t - task->prev_time();
  Method* m = task->method();

  // Current event counts: invocations plus backedges, with previously
  // decayed backedge events folded back in via the decay counter.
  int ic = m->interpreter_invocation_count();
  int bc = m->backedge_count() + m->get_decay_counter();
  int pre_ic = task->prev_ic_count();
  int pre_bc = task->prev_bc_count();

  // New events observed since the last sample.
  int delta_e = (ic + bc) - (pre_ic + pre_bc);

  if (delta_s >= MinUpdateTime) {
    if (delta_t >= MinUpdateTime && delta_e > 0) {
      // Enough time and activity: take a new sample and recompute speed.
      task->set_prev_time(t);
      task->set_prev_ic_count(ic);
      task->set_prev_bc_count(bc);
      // Weighted event delta: invocations scaled by FactorOfSizeScheduling,
      // backedges by 10 (both expressed as percentages).
      int delta_n = FactorOfSizeScheduling * (ic - pre_ic) / 100 + 10 * (bc - pre_bc) / 100;
      task->set_speed(delta_n * 1.0 / delta_t);
      task->set_weight();
    } else
    if (delta_t > MaxUpdateTime && delta_e == 0) {
      // No events for a long time: the method has gone cold.
      task->set_speed(0);
      task->set_weight();
    }
  }
}
aoqi@1 | 385 | |
aoqi@1 | 386 | bool NonTieredCompPolicy::task_should_be_removed(jlong t, jlong timeout, CompileTask* task) { |
aoqi@1 | 387 | jlong delta_s = t - SafepointSynchronize::end_of_last_safepoint(); |
aoqi@1 | 388 | jlong delta_t = t - task->prev_time(); |
aoqi@1 | 389 | Method* m = task->method(); |
aoqi@1 | 390 | |
aoqi@1 | 391 | if (delta_t > timeout && delta_s > timeout) { |
aoqi@1 | 392 | int ic = m->interpreter_invocation_count(); |
aoqi@1 | 393 | int bc = m->backedge_count() + m->get_decay_counter(); |
aoqi@1 | 394 | |
aoqi@1 | 395 | if(ic > InvocationOldThreshold || bc > LoopOldThreshold) { |
aoqi@1 | 396 | // This task is old enough, do not remove it. |
aoqi@1 | 397 | return false; |
aoqi@1 | 398 | } |
aoqi@1 | 399 | |
aoqi@1 | 400 | return task->speed() < 0.001; |
aoqi@1 | 401 | } |
aoqi@1 | 402 | return false; |
aoqi@1 | 403 | } |
aoqi@1 | 404 | |
aoqi@1 | 405 | #endif |
aoqi@1 | 406 | |
// Picks the next task to compile from the queue. On non-MIPS64 platforms this
// is plain FIFO; on MIPS64 the queue is scanned for the heaviest task and
// stale tasks are pruned along the way.
CompileTask* NonTieredCompPolicy::select_task(CompileQueue* compile_queue) {
#ifndef MIPS64
  return compile_queue->first();
#else
  CompileTask *max_task = NULL;
  jlong t = os::javaTimeMillis();

  int counter = 1;
  for (CompileTask* task = compile_queue->first(); task != NULL;) {
    // Capture the successor before this task can be removed below.
    CompileTask* next_task = task->next();
    counter++;
    // Bound the scan on very long queues; return the best task seen so far.
    if (counter > MaxCompileQueueSize) return max_task;
    update_speed(t, task);
    if (max_task == NULL) {
      max_task = task;
    } else {
      if (task_should_be_removed(t, MinWatchTime, task)) {
        // Task went cold while queued: unlink it. The wrapper frees the
        // task when it goes out of scope at the end of this branch.
        CompileTaskWrapper ctw(task); // Frees the task
        compile_queue->remove(task);
        task->method()->clear_queued_for_compilation();
        task = next_task;
        continue;
      }

      // Keep the heavier of the current candidate and this task.
      if (compare(task, max_task)) {
        max_task = task;
      }
    }
    task = next_task;
  }

  return max_task;
#endif
}
aoqi@0 | 441 | |
aoqi@0 | 442 | bool NonTieredCompPolicy::is_mature(Method* method) { |
aoqi@0 | 443 | MethodData* mdo = method->method_data(); |
aoqi@0 | 444 | assert(mdo != NULL, "Should be"); |
aoqi@0 | 445 | uint current = mdo->mileage_of(method); |
aoqi@0 | 446 | uint initial = mdo->creation_mileage(); |
aoqi@0 | 447 | if (current < initial) |
aoqi@0 | 448 | return true; // some sort of overflow |
aoqi@0 | 449 | uint target; |
aoqi@0 | 450 | if (ProfileMaturityPercentage <= 0) |
aoqi@0 | 451 | target = (uint) -ProfileMaturityPercentage; // absolute value |
aoqi@0 | 452 | else |
aoqi@0 | 453 | target = (uint)( (ProfileMaturityPercentage * CompileThreshold) / 100 ); |
aoqi@0 | 454 | return (current >= initial + target); |
aoqi@0 | 455 | } |
aoqi@0 | 456 | |
// Counter-overflow event handler, called from the interpreter. For a method
// entry overflow (bci == InvocationEntryBci) a standard compile may be
// requested; for a loop overflow an OSR nmethod is looked up or requested.
// Returns the OSR nmethod to continue in, or NULL to keep interpreting.
nmethod* NonTieredCompPolicy::event(methodHandle method, methodHandle inlinee, int branch_bci,
                                    int bci, CompLevel comp_level, nmethod* nm, JavaThread* thread) {
  assert(comp_level == CompLevel_none, "This should be only called from the interpreter");
  NOT_PRODUCT(trace_frequency_counter_overflow(method, branch_bci, bci));
#ifdef MIPS64
  // Feed the MIPS64 scheduling heuristics (see update_speed).
  method->incr_num_of_requests(1);
#endif
  if (JvmtiExport::can_post_interpreter_events() && thread->is_interp_only_mode()) {
    // If certain JVMTI events (e.g. frame pop event) are requested then the
    // thread is forced to remain in interpreted code. This is
    // implemented partly by a check in the run_compiled_code
    // section of the interpreter whether we should skip running
    // compiled code, and partly by skipping OSR compiles for
    // interpreted-only threads.
    if (bci != InvocationEntryBci) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
  }
  if (CompileTheWorld || ReplayCompiles) {
    // Don't trigger other compiles in testing mode
    if (bci == InvocationEntryBci) {
      reset_counter_for_invocation_event(method);
    } else {
      reset_counter_for_back_branch_event(method);
    }
    return NULL;
  }

  if (bci == InvocationEntryBci) {
    // when code cache is full, compilation gets switched off, UseCompiler
    // is set to false
    if (!method->has_compiled_code() && UseCompiler) {
      method_invocation_event(method, thread);
    } else {
      // Force counter overflow on method entry, even if no compilation
      // happened. (The method_invocation_event call does this also.)
      reset_counter_for_invocation_event(method);
    }
    // compilation at an invocation overflow no longer goes and retries test for
    // compiled method. We always run the loser of the race as interpreted.
    // so return NULL
    return NULL;
  } else {
    // counter overflow in a loop => try to do on-stack-replacement
    nmethod* osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    NOT_PRODUCT(trace_osr_request(method, osr_nm, bci));
    // when code cache is full, we should not compile any more...
    if (osr_nm == NULL && UseCompiler) {
      method_back_branch_event(method, bci, thread);
      // Re-check: the compile may have produced an OSR nmethod synchronously.
      osr_nm = method->lookup_osr_nmethod_for(bci, CompLevel_highest_tier, true);
    }
    if (osr_nm == NULL) {
      reset_counter_for_back_branch_event(method);
      return NULL;
    }
    return osr_nm;
  }
  // Not reached: both branches above return.
  return NULL;
}
aoqi@0 | 517 | |
aoqi@0 | 518 | #ifndef PRODUCT |
PRAGMA_FORMAT_NONLITERAL_IGNORED_EXTERNAL
// Debug-only trace of an invocation/backedge counter overflow event.
void NonTieredCompPolicy::trace_frequency_counter_overflow(methodHandle m, int branch_bci, int bci) {
  if (TraceInvocationCounterOverflow) {
    MethodCounters* mcs = m->method_counters();
    assert(mcs != NULL, "MethodCounters cannot be NULL for profiling");
    InvocationCounter* ic = mcs->invocation_counter();
    InvocationCounter* bc = mcs->backedge_counter();
    ResourceMark rm;
    // Select the message according to whether this was a method-entry or a
    // loop (backedge) overflow. The pragmas below silence the non-literal
    // format-string warning for the tty->print call.
    const char* msg =
      bci == InvocationEntryBci
      ? "comp-policy cntr ovfl @ %d in entry of "
      : "comp-policy cntr ovfl @ %d in loop of ";
    PRAGMA_DIAG_PUSH
    PRAGMA_FORMAT_NONLITERAL_IGNORED_INTERNAL
    tty->print(msg, bci);
    PRAGMA_DIAG_POP
    m->print_value();
    tty->cr();
    ic->print();
    bc->print();
    if (ProfileInterpreter) {
      if (bci != InvocationEntryBci) {
        // For loop overflows also report the taken count of the triggering
        // branch, when profile data is available.
        MethodData* mdo = m->method_data();
        if (mdo != NULL) {
          int count = mdo->bci_to_data(branch_bci)->as_JumpData()->taken();
          tty->print_cr("back branch count = %d", count);
        }
      }
    }
  }
}
aoqi@0 | 550 | |
aoqi@0 | 551 | void NonTieredCompPolicy::trace_osr_request(methodHandle method, nmethod* osr, int bci) { |
aoqi@0 | 552 | if (TraceOnStackReplacement) { |
aoqi@0 | 553 | ResourceMark rm; |
aoqi@0 | 554 | tty->print(osr != NULL ? "Reused OSR entry for " : "Requesting OSR entry for "); |
aoqi@0 | 555 | method->print_short_name(tty); |
aoqi@0 | 556 | tty->print_cr(" at bci %d", bci); |
aoqi@0 | 557 | } |
aoqi@0 | 558 | } |
aoqi@0 | 559 | #endif // !PRODUCT |
aoqi@0 | 560 | |
aoqi@0 | 561 | // SimpleCompPolicy - compile current method |
aoqi@0 | 562 | |
// Handles a method-entry counter overflow: requests a normal compile of the
// method at the highest tier if it is not compiled yet.
void SimpleCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
#ifdef MIPS64
  // Snapshot the backedge count so any decay applied by
  // reset_counter_for_invocation_event() can be accumulated below.
  const int bc = m->backedge_count();
#endif

  reset_counter_for_invocation_event(m);
#ifdef MIPS64
  const int new_bc = m->backedge_count();
  const int delta = bc - new_bc;
#endif
  const char* comment = "count";

#ifdef MIPS64
  // Fold decayed backedge events into the method's decay counter so the
  // MIPS64 queue-scheduling heuristics still account for them.
  if(delta > 0) m->incr_decay_counter(delta);
#endif

  if (is_compilation_enabled() && can_be_compiled(m, comp_level)) {
    nmethod* nm = m->code();
    if (nm == NULL ) {
      // Not compiled yet: submit a standard (non-OSR) compile request.
      CompileBroker::compile_method(m, InvocationEntryBci, comp_level, m, hot_count, comment, thread);
    }
  }
}
aoqi@0 | 588 | |
aoqi@0 | 589 | void SimpleCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) { |
aoqi@0 | 590 | const int comp_level = CompLevel_highest_tier; |
aoqi@0 | 591 | const int hot_count = m->backedge_count(); |
aoqi@0 | 592 | const char* comment = "backedge_count"; |
aoqi@0 | 593 | |
aoqi@0 | 594 | if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) { |
aoqi@0 | 595 | CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); |
aoqi@0 | 596 | NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) |
aoqi@0 | 597 | } |
aoqi@0 | 598 | } |
aoqi@0 | 599 | // StackWalkCompPolicy - walk up stack to find a suitable method to compile |
aoqi@0 | 600 | |
aoqi@0 | 601 | #ifdef COMPILER2 |
aoqi@0 | 602 | const char* StackWalkCompPolicy::_msg = NULL; |
aoqi@0 | 603 | |
aoqi@0 | 604 | |
// Consider m for compilation: walks up the interpreter stack from the
// triggering frame to find the outermost frame worth compiling (so the
// trigger method can be inlined into it), then submits that method.
void StackWalkCompPolicy::method_invocation_event(methodHandle m, JavaThread* thread) {
  const int comp_level = CompLevel_highest_tier;
  const int hot_count = m->invocation_count();
  reset_counter_for_invocation_event(m);
  const char* comment = "count";

  if (is_compilation_enabled() && m->code() == NULL && can_be_compiled(m, comp_level)) {
    ResourceMark rm(thread);
    frame fr = thread->last_frame();
    assert(fr.is_interpreted_frame(), "must be interpreted");
    assert(fr.interpreter_frame_method() == m(), "bad method");

    if (TraceCompilationPolicy) {
      tty->print("method invocation trigger: ");
      m->print_short_name(tty);
      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)m()), m->code_size());
    }
    RegisterMap reg_map(thread, false);
    javaVFrame* triggerVF = thread->last_java_vframe(&reg_map);
    // triggerVF is the frame that triggered its counter
    RFrame* first = new InterpretedRFrame(triggerVF->fr(), thread, m);

    if (first->top_method()->code() != NULL) {
      // called obsolete method/nmethod -- no need to recompile
      if (TraceCompilationPolicy) tty->print_cr(" --> " INTPTR_FORMAT, p2i(first->top_method()->code()));
    } else {
      // Search the (resource-allocated) stack of RFrames for the best
      // compilation candidate.
      if (TimeCompilationPolicy) accumulated_time()->start();
      GrowableArray<RFrame*>* stack = new GrowableArray<RFrame*>(50);
      stack->push(first);
      RFrame* top = findTopInlinableFrame(stack);
      if (TimeCompilationPolicy) accumulated_time()->stop();
      assert(top != NULL, "findTopInlinableFrame returned null");
      if (TraceCompilationPolicy) top->print();
      CompileBroker::compile_method(top->top_method(), InvocationEntryBci, comp_level,
                                    m, hot_count, comment, thread);
    }
  }
}
aoqi@0 | 644 | |
aoqi@0 | 645 | void StackWalkCompPolicy::method_back_branch_event(methodHandle m, int bci, JavaThread* thread) { |
aoqi@0 | 646 | const int comp_level = CompLevel_highest_tier; |
aoqi@0 | 647 | const int hot_count = m->backedge_count(); |
aoqi@0 | 648 | const char* comment = "backedge_count"; |
aoqi@0 | 649 | |
aoqi@0 | 650 | if (is_compilation_enabled() && can_be_osr_compiled(m, comp_level)) { |
aoqi@0 | 651 | CompileBroker::compile_method(m, bci, comp_level, m, hot_count, comment, thread); |
aoqi@0 | 652 | NOT_PRODUCT(trace_osr_completion(m->lookup_osr_nmethod_for(bci, comp_level, true));) |
aoqi@0 | 653 | } |
aoqi@0 | 654 | } |
aoqi@0 | 655 | |
// Walk up the interpreted call chain, starting at the frame that triggered
// its invocation counter (stack->at(0)), looking for the highest caller into
// which the current choice would probably be inlined.  Each iteration either
// advances 'current' to its caller or breaks out, leaving 'current' as the
// frame to compile; 'msg' records why the walk stopped (for tracing only).
// Never returns a compiled frame (asserted on entry and exit).
RFrame* StackWalkCompPolicy::findTopInlinableFrame(GrowableArray<RFrame*>* stack) {
  // go up the stack until finding a frame that (probably) won't be inlined
  // into its caller
  RFrame* current = stack->at(0); // current choice for stopping
  assert( current && !current->is_compiled(), "" );
  const char* msg = NULL;

  while (1) {

    // before going up the stack further, check if doing so would get us into
    // compiled code
    RFrame* next = senderOf(current, stack);
    if( !next )               // No next frame up the stack?
      break;                  // Then compile with current frame

    methodHandle m = current->top_method();
    methodHandle next_m = next->top_method();

    if (TraceCompilationPolicy && Verbose) {
      tty->print("[caller: ");
      next_m->print_short_name(tty);
      tty->print("] ");
    }

    if( !Inline ) {           // Inlining turned off
      msg = "Inlining turned off";
      break;
    }
    if (next_m->is_not_compilable()) { // Did fail to compile this before?
      msg = "caller not compilable";
      break;
    }
    // don't go up too high when searching for recompilees: bound the walk
    // both by absolute frame number and by interpreted-frame distance
    if (next->num() > MaxRecompilationSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: > MaxRecompilationSearchLength";
      break;
    }
    if (next->distance() > MaxInterpretedSearchLength) {
      // don't go up too high when searching for recompilees
      msg = "don't go up any further: next > MaxInterpretedSearchLength";
      break;
    }
    // Compiled frame above already decided not to inline;
    // do not recompile him.
    if (next->is_compiled()) {
      msg = "not going up into optimized code";
      break;
    }

    // Interpreted frame above us was already compiled.  Do not force
    // a recompile, although if the frame above us runs long enough an
    // OSR might still happen.
    if( current->is_interpreted() && next_m->has_compiled_code() ) {
      msg = "not going up -- already compiled caller";
      break;
    }

    // Compute how frequent this call site is.  We have current method 'm'.
    // We know next method 'next_m' is interpreted.  Find the call site and
    // check the various invocation counts.
    int invcnt = 0;  // Caller counts (interpreter invocations of next_m)
    if (ProfileInterpreter) {
      invcnt = next_m->interpreter_invocation_count();
    }
    int cnt = 0;     // Call site counts (from next_m's MDO, if it has one)
    if (ProfileInterpreter && next_m->method_data() != NULL) {
      ResourceMark rm;
      int bci = next->top_vframe()->bci();
      ProfileData* data = next_m->method_data()->bci_to_data(bci);
      if (data != NULL && data->is_CounterData())
        cnt = data->as_CounterData()->count();
    }

    // Caller counts / call-site counts; i.e. is this call site
    // a hot call site for method next_m?
    // Note: integer division -- freq is the whole number of call-site hits
    // per caller invocation; falls back to the raw count when invcnt is 0.
    int freq = (invcnt) ? cnt/invcnt : cnt;

    // Check size and frequency limits
    if ((msg = shouldInline(m, freq, cnt)) != NULL) {
      break;
    }
    // Check inlining negative tests
    if ((msg = shouldNotInline(m)) != NULL) {
      break;
    }


    // If the caller method is too big or something then we do not want to
    // compile it just to inline a method
    if (!can_be_compiled(next_m, CompLevel_any)) {
      msg = "caller cannot be compiled";
      break;
    }

    if( next_m->name() == vmSymbols::class_initializer_name() ) {
      msg = "do not compile class initializer (OSR ok)";
      break;
    }

    if (TraceCompilationPolicy && Verbose) {
      tty->print("\n\t check caller: ");
      next_m->print_short_name(tty);
      tty->print(" ( interpreted " INTPTR_FORMAT ", size=%d ) ", p2i((address)next_m()), next_m->code_size());
    }

    // caller passed every test: move up one frame and try to go higher still
    current = next;
  }

  assert( !current || !current->is_compiled(), "" );

  if (TraceCompilationPolicy && msg) tty->print("(%s)\n", msg);

  return current;
}
aoqi@0 | 770 | |
aoqi@0 | 771 | RFrame* StackWalkCompPolicy::senderOf(RFrame* rf, GrowableArray<RFrame*>* stack) { |
aoqi@0 | 772 | RFrame* sender = rf->caller(); |
aoqi@0 | 773 | if (sender && sender->num() == stack->length()) stack->push(sender); |
aoqi@0 | 774 | return sender; |
aoqi@0 | 775 | } |
aoqi@0 | 776 | |
aoqi@0 | 777 | |
aoqi@0 | 778 | const char* StackWalkCompPolicy::shouldInline(methodHandle m, float freq, int cnt) { |
aoqi@0 | 779 | // Allows targeted inlining |
aoqi@0 | 780 | // positive filter: should send be inlined? returns NULL (--> yes) |
aoqi@0 | 781 | // or rejection msg |
aoqi@0 | 782 | int max_size = MaxInlineSize; |
aoqi@0 | 783 | int cost = m->code_size(); |
aoqi@0 | 784 | |
aoqi@0 | 785 | // Check for too many throws (and not too huge) |
aoqi@0 | 786 | if (m->interpreter_throwout_count() > InlineThrowCount && cost < InlineThrowMaxSize ) { |
aoqi@0 | 787 | return NULL; |
aoqi@0 | 788 | } |
aoqi@0 | 789 | |
aoqi@0 | 790 | // bump the max size if the call is frequent |
aoqi@0 | 791 | if ((freq >= InlineFrequencyRatio) || (cnt >= InlineFrequencyCount)) { |
aoqi@0 | 792 | if (TraceFrequencyInlining) { |
aoqi@0 | 793 | tty->print("(Inlined frequent method)\n"); |
aoqi@0 | 794 | m->print(); |
aoqi@0 | 795 | } |
aoqi@0 | 796 | max_size = FreqInlineSize; |
aoqi@0 | 797 | } |
aoqi@0 | 798 | if (cost > max_size) { |
aoqi@0 | 799 | return (_msg = "too big"); |
aoqi@0 | 800 | } |
aoqi@0 | 801 | return NULL; |
aoqi@0 | 802 | } |
aoqi@0 | 803 | |
aoqi@0 | 804 | |
aoqi@0 | 805 | const char* StackWalkCompPolicy::shouldNotInline(methodHandle m) { |
aoqi@0 | 806 | // negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg |
aoqi@0 | 807 | if (m->is_abstract()) return (_msg = "abstract method"); |
aoqi@0 | 808 | // note: we allow ik->is_abstract() |
aoqi@0 | 809 | if (!m->method_holder()->is_initialized()) return (_msg = "method holder not initialized"); |
aoqi@0 | 810 | if (m->is_native()) return (_msg = "native method"); |
aoqi@0 | 811 | nmethod* m_code = m->code(); |
aoqi@0 | 812 | if (m_code != NULL && m_code->code_size() > InlineSmallCode) |
aoqi@0 | 813 | return (_msg = "already compiled into a big method"); |
aoqi@0 | 814 | |
aoqi@0 | 815 | // use frequency-based objections only for non-trivial methods |
aoqi@0 | 816 | if (m->code_size() <= MaxTrivialSize) return NULL; |
aoqi@0 | 817 | if (UseInterpreter) { // don't use counts with -Xcomp |
aoqi@0 | 818 | if ((m->code() == NULL) && m->was_never_executed()) return (_msg = "never executed"); |
aoqi@0 | 819 | if (!m->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return (_msg = "executed < MinInliningThreshold times"); |
aoqi@0 | 820 | } |
aoqi@0 | 821 | if (Method::has_unloaded_classes_in_signature(m, JavaThread::current())) return (_msg = "unloaded signature classes"); |
aoqi@0 | 822 | |
aoqi@0 | 823 | return NULL; |
aoqi@0 | 824 | } |
aoqi@0 | 825 | |
aoqi@0 | 826 | |
aoqi@0 | 827 | |
aoqi@0 | 828 | #endif // COMPILER2 |