Wed, 09 Dec 2009 16:40:45 -0800
6895383: JCK test throws NPE for method compiled with Escape Analysis
Summary: Add missing checks for MemBar nodes in EA.
Reviewed-by: never
duke@435 | 1 | /* |
xdono@772 | 2 | * Copyright 2000-2008 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_callGenerator.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | CallGenerator::CallGenerator(ciMethod* method) { |
duke@435 | 29 | _method = method; |
duke@435 | 30 | } |
duke@435 | 31 | |
duke@435 | 32 | // Utility function. |
duke@435 | 33 | const TypeFunc* CallGenerator::tf() const { |
duke@435 | 34 | return TypeFunc::make(method()); |
duke@435 | 35 | } |
duke@435 | 36 | |
duke@435 | 37 | //-----------------------------ParseGenerator--------------------------------- |
duke@435 | 38 | // Internal class which handles all direct bytecode traversal. |
duke@435 | 39 | class ParseGenerator : public InlineCallGenerator { |
duke@435 | 40 | private: |
duke@435 | 41 | bool _is_osr; |
duke@435 | 42 | float _expected_uses; |
duke@435 | 43 | |
duke@435 | 44 | public: |
duke@435 | 45 | ParseGenerator(ciMethod* method, float expected_uses, bool is_osr = false) |
duke@435 | 46 | : InlineCallGenerator(method) |
duke@435 | 47 | { |
duke@435 | 48 | _is_osr = is_osr; |
duke@435 | 49 | _expected_uses = expected_uses; |
duke@435 | 50 | assert(can_parse(method, is_osr), "parse must be possible"); |
duke@435 | 51 | } |
duke@435 | 52 | |
duke@435 | 53 | // Can we build either an OSR or a regular parser for this method? |
duke@435 | 54 | static bool can_parse(ciMethod* method, int is_osr = false); |
duke@435 | 55 | |
duke@435 | 56 | virtual bool is_parse() const { return true; } |
duke@435 | 57 | virtual JVMState* generate(JVMState* jvms); |
duke@435 | 58 | int is_osr() { return _is_osr; } |
duke@435 | 59 | |
duke@435 | 60 | }; |
duke@435 | 61 | |
duke@435 | 62 | JVMState* ParseGenerator::generate(JVMState* jvms) { |
duke@435 | 63 | Compile* C = Compile::current(); |
duke@435 | 64 | |
duke@435 | 65 | if (is_osr()) { |
duke@435 | 66 | // The JVMS for an OSR has a single argument (see its TypeFunc). |
duke@435 | 67 | assert(jvms->depth() == 1, "no inline OSR"); |
duke@435 | 68 | } |
duke@435 | 69 | |
duke@435 | 70 | if (C->failing()) { |
duke@435 | 71 | return NULL; // bailing out of the compile; do not try to parse |
duke@435 | 72 | } |
duke@435 | 73 | |
duke@435 | 74 | Parse parser(jvms, method(), _expected_uses); |
duke@435 | 75 | // Grab signature for matching/allocation |
duke@435 | 76 | #ifdef ASSERT |
duke@435 | 77 | if (parser.tf() != (parser.depth() == 1 ? C->tf() : tf())) { |
duke@435 | 78 | MutexLockerEx ml(Compile_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 79 | assert(C->env()->system_dictionary_modification_counter_changed(), |
duke@435 | 80 | "Must invalidate if TypeFuncs differ"); |
duke@435 | 81 | } |
duke@435 | 82 | #endif |
duke@435 | 83 | |
duke@435 | 84 | GraphKit& exits = parser.exits(); |
duke@435 | 85 | |
duke@435 | 86 | if (C->failing()) { |
duke@435 | 87 | while (exits.pop_exception_state() != NULL) ; |
duke@435 | 88 | return NULL; |
duke@435 | 89 | } |
duke@435 | 90 | |
duke@435 | 91 | assert(exits.jvms()->same_calls_as(jvms), "sanity"); |
duke@435 | 92 | |
duke@435 | 93 | // Simply return the exit state of the parser, |
duke@435 | 94 | // augmented by any exceptional states. |
duke@435 | 95 | return exits.transfer_exceptions_into_jvms(); |
duke@435 | 96 | } |
duke@435 | 97 | |
duke@435 | 98 | //---------------------------DirectCallGenerator------------------------------ |
duke@435 | 99 | // Internal class which handles all out-of-line calls w/o receiver type checks. |
duke@435 | 100 | class DirectCallGenerator : public CallGenerator { |
never@1515 | 101 | private: |
never@1515 | 102 | CallStaticJavaNode* _call_node; |
never@1515 | 103 | // Force separate memory and I/O projections for the exceptional |
never@1515 | 104 | // paths to facilitate late inlining. |
never@1515 | 105 | bool _separate_io_proj; |
never@1515 | 106 | |
never@1515 | 107 | public: |
never@1515 | 108 | DirectCallGenerator(ciMethod* method, bool separate_io_proj) |
never@1515 | 109 | : CallGenerator(method), |
never@1515 | 110 | _call_node(NULL), _separate_io_proj(separate_io_proj) |
duke@435 | 111 | { |
duke@435 | 112 | } |
duke@435 | 113 | virtual JVMState* generate(JVMState* jvms); |
never@1515 | 114 | |
never@1515 | 115 | CallStaticJavaNode* call_node() const { return _call_node; } |
duke@435 | 116 | }; |
duke@435 | 117 | |
duke@435 | 118 | JVMState* DirectCallGenerator::generate(JVMState* jvms) { |
duke@435 | 119 | GraphKit kit(jvms); |
duke@435 | 120 | bool is_static = method()->is_static(); |
duke@435 | 121 | address target = is_static ? SharedRuntime::get_resolve_static_call_stub() |
duke@435 | 122 | : SharedRuntime::get_resolve_opt_virtual_call_stub(); |
duke@435 | 123 | |
duke@435 | 124 | if (kit.C->log() != NULL) { |
duke@435 | 125 | kit.C->log()->elem("direct_call bci='%d'", jvms->bci()); |
duke@435 | 126 | } |
duke@435 | 127 | |
duke@435 | 128 | CallStaticJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallStaticJavaNode(tf(), target, method(), kit.bci()); |
duke@435 | 129 | if (!is_static) { |
duke@435 | 130 | // Make an explicit receiver null_check as part of this call. |
duke@435 | 131 | // Since we share a map with the caller, his JVMS gets adjusted. |
duke@435 | 132 | kit.null_check_receiver(method()); |
duke@435 | 133 | if (kit.stopped()) { |
duke@435 | 134 | // And dump it back to the caller, decorated with any exceptions: |
duke@435 | 135 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 136 | } |
duke@435 | 137 | // Mark the call node as virtual, sort of: |
duke@435 | 138 | call->set_optimized_virtual(true); |
duke@435 | 139 | } |
duke@435 | 140 | kit.set_arguments_for_java_call(call); |
never@1515 | 141 | kit.set_edges_for_java_call(call, false, _separate_io_proj); |
never@1515 | 142 | Node* ret = kit.set_results_for_java_call(call, _separate_io_proj); |
duke@435 | 143 | kit.push_node(method()->return_type()->basic_type(), ret); |
never@1515 | 144 | _call_node = call; // Save the call node in case we need it later |
duke@435 | 145 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 146 | } |
duke@435 | 147 | |
duke@435 | 148 | class VirtualCallGenerator : public CallGenerator { |
duke@435 | 149 | private: |
duke@435 | 150 | int _vtable_index; |
duke@435 | 151 | public: |
duke@435 | 152 | VirtualCallGenerator(ciMethod* method, int vtable_index) |
duke@435 | 153 | : CallGenerator(method), _vtable_index(vtable_index) |
duke@435 | 154 | { |
duke@435 | 155 | assert(vtable_index == methodOopDesc::invalid_vtable_index || |
duke@435 | 156 | vtable_index >= 0, "either invalid or usable"); |
duke@435 | 157 | } |
duke@435 | 158 | virtual bool is_virtual() const { return true; } |
duke@435 | 159 | virtual JVMState* generate(JVMState* jvms); |
duke@435 | 160 | }; |
duke@435 | 161 | |
duke@435 | 162 | //--------------------------VirtualCallGenerator------------------------------ |
duke@435 | 163 | // Internal class which handles all out-of-line calls checking receiver type. |
duke@435 | 164 | JVMState* VirtualCallGenerator::generate(JVMState* jvms) { |
duke@435 | 165 | GraphKit kit(jvms); |
duke@435 | 166 | Node* receiver = kit.argument(0); |
duke@435 | 167 | |
duke@435 | 168 | if (kit.C->log() != NULL) { |
duke@435 | 169 | kit.C->log()->elem("virtual_call bci='%d'", jvms->bci()); |
duke@435 | 170 | } |
duke@435 | 171 | |
duke@435 | 172 | // If the receiver is a constant null, do not torture the system |
duke@435 | 173 | // by attempting to call through it. The compile will proceed |
duke@435 | 174 | // correctly, but may bail out in final_graph_reshaping, because |
duke@435 | 175 | // the call instruction will have a seemingly deficient out-count. |
duke@435 | 176 | // (The bailout says something misleading about an "infinite loop".) |
duke@435 | 177 | if (kit.gvn().type(receiver)->higher_equal(TypePtr::NULL_PTR)) { |
duke@435 | 178 | kit.inc_sp(method()->arg_size()); // restore arguments |
duke@435 | 179 | kit.uncommon_trap(Deoptimization::Reason_null_check, |
duke@435 | 180 | Deoptimization::Action_none, |
duke@435 | 181 | NULL, "null receiver"); |
duke@435 | 182 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 183 | } |
duke@435 | 184 | |
duke@435 | 185 | // Ideally we would unconditionally do a null check here and let it |
duke@435 | 186 | // be converted to an implicit check based on profile information. |
duke@435 | 187 | // However currently the conversion to implicit null checks in |
duke@435 | 188 | // Block::implicit_null_check() only looks for loads and stores, not calls. |
duke@435 | 189 | ciMethod *caller = kit.method(); |
duke@435 | 190 | ciMethodData *caller_md = (caller == NULL) ? NULL : caller->method_data(); |
duke@435 | 191 | if (!UseInlineCaches || !ImplicitNullChecks || |
duke@435 | 192 | ((ImplicitNullCheckThreshold > 0) && caller_md && |
duke@435 | 193 | (caller_md->trap_count(Deoptimization::Reason_null_check) |
duke@435 | 194 | >= (uint)ImplicitNullCheckThreshold))) { |
duke@435 | 195 | // Make an explicit receiver null_check as part of this call. |
duke@435 | 196 | // Since we share a map with the caller, his JVMS gets adjusted. |
duke@435 | 197 | receiver = kit.null_check_receiver(method()); |
duke@435 | 198 | if (kit.stopped()) { |
duke@435 | 199 | // And dump it back to the caller, decorated with any exceptions: |
duke@435 | 200 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 201 | } |
duke@435 | 202 | } |
duke@435 | 203 | |
duke@435 | 204 | assert(!method()->is_static(), "virtual call must not be to static"); |
duke@435 | 205 | assert(!method()->is_final(), "virtual call should not be to final"); |
duke@435 | 206 | assert(!method()->is_private(), "virtual call should not be to private"); |
duke@435 | 207 | assert(_vtable_index == methodOopDesc::invalid_vtable_index || !UseInlineCaches, |
duke@435 | 208 | "no vtable calls if +UseInlineCaches"); |
duke@435 | 209 | address target = SharedRuntime::get_resolve_virtual_call_stub(); |
duke@435 | 210 | // Normal inline cache used for call |
duke@435 | 211 | CallDynamicJavaNode *call = new (kit.C, tf()->domain()->cnt()) CallDynamicJavaNode(tf(), target, method(), _vtable_index, kit.bci()); |
duke@435 | 212 | kit.set_arguments_for_java_call(call); |
duke@435 | 213 | kit.set_edges_for_java_call(call); |
duke@435 | 214 | Node* ret = kit.set_results_for_java_call(call); |
duke@435 | 215 | kit.push_node(method()->return_type()->basic_type(), ret); |
duke@435 | 216 | |
duke@435 | 217 | // Represent the effect of an implicit receiver null_check |
duke@435 | 218 | // as part of this call. Since we share a map with the caller, |
duke@435 | 219 | // his JVMS gets adjusted. |
duke@435 | 220 | kit.cast_not_null(receiver); |
duke@435 | 221 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 222 | } |
duke@435 | 223 | |
duke@435 | 224 | bool ParseGenerator::can_parse(ciMethod* m, int is_osr) { |
duke@435 | 225 | // Certain methods cannot be parsed at all: |
duke@435 | 226 | if (!m->can_be_compiled()) return false; |
duke@435 | 227 | if (!m->has_balanced_monitors()) return false; |
duke@435 | 228 | if (m->get_flow_analysis()->failing()) return false; |
duke@435 | 229 | |
duke@435 | 230 | // (Methods may bail out for other reasons, after the parser is run. |
duke@435 | 231 | // We try to avoid this, but if forced, we must return (Node*)NULL. |
duke@435 | 232 | // The user of the CallGenerator must check for this condition.) |
duke@435 | 233 | return true; |
duke@435 | 234 | } |
duke@435 | 235 | |
duke@435 | 236 | CallGenerator* CallGenerator::for_inline(ciMethod* m, float expected_uses) { |
duke@435 | 237 | if (!ParseGenerator::can_parse(m)) return NULL; |
duke@435 | 238 | return new ParseGenerator(m, expected_uses); |
duke@435 | 239 | } |
duke@435 | 240 | |
duke@435 | 241 | // As a special case, the JVMS passed to this CallGenerator is |
duke@435 | 242 | // for the method execution already in progress, not just the JVMS |
duke@435 | 243 | // of the caller. Thus, this CallGenerator cannot be mixed with others! |
duke@435 | 244 | CallGenerator* CallGenerator::for_osr(ciMethod* m, int osr_bci) { |
duke@435 | 245 | if (!ParseGenerator::can_parse(m, true)) return NULL; |
duke@435 | 246 | float past_uses = m->interpreter_invocation_count(); |
duke@435 | 247 | float expected_uses = past_uses; |
duke@435 | 248 | return new ParseGenerator(m, expected_uses, true); |
duke@435 | 249 | } |
duke@435 | 250 | |
never@1515 | 251 | CallGenerator* CallGenerator::for_direct_call(ciMethod* m, bool separate_io_proj) { |
duke@435 | 252 | assert(!m->is_abstract(), "for_direct_call mismatch"); |
never@1515 | 253 | return new DirectCallGenerator(m, separate_io_proj); |
duke@435 | 254 | } |
duke@435 | 255 | |
duke@435 | 256 | CallGenerator* CallGenerator::for_virtual_call(ciMethod* m, int vtable_index) { |
duke@435 | 257 | assert(!m->is_static(), "for_virtual_call mismatch"); |
duke@435 | 258 | return new VirtualCallGenerator(m, vtable_index); |
duke@435 | 259 | } |
duke@435 | 260 | |
never@1515 | 261 | // Allow inlining decisions to be delayed |
never@1515 | 262 | class LateInlineCallGenerator : public DirectCallGenerator { |
never@1515 | 263 | CallGenerator* _inline_cg; |
never@1515 | 264 | |
never@1515 | 265 | public: |
never@1515 | 266 | LateInlineCallGenerator(ciMethod* method, CallGenerator* inline_cg) : |
never@1515 | 267 | DirectCallGenerator(method, true), _inline_cg(inline_cg) {} |
never@1515 | 268 | |
never@1515 | 269 | virtual bool is_late_inline() const { return true; } |
never@1515 | 270 | |
never@1515 | 271 | // Convert the CallStaticJava into an inline |
never@1515 | 272 | virtual void do_late_inline(); |
never@1515 | 273 | |
never@1515 | 274 | JVMState* generate(JVMState* jvms) { |
never@1515 | 275 | // Record that this call site should be revisited once the main |
never@1515 | 276 | // parse is finished. |
never@1515 | 277 | Compile::current()->add_late_inline(this); |
never@1515 | 278 | |
never@1515 | 279 | // Emit the CallStaticJava and request separate projections so |
never@1515 | 280 | // that the late inlining logic can distinguish between fall |
never@1515 | 281 | // through and exceptional uses of the memory and io projections |
never@1515 | 282 | // as is done for allocations and macro expansion. |
never@1515 | 283 | return DirectCallGenerator::generate(jvms); |
never@1515 | 284 | } |
never@1515 | 285 | |
never@1515 | 286 | }; |
never@1515 | 287 | |
never@1515 | 288 | |
never@1515 | 289 | void LateInlineCallGenerator::do_late_inline() { |
never@1515 | 290 | // Can't inline it |
never@1515 | 291 | if (call_node() == NULL || call_node()->outcnt() == 0 || |
never@1515 | 292 | call_node()->in(0) == NULL || call_node()->in(0)->is_top()) |
never@1515 | 293 | return; |
never@1515 | 294 | |
never@1515 | 295 | CallStaticJavaNode* call = call_node(); |
never@1515 | 296 | |
never@1515 | 297 | // Make a clone of the JVMState that is appropriate for driving a parse |
never@1515 | 298 | Compile* C = Compile::current(); |
never@1515 | 299 | JVMState* jvms = call->jvms()->clone_shallow(C); |
never@1515 | 300 | uint size = call->req(); |
never@1515 | 301 | SafePointNode* map = new (C, size) SafePointNode(size, jvms); |
never@1515 | 302 | for (uint i1 = 0; i1 < size; i1++) { |
never@1515 | 303 | map->init_req(i1, call->in(i1)); |
never@1515 | 304 | } |
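// The cloned map now mirrors the call's inputs; it becomes the entry state
// used below for re-parsing the callee.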
never@1515 | 305 | |
never@1515 | 306 | // Make sure the state is a MergeMem for parsing. |
never@1515 | 307 | if (!map->in(TypeFunc::Memory)->is_MergeMem()) { |
never@1515 | 308 | map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory))); |
never@1515 | 309 | } |
never@1515 | 310 | |
never@1515 | 311 | // Make enough space for the expression stack and transfer the incoming arguments |
never@1515 | 312 | int nargs = method()->arg_size(); |
never@1515 | 313 | jvms->set_map(map); |
never@1515 | 314 | map->ensure_stack(jvms, jvms->method()->max_stack()); |
never@1515 | 315 | if (nargs > 0) { |
never@1515 | 316 | for (int i1 = 0; i1 < nargs; i1++) { |
never@1515 | 317 | map->set_req(i1 + jvms->argoff(), call->in(TypeFunc::Parms + i1)); |
never@1515 | 318 | } |
never@1515 | 319 | } |
never@1515 | 320 | |
never@1515 | 321 | CompileLog* log = C->log(); |
never@1515 | 322 | if (log != NULL) { |
never@1515 | 323 | log->head("late_inline method='%d'", log->identify(method())); |
never@1515 | 324 | JVMState* p = jvms; |
never@1515 | 325 | while (p != NULL) { |
never@1515 | 326 | log->elem("jvms bci='%d' method='%d'", p->bci(), log->identify(p->method())); |
never@1515 | 327 | p = p->caller(); |
never@1515 | 328 | } |
never@1515 | 329 | log->tail("late_inline"); |
never@1515 | 330 | } |
never@1515 | 331 | |
never@1515 | 332 | // Set up default node notes to be picked up by the inlining |
never@1515 | 333 | Node_Notes* old_nn = C->default_node_notes(); |
never@1515 | 334 | if (old_nn != NULL) { |
never@1515 | 335 | Node_Notes* entry_nn = old_nn->clone(C); |
never@1515 | 336 | entry_nn->set_jvms(jvms); |
never@1515 | 337 | C->set_default_node_notes(entry_nn); |
never@1515 | 338 | } |
never@1515 | 339 | |
never@1515 | 340 | // Now perform the inlining using the synthesized JVMState |
never@1515 | 341 | JVMState* new_jvms = _inline_cg->generate(jvms); |
never@1515 | 342 | if (new_jvms == NULL) return; // no change |
never@1515 | 343 | if (C->failing()) return; |
never@1515 | 344 | |
never@1515 | 345 | // Capture any exceptional control flow |
never@1515 | 346 | GraphKit kit(new_jvms); |
never@1515 | 347 | |
never@1515 | 348 | // Find the result object |
never@1515 | 349 | Node* result = C->top(); |
never@1515 | 350 | int result_size = method()->return_type()->size(); |
never@1515 | 351 | if (result_size != 0 && !kit.stopped()) { |
never@1515 | 352 | result = (result_size == 1) ? kit.pop() : kit.pop_pair(); |
never@1515 | 353 | } |
never@1515 | 354 | |
never@1515 | 355 | kit.replace_call(call, result); |
never@1515 | 356 | } |
never@1515 | 357 | |
never@1515 | 358 | |
never@1515 | 359 | CallGenerator* CallGenerator::for_late_inline(ciMethod* method, CallGenerator* inline_cg) { |
never@1515 | 360 | return new LateInlineCallGenerator(method, inline_cg); |
never@1515 | 361 | } |
never@1515 | 362 | |
duke@435 | 363 | |
duke@435 | 364 | //---------------------------WarmCallGenerator-------------------------------- |
duke@435 | 365 | // Internal class which handles initial deferral of inlining decisions. |
duke@435 | 366 | class WarmCallGenerator : public CallGenerator { |
duke@435 | 367 | WarmCallInfo* _call_info; |
duke@435 | 368 | CallGenerator* _if_cold; |
duke@435 | 369 | CallGenerator* _if_hot; |
duke@435 | 370 | bool _is_virtual; // caches virtuality of if_cold |
duke@435 | 371 | bool _is_inline; // caches inline-ness of if_hot |
duke@435 | 372 | |
duke@435 | 373 | public: |
duke@435 | 374 | WarmCallGenerator(WarmCallInfo* ci, |
duke@435 | 375 | CallGenerator* if_cold, |
duke@435 | 376 | CallGenerator* if_hot) |
duke@435 | 377 | : CallGenerator(if_cold->method()) |
duke@435 | 378 | { |
duke@435 | 379 | assert(method() == if_hot->method(), "consistent choices"); |
duke@435 | 380 | _call_info = ci; |
duke@435 | 381 | _if_cold = if_cold; |
duke@435 | 382 | _if_hot = if_hot; |
duke@435 | 383 | _is_virtual = if_cold->is_virtual(); |
duke@435 | 384 | _is_inline = if_hot->is_inline(); |
duke@435 | 385 | } |
duke@435 | 386 | |
duke@435 | 387 | virtual bool is_inline() const { return _is_inline; } |
duke@435 | 388 | virtual bool is_virtual() const { return _is_virtual; } |
duke@435 | 389 | virtual bool is_deferred() const { return true; } |
duke@435 | 390 | |
duke@435 | 391 | virtual JVMState* generate(JVMState* jvms); |
duke@435 | 392 | }; |
duke@435 | 393 | |
duke@435 | 394 | |
duke@435 | 395 | CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci, |
duke@435 | 396 | CallGenerator* if_cold, |
duke@435 | 397 | CallGenerator* if_hot) { |
duke@435 | 398 | return new WarmCallGenerator(ci, if_cold, if_hot); |
duke@435 | 399 | } |
duke@435 | 400 | |
duke@435 | 401 | JVMState* WarmCallGenerator::generate(JVMState* jvms) { |
duke@435 | 402 | Compile* C = Compile::current(); |
duke@435 | 403 | if (C->log() != NULL) { |
duke@435 | 404 | C->log()->elem("warm_call bci='%d'", jvms->bci()); |
duke@435 | 405 | } |
duke@435 | 406 | jvms = _if_cold->generate(jvms); |
duke@435 | 407 | if (jvms != NULL) { |
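// Walk up from the map's control (CatchProj -> Catch -> Proj) to recover
// the CallJava node just emitted by the cold-path generator.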
duke@435 | 408 | Node* m = jvms->map()->control(); |
duke@435 | 409 | if (m->is_CatchProj()) m = m->in(0); else m = C->top(); |
duke@435 | 410 | if (m->is_Catch()) m = m->in(0); else m = C->top(); |
duke@435 | 411 | if (m->is_Proj()) m = m->in(0); else m = C->top(); |
duke@435 | 412 | if (m->is_CallJava()) { |
duke@435 | 413 | _call_info->set_call(m->as_Call()); |
duke@435 | 414 | _call_info->set_hot_cg(_if_hot); |
duke@435 | 415 | #ifndef PRODUCT |
duke@435 | 416 | if (PrintOpto || PrintOptoInlining) { |
duke@435 | 417 | tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci()); |
duke@435 | 418 | tty->print("WCI: "); |
duke@435 | 419 | _call_info->print(); |
duke@435 | 420 | } |
duke@435 | 421 | #endif |
duke@435 | 422 | _call_info->set_heat(_call_info->compute_heat()); |
duke@435 | 423 | C->set_warm_calls(_call_info->insert_into(C->warm_calls())); |
duke@435 | 424 | } |
duke@435 | 425 | } |
duke@435 | 426 | return jvms; |
duke@435 | 427 | } |
duke@435 | 428 | |
duke@435 | 429 | void WarmCallInfo::make_hot() { |
never@1515 | 430 | Unimplemented(); |
duke@435 | 431 | } |
duke@435 | 432 | |
duke@435 | 433 | void WarmCallInfo::make_cold() { |
duke@435 | 434 | // No action: Just dequeue. |
duke@435 | 435 | } |
duke@435 | 436 | |
duke@435 | 437 | |
duke@435 | 438 | //------------------------PredictedCallGenerator------------------------------ |
duke@435 | 439 | // Internal class which handles all out-of-line calls checking receiver type. |
duke@435 | 440 | class PredictedCallGenerator : public CallGenerator { |
duke@435 | 441 | ciKlass* _predicted_receiver; |
duke@435 | 442 | CallGenerator* _if_missed; |
duke@435 | 443 | CallGenerator* _if_hit; |
duke@435 | 444 | float _hit_prob; |
duke@435 | 445 | |
duke@435 | 446 | public: |
duke@435 | 447 | PredictedCallGenerator(ciKlass* predicted_receiver, |
duke@435 | 448 | CallGenerator* if_missed, |
duke@435 | 449 | CallGenerator* if_hit, float hit_prob) |
duke@435 | 450 | : CallGenerator(if_missed->method()) |
duke@435 | 451 | { |
duke@435 | 452 | // The call profile data may predict the hit_prob as extreme as 0 or 1. |
duke@435 | 453 | // Remove the extreme values from the range. |
duke@435 | 454 | if (hit_prob > PROB_MAX) hit_prob = PROB_MAX; |
duke@435 | 455 | if (hit_prob < PROB_MIN) hit_prob = PROB_MIN; |
duke@435 | 456 | |
duke@435 | 457 | _predicted_receiver = predicted_receiver; |
duke@435 | 458 | _if_missed = if_missed; |
duke@435 | 459 | _if_hit = if_hit; |
duke@435 | 460 | _hit_prob = hit_prob; |
duke@435 | 461 | } |
duke@435 | 462 | |
duke@435 | 463 | virtual bool is_virtual() const { return true; } |
duke@435 | 464 | virtual bool is_inline() const { return _if_hit->is_inline(); } |
duke@435 | 465 | virtual bool is_deferred() const { return _if_hit->is_deferred(); } |
duke@435 | 466 | |
duke@435 | 467 | virtual JVMState* generate(JVMState* jvms); |
duke@435 | 468 | }; |
duke@435 | 469 | |
duke@435 | 470 | |
duke@435 | 471 | CallGenerator* CallGenerator::for_predicted_call(ciKlass* predicted_receiver, |
duke@435 | 472 | CallGenerator* if_missed, |
duke@435 | 473 | CallGenerator* if_hit, |
duke@435 | 474 | float hit_prob) { |
duke@435 | 475 | return new PredictedCallGenerator(predicted_receiver, if_missed, if_hit, hit_prob); |
duke@435 | 476 | } |
duke@435 | 477 | |
duke@435 | 478 | |
duke@435 | 479 | JVMState* PredictedCallGenerator::generate(JVMState* jvms) { |
duke@435 | 480 | GraphKit kit(jvms); |
duke@435 | 481 | PhaseGVN& gvn = kit.gvn(); |
duke@435 | 482 | // We need an explicit receiver null_check before checking its type. |
duke@435 | 483 | // We share a map with the caller, so his JVMS gets adjusted. |
duke@435 | 484 | Node* receiver = kit.argument(0); |
duke@435 | 485 | |
duke@435 | 486 | CompileLog* log = kit.C->log(); |
duke@435 | 487 | if (log != NULL) { |
duke@435 | 488 | log->elem("predicted_call bci='%d' klass='%d'", |
duke@435 | 489 | jvms->bci(), log->identify(_predicted_receiver)); |
duke@435 | 490 | } |
duke@435 | 491 | |
duke@435 | 492 | receiver = kit.null_check_receiver(method()); |
duke@435 | 493 | if (kit.stopped()) { |
duke@435 | 494 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 495 | } |
duke@435 | 496 | |
duke@435 | 497 | Node* exact_receiver = receiver; // will get updated in place... |
duke@435 | 498 | Node* slow_ctl = kit.type_check_receiver(receiver, |
duke@435 | 499 | _predicted_receiver, _hit_prob, |
duke@435 | 500 | &exact_receiver); |
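// slow_ctl is the control path taken when the receiver's klass does not
// match the prediction; it is handed to _if_missed below. On the passing
// path, exact_receiver is the receiver cast to the predicted type.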
duke@435 | 501 | |
duke@435 | 502 | SafePointNode* slow_map = NULL; |
duke@435 | 503 | JVMState* slow_jvms; |
duke@435 | 504 | { PreserveJVMState pjvms(&kit); |
duke@435 | 505 | kit.set_control(slow_ctl); |
duke@435 | 506 | if (!kit.stopped()) { |
duke@435 | 507 | slow_jvms = _if_missed->generate(kit.sync_jvms()); |
duke@435 | 508 | assert(slow_jvms != NULL, "miss path must not fail to generate"); |
duke@435 | 509 | kit.add_exception_states_from(slow_jvms); |
duke@435 | 510 | kit.set_map(slow_jvms->map()); |
duke@435 | 511 | if (!kit.stopped()) |
duke@435 | 512 | slow_map = kit.stop(); |
duke@435 | 513 | } |
duke@435 | 514 | } |
duke@435 | 515 | |
kvn@728 | 516 | if (kit.stopped()) { |
kvn@728 | 517 | // The instance does not exactly match the desired type. |
kvn@728 | 518 | kit.set_jvms(slow_jvms); |
kvn@728 | 519 | return kit.transfer_exceptions_into_jvms(); |
kvn@728 | 520 | } |
kvn@728 | 521 | |
duke@435 | 522 | // fall through if the instance exactly matches the desired type |
duke@435 | 523 | kit.replace_in_map(receiver, exact_receiver); |
duke@435 | 524 | |
duke@435 | 525 | // Make the hot call: |
duke@435 | 526 | JVMState* new_jvms = _if_hit->generate(kit.sync_jvms()); |
duke@435 | 527 | if (new_jvms == NULL) { |
duke@435 | 528 | // Inline failed, so make a direct call. |
duke@435 | 529 | assert(_if_hit->is_inline(), "must have been a failed inline"); |
duke@435 | 530 | CallGenerator* cg = CallGenerator::for_direct_call(_if_hit->method()); |
duke@435 | 531 | new_jvms = cg->generate(kit.sync_jvms()); |
duke@435 | 532 | } |
duke@435 | 533 | kit.add_exception_states_from(new_jvms); |
duke@435 | 534 | kit.set_jvms(new_jvms); |
duke@435 | 535 | |
duke@435 | 536 | // Need to merge slow and fast? |
duke@435 | 537 | if (slow_map == NULL) { |
duke@435 | 538 | // The fast path is the only path remaining. |
duke@435 | 539 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 540 | } |
duke@435 | 541 | |
duke@435 | 542 | if (kit.stopped()) { |
duke@435 | 543 | // Inlined method threw an exception, so it's just the slow path after all. |
duke@435 | 544 | kit.set_jvms(slow_jvms); |
duke@435 | 545 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 546 | } |
duke@435 | 547 | |
duke@435 | 548 | // Finish the diamond. |
duke@435 | 549 | kit.C->set_has_split_ifs(true); // Has chance for split-if optimization |
duke@435 | 550 | RegionNode* region = new (kit.C, 3) RegionNode(3); |
duke@435 | 551 | region->init_req(1, kit.control()); |
duke@435 | 552 | region->init_req(2, slow_map->control()); |
duke@435 | 553 | kit.set_control(gvn.transform(region)); |
duke@435 | 554 | Node* iophi = PhiNode::make(region, kit.i_o(), Type::ABIO); |
duke@435 | 555 | iophi->set_req(2, slow_map->i_o()); |
duke@435 | 556 | kit.set_i_o(gvn.transform(iophi)); |
duke@435 | 557 | kit.merge_memory(slow_map->merged_memory(), region, 2); |
duke@435 | 558 | uint tos = kit.jvms()->stkoff() + kit.sp(); |
duke@435 | 559 | uint limit = slow_map->req(); |
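// Merge the remaining JVM state from the fast and slow paths: make a Phi
// for each map slot where the two maps disagree.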
duke@435 | 560 | for (uint i = TypeFunc::Parms; i < limit; i++) { |
duke@435 | 561 | // Skip unused stack slots; fast forward to monoff(); |
duke@435 | 562 | if (i == tos) { |
duke@435 | 563 | i = kit.jvms()->monoff(); |
duke@435 | 564 | if( i >= limit ) break; |
duke@435 | 565 | } |
duke@435 | 566 | Node* m = kit.map()->in(i); |
duke@435 | 567 | Node* n = slow_map->in(i); |
duke@435 | 568 | if (m != n) { |
duke@435 | 569 | const Type* t = gvn.type(m)->meet(gvn.type(n)); |
duke@435 | 570 | Node* phi = PhiNode::make(region, m, t); |
duke@435 | 571 | phi->set_req(2, n); |
duke@435 | 572 | kit.map()->set_req(i, gvn.transform(phi)); |
duke@435 | 573 | } |
duke@435 | 574 | } |
duke@435 | 575 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 576 | } |
duke@435 | 577 | |
duke@435 | 578 | |
duke@435 | 579 | //-------------------------UncommonTrapCallGenerator----------------------------- |
duke@435 | 580 | // Internal class which replaces the call with an unconditional uncommon trap. |
duke@435 | 581 | class UncommonTrapCallGenerator : public CallGenerator { |
duke@435 | 582 | Deoptimization::DeoptReason _reason; |
duke@435 | 583 | Deoptimization::DeoptAction _action; |
duke@435 | 584 | |
duke@435 | 585 | public: |
duke@435 | 586 | UncommonTrapCallGenerator(ciMethod* m, |
duke@435 | 587 | Deoptimization::DeoptReason reason, |
duke@435 | 588 | Deoptimization::DeoptAction action) |
duke@435 | 589 | : CallGenerator(m) |
duke@435 | 590 | { |
duke@435 | 591 | _reason = reason; |
duke@435 | 592 | _action = action; |
duke@435 | 593 | } |
duke@435 | 594 | |
duke@435 | 595 | virtual bool is_virtual() const { ShouldNotReachHere(); return false; } |
duke@435 | 596 | virtual bool is_trap() const { return true; } |
duke@435 | 597 | |
duke@435 | 598 | virtual JVMState* generate(JVMState* jvms); |
duke@435 | 599 | }; |
duke@435 | 600 | |
duke@435 | 601 | |
duke@435 | 602 | CallGenerator* |
duke@435 | 603 | CallGenerator::for_uncommon_trap(ciMethod* m, |
duke@435 | 604 | Deoptimization::DeoptReason reason, |
duke@435 | 605 | Deoptimization::DeoptAction action) { |
duke@435 | 606 | return new UncommonTrapCallGenerator(m, reason, action); |
duke@435 | 607 | } |
duke@435 | 608 | |
duke@435 | 609 | |
duke@435 | 610 | JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) { |
duke@435 | 611 | GraphKit kit(jvms); |
duke@435 | 612 | // Take the trap with arguments pushed on the stack. (Cf. null_check_receiver). |
duke@435 | 613 | int nargs = method()->arg_size(); |
duke@435 | 614 | kit.inc_sp(nargs); |
duke@435 | 615 | assert(nargs <= kit.sp() && kit.sp() <= jvms->stk_size(), "sane sp w/ args pushed"); |
duke@435 | 616 | if (_reason == Deoptimization::Reason_class_check && |
duke@435 | 617 | _action == Deoptimization::Action_maybe_recompile) { |
duke@435 | 618 | // Temp fix for 6529811 |
duke@435 | 619 | // Don't allow uncommon_trap to override our decision to recompile in the event |
duke@435 | 620 | // of a class cast failure for a monomorphic call as it will never let us convert |
duke@435 | 621 | // the call to either bi-morphic or megamorphic and can lead to unc-trap loops |
duke@435 | 622 | bool keep_exact_action = true; |
duke@435 | 623 | kit.uncommon_trap(_reason, _action, NULL, "monomorphic vcall checkcast", false, keep_exact_action); |
duke@435 | 624 | } else { |
duke@435 | 625 | kit.uncommon_trap(_reason, _action); |
duke@435 | 626 | } |
duke@435 | 627 | return kit.transfer_exceptions_into_jvms(); |
duke@435 | 628 | } |
duke@435 | 629 | |
duke@435 | 630 | // (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.) |
duke@435 | 631 | |
duke@435 | 632 | // (Note: Merged hook_up_exits into ParseGenerator::generate.) |
duke@435 | 633 | |
duke@435 | 634 | #define NODES_OVERHEAD_PER_METHOD (30.0) |
duke@435 | 635 | #define NODES_PER_BYTECODE (9.5) |
duke@435 | 636 | |
duke@435 | 637 | void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) { |
duke@435 | 638 | int call_count = profile.count(); |
duke@435 | 639 | int code_size = call_method->code_size(); |
duke@435 | 640 | |
duke@435 | 641 | // Expected execution count is based on the historical count: |
duke@435 | 642 | _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor); |
duke@435 | 643 | |
duke@435 | 644 | // Expected profit from inlining, in units of simple call-overheads. |
duke@435 | 645 | _profit = 1.0; |
duke@435 | 646 | |
duke@435 | 647 | // Expected work performed by the call in units of call-overheads. |
duke@435 | 648 | // %%% need an empirical curve fit for "work" (time in call) |
duke@435 | 649 | float bytecodes_per_call = 3; |
duke@435 | 650 | _work = 1.0 + code_size / bytecodes_per_call; |
duke@435 | 651 | |
duke@435 | 652 | // Expected size of compilation graph: |
duke@435 | 653 | // -XX:+PrintParseStatistics once reported: |
duke@435 | 654 | // Methods seen: 9184 Methods parsed: 9184 Nodes created: 1582391 |
duke@435 | 655 | // Histogram of 144298 parsed bytecodes: |
duke@435 | 656 | // %%% Need a better predictor for graph size. |
duke@435 | 657 | _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size); |
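// For illustration: with the constants above, a 100-bytecode callee is
// estimated at roughly 30 + 9.5 * 100 = 980 graph nodes.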
duke@435 | 658 | } |
duke@435 | 659 | |
duke@435 | 660 | // is_cold: Return true if the node should never be inlined. |
duke@435 | 661 | // This is true if any of the key metrics are extreme. |
duke@435 | 662 | bool WarmCallInfo::is_cold() const { |
duke@435 | 663 | if (count() < WarmCallMinCount) return true; |
duke@435 | 664 | if (profit() < WarmCallMinProfit) return true; |
duke@435 | 665 | if (work() > WarmCallMaxWork) return true; |
duke@435 | 666 | if (size() > WarmCallMaxSize) return true; |
duke@435 | 667 | return false; |
duke@435 | 668 | } |
duke@435 | 669 | |
duke@435 | 670 | // is_hot: Return true if the node should be inlined immediately. |
duke@435 | 671 | // This is true if any of the key metrics are extreme. |
duke@435 | 672 | bool WarmCallInfo::is_hot() const { |
duke@435 | 673 | assert(!is_cold(), "eliminate is_cold cases before testing is_hot"); |
duke@435 | 674 | if (count() >= HotCallCountThreshold) return true; |
duke@435 | 675 | if (profit() >= HotCallProfitThreshold) return true; |
duke@435 | 676 | if (work() <= HotCallTrivialWork) return true; |
duke@435 | 677 | if (size() <= HotCallTrivialSize) return true; |
duke@435 | 678 | return false; |
duke@435 | 679 | } |
duke@435 | 680 | |
duke@435 | 681 | // compute_heat: |
duke@435 | 682 | float WarmCallInfo::compute_heat() const { |
duke@435 | 683 | assert(!is_cold(), "compute heat only on warm nodes"); |
duke@435 | 684 | assert(!is_hot(), "compute heat only on warm nodes"); |
duke@435 | 685 | int min_size = MAX2(0, (int)HotCallTrivialSize); |
duke@435 | 686 | int max_size = MIN2(500, (int)WarmCallMaxSize); |
duke@435 | 687 | float method_size = (size() - min_size) / MAX2(1, max_size - min_size); |
duke@435 | 688 | float size_factor; |
duke@435 | 689 | if (method_size < 0.05) size_factor = 4; // 2 sigmas better than avg. |
duke@435 | 690 | else if (method_size < 0.15) size_factor = 2; // 1 sigma better than avg. |
duke@435 | 691 | else if (method_size < 0.5) size_factor = 1; // better than avg. |
duke@435 | 692 | else size_factor = 0.5; // worse than avg. |
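// For instance, with count() == 200, profit() == 1.0 and method_size just
// under 0.15, size_factor is 2 and the resulting heat is 400.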
duke@435 | 693 | return (count() * profit() * size_factor); |
duke@435 | 694 | } |
duke@435 | 695 | |
duke@435 | 696 | bool WarmCallInfo::warmer_than(WarmCallInfo* that) { |
duke@435 | 697 | assert(this != that, "compare only different WCIs"); |
duke@435 | 698 | assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st"); |
duke@435 | 699 | if (this->heat() > that->heat()) return true; |
duke@435 | 700 | if (this->heat() < that->heat()) return false; |
duke@435 | 701 | assert(this->heat() == that->heat(), "no NaN heat allowed"); |
duke@435 | 702 | // Equal heat. Break the tie some other way. |
duke@435 | 703 | if (!this->call() || !that->call()) return (address)this > (address)that; |
duke@435 | 704 | return this->call()->_idx > that->call()->_idx; |
duke@435 | 705 | } |
duke@435 | 706 | |
duke@435 | 707 | //#define UNINIT_NEXT ((WarmCallInfo*)badAddress) |
duke@435 | 708 | #define UNINIT_NEXT ((WarmCallInfo*)NULL) |
duke@435 | 709 | |
duke@435 | 710 | WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) { |
duke@435 | 711 | assert(next() == UNINIT_NEXT, "not yet on any list"); |
duke@435 | 712 | WarmCallInfo* prev_p = NULL; |
duke@435 | 713 | WarmCallInfo* next_p = head; |
duke@435 | 714 | while (next_p != NULL && next_p->warmer_than(this)) { |
duke@435 | 715 | prev_p = next_p; |
duke@435 | 716 | next_p = prev_p->next(); |
duke@435 | 717 | } |
duke@435 | 718 | // Install this between prev_p and next_p. |
duke@435 | 719 | this->set_next(next_p); |
duke@435 | 720 | if (prev_p == NULL) |
duke@435 | 721 | head = this; |
duke@435 | 722 | else |
duke@435 | 723 | prev_p->set_next(this); |
duke@435 | 724 | return head; |
duke@435 | 725 | } |
duke@435 | 726 | |
duke@435 | 727 | WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) { |
duke@435 | 728 | WarmCallInfo* prev_p = NULL; |
duke@435 | 729 | WarmCallInfo* next_p = head; |
duke@435 | 730 | while (next_p != this) { |
duke@435 | 731 | assert(next_p != NULL, "this must be in the list somewhere"); |
duke@435 | 732 | prev_p = next_p; |
duke@435 | 733 | next_p = prev_p->next(); |
duke@435 | 734 | } |
duke@435 | 735 | next_p = this->next(); |
duke@435 | 736 | debug_only(this->set_next(UNINIT_NEXT)); |
duke@435 | 737 | // Remove this from between prev_p and next_p. |
duke@435 | 738 | if (prev_p == NULL) |
duke@435 | 739 | head = next_p; |
duke@435 | 740 | else |
duke@435 | 741 | prev_p->set_next(next_p); |
duke@435 | 742 | return head; |
duke@435 | 743 | } |
duke@435 | 744 | |
duke@435 | 745 | WarmCallInfo* WarmCallInfo::_always_hot = NULL; |
duke@435 | 746 | WarmCallInfo* WarmCallInfo::_always_cold = NULL; |
duke@435 | 747 | |
duke@435 | 748 | WarmCallInfo* WarmCallInfo::always_hot() { |
duke@435 | 749 | if (_always_hot == NULL) { |
duke@435 | 750 | static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; |
duke@435 | 751 | WarmCallInfo* ci = (WarmCallInfo*) bits; |
duke@435 | 752 | ci->_profit = ci->_count = MAX_VALUE(); |
duke@435 | 753 | ci->_work = ci->_size = MIN_VALUE(); |
duke@435 | 754 | _always_hot = ci; |
duke@435 | 755 | } |
duke@435 | 756 | assert(_always_hot->is_hot(), "must always be hot"); |
duke@435 | 757 | return _always_hot; |
duke@435 | 758 | } |
duke@435 | 759 | |
duke@435 | 760 | WarmCallInfo* WarmCallInfo::always_cold() { |
duke@435 | 761 | if (_always_cold == NULL) { |
duke@435 | 762 | static double bits[sizeof(WarmCallInfo) / sizeof(double) + 1] = {0}; |
duke@435 | 763 | WarmCallInfo* ci = (WarmCallInfo*) bits; |
duke@435 | 764 | ci->_profit = ci->_count = MIN_VALUE(); |
duke@435 | 765 | ci->_work = ci->_size = MAX_VALUE(); |
duke@435 | 766 | _always_cold = ci; |
duke@435 | 767 | } |
duke@435 | 768 | assert(_always_cold->is_cold(), "must always be cold"); |
duke@435 | 769 | return _always_cold; |
duke@435 | 770 | } |
duke@435 | 771 | |
duke@435 | 772 | |
duke@435 | 773 | #ifndef PRODUCT |
duke@435 | 774 | |
duke@435 | 775 | void WarmCallInfo::print() const { |
duke@435 | 776 | tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p", |
duke@435 | 777 | is_cold() ? "cold" : is_hot() ? "hot " : "warm", |
duke@435 | 778 | count(), profit(), work(), size(), compute_heat(), next()); |
duke@435 | 779 | tty->cr(); |
duke@435 | 780 | if (call() != NULL) call()->dump(); |
duke@435 | 781 | } |
duke@435 | 782 | |
duke@435 | 783 | void print_wci(WarmCallInfo* ci) { |
duke@435 | 784 | ci->print(); |
duke@435 | 785 | } |
duke@435 | 786 | |
duke@435 | 787 | void WarmCallInfo::print_all() const { |
duke@435 | 788 | for (const WarmCallInfo* p = this; p != NULL; p = p->next()) |
duke@435 | 789 | p->print(); |
duke@435 | 790 | } |
duke@435 | 791 | |
duke@435 | 792 | int WarmCallInfo::count_all() const { |
duke@435 | 793 | int cnt = 0; |
duke@435 | 794 | for (const WarmCallInfo* p = this; p != NULL; p = p->next()) |
duke@435 | 795 | cnt++; |
duke@435 | 796 | return cnt; |
duke@435 | 797 | } |
duke@435 | 798 | |
duke@435 | 799 | #endif //PRODUCT |