Thu, 22 May 2014 15:52:41 -0400
8037816: Fix for 8036122 breaks build with Xcode5/clang
8043029: Change 8037816 breaks HS build with older GCC versions which don't support diagnostic pragmas
8043164: Format warning in traceStream.hpp
Summary: Backport of main fix + two corrections, enables clang compilation, turns on format attributes, corrects/mutes warnings
Reviewed-by: kvn, coleenp, iveresov, twisti
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be set or we must be at a safe point.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = (void*)_value->data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}
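

// Low-level update of the call destination and, for non-optimized sites,
// the cached value. 'cache' is either a Metadata* or a CompiledICHolder*,
// as indicated by 'is_icholder'; 'is_icstub' marks a transition through
// the InlineCacheBuffer, where only the entry point may change.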
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint. Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point. Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }
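
  // A NULL cache value is published as the non_oop_word sentinel so that
  // racing readers never observe a raw NULL (see the assert in cached_value()).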
  if (cache == NULL) cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}
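

// The IC is "in transition" while its destination still points at an ICStub
// in the InlineCacheBuffer; the real destination and cached value are held
// by the stub until the transition completes.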
bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
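

// Transition this IC to the megamorphic state by pointing it at a shared
// vtable/itable stub. Returns false if the required stub is not available,
// so the caller can bail out and retry later.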
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (call_info->call_kind() == CallInfo::itable_call) {
    assert(bytecode == Bytecodes::_invokeinterface, "");
    int itable_index = call_info->itable_index();
    entry = VtableStubs::find_itable_stub(itable_index);
    if (entry == NULL) {
      return false;
    }
#ifdef ASSERT
    int index = call_info->resolved_method()->itable_index();
    assert(index == itable_index, "CallInfo pre-computes this");
#endif //ASSERT
    InstanceKlass* k = call_info->resolved_method()->method_holder();
    assert(k->verify_itable_index(itable_index), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
    // Can be different than selected_method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
    entry = VtableStubs::find_vtable_stub(vtable_index);
    if (entry == NULL) {
      return false;
    }
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
  return true;
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
  assert( is_c1_method ||
          !is_monomorphic ||
          is_optimized() ||
          (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}


void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                       p2i(instruction_address()),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                     p2i(instruction_address()),
                     ((Klass*)info.cached_metadata())->print_value_string(),
                     (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized:  Compiler has generated an optimized call (i.e., no inline cache)
// static_bound:  The call can be static bound (i.e., no need to use inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL && method_code->is_in_use()) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (statically-bindable method is called via
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}
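

// An IC entry point inside an adapter blob can only be the c2i unverified
// entry reached through a CompiledICHolder (see compute_monomorphic_entry).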
bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}

// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    p2i(instruction_address()),
                    p2i(info.entry()));
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL && m_code->is_in_use()) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}

address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? NULL : cached_value()));
}

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

#endif // !PRODUCT