/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.


// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}


bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}


//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.
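
// A sketch of the inline-cache life cycle implemented below (states named as
// in the predicates further down; the exact patching sequence is architecture
// dependent):
//
//   clean                  monomorphic                     megamorphic
//   (resolve stub)  --->   (nmethod entry point, or  --->  (vtable/itable stub)
//                           interpreter via a
//                           CompiledICHolder/static stub)
//
// Transitions that cannot be patched in place MT-safely are staged through an
// ICStub allocated in the InlineCacheBuffer and completed at the next
// safepoint; while the stub is pending, the call site is "in transition
// state" and its real destination/value must be read through the buffer (see
// ic_destination() and cached_value()).  A typical resolution path (see
// SharedRuntime) fills in a CompiledICInfo via compute_monomorphic_entry()
// and then applies it with set_to_monomorphic().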

void* CompiledIC::cached_value() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");

  if (!is_in_transition_state()) {
    void* data = (void*)_value->data();
    // If we let the metadata value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC metadatas, because of patching races");
    return (data == (void*)Universe::non_oop_word()) ? NULL : data;
  } else {
    return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
  }
}
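
// internal_set_ic_destination() below is the single low-level patching
// primitive: entry_point is the new call target, is_icstub says whether that
// target is a transition stub (in which case the value slot must be left
// alone), and cache/is_icholder describe the new cached value - a Metadata*
// (Klass* or Method*) or a CompiledICHolder*.  An empty value slot is
// represented by Universe::non_oop_word(), never by a raw NULL, so a reader
// racing with a patcher cannot observe a half-cleared site.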

void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized() || cache == NULL, "an optimized virtual call does not have a cached metadata");
  assert (cache == NULL || cache != (Metadata*)badOopVal, "invalid metadata");

  assert(!is_icholder || is_icholder_entry(entry_point), "must be");

  // Don't use ic_destination for this test since that forwards
  // through ICBuffer instead of returning the actual current state of
  // the CompiledIC.
  if (is_icholder_entry(_ic_call->destination())) {
    // When patching for the ICStub case the cached value isn't
    // overwritten until the ICStub is copied into the CompiledIC during
    // the next safepoint.  Make sure that the CompiledICHolder* is
    // marked for release at this point since it won't be identifiable
    // once the entry point is overwritten.
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data());
  }

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print(" changing destination to " INTPTR_FORMAT, entry_point);
    if (!is_optimized()) {
      tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", (address)cache);
    }
    if (is_icstub) {
      tty->print(" (icstub)");
    }
    tty->cr();
  }

  {
    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
    assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
    _ic_call->set_destination_mt_safe(entry_point);
  }

  if (is_optimized() || is_icstub) {
    // Optimized call sites don't have a cache value and ICStub call
    // sites only change the entry point.  Changing the value in that
    // case could lead to MT safety issues.
    assert(cache == NULL, "must be null");
    return;
  }

  if (cache == NULL)  cache = (void*)Universe::non_oop_word();

  _value->set_data((intptr_t)cache);
}


void CompiledIC::set_ic_destination(ICStub* stub) {
  internal_set_ic_destination(stub->code_begin(), true, NULL, false);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


bool CompiledIC::is_icholder_call() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return !_is_optimized && is_icholder_entry(ic_destination());
}

// Returns the native address of the 'call' instruction in the inline cache.
// Used by the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
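
// Megamorphic transition: the call is redirected to a freshly created vtable
// (virtual call) or itable (interface call) dispatch stub, so the cached
// value becomes a dispatch input rather than an expected receiver klass -
// the interface Klass* for itable calls, the target Method* for vtable
// calls.  The switch is always staged through a transition stub, since a
// racing thread may be about to jump through the old destination.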

void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    Klass* k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   instruction_address(), method->print_value_string(), entry);
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}


// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_value. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_value is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to vep without using the inline cache (i.e., cached_value == NULL)
#ifdef ASSERT
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_metadata() != NULL && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
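
// A call site can reach the interpreter in two shapes, distinguished below:
// a non-optimized site jumps to a c2i adapter blob and identifies the callee
// through the CompiledICHolder in its value slot, while an optimized site
// jumps to the static-call stub embedded in its own nmethod (which loads the
// Method* and then jumps to the c2i entry).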

bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // calls to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}
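
// Setting a site back to the clean state only has to patch the destination
// (to the appropriate resolve stub); the value slot is wiped as well for
// non-optimized sites.  This is done in place when it is known to be safe -
// at a safepoint, or when only the destination word changes (optimized
// sites) - and through a transition stub otherwise.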

void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the metadata has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    if (is_optimized()) {
      set_ic_destination(entry);
    } else {
      set_ic_destination_and_value(entry, (void*)NULL);
    }
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check");
  return is_clean;
}
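
// Monomorphic transition.  Four cases fall out of the code below, roughly:
//
//   callee state   site kind       action
//   ------------   -------------   ------------------------------------------
//   interpreted    optimized       patch the static-call stub (Method* + c2i)
//   interpreted    non-optimized   transition stub installing a CompiledICHolder
//   compiled       any, "safe"     patch destination (and value) in place
//   compiled       otherwise       transition stub, completed at a safepoint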

void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info.to_interpreter()) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (Method*)info.cached_metadata());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                       instruction_address(),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      InlineCacheBuffer::create_transition_stub(this, info.claim_cached_icholder(), info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry());
    } else {
      if (is_optimized()) {
        set_ic_destination(info.entry());
      } else {
        set_ic_destination_and_value(info.entry(), info.cached_metadata());
      }
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_metadata() == NULL || info.cached_metadata()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                     instruction_address(),
                     ((Klass*)info.cached_metadata())->print_value_string(),
                     (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
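
// compute_monomorphic_entry() below fills in a CompiledICInfo without
// touching the call site.  A sketch of its decision table:
//
//   method->code() != NULL?   static_bound || is_optimized?   resulting info
//   yes                       yes                             verified entry point, no cached value
//   yes                       no                              unverified entry point + receiver Klass*
//   no                        yes (optimized)                 c2i entry + Method* (via static stub)
//   no                        no                              unverified c2i entry + new CompiledICHolder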

// is_optimized:  Compiler has generated an optimized call (i.e., no inline
//                cache).
// static_bound:  The call can be statically bound (i.e., no need to use an
//                inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized);
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a Method* to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (statically-bindable method is called via
    // dynamically-dispatched call). note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info.set_interpreter_entry(method()->get_c2i_entry(), method());
    } else {
      // Use icholder entry
      CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
      info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
    }
  }
  assert(info.is_optimized() == is_optimized, "must agree");
}


bool CompiledIC::is_icholder_entry(address entry) {
  CodeBlob* cb = CodeCache::find_blob_unsafe(entry);
  return (cb != NULL && cb->is_adapter_blob());
}


CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
  : _ic_call(call)
{
  address ic_call = call->instruction_address();

  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  // search for the ic_call at the given address
  RelocIterator iter(nm, ic_call, ic_call+1);
  bool ret = iter.next();
  assert(ret == true, "relocInfo must exist at this address");
  assert(iter.addr() == ic_call, "must find ic_call");
  if (iter.type() == relocInfo::virtual_call_type) {
    virtual_call_Relocation* r = iter.virtual_call_reloc();
    _is_optimized = false;
    _value = nativeMovConstReg_at(r->cached_value());
  } else {
    assert(iter.type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
    _is_optimized = true;
    _value = NULL;
  }
}


// ----------------------------------------------------------------------------
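
// A CompiledStaticCall is the fixed-callee counterpart of a CompiledIC: the
// site is a direct call plus a static stub in the same nmethod, where the
// stub is a NativeMovConstReg (holding the Method* for the c2i adapter)
// followed by a NativeJump.  Calling interpreted code goes
// call -> stub -> c2i entry; calling compiled code patches the call straight
// to the verified entry point and leaves the stub alone.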

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}


void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}


void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination(),
             is_optimized() ? NULL : cached_value());
}


void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif