src/share/vm/code/compiledIC.cpp

author:      never
date:        Wed, 01 Feb 2012 07:59:01 -0800
changeset:   3499 (aa3d708d67c4)
parent:      2708 (1d1603768966)
child:       4037 (da91efe96a93)
permissions: -rw-r--r--

7141200: log some interesting information in ring buffers for crashes
Reviewed-by: kvn, jrose, kevinw, brutisso, twisti, jmasa

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"

// Every time a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void CompiledIC::set_cached_oop(oop cache) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
  assert (cache == NULL || cache != badOop, "invalid oop");

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }

  if (cache == NULL)  cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // fix up the relocations
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
  return;
}

oop CompiledIC::cached_oop() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // If we let the oop value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}

void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}

address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}
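
// An inline cache is "in transition" while its call destination points into the
// InlineCacheBuffer; the buffered stub there completes the pending state change.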
bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
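
// Patch this inline cache to the megamorphic state: resolve a vtable stub (or an
// itable stub for invokeinterface without a vtable index) and install it through an
// InlineCacheBuffer transition stub.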
void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   instruction_address(), method->print_value_string(), entry);
  }

  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}

// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_oop. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub)
  // can be used for calling directly to the verified entry point without using the inline cache
  // (i.e., cached_oop == NULL).
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}

bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Call to interpreter if the destination is either calling to a stub (if it
  // is optimized), or calling to an I2C blob
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know if the site (in the non-zombie)
    // is to the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}
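
// Reset the inline cache to its initial (clean) state: clear the cached oop (for
// non-optimized calls) and point the call back at the runtime resolve stub so the
// next invocation re-resolves the callee.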
void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}

bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}
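
// Transition the inline cache to the monomorphic state described by 'info': either
// to interpreted code (via the static call stub or a CompiledICHolder) or to a
// compiled (nmethod) entry point.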
void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if the cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition the ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be a monomorphic call to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe.

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as a static call.
      // Call via stub.
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
           instruction_address(),
           method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());

      if (TraceICs) {
         ResourceMark rm(thread);
         tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
    } else {
      set_ic_destination(info.entry());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
        instruction_address(),
        ((klassOop)info.cached_oop()())->print_value_string(),
        (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

// is_optimized:   the compiler has generated an optimized call (i.e., no inline cache)
// static_bound:   the call can be statically bound (i.e., no need to use an inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  info._is_optimized = is_optimized;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry      = method_code->verified_entry_point();
    } else {
      entry      = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry      = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a methodOop to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However in that case we will now notice it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry      = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry      = method()->get_c2i_unverified_entry();
    }
  }
}
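
// Helper for the CompiledIC constructors below: walk the call site's relocation
// info to locate the cached-oop slot and determine whether the call was compiled
// as an optimized virtual call.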
inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address  first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  nmethod* tmp_nm = nm;
  return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}

CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}

// ----------------------------------------------------------------------------
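// CompiledStaticCall: call sites with a fixed callee; the destination is patched
// directly and, for interpreted callees, routed through a small per-call static stub.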

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here:  It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}

bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}

bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the destination
  // must be in the stub part of the nmethod that contains the call.
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}
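
// Point this static call at the interpreter: fill in the callee methodOop and the
// given entry point in the call's static stub, then redirect the call to that stub.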
void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data()    == 0           || method_holder->data()    == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}

void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if cache entry gets invalid - we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry  = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code.  In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry      = m()->get_c2i_entry();
  }
}
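
// Reset a static call stub to its unused state (methodOop cleared, jump target -1)
// so it can be reinitialized by a later set_to_interpreted().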
void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}

address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}

//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
          || is_optimized() || is_megamorphic(), "sanity check");
}

void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}

void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}

void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif
