--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/share/vm/code/compiledIC.cpp	Sat Dec 01 00:00:00 2007 +0000
@@ -0,0 +1,662 @@
/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_compiledIC.cpp.incl"


// Every time a compiled IC is changed or its type is accessed, either the
// CompiledIC_lock must be held or we must be at a safepoint.

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since these accessors might
// not be MT-safe to use.

void CompiledIC::set_cached_oop(oop cache) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");
  assert(cache == NULL || cache != badOop, "invalid oop");

  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }

  if (cache == NULL) cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // fix up the relocations
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
  return;
}


oop CompiledIC::cached_oop() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // If we let the oop value here be initialized to zero...
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}
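
// In effect, the store in set_cached_oop() and the load in cached_oop() above
// maintain the invariant that the IC data slot never holds a raw NULL: an
// empty cache is encoded as the sentinel Universe::non_oop_word() and mapped
// back to NULL on the way out, so a reader racing with a patch cannot mistake
// a cleared slot for a valid oop.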
void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print("  ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}


address CompiledIC::ic_destination() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}


// Returns the destination of the 'call' instruction in the inline cache,
// which in a transition state is the address of the ICBuffer stub. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}
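
// A CompiledIC is "in transition" while its call instruction points into the
// InlineCacheBuffer: the buffer stub carries the pending (cached oop,
// destination) pair until it can be installed in the call site safely. While
// in that state, the logical values must be read back through the
// InlineCacheBuffer (as cached_oop() and ic_destination() above do) rather
// than from the call-site instructions themselves.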

//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.


void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                  instruction_address(), method->print_value_string(), entry);
  }

  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}


// true if the destination is the megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_oop: it is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use the unsafe lookup, since an inline cache might point to a zombie
  // method. The zombie method is guaranteed to still exist, since we only
  // remove methods after all inline caches have been cleaned up.
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls.
  // This assertion is invalid for compiler1: a call that does not look
  // optimized (no static stub) can be used for calling directly to the vep
  // without using the inline cache (i.e., cached_oop == NULL).
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert(is_c1_method ||
         !is_monomorphic ||
         is_optimized() ||
         (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}
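
// Taken together, the predicates in this file distinguish four states of a
// compiled inline cache (a rough summary; the predicates themselves are
// authoritative):
//
//   clean         - destination is one of SharedRuntime's resolve stubs;
//                   no receiver information is cached yet
//   monomorphic   - destination is a single nmethod's entry point (compiled)
//                   or an adapter/static stub (interpreted); the cached oop
//                   is the receiver klass, or a compiledICHolder/methodOop
//                   for interpreted targets
//   megamorphic   - destination is a vtable/itable stub that dispatches on
//                   the actual receiver
//   in transition - destination points into the InlineCacheBuffer while an
//                   MT-unsafe update is pending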
bool CompiledIC::is_call_to_interpreted() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // This is a call to the interpreter if the destination is either an I2C
  // adapter blob (non-optimized call) or a static call stub (optimized call).
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // Must use the unsafe lookup, because the destination can be a zombie
    // (and we're cleaning), and the print_compiled_ic code wants to know if
    // the site (in the non-zombie method) calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been
  // set to NULL, so we only need to patch the destination.
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);
    // Kill any leftover stub we might have, too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}
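
// How these transitions are typically driven (a condensed, hedged sketch of
// the resolution path in SharedRuntime, which lives outside this file;
// CompiledIC_before comes from the companion header, and error handling and
// the megamorphic/recompile paths are elided):
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee, receiver_klass,
//                                         /*is_optimized*/ false,
//                                         /*static_bound*/ false, info, CHECK);
//   {
//     MutexLocker ml(CompiledIC_lock);
//     CompiledIC* ic = CompiledIC_before(caller_frame.pc());
//     if (ic->is_clean()) {
//       ic->set_to_monomorphic(info);
//     }
//   }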
void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  //
  // The above is no longer entirely true: SharedRuntime::fixup_callers_callsite
  // will change optimized callsites, and the ic_miss code will update a site
  // to monomorphic if it determines that a monomorphic call to the interpreter
  // can now be a monomorphic call to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call
  // target, and those transitions are MT-safe.

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // The call analysis (callee structure) specifies that the call is
      // optimized (either because of CHA or because the static target is
      // final). At code generation time, this call has been emitted as a
      // static call, so call via the stub.
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                      instruction_address(),
                      method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert(cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT-safe if we come from a clean cache and go through a
    // non-verified entry point.
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
    } else {
      set_ic_destination(info.entry());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
      tty->print_cr("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                    instruction_address(),
                    ((klassOop)info.cached_oop()())->print_value_string(),
                    (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed, so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}
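
// A note on the 'safe' predicate above: the destination and the cached oop
// cannot be updated atomically as a pair. An in-place update is therefore
// only done at a safepoint, or when the site is clean, statically bound or
// optimized, so that a racing mutator at worst sees a consistent old state.
// Every other combination is routed through an InlineCacheBuffer transition
// stub, which publishes the new (oop, destination) pair as a single unit.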

// is_optimized: the compiler has generated an optimized call (i.e., no
// inline cache). static_bound: the call can be statically bound (i.e., no
// need to use an inline cache).
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  info._is_optimized = is_optimized;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a methodOop to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However, in that case we will now notice that it is static_bound
    //     and convert the call into what looks to be an optimized virtual
    //     call. This causes problems in verifying the IC because it looks
    //     vanilla but is optimized. Code in is_call_to_interpreted is aware
    //     of this and weakens its asserts.

    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // Can't check this assert because we don't have the CompiledIC with which
    // to find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry      = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry      = method()->get_c2i_unverified_entry();
    }
  }
}


inline static RelocIterator parse_ic(CodeBlob* code, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  CodeBlob* code1 = code;
  return virtual_call_Relocation::parse_ic(code1, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}


CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}


// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset the stub here: it is too expensive to call find_stub.
  // Instead, rely on the caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}
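
// For CompiledStaticCall the state space is simpler, since there is no cached
// oop: a clean call targets SharedRuntime's resolve_static_call stub; a call
// to compiled code targets an nmethod entry inside the code cache; and a call
// to interpreted code targets the static call stub in the caller nmethod's
// own stub section (see is_call_to_interpreted() below).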

bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls to a stub. Hence, the
  // destination must be in the stub section of the nmethod that contains
  // the call.
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}


void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data()    == 0           || method_holder->data()    == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry,              "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid, we just clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache.
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert(CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}
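
// The static call stub manipulated in set_to_interpreted() above and in
// set_stub_to_clean() below consists of two native instructions that the
// compiler emits next to the call (shown as pseudo-assembly; the exact
// instruction sequence is defined by the platform code):
//
//   mov  reg, <methodOop>   ; NativeMovConstReg - callee for the c2i adapter
//   jmp  <entry>            ; NativeJump - interpreter/c2i entry point
//
// set_to_interpreted() fills in both values and then atomically redirects the
// call to the stub; clearing resets them to the sentinels 0 and (address)-1.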

// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case, entering the interpreter
    // puts a converter frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}


void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "MT-unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // Make sure the code pattern is actually a call imm32 instruction.
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}


void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}


void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);   // creation also verifies the object
  NativeJump*        jump          = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif