Thu, 27 Jan 2011 16:11:27 -0800
6990754: Use native memory and reference counting to implement SymbolTable
Summary: move symbols from permgen into C heap and reference count them
Reviewed-by: never, acorn, jmasa, stefank
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/oopFactory.hpp"
#include "oops/methodOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/icache.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/events.hpp"


// Every time a compiled IC is changed or its type is being accessed,
// either the CompiledIC_lock must be held or we must be at a safepoint.
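//
// Inline-cache states, as reflected by the state queries below (a summary
// sketch of the code in this file):
//   clean        - the call goes to one of SharedRuntime's resolve stubs
//   monomorphic  - the call goes to compiled code (cached_oop holds the
//                  receiver klass) or to interpreted code (via a static-call
//                  stub or a compiledICHolder)
//   megamorphic  - the call goes to a vtable/itable stub
// A pending transition is parked in the InlineCacheBuffer; see
// is_in_transition_state().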

//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.

void CompiledIC::set_cached_oop(oop cache) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");
  assert (cache == NULL || cache != badOop, "invalid oop");

  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print_cr(" changing oop to " INTPTR_FORMAT, (address)cache);
  }
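
  // NULL must never be stored raw in the oop slot (see the patching-race
  // assert in cached_oop()); encode it as the non-oop sentinel word instead.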
  if (cache == NULL) cache = (oop)Universe::non_oop_word();

  *_oop_addr = cache;
  // fix up the relocations
  RelocIterator iter = _oops;
  while (iter.next()) {
    if (iter.type() == relocInfo::oop_type) {
      oop_Relocation* r = iter.oop_reloc();
      if (r->oop_addr() == _oop_addr)
        r->fix_oop_relocation();
    }
  }
  return;
}


oop CompiledIC::cached_oop() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert (!is_optimized(), "an optimized virtual call does not have a cached oop");

  if (!is_in_transition_state()) {
    oop data = *_oop_addr;
    // An empty slot holds the non-oop sentinel rather than a raw null;
    // see set_cached_oop().
    assert(data != NULL || Universe::non_oop_word() == NULL,
           "no raw nulls in CompiledIC oops, because of patching races");
    return (data == (oop)Universe::non_oop_word()) ? (oop)NULL : data;
  } else {
    return InlineCacheBuffer::cached_oop_for((CompiledIC *)this);
  }
}


void CompiledIC::set_ic_destination(address entry_point) {
  assert(entry_point != NULL, "must set legal entry point");
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (TraceCompiledIC) {
    tty->print(" ");
    print_compiled_ic();
    tty->print_cr(" changing destination to " INTPTR_FORMAT, entry_point);
  }
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  _ic_call->set_destination_mt_safe(entry_point);
}


address CompiledIC::ic_destination() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  if (!is_in_transition_state()) {
    return _ic_call->destination();
  } else {
    return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
  }
}


bool CompiledIC::is_in_transition_state() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  return InlineCacheBuffer::contains(_ic_call->destination());
}
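
// An IC is "in transition" while its destination points into the
// InlineCacheBuffer: the pending (cached oop, entry) pair lives in an ICStub
// there until it can be applied safely (the buffer is drained at a
// safepoint). While in transition, cached_oop() and ic_destination() above
// read the pending values from the stub rather than from the call site.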

// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
  assert(is_in_transition_state(), "should only be called when we are in a transition state");
  return _ic_call->destination();
}


//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.


void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
  methodHandle method = call_info->selected_method();
  bool is_invoke_interface = (bytecode == Bytecodes::_invokeinterface && !call_info->has_vtable_index());
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(method->is_oop(), "cannot be NULL and must be oop");
  assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
  assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");

  address entry;
  if (is_invoke_interface) {
    int index = klassItable::compute_itable_index(call_info->resolved_method()());
    entry = VtableStubs::create_stub(false, index, method());
    assert(entry != NULL, "entry not computed");
    klassOop k = call_info->resolved_method()->method_holder();
    assert(Klass::cast(k)->is_interface(), "sanity check");
    InlineCacheBuffer::create_transition_stub(this, k, entry);
  } else {
    // Can be different from method->vtable_index(), due to package-private etc.
    int vtable_index = call_info->vtable_index();
    entry = VtableStubs::create_stub(true, vtable_index, method());
    InlineCacheBuffer::create_transition_stub(this, method(), entry);
  }

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr ("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
                   instruction_address(), method->print_value_string(), entry);
  }

  Events::log("compiledIC " INTPTR_FORMAT " --> megamorphic " INTPTR_FORMAT, this, (address)method());
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_megamorphic(), "sanity check");
}
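
// Note: the megamorphic transition is normally driven by SharedRuntime's
// IC-miss handling, once a monomorphic site observes a receiver type other
// than the cached one.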

// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
  assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  assert(!is_optimized(), "an optimized call cannot be megamorphic");

  // Cannot rely on cached_oop. It is either an interface or a method.
  return VtableStubs::is_entry_point(ic_destination());
}

bool CompiledIC::is_call_to_compiled() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");

  // Use unsafe, since an inline cache might point to a zombie method. However, the zombie
  // method is guaranteed to still exist, since we only remove methods after all inline caches
  // have been cleaned up
  CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
  bool is_monomorphic = (cb != NULL && cb->is_nmethod());
  // Check that the cached_oop is a klass for non-optimized monomorphic calls
  // This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
  // for calling directly to the verified entry point, without using the inline cache (i.e., cached_oop == NULL)
#ifdef ASSERT
#ifdef TIERED
  CodeBlob* caller = CodeCache::find_blob_unsafe(instruction_address());
  bool is_c1_method = caller->is_compiled_by_c1();
#else
#ifdef COMPILER1
  bool is_c1_method = true;
#else
  bool is_c1_method = false;
#endif // COMPILER1
#endif // TIERED
  assert( is_c1_method ||
         !is_monomorphic ||
          is_optimized() ||
          (cached_oop() != NULL && cached_oop()->is_klass()), "sanity check");
#endif // ASSERT
  return is_monomorphic;
}


bool CompiledIC::is_call_to_interpreted() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // The call is to the interpreter if the destination is either a stub (for
  // an optimized call) or an I2C adapter blob.
  bool is_call_to_interpreted = false;
  if (!is_optimized()) {
    // must use unsafe because the destination can be a zombie (and we're cleaning)
    // and the print_compiled_ic code wants to know whether the site (in the
    // non-zombie case) calls into the interpreter.
    CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
    is_call_to_interpreted = (cb != NULL && cb->is_adapter_blob());
    assert(!is_call_to_interpreted || (cached_oop() != NULL && cached_oop()->is_compiledICHolder()), "sanity check");
  } else {
    // Check if we are calling into our own codeblob (i.e., to a stub)
    CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address());
    address dest = ic_destination();
#ifdef ASSERT
    {
      CodeBlob* db = CodeCache::find_blob_unsafe(dest);
      assert(!db->is_adapter_blob(), "must use stub!");
    }
#endif /* ASSERT */
    is_call_to_interpreted = cb->contains(dest);
  }
  return is_call_to_interpreted;
}


void CompiledIC::set_to_clean() {
  assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked(), "MT-unsafe call");
  if (TraceInlineCacheClearing || TraceICs) {
    tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", instruction_address());
    print();
  }

  address entry;
  if (is_optimized()) {
    entry = SharedRuntime::get_resolve_opt_virtual_call_stub();
  } else {
    entry = SharedRuntime::get_resolve_virtual_call_stub();
  }

  // A zombie transition will always be safe, since the oop has already been set to NULL, so
  // we only need to patch the destination
  bool safe_transition = is_optimized() || SafepointSynchronize::is_at_safepoint();

  if (safe_transition) {
    if (!is_optimized()) set_cached_oop(NULL);
    // Kill any leftover stub we might have too
    if (is_in_transition_state()) {
      ICStub* old_stub = ICStub_from_destination_address(stub_address());
      old_stub->clear();
    }
    set_ic_destination(entry);
  } else {
    // Unsafe transition - create stub.
    InlineCacheBuffer::create_transition_stub(this, NULL, entry);
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_clean(), "sanity check");
}


bool CompiledIC::is_clean() const {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  bool is_clean = false;
  address dest = ic_destination();
  is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() ||
             dest == SharedRuntime::get_resolve_virtual_call_stub();
  assert(!is_clean || is_optimized() || cached_oop() == NULL, "sanity check");
  return is_clean;
}


void CompiledIC::set_to_monomorphic(const CompiledICInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  //
  // The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
  // callsites. In addition, the ic_miss code will update a site to monomorphic if it determines
  // that a monomorphic call to the interpreter can now be monomorphic to compiled code.
  //
  // In both of these cases the only thing being modified is the jump/call target and these
  // transitions are mt_safe

  Thread *thread = Thread::current();
  if (info._to_interpreter) {
    // Call to interpreter
    if (info.is_optimized() && is_optimized()) {
      assert(is_clean(), "unsafe IC path");
      MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
      // the call analysis (callee structure) specifies that the call is optimized
      // (either because of CHA or the static target is final)
      // At code generation time, this call has been emitted as static call
      // Call via stub
      assert(info.cached_oop().not_null() && info.cached_oop()->is_method(), "sanity check");
      CompiledStaticCall* csc = compiledStaticCall_at(instruction_address());
      methodHandle method (thread, (methodOop)info.cached_oop()());
      csc->set_to_interpreted(method, info.entry());
      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
                       instruction_address(),
                       method->print_value_string());
      }
    } else {
      // Call via method-klass-holder
      assert(info.cached_oop().not_null(), "must be set");
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());

      if (TraceICs) {
        ResourceMark rm(thread);
        tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter via mkh", instruction_address());
      }
    }
  } else {
    // Call to compiled code
    bool static_bound = info.is_optimized() || (info.cached_oop().is_null());
#ifdef ASSERT
    CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
    assert (cb->is_nmethod(), "must be compiled!");
#endif /* ASSERT */

    // This is MT safe if we come from a clean-cache and go through a
    // non-verified entry point
    bool safe = SafepointSynchronize::is_at_safepoint() ||
                (!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));

    if (!safe) {
      InlineCacheBuffer::create_transition_stub(this, info.cached_oop()(), info.entry());
    } else {
      set_ic_destination(info.entry());
      if (!is_optimized()) set_cached_oop(info.cached_oop()());
    }

    if (TraceICs) {
      ResourceMark rm(thread);
      assert(info.cached_oop() == NULL || info.cached_oop()()->is_klass(), "must be");
      tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass) %s: %s",
                     instruction_address(),
                     ((klassOop)info.cached_oop()())->print_value_string(),
                     (safe) ? "" : "via stub");
    }
  }
  // We can't check this anymore. With lazy deopt we could have already
  // cleaned this IC entry before we even return. This is possible if
  // we ran out of space in the inline cache buffer trying to do the
  // set_next and we safepointed to free up space. This is a benign
  // race because the IC entry was complete when we safepointed so
  // cleaning it immediately is harmless.
  // assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}


// is_optimized:  the compiler has generated an optimized call (i.e., no inline cache)
// static_bound:  the call can be statically bound (i.e., no need to use an inline cache)
void CompiledIC::compute_monomorphic_entry(methodHandle method,
                                           KlassHandle receiver_klass,
                                           bool is_optimized,
                                           bool static_bound,
                                           CompiledICInfo& info,
                                           TRAPS) {
  info._is_optimized = is_optimized;

  nmethod* method_code = method->code();
  address entry = NULL;
  if (method_code != NULL) {
    // Call to compiled code
    if (static_bound || is_optimized) {
      entry = method_code->verified_entry_point();
    } else {
      entry = method_code->entry_point();
    }
  }
  if (entry != NULL) {
    // Call to compiled code
    info._entry = entry;
    if (static_bound || is_optimized) {
      info._cached_oop = Handle(THREAD, (oop)NULL);
    } else {
      info._cached_oop = receiver_klass;
    }
    info._to_interpreter = false;
  } else {
    // Note: the following problem exists with Compiler1:
    //   - at compile time we may or may not know if the destination is final
    //   - if we know that the destination is final, we will emit an optimized
    //     virtual call (no inline cache), and need a methodOop to make a call
    //     to the interpreter
    //   - if we do not know if the destination is final, we emit a standard
    //     virtual call, and use CompiledICHolder to call interpreted code
    //     (no static call stub has been generated)
    //     However, in that case we will now notice that it is static_bound
    //     and convert the call into what looks to be an optimized
    //     virtual call. This causes problems in verifying the IC because
    //     it looks vanilla but is optimized. Code in is_call_to_interpreted
    //     is aware of this and weakens its asserts.

    info._to_interpreter = true;
    // static_bound should imply is_optimized -- otherwise we have a
    // performance bug (a statically-bindable method is called via a
    // dynamically-dispatched call). Note: the reverse implication isn't
    // necessarily true -- the call may have been optimized based on compiler
    // analysis (static_bound is only based on "final" etc.)
#ifdef COMPILER2
#ifdef TIERED
#if defined(ASSERT)
    // can't check the assert because we don't have the CompiledIC with which to
    // find the address of the call instruction.
    //
    // CodeBlob* cb = find_blob_unsafe(instruction_address());
    // assert(cb->is_compiled_by_c1() || !static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // ASSERT
#else
    assert(!static_bound || is_optimized, "static_bound should imply is_optimized");
#endif // TIERED
#endif // COMPILER2
    if (is_optimized) {
      // Use stub entry
      info._entry = method()->get_c2i_entry();
      info._cached_oop = method;
    } else {
      // Use mkh entry
      oop holder = oopFactory::new_compiledICHolder(method, receiver_klass, CHECK);
      info._cached_oop = Handle(THREAD, holder);
      info._entry = method()->get_c2i_unverified_entry();
    }
  }
}
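
// Typical resolution flow, sketched (the actual caller is SharedRuntime's
// resolve helper; the names below are illustrative, not verbatim):
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee, receiver_klass,
//                                         ic->is_optimized(), static_bound,
//                                         info, CHECK);
//   {
//     MutexLocker ml(CompiledIC_lock);
//     if (ic->is_clean()) ic->set_to_monomorphic(info);
//   }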

inline static RelocIterator parse_ic(nmethod* nm, address ic_call, oop* &_oop_addr, bool *is_optimized) {
  address first_oop = NULL;
  // Mergers please note: Sun SC5.x CC insists on an lvalue for a reference parameter.
  nmethod* tmp_nm = nm;
  return virtual_call_Relocation::parse_ic(tmp_nm, ic_call, first_oop, _oop_addr, is_optimized);
}

CompiledIC::CompiledIC(NativeCall* ic_call)
  : _ic_call(ic_call),
    _oops(parse_ic(NULL, ic_call->instruction_address(), _oop_addr, &_is_optimized))
{
}


CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}


// ----------------------------------------------------------------------------

void CompiledStaticCall::set_to_clean() {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset call site
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
  CodeBlob* cb = CodeCache::find_blob_unsafe(this);
  assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
#endif
  set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());

  // Do not reset stub here: It is too expensive to call find_stub.
  // Instead, rely on caller (nmethod::clear_inline_caches) to clear
  // both the call and its stub.
}


bool CompiledStaticCall::is_clean() const {
  return destination() == SharedRuntime::get_resolve_static_call_stub();
}

bool CompiledStaticCall::is_call_to_compiled() const {
  return CodeCache::contains(destination());
}


bool CompiledStaticCall::is_call_to_interpreted() const {
  // It is a call to interpreted code if it calls a stub. Hence, the
  // destination must be in the stub section of the nmethod that contains
  // the call.
  nmethod* nm = CodeCache::find_nmethod(instruction_address());
  return nm->stub_contains(destination());
}
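
// The out-of-line static-call stub (emitted by platform-dependent code)
// consists of a constant load of the callee methodOop followed by a jump,
// roughly (a sketch; register use is platform-specific):
//
//   mov  <method-register>, <methodOop>   ; NativeMovConstReg, set via set_data()
//   jmp  <entry>                          ; NativeJump, set via set_jump_destination()
//
// set_to_interpreted() fills in the stub and then redirects the call site to
// it; set_stub_to_clean() resets both fields.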

void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
  address stub = find_stub();
  assert(stub != NULL, "stub not found");

  if (TraceICs) {
    ResourceMark rm;
    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                  instruction_address(),
                  callee->name_and_sig_as_C_string());
  }

  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);  // creation also verifies the object
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  assert(method_holder->data() == 0 || method_holder->data() == (intptr_t)callee(), "a) MT-unsafe modification of inline cache");
  assert(jump->jump_destination() == (address)-1 || jump->jump_destination() == entry, "b) MT-unsafe modification of inline cache");

  // Update stub
  method_holder->set_data((intptr_t)callee());
  jump->set_jump_destination(entry);

  // Update jump to call
  set_destination_mt_safe(stub);
}


void CompiledStaticCall::set(const StaticCallInfo& info) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
  // Updating a cache to the wrong entry can cause bugs that are very hard
  // to track down - if a cache entry becomes invalid we simply clean it. In
  // this way it is always the same code path that is responsible for
  // updating and resolving an inline cache
  assert(is_clean(), "do not update a call entry - use clean");

  if (info._to_interpreter) {
    // Call to interpreted code
    set_to_interpreted(info.callee(), info.entry());
  } else {
    if (TraceICs) {
      ResourceMark rm;
      tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
                    instruction_address(),
                    info.entry());
    }
    // Call to compiled code
    assert (CodeCache::contains(info.entry()), "wrong entry point");
    set_destination_mt_safe(info.entry());
  }
}


// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
  nmethod* m_code = m->code();
  info._callee = m;
  if (m_code != NULL) {
    info._to_interpreter = false;
    info._entry = m_code->verified_entry_point();
  } else {
    // Callee is interpreted code. In any case entering the interpreter
    // puts a converter-frame on the stack to save arguments.
    info._to_interpreter = true;
    info._entry = m()->get_c2i_entry();
  }
}


void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
  assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
  // Reset stub
  address stub = static_stub->addr();
  assert(stub != NULL, "stub not found");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);  // creation also verifies the object
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
  method_holder->set_data(0);
  jump->set_jump_destination((address)-1);
}


address CompiledStaticCall::find_stub() {
  // Find reloc. information containing this call-site
  RelocIterator iter((nmethod*)NULL, instruction_address());
  while (iter.next()) {
    if (iter.addr() == instruction_address()) {
      switch(iter.type()) {
        case relocInfo::static_call_type:
          return iter.static_call_reloc()->static_stub();
        // We check here for opt_virtual_call_type, since we reuse the code
        // from the CompiledIC implementation
        case relocInfo::opt_virtual_call_type:
          return iter.opt_virtual_call_reloc()->static_stub();
        case relocInfo::poll_type:
        case relocInfo::poll_return_type: // A safepoint can't overlap a call.
        default:
          ShouldNotReachHere();
      }
    }
  }
  return NULL;
}


//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT

void CompiledIC::verify() {
  // make sure code pattern is actually a call imm32 instruction
  _ic_call->verify();
  if (os::is_MP()) {
    _ic_call->verify_alignment();
  }
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
         || is_optimized() || is_megamorphic(), "sanity check");
}


void CompiledIC::print() {
  print_compiled_ic();
  tty->cr();
}


void CompiledIC::print_compiled_ic() {
  tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT,
             instruction_address(), is_call_to_interpreted() ? "interpreted " : "", ic_destination());
}


void CompiledStaticCall::print() {
  tty->print("static call at " INTPTR_FORMAT " -> ", instruction_address());
  if (is_clean()) {
    tty->print("clean");
  } else if (is_call_to_compiled()) {
    tty->print("compiled");
  } else if (is_call_to_interpreted()) {
    tty->print("interpreted");
  }
  tty->cr();
}

void CompiledStaticCall::verify() {
  // Verify call
  NativeCall::verify();
  if (os::is_MP()) {
    verify_alignment();
  }

  // Verify stub
  address stub = find_stub();
  assert(stub != NULL, "no stub found for static call");
  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);  // creation also verifies the object
  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

  // Verify state
  assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
}

#endif