Tue, 04 Mar 2008 09:44:24 -0500
6666698: EnableBiasedLocking with BiasedLockingStartupDelay can block Watcher thread
Summary: Enqueue VM_EnableBiasedLocking operation asynchronously
Reviewed-by: never, xlu, kbr, acorn
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_sharedRuntime.cpp.incl"
#include <math.h>

HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
                      char*, int, char*, int, char*, int);
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                      char*, int, char*, int, char*, int);
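
// USDT probe declarations for the "hotspot" DTrace provider. Each char*/int
// pair carries a (UTF-8 bytes, length) string: object__alloc reports the
// allocating thread, class name, and allocation size; method__entry and
// method__return report the thread id plus class name, method name, and
// signature.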

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr=0;
int SharedRuntime::_new_array_ctr=0;
int SharedRuntime::_multi1_ctr=0;
int SharedRuntime::_multi2_ctr=0;
int SharedRuntime::_multi3_ctr=0;
int SharedRuntime::_multi4_ctr=0;
int SharedRuntime::_multi5_ctr=0;
int SharedRuntime::_mon_enter_stub_ctr=0;
int SharedRuntime::_mon_exit_stub_ctr=0;
int SharedRuntime::_mon_enter_ctr=0;
int SharedRuntime::_mon_exit_ctr=0;
int SharedRuntime::_partial_subtype_ctr=0;
int SharedRuntime::_jbyte_array_copy_ctr=0;
int SharedRuntime::_jshort_array_copy_ctr=0;
int SharedRuntime::_jint_array_copy_ctr=0;
int SharedRuntime::_jlong_array_copy_ctr=0;
int SharedRuntime::_oop_array_copy_ctr=0;
int SharedRuntime::_checkcast_array_copy_ctr=0;
int SharedRuntime::_unsafe_array_copy_ctr=0;
int SharedRuntime::_generic_array_copy_ctr=0;
int SharedRuntime::_slow_array_copy_ctr=0;
int SharedRuntime::_find_handler_ctr=0;
int SharedRuntime::_rethrow_ctr=0;

int     SharedRuntime::_ICmiss_index = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
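  // Once the table fills up, _ICmiss_index stays pinned at the last slot, so
  // any further distinct miss sites all land in (and are conflated with) that
  // final entry.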
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr ("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr ("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT


JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END
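
// ldiv and lrem below special-case min_jlong / -1: per the JLS the quotient
// must be min_jlong and the remainder 0, but on most hardware (e.g. x86 idiv)
// that quotient overflows and traps. Note that the stubs receive
// (divisor y, dividend x) and compute x op y; the reversed parameter order
// presumably reflects how the compiled calling convention passes the operands.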

JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END

const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);
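
// Java's floating-point remainder (JLS 15.17.3) is defined to match the C
// library's fmod(), so frem and drem simply delegate to it; the only wrinkle
// is the Win64 workaround for infinite operands.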

JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x,(double)y));
JRT_END


JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x,(double)y));
JRT_END
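
// The float/double -> int/long conversions below implement the JLS 5.1.3
// narrowing rules that a bare C++ cast does not guarantee: NaN converts to 0
// and out-of-range values saturate to the target type's min/max.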

JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp   = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  jint ltmp   = (jint)lltmp;
  if (ltmp == lltmp) {
    return ltmp;
  } else {
    if (x < 0) {
      return min_jint;
    } else {
      return max_jint;
    }
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x)) {return 0;}
  jlong lltmp = (jlong)x;
  if (lltmp != min_jlong) {
    return lltmp;
  } else {
    if (x < 0) {
      return min_jlong;
    } else {
      return max_jlong;
    }
  }
JRT_END
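
// The remaining conversions (d2f, l2f, l2d) already match Java semantics as
// plain C++ casts (IEEE round-to-nearest), so no special cases are needed.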

JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
  assert(frame::verify_return_pc(return_address), "must be a return pc");

  // the fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  if (blob != NULL && blob->is_nmethod()) {
    nmethod* code = (nmethod*)blob;
    assert(code != NULL, "nmethod must be present");
    // native nmethods don't have exception handlers
    assert(!code->is_native_method(), "no exception handler");
    assert(code->header_begin() != code->exception_begin(), "no exception handler");
    if (code->is_deopt_pc(return_address)) {
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return code->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  // Compiled code
  if (CodeCache::contains(return_address)) {
    CodeBlob* blob = CodeCache::find_blob(return_address);
    if (blob->is_nmethod()) {
      nmethod* code = (nmethod*)blob;
      assert(code != NULL, "nmethod must be present");
      assert(code->header_begin() != code->exception_begin(), "no exception handler");
      return code->exception_begin();
    }
    if (blob->is_runtime_stub()) {
      ShouldNotReachHere();  // callers are responsible for skipping runtime stub frames
    }
  }
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT
  ShouldNotReachHere();
  return NULL;
}


JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
  return raw_exception_handler_for_return_address(return_address);
JRT_END

address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );

  // Look up the relocation information
  assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
          "safepoint polling: type must be poll" );

  assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
          "Only polling locations are used for safepoint");

  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
  }
#ifndef PRODUCT
  if( TraceSafepoint ) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}


oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
  assert(caller.is_interpreted_frame(), "");
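  // The signature covers only the declared parameters, so add one stack slot
  // for the receiver itself.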
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}


void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// ret_pc points into caller; we are returning caller's exception handler
// for given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek (THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (!top_frame_only && handler_bci < 0 && sd != NULL);
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->instructions_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
    // Exception is not handled by this frame so unwind. Note that
    // this is not the same as how C2 does this. C2 emits a table
    // entry that dispatches to the unwind code in the nmethod.
    return NULL;
  }
#endif /* COMPILER1 */


  if (t == NULL) {
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->instructions_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  klassOop k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception (thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END

address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);
          guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub");
          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);
          guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)");

          // Exception happened in CodeCache. Must be either:
          //   1. Inline-cache check in C2I handler blob,
          //   2. Inline-cache check in nmethod, or
          //   3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (2)");
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod. Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          guarantee(target_pc != 0, "must have a continuation point");
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        guarantee(target_pc != 0, "must have a continuation point");
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}


JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END


address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}
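
// This entry is used as the native-function address for native methods whose
// implementation has not been linked yet, so invoking such a method raises
// UnsatisfiedLinkError.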

#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT


JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END


// ---------------------------------------------------------------------------------------------------------
// Non-product code
#ifndef PRODUCT

void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) {
  ResourceMark rm;
  assert (caller_frame.is_interpreted_frame(), "sanity check");
  assert (callee_method->has_compiled_code(), "callee must be compiled");
  methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method());
  jint bci = caller_frame.interpreter_frame_bci();
  methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci);
  assert (callee_method == method, "incorrect method");
}

methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) {
  EXCEPTION_MARK;
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci);
  methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code

  bytecode = Bytecode_invoke_at(caller_method, bci);
  int bytecode_index = bytecode->index();
  Bytecodes::Code bc = bytecode->adjusted_invoke_code();

  Handle receiver;
  if (bc == Bytecodes::_invokeinterface ||
      bc == Bytecodes::_invokevirtual ||
      bc == Bytecodes::_invokespecial) {
    symbolHandle signature (THREAD, staticCallee->signature());
    receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame));
  } else {
    receiver = Handle();
  }
  CallInfo result;
  constantPoolHandle constants (THREAD, caller_method->constants());
  LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code
  methodHandle calleeMethod = result.selected_method();
  return calleeMethod;
}

#endif  // PRODUCT


JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END


jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741.  Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->blueprint();
  int size = o->size();
  symbolOop name = klass->name();
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name = method->name();
  symbolOop sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
      kname->bytes(), kname->utf8_length(),
      name->bytes(), name->utf8_length(),
      sig->bytes(), sig->utf8_length());
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  symbolOop kname = method->klass_name();
  symbolOop name = method->name();
  symbolOop sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
      kname->bytes(), kname->utf8_length(),
      name->bytes(), name->utf8_length(),
      sig->bytes(), sig->utf8_length());
  return 0;
JRT_END


// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but the callee has not been invoked yet.  Used by: resolve virtual/static,
// vtable updates, etc.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}


// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but the callee has not been invoked yet.  Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller (THREAD, vfst.method());
  int bci = vfst.bci();

  // Find bytecode
  Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
  bc = bytecode->adjusted_invoke_code();
  int bytecode_index = bytecode->index();

  // Find receiver for non-static call
  if (bc != Bytecodes::_invokestatic) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(thread);
    frame stubFrame = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
    if (callee.is_null()) {
      THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
    }
    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method. This is parameterized by bytecode.
  constantPoolHandle constants (THREAD, caller->constants());
  assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
  LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (bc != Bytecodes::_invokestatic) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass (THREAD, receiver->klass());
    klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
    // klass is already loaded
    KlassHandle static_receiver_klass (THREAD, rk);
    assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->oop_is_instance()) {
      if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass.print();
      }
      assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We need first to check if any Java activations (compiled, interpreted)
  // exist on the stack since last JavaCall.  If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame.  We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::object_klass()) {
      // If there is a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of resolve; if it loops here more than 100 times,
      // there could be a bug.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call.  The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* cb = caller_frame.cb();
  guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock((nmethod*)cb);


  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
              (is_virtual) ? (&_resolve_virtual_ctr) :
              (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call.  We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized.  Return values: For a virtual call this is a
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;


  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* nm = callee_method->code();
  nmethodLocker nl_callee(nm);
#ifdef ASSERT
  address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                     is_optimized, static_bound, virtual_call_info,
                     CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch, if the methodOop was redefined then
    // don't update the call site and let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}


// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
#ifdef ASSERT
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
    // Return methodOop through TLS
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it.  We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we try and find the callee by normal means a
  // safepoint is possible and we would have trouble gc'ing the compiled args.
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
    methodOop callee = thread->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    thread->set_vm_result(callee);
    thread->set_callee_target(NULL);
    return callee->get_c2i_entry();
  }

  // Must be compiled to compiled path which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END


// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END



methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is NULL for static calls. An exception is thrown for NULL
  // receivers for non-static calls
  Handle receiver = find_callee_info(thread, bc, call_info,
                                     CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically bound.
  // If we fell through to below we would think that the site was going megamorphic
  // when in fact the site can never miss. Worse, because we'd think it was megamorphic
  // we'd try and do a vtable dispatch, but methods that can be statically bound
  // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
  // reresolution of the call site (as if we did a handle_wrong_method and not a
  // plain ic_miss) and the site will be converted to an optimized virtual call site
  // never to miss again. I don't believe C2 will produce code like this but if it
  // did this would still be the correct thing to do for it too, hence no ifdef.
  //
  if (call_info.resolved_method()->can_be_statically_bound()) {
    methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
    if (TraceCallFixup) {
      RegisterMap reg_map(thread, false);
      frame caller_frame = thread->last_frame().sender(&reg_map);
      ResourceMark rm(thread);
      tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
      callee_method->print_short_name(tty);
      tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
      tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
    }
    return callee_method;
  }

  methodHandle callee_method = call_info.selected_method();

  bool should_be_mono = false;

#ifndef PRODUCT
  Atomic::inc(&_ic_miss_ctr);

  // Statistics & Tracing
  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("IC miss (%s) call to", Bytecodes::name(bc));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }

  if (ICMissHistogram) {
    MutexLocker m(VMStatistic_lock);
    RegisterMap reg_map(thread, false);
    frame f = thread->last_frame().real_sender(&reg_map); // skip runtime stub
    // produce statistics under the lock
    trace_ic_miss(f.pc());
  }
#endif

  // install an event collector so that when a vtable stub is created the
  // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  // event can't be posted when the stub is created as locks are held
  // - instead the event will be deferred until the event collector goes
  // out of scope.
  JvmtiDynamicCodeEventCollector event_collector;

  // Update inline cache to megamorphic. Skip update if caller has been
  // made non-entrant or we are called from interpreted.
  { MutexLocker ml_patch (CompiledIC_lock);
    RegisterMap reg_map(thread, false);
    frame caller_frame = thread->last_frame().sender(&reg_map);
    CodeBlob* cb = caller_frame.cb();
    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
      // Not a non-entrant nmethod, so find inline_cache
      CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
      bool should_be_mono = false;
      if (inline_cache->is_optimized()) {
        if (TraceCallFixup) {
          ResourceMark rm(thread);
          tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
          callee_method->print_short_name(tty);
          tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
        }
        should_be_mono = true;
      } else {
        compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
        if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {

          if (receiver()->klass() == ic_oop->holder_klass()) {
            // This isn't a real miss. We must have seen that compiled code
            // is now available and we want the call site converted to a
            // monomorphic compiled call site.
            // We can't assert for callee_method->code() != NULL because it
            // could have been deoptimized in the meantime
            if (TraceCallFixup) {
              ResourceMark rm(thread);
              tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
              callee_method->print_short_name(tty);
              tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
            }
            should_be_mono = true;
          }
        }
      }

      if (should_be_mono) {

        // We have a path that was monomorphic but was going interpreted
        // and now we have (or had) a compiled entry. We correct the IC
        // by using a new icBuffer.
        CompiledICInfo info;
        KlassHandle receiver_klass(THREAD, receiver()->klass());
        inline_cache->compute_monomorphic_entry(callee_method,
                                                receiver_klass,
                                                inline_cache->is_optimized(),
                                                false,
                                                info, CHECK_(methodHandle()));
        inline_cache->set_to_monomorphic(info);
      } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
        // Change to megamorphic
        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
      } else {
        // Either clean or megamorphic
      }
    }
  } // Release CompiledIC_lock

  return callee_method;
}

//
// Resets a call-site in compiled code so it will get resolved again.
// This routine handles virtual call sites, optimized virtual call
// sites, and static call sites.  Typically used to change a call site's
// destination from compiled to interpreted.
//
methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  frame caller = stub_frame.sender(&reg_map);

  // Do nothing if the frame isn't a live compiled frame.
  // nmethod could be deoptimized by the time we get here
  // so no update to the caller is needed.

  if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {

    address pc = caller.pc();
    Events::log("update call-site at pc " INTPTR_FORMAT, pc);

    // Default call_addr is the location of the "basic" call.
    // Determine the address of the call we are reresolving. With
    // Inline Caches we will always find a recognizable call.
    // With Inline Caches disabled we may or may not find a
    // recognizable call. We will always find a call for static
    // calls and for optimized virtual calls. For vanilla virtual
    // calls it depends on the state of the UseInlineCaches switch.
    //
    // With Inline Caches disabled we can get here for a virtual call
    // for two reasons:
    //   1 - calling an abstract method. The vtable for abstract methods
    //       will run us thru handle_wrong_method and we will eventually
    //       end up in the interpreter to throw the AbstractMethodError.
    //   2 - a racing deoptimization. We could be doing a vanilla vtable
    //       call and between the time we fetch the entry address and
    //       we jump to it the target gets deoptimized. Similar to 1
    //       we will wind up in the interpreter (thru a c2i with c2).
    //
    address call_addr = NULL;
    {
      // Get call instruction under lock because another thread may be
      // busy patching it.
      MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
      // Location of call instruction
      if (NativeCall::is_call_before(pc)) {
        NativeCall *ncall = nativeCall_before(pc);
        call_addr = ncall->instruction_address();
      }
    }

    // Check for static or virtual call
    bool is_static_call = false;
    nmethod* caller_nm = CodeCache::find_nmethod(pc);
    // Make sure nmethod doesn't get deoptimized and removed until
    // this is done with it.
    // CLEANUP - with lazy deopt shouldn't need this lock
    nmethodLocker nmlock(caller_nm);

    if (call_addr != NULL) {
      RelocIterator iter(caller_nm, call_addr, call_addr+1);
      int ret = iter.next(); // Get item
      if (ret) {
        assert(iter.addr() == call_addr, "must find call");
        if (iter.type() == relocInfo::static_call_type) {
          is_static_call = true;
        } else {
          assert(iter.type() == relocInfo::virtual_call_type ||
                 iter.type() == relocInfo::opt_virtual_call_type
                , "unexpected relocInfo. type");
        }
      } else {
        assert(!UseInlineCaches, "relocation info. must exist for this address");
      }

      // Cleaning the inline cache will force a new resolve. This is more robust
      // than directly setting it to the new destination, since resolving of calls
      // is always done through the same code path. (experience shows that it
      // leads to very hard to track down bugs, if an inline cache gets updated
      // to a wrong method). It should not be performance critical, since the
      // resolve is only done once.

      MutexLocker ml(CompiledIC_lock);
      //
      // We do not patch the call site if the nmethod has been made non-entrant
      // as it is a waste of time
      //
      if (caller_nm->is_in_use()) {
        if (is_static_call) {
          CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
          ssc->set_to_clean();
        } else {
          // compiled, dispatched call (which used to call an interpreted method)
          CompiledIC* inline_cache = CompiledIC_at(call_addr);
          inline_cache->set_to_clean();
        }
      }
    }

  }

  methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));


#ifndef PRODUCT
  Atomic::inc(&_wrong_method_ctr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("handle_wrong_method reresolving call to");
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  return callee_method;
}

// ---------------------------------------------------------------------------
// We are calling the interpreter via a c2i. Normally this would mean that
// we were called by a compiled method. However we could have lost a race
// where we went int -> i2c -> c2i and so the caller could in fact be
// interpreted. If the caller is compiled we attempt to patch the caller
// so it no longer calls into the interpreter.
IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  methodOop moop(method);

  address entry_point = moop->from_compiled_entry();

  // It's possible that deoptimization can occur at a call site which hasn't
  // been resolved yet, in which case this function will be called from
  // an nmethod that has been patched for deopt and we can ignore the
  // request for a fixup.
  // Also it is possible that we lost a race in that from_compiled_entry
  // is now back to the i2c in that case we don't need to patch and if
  // we did we'd leap into space because the callsite needs to use
  // "to interpreter" stub in order to load up the methodOop. Don't
  // ask me how I know this...
  //

  CodeBlob* cb = CodeCache::find_blob(caller_pc);
  if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
    return;
  }

  // There is a benign race here. We could be attempting to patch to a compiled
  // entry point at the same time the callee is being deoptimized. If that is
  // the case then entry_point may in fact point to a c2i and we'd patch the
  // call site with the same old data. clear_code will set code() to NULL
  // at the end of it. If we happen to see that NULL then we can skip trying
  // to patch. If we hit the window where the callee has a c2i in the
  // from_compiled_entry and the NULL isn't present yet then we lose the race
  // and patch the code with the same old data. Such is life.

  if (moop->code() == NULL) return;

  if (((nmethod*)cb)->is_in_use()) {

    // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
      NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
      //
      // bug 6281185. We might get here after resolving a call site to a vanilla
      // virtual call. Because the resolvee uses the verified entry it may then
      // see compiled code and attempt to patch the site by calling us. This would
      // then incorrectly convert the call site to optimized and it's downhill from
      // there. If you're lucky you'll get the assert in the bugid, if not you've
      // just made a call site that could be megamorphic into a monomorphic site
      // for the rest of its life! Just another racing bug in the life of
      // fixup_callers_callsite ...
      //
      RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
      iter.next();
      assert(iter.has_current(), "must have a reloc at java call site");
      relocInfo::relocType typ = iter.reloc()->type();
      if ( typ != relocInfo::static_call_type &&
           typ != relocInfo::opt_virtual_call_type &&
           typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
      if (destination != entry_point) {
        CodeBlob* callee = CodeCache::find_blob(destination);
        // callee == cb seems weird. It means calling interpreter thru stub.
        if (callee == cb || callee->is_adapter_blob()) {
          // static call or optimized virtual
          if (TraceCallFixup) {
            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          call->set_destination_mt_safe(entry_point);
        } else {
          if (TraceCallFixup) {
            tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          // assert is too strong; could also be resolve destinations.
          // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
        }
      } else {
        if (TraceCallFixup) {
          tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
          moop->print_short_name(tty);
          tty->print_cr(" to " INTPTR_FORMAT, entry_point);
        }
      }
    }
  }

IRT_END


// same as JVM_Arraycopy, but called directly from compiled code
JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
                                                oopDesc* dest, jint dest_pos,
                                                jint length,
                                                JavaThread* thread)) {
#ifndef PRODUCT
  _slow_array_copy_ctr++;
#endif
  // Check if we have null pointers
  if (src == NULL || dest == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
  // even though the copy_array API also performs dynamic checks to ensure
  // that src and dest are truly arrays (and are conformable).
  // The copy_array mechanism is awkward and could be removed, but
  // the compilers don't call this function except as a last resort,
  // so it probably doesn't matter.
  Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos,
                                        (arrayOopDesc*)dest, dest_pos,
                                        length, thread);
}
JRT_END
1466 char* SharedRuntime::generate_class_cast_message(
1467 JavaThread* thread, const char* objName) {
1469 // Get target class name from the checkcast instruction
1470 vframeStream vfst(thread, true);
1471 assert(!vfst.at_end(), "Java frame must exist");
1472 Bytecode_checkcast* cc = Bytecode_checkcast_at(
1473 vfst.method()->bcp_from(vfst.bci()));
1474 Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
1475 cc->index(), thread));
1476 return generate_class_cast_message(objName, targetKlass->external_name());
1477 }
1479 char* SharedRuntime::generate_class_cast_message(
1480 const char* objName, const char* targetKlassName) {
1481 const char* desc = " cannot be cast to ";
1482 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
1484 char* message = NEW_RESOURCE_ARRAY(char, msglen);
1485 if (NULL == message) {
1486 // Shouldn't happen, but don't cause even more problems if it does
1487 message = const_cast<char*>(objName);
1488 } else {
1489 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
1490 }
1491 return message;
1492 }
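// For example, objName "java.lang.String" and targetKlassName "java.util.List"
// would produce "java.lang.String cannot be cast to java.util.List".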
1494 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1495 (void) JavaThread::current()->reguard_stack();
1496 JRT_END
1499 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
1500 #ifndef PRODUCT
1501 int SharedRuntime::_monitor_enter_ctr=0;
1502 #endif
1503 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
1504 oop obj(_obj);
1505 #ifndef PRODUCT
1506 _monitor_enter_ctr++; // monitor enter slow
1507 #endif
1508 if (PrintBiasedLockingStatistics) {
1509 Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
1510 }
1511 Handle h_obj(THREAD, obj);
1512 if (UseBiasedLocking) {
1513 // Retry fast entry if bias is revoked to avoid unnecessary inflation
1514 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
1515 } else {
1516 ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
1517 }
1518 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
1519 JRT_END
1521 #ifndef PRODUCT
1522 int SharedRuntime::_monitor_exit_ctr=0;
1523 #endif
1524 // Handles the uncommon cases of monitor unlocking in compiled code
1525 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
1526 oop obj(_obj);
1527 #ifndef PRODUCT
1528 _monitor_exit_ctr++; // monitor exit slow
1529 #endif
1530 Thread* THREAD = JavaThread::current();
1531 // I'm not convinced we still need the code guarded by MIGHT_HAVE_PENDING;
1532 // testing was never able to fire the assert that guarded it, so it is disabled.
1533 assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
1534 #undef MIGHT_HAVE_PENDING
1535 #ifdef MIGHT_HAVE_PENDING
1536 // Save and restore any pending_exception around the exception mark.
1537 // While the slow_exit must not throw an exception, we could come into
1538 // this routine with one set.
1539 oop pending_excep = NULL;
1540 const char* pending_file;
1541 int pending_line;
1542 if (HAS_PENDING_EXCEPTION) {
1543 pending_excep = PENDING_EXCEPTION;
1544 pending_file = THREAD->exception_file();
1545 pending_line = THREAD->exception_line();
1546 CLEAR_PENDING_EXCEPTION;
1547 }
1548 #endif /* MIGHT_HAVE_PENDING */
1550 {
1551 // Exit must be non-blocking, and therefore no exceptions can be thrown.
1552 EXCEPTION_MARK;
1553 ObjectSynchronizer::slow_exit(obj, lock, THREAD);
1554 }
1556 #ifdef MIGHT_HAVE_PENDING
1557 if (pending_excep != NULL) {
1558 THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
1559 }
1560 #endif /* MIGHT_HAVE_PENDING */
1561 JRT_END
1563 #ifndef PRODUCT
1565 void SharedRuntime::print_statistics() {
1566 ttyLocker ttyl;
1567 if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
1569 if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
1570 if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
1571 if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
1573 SharedRuntime::print_ic_miss_histogram();
1575 if (CountRemovableExceptions) {
1576 if (_nof_removable_exceptions > 0) {
1577 Unimplemented(); // this counter is not yet incremented
1578 tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
1579 }
1580 }
1582 // Dump the JRT_ENTRY counters
1583 if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
1584 if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
1585 if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
1586 if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
1587 if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
1588 if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
1589 if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
1591 tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
1592 tty->print_cr("%5d wrong method", _wrong_method_ctr );
1593 tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
1594 tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
1595 tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
1597 if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
1598 if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
1599 if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
1600 if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
1601 if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
1602 if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
1603 if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
1604 if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
1605 if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
1606 if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
1607 if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
1608 if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
1609 if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
1610 if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
1611 if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
1612 if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
1614 if (xtty != NULL) xtty->tail("statistics");
1615 }
1617 inline double percent(int x, int y) {
1618 return 100.0 * x / MAX2(y, 1);
1619 }
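// e.g. percent(250, 1000) == 25.0; a zero denominator is clamped to 1 so
// this helper never divides by zero.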
1621 class MethodArityHistogram {
1622 public:
1623 enum { MAX_ARITY = 256 };
1624 private:
1625 static int _arity_histogram[MAX_ARITY]; // histogram of #args
1626 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words
1627 static int _max_arity; // max. arity seen
1628 static int _max_size; // max. arg size seen
1630 static void add_method_to_histogram(nmethod* nm) {
1631 methodOop m = nm->method();
1632 ArgumentCount args(m->signature());
1633 int arity = args.size() + (m->is_static() ? 0 : 1);
1634 int argsize = m->size_of_parameters();
1635 arity = MIN2(arity, MAX_ARITY-1);
1636 argsize = MIN2(argsize, MAX_ARITY-1);
1637 int count = nm->method()->compiled_invocation_count();
1638 _arity_histogram[arity] += count;
1639 _size_histogram[argsize] += count;
1640 _max_arity = MAX2(_max_arity, arity);
1641 _max_size = MAX2(_max_size, argsize);
1642 }
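// Worked example: for a virtual method with signature (JI)V, ArgumentCount
// is 2, so arity = 2 + 1 (receiver) = 3, while size_of_parameters() counts
// Java slots: 1 (receiver) + 2 (long) + 1 (int) = 4.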
1644 void print_histogram_helper(int n, int* histo, const char* name) {
1645 const int N = MIN2(5, n);
1646 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1647 double sum = 0;
1648 double weighted_sum = 0;
1649 int i;
1650 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
1651 double rest = sum;
1652 double percent = MAX2(sum, 1.0) / 100; // guard against an empty histogram
1653 for (i = 0; i <= N; i++) {
1654 rest -= histo[i];
1655 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
1656 }
1657 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
1658 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
1659 }
1661 void print_histogram() {
1662 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1663 print_histogram_helper(_max_arity, _arity_histogram, "arity");
1664 tty->print_cr("\nSame for parameter size (in words):");
1665 print_histogram_helper(_max_size, _size_histogram, "size");
1666 tty->cr();
1667 }
1669 public:
1670 MethodArityHistogram() {
1671 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1672 _max_arity = _max_size = 0;
1673 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
1674 CodeCache::nmethods_do(add_method_to_histogram);
1675 print_histogram();
1676 }
1677 };
1679 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
1680 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
1681 int MethodArityHistogram::_max_arity;
1682 int MethodArityHistogram::_max_size;
1684 void SharedRuntime::print_call_statistics(int comp_total) {
1685 tty->print_cr("Calls from compiled code:");
1686 int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
1687 int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
1688 int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
1689 tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total));
1690 tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
1691 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
1692 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
1693 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
1694 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
1695 tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
1696 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
1697 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
1698 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
1699 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
1700 tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
1701 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
1702 tty->cr();
1703 tty->print_cr("Note 1: counter updates are not MT-safe.");
1704 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
1705 tty->print_cr(" %% in nested categories are relative to their category");
1706 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
1707 tty->cr();
1709 MethodArityHistogram h;
1710 }
1711 #endif
1714 // ---------------------------------------------------------------------------
1715 // Implementation of AdapterHandlerLibrary
1716 const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
1717 GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
1718 GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL;
1719 const int AdapterHandlerLibrary_size = 16*K;
1720 u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32];
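// The +32 slop presumably leaves headroom for the CodeEntryAlignment
// round-up applied to _buffer below (assuming CodeEntryAlignment <= 32).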
1722 void AdapterHandlerLibrary::initialize() {
1723 if (_fingerprints != NULL) return;
1724 _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
1725 _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true);
1726 // Index 0 reserved for the slow path handler
1727 _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
1728 _handlers->append(NULL);
1730 // Create a special handler for abstract methods. Abstract methods
1731 // are never compiled so an i2c entry is somewhat meaningless, but
1732 // fill it in with something appropriate just in case. Use the
1733 // handle-wrong-method stub for the c2i transitions.
1734 address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
1735 _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
1736 assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
1737 _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
1738 wrong_method, wrong_method));
1739 }
1741 int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
1742 // Use customized signature handler. Need to lock around updates to the
1743 // _fingerprints array (it is not safe for concurrent readers and a single
1744 // writer: this can be fixed if it becomes a problem).
1746 // Shouldn't be here if running -Xint
1747 if (Arguments::mode() == Arguments::_int) {
1748 ShouldNotReachHere();
1749 }
1751 // Get the address of the ic_miss handlers before we grab the
1752 // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
1753 // was caused by the initialization of the stubs happening
1754 // while we held the lock and then notifying jvmti while
1755 // holding it. This just forces the initialization to be a little
1756 // earlier.
1757 address ic_miss = SharedRuntime::get_ic_miss_stub();
1758 assert(ic_miss != NULL, "must have handler");
1760 int result;
1761 BufferBlob *B = NULL;
1762 uint64_t fingerprint;
1763 {
1764 MutexLocker mu(AdapterHandlerLibrary_lock);
1765 // make sure data structure is initialized
1766 initialize();
1768 if (method->is_abstract()) {
1769 return AbstractMethodHandler;
1770 }
1772 // Lookup method signature's fingerprint
1773 fingerprint = Fingerprinter(method).fingerprint();
1774 assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" );
1775 // Fingerprints are small fixed-size condensed representations of
1776 // signatures. If the signature is too large, it won't fit in a
1777 // fingerprint. Signatures which cannot support a fingerprint get a new i2c
1778 // adapter generated each time, instead of searching the cache for one. This -1
1779 // game could be avoided by comparing signatures instead of using
1780 // fingerprints. However, -1 fingerprints are very rare.
1781 if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint
1782 // Turns out i2c adapters do not care what the return value is. Mask it
1783 // out so signatures that only differ in return type will share the same
1784 // adapter.
1785 fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
1786 // Search for a prior existing i2c/c2i adapter
1787 int index = _fingerprints->find(fingerprint);
1788 if( index >= 0 ) return index; // Found existing handlers?
1789 } else {
1790 // Annoyingly, I end up adding -1 fingerprints to the array of handlers,
1791 // because I need a unique handler index. It cannot be scanned for
1792 // because all -1's look alike. Instead, the matching index is passed out
1793 // and immediately used to collect the 2 return values (the c2i and i2c
1794 // adapters).
1795 }
1797 // Create I2C & C2I handlers
1798 ResourceMark rm;
1799 // Improve alignment slightly
1800 u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
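// Round-up sketch: with CodeEntryAlignment == 32 and _buffer at 0x1004,
// (0x1004 + 31) & ~31 == 0x1020, the next 32-byte boundary.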
1801 CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
1802 short buffer_locs[20];
1803 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
1804 sizeof(buffer_locs)/sizeof(relocInfo));
1805 MacroAssembler _masm(&buffer);
1807 // Fill in the signature array, for the calling-convention call.
1808 int total_args_passed = method->size_of_parameters(); // All args on stack
1810 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
1811 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
1812 int i=0;
1813 if( !method->is_static() ) // Pass in receiver first
1814 sig_bt[i++] = T_OBJECT;
1815 for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
1816 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
1817 if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
1818 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
1819 }
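// E.g., for a virtual method long m(int, double) the loop above yields
// sig_bt = { T_OBJECT, T_INT, T_DOUBLE, T_VOID } and total_args_passed == 4.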
1820 assert( i==total_args_passed, "" );
1822 // Now get the re-packed compiled-Java layout.
1823 int comp_args_on_stack;
1825 // Get a description of the compiled-Java calling convention and the largest (VMReg) stack slot used
1826 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
1828 AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
1829 total_args_passed,
1830 comp_args_on_stack,
1831 sig_bt,
1832 regs);
1834 B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
1835 if (B == NULL) return -2; // Out of CodeCache space
1836 entry->relocate(B->instructions_begin());
1837 #ifndef PRODUCT
1838 // debugging support
1839 if (PrintAdapterHandlers) {
1840 tty->cr();
1841 tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
1842 _handlers->length(), (method->is_static() ? "static" : "receiver"),
1843 method->signature()->as_C_string(), fingerprint, buffer.code_size() );
1844 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
1845 Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size());
1846 }
1847 #endif
1849 // add handlers to library
1850 _fingerprints->append(fingerprint);
1851 _handlers->append(entry);
1852 // set handler index
1853 assert(_fingerprints->length() == _handlers->length(), "sanity check");
1854 result = _fingerprints->length() - 1;
1855 }
1856 // Outside of the lock
1857 if (B != NULL) {
1858 char blob_id[256];
1859 jio_snprintf(blob_id,
1860 sizeof(blob_id),
1861 "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
1862 AdapterHandlerEntry::name,
1863 fingerprint,
1864 B->instructions_begin());
1865 VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
1866 Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
1868 if (JvmtiExport::should_post_dynamic_code_generated()) {
1869 JvmtiExport::post_dynamic_code_generated(blob_id,
1870 B->instructions_begin(),
1871 B->instructions_end());
1872 }
1873 }
1874 return result;
1875 }
1877 void AdapterHandlerEntry::relocate(address new_base) {
1878 ptrdiff_t delta = new_base - _i2c_entry;
1879 _i2c_entry += delta;
1880 _c2i_entry += delta;
1881 _c2i_unverified_entry += delta;
1882 }
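// Sketch: assuming the i2c entry is the first code emitted into the temp
// buffer, delta is simply the distance to the BufferBlob's first instruction,
// shifting all three entry points in lockstep.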
1884 // Create a native wrapper for this native method. The wrapper converts the
1885 // java compiled calling convention to the native convention, handlizes
1886 // arguments, and transitions to native. On return from the native call we
1887 // transition back to Java, blocking if a safepoint is in progress.
1888 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
1889 ResourceMark rm;
1890 nmethod* nm = NULL;
1892 if (PrintCompilation) {
1893 ttyLocker ttyl;
1894 tty->print("--- n%s ", (method->is_synchronized() ? "s" : " "));
1895 method->print_short_name(tty);
1896 if (method->is_static()) {
1897 tty->print(" (static)");
1898 }
1899 tty->cr();
1900 }
1902 assert(method->has_native_function(), "must have something valid to call!");
1904 {
1905 // perform the work while holding the lock, but perform any printing outside the lock
1906 MutexLocker mu(AdapterHandlerLibrary_lock);
1907 // See if somebody beat us to it
1908 nm = method->code();
1909 if (nm) {
1910 return nm;
1911 }
1913 // Improve alignment slightly
1914 u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
1915 CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
1916 // Need a few relocation entries
1917 double locs_buf[20];
1918 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
1919 MacroAssembler _masm(&buffer);
1921 // Fill in the signature array, for the calling-convention call.
1922 int total_args_passed = method->size_of_parameters();
1924 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
1925 VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed);
1926 int i=0;
1927 if( !method->is_static() ) // Pass in receiver first
1928 sig_bt[i++] = T_OBJECT;
1929 SignatureStream ss(method->signature());
1930 for( ; !ss.at_return_type(); ss.next()) {
1931 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
1932 if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
1933 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
1934 }
1935 assert( i==total_args_passed, "" );
1936 BasicType ret_type = ss.type();
1938 // Now get the compiled-Java layout as input arguments
1939 int comp_args_on_stack;
1940 comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
1942 // Generate the compiled-to-native wrapper code
1943 nm = SharedRuntime::generate_native_wrapper(&_masm,
1944 method,
1945 total_args_passed,
1946 comp_args_on_stack,
1947 sig_bt,regs,
1948 ret_type);
1949 }
1951 // Must unlock before calling set_code
1952 // Install the generated code.
1953 if (nm != NULL) {
1954 method->set_code(method, nm);
1955 nm->post_compiled_method_load_event();
1956 } else {
1957 // CodeCache is full, disable compilation
1958 // We ought to log this, but the compile log is only per compile thread
1959 // and we're some nondescript Java thread.
1960 UseInterpreter = true;
1961 if (UseCompiler || AlwaysCompileLoopMethods ) {
1962 #ifndef PRODUCT
1963 warning("CodeCache is full. Compiler has been disabled");
1964 if (CompileTheWorld || ExitOnFullCodeCache) {
1965 before_exit(JavaThread::current());
1966 exit_globals(); // will delete tty
1967 vm_direct_exit(CompileTheWorld ? 0 : 1);
1968 }
1969 #endif
1970 UseCompiler = false;
1971 AlwaysCompileLoopMethods = false;
1972 }
1973 }
1974 return nm;
1975 }
1977 // -------------------------------------------------------------------------
1978 // Java-Java calling convention
1979 // (what you use when Java calls Java)
1981 //------------------------------name_for_receiver----------------------------------
1982 // For a given signature, return the VMReg for parameter 0.
1983 VMReg SharedRuntime::name_for_receiver() {
1984 VMRegPair regs;
1985 BasicType sig_bt = T_OBJECT;
1986 (void) java_calling_convention(&sig_bt, &regs, 1, true);
1987 // Return argument 0 register. In the LP64 build pointers
1988 // take 2 registers, but the VM wants only the 'main' name.
1989 return regs.first();
1990 }
1992 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
1993 // This method returns a data structure allocated as a
1994 // ResourceObject, so do not put any ResourceMarks in here.
1995 char *s = sig->as_C_string();
1996 int len = (int)strlen(s);
1997 s++; len--; // Skip opening paren
1998 char *t = s+len;
1999 while( *(--t) != ')' ) ; // Find close paren
2001 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
2002 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
2003 int cnt = 0;
2004 if (!is_static) {
2005 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2006 }
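// Worked example: for sig "(ILjava/lang/String;[J)V" with is_static == false,
// the loop below yields sig_bt = { T_OBJECT, T_INT, T_OBJECT, T_ARRAY } and
// cnt == 4 (the receiver, then one entry per argument; an array is one slot).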
2008 while( s < t ) {
2009 switch( *s++ ) { // Switch on signature character
2010 case 'B': sig_bt[cnt++] = T_BYTE; break;
2011 case 'C': sig_bt[cnt++] = T_CHAR; break;
2012 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
2013 case 'F': sig_bt[cnt++] = T_FLOAT; break;
2014 case 'I': sig_bt[cnt++] = T_INT; break;
2015 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break;
2016 case 'S': sig_bt[cnt++] = T_SHORT; break;
2017 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2018 case 'V': sig_bt[cnt++] = T_VOID; break;
2019 case 'L': // Oop
2020 while( *s++ != ';' ) ; // Skip signature
2021 sig_bt[cnt++] = T_OBJECT;
2022 break;
2023 case '[': { // Array
2024 do { // Skip optional size
2025 while( *s >= '0' && *s <= '9' ) s++;
2026 } while( *s++ == '[' ); // Nested arrays?
2027 // Skip element type
2028 if( s[-1] == 'L' )
2029 while( *s++ != ';' ) ; // Skip signature
2030 sig_bt[cnt++] = T_ARRAY;
2031 break;
2032 }
2033 default : ShouldNotReachHere();
2034 }
2035 }
2036 assert( cnt < 256, "grow table size" );
2038 int comp_args_on_stack;
2039 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2041 // the calling convention doesn't count out_preserve_stack_slots so
2042 // we must add that in to get "true" stack offsets.
2044 if (comp_args_on_stack) {
2045 for (int i = 0; i < cnt; i++) {
2046 VMReg reg1 = regs[i].first();
2047 if( reg1->is_stack()) {
2048 // Yuck
2049 reg1 = reg1->bias(out_preserve_stack_slots());
2050 }
2051 VMReg reg2 = regs[i].second();
2052 if( reg2->is_stack()) {
2053 // Yuck
2054 reg2 = reg2->bias(out_preserve_stack_slots());
2055 }
2056 regs[i].set_pair(reg2, reg1);
2057 }
2058 }
2060 // results
2061 *arg_size = cnt;
2062 return regs;
2063 }
2065 // OSR Migration Code
2066 //
2067 // This code is used to convert interpreter frames into compiled frames. It is
2068 // called from the very start of a compiled OSR nmethod. A temp array is
2069 // allocated to hold the interesting bits of the interpreter frame. All
2070 // active locks are inflated to allow them to move. The displaced headers and
2071 // active interpreter locals are copied into the temp buffer. Then we return
2072 // back to the compiled code. The compiled code then pops the current
2073 // interpreter frame off the stack and pushes a new compiled frame. Then it
2074 // copies the interpreter locals and displaced headers where it wants.
2075 // Finally it calls back to free the temp buffer.
2076 //
2077 // All of this is done while NOT at a safepoint; no safepoint or GC is allowed.
2079 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
2081 #ifdef IA64
2082 ShouldNotReachHere(); // NYI
2083 #endif /* IA64 */
2085 //
2086 // This code is dependent on the memory layout of the interpreter local
2087 // array and the monitors. On all of our platforms the layout is identical
2088 // so this code is shared. If some platform lays its arrays out
2089 // differently then this code could move to platform specific code or
2090 // the code here could be modified to copy items one at a time using
2091 // frame accessor methods and be platform independent.
2093 frame fr = thread->last_frame();
2094 assert( fr.is_interpreted_frame(), "" );
2095 assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
2097 // Figure out how many monitors are active.
2098 int active_monitor_count = 0;
2099 for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
2100 kptr < fr.interpreter_frame_monitor_begin();
2101 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
2102 if( kptr->obj() != NULL ) active_monitor_count++;
2103 }
2105 // QQQ we could place the number of active monitors in the array so that compiled code
2106 // could double check it.
2108 methodOop moop = fr.interpreter_frame_method();
2109 int max_locals = moop->max_locals();
2110 // Allocate temp buffer, 1 word per local & 2 per active monitor
2111 int buf_size_words = max_locals + active_monitor_count*2;
2112 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
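// Resulting layout: buf[0..max_locals-1] holds the locals (buf[i] is local
// max_locals-1-i), followed by a (displaced_header, obj) pair for each
// active monitor.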
2114 // Copy the locals. Order is preserved so that loading of longs works.
2115 // Since there's no GC I can copy the oops blindly.
2116 assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
2117 if (TaggedStackInterpreter) {
2118 for (int i = 0; i < max_locals; i++) {
2119 // copy each local separately into the buffer, skipping the tag slot
2120 buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
2121 }
2122 } else {
2123 Copy::disjoint_words(
2124 (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
2125 (HeapWord*)&buf[0],
2126 max_locals);
2127 }
2129 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
2130 int i = max_locals;
2131 for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
2132 kptr2 < fr.interpreter_frame_monitor_begin();
2133 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
2134 if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
2135 BasicLock *lock = kptr2->lock();
2136 // Inflate so the displaced header becomes position-independent
2137 if (lock->displaced_header()->is_unlocked())
2138 ObjectSynchronizer::inflate_helper(kptr2->obj());
2139 // Now the displaced header is free to move
2140 buf[i++] = (intptr_t)lock->displaced_header();
2141 buf[i++] = (intptr_t)kptr2->obj();
2142 }
2143 }
2144 assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
2146 return buf;
2147 JRT_END
2149 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
2150 FREE_C_HEAP_ARRAY(intptr_t,buf);
2151 JRT_END
2153 #ifndef PRODUCT
2154 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
2156 for (int i = 0 ; i < _handlers->length() ; i++) {
2157 AdapterHandlerEntry* a = get_entry(i);
2158 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
2159 }
2160 return false;
2161 }
2163 void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
2165 for (int i = 0 ; i < _handlers->length() ; i++) {
2166 AdapterHandlerEntry* a = get_entry(i);
2167 if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) {
2168 tty->print("Adapter for signature: ");
2169 // Fingerprinter::print(_fingerprints->at(i));
2170 tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
2171 tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
2172 a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
2174 return;
2175 }
2176 }
2177 assert(false, "Should have found handler");
2178 }
2179 #endif /* PRODUCT */