Fri, 10 Jun 2011 15:08:36 -0700
6941923: RFE: Handling large log files produced by long running Java Applications
Summary: supply optional flags to enable GC log rotation
Reviewed-by: ysr, jwilhelm
1 /*
2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "code/compiledIC.hpp"
29 #include "code/scopeDesc.hpp"
30 #include "code/vtableStubs.hpp"
31 #include "compiler/abstractCompiler.hpp"
32 #include "compiler/compileBroker.hpp"
33 #include "compiler/compilerOracle.hpp"
34 #include "interpreter/interpreter.hpp"
35 #include "interpreter/interpreterRuntime.hpp"
36 #include "memory/gcLocker.inline.hpp"
37 #include "memory/universe.inline.hpp"
38 #include "oops/oop.inline.hpp"
39 #include "prims/forte.hpp"
40 #include "prims/jvmtiExport.hpp"
41 #include "prims/jvmtiRedefineClassesTrace.hpp"
42 #include "prims/methodHandles.hpp"
43 #include "prims/nativeLookup.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/biasedLocking.hpp"
46 #include "runtime/handles.inline.hpp"
47 #include "runtime/init.hpp"
48 #include "runtime/interfaceSupport.hpp"
49 #include "runtime/javaCalls.hpp"
50 #include "runtime/sharedRuntime.hpp"
51 #include "runtime/stubRoutines.hpp"
52 #include "runtime/vframe.hpp"
53 #include "runtime/vframeArray.hpp"
54 #include "utilities/copy.hpp"
55 #include "utilities/dtrace.hpp"
56 #include "utilities/events.hpp"
57 #include "utilities/hashtable.inline.hpp"
58 #include "utilities/xmlstream.hpp"
59 #ifdef TARGET_ARCH_x86
60 # include "nativeInst_x86.hpp"
61 # include "vmreg_x86.inline.hpp"
62 #endif
63 #ifdef TARGET_ARCH_sparc
64 # include "nativeInst_sparc.hpp"
65 # include "vmreg_sparc.inline.hpp"
66 #endif
67 #ifdef TARGET_ARCH_zero
68 # include "nativeInst_zero.hpp"
69 # include "vmreg_zero.inline.hpp"
70 #endif
71 #ifdef TARGET_ARCH_arm
72 # include "nativeInst_arm.hpp"
73 # include "vmreg_arm.inline.hpp"
74 #endif
75 #ifdef TARGET_ARCH_ppc
76 # include "nativeInst_ppc.hpp"
77 # include "vmreg_ppc.inline.hpp"
78 #endif
79 #ifdef COMPILER1
80 #include "c1/c1_Runtime1.hpp"
81 #endif
83 #include <math.h>
85 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
86 HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
87 char*, int, char*, int, char*, int);
88 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
89 char*, int, char*, int, char*, int);
// Shared RicochetBlob used to unwind MethodHandle "ricochet" frames
// (presumably installed during stub generation -- set elsewhere, starts NULL).
RicochetBlob* SharedRuntime::_ricochet_blob = NULL;
93 // Implementation of SharedRuntime
#ifndef PRODUCT
// For statistics -- non-product builds only.

// Call-resolution and inline-cache counters.
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

// Per-call-kind counters (normal/static/interface, inlined or not).
int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

// Allocation, monitor, array-copy and exception-path stub counters.
int SharedRuntime::_new_instance_ctr=0;
int SharedRuntime::_new_array_ctr=0;
int SharedRuntime::_multi1_ctr=0;
int SharedRuntime::_multi2_ctr=0;
int SharedRuntime::_multi3_ctr=0;
int SharedRuntime::_multi4_ctr=0;
int SharedRuntime::_multi5_ctr=0;
int SharedRuntime::_mon_enter_stub_ctr=0;
int SharedRuntime::_mon_exit_stub_ctr=0;
int SharedRuntime::_mon_enter_ctr=0;
int SharedRuntime::_mon_exit_ctr=0;
int SharedRuntime::_partial_subtype_ctr=0;
int SharedRuntime::_jbyte_array_copy_ctr=0;
int SharedRuntime::_jshort_array_copy_ctr=0;
int SharedRuntime::_jint_array_copy_ctr=0;
int SharedRuntime::_jlong_array_copy_ctr=0;
int SharedRuntime::_oop_array_copy_ctr=0;
int SharedRuntime::_checkcast_array_copy_ctr=0;
int SharedRuntime::_unsafe_array_copy_ctr=0;
int SharedRuntime::_generic_array_copy_ctr=0;
int SharedRuntime::_slow_array_copy_ctr=0;
int SharedRuntime::_find_handler_ctr=0;
int SharedRuntime::_rethrow_ctr=0;

// IC-miss histogram storage: parallel arrays of (pc, count) pairs, bounded
// by maxICmiss_count; filled by trace_ic_miss() below.
int     SharedRuntime::_ICmiss_index = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
146 void SharedRuntime::trace_ic_miss(address at) {
147 for (int i = 0; i < _ICmiss_index; i++) {
148 if (_ICmiss_at[i] == at) {
149 _ICmiss_count[i]++;
150 return;
151 }
152 }
153 int index = _ICmiss_index++;
154 if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
155 _ICmiss_at[index] = at;
156 _ICmiss_count[index] = 1;
157 }
// Dump the inline-cache miss histogram collected by trace_ic_miss()
// to tty, gated on the ICMissHistogram flag.
void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr ("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr ("Total IC misses: %7d", tot_misses);
  }
}
170 #endif // PRODUCT
#ifndef SERIALGC

// G1 write-barrier pre: executed before a pointer store.
// Enqueues the previous field value on the thread's SATB mark queue.
JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
  if (orig == NULL) {
    // NULL previous values need no barrier; the compiler should have
    // filtered this case out before calling here.
    assert(false, "should be optimized out");
    return;
  }
  assert(orig->is_oop(true /* ignore mark word */), "Error");
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END

// G1 write-barrier post: executed after a pointer store.
// Enqueues the card covering the updated location on the dirty card queue.
JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END

#endif // !SERIALGC
// 64-bit integer arithmetic helpers (presumably for platforms that lack the
// corresponding native instructions).  Note the parameter order: arguments
// arrive reversed, so 'x' is the left operand of the Java expression.

JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END


JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  // min_jlong / -1 overflows (and traps on some CPUs); Java semantics
  // define the result as min_jlong itself.
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END


JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  // min_jlong % -1 would trap on some CPUs; Java defines the result as 0.
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END
// IEEE-754 bit masks used by frem/drem below: strip the sign bit and
// compare against the infinity bit pattern to detect infinite operands.
const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);
// Java float remainder, delegated to C fmod (with a Win64 workaround).
JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.i & float_sign_mask) != float_infinity) &&
       ((ybits.i & float_sign_mask) == float_infinity) ) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x,(double)y));
JRT_END
// Java double remainder, delegated to C fmod (with a Win64 workaround).
JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if ( ((xbits.l & double_sign_mask) != double_infinity) &&
       ((ybits.l & double_sign_mask) == double_infinity) ) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x,(double)y));
JRT_END
#ifdef __SOFTFP__
// Soft-float builds: basic FP arithmetic is performed by runtime calls
// rather than hardware instructions.

JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
  return x + y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
  return x - y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
  return x * y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
  return x + y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
  return x - y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
  return x * y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
  return x / y;
JRT_END
// Widening conversions (int->float/double, float->double); still inside
// the __SOFTFP__ region above.

JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
  return (jfloat)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
  return (jdouble)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
  return (jdouble)x;
JRT_END
// Three-way FP compares matching the fcmpl/fcmpg/dcmpl/dcmpg bytecodes:
// the 'l' variants yield -1 when unordered (NaN), the 'g' variants yield 1.

JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
  return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan*/
JRT_END

JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
  return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
  return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
  return x<y ? -1 : (x==y ? 0 : 1); /* x>y or is_nan */
JRT_END
// Functions to return the opposite of the aeabi functions for nan.
// These all answer 1 (true) when either operand is NaN, i.e. "unordered"
// comparisons; otherwise they test the stated relation.

JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END
// Intrinsics make gcc generate code for these.
// (Plain functions, not JRT_LEAF: gcc emits calls to them directly.)
float SharedRuntime::fneg(float f)   {
  return -f;
}

double SharedRuntime::dneg(double f) {
  return -f;
}

#endif // __SOFTFP__
#if defined(__SOFTFP__) || defined(E500V2)
// Intrinsics make gcc generate code for these.
// Absolute value via compare-and-subtract (0.0 - f rather than -f,
// which also maps -0.0 to +0.0).
double SharedRuntime::dabs(double f)  {
  return (f <= (double)0.0) ? (double)0.0 - f : f;
}

#endif

#if defined(__SOFTFP__) || defined(PPC)
// Square root delegated to the C library.
double SharedRuntime::dsqrt(double f) {
  return sqrt(f);
}
#endif
// Java-semantics floating-point to integer conversions (JLS 5.1.3):
// NaN converts to 0, and out-of-range values saturate to the extreme
// representable value instead of being undefined as in C.

JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jint)
    return max_jint;
  if (x <= (jfloat) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jlong)
    return max_jlong;
  if (x <= (jfloat) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END


JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jint)
    return max_jint;
  if (x <= (jdouble) min_jint)
    return min_jint;
  return (jint) x;
JRT_END


JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jlong)
    return max_jlong;
  if (x <= (jdouble) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END
// Remaining conversions: double->float narrowing and long->float/double.

JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END


JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END
// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
  assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));

  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  // The fastest case first: return address lies inside an nmethod.
  CodeBlob* blob = CodeCache::find_blob(return_address);
  nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Set flag if return address is a method handle call site.
    thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      // Returning into a deoptimized frame: continue in the deopt blob's
      // exception-unpacking entry instead of the nmethod's handler.
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }
  // Ricochet frame unwind code
  if (SharedRuntime::ricochet_blob() != NULL && SharedRuntime::ricochet_blob()->returns_to_bounce_addr(return_address)) {
    return SharedRuntime::ricochet_blob()->exception_addr();
  }

  // No known code region claims this return address: this is a VM bug.
  guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT

  ShouldNotReachHere();
  return NULL;
}
// JRT-callable wrapper around raw_exception_handler_for_return_address().
JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
  return raw_exception_handler_for_return_address(thread, return_address);
JRT_END
// Return the runtime stub handling a safepoint-poll fault at 'pc'.
// pc must be a poll or poll-return site in an nmethod; the stub chosen
// depends on whether the poll sits at a method return or inside a loop.
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob *cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );

  // Look up the relocation information
  assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
    "safepoint polling: type must be poll" );

  assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
    "Only polling locations are used for safepoint");

  // Return polls and loop polls have distinct handler stubs.
  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
      "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
      "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
#ifndef PRODUCT
  if( TraceSafepoint ) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}
// Extract the receiver oop for a pending call with signature 'sig' from the
// expression stack of 'caller', which must be an interpreted frame.
oop SharedRuntime::retrieve_receiver( Symbol* sig, frame caller ) {
  assert(caller.is_interpreted_frame(), "");
  // Argument slots for the signature, plus one slot for the receiver itself.
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  // Receiver is the deepest of the pushed arguments.
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}
// Throw h_exception in 'thread', first posting a JVMTI exception event
// (with the throwing method and bcp) if an agent has enabled them.
void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
  if (JvmtiExport::can_post_on_exceptions()) {
    // Determine the throwing method/bci from the top Java frame.
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}
551 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Symbol* name, const char *message) {
552 Handle h_exception = Exceptions::new_exception(thread, name, message);
553 throw_and_post_jvmti_exception(thread, h_exception);
554 }
// The interpreter code to call this tracing function is only
// called/generated when TraceRedefineClasses has the right bits
// set. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");

  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the methodOop from the constant pool.

    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE_WITH_THREAD(0x00001000, thread,
      ("calling obsolete method '%s'",
       method->name_and_sig_as_C_string()));
    if (RC_TRACE_ENABLED(0x00002000)) {
      // this option is provided to debug calls to obsolete methods
      guarantee(false, "faulting at call to an obsolete method.");
    }
  }
  return 0;  // dummy value; presumably ignored by the generated caller
JRT_END
// ret_pc points into caller; we are returning caller's exception handler
// for given exception.  Walks the (possibly inlined) scopes at ret_pc
// looking for a bytecode-level handler, then maps the (pc-offset, bci,
// scope-depth) triple through the nmethod's exception handler table.
// force_unwind skips the handler search entirely; top_frame_only limits
// it to the innermost scope.
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek (THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      // No handler in this scope: move outward to the caller scope.
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (!top_frame_only && handler_bci < 0 && sd != NULL);
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->code_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  // C1 has a generic unwind handler when no table entry matches.
  if (t == NULL && nm->is_compiled_by_c1()) {
    assert(nm->unwind_handler_begin() != NULL, "");
    return nm->unwind_handler_begin();
  }
#endif

  if (t == NULL) {
    // Diagnostic dump before aborting: the handler table must cover us.
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr(" Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->code_begin() + t->pco();
}
// Runtime entry points that fabricate and throw the standard exceptions
// raised from compiled code; each posts the JVMTI exception event via
// throw_and_post_jvmti_exception().

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END
JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  klassOop k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception (thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END
// Map the faulting pc of an implicit exception (null check, divide by zero,
// stack overflow) to the address where execution should continue.  Returns
// NULL when the signal handler should report the fault as a genuine SEGV.
address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    // Template interpreter: dispatch straight to the matching throw entry.
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);

          // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
          if (vt_stub == NULL) return NULL;

          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);

          // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
          if (cb == NULL) return NULL;

          // Exception happened in CodeCache. Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (1)");
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod. Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          // If there's an unexpected fault, target_pc might be NULL,
          // in which case we want to fall through into the normal
          // error handling code.
        }

        break; // fall through
      }


      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        // If there's an unexpected fault, target_pc might be NULL,
        // in which case we want to fall through into the normal
        // error handling code.
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}
// Entry that throws UnsatisfiedLinkError; presumably installed as the
// native entry point of not-yet-linked native methods (see accessor below).
JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END

// Address of the handler above, for use as a native method entry point.
address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}
#ifndef PRODUCT
// Called from interpreter-generated code to trace the bytecode at the
// current bcp of the top interpreted frame.  The first argument is passed
// back unchanged -- presumably so the generated caller can preserve a live
// value across the call; confirm against the interpreter templates.
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT
// Yield the current thread; 'attempts' is forwarded to the OS layer.
JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END
// Register obj (whose class must declare a finalizer) with the finalizer
// machinery.  NO_ASYNC variant -- presumably async exceptions must not be
// installed during registration; confirm against interfaceSupport.hpp.
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
  instanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END
861 jlong SharedRuntime::get_java_tid(Thread* thread) {
862 if (thread != NULL) {
863 if (thread->is_Java_thread()) {
864 oop obj = ((JavaThread*)thread)->threadObj();
865 return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
866 }
867 }
868 return 0;
869 }
/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741. Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

// Fire the hotspot object__alloc DTrace probe with the allocating thread's
// Java tid, the class name (bytes + length), and the object size in bytes.
int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->blueprint();
  int size = o->size();  // size in heap words; converted to bytes below
  Symbol* name = klass->name();
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
  return 0;
}
// Fire the hotspot method__entry DTrace probe with the caller's Java tid
// and the method's class name, name and signature (bytes + lengths).
JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
      kname->bytes(), kname->utf8_length(),
      name->bytes(), name->utf8_length(),
      sig->bytes(), sig->utf8_length());
  return 0;  // dummy return value, see dtrace_object_alloc() above
JRT_END

// Fire the hotspot method__return DTrace probe; mirror of the entry probe.
JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, methodOopDesc* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
      kname->bytes(), kname->utf8_length(),
      name->bytes(), name->utf8_length(),
      sig->bytes(), sig->utf8_length());
  return 0;  // dummy return value, see dtrace_object_alloc() above
JRT_END
// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but callee has not been invoked yet.  Used by: resolve virtual/static,
// vtable updates, etc.  Caller frame must be compiled.
// Any exception raised during resolution is propagated to the caller
// (CHECK_ returns an empty Handle in that case).
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}
931 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
932 // for a call current in progress, i.e., arguments has been pushed on stack
933 // but callee has not been invoked yet. Caller frame must be compiled.
934 Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
935 vframeStream& vfst,
936 Bytecodes::Code& bc,
937 CallInfo& callinfo, TRAPS) {
938 Handle receiver;
939 Handle nullHandle; //create a handy null handle for exception returns
941 assert(!vfst.at_end(), "Java frame must exist");
943 // Find caller and bci from vframe
944 methodHandle caller (THREAD, vfst.method());
945 int bci = vfst.bci();
947 // Find bytecode
948 Bytecode_invoke bytecode(caller, bci);
949 bc = bytecode.java_code();
950 int bytecode_index = bytecode.index();
952 // Find receiver for non-static call
953 if (bc != Bytecodes::_invokestatic) {
954 // This register map must be update since we need to find the receiver for
955 // compiled frames. The receiver might be in a register.
956 RegisterMap reg_map2(thread);
957 frame stubFrame = thread->last_frame();
958 // Caller-frame is a compiled frame
959 frame callerFrame = stubFrame.sender(®_map2);
961 methodHandle callee = bytecode.static_target(CHECK_(nullHandle));
962 if (callee.is_null()) {
963 THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
964 }
965 // Retrieve from a compiled argument list
966 receiver = Handle(THREAD, callerFrame.retrieve_receiver(®_map2));
968 if (receiver.is_null()) {
969 THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
970 }
971 }
973 // Resolve method. This is parameterized by bytecode.
974 constantPoolHandle constants (THREAD, caller->constants());
975 assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
976 LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
978 #ifdef ASSERT
979 // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
980 if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
981 assert(receiver.not_null(), "should have thrown exception");
982 KlassHandle receiver_klass (THREAD, receiver->klass());
983 klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
984 // klass is already loaded
985 KlassHandle static_receiver_klass (THREAD, rk);
986 assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
987 if (receiver_klass->oop_is_instance()) {
988 if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
989 tty->print_cr("ERROR: Klass not yet initialized!!");
990 receiver_klass.print();
991 }
992 assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
993 }
994 }
995 #endif
997 return receiver;
998 }
1000 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
1001 ResourceMark rm(THREAD);
1002 // We need first to check if any Java activations (compiled, interpreted)
1003 // exist on the stack since last JavaCall. If not, we need
1004 // to get the target method from the JavaCall wrapper.
1005 vframeStream vfst(thread, true); // Do not skip any javaCalls
1006 methodHandle callee_method;
1007 if (vfst.at_end()) {
1008 // No Java frames were found on stack since we did the JavaCall.
1009 // Hence the stack can only contain an entry_frame. We need to
1010 // find the target method from the stub frame.
1011 RegisterMap reg_map(thread, false);
1012 frame fr = thread->last_frame();
1013 assert(fr.is_runtime_frame(), "must be a runtimeStub");
1014 fr = fr.sender(®_map);
1015 assert(fr.is_entry_frame(), "must be");
1016 // fr is now pointing to the entry frame.
1017 callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
1018 assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
1019 } else {
1020 Bytecodes::Code bc;
1021 CallInfo callinfo;
1022 find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
1023 callee_method = callinfo.selected_method();
1024 }
1025 assert(callee_method()->is_method(), "must be");
1026 return callee_method;
1027 }
// Resolves a call.  Wraps resolve_sub_helper() with a retry loop that
// copes with JVMTI class redefinition (hotswap): if the resolved method
// was made "old" while we were resolving, resolve again until we see the
// latest version (bounded at 100 attempts).
methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  // Note: passes THREAD (not CHECK) so a pending exception can be
  // inspected by the loop condition below rather than returning early.
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::Object_klass()) {
      // If has a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that method is redefined more than 100 times
      // in the middle of resolve. If it is looping here more than 100 times
      // means then there could be a bug here.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}
// Resolves a call.  The compilers generate code for calls that go here
// and are patched with the real destination of the call.
//
// Determines the call target (and receiver, for virtual calls), computes
// the compiled-code entry point, and — under CompiledIC_lock — patches the
// caller's call site (inline cache or static call stub) if it is still
// clean and the callee has not been redefined.  Returns the resolved
// callee; propagates any resolution exception via TRAPS.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
  nmethod* caller_nm = caller_cb->as_nmethod_or_null();
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock(caller_nm);


  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
                (is_virtual) ? (&_resolve_virtual_ctr) :
                               (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
      (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
      Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  }
#endif

  // JSR 292
  // If the resolved method is a MethodHandle invoke target the call
  // site must be a MethodHandle call site.
  if (callee_method->is_method_handle_invoke()) {
    assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
  }

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call.  We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized.  Return values: For a virtual call this is an
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;

  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* callee_nm = callee_method->code();
  nmethodLocker nl_callee(callee_nm);
#ifdef ASSERT
  address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                     is_optimized, static_bound, virtual_call_info,
                     CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch if the methodOop was redefined then
    // don't update call site and let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        // Only patch a still-clean inline cache; a racing thread may have
        // patched it already.
        CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}
1175 // Inline caches exist only in compiled code
1176 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
1177 #ifdef ASSERT
1178 RegisterMap reg_map(thread, false);
1179 frame stub_frame = thread->last_frame();
1180 assert(stub_frame.is_runtime_frame(), "sanity check");
1181 frame caller_frame = stub_frame.sender(®_map);
1182 assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
1183 assert(!caller_frame.is_ricochet_frame(), "unexpected frame");
1184 #endif /* ASSERT */
1186 methodHandle callee_method;
1187 JRT_BLOCK
1188 callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
1189 // Return methodOop through TLS
1190 thread->set_vm_result(callee_method());
1191 JRT_BLOCK_END
1192 // return compiled code entry point after potential safepoints
1193 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1194 return callee_method->verified_code_entry();
1195 JRT_END
1198 // Handle call site that has been made non-entrant
1199 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
1200 // 6243940 We might end up in here if the callee is deoptimized
1201 // as we race to call it. We don't want to take a safepoint if
1202 // the caller was interpreted because the caller frame will look
1203 // interpreted to the stack walkers and arguments are now
1204 // "compiled" so it is much better to make this transition
1205 // invisible to the stack walking code. The i2c path will
1206 // place the callee method in the callee_target. It is stashed
1207 // there because if we try and find the callee by normal means a
1208 // safepoint is possible and have trouble gc'ing the compiled args.
1209 RegisterMap reg_map(thread, false);
1210 frame stub_frame = thread->last_frame();
1211 assert(stub_frame.is_runtime_frame(), "sanity check");
1212 frame caller_frame = stub_frame.sender(®_map);
1214 // MethodHandle invokes don't have a CompiledIC and should always
1215 // simply redispatch to the callee_target.
1216 address sender_pc = caller_frame.pc();
1217 CodeBlob* sender_cb = caller_frame.cb();
1218 nmethod* sender_nm = sender_cb->as_nmethod_or_null();
1219 bool is_mh_invoke_via_adapter = false; // Direct c2c call or via adapter?
1220 if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
1221 // If the callee_target is set, then we have come here via an i2c
1222 // adapter.
1223 methodOop callee = thread->callee_target();
1224 if (callee != NULL) {
1225 assert(callee->is_method(), "sanity");
1226 is_mh_invoke_via_adapter = true;
1227 }
1228 }
1230 if (caller_frame.is_interpreted_frame() ||
1231 caller_frame.is_entry_frame() ||
1232 caller_frame.is_ricochet_frame() ||
1233 is_mh_invoke_via_adapter) {
1234 methodOop callee = thread->callee_target();
1235 guarantee(callee != NULL && callee->is_method(), "bad handshake");
1236 thread->set_vm_result(callee);
1237 thread->set_callee_target(NULL);
1238 return callee->get_c2i_entry();
1239 }
1241 // Must be compiled to compiled path which is safe to stackwalk
1242 methodHandle callee_method;
1243 JRT_BLOCK
1244 // Force resolving of caller (if we called from compiled frame)
1245 callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
1246 thread->set_vm_result(callee_method());
1247 JRT_BLOCK_END
1248 // return compiled code entry point after potential safepoints
1249 assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
1250 return callee_method->verified_code_entry();
1251 JRT_END
// resolve a static call and patch code
// Compiled-code entry point: resolves the static call (is_virtual=false,
// is_optimized=false), publishes the methodOop via TLS, and returns the
// verified compiled entry point to jump to.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END
// resolve virtual call and update inline cache to monomorphic
// Compiled-code entry point: resolves the virtual call (is_virtual=true,
// is_optimized=false), publishes the methodOop via TLS, and returns the
// verified compiled entry point to jump to.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END
// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache).  Patch code to resolved target.
// Compiled-code entry point: resolves with is_virtual=true and
// is_optimized=true, publishes the methodOop via TLS, and returns the
// verified compiled entry point to jump to.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END
1297 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
1298 ResourceMark rm(thread);
1299 CallInfo call_info;
1300 Bytecodes::Code bc;
1302 // receiver is NULL for static calls. An exception is thrown for NULL
1303 // receivers for non-static calls
1304 Handle receiver = find_callee_info(thread, bc, call_info,
1305 CHECK_(methodHandle()));
1306 // Compiler1 can produce virtual call sites that can actually be statically bound
1307 // If we fell thru to below we would think that the site was going megamorphic
1308 // when in fact the site can never miss. Worse because we'd think it was megamorphic
1309 // we'd try and do a vtable dispatch however methods that can be statically bound
1310 // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
1311 // reresolution of the call site (as if we did a handle_wrong_method and not an
1312 // plain ic_miss) and the site will be converted to an optimized virtual call site
1313 // never to miss again. I don't believe C2 will produce code like this but if it
1314 // did this would still be the correct thing to do for it too, hence no ifdef.
1315 //
1316 if (call_info.resolved_method()->can_be_statically_bound()) {
1317 methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
1318 if (TraceCallFixup) {
1319 RegisterMap reg_map(thread, false);
1320 frame caller_frame = thread->last_frame().sender(®_map);
1321 ResourceMark rm(thread);
1322 tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1323 callee_method->print_short_name(tty);
1324 tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
1325 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1326 }
1327 return callee_method;
1328 }
1330 methodHandle callee_method = call_info.selected_method();
1332 bool should_be_mono = false;
1334 #ifndef PRODUCT
1335 Atomic::inc(&_ic_miss_ctr);
1337 // Statistics & Tracing
1338 if (TraceCallFixup) {
1339 ResourceMark rm(thread);
1340 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1341 callee_method->print_short_name(tty);
1342 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1343 }
1345 if (ICMissHistogram) {
1346 MutexLocker m(VMStatistic_lock);
1347 RegisterMap reg_map(thread, false);
1348 frame f = thread->last_frame().real_sender(®_map);// skip runtime stub
1349 // produce statistics under the lock
1350 trace_ic_miss(f.pc());
1351 }
1352 #endif
1354 // install an event collector so that when a vtable stub is created the
1355 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1356 // event can't be posted when the stub is created as locks are held
1357 // - instead the event will be deferred until the event collector goes
1358 // out of scope.
1359 JvmtiDynamicCodeEventCollector event_collector;
1361 // Update inline cache to megamorphic. Skip update if caller has been
1362 // made non-entrant or we are called from interpreted.
1363 { MutexLocker ml_patch (CompiledIC_lock);
1364 RegisterMap reg_map(thread, false);
1365 frame caller_frame = thread->last_frame().sender(®_map);
1366 CodeBlob* cb = caller_frame.cb();
1367 if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
1368 // Not a non-entrant nmethod, so find inline_cache
1369 CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
1370 bool should_be_mono = false;
1371 if (inline_cache->is_optimized()) {
1372 if (TraceCallFixup) {
1373 ResourceMark rm(thread);
1374 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1375 callee_method->print_short_name(tty);
1376 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1377 }
1378 should_be_mono = true;
1379 } else {
1380 compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
1381 if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
1383 if (receiver()->klass() == ic_oop->holder_klass()) {
1384 // This isn't a real miss. We must have seen that compiled code
1385 // is now available and we want the call site converted to a
1386 // monomorphic compiled call site.
1387 // We can't assert for callee_method->code() != NULL because it
1388 // could have been deoptimized in the meantime
1389 if (TraceCallFixup) {
1390 ResourceMark rm(thread);
1391 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1392 callee_method->print_short_name(tty);
1393 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1394 }
1395 should_be_mono = true;
1396 }
1397 }
1398 }
1400 if (should_be_mono) {
1402 // We have a path that was monomorphic but was going interpreted
1403 // and now we have (or had) a compiled entry. We correct the IC
1404 // by using a new icBuffer.
1405 CompiledICInfo info;
1406 KlassHandle receiver_klass(THREAD, receiver()->klass());
1407 inline_cache->compute_monomorphic_entry(callee_method,
1408 receiver_klass,
1409 inline_cache->is_optimized(),
1410 false,
1411 info, CHECK_(methodHandle()));
1412 inline_cache->set_to_monomorphic(info);
1413 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1414 // Change to megamorphic
1415 inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1416 } else {
1417 // Either clean or megamorphic
1418 }
1419 }
1420 } // Release CompiledIC_lock
1422 return callee_method;
1423 }
1425 //
1426 // Resets a call-site in compiled code so it will get resolved again.
1427 // This routines handles both virtual call sites, optimized virtual call
1428 // sites, and static call sites. Typically used to change a call sites
1429 // destination from compiled to interpreted.
1430 //
1431 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1432 ResourceMark rm(thread);
1433 RegisterMap reg_map(thread, false);
1434 frame stub_frame = thread->last_frame();
1435 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1436 frame caller = stub_frame.sender(®_map);
1438 // Do nothing if the frame isn't a live compiled frame.
1439 // nmethod could be deoptimized by the time we get here
1440 // so no update to the caller is needed.
1442 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1444 address pc = caller.pc();
1445 Events::log("update call-site at pc " INTPTR_FORMAT, pc);
1447 // Default call_addr is the location of the "basic" call.
1448 // Determine the address of the call we a reresolving. With
1449 // Inline Caches we will always find a recognizable call.
1450 // With Inline Caches disabled we may or may not find a
1451 // recognizable call. We will always find a call for static
1452 // calls and for optimized virtual calls. For vanilla virtual
1453 // calls it depends on the state of the UseInlineCaches switch.
1454 //
1455 // With Inline Caches disabled we can get here for a virtual call
1456 // for two reasons:
1457 // 1 - calling an abstract method. The vtable for abstract methods
1458 // will run us thru handle_wrong_method and we will eventually
1459 // end up in the interpreter to throw the ame.
1460 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1461 // call and between the time we fetch the entry address and
1462 // we jump to it the target gets deoptimized. Similar to 1
1463 // we will wind up in the interprter (thru a c2i with c2).
1464 //
1465 address call_addr = NULL;
1466 {
1467 // Get call instruction under lock because another thread may be
1468 // busy patching it.
1469 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1470 // Location of call instruction
1471 if (NativeCall::is_call_before(pc)) {
1472 NativeCall *ncall = nativeCall_before(pc);
1473 call_addr = ncall->instruction_address();
1474 }
1475 }
1477 // Check for static or virtual call
1478 bool is_static_call = false;
1479 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1480 // Make sure nmethod doesn't get deoptimized and removed until
1481 // this is done with it.
1482 // CLEANUP - with lazy deopt shouldn't need this lock
1483 nmethodLocker nmlock(caller_nm);
1485 if (call_addr != NULL) {
1486 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1487 int ret = iter.next(); // Get item
1488 if (ret) {
1489 assert(iter.addr() == call_addr, "must find call");
1490 if (iter.type() == relocInfo::static_call_type) {
1491 is_static_call = true;
1492 } else {
1493 assert(iter.type() == relocInfo::virtual_call_type ||
1494 iter.type() == relocInfo::opt_virtual_call_type
1495 , "unexpected relocInfo. type");
1496 }
1497 } else {
1498 assert(!UseInlineCaches, "relocation info. must exist for this address");
1499 }
1501 // Cleaning the inline cache will force a new resolve. This is more robust
1502 // than directly setting it to the new destination, since resolving of calls
1503 // is always done through the same code path. (experience shows that it
1504 // leads to very hard to track down bugs, if an inline cache gets updated
1505 // to a wrong method). It should not be performance critical, since the
1506 // resolve is only done once.
1508 MutexLocker ml(CompiledIC_lock);
1509 //
1510 // We do not patch the call site if the nmethod has been made non-entrant
1511 // as it is a waste of time
1512 //
1513 if (caller_nm->is_in_use()) {
1514 if (is_static_call) {
1515 CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
1516 ssc->set_to_clean();
1517 } else {
1518 // compiled, dispatched call (which used to call an interpreted method)
1519 CompiledIC* inline_cache = CompiledIC_at(call_addr);
1520 inline_cache->set_to_clean();
1521 }
1522 }
1523 }
1525 }
1527 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1530 #ifndef PRODUCT
1531 Atomic::inc(&_wrong_method_ctr);
1533 if (TraceCallFixup) {
1534 ResourceMark rm(thread);
1535 tty->print("handle_wrong_method reresolving call to");
1536 callee_method->print_short_name(tty);
1537 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1538 }
1539 #endif
1541 return callee_method;
1542 }
// ---------------------------------------------------------------------------
// We are calling the interpreter via a c2i. Normally this would mean that
// we were called by a compiled method. However we could have lost a race
// where we went int -> i2c -> c2i and so the caller could in fact be
// interpreted. If the caller is compiled we attempt to patch the caller
// so he no longer calls into the interpreter.
IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  methodOop moop(method);

  address entry_point = moop->from_compiled_entry();

  // It's possible that deoptimization can occur at a call site which hasn't
  // been resolved yet, in which case this function will be called from
  // an nmethod that has been patched for deopt and we can ignore the
  // request for a fixup.
  // Also it is possible that we lost a race in that from_compiled_entry
  // is now back to the i2c in that case we don't need to patch and if
  // we did we'd leap into space because the callsite needs to use
  // "to interpreter" stub in order to load up the methodOop. Don't
  // ask me how I know this...

  CodeBlob* cb = CodeCache::find_blob(caller_pc);
  if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
    return;
  }

  // The check above makes sure this is a nmethod.
  nmethod* nm = cb->as_nmethod_or_null();
  assert(nm, "must be");

  // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
  // to implement MethodHandle actions.
  if (nm->is_method_handle_return(caller_pc)) {
    return;
  }

  // There is a benign race here. We could be attempting to patch to a compiled
  // entry point at the same time the callee is being deoptimized. If that is
  // the case then entry_point may in fact point to a c2i and we'd patch the
  // call site with the same old data. clear_code will set code() to NULL
  // at the end of it. If we happen to see that NULL then we can skip trying
  // to patch. If we hit the window where the callee has a c2i in the
  // from_compiled_entry and the NULL isn't present yet then we lose the race
  // and patch the code with the same old data. Asi es la vida.

  if (moop->code() == NULL) return;

  if (nm->is_in_use()) {

    // Expect to find a native call there (unless it was no-inline cache vtable dispatch)
    MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
    if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
      NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
      //
      // bug 6281185. We might get here after resolving a call site to a vanilla
      // virtual call. Because the resolvee uses the verified entry it may then
      // see compiled code and attempt to patch the site by calling us. This would
      // then incorrectly convert the call site to optimized and its downhill from
      // there. If you're lucky you'll get the assert in the bugid, if not you've
      // just made a call site that could be megamorphic into a monomorphic site
      // for the rest of its life! Just another racing bug in the life of
      // fixup_callers_callsite ...
      //
      RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
      iter.next();
      assert(iter.has_current(), "must have a reloc at java call site");
      relocInfo::relocType typ = iter.reloc()->type();
      // Only static / optimized-virtual call sites may be patched here;
      // anything else (e.g. a vanilla virtual call) is left alone.
      if ( typ != relocInfo::static_call_type &&
           typ != relocInfo::opt_virtual_call_type &&
           typ != relocInfo::static_stub_type) {
        return;
      }
      address destination = call->destination();
      if (destination != entry_point) {
        CodeBlob* callee = CodeCache::find_blob(destination);
        // callee == cb seems weird. It means calling interpreter thru stub.
        if (callee == cb || callee->is_adapter_blob()) {
          // static call or optimized virtual
          if (TraceCallFixup) {
            tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          call->set_destination_mt_safe(entry_point);
        } else {
          if (TraceCallFixup) {
            tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
          // assert is too strong could also be resolve destinations.
          // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
        }
      } else {
          if (TraceCallFixup) {
            tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
            moop->print_short_name(tty);
            tty->print_cr(" to " INTPTR_FORMAT, entry_point);
          }
      }
    }
  }

IRT_END
// same as JVM_Arraycopy, but called directly from compiled code
// Slow-path System.arraycopy: validates nullness here, then delegates to
// Klass::copy_array which performs the remaining dynamic type checks.
// Throws NullPointerException if either array is NULL.
JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
                                                oopDesc* dest, jint dest_pos,
                                                jint length,
                                                JavaThread* thread)) {
#ifndef PRODUCT
  _slow_array_copy_ctr++;
#endif
  // Check if we have null pointers
  if (src == NULL || dest == NULL) {
    THROW(vmSymbols::java_lang_NullPointerException());
  }
  // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
  // even though the copy_array API also performs dynamic checks to ensure
  // that src and dest are truly arrays (and are conformable).
  // The copy_array mechanism is awkward and could be removed, but
  // the compilers don't call this function except as a last resort,
  // so it probably doesn't matter.
  Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
                                        (arrayOopDesc*)dest, dest_pos,
                                        length, thread);
}
JRT_END
// Build a ClassCastException detail message for the checkcast that just
// failed in the top Java frame of 'thread': combines the object's class
// name with the target class named by the checkcast's constant-pool entry.
char* SharedRuntime::generate_class_cast_message(
    JavaThread* thread, const char* objName) {

  // Get target class name from the checkcast instruction
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");
  Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
  Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
    cc.index(), thread));
  return generate_class_cast_message(objName, targetKlass->external_name());
}
// Build the detail message for a WrongMethodTypeException raised by a
// method handle invocation: describe the handle's actual type versus the
// type 'required' at the call site.  'actual' may be a method handle, a
// klass, or other junk; the code below is deliberately lenient about it.
char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
                                                        oopDesc* required,
                                                        oopDesc* actual) {
  if (TraceMethodHandles) {
    tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"",
                  thread, required, actual);
  }
  assert(EnableInvokeDynamic, "");
  oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
  char* message = NULL;
  if (singleKlass != NULL) {
    // The mismatch concerns a single argument or return value; phrase the
    // message like a class-cast failure on that one value.
    const char* objName = "argument or return value";
    if (actual != NULL) {
      // be flexible about the junk passed in:
      klassOop ak = (actual->is_klass()
                     ? (klassOop)actual
                     : actual->klass());
      objName = Klass::cast(ak)->external_name();
    }
    Klass* targetKlass = Klass::cast(required->is_klass()
                                     ? (klassOop)required
                                     : java_lang_Class::as_klassOop(required));
    message = generate_class_cast_message(objName, targetKlass->external_name());
  } else {
    // %%% need to get the MethodType string, without messing around too much
    const char* desc = NULL;
    // Get a signature from the invoke instruction
    const char* mhName = "method handle";
    const char* targetType = "the required signature";
    int targetArity = -1, mhArity = -1;
    vframeStream vfst(thread, true);
    if (!vfst.at_end()) {
      Bytecode_invoke call(vfst.method(), vfst.bci());
      methodHandle target;
      {
        // Resolving the call site's target may throw; swallow any exception
        // here since we only want a nicer message, not a new error.
        EXCEPTION_MARK;
        target = call.static_target(THREAD);
        if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
      }
      if (target.not_null()
          && target->is_method_handle_invoke()
          && required == target->method_handle_type()) {
        targetType = target->signature()->as_C_string();
        targetArity = ArgumentCount(target->signature()).size();
      }
    }
    KlassHandle kignore; int dmf_flags = 0;
    methodHandle actual_method = MethodHandles::decode_method(actual, kignore, dmf_flags);
    if ((dmf_flags & ~(MethodHandles::_dmf_has_receiver |
                       MethodHandles::_dmf_does_dispatch |
                       MethodHandles::_dmf_from_interface)) != 0)
      actual_method = methodHandle();  // MH does extra binds, drops, etc.
    bool has_receiver = ((dmf_flags & MethodHandles::_dmf_has_receiver) != 0);
    if (actual_method.not_null()) {
      mhName = actual_method->signature()->as_C_string();
      mhArity = ArgumentCount(actual_method->signature()).size();
      if (!actual_method->is_static())  mhArity += 1;  // the receiver counts toward arity
    } else if (java_lang_invoke_MethodHandle::is_instance(actual)) {
      // No decodable target method, but 'actual' is a MethodHandle: use its
      // MethodType's printed signature instead.
      oopDesc* mhType = java_lang_invoke_MethodHandle::type(actual);
      mhArity = java_lang_invoke_MethodType::ptype_count(mhType);
      stringStream st;
      java_lang_invoke_MethodType::print_signature(mhType, &st);
      mhName = st.as_string();
    }
    if (targetArity != -1 && targetArity != mhArity) {
      if (has_receiver && targetArity == mhArity-1)
        desc = " cannot be called without a receiver argument as ";
      else
        desc = " cannot be called with a different arity as ";
    }
    message = generate_class_cast_message(mhName, targetType,
                                          desc != NULL ? desc :
                                          " cannot be called as ");
  }
  if (TraceMethodHandles) {
    tty->print_cr("WrongMethodType => message=%s", message);
  }
  return message;
}
1766 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
1767 oopDesc* required) {
1768 if (required == NULL) return NULL;
1769 if (required->klass() == SystemDictionary::Class_klass())
1770 return required;
1771 if (required->is_klass())
1772 return Klass::cast(klassOop(required))->java_mirror();
1773 return NULL;
1774 }
1777 char* SharedRuntime::generate_class_cast_message(
1778 const char* objName, const char* targetKlassName, const char* desc) {
1779 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
1781 char* message = NEW_RESOURCE_ARRAY(char, msglen);
1782 if (NULL == message) {
1783 // Shouldn't happen, but don't cause even more problems if it does
1784 message = const_cast<char*>(objName);
1785 } else {
1786 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
1787 }
1788 return message;
1789 }
// Re-arm the current thread's stack guard pages after a stack overflow
// has been handled; leaf call, so no safepoint checks.
JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  (void) JavaThread::current()->reguard_stack();
JRT_END
// Handles the uncommon case in locking, i.e., contention or an inflated lock.
#ifndef PRODUCT
int SharedRuntime::_monitor_enter_ctr=0;  // slow-path monitorenter count (debug builds only)
#endif
// Slow path of monitorenter from compiled code: the inlined fast path did
// not succeed (contended, biased to another thread, or inflated lock).
JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
  oop obj(_obj);
#ifndef PRODUCT
  _monitor_enter_ctr++;             // monitor enter slow
#endif
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  // Handleize before calling out; the synchronizer may block/GC.
  Handle h_obj(THREAD, obj);
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  } else {
    ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
  }
  assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
JRT_END
#ifndef PRODUCT
int SharedRuntime::_monitor_exit_ctr=0;   // slow-path monitorexit count (debug builds only)
#endif
// Handles the uncommon cases of monitor unlocking in compiled code
JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
  oop obj(_obj);
#ifndef PRODUCT
  _monitor_exit_ctr++;              // monitor exit slow
#endif
  Thread* THREAD = JavaThread::current();
  // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore
  // testing was unable to ever fire the assert that guarded it so I have removed it.
  assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
  // The #undef keeps the legacy save/restore code below compiled out while
  // preserving it in the source for reference.
#undef MIGHT_HAVE_PENDING
#ifdef MIGHT_HAVE_PENDING
  // Save and restore any pending_exception around the exception mark.
  // While the slow_exit must not throw an exception, we could come into
  // this routine with one set.
  oop pending_excep = NULL;
  const char* pending_file;
  int pending_line;
  if (HAS_PENDING_EXCEPTION) {
    pending_excep = PENDING_EXCEPTION;
    pending_file = THREAD->exception_file();
    pending_line = THREAD->exception_line();
    CLEAR_PENDING_EXCEPTION;
  }
#endif /* MIGHT_HAVE_PENDING */

  {
    // Exit must be non-blocking, and therefore no exceptions can be thrown.
    EXCEPTION_MARK;
    ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  }

#ifdef MIGHT_HAVE_PENDING
  if (pending_excep != NULL) {
    THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
  }
#endif /* MIGHT_HAVE_PENDING */
JRT_END
1860 #ifndef PRODUCT
// Dump all of SharedRuntime's debug-build counters to tty, wrapped in an
// xtty 'statistics' element when XML logging is active.  Counters that are
// zero are mostly suppressed.
void SharedRuntime::print_statistics() {
  ttyLocker ttyl;                                  // keep the whole dump atomic on tty
  if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");

  if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
  if (_monitor_exit_ctr  ) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
  if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);

  SharedRuntime::print_ic_miss_histogram();

  if (CountRemovableExceptions) {
    if (_nof_removable_exceptions > 0) {
      Unimplemented(); // this counter is not yet incremented
      tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
    }
  }

  // Dump the JRT_ENTRY counters
  if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
  if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
  if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
  if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
  if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
  if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
  if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);

  // Call-resolution counters are printed unconditionally.
  tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
  tty->print_cr("%5d wrong method", _wrong_method_ctr );
  tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
  tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
  tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );

  if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
  if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
  if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
  if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
  if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
  if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
  if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
  if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
  if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
  if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
  if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
  if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
  if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
  if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
  if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
  if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );

  AdapterHandlerLibrary::print_statistics();

  if (xtty != NULL)  xtty->tail("statistics");
}
// Express x as a percentage of y; a non-positive denominator is clamped to 1
// so the division is always well-defined.
inline double percent(int x, int y) {
  const int denom = (y < 1) ? 1 : y;
  return 100.0 * x / denom;
}
1920 class MethodArityHistogram {
1921 public:
1922 enum { MAX_ARITY = 256 };
1923 private:
1924 static int _arity_histogram[MAX_ARITY]; // histogram of #args
1925 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words
1926 static int _max_arity; // max. arity seen
1927 static int _max_size; // max. arg size seen
1929 static void add_method_to_histogram(nmethod* nm) {
1930 methodOop m = nm->method();
1931 ArgumentCount args(m->signature());
1932 int arity = args.size() + (m->is_static() ? 0 : 1);
1933 int argsize = m->size_of_parameters();
1934 arity = MIN2(arity, MAX_ARITY-1);
1935 argsize = MIN2(argsize, MAX_ARITY-1);
1936 int count = nm->method()->compiled_invocation_count();
1937 _arity_histogram[arity] += count;
1938 _size_histogram[argsize] += count;
1939 _max_arity = MAX2(_max_arity, arity);
1940 _max_size = MAX2(_max_size, argsize);
1941 }
1943 void print_histogram_helper(int n, int* histo, const char* name) {
1944 const int N = MIN2(5, n);
1945 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1946 double sum = 0;
1947 double weighted_sum = 0;
1948 int i;
1949 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
1950 double rest = sum;
1951 double percent = sum / 100;
1952 for (i = 0; i <= N; i++) {
1953 rest -= histo[i];
1954 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
1955 }
1956 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
1957 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
1958 }
1960 void print_histogram() {
1961 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1962 print_histogram_helper(_max_arity, _arity_histogram, "arity");
1963 tty->print_cr("\nSame for parameter size (in words):");
1964 print_histogram_helper(_max_size, _size_histogram, "size");
1965 tty->cr();
1966 }
1968 public:
1969 MethodArityHistogram() {
1970 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1971 _max_arity = _max_size = 0;
1972 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
1973 CodeCache::nmethods_do(add_method_to_histogram);
1974 print_histogram();
1975 }
1976 };
// Storage for MethodArityHistogram's static accumulators (zero-initialized).
int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
int MethodArityHistogram::_max_arity;
int MethodArityHistogram::_max_size;
// Print a breakdown of calls made from compiled code -- virtual vs.
// interface vs. static/special, each with inlined/optimized/monomorphic/
// megamorphic sub-categories -- followed by the arity/size histogram.
// Note the comp_total parameter is currently unused by the body.
void SharedRuntime::print_call_statistics(int comp_total) {
  tty->print_cr("Calls from compiled code:");
  int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
  // Monomorphic = normal minus the optimized and megamorphic cases.
  int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
  int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
  tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
  tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
  tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
  tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
  tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
  tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
  tty->cr();
  tty->print_cr("Note 1: counter updates are not MT-safe.");
  tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
  tty->print_cr("        %% in nested categories are relative to their category");
  tty->print_cr("        (and thus add up to more than 100%% with inlining)");
  tty->cr();

  // Constructing the histogram walks the code cache and prints it.
  MethodArityHistogram h;
}
2010 #endif
// A simple wrapper class around the calling convention information
// that allows sharing of adapters for the same calling convention.
class AdapterFingerPrint : public CHeapObj {
 private:
  // The encoded signature lives either inline (_compact, the common case)
  // or in a C-heap-allocated array (_fingerprint) for long signatures.
  union {
    int  _compact[3];
    int* _fingerprint;
  } _value;
  int _length; // A negative length indicates the fingerprint is in the compact form,
               // Otherwise _value._fingerprint is the array.

  // Remap BasicTypes that are handled equivalently by the adapters.
  // These are correct for the current system but someday it might be
  // necessary to make this mapping platform dependent.
  static BasicType adapter_encoding(BasicType in) {
    assert((~0xf & in) == 0, "must fit in 4 bits");
    switch(in) {
      case T_BOOLEAN:
      case T_BYTE:
      case T_SHORT:
      case T_CHAR:
        // There are all promoted to T_INT in the calling convention
        return T_INT;

      case T_OBJECT:
      case T_ARRAY:
        // References are passed in a full machine word.
#ifdef _LP64
        return T_LONG;
#else
        return T_INT;
#endif

      case T_INT:
      case T_LONG:
      case T_FLOAT:
      case T_DOUBLE:
      case T_VOID:
        return in;

      default:
        ShouldNotReachHere();
        return T_CONFLICT;
    }
  }

 public:
  AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
    // The fingerprint is based on the BasicType signature encoded
    // into an array of ints with four entries per int.
    int* ptr;
    int len = (total_args_passed + 3) >> 2;
    if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
      _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
      // Storing the signature encoded as signed chars hits about 98%
      // of the time.
      _length = -len;                // negative length flags the compact form
      ptr = _value._compact;
    } else {
      _length = len;
      _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
      ptr = _value._fingerprint;
    }

    // Now pack the BasicTypes with 4 per int
    int sig_index = 0;
    for (int index = 0; index < len; index++) {
      int value = 0;
      for (int byte = 0; byte < 4; byte++) {
        if (sig_index < total_args_passed) {
          value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
        }
      }
      ptr[index] = value;
    }
  }

  ~AdapterFingerPrint() {
    // Only the non-compact form owns heap storage.
    if (_length > 0) {
      FREE_C_HEAP_ARRAY(int, _value._fingerprint);
    }
  }

  // Return the index-th packed int of the encoding.
  int value(int index) {
    if (_length < 0) {
      return _value._compact[index];
    }
    return _value._fingerprint[index];
  }
  // Number of packed ints, regardless of storage form.
  int length() {
    if (_length < 0) return -_length;
    return _length;
  }

  bool is_compact() {
    return _length <= 0;
  }

  // Hash over the packed ints, used as the table key.
  unsigned int compute_hash() {
    int hash = 0;
    for (int i = 0; i < length(); i++) {
      int v = value(i);
      hash = (hash << 8) ^ v ^ (hash >> 5);
    }
    return (unsigned int)hash;
  }

  // Resource-allocated hex rendering of the encoding, for diagnostics.
  const char* as_string() {
    stringStream st;
    for (int i = 0; i < length(); i++) {
      st.print(PTR_FORMAT, value(i));
    }
    return st.as_string();
  }

  bool equals(AdapterFingerPrint* other) {
    // Lengths (including the compact-form sign) must agree.
    if (other->_length != _length) {
      return false;
    }
    if (_length < 0) {
      // Compact form: compare all three inline words.
      return _value._compact[0] == other->_value._compact[0] &&
             _value._compact[1] == other->_value._compact[1] &&
             _value._compact[2] == other->_value._compact[2];
    } else {
      for (int i = 0; i < _length; i++) {
        if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
          return false;
        }
      }
    }
    return true;
  }
};
2147 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2148 class AdapterHandlerTable : public BasicHashtable {
2149 friend class AdapterHandlerTableIterator;
2151 private:
2153 #ifndef PRODUCT
2154 static int _lookups; // number of calls to lookup
2155 static int _buckets; // number of buckets checked
2156 static int _equals; // number of buckets checked with matching hash
2157 static int _hits; // number of successful lookups
2158 static int _compact; // number of equals calls with compact signature
2159 #endif
2161 AdapterHandlerEntry* bucket(int i) {
2162 return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
2163 }
2165 public:
2166 AdapterHandlerTable()
2167 : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
2169 // Create a new entry suitable for insertion in the table
2170 AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
2171 AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
2172 entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2173 return entry;
2174 }
2176 // Insert an entry into the table
2177 void add(AdapterHandlerEntry* entry) {
2178 int index = hash_to_index(entry->hash());
2179 add_entry(index, entry);
2180 }
2182 void free_entry(AdapterHandlerEntry* entry) {
2183 entry->deallocate();
2184 BasicHashtable::free_entry(entry);
2185 }
2187 // Find a entry with the same fingerprint if it exists
2188 AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2189 NOT_PRODUCT(_lookups++);
2190 AdapterFingerPrint fp(total_args_passed, sig_bt);
2191 unsigned int hash = fp.compute_hash();
2192 int index = hash_to_index(hash);
2193 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2194 NOT_PRODUCT(_buckets++);
2195 if (e->hash() == hash) {
2196 NOT_PRODUCT(_equals++);
2197 if (fp.equals(e->fingerprint())) {
2198 #ifndef PRODUCT
2199 if (fp.is_compact()) _compact++;
2200 _hits++;
2201 #endif
2202 return e;
2203 }
2204 }
2205 }
2206 return NULL;
2207 }
2209 #ifndef PRODUCT
2210 void print_statistics() {
2211 ResourceMark rm;
2212 int longest = 0;
2213 int empty = 0;
2214 int total = 0;
2215 int nonempty = 0;
2216 for (int index = 0; index < table_size(); index++) {
2217 int count = 0;
2218 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2219 count++;
2220 }
2221 if (count != 0) nonempty++;
2222 if (count == 0) empty++;
2223 if (count > longest) longest = count;
2224 total += count;
2225 }
2226 tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
2227 empty, longest, total, total / (double)nonempty);
2228 tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
2229 _lookups, _buckets, _equals, _hits, _compact);
2230 }
2231 #endif
2232 };
#ifndef PRODUCT

// Definitions of AdapterHandlerTable's lookup-statistics counters
// (zero-initialized; debug builds only).
int AdapterHandlerTable::_lookups;
int AdapterHandlerTable::_buckets;
int AdapterHandlerTable::_equals;
int AdapterHandlerTable::_hits;
int AdapterHandlerTable::_compact;

#endif
2245 class AdapterHandlerTableIterator : public StackObj {
2246 private:
2247 AdapterHandlerTable* _table;
2248 int _index;
2249 AdapterHandlerEntry* _current;
2251 void scan() {
2252 while (_index < _table->table_size()) {
2253 AdapterHandlerEntry* a = _table->bucket(_index);
2254 _index++;
2255 if (a != NULL) {
2256 _current = a;
2257 return;
2258 }
2259 }
2260 }
2262 public:
2263 AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
2264 scan();
2265 }
2266 bool has_next() {
2267 return _current != NULL;
2268 }
2269 AdapterHandlerEntry* next() {
2270 if (_current != NULL) {
2271 AdapterHandlerEntry* result = _current;
2272 _current = _current->next();
2273 if (_current == NULL) scan();
2274 return result;
2275 } else {
2276 return NULL;
2277 }
2278 }
2279 };
// ---------------------------------------------------------------------------
// Implementation of AdapterHandlerLibrary
AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
const int AdapterHandlerLibrary_size = 16*K;  // size of the shared temporary adapter code buffer
BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2289 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2290 // Should be called only when AdapterHandlerLibrary_lock is active.
2291 if (_buffer == NULL) // Initialize lazily
2292 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2293 return _buffer;
2294 }
// One-time setup of the adapter table and the shared abstract-method
// handler; idempotent (subsequent calls return immediately).
void AdapterHandlerLibrary::initialize() {
  if (_adapters != NULL) return;
  _adapters = new AdapterHandlerTable();

  // Create a special handler for abstract methods.  Abstract methods
  // are never compiled so an i2c entry is somewhat meaningless, but
  // fill it in with something appropriate just in case.  Pass handle
  // wrong method for the c2i transitions.
  address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
  _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
                                                              StubRoutines::throw_AbstractMethodError_entry(),
                                                              wrong_method, wrong_method);
}
// Forwarding factory: create (but do not insert) a table entry for the
// given fingerprint and adapter entry points.
AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
                                                      address i2c_entry,
                                                      address c2i_entry,
                                                      address c2i_unverified_entry) {
  return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
}
// Find or create the i2c/c2i adapter pair matching this method's calling
// convention.  Returns the shared abstract-method handler for abstract
// methods and NULL when the code cache is full.
AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
  // Use customized signature handler.  Need to lock around updates to
  // the AdapterHandlerTable (it is not safe for concurrent readers
  // and a single writer: this could be fixed if it becomes a
  // problem).

  // Get the address of the ic_miss handlers before we grab the
  // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
  // was caused by the initialization of the stubs happening
  // while we held the lock and then notifying jvmti while
  // holding it. This just forces the initialization to be a little
  // earlier.
  address ic_miss = SharedRuntime::get_ic_miss_stub();
  assert(ic_miss != NULL, "must have handler");

  ResourceMark rm;

  NOT_PRODUCT(int insts_size);
  AdapterBlob* B = NULL;
  AdapterHandlerEntry* entry = NULL;
  AdapterFingerPrint* fingerprint = NULL;
  {
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // make sure data structure is initialized
    initialize();

    if (method->is_abstract()) {
      return _abstract_method_handler;
    }

    // Fill in the signature array, for the calling-convention call.
    int total_args_passed = method->size_of_parameters(); // All args on stack

    BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
    VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
    int i = 0;
    if (!method->is_static())  // Pass in receiver first
      sig_bt[i++] = T_OBJECT;
    for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
      sig_bt[i++] = ss.type();  // Collect remaining bits of signature
      if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
        sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
    }
    assert(i == total_args_passed, "");

    // Lookup method signature's fingerprint
    entry = _adapters->lookup(total_args_passed, sig_bt);

#ifdef ASSERT
    // Under -XX:+VerifyAdapterSharing, pretend the lookup missed so we
    // regenerate the adapter and can compare it against the cached one.
    AdapterHandlerEntry* shared_entry = NULL;
    if (VerifyAdapterSharing && entry != NULL) {
      shared_entry = entry;
      entry = NULL;
    }
#endif

    if (entry != NULL) {
      return entry;
    }

    // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
    int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);

    // Make a C heap allocated version of the fingerprint to store in the adapter
    fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);

    // Create I2C & C2I handlers

    BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
    if (buf != NULL) {
      CodeBuffer buffer(buf);
      short buffer_locs[20];
      buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
                                             sizeof(buffer_locs)/sizeof(relocInfo));
      MacroAssembler _masm(&buffer);

      entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
                                                     total_args_passed,
                                                     comp_args_on_stack,
                                                     sig_bt,
                                                     regs,
                                                     fingerprint);

#ifdef ASSERT
      if (VerifyAdapterSharing) {
        if (shared_entry != NULL) {
          assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
                 "code must match");
          // Release the one just created and return the original
          _adapters->free_entry(entry);
          return shared_entry;
        } else  {
          entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
        }
      }
#endif

      // Copy the generated code out of the temporary buffer into a
      // permanent AdapterBlob in the code cache.
      B = AdapterBlob::create(&buffer);
      NOT_PRODUCT(insts_size = buffer.insts_size());
    }
    if (B == NULL) {
      // CodeCache is full, disable compilation
      // Ought to log this but compile log is only per compile thread
      // and we're some non descript Java thread.
      MutexUnlocker mu(AdapterHandlerLibrary_lock);
      CompileBroker::handle_full_code_cache();
      return NULL; // Out of CodeCache space
    }
    // Point the entry at its final location before publishing it.
    entry->relocate(B->content_begin());
#ifndef PRODUCT
    // debugging suppport
    if (PrintAdapterHandlers) {
      tty->cr();
      tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
                    _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
                    method->signature()->as_C_string(), fingerprint->as_string(), insts_size );
      tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
      Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size);
    }
#endif

    _adapters->add(entry);
  }
  // Outside of the lock
  if (B != NULL) {
    // Notify profilers/JVMTI about the new code without holding the lock.
    char blob_id[256];
    jio_snprintf(blob_id,
                 sizeof(blob_id),
                 "%s(%s)@" PTR_FORMAT,
                 B->name(),
                 fingerprint->as_string(),
                 B->content_begin());
    Forte::register_stub(blob_id, B->content_begin(), B->content_end());

    if (JvmtiExport::should_post_dynamic_code_generated()) {
      JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
    }
  }
  return entry;
}
2458 void AdapterHandlerEntry::relocate(address new_base) {
2459 ptrdiff_t delta = new_base - _i2c_entry;
2460 _i2c_entry += delta;
2461 _c2i_entry += delta;
2462 _c2i_unverified_entry += delta;
2463 }
2466 void AdapterHandlerEntry::deallocate() {
2467 delete _fingerprint;
2468 #ifdef ASSERT
2469 if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
2470 if (_saved_sig) FREE_C_HEAP_ARRAY(Basictype, _saved_sig);
2471 #endif
2472 }
2475 #ifdef ASSERT
// Capture the code before relocation so that it can be compared
// against other versions.  If the code is captured after relocation
// then relative instructions won't be equivalent.
void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
  // Snapshot both the generated code bytes and the signature they were
  // generated from, so VerifyAdapterSharing can cross-check them later.
  _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
  _code_length = length;
  memcpy(_saved_code, buffer, length);
  _total_args_passed = total_args_passed;
  _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
  memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
}
2489 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
2490 if (length != _code_length) {
2491 return false;
2492 }
2493 for (int i = 0; i < length; i++) {
2494 if (buffer[i] != _saved_code[i]) {
2495 return false;
2496 }
2497 }
2498 return true;
2499 }
2500 #endif
// Create a native wrapper for this native method.  The wrapper converts the
// java compiled calling convention to the native convention, handlizes
// arguments, and transitions to native.  On return from the native we transition
// back to java blocking if a safepoint is in progress.
nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int compile_id) {
  ResourceMark rm;
  nmethod* nm = NULL;

  assert(method->has_native_function(), "must have something valid to call!");

  {
    // perform the work while holding the lock, but perform any printing outside the lock
    MutexLocker mu(AdapterHandlerLibrary_lock);
    // See if somebody beat us to it
    nm = method->code();
    if (nm) {
      return nm;
    }

    ResourceMark rm;

    BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
    if (buf != NULL) {
      CodeBuffer buffer(buf);
      // double-sized array gives the relocation storage correct alignment
      double locs_buf[20];
      buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
      MacroAssembler _masm(&buffer);

      // Fill in the signature array, for the calling-convention call.
      int total_args_passed = method->size_of_parameters();

      BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
      VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed);
      int i=0;
      if( !method->is_static() )  // Pass in receiver first
        sig_bt[i++] = T_OBJECT;
      SignatureStream ss(method->signature());
      for( ; !ss.at_return_type(); ss.next()) {
        sig_bt[i++] = ss.type();  // Collect remaining bits of signature
        if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
          sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
      }
      assert( i==total_args_passed, "" );
      BasicType ret_type = ss.type();

      // Now get the compiled-Java layout as input arguments
      int comp_args_on_stack;
      comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);

      // Generate the compiled-to-native wrapper code
      nm = SharedRuntime::generate_native_wrapper(&_masm,
                                                  method,
                                                  compile_id,
                                                  total_args_passed,
                                                  comp_args_on_stack,
                                                  sig_bt,regs,
                                                  ret_type);
    }
  }

  // Must unlock before calling set_code

  // Install the generated code.
  if (nm != NULL) {
    if (PrintCompilation) {
      ttyLocker ttyl;
      CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
    }
    method->set_code(method, nm);
    nm->post_compiled_method_load_event();
  } else {
    // CodeCache is full, disable compilation
    CompileBroker::handle_full_code_cache();
  }
  return nm;
}
2580 #ifdef HAVE_DTRACE_H
2581 // Create a dtrace nmethod for this method. The wrapper converts the
2582 // java compiled calling convention to the native convention, makes a dummy call
2583 // (actually nops for the size of the call instruction, which become a trap if
2584 // probe is enabled). The returns to the caller. Since this all looks like a
2585 // leaf no thread transition is needed.
2587 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
2588 ResourceMark rm;
2589 nmethod* nm = NULL;
2591 if (PrintCompilation) {
2592 ttyLocker ttyl;
2593 tty->print("--- n%s ");
2594 method->print_short_name(tty);
2595 if (method->is_static()) {
2596 tty->print(" (static)");
2597 }
2598 tty->cr();
2599 }
2601 {
2602 // perform the work while holding the lock, but perform any printing
2603 // outside the lock
2604 MutexLocker mu(AdapterHandlerLibrary_lock);
2605 // See if somebody beat us to it
2606 nm = method->code();
2607 if (nm) {
2608 return nm;
2609 }
2611 ResourceMark rm;
2613 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2614 if (buf != NULL) {
2615 CodeBuffer buffer(buf);
2616 // Need a few relocation entries
2617 double locs_buf[20];
2618 buffer.insts()->initialize_shared_locs(
2619 (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2620 MacroAssembler _masm(&buffer);
2622 // Generate the compiled-to-native wrapper code
2623 nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
2624 }
2625 }
2626 return nm;
2627 }
// DTrace probes need Java strings as UTF-8: copy the characters of the
// java.lang.String 'src' into the buffer at 'dst', truncated to
// max_dtrace_string_size bytes.  Caller guarantees 'dst' has that capacity.
void SharedRuntime::get_utf(oopDesc* src, address dst) {
  typeArrayOop jlsValue = java_lang_String::value(src);
  int jlsOffset = java_lang_String::offset(src);
  int jlsLen = java_lang_String::length(src);
  // Empty strings may have a NULL/irrelevant value array; avoid indexing it.
  jchar* jlsPos = (jlsLen == 0) ? NULL :
                                  jlsValue->char_at_addr(jlsOffset);
  assert(typeArrayKlass::cast(jlsValue->klass())->element_type() == T_CHAR, "compressed string");
  (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
}
#endif // HAVE_DTRACE_H
2641 // -------------------------------------------------------------------------
2642 // Java-Java calling convention
2643 // (what you use when Java calls Java)
2645 //------------------------------name_for_receiver----------------------------------
2646 // For a given signature, return the VMReg for parameter 0.
2647 VMReg SharedRuntime::name_for_receiver() {
2648 VMRegPair regs;
2649 BasicType sig_bt = T_OBJECT;
2650 (void) java_calling_convention(&sig_bt, ®s, 1, true);
2651 // Return argument 0 register. In the LP64 build pointers
2652 // take 2 registers, but the VM wants only the 'main' name.
2653 return regs.first();
2654 }
2656 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, int* arg_size) {
2657 // This method is returning a data structure allocating as a
2658 // ResourceObject, so do not put any ResourceMarks in here.
2659 char *s = sig->as_C_string();
2660 int len = (int)strlen(s);
2661 *s++; len--; // Skip opening paren
2662 char *t = s+len;
2663 while( *(--t) != ')' ) ; // Find close paren
2665 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
2666 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
2667 int cnt = 0;
2668 if (has_receiver) {
2669 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2670 }
2672 while( s < t ) {
2673 switch( *s++ ) { // Switch on signature character
2674 case 'B': sig_bt[cnt++] = T_BYTE; break;
2675 case 'C': sig_bt[cnt++] = T_CHAR; break;
2676 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
2677 case 'F': sig_bt[cnt++] = T_FLOAT; break;
2678 case 'I': sig_bt[cnt++] = T_INT; break;
2679 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break;
2680 case 'S': sig_bt[cnt++] = T_SHORT; break;
2681 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2682 case 'V': sig_bt[cnt++] = T_VOID; break;
2683 case 'L': // Oop
2684 while( *s++ != ';' ) ; // Skip signature
2685 sig_bt[cnt++] = T_OBJECT;
2686 break;
2687 case '[': { // Array
2688 do { // Skip optional size
2689 while( *s >= '0' && *s <= '9' ) s++;
2690 } while( *s++ == '[' ); // Nested arrays?
2691 // Skip element type
2692 if( s[-1] == 'L' )
2693 while( *s++ != ';' ) ; // Skip signature
2694 sig_bt[cnt++] = T_ARRAY;
2695 break;
2696 }
2697 default : ShouldNotReachHere();
2698 }
2699 }
2700 assert( cnt < 256, "grow table size" );
2702 int comp_args_on_stack;
2703 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2705 // the calling convention doesn't count out_preserve_stack_slots so
2706 // we must add that in to get "true" stack offsets.
2708 if (comp_args_on_stack) {
2709 for (int i = 0; i < cnt; i++) {
2710 VMReg reg1 = regs[i].first();
2711 if( reg1->is_stack()) {
2712 // Yuck
2713 reg1 = reg1->bias(out_preserve_stack_slots());
2714 }
2715 VMReg reg2 = regs[i].second();
2716 if( reg2->is_stack()) {
2717 // Yuck
2718 reg2 = reg2->bias(out_preserve_stack_slots());
2719 }
2720 regs[i].set_pair(reg2, reg1);
2721 }
2722 }
2724 // results
2725 *arg_size = cnt;
2726 return regs;
2727 }
2729 // OSR Migration Code
2730 //
// This code is used to convert interpreter frames into compiled frames.  It is
2732 // called from very start of a compiled OSR nmethod. A temp array is
2733 // allocated to hold the interesting bits of the interpreter frame. All
2734 // active locks are inflated to allow them to move. The displaced headers and
// active interpreter locals are copied into the temp buffer.  Then we return
2736 // back to the compiled code. The compiled code then pops the current
2737 // interpreter frame off the stack and pushes a new compiled frame. Then it
2738 // copies the interpreter locals and displaced headers where it wants.
2739 // Finally it calls back to free the temp buffer.
2740 //
2741 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
// Snapshot the interpreter frame's locals and active monitors into a C-heap
// buffer and return it.  Layout: [0 .. max_locals) locals, then 2 words per
// active monitor (displaced header, object).  The caller (compiled OSR code)
// must release the buffer via OSR_migration_end().
JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )

#ifdef IA64
  ShouldNotReachHere(); // NYI
#endif /* IA64 */

  //
  // This code is dependent on the memory layout of the interpreter local
  // array and the monitors.  On all of our platforms the layout is identical
  // so this code is shared.  If some platform lays their arrays out
  // differently then this code could move to platform specific code or
  // the code here could be modified to copy items one at a time using
  // frame accessor methods and be platform independent.

  frame fr = thread->last_frame();
  assert( fr.is_interpreted_frame(), "" );
  assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );

  // Figure out how many monitors are active.
  int active_monitor_count = 0;
  for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
       kptr < fr.interpreter_frame_monitor_begin();
       kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
    if( kptr->obj() != NULL ) active_monitor_count++;
  }

  // QQQ we could place number of active monitors in the array so that compiled code
  // could double check it.

  methodOop moop = fr.interpreter_frame_method();
  int max_locals = moop->max_locals();
  // Allocate temp buffer, 1 word per local & 2 per active monitor.
  int buf_size_words = max_locals + active_monitor_count*2;
  intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);

  // Copy the locals.  Order is preserved so that loading of longs works.
  // Since there's no GC I can copy the oops blindly.
  assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
  Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
                       (HeapWord*)&buf[0],
                       max_locals);

  // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
  int i = max_locals;
  for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
       kptr2 < fr.interpreter_frame_monitor_begin();
       kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
    if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
      BasicLock *lock = kptr2->lock();
      // Inflate so the displaced header becomes position-independent.
      if (lock->displaced_header()->is_unlocked())
        ObjectSynchronizer::inflate_helper(kptr2->obj());
      // Now the displaced header is free to move.
      buf[i++] = (intptr_t)lock->displaced_header();
      buf[i++] = (intptr_t)kptr2->obj();
    }
  }
  assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );

  return buf;
JRT_END
// Release the temp buffer allocated by OSR_migration_begin() once the
// compiled frame has copied the locals and displaced headers out of it.
JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
  FREE_C_HEAP_ARRAY(intptr_t,buf);
JRT_END
2809 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
2810 AdapterHandlerTableIterator iter(_adapters);
2811 while (iter.has_next()) {
2812 AdapterHandlerEntry* a = iter.next();
2813 if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
2814 }
2815 return false;
2816 }
2818 void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
2819 AdapterHandlerTableIterator iter(_adapters);
2820 while (iter.has_next()) {
2821 AdapterHandlerEntry* a = iter.next();
2822 if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
2823 st->print("Adapter for signature: ");
2824 st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
2825 a->fingerprint()->as_string(),
2826 a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
2828 return;
2829 }
2830 }
2831 assert(false, "Should have found handler");
2832 }
2834 #ifndef PRODUCT
// Delegate to the adapter handler table's own statistics printer.
void AdapterHandlerLibrary::print_statistics() {
  _adapters->print_statistics();
}
2840 #endif /* PRODUCT */