Mon, 17 Sep 2012 07:36:31 -0400
7194254: jstack reports wrong thread priorities
Reviewed-by: dholmes, sla, fparain
Contributed-by: Dmytro Sheyko <dmytro_sheyko@hotmail.com>
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/abstractCompiler.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/forte.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/methodHandles.hpp"
#include "prims/nativeLookup.hpp"
#include "runtime/arguments.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/copy.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/hashtable.inline.hpp"
#include "utilities/xmlstream.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
# include "vmreg_x86.inline.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
# include "vmreg_sparc.inline.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
# include "vmreg_zero.inline.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
# include "vmreg_arm.inline.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
# include "vmreg_ppc.inline.hpp"
#endif
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#endif

// Shared stub locations
RuntimeStub*        SharedRuntime::_wrong_method_blob;
RuntimeStub*        SharedRuntime::_ic_miss_blob;
RuntimeStub*        SharedRuntime::_resolve_opt_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_virtual_call_blob;
RuntimeStub*        SharedRuntime::_resolve_static_call_blob;

DeoptimizationBlob* SharedRuntime::_deopt_blob;
SafepointBlob*      SharedRuntime::_polling_page_safepoint_handler_blob;
SafepointBlob*      SharedRuntime::_polling_page_return_handler_blob;

#ifdef COMPILER2
UncommonTrapBlob*   SharedRuntime::_uncommon_trap_blob;
#endif // COMPILER2

//----------------------------generate_stubs-----------------------------------
void SharedRuntime::generate_stubs() {
  _wrong_method_blob             = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method), "wrong_method_stub");
  _ic_miss_blob                  = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::handle_wrong_method_ic_miss), "ic_miss_stub");
  _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call");
  _resolve_virtual_call_blob     = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call");
  _resolve_static_call_blob      = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call");

  _polling_page_safepoint_handler_blob = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), false);
  _polling_page_return_handler_blob    = generate_handler_blob(CAST_FROM_FN_PTR(address, SafepointSynchronize::handle_polling_page_exception), true);

  generate_deopt_blob();

#ifdef COMPILER2
  generate_uncommon_trap_blob();
#endif // COMPILER2
}
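
// <math.h> is pulled in here for fmod() and sqrt(), which back the
// frem/drem and dsqrt helpers further down in this file.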
#include <math.h>

#ifndef USDT2
HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
                      char*, int, char*, int, char*, int);
HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
                      char*, int, char*, int, char*, int);
#endif /* !USDT2 */

// Implementation of SharedRuntime

#ifndef PRODUCT
// For statistics
int SharedRuntime::_ic_miss_ctr = 0;
int SharedRuntime::_wrong_method_ctr = 0;
int SharedRuntime::_resolve_static_ctr = 0;
int SharedRuntime::_resolve_virtual_ctr = 0;
int SharedRuntime::_resolve_opt_virtual_ctr = 0;
int SharedRuntime::_implicit_null_throws = 0;
int SharedRuntime::_implicit_div0_throws = 0;
int SharedRuntime::_throw_null_ctr = 0;

int SharedRuntime::_nof_normal_calls = 0;
int SharedRuntime::_nof_optimized_calls = 0;
int SharedRuntime::_nof_inlined_calls = 0;
int SharedRuntime::_nof_megamorphic_calls = 0;
int SharedRuntime::_nof_static_calls = 0;
int SharedRuntime::_nof_inlined_static_calls = 0;
int SharedRuntime::_nof_interface_calls = 0;
int SharedRuntime::_nof_optimized_interface_calls = 0;
int SharedRuntime::_nof_inlined_interface_calls = 0;
int SharedRuntime::_nof_megamorphic_interface_calls = 0;
int SharedRuntime::_nof_removable_exceptions = 0;

int SharedRuntime::_new_instance_ctr = 0;
int SharedRuntime::_new_array_ctr = 0;
int SharedRuntime::_multi1_ctr = 0;
int SharedRuntime::_multi2_ctr = 0;
int SharedRuntime::_multi3_ctr = 0;
int SharedRuntime::_multi4_ctr = 0;
int SharedRuntime::_multi5_ctr = 0;
int SharedRuntime::_mon_enter_stub_ctr = 0;
int SharedRuntime::_mon_exit_stub_ctr = 0;
int SharedRuntime::_mon_enter_ctr = 0;
int SharedRuntime::_mon_exit_ctr = 0;
int SharedRuntime::_partial_subtype_ctr = 0;
int SharedRuntime::_jbyte_array_copy_ctr = 0;
int SharedRuntime::_jshort_array_copy_ctr = 0;
int SharedRuntime::_jint_array_copy_ctr = 0;
int SharedRuntime::_jlong_array_copy_ctr = 0;
int SharedRuntime::_oop_array_copy_ctr = 0;
int SharedRuntime::_checkcast_array_copy_ctr = 0;
int SharedRuntime::_unsafe_array_copy_ctr = 0;
int SharedRuntime::_generic_array_copy_ctr = 0;
int SharedRuntime::_slow_array_copy_ctr = 0;
int SharedRuntime::_find_handler_ctr = 0;
int SharedRuntime::_rethrow_ctr = 0;

int     SharedRuntime::_ICmiss_index = 0;
int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];

void SharedRuntime::trace_ic_miss(address at) {
  for (int i = 0; i < _ICmiss_index; i++) {
    if (_ICmiss_at[i] == at) {
      _ICmiss_count[i]++;
      return;
    }
  }
  int index = _ICmiss_index++;
  if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
  _ICmiss_at[index] = at;
  _ICmiss_count[index] = 1;
}

void SharedRuntime::print_ic_miss_histogram() {
  if (ICMissHistogram) {
    tty->print_cr("IC Miss Histogram:");
    int tot_misses = 0;
    for (int i = 0; i < _ICmiss_index; i++) {
      tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
      tot_misses += _ICmiss_count[i];
    }
    tty->print_cr("Total IC misses: %7d", tot_misses);
  }
}
#endif // PRODUCT

#ifndef SERIALGC

// G1 write-barrier pre: executed before a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread* thread))
  if (orig == NULL) {
    assert(false, "should be optimized out");
    return;
  }
  assert(orig->is_oop(true /* ignore mark word */), "Error");
  // store the original value that was in the field reference
  thread->satb_mark_queue().enqueue(orig);
JRT_END

// G1 write-barrier post: executed after a pointer store.
JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
  thread->dirty_card_queue().enqueue(card_addr);
JRT_END

#endif // !SERIALGC
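
// 64-bit arithmetic helpers for platforms whose compiled code calls into
// the runtime for long multiply/divide/remainder. Note the declared (y, x)
// parameter order (presumably matching the order compiled callers push the
// operands): the operations compute x op y, and ldiv/lrem guard the
// min_jlong / -1 case, which would otherwise overflow.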
JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
  return x * y;
JRT_END

JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return x;
  } else {
    return x / y;
  }
JRT_END

JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
  if (x == min_jlong && y == CONST64(-1)) {
    return 0;
  } else {
    return x % y;
  }
JRT_END
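
// IEEE-754 bit patterns used below: masking with *_sign_mask clears the
// sign bit, so a value equals *_infinity after masking iff it is +/-Inf.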
const juint  float_sign_mask  = 0x7FFFFFFF;
const juint  float_infinity   = 0x7F800000;
const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
const julong double_infinity  = CONST64(0x7FF0000000000000);

JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y))
#ifdef _WIN64
  // 64-bit Windows on amd64 returns the wrong values for
  // infinity operands.
  union { jfloat f; juint i; } xbits, ybits;
  xbits.f = x;
  ybits.f = y;
  // x Mod Infinity == x unless x is infinity
  if (((xbits.i & float_sign_mask) != float_infinity) &&
      ((ybits.i & float_sign_mask) == float_infinity)) {
    return x;
  }
#endif
  return ((jfloat)fmod((double)x, (double)y));
JRT_END

JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
#ifdef _WIN64
  union { jdouble d; julong l; } xbits, ybits;
  xbits.d = x;
  ybits.d = y;
  // x Mod Infinity == x unless x is infinity
  if (((xbits.l & double_sign_mask) != double_infinity) &&
      ((ybits.l & double_sign_mask) == double_infinity)) {
    return x;
  }
#endif
  return ((jdouble)fmod((double)x, (double)y));
JRT_END
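
// __SOFTFP__ builds have no hardware floating point, so even basic
// float/double arithmetic is routed through these leaf entries.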
#ifdef __SOFTFP__
JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
  return x + y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
  return x - y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
  return x * y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
  return x / y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
  return x + y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
  return x - y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
  return x * y;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
  return x / y;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
  return (jfloat)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
  return (jdouble)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
  return (jdouble)x;
JRT_END

JRT_LEAF(int, SharedRuntime::fcmpl(float x, float y))
  return x > y ? 1 : (x == y ? 0 : -1); /* x < y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::fcmpg(float x, float y))
  return x < y ? -1 : (x == y ? 0 : 1); /* x > y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpl(double x, double y))
  return x > y ? 1 : (x == y ? 0 : -1); /* x < y or is_nan */
JRT_END

JRT_LEAF(int, SharedRuntime::dcmpg(double x, double y))
  return x < y ? -1 : (x == y ? 0 : 1); /* x > y or is_nan */
JRT_END

// Functions to return the opposite of the aeabi functions for nan.
JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
  return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
  return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
  return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
  return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
JRT_END

// Intrinsics make gcc generate code for these.
float SharedRuntime::fneg(float f) {
  return -f;
}

double SharedRuntime::dneg(double f) {
  return -f;
}

#endif // __SOFTFP__

#if defined(__SOFTFP__) || defined(E500V2)
// Intrinsics make gcc generate code for these.
double SharedRuntime::dabs(double f) {
  return (f <= (double)0.0) ? (double)0.0 - f : f;
}

#endif

#if defined(__SOFTFP__) || defined(PPC)
double SharedRuntime::dsqrt(double f) {
  return sqrt(f);
}
#endif
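
// Float/double -> integral conversions with Java semantics: NaN maps to
// zero and out-of-range values saturate to the target type's min/max.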
JRT_LEAF(jint, SharedRuntime::f2i(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jint)
    return max_jint;
  if (x <= (jfloat) min_jint)
    return min_jint;
  return (jint) x;
JRT_END

JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x))
  if (g_isnan(x))
    return 0;
  if (x >= (jfloat) max_jlong)
    return max_jlong;
  if (x <= (jfloat) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END

JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jint)
    return max_jint;
  if (x <= (jdouble) min_jint)
    return min_jint;
  return (jint) x;
JRT_END

JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
  if (g_isnan(x))
    return 0;
  if (x >= (jdouble) max_jlong)
    return max_jlong;
  if (x <= (jdouble) min_jlong)
    return min_jlong;
  return (jlong) x;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
  return (jfloat)x;
JRT_END

JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
  return (jfloat)x;
JRT_END

JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
  return (jdouble)x;
JRT_END

// Exception handling across interpreter/compiler boundaries
//
// exception_handler_for_return_address(...) returns the continuation address.
// The continuation address is the entry point of the exception handler of the
// previous frame depending on the return address.

address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
  assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address));

  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  // The fastest case first
  CodeBlob* blob = CodeCache::find_blob(return_address);
  nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL;
  if (nm != NULL) {
    // Set flag if return address is a method handle call site.
    thread->set_is_method_handle_return(nm->is_method_handle_return(return_address));
    // native nmethods don't have exception handlers
    assert(!nm->is_native_method(), "no exception handler");
    assert(nm->header_begin() != nm->exception_begin(), "no exception handler");
    if (nm->is_deopt_pc(return_address)) {
      return SharedRuntime::deopt_blob()->unpack_with_exception();
    } else {
      return nm->exception_begin();
    }
  }

  // Entry code
  if (StubRoutines::returns_to_call_stub(return_address)) {
    return StubRoutines::catch_exception_entry();
  }
  // Interpreted code
  if (Interpreter::contains(return_address)) {
    return Interpreter::rethrow_exception_entry();
  }

  guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub");
  guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");

#ifndef PRODUCT
  { ResourceMark rm;
    tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
    tty->print_cr("b) other problem");
  }
#endif // PRODUCT

  ShouldNotReachHere();
  return NULL;
}

JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
  return raw_exception_handler_for_return_address(thread, return_address);
JRT_END
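
// Map the pc of a safepoint poll that just faulted to the entry point of
// the handler blob that processes it; polls at returns and polls in loops
// unwind differently and so use different blobs.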
address SharedRuntime::get_poll_stub(address pc) {
  address stub;
  // Look up the code blob
  CodeBlob* cb = CodeCache::find_blob(pc);

  // Should be an nmethod
  assert(cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod");

  // Look up the relocation information
  assert(((nmethod*)cb)->is_at_poll_or_poll_return(pc),
         "safepoint polling: type must be poll");

  assert(((NativeInstruction*)pc)->is_safepoint_poll(),
         "Only polling locations are used for safepoint");

  bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
  if (at_poll_return) {
    assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
           "polling page return stub not created yet");
    stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
  } else {
    assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
           "polling page safepoint stub not created yet");
    stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
  }
#ifndef PRODUCT
  if (TraceSafepoint) {
    char buf[256];
    jio_snprintf(buf, sizeof(buf),
                 "... found polling page %s exception at pc = "
                 INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
                 at_poll_return ? "return" : "loop",
                 (intptr_t)pc, (intptr_t)stub);
    tty->print_raw_cr(buf);
  }
#endif // PRODUCT
  return stub;
}
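
// Fish the receiver oop for a call with the given signature out of an
// interpreted caller's expression stack.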
oop SharedRuntime::retrieve_receiver(Symbol* sig, frame caller) {
  assert(caller.is_interpreted_frame(), "");
  int args_size = ArgumentSizeComputer(sig).size() + 1;
  assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
  assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
  return result;
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* thread, Handle h_exception) {
  if (JvmtiExport::can_post_on_exceptions()) {
    vframeStream vfst(thread, true);
    methodHandle method = methodHandle(thread, vfst.method());
    address bcp = method()->bcp_from(vfst.bci());
    JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
  }
  Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
}

void SharedRuntime::throw_and_post_jvmti_exception(JavaThread* thread, Symbol* name, const char* message) {
  Handle h_exception = Exceptions::new_exception(thread, name, message);
  throw_and_post_jvmti_exception(thread, h_exception);
}

// The interpreter code to call this tracing function is only
// called/generated when TraceRedefineClasses has the right bits
// set. Since obsolete methods are never compiled, we don't have
// to modify the compilers to generate calls to this function.
//
JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
    JavaThread* thread, Method* method))
  assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");

  if (method->is_obsolete()) {
    // We are calling an obsolete method, but this is not necessarily
    // an error. Our method could have been redefined just after we
    // fetched the Method* from the constant pool.

    // RC_TRACE macro has an embedded ResourceMark
    RC_TRACE_WITH_THREAD(0x00001000, thread,
      ("calling obsolete method '%s'",
       method->name_and_sig_as_C_string()));
    if (RC_TRACE_ENABLED(0x00002000)) {
      // this option is provided to debug calls to obsolete methods
      guarantee(false, "faulting at call to an obsolete method.");
    }
  }
  return 0;
JRT_END

// ret_pc points into caller; we are returning caller's exception handler
// for given exception
address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
                                                    bool force_unwind, bool top_frame_only) {
  assert(nm != NULL, "must exist");
  ResourceMark rm;

  ScopeDesc* sd = nm->scope_desc_at(ret_pc);
  // determine handler bci, if any
  EXCEPTION_MARK;

  int handler_bci = -1;
  int scope_depth = 0;
  if (!force_unwind) {
    int bci = sd->bci();
    bool recursive_exception = false;
    do {
      bool skip_scope_increment = false;
      // exception handler lookup
      KlassHandle ek(THREAD, exception->klass());
      handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
      if (HAS_PENDING_EXCEPTION) {
        recursive_exception = true;
        // We threw an exception while trying to find the exception handler.
        // Transfer the new exception to the exception handle which will
        // be set into thread local storage, and do another lookup for an
        // exception handler for this exception, this time starting at the
        // BCI of the exception handler which caused the exception to be
        // thrown (bugs 4307310 and 4546590). Set "exception" reference
        // argument to ensure that the correct exception is thrown (4870175).
        exception = Handle(THREAD, PENDING_EXCEPTION);
        CLEAR_PENDING_EXCEPTION;
        if (handler_bci >= 0) {
          bci = handler_bci;
          handler_bci = -1;
          skip_scope_increment = true;
        }
      }
      else {
        recursive_exception = false;
      }
      if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
        sd = sd->sender();
        if (sd != NULL) {
          bci = sd->bci();
        }
        ++scope_depth;
      }
    } while (recursive_exception || (!top_frame_only && handler_bci < 0 && sd != NULL));
  }

  // found handling method => lookup exception handler
  int catch_pco = ret_pc - nm->code_begin();

  ExceptionHandlerTable table(nm);
  HandlerTableEntry* t = table.entry_for(catch_pco, handler_bci, scope_depth);
  if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
    // Allow abbreviated catch tables. The idea is to allow a method
    // to materialize its exceptions without committing to the exact
    // routing of exceptions. In particular this is needed for adding
    // a synthetic handler to unlock monitors when inlining
    // synchronized methods since the unlock path isn't represented in
    // the bytecodes.
    t = table.entry_for(catch_pco, -1, 0);
  }

#ifdef COMPILER1
  if (t == NULL && nm->is_compiled_by_c1()) {
    assert(nm->unwind_handler_begin() != NULL, "");
    return nm->unwind_handler_begin();
  }
#endif

  if (t == NULL) {
    tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
    tty->print_cr("   Exception:");
    exception->print();
    tty->cr();
    tty->print_cr(" Compiled exception table :");
    table.print();
    nm->print_code();
    guarantee(false, "missing exception handler");
    return NULL;
  }

  return nm->code_begin() + t->pco();
}

JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
  // These errors occur only at call sites
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
  // This entry point is effectively only used for NullPointerExceptions which occur at inline
  // cache sites (when the callee activation is not yet set up) so we are at a call site
  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END

JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
  // We avoid using the normal exception construction in this case because
  // it performs an upcall to Java, and we're already out of stack space.
  Klass* k = SystemDictionary::StackOverflowError_klass();
  oop exception_oop = InstanceKlass::cast(k)->allocate_instance(CHECK);
  Handle exception(thread, exception_oop);
  if (StackTraceInThrowable) {
    java_lang_Throwable::fill_in_stack_trace(exception);
  }
  throw_and_post_jvmti_exception(thread, exception);
JRT_END

address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
                                                           address pc,
                                                           SharedRuntime::ImplicitExceptionKind exception_kind)
{
  address target_pc = NULL;

  if (Interpreter::contains(pc)) {
#ifdef CC_INTERP
    // C++ interpreter doesn't throw implicit exceptions
    ShouldNotReachHere();
#else
    switch (exception_kind) {
      case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
      case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
      case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
      default:                      ShouldNotReachHere();
    }
#endif // !CC_INTERP
  } else {
    switch (exception_kind) {
      case STACK_OVERFLOW: {
        // Stack overflow only occurs upon frame setup; the callee is
        // going to be unwound. Dispatch to a shared runtime stub
        // which will cause the StackOverflowError to be fabricated
        // and processed.
        // For stack overflow in deoptimization blob, cleanup thread.
        if (thread->deopt_mark() != NULL) {
          Deoptimization::cleanup_deopt_info(thread, NULL);
        }
        Events::log_exception(thread, "StackOverflowError at " INTPTR_FORMAT, pc);
        return StubRoutines::throw_StackOverflowError_entry();
      }

      case IMPLICIT_NULL: {
        if (VtableStubs::contains(pc)) {
          // We haven't yet entered the callee frame. Fabricate an
          // exception and begin dispatching it in the caller. Since
          // the caller was at a call site, it's safe to destroy all
          // caller-saved registers, as these entry points do.
          VtableStub* vt_stub = VtableStubs::stub_containing(pc);

          // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
          if (vt_stub == NULL) return NULL;

          if (vt_stub->is_abstract_method_error(pc)) {
            assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
            Events::log_exception(thread, "AbstractMethodError at " INTPTR_FORMAT, pc);
            return StubRoutines::throw_AbstractMethodError_entry();
          } else {
            Events::log_exception(thread, "NullPointerException at vtable entry " INTPTR_FORMAT, pc);
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }
        } else {
          CodeBlob* cb = CodeCache::find_blob(pc);

          // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
          if (cb == NULL) return NULL;

          // Exception happened in CodeCache. Must be either:
          // 1. Inline-cache check in C2I handler blob,
          // 2. Inline-cache check in nmethod, or
          // 3. Implicit null exception in nmethod

          if (!cb->is_nmethod()) {
            guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
                      "exception happened outside interpreter, nmethods and vtable stubs (1)");
            Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc);
            // There is no handler here, so we will simply unwind.
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          // Otherwise, it's an nmethod. Consult its exception handlers.
          nmethod* nm = (nmethod*)cb;
          if (nm->inlinecache_check_contains(pc)) {
            // exception happened inside inline-cache check code
            // => the nmethod is not yet active (i.e., the frame
            // is not set up yet) => use return address pushed by
            // caller => don't push another return address
            Events::log_exception(thread, "NullPointerException in IC check " INTPTR_FORMAT, pc);
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

          if (nm->method()->is_method_handle_intrinsic()) {
            // exception happened inside MH dispatch code, similar to a vtable stub
            Events::log_exception(thread, "NullPointerException in MH adapter " INTPTR_FORMAT, pc);
            return StubRoutines::throw_NullPointerException_at_call_entry();
          }

#ifndef PRODUCT
          _implicit_null_throws++;
#endif
          target_pc = nm->continuation_for_implicit_exception(pc);
          // If there's an unexpected fault, target_pc might be NULL,
          // in which case we want to fall through into the normal
          // error handling code.
        }

        break; // fall through
      }

      case IMPLICIT_DIVIDE_BY_ZERO: {
        nmethod* nm = CodeCache::find_nmethod(pc);
        guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
#ifndef PRODUCT
        _implicit_div0_throws++;
#endif
        target_pc = nm->continuation_for_implicit_exception(pc);
        // If there's an unexpected fault, target_pc might be NULL,
        // in which case we want to fall through into the normal
        // error handling code.
        break; // fall through
      }

      default: ShouldNotReachHere();
    }

    assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");

    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
    if (exception_kind == IMPLICIT_NULL) {
      Events::log_exception(thread, "Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    } else {
      Events::log_exception(thread, "Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
    }
    return target_pc;
  }

  ShouldNotReachHere();
  return NULL;
}

JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
}
JNI_END

JNI_ENTRY(void, throw_unsupported_operation_exception(JNIEnv* env, ...))
{
  THROW(vmSymbols::java_lang_UnsupportedOperationException());
}
JNI_END

address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
}

address SharedRuntime::native_method_throw_unsupported_operation_exception_entry() {
  return CAST_FROM_FN_PTR(address, &throw_unsupported_operation_exception);
}

#ifndef PRODUCT
JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
  const frame f = thread->last_frame();
  assert(f.is_interpreted_frame(), "must be an interpreted frame");
#ifndef PRODUCT
  methodHandle mh(THREAD, f.interpreter_frame_method());
  BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
#endif // !PRODUCT
  return preserve_this_value;
JRT_END
#endif // !PRODUCT

JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
  os::yield_all(attempts);
JRT_END

JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
  assert(obj->is_oop(), "must be a valid oop");
  assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise");
  InstanceKlass::register_finalizer(instanceOop(obj), CHECK);
JRT_END

jlong SharedRuntime::get_java_tid(Thread* thread) {
  if (thread != NULL) {
    if (thread->is_Java_thread()) {
      oop obj = ((JavaThread*)thread)->threadObj();
      return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
    }
  }
  return 0;
}

/**
 * This function ought to be a void function, but cannot be because
 * it gets turned into a tail-call on sparc, which runs into dtrace bug
 * 6254741. Once that is fixed we can remove the dummy return value.
 */
int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
  return dtrace_object_alloc_base(Thread::current(), o);
}

int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
  assert(DTraceAllocProbes, "wrong call");
  Klass* klass = o->klass();
  int size = o->size();
  Symbol* name = klass->name();
#ifndef USDT2
  HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
                   name->bytes(), name->utf8_length(), size * HeapWordSize);
#else /* USDT2 */
  HOTSPOT_OBJECT_ALLOC(
      get_java_tid(thread),
      (char *) name->bytes(), name->utf8_length(), size * HeapWordSize);
#endif /* USDT2 */
  return 0;
}

JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
    JavaThread* thread, Method* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
#ifndef USDT2
  HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
#else /* USDT2 */
  HOTSPOT_METHOD_ENTRY(
      get_java_tid(thread),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
#endif /* USDT2 */
  return 0;
JRT_END

JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
    JavaThread* thread, Method* method))
  assert(DTraceMethodProbes, "wrong call");
  Symbol* kname = method->klass_name();
  Symbol* name = method->name();
  Symbol* sig = method->signature();
#ifndef USDT2
  HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
                   kname->bytes(), kname->utf8_length(),
                   name->bytes(), name->utf8_length(),
                   sig->bytes(), sig->utf8_length());
#else /* USDT2 */
  HOTSPOT_METHOD_RETURN(
      get_java_tid(thread),
      (char *) kname->bytes(), kname->utf8_length(),
      (char *) name->bytes(), name->utf8_length(),
      (char *) sig->bytes(), sig->utf8_length());
#endif /* USDT2 */
  return 0;
JRT_END

// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but callee has not been invoked yet. Used by: resolve virtual/static,
// vtable updates, etc. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
  ResourceMark rm(THREAD);

  // last java frame on stack (which includes native call frames)
  vframeStream vfst(thread, true);  // Do not skip any javaCalls

  return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
}

// Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
// for a call currently in progress, i.e., arguments have been pushed on stack
// but callee has not been invoked yet. Caller frame must be compiled.
Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
                                              vframeStream& vfst,
                                              Bytecodes::Code& bc,
                                              CallInfo& callinfo, TRAPS) {
  Handle receiver;
  Handle nullHandle;  // create a handy null handle for exception returns

  assert(!vfst.at_end(), "Java frame must exist");

  // Find caller and bci from vframe
  methodHandle caller(THREAD, vfst.method());
  int bci = vfst.bci();

  // Find bytecode
  Bytecode_invoke bytecode(caller, bci);
  bc = bytecode.invoke_code();
  int bytecode_index = bytecode.index();

  // Find receiver for non-static call
  if (bc != Bytecodes::_invokestatic &&
      bc != Bytecodes::_invokedynamic) {
    // This register map must be updated since we need to find the receiver for
    // compiled frames. The receiver might be in a register.
    RegisterMap reg_map2(thread);
    frame stubFrame = thread->last_frame();
    // Caller-frame is a compiled frame
    frame callerFrame = stubFrame.sender(&reg_map2);

    methodHandle callee = bytecode.static_target(CHECK_(nullHandle));
    if (callee.is_null()) {
      THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
    }
    // Retrieve from a compiled argument list
    receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));

    if (receiver.is_null()) {
      THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
    }
  }

  // Resolve method. This is parameterized by bytecode.
  constantPoolHandle constants(THREAD, caller->constants());
  assert(receiver.is_null() || receiver->is_oop(), "wrong receiver");
  LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));

#ifdef ASSERT
  // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
  if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
    assert(receiver.not_null(), "should have thrown exception");
    KlassHandle receiver_klass(THREAD, receiver->klass());
    Klass* rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
    // klass is already loaded
    KlassHandle static_receiver_klass(THREAD, rk);
    // Method handle invokes might have been optimized to a direct call
    // so don't check for the receiver class.
    // FIXME this weakens the assert too much
    methodHandle callee = callinfo.selected_method();
    assert(receiver_klass->is_subtype_of(static_receiver_klass()) ||
           callee->is_method_handle_intrinsic() ||
           callee->is_compiled_lambda_form(),
           "actual receiver must be subclass of static receiver klass");
    if (receiver_klass->oop_is_instance()) {
      if (InstanceKlass::cast(receiver_klass())->is_not_initialized()) {
        tty->print_cr("ERROR: Klass not yet initialized!!");
        receiver_klass()->print();
      }
      assert(!InstanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
    }
  }
#endif

  return receiver;
}

methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  ResourceMark rm(THREAD);
  // We need first to check if any Java activations (compiled, interpreted)
  // exist on the stack since last JavaCall. If not, we need
  // to get the target method from the JavaCall wrapper.
  vframeStream vfst(thread, true);  // Do not skip any javaCalls
  methodHandle callee_method;
  if (vfst.at_end()) {
    // No Java frames were found on stack since we did the JavaCall.
    // Hence the stack can only contain an entry_frame. We need to
    // find the target method from the stub frame.
    RegisterMap reg_map(thread, false);
    frame fr = thread->last_frame();
    assert(fr.is_runtime_frame(), "must be a runtimeStub");
    fr = fr.sender(&reg_map);
    assert(fr.is_entry_frame(), "must be");
    // fr is now pointing to the entry frame.
    callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
    assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  } else {
    Bytecodes::Code bc;
    CallInfo callinfo;
    find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
    callee_method = callinfo.selected_method();
  }
  assert(callee_method()->is_method(), "must be");
  return callee_method;
}

// Resolves a call.
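// When JVMTI class redefinition is possible, resolution is retried below
// as long as the resolved method is obsolete (redefined mid-resolve).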
methodHandle SharedRuntime::resolve_helper(JavaThread* thread,
                                           bool is_virtual,
                                           bool is_optimized, TRAPS) {
  methodHandle callee_method;
  callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
    int retry_count = 0;
    while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
           callee_method->method_holder() != SystemDictionary::Object_klass()) {
      // If there is a pending exception then there is no need to re-try to
      // resolve this method.
      // If the method has been redefined, we need to try again.
      // Hack: we have no way to update the vtables of arrays, so don't
      // require that java.lang.Object has been updated.

      // It is very unlikely that a method is redefined more than 100 times
      // in the middle of resolve. If it loops here more than 100 times,
      // there could be a bug here.
      guarantee((retry_count++ < 100),
                "Could not resolve to latest version of redefined method");
      // method is redefined in the middle of resolve so re-try.
      callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
    }
  }
  return callee_method;
}

// Resolves a call. The compilers generate code for calls that go here
// and are patched with the real destination of the call.
methodHandle SharedRuntime::resolve_sub_helper(JavaThread* thread,
                                               bool is_virtual,
                                               bool is_optimized, TRAPS) {

  ResourceMark rm(thread);
  RegisterMap cbl_map(thread, false);
  frame caller_frame = thread->last_frame().sender(&cbl_map);

  CodeBlob* caller_cb = caller_frame.cb();
  guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
  nmethod* caller_nm = caller_cb->as_nmethod_or_null();
  // make sure caller is not getting deoptimized
  // and removed before we are done with it.
  // CLEANUP - with lazy deopt shouldn't need this lock
  nmethodLocker caller_lock(caller_nm);

  // determine call info & receiver
  // note: a) receiver is NULL for static calls
  //       b) an exception is thrown if receiver is NULL for non-static calls
  CallInfo call_info;
  Bytecodes::Code invoke_code = Bytecodes::_illegal;
  Handle receiver = find_callee_info(thread, invoke_code,
                                     call_info, CHECK_(methodHandle()));
  methodHandle callee_method = call_info.selected_method();

  assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
         (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
         ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");

#ifndef PRODUCT
  // tracing/debugging/statistics
  int* addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
              (is_virtual)   ? (&_resolve_virtual_ctr) :
                               (&_resolve_static_ctr);
  Atomic::inc(addr);

  if (TraceCallFixup) {
    ResourceMark rm(thread);
    tty->print("resolving %s%s (%s) call to",
               (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
               Bytecodes::name(invoke_code));
    callee_method->print_short_name(tty);
    tty->print_cr(" at pc: " INTPTR_FORMAT " to code: " INTPTR_FORMAT, caller_frame.pc(), callee_method->code());
  }
#endif

  // JSR 292 key invariant:
  // If the resolved method is a MethodHandle invoke target, the call
  // site must be a MethodHandle call site, because the lambda form might tail-call,
  // leaving the stack in a state unknown to either caller or callee.
  // TODO detune for now but we might need it again
  // assert(!callee_method->is_compiled_lambda_form() ||
  //        caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");

  // Compute entry points. This might require generation of C2I converter
  // frames, so we cannot be holding any locks here. Furthermore, the
  // computation of the entry points is independent of patching the call. We
  // always return the entry-point, but we only patch the stub if the call has
  // not been deoptimized. Return values: For a virtual call this is a
  // (cached_oop, destination address) pair. For a static call/optimized
  // virtual this is just a destination address.

  StaticCallInfo static_call_info;
  CompiledICInfo virtual_call_info;

  // Make sure the callee nmethod does not get deoptimized and removed before
  // we are done patching the code.
  nmethod* callee_nm = callee_method->code();
  nmethodLocker nl_callee(callee_nm);
#ifdef ASSERT
  address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
#endif

  if (is_virtual) {
    assert(receiver.not_null(), "sanity check");
    bool static_bound = call_info.resolved_method()->can_be_statically_bound();
    KlassHandle h_klass(THREAD, receiver->klass());
    CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
                                          is_optimized, static_bound, virtual_call_info,
                                          CHECK_(methodHandle()));
  } else {
    // static call
    CompiledStaticCall::compute_entry(callee_method, static_call_info);
  }

  // grab lock, check for deoptimization and potentially patch caller
  {
    MutexLocker ml_patch(CompiledIC_lock);

    // Now that we are ready to patch, if the Method* was redefined then
    // don't update the call site and let the caller retry.

    if (!callee_method->is_old()) {
#ifdef ASSERT
      // We must not try to patch to jump to an already unloaded method.
      if (dest_entry_point != 0) {
        assert(CodeCache::find_blob(dest_entry_point) != NULL,
               "should not unload nmethod while locked");
      }
#endif
      if (is_virtual) {
        nmethod* nm = callee_nm;
        if (nm == NULL) CodeCache::find_blob(caller_frame.pc());
        CompiledIC* inline_cache = CompiledIC_before(caller_nm, caller_frame.pc());
        if (inline_cache->is_clean()) {
          inline_cache->set_to_monomorphic(virtual_call_info);
        }
      } else {
        CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
        if (ssc->is_clean()) ssc->set(static_call_info);
      }
    }

  } // unlock CompiledIC_lock

  return callee_method;
}

// Inline caches exist only in compiled code
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
#ifdef ASSERT
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);
  assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
#endif /* ASSERT */

  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
    // Return Method* through TLS
    thread->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END

// Handle call site that has been made non-entrant
JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  // 6243940 We might end up in here if the callee is deoptimized
  // as we race to call it. We don't want to take a safepoint if
  // the caller was interpreted because the caller frame will look
  // interpreted to the stack walkers and arguments are now
  // "compiled" so it is much better to make this transition
  // invisible to the stack walking code. The i2c path will
  // place the callee method in the callee_target. It is stashed
  // there because if we tried to find the callee by normal means a
  // safepoint would be possible and we would have trouble gc'ing the
  // compiled args.
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // MethodHandle invokes don't have a CompiledIC and should always
  // simply redispatch to the callee_target.
  address sender_pc = caller_frame.pc();
  CodeBlob* sender_cb = caller_frame.cb();
  nmethod* sender_nm = sender_cb->as_nmethod_or_null();

  if (caller_frame.is_interpreted_frame() ||
      caller_frame.is_entry_frame()) {
    Method* callee = thread->callee_target();
    guarantee(callee != NULL && callee->is_method(), "bad handshake");
    thread->set_vm_result_2(callee);
    thread->set_callee_target(NULL);
    return callee->get_c2i_entry();
  }

  // Must be compiled to compiled path which is safe to stackwalk
  methodHandle callee_method;
  JRT_BLOCK
    // Force resolving of caller (if we called from compiled frame)
    callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
    thread->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END

// resolve a static call and patch code
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread* thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
    thread->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END

// resolve virtual call and update inline cache to monomorphic
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread* thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
    thread->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END

// Resolve a virtual call that can be statically bound (e.g., always
// monomorphic, so it has no inline cache). Patch code to resolved target.
JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread* thread))
  methodHandle callee_method;
  JRT_BLOCK
    callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
    thread->set_vm_result_2(callee_method());
  JRT_BLOCK_END
  // return compiled code entry point after potential safepoints
  assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  return callee_method->verified_code_entry();
JRT_END
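
// Handle an inline-cache miss: re-resolve the call and, under the
// CompiledIC_lock, upgrade the cache to monomorphic or megamorphic.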
methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread* thread, TRAPS) {
  ResourceMark rm(thread);
  CallInfo call_info;
  Bytecodes::Code bc;

  // receiver is NULL for static calls. An exception is thrown for NULL
  // receivers for non-static calls
  Handle receiver = find_callee_info(thread, bc, call_info,
                                     CHECK_(methodHandle()));
  // Compiler1 can produce virtual call sites that can actually be statically bound.
  // If we fell through to below we would think that the site was going megamorphic
  // when in fact the site can never miss. Worse, because we'd think it was megamorphic
  // we'd try to do a vtable dispatch; however, methods that can be statically bound
  // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
  // reresolution of the call site (as if we did a handle_wrong_method and not a
  // plain ic_miss) and the site will be converted to an optimized virtual call site
  // never to miss again. I don't believe C2 will produce code like this but if it
  // did this would still be the correct thing to do for it too, hence no ifdef.
  //
1394 if (call_info.resolved_method()->can_be_statically_bound()) {
1395 methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
1396 if (TraceCallFixup) {
1397 RegisterMap reg_map(thread, false);
1398 frame caller_frame = thread->last_frame().sender(®_map);
1399 ResourceMark rm(thread);
1400 tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
1401 callee_method->print_short_name(tty);
1402 tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
1403 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1404 }
1405 return callee_method;
1406 }
1408 methodHandle callee_method = call_info.selected_method();
1410 bool should_be_mono = false;
1412 #ifndef PRODUCT
1413 Atomic::inc(&_ic_miss_ctr);
1415 // Statistics & Tracing
1416 if (TraceCallFixup) {
1417 ResourceMark rm(thread);
1418 tty->print("IC miss (%s) call to", Bytecodes::name(bc));
1419 callee_method->print_short_name(tty);
1420 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1421 }
1423 if (ICMissHistogram) {
1424 MutexLocker m(VMStatistic_lock);
1425 RegisterMap reg_map(thread, false);
1426 frame f = thread->last_frame().real_sender(®_map);// skip runtime stub
1427 // produce statistics under the lock
1428 trace_ic_miss(f.pc());
1429 }
1430 #endif
1432 // install an event collector so that when a vtable stub is created the
1433 // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
1434 // event can't be posted when the stub is created as locks are held
1435 // - instead the event will be deferred until the event collector goes
1436 // out of scope.
1437 JvmtiDynamicCodeEventCollector event_collector;
1439 // Update inline cache to megamorphic. Skip update if caller has been
1440 // made non-entrant or we are called from interpreted.
1441 { MutexLocker ml_patch (CompiledIC_lock);
1442 RegisterMap reg_map(thread, false);
1443 frame caller_frame = thread->last_frame().sender(®_map);
1444 CodeBlob* cb = caller_frame.cb();
1445 if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
1446 // Not a non-entrant nmethod, so find inline_cache
1447 CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
1448 bool should_be_mono = false;
1449 if (inline_cache->is_optimized()) {
1450 if (TraceCallFixup) {
1451 ResourceMark rm(thread);
1452 tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
1453 callee_method->print_short_name(tty);
1454 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1455 }
1456 should_be_mono = true;
1457 } else if (inline_cache->is_icholder_call()) {
1458 CompiledICHolder* ic_oop = inline_cache->cached_icholder();
1459 if ( ic_oop != NULL) {
1461 if (receiver()->klass() == ic_oop->holder_klass()) {
1462 // This isn't a real miss. We must have seen that compiled code
1463 // is now available and we want the call site converted to a
1464 // monomorphic compiled call site.
1465 // We can't assert for callee_method->code() != NULL because it
1466 // could have been deoptimized in the meantime
1467 if (TraceCallFixup) {
1468 ResourceMark rm(thread);
1469 tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
1470 callee_method->print_short_name(tty);
1471 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1472 }
1473 should_be_mono = true;
1474 }
1475 }
1476 }
1478 if (should_be_mono) {
1480 // We have a path that was monomorphic but was going interpreted
1481 // and now we have (or had) a compiled entry. We correct the IC
1482 // by using a new icBuffer.
1483 CompiledICInfo info;
1484 KlassHandle receiver_klass(THREAD, receiver()->klass());
1485 inline_cache->compute_monomorphic_entry(callee_method,
1486 receiver_klass,
1487 inline_cache->is_optimized(),
1488 false,
1489 info, CHECK_(methodHandle()));
1490 inline_cache->set_to_monomorphic(info);
1491 } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
1492 // Change to megamorphic
1493 inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
1494 } else {
1495 // Either clean or megamorphic
1496 }
1497 }
1498 } // Release CompiledIC_lock
1500 return callee_method;
1501 }
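// A sketch of the inline-cache transitions handled above; states are those
// of CompiledIC:
//
//   clean --resolve--> monomorphic --miss on a new receiver--> megamorphic
//
// The "false miss" path (compiled code appeared for a site that was going
// interpreted) re-patches the site but keeps it monomorphic.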
1503 //
1504 // Resets a call-site in compiled code so it will get resolved again.
1505 // This routine handles virtual call sites, optimized virtual call
1506 // sites, and static call sites. Typically used to change a call site's
1507 // destination from compiled to interpreted.
1508 //
1509 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
1510 ResourceMark rm(thread);
1511 RegisterMap reg_map(thread, false);
1512 frame stub_frame = thread->last_frame();
1513 assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
1514 frame caller = stub_frame.sender(&reg_map);
1516 // Do nothing if the frame isn't a live compiled frame.
1517 // nmethod could be deoptimized by the time we get here
1518 // so no update to the caller is needed.
1520 if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
1522 address pc = caller.pc();
1524 // Default call_addr is the location of the "basic" call.
1525 // Determine the address of the call we are reresolving. With
1526 // Inline Caches we will always find a recognizable call.
1527 // With Inline Caches disabled we may or may not find a
1528 // recognizable call. We will always find a call for static
1529 // calls and for optimized virtual calls. For vanilla virtual
1530 // calls it depends on the state of the UseInlineCaches switch.
1531 //
1532 // With Inline Caches disabled we can get here for a virtual call
1533 // for two reasons:
1534 // 1 - calling an abstract method. The vtable for abstract methods
1535 // will run us through handle_wrong_method and we will eventually
1536 // end up in the interpreter to throw an AbstractMethodError.
1537 // 2 - a racing deoptimization. We could be doing a vanilla vtable
1538 // call and between the time we fetch the entry address and
1539 // we jump to it the target gets deoptimized. As in case 1,
1540 // we will wind up in the interpreter (through a c2i with c2).
1541 //
1542 address call_addr = NULL;
1543 {
1544 // Get call instruction under lock because another thread may be
1545 // busy patching it.
1546 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1547 // Location of call instruction
1548 if (NativeCall::is_call_before(pc)) {
1549 NativeCall *ncall = nativeCall_before(pc);
1550 call_addr = ncall->instruction_address();
1551 }
1552 }
1554 // Check for static or virtual call
1555 bool is_static_call = false;
1556 nmethod* caller_nm = CodeCache::find_nmethod(pc);
1557 // Make sure nmethod doesn't get deoptimized and removed until
1558 // this is done with it.
1559 // CLEANUP - with lazy deopt shouldn't need this lock
1560 nmethodLocker nmlock(caller_nm);
1562 if (call_addr != NULL) {
1563 RelocIterator iter(caller_nm, call_addr, call_addr+1);
1564 int ret = iter.next(); // Get item
1565 if (ret) {
1566 assert(iter.addr() == call_addr, "must find call");
1567 if (iter.type() == relocInfo::static_call_type) {
1568 is_static_call = true;
1569 } else {
1570 assert(iter.type() == relocInfo::virtual_call_type ||
1571 iter.type() == relocInfo::opt_virtual_call_type
1572 , "unexpected relocInfo. type");
1573 }
1574 } else {
1575 assert(!UseInlineCaches, "relocation info. must exist for this address");
1576 }
1578 // Cleaning the inline cache will force a new resolve. This is more robust
1579 // than directly setting it to the new destination, since resolving of calls
1580 // is always done through the same code path. (Experience shows that it
1581 // leads to very-hard-to-track-down bugs if an inline cache gets updated
1582 // to a wrong method.) It should not be performance critical, since the
1583 // resolve is only done once.
1585 MutexLocker ml(CompiledIC_lock);
1586 //
1587 // We do not patch the call site if the nmethod has been made non-entrant
1588 // as it is a waste of time
1589 //
1590 if (caller_nm->is_in_use()) {
1591 if (is_static_call) {
1592 CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
1593 ssc->set_to_clean();
1594 } else {
1595 // compiled, dispatched call (which used to call an interpreted method)
1596 CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
1597 inline_cache->set_to_clean();
1598 }
1599 }
1600 }
1602 }
1604 methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
1607 #ifndef PRODUCT
1608 Atomic::inc(&_wrong_method_ctr);
1610 if (TraceCallFixup) {
1611 ResourceMark rm(thread);
1612 tty->print("handle_wrong_method reresolving call to");
1613 callee_method->print_short_name(tty);
1614 tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
1615 }
1616 #endif
1618 return callee_method;
1619 }
1621 // ---------------------------------------------------------------------------
1622 // We are calling the interpreter via a c2i. Normally this would mean that
1623 // we were called by a compiled method. However we could have lost a race
1624 // where we went int -> i2c -> c2i and so the caller could in fact be
1625 // interpreted. If the caller is compiled we attempt to patch the caller
1626 // so it no longer calls into the interpreter.
1627 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(Method* method, address caller_pc))
1628 Method* moop(method);
1630 address entry_point = moop->from_compiled_entry();
1632 // It's possible that deoptimization can occur at a call site which hasn't
1633 // been resolved yet, in which case this function will be called from
1634 // an nmethod that has been patched for deopt and we can ignore the
1635 // request for a fixup.
1636 // Also it is possible that we lost a race in which from_compiled_entry
1637 // is now back to the i2c; in that case we don't need to patch, and if
1638 // we did we'd leap into space because the call site needs to use the
1639 // "to interpreter" stub in order to load up the Method*. Don't
1640 // ask me how I know this...
1642 CodeBlob* cb = CodeCache::find_blob(caller_pc);
1643 if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
1644 return;
1645 }
1647 // The check above makes sure this is an nmethod.
1648 nmethod* nm = cb->as_nmethod_or_null();
1649 assert(nm, "must be");
1651 // Get the return PC for the passed caller PC.
1652 address return_pc = caller_pc + frame::pc_return_offset;
1654 // There is a benign race here. We could be attempting to patch to a compiled
1655 // entry point at the same time the callee is being deoptimized. If that is
1656 // the case then entry_point may in fact point to a c2i and we'd patch the
1657 // call site with the same old data. clear_code will set code() to NULL
1658 // at the end of it. If we happen to see that NULL then we can skip trying
1659 // to patch. If we hit the window where the callee has a c2i in the
1660 // from_compiled_entry and the NULL isn't present yet then we lose the race
1661 // and patch the code with the same old data. Such is life.
1663 if (moop->code() == NULL) return;
1665 if (nm->is_in_use()) {
1667 // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
1668 MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
1669 if (NativeCall::is_call_before(return_pc)) {
1670 NativeCall *call = nativeCall_before(return_pc);
1671 //
1672 // bug 6281185. We might get here after resolving a call site to a vanilla
1673 // virtual call. Because the resolvee uses the verified entry it may then
1674 // see compiled code and attempt to patch the site by calling us. This would
1675 // then incorrectly convert the call site to optimized and it's downhill from
1676 // there. If you're lucky you'll get the assert in the bugid, if not you've
1677 // just made a call site that could be megamorphic into a monomorphic site
1678 // for the rest of its life! Just another racing bug in the life of
1679 // fixup_callers_callsite ...
1680 //
1681 RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
1682 iter.next();
1683 assert(iter.has_current(), "must have a reloc at java call site");
1684 relocInfo::relocType typ = iter.reloc()->type();
1685 if ( typ != relocInfo::static_call_type &&
1686 typ != relocInfo::opt_virtual_call_type &&
1687 typ != relocInfo::static_stub_type) {
1688 return;
1689 }
1690 address destination = call->destination();
1691 if (destination != entry_point) {
1692 CodeBlob* callee = CodeCache::find_blob(destination);
1693 // callee == cb seems weird. It means calling the interpreter through a stub.
1694 if (callee == cb || callee->is_adapter_blob()) {
1695 // static call or optimized virtual
1696 if (TraceCallFixup) {
1697 tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
1698 moop->print_short_name(tty);
1699 tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1700 }
1701 call->set_destination_mt_safe(entry_point);
1702 } else {
1703 if (TraceCallFixup) {
1704 tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
1705 moop->print_short_name(tty);
1706 tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1707 }
1708 // The assert would be too strong: the destination could also be a resolve stub.
1709 // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
1710 }
1711 } else {
1712 if (TraceCallFixup) {
1713 tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
1714 moop->print_short_name(tty);
1715 tty->print_cr(" to " INTPTR_FORMAT, entry_point);
1716 }
1717 }
1718 }
1719 }
1720 IRT_END
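// The fixup above in one (simplified) picture: before the fixup the compiled
// caller reaches the interpreter through the callee's c2i adapter,
//
//   caller nmethod --call--> c2i adapter --> interpreter
//
// and afterwards the call instruction is redirected, MT-safely, straight to
// the callee's compiled entry point (moop->from_compiled_entry()).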
1723 // same as JVM_Arraycopy, but called directly from compiled code
1724 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos,
1725 oopDesc* dest, jint dest_pos,
1726 jint length,
1727 JavaThread* thread)) {
1728 #ifndef PRODUCT
1729 _slow_array_copy_ctr++;
1730 #endif
1731 // Check if we have null pointers
1732 if (src == NULL || dest == NULL) {
1733 THROW(vmSymbols::java_lang_NullPointerException());
1734 }
1735 // Do the copy. The casts to arrayOop are required by the copy_array API,
1736 // even though the copy_array API also performs dynamic checks to ensure
1737 // that src and dest are truly arrays (and are conformable).
1738 // The copy_array mechanism is awkward and could be removed, but
1739 // the compilers don't call this function except as a last resort,
1740 // so it probably doesn't matter.
1741 Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos,
1742 (arrayOopDesc*)dest, dest_pos,
1743 length, thread);
1744 }
1745 JRT_END
1747 char* SharedRuntime::generate_class_cast_message(
1748 JavaThread* thread, const char* objName) {
1750 // Get target class name from the checkcast instruction
1751 vframeStream vfst(thread, true);
1752 assert(!vfst.at_end(), "Java frame must exist");
1753 Bytecode_checkcast cc(vfst.method(), vfst.method()->bcp_from(vfst.bci()));
1754 Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
1755 cc.index(), thread));
1756 return generate_class_cast_message(objName, targetKlass->external_name());
1757 }
1759 char* SharedRuntime::generate_class_cast_message(
1760 const char* objName, const char* targetKlassName, const char* desc) {
1761 size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
1763 char* message = NEW_RESOURCE_ARRAY(char, msglen);
1764 if (NULL == message) {
1765 // Shouldn't happen, but don't cause even more problems if it does
1766 message = const_cast<char*>(objName);
1767 } else {
1768 jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
1769 }
1770 return message;
1771 }
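// Worked example, assuming the default desc of " cannot be cast to " from the
// prototype: objName "java.lang.Object" and targetKlassName "java.lang.String"
// produce the resource-allocated message
// "java.lang.Object cannot be cast to java.lang.String".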
1773 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
1774 (void) JavaThread::current()->reguard_stack();
1775 JRT_END
1778 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
1779 #ifndef PRODUCT
1780 int SharedRuntime::_monitor_enter_ctr=0;
1781 #endif
1782 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
1783 oop obj(_obj);
1784 #ifndef PRODUCT
1785 _monitor_enter_ctr++; // monitor enter slow
1786 #endif
1787 if (PrintBiasedLockingStatistics) {
1788 Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
1789 }
1790 Handle h_obj(THREAD, obj);
1791 if (UseBiasedLocking) {
1792 // Retry fast entry if bias is revoked to avoid unnecessary inflation
1793 ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
1794 } else {
1795 ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
1796 }
1797 assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
1798 JRT_END
1800 #ifndef PRODUCT
1801 int SharedRuntime::_monitor_exit_ctr=0;
1802 #endif
1803 // Handles the uncommon cases of monitor unlocking in compiled code
1804 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
1805 oop obj(_obj);
1806 #ifndef PRODUCT
1807 _monitor_exit_ctr++; // monitor exit slow
1808 #endif
1809 Thread* THREAD = JavaThread::current();
1810 // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
1811 // testing was never able to fire the assert that guarded it, so I have removed it.
1812 assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
1813 #undef MIGHT_HAVE_PENDING
1814 #ifdef MIGHT_HAVE_PENDING
1815 // Save and restore any pending_exception around the exception mark.
1816 // While the slow_exit must not throw an exception, we could come into
1817 // this routine with one set.
1818 oop pending_excep = NULL;
1819 const char* pending_file;
1820 int pending_line;
1821 if (HAS_PENDING_EXCEPTION) {
1822 pending_excep = PENDING_EXCEPTION;
1823 pending_file = THREAD->exception_file();
1824 pending_line = THREAD->exception_line();
1825 CLEAR_PENDING_EXCEPTION;
1826 }
1827 #endif /* MIGHT_HAVE_PENDING */
1829 {
1830 // Exit must be non-blocking, and therefore no exceptions can be thrown.
1831 EXCEPTION_MARK;
1832 ObjectSynchronizer::slow_exit(obj, lock, THREAD);
1833 }
1835 #ifdef MIGHT_HAVE_PENDING
1836 if (pending_excep != NULL) {
1837 THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
1838 }
1839 #endif /* MIGHT_HAVE_PENDING */
1840 JRT_END
1842 #ifndef PRODUCT
1844 void SharedRuntime::print_statistics() {
1845 ttyLocker ttyl;
1846 if (xtty != NULL) xtty->head("statistics type='SharedRuntime'");
1848 if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr);
1849 if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr);
1850 if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
1852 SharedRuntime::print_ic_miss_histogram();
1854 if (CountRemovableExceptions) {
1855 if (_nof_removable_exceptions > 0) {
1856 Unimplemented(); // this counter is not yet incremented
1857 tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
1858 }
1859 }
1861 // Dump the JRT_ENTRY counters
1862 if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
1863 if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
1864 if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
1865 if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
1866 if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
1867 if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
1868 if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
1870 tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
1871 tty->print_cr("%5d wrong method", _wrong_method_ctr );
1872 tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
1873 tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
1874 tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
1876 if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
1877 if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
1878 if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
1879 if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
1880 if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
1881 if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
1882 if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
1883 if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
1884 if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
1885 if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
1886 if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
1887 if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
1888 if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
1889 if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
1890 if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
1891 if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
1893 AdapterHandlerLibrary::print_statistics();
1895 if (xtty != NULL) xtty->tail("statistics");
1896 }
1898 inline double percent(int x, int y) {
1899 return 100.0 * x / MAX2(y, 1);
1900 }
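// e.g. percent(1, 200) == 0.5 and percent(5, 0) == 500.0; the MAX2 only
// guards against division by zero, it does not clamp the result to 100.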
1902 class MethodArityHistogram {
1903 public:
1904 enum { MAX_ARITY = 256 };
1905 private:
1906 static int _arity_histogram[MAX_ARITY]; // histogram of #args
1907 static int _size_histogram[MAX_ARITY]; // histogram of arg size in words
1908 static int _max_arity; // max. arity seen
1909 static int _max_size; // max. arg size seen
1911 static void add_method_to_histogram(nmethod* nm) {
1912 Method* m = nm->method();
1913 ArgumentCount args(m->signature());
1914 int arity = args.size() + (m->is_static() ? 0 : 1);
1915 int argsize = m->size_of_parameters();
1916 arity = MIN2(arity, MAX_ARITY-1);
1917 argsize = MIN2(argsize, MAX_ARITY-1);
1918 int count = nm->method()->compiled_invocation_count();
1919 _arity_histogram[arity] += count;
1920 _size_histogram[argsize] += count;
1921 _max_arity = MAX2(_max_arity, arity);
1922 _max_size = MAX2(_max_size, argsize);
1923 }
1925 void print_histogram_helper(int n, int* histo, const char* name) {
1926 const int N = MIN2(5, n);
1928 double sum = 0;
1929 double weighted_sum = 0;
1930 int i;
1931 for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
1932 double rest = sum;
1933 double percent = sum / 100;
1934 for (i = 0; i <= N; i++) {
1935 rest -= histo[i];
1936 tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
1937 }
1938 tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
1939 tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
1940 }
1942 void print_histogram() {
1943 tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
1944 print_histogram_helper(_max_arity, _arity_histogram, "arity");
1945 tty->print_cr("\nSame for parameter size (in words):");
1946 print_histogram_helper(_max_size, _size_histogram, "size");
1947 tty->cr();
1948 }
1950 public:
1951 MethodArityHistogram() {
1952 MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1953 _max_arity = _max_size = 0;
1954 for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
1955 CodeCache::nmethods_do(add_method_to_histogram);
1956 print_histogram();
1957 }
1958 };
1960 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
1961 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
1962 int MethodArityHistogram::_max_arity;
1963 int MethodArityHistogram::_max_size;
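// Worked example for add_method_to_histogram(): a virtual method with
// signature (JI)V that was invoked 10 times from compiled code gives
//   arity   = 2 args + 1 receiver           == 3
//   argsize = 1 (rcvr) + 2 (long) + 1 (int) == 4 words
// so _arity_histogram[3] and _size_histogram[4] each gain 10.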
1965 void SharedRuntime::print_call_statistics(int comp_total) {
1966 tty->print_cr("Calls from compiled code:");
1967 int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
1968 int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
1969 int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
1970 tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total));
1971 tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total));
1972 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
1973 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
1974 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls));
1975 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
1976 tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total));
1977 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
1978 tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
1979 tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls));
1980 tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
1981 tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
1982 tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
1983 tty->cr();
1984 tty->print_cr("Note 1: counter updates are not MT-safe.");
1985 tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
1986 tty->print_cr(" %% in nested categories are relative to their category");
1987 tty->print_cr(" (and thus add up to more than 100%% with inlining)");
1988 tty->cr();
1990 MethodArityHistogram h;
1991 }
1992 #endif
1995 // A simple wrapper class around the calling convention information
1996 // that allows sharing of adapters for the same calling convention.
1997 class AdapterFingerPrint : public CHeapObj<mtCode> {
1998 private:
1999 enum {
2000 _basic_type_bits = 4,
2001 _basic_type_mask = right_n_bits(_basic_type_bits),
2002 _basic_types_per_int = BitsPerInt / _basic_type_bits,
2003 _compact_int_count = 3
2004 };
2005 // TO DO: Consider integrating this with a more global scheme for compressing signatures.
2006 // For now, 4 bits per components (plus T_VOID gaps after double/long) is not excessive.
2008 union {
2009 int _compact[_compact_int_count];
2010 int* _fingerprint;
2011 } _value;
2012 int _length; // A negative length indicates the fingerprint is in the compact form;
2013 // otherwise _value._fingerprint is the array.
2015 // Remap BasicTypes that are handled equivalently by the adapters.
2016 // These are correct for the current system but someday it might be
2017 // necessary to make this mapping platform dependent.
2018 static int adapter_encoding(BasicType in) {
2019 switch(in) {
2020 case T_BOOLEAN:
2021 case T_BYTE:
2022 case T_SHORT:
2023 case T_CHAR:
2024 // These are all promoted to T_INT in the calling convention
2025 return T_INT;
2027 case T_OBJECT:
2028 case T_ARRAY:
2029 // In other words, we assume that any register good enough for
2030 // an int or long is good enough for a managed pointer.
2031 #ifdef _LP64
2032 return T_LONG;
2033 #else
2034 return T_INT;
2035 #endif
2037 case T_INT:
2038 case T_LONG:
2039 case T_FLOAT:
2040 case T_DOUBLE:
2041 case T_VOID:
2042 return in;
2044 default:
2045 ShouldNotReachHere();
2046 return T_CONFLICT;
2047 }
2048 }
2050 public:
2051 AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
2052 // The fingerprint is based on the BasicType signature encoded
2053 // into an array of ints with eight entries per int.
2054 int* ptr;
2055 int len = (total_args_passed + (_basic_types_per_int-1)) / _basic_types_per_int;
2056 if (len <= _compact_int_count) {
2057 assert(_compact_int_count == 3, "else change next line");
2058 _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
2059 // Storing the signature encoded as signed chars hits about 98%
2060 // of the time.
2061 _length = -len;
2062 ptr = _value._compact;
2063 } else {
2064 _length = len;
2065 _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length, mtCode);
2066 ptr = _value._fingerprint;
2067 }
2069 // Now pack the BasicTypes with 8 per int
2070 int sig_index = 0;
2071 for (int index = 0; index < len; index++) {
2072 int value = 0;
2073 for (int byte = 0; byte < _basic_types_per_int; byte++) {
2074 int bt = ((sig_index < total_args_passed)
2075 ? adapter_encoding(sig_bt[sig_index++])
2076 : 0);
2077 assert((bt & _basic_type_mask) == bt, "must fit in 4 bits");
2078 value = (value << _basic_type_bits) | bt;
2079 }
2080 ptr[index] = value;
2081 }
2082 }
2084 ~AdapterFingerPrint() {
2085 if (_length > 0) {
2086 FREE_C_HEAP_ARRAY(int, _value._fingerprint, mtCode);
2087 }
2088 }
2090 int value(int index) {
2091 if (_length < 0) {
2092 return _value._compact[index];
2093 }
2094 return _value._fingerprint[index];
2095 }
2096 int length() {
2097 if (_length < 0) return -_length;
2098 return _length;
2099 }
2101 bool is_compact() {
2102 return _length <= 0;
2103 }
2105 unsigned int compute_hash() {
2106 int hash = 0;
2107 for (int i = 0; i < length(); i++) {
2108 int v = value(i);
2109 hash = (hash << 8) ^ v ^ (hash >> 5);
2110 }
2111 return (unsigned int)hash;
2112 }
2114 const char* as_string() {
2115 stringStream st;
2116 st.print("0x");
2117 for (int i = 0; i < length(); i++) {
2118 st.print("%08x", value(i));
2119 }
2120 return st.as_string();
2121 }
2123 bool equals(AdapterFingerPrint* other) {
2124 if (other->_length != _length) {
2125 return false;
2126 }
2127 if (_length < 0) {
2128 assert(_compact_int_count == 3, "else change next line");
2129 return _value._compact[0] == other->_value._compact[0] &&
2130 _value._compact[1] == other->_value._compact[1] &&
2131 _value._compact[2] == other->_value._compact[2];
2132 } else {
2133 for (int i = 0; i < _length; i++) {
2134 if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
2135 return false;
2136 }
2137 }
2138 }
2139 return true;
2140 }
2141 };
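// A minimal standalone sketch (not VM code) of the nibble packing and hash
// above, assuming this codebase's BasicType values (T_INT == 0xa,
// T_LONG == 0xb, T_VOID == 0xe) and an LP64 build, where adapter_encoding()
// maps the receiver's T_OBJECT to T_LONG:
#if 0
#include <stdio.h>

int main() {
  // Encoded stream for a virtual method "(IJ)V": receiver, int, long, void gap.
  int sig[] = { 0xb /*rcvr*/, 0xa /*int*/, 0xb /*long*/, 0xe /*void gap*/ };
  int value = 0;
  for (int nibble = 0; nibble < 8; nibble++) {    // _basic_types_per_int == 8
    int bt = (nibble < 4) ? sig[nibble] : 0;      // pad past the end with 0
    value = (value << 4) | bt;                    // same shift/or as the ctor
  }
  printf("fingerprint word 0 = 0x%08x\n", value); // prints 0xbabe0000

  int hash = 0;
  hash = (hash << 8) ^ value ^ (hash >> 5);       // compute_hash() over one word
  printf("hash               = 0x%08x\n", (unsigned)hash);
  return 0;
}
#endif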
2144 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
2145 class AdapterHandlerTable : public BasicHashtable<mtCode> {
2146 friend class AdapterHandlerTableIterator;
2148 private:
2150 #ifndef PRODUCT
2151 static int _lookups; // number of calls to lookup
2152 static int _buckets; // number of buckets checked
2153 static int _equals; // number of buckets checked with matching hash
2154 static int _hits; // number of successful lookups
2155 static int _compact; // number of equals calls with compact signature
2156 #endif
2158 AdapterHandlerEntry* bucket(int i) {
2159 return (AdapterHandlerEntry*)BasicHashtable<mtCode>::bucket(i);
2160 }
2162 public:
2163 AdapterHandlerTable()
2164 : BasicHashtable<mtCode>(293, sizeof(AdapterHandlerEntry)) { }
2166 // Create a new entry suitable for insertion in the table
2167 AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
2168 AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable<mtCode>::new_entry(fingerprint->compute_hash());
2169 entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2170 return entry;
2171 }
2173 // Insert an entry into the table
2174 void add(AdapterHandlerEntry* entry) {
2175 int index = hash_to_index(entry->hash());
2176 add_entry(index, entry);
2177 }
2179 void free_entry(AdapterHandlerEntry* entry) {
2180 entry->deallocate();
2181 BasicHashtable<mtCode>::free_entry(entry);
2182 }
2184 // Find an entry with the same fingerprint if it exists
2185 AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
2186 NOT_PRODUCT(_lookups++);
2187 AdapterFingerPrint fp(total_args_passed, sig_bt);
2188 unsigned int hash = fp.compute_hash();
2189 int index = hash_to_index(hash);
2190 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2191 NOT_PRODUCT(_buckets++);
2192 if (e->hash() == hash) {
2193 NOT_PRODUCT(_equals++);
2194 if (fp.equals(e->fingerprint())) {
2195 #ifndef PRODUCT
2196 if (fp.is_compact()) _compact++;
2197 _hits++;
2198 #endif
2199 return e;
2200 }
2201 }
2202 }
2203 return NULL;
2204 }
2206 #ifndef PRODUCT
2207 void print_statistics() {
2208 ResourceMark rm;
2209 int longest = 0;
2210 int empty = 0;
2211 int total = 0;
2212 int nonempty = 0;
2213 for (int index = 0; index < table_size(); index++) {
2214 int count = 0;
2215 for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
2216 count++;
2217 }
2218 if (count != 0) nonempty++;
2219 if (count == 0) empty++;
2220 if (count > longest) longest = count;
2221 total += count;
2222 }
2223 tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
2224 empty, longest, total, total / (double)nonempty);
2225 tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
2226 _lookups, _buckets, _equals, _hits, _compact);
2227 }
2228 #endif
2229 };
2232 #ifndef PRODUCT
2234 int AdapterHandlerTable::_lookups;
2235 int AdapterHandlerTable::_buckets;
2236 int AdapterHandlerTable::_equals;
2237 int AdapterHandlerTable::_hits;
2238 int AdapterHandlerTable::_compact;
2240 #endif
2242 class AdapterHandlerTableIterator : public StackObj {
2243 private:
2244 AdapterHandlerTable* _table;
2245 int _index;
2246 AdapterHandlerEntry* _current;
2248 void scan() {
2249 while (_index < _table->table_size()) {
2250 AdapterHandlerEntry* a = _table->bucket(_index);
2251 _index++;
2252 if (a != NULL) {
2253 _current = a;
2254 return;
2255 }
2256 }
2257 }
2259 public:
2260 AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
2261 scan();
2262 }
2263 bool has_next() {
2264 return _current != NULL;
2265 }
2266 AdapterHandlerEntry* next() {
2267 if (_current != NULL) {
2268 AdapterHandlerEntry* result = _current;
2269 _current = _current->next();
2270 if (_current == NULL) scan();
2271 return result;
2272 } else {
2273 return NULL;
2274 }
2275 }
2276 };
2279 // ---------------------------------------------------------------------------
2280 // Implementation of AdapterHandlerLibrary
2281 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
2282 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
2283 const int AdapterHandlerLibrary_size = 16*K;
2284 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
2286 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
2287 // Should be called only when AdapterHandlerLibrary_lock is active.
2288 if (_buffer == NULL) // Initialize lazily
2289 _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
2290 return _buffer;
2291 }
2293 void AdapterHandlerLibrary::initialize() {
2294 if (_adapters != NULL) return;
2295 _adapters = new AdapterHandlerTable();
2297 // Create a special handler for abstract methods. Abstract methods
2298 // are never compiled so an i2c entry is somewhat meaningless, but
2299 // fill it in with something appropriate just in case. Pass the handle-
2300 // wrong-method stub for the c2i transitions.
2301 address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
2302 _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
2303 StubRoutines::throw_AbstractMethodError_entry(),
2304 wrong_method, wrong_method);
2305 }
2307 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
2308 address i2c_entry,
2309 address c2i_entry,
2310 address c2i_unverified_entry) {
2311 return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
2312 }
2314 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
2315 // Use customized signature handler. Need to lock around updates to
2316 // the AdapterHandlerTable (it is not safe for concurrent readers
2317 // and a single writer: this could be fixed if it becomes a
2318 // problem).
2320 // Get the address of the ic_miss handlers before we grab the
2321 // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
2322 // was caused by the initialization of the stubs happening
2323 // while we held the lock and then notifying jvmti while
2324 // holding it. This just forces the initialization to be a little
2325 // earlier.
2326 address ic_miss = SharedRuntime::get_ic_miss_stub();
2327 assert(ic_miss != NULL, "must have handler");
2329 ResourceMark rm;
2331 NOT_PRODUCT(int insts_size);
2332 AdapterBlob* B = NULL;
2333 AdapterHandlerEntry* entry = NULL;
2334 AdapterFingerPrint* fingerprint = NULL;
2335 {
2336 MutexLocker mu(AdapterHandlerLibrary_lock);
2337 // make sure data structure is initialized
2338 initialize();
2340 if (method->is_abstract()) {
2341 return _abstract_method_handler;
2342 }
2344 // Fill in the signature array, for the calling-convention call.
2345 int total_args_passed = method->size_of_parameters(); // All args on stack
2347 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
2348 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
2349 int i = 0;
2350 if (!method->is_static()) // Pass in receiver first
2351 sig_bt[i++] = T_OBJECT;
2352 for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
2353 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
2354 if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
2355 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2356 }
2357 assert(i == total_args_passed, "");
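// e.g. for a virtual method "(JLjava/lang/String;)V" the loop above yields
// sig_bt = { T_OBJECT (rcvr), T_LONG, T_VOID (2nd slot), T_OBJECT } and
// total_args_passed == 4.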
2359 // Lookup method signature's fingerprint
2360 entry = _adapters->lookup(total_args_passed, sig_bt);
2362 #ifdef ASSERT
2363 AdapterHandlerEntry* shared_entry = NULL;
2364 if (VerifyAdapterSharing && entry != NULL) {
2365 shared_entry = entry;
2366 entry = NULL;
2367 }
2368 #endif
2370 if (entry != NULL) {
2371 return entry;
2372 }
2374 // Get a description of the compiled Java calling convention and the largest used (VMReg) stack slot
2375 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
2377 // Make a C heap allocated version of the fingerprint to store in the adapter
2378 fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
2380 // Create I2C & C2I handlers
2382 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2383 if (buf != NULL) {
2384 CodeBuffer buffer(buf);
2385 short buffer_locs[20];
2386 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
2387 sizeof(buffer_locs)/sizeof(relocInfo));
2388 MacroAssembler _masm(&buffer);
2390 entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
2391 total_args_passed,
2392 comp_args_on_stack,
2393 sig_bt,
2394 regs,
2395 fingerprint);
2397 #ifdef ASSERT
2398 if (VerifyAdapterSharing) {
2399 if (shared_entry != NULL) {
2400 assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
2401 "code must match");
2402 // Release the one just created and return the original
2403 _adapters->free_entry(entry);
2404 return shared_entry;
2405 } else {
2406 entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
2407 }
2408 }
2409 #endif
2411 B = AdapterBlob::create(&buffer);
2412 NOT_PRODUCT(insts_size = buffer.insts_size());
2413 }
2414 if (B == NULL) {
2415 // CodeCache is full, disable compilation
2416 // Ought to log this but compile log is only per compile thread
2417 // and we're some nondescript Java thread.
2418 MutexUnlocker mu(AdapterHandlerLibrary_lock);
2419 CompileBroker::handle_full_code_cache();
2420 return NULL; // Out of CodeCache space
2421 }
2422 entry->relocate(B->content_begin());
2423 #ifndef PRODUCT
2424 // debugging support
2425 if (PrintAdapterHandlers || PrintStubCode) {
2426 entry->print_adapter_on(tty);
2427 tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
2428 _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
2429 method->signature()->as_C_string(), insts_size);
2430 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
2431 if (Verbose || PrintStubCode) {
2432 address first_pc = entry->base_address();
2433 if (first_pc != NULL)
2434 Disassembler::decode(first_pc, first_pc + insts_size);
2435 }
2436 }
2437 #endif
2439 _adapters->add(entry);
2440 }
2441 // Outside of the lock
2442 if (B != NULL) {
2443 char blob_id[256];
2444 jio_snprintf(blob_id,
2445 sizeof(blob_id),
2446 "%s(%s)@" PTR_FORMAT,
2447 B->name(),
2448 fingerprint->as_string(),
2449 B->content_begin());
2450 Forte::register_stub(blob_id, B->content_begin(), B->content_end());
2452 if (JvmtiExport::should_post_dynamic_code_generated()) {
2453 JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
2454 }
2455 }
2456 return entry;
2457 }
2459 address AdapterHandlerEntry::base_address() {
2460 address base = _i2c_entry;
2461 if (base == NULL) base = _c2i_entry;
2462 assert(base <= _c2i_entry || _c2i_entry == NULL, "");
2463 assert(base <= _c2i_unverified_entry || _c2i_unverified_entry == NULL, "");
2464 return base;
2465 }
2467 void AdapterHandlerEntry::relocate(address new_base) {
2468 address old_base = base_address();
2469 assert(old_base != NULL, "");
2470 ptrdiff_t delta = new_base - old_base;
2471 if (_i2c_entry != NULL)
2472 _i2c_entry += delta;
2473 if (_c2i_entry != NULL)
2474 _c2i_entry += delta;
2475 if (_c2i_unverified_entry != NULL)
2476 _c2i_unverified_entry += delta;
2477 assert(base_address() == new_base, "");
2478 }
2481 void AdapterHandlerEntry::deallocate() {
2482 delete _fingerprint;
2483 #ifdef ASSERT
2484 if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code, mtCode);
2485 if (_saved_sig) FREE_C_HEAP_ARRAY(BasicType, _saved_sig, mtCode);
2486 #endif
2487 }
2490 #ifdef ASSERT
2491 // Capture the code before relocation so that it can be compared
2492 // against other versions. If the code is captured after relocation
2493 // then relative instructions won't be equivalent.
2494 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
2495 _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length, mtCode);
2496 _code_length = length;
2497 memcpy(_saved_code, buffer, length);
2498 _total_args_passed = total_args_passed;
2499 _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed, mtCode);
2500 memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
2501 }
2504 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
2505 if (length != _code_length) {
2506 return false;
2507 }
2508 for (int i = 0; i < length; i++) {
2509 if (buffer[i] != _saved_code[i]) {
2510 return false;
2511 }
2512 }
2513 return true;
2514 }
2515 #endif
2518 // Create a native wrapper for this native method. The wrapper converts the
2519 // java compiled calling convention to the native convention, handlizes
2520 // arguments, and transitions to native. On return from the native we transition
2521 // back to java blocking if a safepoint is in progress.
2522 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method, int compile_id) {
2523 ResourceMark rm;
2524 nmethod* nm = NULL;
2526 assert(method->is_native(), "must be native");
2527 assert(method->is_method_handle_intrinsic() ||
2528 method->has_native_function(), "must have something valid to call!");
2530 {
2531 // perform the work while holding the lock, but perform any printing outside the lock
2532 MutexLocker mu(AdapterHandlerLibrary_lock);
2533 // See if somebody beat us to it
2534 nm = method->code();
2535 if (nm) {
2536 return nm;
2537 }
2539 ResourceMark rm;
2541 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2542 if (buf != NULL) {
2543 CodeBuffer buffer(buf);
2544 double locs_buf[20];
2545 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2546 MacroAssembler _masm(&buffer);
2548 // Fill in the signature array, for the calling-convention call.
2549 int total_args_passed = method->size_of_parameters();
2551 BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
2552 VMRegPair* regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed);
2553 int i=0;
2554 if( !method->is_static() ) // Pass in receiver first
2555 sig_bt[i++] = T_OBJECT;
2556 SignatureStream ss(method->signature());
2557 for( ; !ss.at_return_type(); ss.next()) {
2558 sig_bt[i++] = ss.type(); // Collect remaining bits of signature
2559 if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
2560 sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots
2561 }
2562 assert( i==total_args_passed, "" );
2563 BasicType ret_type = ss.type();
2565 // Now get the compiled-Java layout as input (or output) arguments.
2566 // NOTE: Stubs for compiled entry points of method handle intrinsics
2567 // are just trampolines so the argument registers must be outgoing ones.
2568 const bool is_outgoing = method->is_method_handle_intrinsic();
2569 int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, is_outgoing);
2571 // Generate the compiled-to-native wrapper code
2572 nm = SharedRuntime::generate_native_wrapper(&_masm,
2573 method,
2574 compile_id,
2575 total_args_passed,
2576 comp_args_on_stack,
2577 sig_bt,regs,
2578 ret_type);
2579 }
2580 }
2582 // Must unlock before calling set_code
2584 // Install the generated code.
2585 if (nm != NULL) {
2586 if (PrintCompilation) {
2587 ttyLocker ttyl;
2588 CompileTask::print_compilation(tty, nm, method->is_static() ? "(static)" : "");
2589 }
2590 method->set_code(method, nm);
2591 nm->post_compiled_method_load_event();
2592 } else {
2593 // CodeCache is full, disable compilation
2594 CompileBroker::handle_full_code_cache();
2595 }
2596 return nm;
2597 }
2599 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::block_for_jni_critical(JavaThread* thread))
2600 assert(thread == JavaThread::current(), "must be");
2601 // The code is about to enter a JNI lazy critical native method and
2602 // _needs_gc is true, so if this thread is already in a critical
2603 // section then just return, otherwise this thread should block
2604 // until needs_gc has been cleared.
2605 if (thread->in_critical()) {
2606 return;
2607 }
2608 // Lock and unlock a critical section to give the system a chance to block
2609 GC_locker::lock_critical(thread);
2610 GC_locker::unlock_critical(thread);
2611 JRT_END
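// The lock/unlock pair above is the blocking point: lock_critical() parks the
// thread while a pending GC needs the critical sections drained, and the
// immediate unlock leaves it outside any critical region afterwards.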
2613 #ifdef HAVE_DTRACE_H
2614 // Create a dtrace nmethod for this method. The wrapper converts the
2615 // java compiled calling convention to the native convention, makes a dummy call
2616 // (actually nops for the size of the call instruction, which become a trap if the
2617 // probe is enabled). Then it returns to the caller. Since this all looks like a
2618 // leaf no thread transition is needed.
2620 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
2621 ResourceMark rm;
2622 nmethod* nm = NULL;
2624 if (PrintCompilation) {
2625 ttyLocker ttyl;
2626 tty->print("--- n ");
2627 method->print_short_name(tty);
2628 if (method->is_static()) {
2629 tty->print(" (static)");
2630 }
2631 tty->cr();
2632 }
2634 {
2635 // perform the work while holding the lock, but perform any printing
2636 // outside the lock
2637 MutexLocker mu(AdapterHandlerLibrary_lock);
2638 // See if somebody beat us to it
2639 nm = method->code();
2640 if (nm) {
2641 return nm;
2642 }
2644 ResourceMark rm;
2646 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
2647 if (buf != NULL) {
2648 CodeBuffer buffer(buf);
2649 // Need a few relocation entries
2650 double locs_buf[20];
2651 buffer.insts()->initialize_shared_locs(
2652 (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
2653 MacroAssembler _masm(&buffer);
2655 // Generate the compiled-to-native wrapper code
2656 nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
2657 }
2658 }
2659 return nm;
2660 }
2662 // the dtrace method needs to convert a java.lang.String to a UTF-8 string.
2663 void SharedRuntime::get_utf(oopDesc* src, address dst) {
2664 typeArrayOop jlsValue = java_lang_String::value(src);
2665 int jlsOffset = java_lang_String::offset(src);
2666 int jlsLen = java_lang_String::length(src);
2667 jchar* jlsPos = (jlsLen == 0) ? NULL :
2668 jlsValue->char_at_addr(jlsOffset);
2669 assert(typeArrayKlass::cast(jlsValue->klass())->element_type() == T_CHAR, "compressed string");
2670 (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
2671 }
2672 #endif // ndef HAVE_DTRACE_H
2674 // -------------------------------------------------------------------------
2675 // Java-Java calling convention
2676 // (what you use when Java calls Java)
2678 //------------------------------name_for_receiver----------------------------------
2679 // For a given signature, return the VMReg for parameter 0.
2680 VMReg SharedRuntime::name_for_receiver() {
2681 VMRegPair regs;
2682 BasicType sig_bt = T_OBJECT;
2683 (void) java_calling_convention(&sig_bt, &regs, 1, true);
2684 // Return argument 0 register. In the LP64 build pointers
2685 // take 2 registers, but the VM wants only the 'main' name.
2686 return regs.first();
2687 }
2689 VMRegPair *SharedRuntime::find_callee_arguments(Symbol* sig, bool has_receiver, int* arg_size) {
2690 // This method returns a data structure allocated as a
2691 // ResourceObject, so do not put any ResourceMarks in here.
2692 char *s = sig->as_C_string();
2693 int len = (int)strlen(s);
2694 s++; len--; // Skip opening paren
2695 char *t = s+len;
2696 while( *(--t) != ')' ) ; // Find close paren
2698 BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
2699 VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
2700 int cnt = 0;
2701 if (has_receiver) {
2702 sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
2703 }
2705 while( s < t ) {
2706 switch( *s++ ) { // Switch on signature character
2707 case 'B': sig_bt[cnt++] = T_BYTE; break;
2708 case 'C': sig_bt[cnt++] = T_CHAR; break;
2709 case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break;
2710 case 'F': sig_bt[cnt++] = T_FLOAT; break;
2711 case 'I': sig_bt[cnt++] = T_INT; break;
2712 case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break;
2713 case 'S': sig_bt[cnt++] = T_SHORT; break;
2714 case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
2715 case 'V': sig_bt[cnt++] = T_VOID; break;
2716 case 'L': // Oop
2717 while( *s++ != ';' ) ; // Skip signature
2718 sig_bt[cnt++] = T_OBJECT;
2719 break;
2720 case '[': { // Array
2721 do { // Skip optional size
2722 while( *s >= '0' && *s <= '9' ) s++;
2723 } while( *s++ == '[' ); // Nested arrays?
2724 // Skip element type
2725 if( s[-1] == 'L' )
2726 while( *s++ != ';' ) ; // Skip signature
2727 sig_bt[cnt++] = T_ARRAY;
2728 break;
2729 }
2730 default : ShouldNotReachHere();
2731 }
2732 }
2733 assert( cnt < 256, "grow table size" );
2735 int comp_args_on_stack;
2736 comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
2738 // the calling convention doesn't count out_preserve_stack_slots so
2739 // we must add that in to get "true" stack offsets.
2741 if (comp_args_on_stack) {
2742 for (int i = 0; i < cnt; i++) {
2743 VMReg reg1 = regs[i].first();
2744 if( reg1->is_stack()) {
2745 // Yuck
2746 reg1 = reg1->bias(out_preserve_stack_slots());
2747 }
2748 VMReg reg2 = regs[i].second();
2749 if( reg2->is_stack()) {
2750 // Yuck
2751 reg2 = reg2->bias(out_preserve_stack_slots());
2752 }
2753 regs[i].set_pair(reg2, reg1);
2754 }
2755 }
2757 // results
2758 *arg_size = cnt;
2759 return regs;
2760 }
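// A standalone sketch (not VM code) of the signature walk above, simplified in
// that the optional old-style array sizes are not handled, with hypothetical
// local stand-ins for the BasicType constants:
#if 0
#include <stdio.h>
#include <string.h>

enum BT { BT_BYTE, BT_CHAR, BT_DOUBLE, BT_FLOAT, BT_INT,
          BT_LONG, BT_SHORT, BT_BOOLEAN, BT_VOID, BT_OBJECT, BT_ARRAY };
static const char* bt_name[] = { "T_BYTE", "T_CHAR", "T_DOUBLE", "T_FLOAT",
  "T_INT", "T_LONG", "T_SHORT", "T_BOOLEAN", "T_VOID", "T_OBJECT", "T_ARRAY" };

int main() {
  const char* sig = "(I[Ljava/lang/String;J)V"; // example method signature
  const char* s = sig + 1;                      // skip opening paren
  const char* t = strrchr(sig, ')');            // find close paren
  BT sig_bt[256];
  int cnt = 0;
  sig_bt[cnt++] = BT_OBJECT;                    // receiver is argument 0
  while (s < t) {
    switch (*s++) {
    case 'B': sig_bt[cnt++] = BT_BYTE;    break;
    case 'C': sig_bt[cnt++] = BT_CHAR;    break;
    case 'D': sig_bt[cnt++] = BT_DOUBLE;  sig_bt[cnt++] = BT_VOID; break;
    case 'F': sig_bt[cnt++] = BT_FLOAT;   break;
    case 'I': sig_bt[cnt++] = BT_INT;     break;
    case 'J': sig_bt[cnt++] = BT_LONG;    sig_bt[cnt++] = BT_VOID; break;
    case 'S': sig_bt[cnt++] = BT_SHORT;   break;
    case 'Z': sig_bt[cnt++] = BT_BOOLEAN; break;
    case 'L': while (*s++ != ';') ;       sig_bt[cnt++] = BT_OBJECT; break;
    case '[':
      while (*s == '[') s++;                    // nested arrays
      if (*s++ == 'L') while (*s++ != ';') ;    // skip element class name
      sig_bt[cnt++] = BT_ARRAY;
      break;
    default: break;
    }
  }
  for (int i = 0; i < cnt; i++) printf("%s\n", bt_name[i]);
  // prints T_OBJECT, T_INT, T_ARRAY, T_LONG, T_VOID -- cnt == 5
  return 0;
}
#endif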
2762 // OSR Migration Code
2763 //
2764 // This code is used to convert interpreter frames into compiled frames. It is
2765 // called from the very start of a compiled OSR nmethod. A temp array is
2766 // allocated to hold the interesting bits of the interpreter frame. All
2767 // active locks are inflated to allow them to move. The displaced headers and
2768 // active interpreter locals are copied into the temp buffer. Then we return
2769 // back to the compiled code. The compiled code then pops the current
2770 // interpreter frame off the stack and pushes a new compiled frame. Then it
2771 // copies the interpreter locals and displaced headers where it wants.
2772 // Finally it calls back to free the temp buffer.
2773 //
2774 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
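// Resulting temp-buffer layout, sketched for max_locals == 3 and two active
// monitors:
//
//   buf[0..2] : interpreter locals (copied as raw words; safe, since no GC)
//   buf[3]    : displaced header of monitor 0     buf[4] : monitor 0's object
//   buf[5]    : displaced header of monitor 1     buf[6] : monitor 1's object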
2776 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
2778 #ifdef IA64
2779 ShouldNotReachHere(); // NYI
2780 #endif /* IA64 */
2782 //
2783 // This code is dependent on the memory layout of the interpreter local
2784 // array and the monitors. On all of our platforms the layout is identical
2785 // so this code is shared. If some platform lays its arrays out
2786 // differently then this code could move to platform specific code or
2787 // the code here could be modified to copy items one at a time using
2788 // frame accessor methods and be platform independent.
2790 frame fr = thread->last_frame();
2791 assert( fr.is_interpreted_frame(), "" );
2792 assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
2794 // Figure out how many monitors are active.
2795 int active_monitor_count = 0;
2796 for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
2797 kptr < fr.interpreter_frame_monitor_begin();
2798 kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
2799 if( kptr->obj() != NULL ) active_monitor_count++;
2800 }
2802 // QQQ we could place number of active monitors in the array so that compiled code
2803 // could double check it.
2805 Method* moop = fr.interpreter_frame_method();
2806 int max_locals = moop->max_locals();
2807 // Allocate temp buffer, 1 word per local & 2 per active monitor
2808 int buf_size_words = max_locals + active_monitor_count*2;
2809 intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words, mtCode);
2811 // Copy the locals. Order is preserved so that loading of longs works.
2812 // Since there's no GC I can copy the oops blindly.
2813 assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
2814 Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
2815 (HeapWord*)&buf[0],
2816 max_locals);
2818 // Inflate locks. Copy the displaced headers. Be careful, there can be holes.
2819 int i = max_locals;
2820 for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
2821 kptr2 < fr.interpreter_frame_monitor_begin();
2822 kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
2823 if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array
2824 BasicLock *lock = kptr2->lock();
2825 // Inflate so the displaced header becomes position-independent
2826 if (lock->displaced_header()->is_unlocked())
2827 ObjectSynchronizer::inflate_helper(kptr2->obj());
2828 // Now the displaced header is free to move
2829 buf[i++] = (intptr_t)lock->displaced_header();
2830 buf[i++] = (intptr_t)kptr2->obj();
2831 }
2832 }
2833 assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
2835 return buf;
2836 JRT_END
2838 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
2839 FREE_C_HEAP_ARRAY(intptr_t,buf, mtCode);
2840 JRT_END
2842 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
2843 AdapterHandlerTableIterator iter(_adapters);
2844 while (iter.has_next()) {
2845 AdapterHandlerEntry* a = iter.next();
2846 if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
2847 }
2848 return false;
2849 }
2851 void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
2852 AdapterHandlerTableIterator iter(_adapters);
2853 while (iter.has_next()) {
2854 AdapterHandlerEntry* a = iter.next();
2855 if (b == CodeCache::find_blob(a->get_i2c_entry())) {
2856 st->print("Adapter for signature: ");
2857 a->print_adapter_on(st);
2858 return;
2859 }
2860 }
2861 assert(false, "Should have found handler");
2862 }
2864 void AdapterHandlerEntry::print_adapter_on(outputStream* st) const {
2865 st->print_cr("AHE@" INTPTR_FORMAT ": %s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
2866 (intptr_t) this, fingerprint()->as_string(),
2867 get_i2c_entry(), get_c2i_entry(), get_c2i_unverified_entry());
2869 }
2871 #ifndef PRODUCT
2873 void AdapterHandlerLibrary::print_statistics() {
2874 _adapters->print_statistics();
2875 }
2877 #endif /* PRODUCT */