src/share/vm/runtime/sharedRuntime.cpp

author       acorn
date         Fri, 22 Oct 2010 15:59:34 -0400
changeset    2233:fa83ab460c54
parent       2223:3dc12ef8735e
child        2314:f95d63e2154a
permissions  -rw-r--r--

6988353: refactor contended sync subsystem
Summary: reduce complexity by factoring synchronizer.cpp
Reviewed-by: dholmes, never, coleenp

     1 /*
     2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_sharedRuntime.cpp.incl"
    27 #include <math.h>
    29 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
    30 HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
    31                       char*, int, char*, int, char*, int);
    32 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
    33                       char*, int, char*, int, char*, int);
    35 // Implementation of SharedRuntime
    37 #ifndef PRODUCT
    38 // For statistics
    39 int SharedRuntime::_ic_miss_ctr = 0;
    40 int SharedRuntime::_wrong_method_ctr = 0;
    41 int SharedRuntime::_resolve_static_ctr = 0;
    42 int SharedRuntime::_resolve_virtual_ctr = 0;
    43 int SharedRuntime::_resolve_opt_virtual_ctr = 0;
    44 int SharedRuntime::_implicit_null_throws = 0;
    45 int SharedRuntime::_implicit_div0_throws = 0;
    46 int SharedRuntime::_throw_null_ctr = 0;
    48 int SharedRuntime::_nof_normal_calls = 0;
    49 int SharedRuntime::_nof_optimized_calls = 0;
    50 int SharedRuntime::_nof_inlined_calls = 0;
    51 int SharedRuntime::_nof_megamorphic_calls = 0;
    52 int SharedRuntime::_nof_static_calls = 0;
    53 int SharedRuntime::_nof_inlined_static_calls = 0;
    54 int SharedRuntime::_nof_interface_calls = 0;
    55 int SharedRuntime::_nof_optimized_interface_calls = 0;
    56 int SharedRuntime::_nof_inlined_interface_calls = 0;
    57 int SharedRuntime::_nof_megamorphic_interface_calls = 0;
    58 int SharedRuntime::_nof_removable_exceptions = 0;
    60 int SharedRuntime::_new_instance_ctr=0;
    61 int SharedRuntime::_new_array_ctr=0;
    62 int SharedRuntime::_multi1_ctr=0;
    63 int SharedRuntime::_multi2_ctr=0;
    64 int SharedRuntime::_multi3_ctr=0;
    65 int SharedRuntime::_multi4_ctr=0;
    66 int SharedRuntime::_multi5_ctr=0;
    67 int SharedRuntime::_mon_enter_stub_ctr=0;
    68 int SharedRuntime::_mon_exit_stub_ctr=0;
    69 int SharedRuntime::_mon_enter_ctr=0;
    70 int SharedRuntime::_mon_exit_ctr=0;
    71 int SharedRuntime::_partial_subtype_ctr=0;
    72 int SharedRuntime::_jbyte_array_copy_ctr=0;
    73 int SharedRuntime::_jshort_array_copy_ctr=0;
    74 int SharedRuntime::_jint_array_copy_ctr=0;
    75 int SharedRuntime::_jlong_array_copy_ctr=0;
    76 int SharedRuntime::_oop_array_copy_ctr=0;
    77 int SharedRuntime::_checkcast_array_copy_ctr=0;
    78 int SharedRuntime::_unsafe_array_copy_ctr=0;
    79 int SharedRuntime::_generic_array_copy_ctr=0;
    80 int SharedRuntime::_slow_array_copy_ctr=0;
    81 int SharedRuntime::_find_handler_ctr=0;
    82 int SharedRuntime::_rethrow_ctr=0;
    84 int     SharedRuntime::_ICmiss_index                    = 0;
    85 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
    86 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
    88 void SharedRuntime::trace_ic_miss(address at) {
    89   for (int i = 0; i < _ICmiss_index; i++) {
    90     if (_ICmiss_at[i] == at) {
    91       _ICmiss_count[i]++;
    92       return;
    93     }
    94   }
    95   int index = _ICmiss_index++;
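          // When the table fills up, subsequent misses keep overwriting the last
          // slot rather than running past the end of the arrays.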
    96   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
    97   _ICmiss_at[index] = at;
    98   _ICmiss_count[index] = 1;
    99 }
   101 void SharedRuntime::print_ic_miss_histogram() {
   102   if (ICMissHistogram) {
   103     tty->print_cr ("IC Miss Histogram:");
   104     int tot_misses = 0;
   105     for (int i = 0; i < _ICmiss_index; i++) {
   106       tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
   107       tot_misses += _ICmiss_count[i];
   108     }
   109     tty->print_cr ("Total IC misses: %7d", tot_misses);
   110   }
   111 }
   112 #endif // PRODUCT
   114 #ifndef SERIALGC
   116 // G1 write-barrier pre: executed before a pointer store.
   117 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
   118   if (orig == NULL) {
   119     assert(false, "should be optimized out");
   120     return;
   121   }
   122   assert(orig->is_oop(true /* ignore mark word */), "Error");
   123   // store the original value that was in the field reference
   124   thread->satb_mark_queue().enqueue(orig);
   125 JRT_END
   127 // G1 write-barrier post: executed after a pointer store.
   128 JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
   129   thread->dirty_card_queue().enqueue(card_addr);
   130 JRT_END
   132 #endif // !SERIALGC
   135 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
   136   return x * y;
   137 JRT_END
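        // The divide/remainder entries below special-case min_jlong / -1: the Java
        // result is well defined (the quotient wraps to min_jlong, the remainder is 0),
        // while the hardware divide instruction may trap on that overflow.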
   140 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
   141   if (x == min_jlong && y == CONST64(-1)) {
   142     return x;
   143   } else {
   144     return x / y;
   145   }
   146 JRT_END
   149 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
   150   if (x == min_jlong && y == CONST64(-1)) {
   151     return 0;
   152   } else {
   153     return x % y;
   154   }
   155 JRT_END
   158 const juint  float_sign_mask  = 0x7FFFFFFF;
   159 const juint  float_infinity   = 0x7F800000;
   160 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
   161 const julong double_infinity  = CONST64(0x7FF0000000000000);
   163 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat  x, jfloat  y))
   164 #ifdef _WIN64
   165   // 64-bit Windows on amd64 returns the wrong values for
   166   // infinity operands.
   167   union { jfloat f; juint i; } xbits, ybits;
   168   xbits.f = x;
   169   ybits.f = y;
   170   // x Mod Infinity == x unless x is infinity
   171   if ( ((xbits.i & float_sign_mask) != float_infinity) &&
   172        ((ybits.i & float_sign_mask) == float_infinity) ) {
   173     return x;
   174   }
   175 #endif
   176   return ((jfloat)fmod((double)x,(double)y));
   177 JRT_END
   180 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
   181 #ifdef _WIN64
   182   union { jdouble d; julong l; } xbits, ybits;
   183   xbits.d = x;
   184   ybits.d = y;
   185   // x Mod Infinity == x unless x is infinity
   186   if ( ((xbits.l & double_sign_mask) != double_infinity) &&
   187        ((ybits.l & double_sign_mask) == double_infinity) ) {
   188     return x;
   189   }
   190 #endif
   191   return ((jdouble)fmod((double)x,(double)y));
   192 JRT_END
   194 #ifdef __SOFTFP__
   195 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
   196   return x + y;
   197 JRT_END
   199 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
   200   return x - y;
   201 JRT_END
   203 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
   204   return x * y;
   205 JRT_END
   207 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
   208   return x / y;
   209 JRT_END
   211 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
   212   return x + y;
   213 JRT_END
   215 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
   216   return x - y;
   217 JRT_END
   219 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
   220   return x * y;
   221 JRT_END
   223 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
   224   return x / y;
   225 JRT_END
   227 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
   228   return (jfloat)x;
   229 JRT_END
   231 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
   232   return (jdouble)x;
   233 JRT_END
   235 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
   236   return (jdouble)x;
   237 JRT_END
   239 JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
   240   return x>y ? 1 : (x==y ? 0 : -1);  /* x<y or is_nan*/
   241 JRT_END
   243 JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
   244   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
   245 JRT_END
   247 JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
   248   return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
   249 JRT_END
   251 JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
   252   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
   253 JRT_END
   255 // Functions to return the opposite of the aeabi functions for nan.
   256 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
   257   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   258 JRT_END
   260 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
   261   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   262 JRT_END
   264 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
   265   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   266 JRT_END
   268 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
   269   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   270 JRT_END
   272 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
   273   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   274 JRT_END
   276 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
   277   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   278 JRT_END
   280 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
   281   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   282 JRT_END
   284 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
   285   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   286 JRT_END
   288 // Intrinsics make gcc generate code for these.
   289 float  SharedRuntime::fneg(float f)   {
   290   return -f;
   291 }
   293 double SharedRuntime::dneg(double f)  {
   294   return -f;
   295 }
   297 #endif // __SOFTFP__
   299 #if defined(__SOFTFP__) || defined(E500V2)
   300 // Intrinsics make gcc generate code for these.
   301 double SharedRuntime::dabs(double f)  {
   302   return (f <= (double)0.0) ? (double)0.0 - f : f;
   303 }
   305 #endif
   307 #if defined(__SOFTFP__) || defined(PPC)
   308 double SharedRuntime::dsqrt(double f) {
   309   return sqrt(f);
   310 }
   311 #endif
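        // The floating-point to integral conversions below follow the Java narrowing
        // rules: NaN converts to 0, and out-of-range values saturate to the minimum
        // or maximum value of the target type.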
   313 JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
   314   if (g_isnan(x))
   315     return 0;
   316   if (x >= (jfloat) max_jint)
   317     return max_jint;
   318   if (x <= (jfloat) min_jint)
   319     return min_jint;
   320   return (jint) x;
   321 JRT_END
   324 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
   325   if (g_isnan(x))
   326     return 0;
   327   if (x >= (jfloat) max_jlong)
   328     return max_jlong;
   329   if (x <= (jfloat) min_jlong)
   330     return min_jlong;
   331   return (jlong) x;
   332 JRT_END
   335 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
   336   if (g_isnan(x))
   337     return 0;
   338   if (x >= (jdouble) max_jint)
   339     return max_jint;
   340   if (x <= (jdouble) min_jint)
   341     return min_jint;
   342   return (jint) x;
   343 JRT_END
   346 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
   347   if (g_isnan(x))
   348     return 0;
   349   if (x >= (jdouble) max_jlong)
   350     return max_jlong;
   351   if (x <= (jdouble) min_jlong)
   352     return min_jlong;
   353   return (jlong) x;
   354 JRT_END
   357 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
   358   return (jfloat)x;
   359 JRT_END
   362 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
   363   return (jfloat)x;
   364 JRT_END
   367 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
   368   return (jdouble)x;
   369 JRT_END
    371 // Exception handling across interpreter/compiler boundaries
   372 //
   373 // exception_handler_for_return_address(...) returns the continuation address.
   374 // The continuation address is the entry point of the exception handler of the
   375 // previous frame depending on the return address.
   377 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   378   assert(frame::verify_return_pc(return_address), "must be a return pc");
   380   // Reset MethodHandle flag.
   381   thread->set_is_method_handle_return(false);
   383   // the fastest case first
   384   CodeBlob* blob = CodeCache::find_blob(return_address);
   385   if (blob != NULL && blob->is_nmethod()) {
   386     nmethod* code = (nmethod*)blob;
   387     assert(code != NULL, "nmethod must be present");
   388     // Check if the return address is a MethodHandle call site.
   389     thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
   390     // native nmethods don't have exception handlers
   391     assert(!code->is_native_method(), "no exception handler");
   392     assert(code->header_begin() != code->exception_begin(), "no exception handler");
   393     if (code->is_deopt_pc(return_address)) {
   394       return SharedRuntime::deopt_blob()->unpack_with_exception();
   395     } else {
   396       return code->exception_begin();
   397     }
   398   }
   400   // Entry code
   401   if (StubRoutines::returns_to_call_stub(return_address)) {
   402     return StubRoutines::catch_exception_entry();
   403   }
   404   // Interpreted code
   405   if (Interpreter::contains(return_address)) {
   406     return Interpreter::rethrow_exception_entry();
   407   }
   409   // Compiled code
   410   if (CodeCache::contains(return_address)) {
   411     CodeBlob* blob = CodeCache::find_blob(return_address);
   412     if (blob->is_nmethod()) {
   413       nmethod* code = (nmethod*)blob;
   414       assert(code != NULL, "nmethod must be present");
   415       // Check if the return address is a MethodHandle call site.
   416       thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
   417       assert(code->header_begin() != code->exception_begin(), "no exception handler");
   418       return code->exception_begin();
   419     }
   420     if (blob->is_runtime_stub()) {
   421       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
   422     }
   423   }
   424   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
   425 #ifndef PRODUCT
   426   { ResourceMark rm;
   427     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
    428     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
   429     tty->print_cr("b) other problem");
   430   }
   431 #endif // PRODUCT
   432   ShouldNotReachHere();
   433   return NULL;
   434 }
   437 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
   438   return raw_exception_handler_for_return_address(thread, return_address);
   439 JRT_END
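        // Returns the stub that completes a safepoint for compiled code stopped at a
        // polling-page read, choosing the poll-return or poll-in-loop handler based on
        // the faulting pc.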
   442 address SharedRuntime::get_poll_stub(address pc) {
   443   address stub;
   444   // Look up the code blob
   445   CodeBlob *cb = CodeCache::find_blob(pc);
   447   // Should be an nmethod
   448   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
   450   // Look up the relocation information
   451   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
   452     "safepoint polling: type must be poll" );
   454   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
   455     "Only polling locations are used for safepoint");
   457   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
   458   if (at_poll_return) {
   459     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
   460            "polling page return stub not created yet");
   461     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
   462   } else {
   463     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
   464            "polling page safepoint stub not created yet");
   465     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
   466   }
   467 #ifndef PRODUCT
   468   if( TraceSafepoint ) {
   469     char buf[256];
   470     jio_snprintf(buf, sizeof(buf),
   471                  "... found polling page %s exception at pc = "
   472                  INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
   473                  at_poll_return ? "return" : "loop",
   474                  (intptr_t)pc, (intptr_t)stub);
   475     tty->print_raw_cr(buf);
   476   }
   477 #endif // PRODUCT
   478   return stub;
   479 }
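        // Reads the receiver oop of a call in progress from the interpreted caller's
        // expression stack; the receiver sits beneath the pushed arguments.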
   482 oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
   483   assert(caller.is_interpreted_frame(), "");
   484   int args_size = ArgumentSizeComputer(sig).size() + 1;
   485   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
   486   oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
   487   assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
   488   return result;
   489 }
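        // Posts a JVMTI exception event (when enabled) for the throwing bytecode and
        // then raises the exception in the given thread.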
   492 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
   493   if (JvmtiExport::can_post_on_exceptions()) {
   494     vframeStream vfst(thread, true);
   495     methodHandle method = methodHandle(thread, vfst.method());
   496     address bcp = method()->bcp_from(vfst.bci());
   497     JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
   498   }
   499   Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
   500 }
   502 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
   503   Handle h_exception = Exceptions::new_exception(thread, name, message);
   504   throw_and_post_jvmti_exception(thread, h_exception);
   505 }
   507 // The interpreter code to call this tracing function is only
   508 // called/generated when TraceRedefineClasses has the right bits
   509 // set. Since obsolete methods are never compiled, we don't have
   510 // to modify the compilers to generate calls to this function.
   511 //
   512 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
   513     JavaThread* thread, methodOopDesc* method))
   514   assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");
   516   if (method->is_obsolete()) {
   517     // We are calling an obsolete method, but this is not necessarily
   518     // an error. Our method could have been redefined just after we
   519     // fetched the methodOop from the constant pool.
   521     // RC_TRACE macro has an embedded ResourceMark
   522     RC_TRACE_WITH_THREAD(0x00001000, thread,
   523                          ("calling obsolete method '%s'",
   524                           method->name_and_sig_as_C_string()));
   525     if (RC_TRACE_ENABLED(0x00002000)) {
   526       // this option is provided to debug calls to obsolete methods
   527       guarantee(false, "faulting at call to an obsolete method.");
   528     }
   529   }
   530   return 0;
   531 JRT_END
   533 // ret_pc points into caller; we are returning caller's exception handler
   534 // for given exception
   535 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
   536                                                     bool force_unwind, bool top_frame_only) {
   537   assert(nm != NULL, "must exist");
   538   ResourceMark rm;
   540   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
   541   // determine handler bci, if any
   542   EXCEPTION_MARK;
   544   int handler_bci = -1;
   545   int scope_depth = 0;
   546   if (!force_unwind) {
   547     int bci = sd->bci();
   548     do {
   549       bool skip_scope_increment = false;
   550       // exception handler lookup
   551       KlassHandle ek (THREAD, exception->klass());
   552       handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
   553       if (HAS_PENDING_EXCEPTION) {
   554         // We threw an exception while trying to find the exception handler.
   555         // Transfer the new exception to the exception handle which will
   556         // be set into thread local storage, and do another lookup for an
   557         // exception handler for this exception, this time starting at the
   558         // BCI of the exception handler which caused the exception to be
   559         // thrown (bugs 4307310 and 4546590). Set "exception" reference
   560         // argument to ensure that the correct exception is thrown (4870175).
   561         exception = Handle(THREAD, PENDING_EXCEPTION);
   562         CLEAR_PENDING_EXCEPTION;
   563         if (handler_bci >= 0) {
   564           bci = handler_bci;
   565           handler_bci = -1;
   566           skip_scope_increment = true;
   567         }
   568       }
   569       if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
   570         sd = sd->sender();
   571         if (sd != NULL) {
   572           bci = sd->bci();
   573         }
   574         ++scope_depth;
   575       }
   576     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
   577   }
   579   // found handling method => lookup exception handler
   580   int catch_pco = ret_pc - nm->code_begin();
   582   ExceptionHandlerTable table(nm);
   583   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
   584   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
   585     // Allow abbreviated catch tables.  The idea is to allow a method
   586     // to materialize its exceptions without committing to the exact
   587     // routing of exceptions.  In particular this is needed for adding
    588     // a synthetic handler to unlock monitors when inlining
    589     // synchronized methods since the unlock path isn't represented in
   590     // the bytecodes.
   591     t = table.entry_for(catch_pco, -1, 0);
   592   }
   594 #ifdef COMPILER1
   595   if (t == NULL && nm->is_compiled_by_c1()) {
   596     assert(nm->unwind_handler_begin() != NULL, "");
   597     return nm->unwind_handler_begin();
   598   }
   599 #endif
   601   if (t == NULL) {
   602     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
   603     tty->print_cr("   Exception:");
   604     exception->print();
   605     tty->cr();
   606     tty->print_cr(" Compiled exception table :");
   607     table.print();
   608     nm->print_code();
   609     guarantee(false, "missing exception handler");
   610     return NULL;
   611   }
   613   return nm->code_begin() + t->pco();
   614 }
   616 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
   617   // These errors occur only at call sites
   618   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
   619 JRT_END
   621 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
   622   // These errors occur only at call sites
   623   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
   624 JRT_END
   626 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
   627   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
   628 JRT_END
   630 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
   631   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   632 JRT_END
   634 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
   635   // This entry point is effectively only used for NullPointerExceptions which occur at inline
   636   // cache sites (when the callee activation is not yet set up) so we are at a call site
   637   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   638 JRT_END
   640 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
   641   // We avoid using the normal exception construction in this case because
   642   // it performs an upcall to Java, and we're already out of stack space.
   643   klassOop k = SystemDictionary::StackOverflowError_klass();
   644   oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
   645   Handle exception (thread, exception_oop);
   646   if (StackTraceInThrowable) {
   647     java_lang_Throwable::fill_in_stack_trace(exception);
   648   }
   649   throw_and_post_jvmti_exception(thread, exception);
   650 JRT_END
   652 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
   653                                                            address pc,
   654                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
   655 {
   656   address target_pc = NULL;
   658   if (Interpreter::contains(pc)) {
   659 #ifdef CC_INTERP
   660     // C++ interpreter doesn't throw implicit exceptions
   661     ShouldNotReachHere();
   662 #else
   663     switch (exception_kind) {
   664       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
   665       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
   666       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
   667       default:                      ShouldNotReachHere();
   668     }
   669 #endif // !CC_INTERP
   670   } else {
   671     switch (exception_kind) {
   672       case STACK_OVERFLOW: {
   673         // Stack overflow only occurs upon frame setup; the callee is
   674         // going to be unwound. Dispatch to a shared runtime stub
   675         // which will cause the StackOverflowError to be fabricated
   676         // and processed.
   677         // For stack overflow in deoptimization blob, cleanup thread.
   678         if (thread->deopt_mark() != NULL) {
   679           Deoptimization::cleanup_deopt_info(thread, NULL);
   680         }
   681         return StubRoutines::throw_StackOverflowError_entry();
   682       }
   684       case IMPLICIT_NULL: {
   685         if (VtableStubs::contains(pc)) {
   686           // We haven't yet entered the callee frame. Fabricate an
   687           // exception and begin dispatching it in the caller. Since
   688           // the caller was at a call site, it's safe to destroy all
   689           // caller-saved registers, as these entry points do.
   690           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
   692           // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
   693           if (vt_stub == NULL) return NULL;
   695           if (vt_stub->is_abstract_method_error(pc)) {
   696             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
   697             return StubRoutines::throw_AbstractMethodError_entry();
   698           } else {
   699             return StubRoutines::throw_NullPointerException_at_call_entry();
   700           }
   701         } else {
   702           CodeBlob* cb = CodeCache::find_blob(pc);
   704           // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
   705           if (cb == NULL) return NULL;
   707           // Exception happened in CodeCache. Must be either:
   708           // 1. Inline-cache check in C2I handler blob,
   709           // 2. Inline-cache check in nmethod, or
    710           // 3. Implicit null exception in nmethod
   712           if (!cb->is_nmethod()) {
   713             guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
   714                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
   715             // There is no handler here, so we will simply unwind.
   716             return StubRoutines::throw_NullPointerException_at_call_entry();
   717           }
   719           // Otherwise, it's an nmethod.  Consult its exception handlers.
   720           nmethod* nm = (nmethod*)cb;
   721           if (nm->inlinecache_check_contains(pc)) {
   722             // exception happened inside inline-cache check code
   723             // => the nmethod is not yet active (i.e., the frame
   724             // is not set up yet) => use return address pushed by
   725             // caller => don't push another return address
   726             return StubRoutines::throw_NullPointerException_at_call_entry();
   727           }
   729 #ifndef PRODUCT
   730           _implicit_null_throws++;
   731 #endif
   732           target_pc = nm->continuation_for_implicit_exception(pc);
   733           // If there's an unexpected fault, target_pc might be NULL,
   734           // in which case we want to fall through into the normal
   735           // error handling code.
   736         }
   738         break; // fall through
   739       }
   742       case IMPLICIT_DIVIDE_BY_ZERO: {
   743         nmethod* nm = CodeCache::find_nmethod(pc);
   744         guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
   745 #ifndef PRODUCT
   746         _implicit_div0_throws++;
   747 #endif
   748         target_pc = nm->continuation_for_implicit_exception(pc);
   749         // If there's an unexpected fault, target_pc might be NULL,
   750         // in which case we want to fall through into the normal
   751         // error handling code.
   752         break; // fall through
   753       }
   755       default: ShouldNotReachHere();
   756     }
   758     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
   760     // for AbortVMOnException flag
   761     NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
   762     if (exception_kind == IMPLICIT_NULL) {
   763       Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   764     } else {
   765       Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   766     }
   767     return target_pc;
   768   }
   770   ShouldNotReachHere();
   771   return NULL;
   772 }
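        // Used as the native entry point for native methods whose implementation has
        // not been linked yet; invoking it throws UnsatisfiedLinkError.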
   775 JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
   776 {
   777   THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
   778 }
   779 JNI_END
   782 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
   783   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
   784 }
   787 #ifndef PRODUCT
   788 JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
   789   const frame f = thread->last_frame();
   790   assert(f.is_interpreted_frame(), "must be an interpreted frame");
   791 #ifndef PRODUCT
   792   methodHandle mh(THREAD, f.interpreter_frame_method());
   793   BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
   794 #endif // !PRODUCT
   795   return preserve_this_value;
   796 JRT_END
   797 #endif // !PRODUCT
   800 JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
   801   os::yield_all(attempts);
   802 JRT_END
   805 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   806   assert(obj->is_oop(), "must be a valid oop");
   807   assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
   808   instanceKlass::register_finalizer(instanceOop(obj), CHECK);
   809 JRT_END
   812 jlong SharedRuntime::get_java_tid(Thread* thread) {
   813   if (thread != NULL) {
   814     if (thread->is_Java_thread()) {
   815       oop obj = ((JavaThread*)thread)->threadObj();
   816       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
   817     }
   818   }
   819   return 0;
   820 }
   822 /**
   823  * This function ought to be a void function, but cannot be because
   824  * it gets turned into a tail-call on sparc, which runs into dtrace bug
   825  * 6254741.  Once that is fixed we can remove the dummy return value.
   826  */
   827 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
   828   return dtrace_object_alloc_base(Thread::current(), o);
   829 }
   831 int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
   832   assert(DTraceAllocProbes, "wrong call");
   833   Klass* klass = o->blueprint();
   834   int size = o->size();
   835   symbolOop name = klass->name();
   836   HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
   837                    name->bytes(), name->utf8_length(), size * HeapWordSize);
   838   return 0;
   839 }
   841 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
   842     JavaThread* thread, methodOopDesc* method))
   843   assert(DTraceMethodProbes, "wrong call");
   844   symbolOop kname = method->klass_name();
   845   symbolOop name = method->name();
   846   symbolOop sig = method->signature();
   847   HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
   848       kname->bytes(), kname->utf8_length(),
   849       name->bytes(), name->utf8_length(),
   850       sig->bytes(), sig->utf8_length());
   851   return 0;
   852 JRT_END
   854 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
   855     JavaThread* thread, methodOopDesc* method))
   856   assert(DTraceMethodProbes, "wrong call");
   857   symbolOop kname = method->klass_name();
   858   symbolOop name = method->name();
   859   symbolOop sig = method->signature();
   860   HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
   861       kname->bytes(), kname->utf8_length(),
   862       name->bytes(), name->utf8_length(),
   863       sig->bytes(), sig->utf8_length());
   864   return 0;
   865 JRT_END
    868 // Finds the receiver, CallInfo (i.e., receiver method), and calling bytecode
    869 // for a call currently in progress, i.e., arguments have been pushed on the stack
    870 // but the callee has not been invoked yet.  Used by: resolve virtual/static,
   871 // vtable updates, etc.  Caller frame must be compiled.
   872 Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
   873   ResourceMark rm(THREAD);
   875   // last java frame on stack (which includes native call frames)
    876   vframeStream vfst(thread, true);  // Do not skip any javaCalls
   878   return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
   879 }
    882 // Finds the receiver, CallInfo (i.e., receiver method), and calling bytecode
    883 // for a call currently in progress, i.e., arguments have been pushed on the stack
    884 // but the callee has not been invoked yet.  Caller frame must be compiled.
   885 Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
   886                                               vframeStream& vfst,
   887                                               Bytecodes::Code& bc,
   888                                               CallInfo& callinfo, TRAPS) {
   889   Handle receiver;
   890   Handle nullHandle;  //create a handy null handle for exception returns
   892   assert(!vfst.at_end(), "Java frame must exist");
   894   // Find caller and bci from vframe
   895   methodHandle caller (THREAD, vfst.method());
   896   int          bci    = vfst.bci();
   898   // Find bytecode
   899   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
   900   bc = bytecode->java_code();
   901   int bytecode_index = bytecode->index();
   903   // Find receiver for non-static call
   904   if (bc != Bytecodes::_invokestatic) {
    905     // This register map must be updated since we need to find the receiver for
   906     // compiled frames. The receiver might be in a register.
   907     RegisterMap reg_map2(thread);
   908     frame stubFrame   = thread->last_frame();
   909     // Caller-frame is a compiled frame
   910     frame callerFrame = stubFrame.sender(&reg_map2);
   912     methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
   913     if (callee.is_null()) {
   914       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
   915     }
   916     // Retrieve from a compiled argument list
   917     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
   919     if (receiver.is_null()) {
   920       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
   921     }
   922   }
   924   // Resolve method. This is parameterized by bytecode.
   925   constantPoolHandle constants (THREAD, caller->constants());
   926   assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
   927   LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
   929 #ifdef ASSERT
   930   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
   931   if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
   932     assert(receiver.not_null(), "should have thrown exception");
   933     KlassHandle receiver_klass (THREAD, receiver->klass());
   934     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
   935                             // klass is already loaded
   936     KlassHandle static_receiver_klass (THREAD, rk);
   937     assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
   938     if (receiver_klass->oop_is_instance()) {
   939       if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
   940         tty->print_cr("ERROR: Klass not yet initialized!!");
   941         receiver_klass.print();
   942       }
   943       assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
   944     }
   945   }
   946 #endif
   948   return receiver;
   949 }
   951 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
   952   ResourceMark rm(THREAD);
    953   // We first need to check if any Java activations (compiled, interpreted)
   954   // exist on the stack since last JavaCall.  If not, we need
   955   // to get the target method from the JavaCall wrapper.
   956   vframeStream vfst(thread, true);  // Do not skip any javaCalls
   957   methodHandle callee_method;
   958   if (vfst.at_end()) {
   959     // No Java frames were found on stack since we did the JavaCall.
   960     // Hence the stack can only contain an entry_frame.  We need to
   961     // find the target method from the stub frame.
   962     RegisterMap reg_map(thread, false);
   963     frame fr = thread->last_frame();
   964     assert(fr.is_runtime_frame(), "must be a runtimeStub");
   965     fr = fr.sender(&reg_map);
   966     assert(fr.is_entry_frame(), "must be");
   967     // fr is now pointing to the entry frame.
   968     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
   969     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
   970   } else {
   971     Bytecodes::Code bc;
   972     CallInfo callinfo;
   973     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
   974     callee_method = callinfo.selected_method();
   975   }
   976   assert(callee_method()->is_method(), "must be");
   977   return callee_method;
   978 }
   980 // Resolves a call.
   981 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
   982                                            bool is_virtual,
   983                                            bool is_optimized, TRAPS) {
   984   methodHandle callee_method;
   985   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
   986   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
   987     int retry_count = 0;
   988     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
   989            callee_method->method_holder() != SystemDictionary::Object_klass()) {
    990       // If there is a pending exception then there is no need to re-try to
   991       // resolve this method.
   992       // If the method has been redefined, we need to try again.
   993       // Hack: we have no way to update the vtables of arrays, so don't
   994       // require that java.lang.Object has been updated.
    996       // It is very unlikely that a method is redefined more than 100 times
    997       // in the middle of resolution. If we loop here more than 100 times
    998       // there is probably a bug here.
   999       guarantee((retry_count++ < 100),
  1000                 "Could not resolve to latest version of redefined method");
  1001       // method is redefined in the middle of resolve so re-try.
  1002       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
   1003     }
   1004   }
   1005   return callee_method;
   1006 }
  1008 // Resolves a call.  The compilers generate code for calls that go here
  1009 // and are patched with the real destination of the call.
  1010 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
  1011                                            bool is_virtual,
  1012                                            bool is_optimized, TRAPS) {
  1014   ResourceMark rm(thread);
  1015   RegisterMap cbl_map(thread, false);
  1016   frame caller_frame = thread->last_frame().sender(&cbl_map);
  1018   CodeBlob* caller_cb = caller_frame.cb();
  1019   guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
  1020   nmethod* caller_nm = caller_cb->as_nmethod_or_null();
  1021   // make sure caller is not getting deoptimized
  1022   // and removed before we are done with it.
  1023   // CLEANUP - with lazy deopt shouldn't need this lock
  1024   nmethodLocker caller_lock(caller_nm);
  1027   // determine call info & receiver
  1028   // note: a) receiver is NULL for static calls
  1029   //       b) an exception is thrown if receiver is NULL for non-static calls
  1030   CallInfo call_info;
  1031   Bytecodes::Code invoke_code = Bytecodes::_illegal;
  1032   Handle receiver = find_callee_info(thread, invoke_code,
  1033                                      call_info, CHECK_(methodHandle()));
  1034   methodHandle callee_method = call_info.selected_method();
  1036   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
  1037          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
  1039 #ifndef PRODUCT
  1040   // tracing/debugging/statistics
  1041   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
  1042                 (is_virtual) ? (&_resolve_virtual_ctr) :
  1043                                (&_resolve_static_ctr);
  1044   Atomic::inc(addr);
  1046   if (TraceCallFixup) {
  1047     ResourceMark rm(thread);
  1048     tty->print("resolving %s%s (%s) call to",
  1049       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
  1050       Bytecodes::name(invoke_code));
  1051     callee_method->print_short_name(tty);
  1052     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1053   }
  1054 #endif
  1056   // JSR 292
  1057   // If the resolved method is a MethodHandle invoke target the call
  1058   // site must be a MethodHandle call site.
  1059   if (callee_method->is_method_handle_invoke()) {
  1060     assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
   1061   }
  1063   // Compute entry points. This might require generation of C2I converter
  1064   // frames, so we cannot be holding any locks here. Furthermore, the
  1065   // computation of the entry points is independent of patching the call.  We
  1066   // always return the entry-point, but we only patch the stub if the call has
  1067   // not been deoptimized.  Return values: For a virtual call this is an
  1068   // (cached_oop, destination address) pair. For a static call/optimized
  1069   // virtual this is just a destination address.
  1071   StaticCallInfo static_call_info;
  1072   CompiledICInfo virtual_call_info;
  1074   // Make sure the callee nmethod does not get deoptimized and removed before
  1075   // we are done patching the code.
  1076   nmethod* callee_nm = callee_method->code();
  1077   nmethodLocker nl_callee(callee_nm);
  1078 #ifdef ASSERT
  1079   address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
  1080 #endif
  1082   if (is_virtual) {
  1083     assert(receiver.not_null(), "sanity check");
  1084     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
  1085     KlassHandle h_klass(THREAD, receiver->klass());
  1086     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
  1087                      is_optimized, static_bound, virtual_call_info,
  1088                      CHECK_(methodHandle()));
  1089   } else {
  1090     // static call
  1091     CompiledStaticCall::compute_entry(callee_method, static_call_info);
   1092   }
  1094   // grab lock, check for deoptimization and potentially patch caller
   1095   {
  1096     MutexLocker ml_patch(CompiledIC_lock);
   1098     // Now that we are ready to patch: if the methodOop was redefined then
   1099     // don't update the call site and let the caller retry.
  1101     if (!callee_method->is_old()) {
  1102 #ifdef ASSERT
  1103       // We must not try to patch to jump to an already unloaded method.
  1104       if (dest_entry_point != 0) {
  1105         assert(CodeCache::find_blob(dest_entry_point) != NULL,
  1106                "should not unload nmethod while locked");
   1107       }
  1108 #endif
  1109       if (is_virtual) {
  1110         CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
  1111         if (inline_cache->is_clean()) {
  1112           inline_cache->set_to_monomorphic(virtual_call_info);
   1113         }
  1114       } else {
  1115         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
  1116         if (ssc->is_clean()) ssc->set(static_call_info);
   1117       }
   1118     }
  1120   } // unlock CompiledIC_lock
  1122   return callee_method;
   1123 }
  1126 // Inline caches exist only in compiled code
  1127 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
  1128 #ifdef ASSERT
  1129   RegisterMap reg_map(thread, false);
  1130   frame stub_frame = thread->last_frame();
  1131   assert(stub_frame.is_runtime_frame(), "sanity check");
  1132   frame caller_frame = stub_frame.sender(&reg_map);
  1133   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
  1134 #endif /* ASSERT */
  1136   methodHandle callee_method;
  1137   JRT_BLOCK
  1138     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
  1139     // Return methodOop through TLS
  1140     thread->set_vm_result(callee_method());
  1141   JRT_BLOCK_END
  1142   // return compiled code entry point after potential safepoints
  1143   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1144   return callee_method->verified_code_entry();
  1145 JRT_END
  1148 // Handle call site that has been made non-entrant
  1149 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  1150   // 6243940 We might end up in here if the callee is deoptimized
  1151   // as we race to call it.  We don't want to take a safepoint if
  1152   // the caller was interpreted because the caller frame will look
  1153   // interpreted to the stack walkers and arguments are now
  1154   // "compiled" so it is much better to make this transition
  1155   // invisible to the stack walking code. The i2c path will
  1156   // place the callee method in the callee_target. It is stashed
   1157   // there because if we tried to find the callee by normal means a
   1158   // safepoint would be possible and we would have trouble gc'ing the compiled args.
  1159   RegisterMap reg_map(thread, false);
  1160   frame stub_frame = thread->last_frame();
  1161   assert(stub_frame.is_runtime_frame(), "sanity check");
  1162   frame caller_frame = stub_frame.sender(&reg_map);
  1164   // MethodHandle invokes don't have a CompiledIC and should always
  1165   // simply redispatch to the callee_target.
  1166   address   sender_pc = caller_frame.pc();
  1167   CodeBlob* sender_cb = caller_frame.cb();
  1168   nmethod*  sender_nm = sender_cb->as_nmethod_or_null();
  1169   bool is_mh_invoke_via_adapter = false;  // Direct c2c call or via adapter?
  1170   if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
  1171     // If the callee_target is set, then we have come here via an i2c
  1172     // adapter.
  1173     methodOop callee = thread->callee_target();
  1174     if (callee != NULL) {
  1175       assert(callee->is_method(), "sanity");
  1176       is_mh_invoke_via_adapter = true;
   1177     }
   1178   }
  1180   if (caller_frame.is_interpreted_frame() ||
  1181       caller_frame.is_entry_frame()       ||
  1182       is_mh_invoke_via_adapter) {
  1183     methodOop callee = thread->callee_target();
  1184     guarantee(callee != NULL && callee->is_method(), "bad handshake");
  1185     thread->set_vm_result(callee);
  1186     thread->set_callee_target(NULL);
  1187     return callee->get_c2i_entry();
   1188   }
  1190   // Must be compiled to compiled path which is safe to stackwalk
  1191   methodHandle callee_method;
  1192   JRT_BLOCK
  1193     // Force resolving of caller (if we called from compiled frame)
  1194     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
  1195     thread->set_vm_result(callee_method());
  1196   JRT_BLOCK_END
  1197   // return compiled code entry point after potential safepoints
  1198   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1199   return callee_method->verified_code_entry();
  1200 JRT_END
  1203 // resolve a static call and patch code
  1204 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  1205   methodHandle callee_method;
  1206   JRT_BLOCK
  1207     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
  1208     thread->set_vm_result(callee_method());
  1209   JRT_BLOCK_END
  1210   // return compiled code entry point after potential safepoints
  1211   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1212   return callee_method->verified_code_entry();
  1213 JRT_END
  1216 // resolve virtual call and update inline cache to monomorphic
  1217 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  1218   methodHandle callee_method;
  1219   JRT_BLOCK
  1220     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
  1221     thread->set_vm_result(callee_method());
  1222   JRT_BLOCK_END
  1223   // return compiled code entry point after potential safepoints
  1224   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1225   return callee_method->verified_code_entry();
  1226 JRT_END
  1229 // Resolve a virtual call that can be statically bound (e.g., always
  1230 // monomorphic, so it has no inline cache).  Patch code to resolved target.
  1231 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  1232   methodHandle callee_method;
  1233   JRT_BLOCK
  1234     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
  1235     thread->set_vm_result(callee_method());
  1236   JRT_BLOCK_END
  1237   // return compiled code entry point after potential safepoints
  1238   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1239   return callee_method->verified_code_entry();
  1240 JRT_END
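        // Handles an inline cache miss from compiled code: re-resolves the callee and,
        // under CompiledIC_lock, updates the call site to monomorphic or megamorphic
        // (statically bindable targets are sent back through reresolve_call_site).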
  1246 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  1247   ResourceMark rm(thread);
  1248   CallInfo call_info;
  1249   Bytecodes::Code bc;
  1251   // receiver is NULL for static calls. An exception is thrown for NULL
  1252   // receivers for non-static calls
  1253   Handle receiver = find_callee_info(thread, bc, call_info,
  1254                                      CHECK_(methodHandle()));
   1255   // Compiler1 can produce virtual call sites that can actually be statically bound.
   1256   // If we fell through to below we would think that the site was going megamorphic
   1257   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
   1258   // we'd try to do a vtable dispatch; however, methods that can be statically bound
   1259   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
   1260   // reresolution of the call site (as if we did a handle_wrong_method and not a
   1261   // plain ic_miss) and the site will be converted to an optimized virtual call site
   1262   // never to miss again. I don't believe C2 will produce code like this but if it
   1263   // did this would still be the correct thing to do for it too, hence no ifdef.
  1264   //
  1265   if (call_info.resolved_method()->can_be_statically_bound()) {
  1266     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
  1267     if (TraceCallFixup) {
  1268       RegisterMap reg_map(thread, false);
  1269       frame caller_frame = thread->last_frame().sender(&reg_map);
  1270       ResourceMark rm(thread);
  1271       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
  1272       callee_method->print_short_name(tty);
  1273       tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
  1274       tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1275     }
  1276     return callee_method;
   1277   }
  1279   methodHandle callee_method = call_info.selected_method();
  1281   bool should_be_mono = false;
  1283 #ifndef PRODUCT
  1284   Atomic::inc(&_ic_miss_ctr);
  1286   // Statistics & Tracing
  1287   if (TraceCallFixup) {
  1288     ResourceMark rm(thread);
  1289     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
  1290     callee_method->print_short_name(tty);
  1291     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1292   }
  1294   if (ICMissHistogram) {
  1295     MutexLocker m(VMStatistic_lock);
  1296     RegisterMap reg_map(thread, false);
  1297     frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
  1298     // produce statistics under the lock
  1299     trace_ic_miss(f.pc());
   1300   }
  1301 #endif
  1303   // install an event collector so that when a vtable stub is created the
  1304   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  1305   // event can't be posted when the stub is created as locks are held
  1306   // - instead the event will be deferred until the event collector goes
  1307   // out of scope.
  1308   JvmtiDynamicCodeEventCollector event_collector;
  1310   // Update inline cache to megamorphic. Skip update if caller has been
  1311   // made non-entrant or we are called from interpreted.
  1312   { MutexLocker ml_patch (CompiledIC_lock);
  1313     RegisterMap reg_map(thread, false);
  1314     frame caller_frame = thread->last_frame().sender(&reg_map);
  1315     CodeBlob* cb = caller_frame.cb();
  1316     if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
  1317       // Not a non-entrant nmethod, so find inline_cache
  1318       CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
  1319       bool should_be_mono = false;
  1320       if (inline_cache->is_optimized()) {
  1321         if (TraceCallFixup) {
  1322           ResourceMark rm(thread);
  1323           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
  1324           callee_method->print_short_name(tty);
  1325           tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1327         should_be_mono = true;
  1328       } else {
  1329         compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
  1330         if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
  1332           if (receiver()->klass() == ic_oop->holder_klass()) {
  1333             // This isn't a real miss. We must have seen that compiled code
  1334             // is now available and we want the call site converted to a
  1335             // monomorphic compiled call site.
  1336             // We can't assert for callee_method->code() != NULL because it
  1337             // could have been deoptimized in the meantime
  1338             if (TraceCallFixup) {
  1339               ResourceMark rm(thread);
  1340               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
  1341               callee_method->print_short_name(tty);
  1342               tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1344             should_be_mono = true;
  1349       if (should_be_mono) {
  1351         // We have a path that was monomorphic but was going interpreted
  1352         // and now we have (or had) a compiled entry. We correct the IC
  1353         // by using a new icBuffer.
  1354         CompiledICInfo info;
  1355         KlassHandle receiver_klass(THREAD, receiver()->klass());
  1356         inline_cache->compute_monomorphic_entry(callee_method,
  1357                                                 receiver_klass,
  1358                                                 inline_cache->is_optimized(),
  1359                                                 false,
  1360                                                 info, CHECK_(methodHandle()));
  1361         inline_cache->set_to_monomorphic(info);
  1362       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
  1363         // Change to megamorphic
  1364         inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
  1365       } else {
  1366         // Either clean or megamorphic
  1369   } // Release CompiledIC_lock
  1371   return callee_method;
  1374 //
  1375 // Resets a call-site in compiled code so it will get resolved again.
  1376 // This routine handles virtual call sites, optimized virtual call
  1377 // sites, and static call sites. It is typically used to change a call
  1378 // site's destination from compiled to interpreted.
  1379 //
  1380 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  1381   ResourceMark rm(thread);
  1382   RegisterMap reg_map(thread, false);
  1383   frame stub_frame = thread->last_frame();
  1384   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  1385   frame caller = stub_frame.sender(&reg_map);
  1387   // Do nothing if the frame isn't a live compiled frame.
  1388   // nmethod could be deoptimized by the time we get here
  1389   // so no update to the caller is needed.
  1391   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
  1393     address pc = caller.pc();
  1394     Events::log("update call-site at pc " INTPTR_FORMAT, pc);
  1396     // Default call_addr is the location of the "basic" call.
  1397     // Determine the address of the call we are reresolving. With
  1398     // Inline Caches we will always find a recognizable call.
  1399     // With Inline Caches disabled we may or may not find a
  1400     // recognizable call. We will always find a call for static
  1401     // calls and for optimized virtual calls. For vanilla virtual
  1402     // calls it depends on the state of the UseInlineCaches switch.
  1403     //
  1404     // With Inline Caches disabled we can get here for a virtual call
  1405     // for two reasons:
  1406     //   1 - calling an abstract method. The vtable for abstract methods
  1407     //       will run us thru handle_wrong_method and we will eventually
  1408     //       end up in the interpreter to throw the ame.
  1409     //   2 - a racing deoptimization. We could be doing a vanilla vtable
  1410     //       call and between the time we fetch the entry address and
  1411     //       we jump to it the target gets deoptimized. Similar to 1
  1412     //       we will wind up in the interpreter (thru a c2i with c2).
  1413     //
  1414     address call_addr = NULL;
  1416       // Get call instruction under lock because another thread may be
  1417       // busy patching it.
  1418       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1419       // Location of call instruction
  1420       if (NativeCall::is_call_before(pc)) {
  1421         NativeCall *ncall = nativeCall_before(pc);
  1422         call_addr = ncall->instruction_address();
  1426     // Check for static or virtual call
  1427     bool is_static_call = false;
  1428     nmethod* caller_nm = CodeCache::find_nmethod(pc);
  1429     // Make sure the nmethod doesn't get deoptimized and removed until
  1430     // we are done with it.
  1431     // CLEANUP - with lazy deopt shouldn't need this lock
  1432     nmethodLocker nmlock(caller_nm);
  1434     if (call_addr != NULL) {
  1435       RelocIterator iter(caller_nm, call_addr, call_addr+1);
  1436       int ret = iter.next(); // Get item
  1437       if (ret) {
  1438         assert(iter.addr() == call_addr, "must find call");
  1439         if (iter.type() == relocInfo::static_call_type) {
  1440           is_static_call = true;
  1441         } else {
  1442           assert(iter.type() == relocInfo::virtual_call_type ||
  1443                  iter.type() == relocInfo::opt_virtual_call_type
  1444                 , "unexpected relocInfo. type");
  1446       } else {
  1447         assert(!UseInlineCaches, "relocation info. must exist for this address");
  1450       // Cleaning the inline cache will force a new resolve. This is more robust
  1451       // than directly setting it to the new destination, since resolving of calls
  1452       // is always done through the same code path. (Experience shows that updating
  1453       // an inline cache to the wrong method leads to bugs that are very hard
  1454       // to track down.) It should not be performance critical, since the
  1455       // resolve is only done once.
  1457       MutexLocker ml(CompiledIC_lock);
  1458       //
  1459       // We do not patch the call site if the nmethod has been made non-entrant
  1460       // as it is a waste of time
  1461       //
  1462       if (caller_nm->is_in_use()) {
  1463         if (is_static_call) {
  1464           CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
  1465           ssc->set_to_clean();
  1466         } else {
  1467           // compiled, dispatched call (which used to call an interpreted method)
  1468           CompiledIC* inline_cache = CompiledIC_at(call_addr);
  1469           inline_cache->set_to_clean();
  1476   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
  1479 #ifndef PRODUCT
  1480   Atomic::inc(&_wrong_method_ctr);
  1482   if (TraceCallFixup) {
  1483     ResourceMark rm(thread);
  1484     tty->print("handle_wrong_method reresolving call to");
  1485     callee_method->print_short_name(tty);
  1486     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1488 #endif
  1490   return callee_method;
  1493 // ---------------------------------------------------------------------------
  1494 // We are calling the interpreter via a c2i. Normally this would mean that
  1495 // we were called by a compiled method. However we could have lost a race
  1496 // where we went int -> i2c -> c2i and so the caller could in fact be
  1497 // interpreted. If the caller is compiled we attempt to patch the caller
  1498 // so it no longer calls into the interpreter.
  1499 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  1500   methodOop moop(method);
  1502   address entry_point = moop->from_compiled_entry();
  1504   // It's possible that deoptimization can occur at a call site which hasn't
  1505   // been resolved yet, in which case this function will be called from
  1506   // an nmethod that has been patched for deopt and we can ignore the
  1507   // request for a fixup.
  1508   // It is also possible that we lost a race and from_compiled_entry
  1509   // is now back to the i2c; in that case we don't need to patch, and if
  1510   // we did we'd leap into space because the call site needs to use the
  1511   // "to interpreter" stub in order to load up the methodOop. Don't
  1512   // ask me how I know this...
  1514   CodeBlob* cb = CodeCache::find_blob(caller_pc);
  1515   if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
  1516     return;
  1519   // The check above makes sure this is an nmethod.
  1520   nmethod* nm = cb->as_nmethod_or_null();
  1521   assert(nm, "must be");
  1523   // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
  1524   // to implement MethodHandle actions.
  1525   if (nm->is_method_handle_return(caller_pc)) {
  1526     return;
  1529   // There is a benign race here. We could be attempting to patch to a compiled
  1530   // entry point at the same time the callee is being deoptimized. If that is
  1531   // the case then entry_point may in fact point to a c2i and we'd patch the
  1532   // call site with the same old data. clear_code will set code() to NULL
  1533   // at the end of it. If we happen to see that NULL then we can skip trying
  1534   // to patch. If we hit the window where the callee has a c2i in the
  1535   // from_compiled_entry and the NULL isn't present yet then we lose the race
  1536   // and patch the code with the same old data. Such is life.
  1538   if (moop->code() == NULL) return;
  1540   if (nm->is_in_use()) {
  1542     // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
  1543     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1544     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
  1545       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
  1546       //
  1547       // bug 6281185. We might get here after resolving a call site to a vanilla
  1548       // virtual call. Because the resolvee uses the verified entry it may then
  1549       // see compiled code and attempt to patch the site by calling us. This would
  1550       // then incorrectly convert the call site to optimized and it's downhill from
  1551       // there. If you're lucky you'll get the assert in the bugid, if not you've
  1552       // just made a call site that could be megamorphic into a monomorphic site
  1553       // for the rest of its life! Just another racing bug in the life of
  1554       // fixup_callers_callsite ...
  1555       //
  1556       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
  1557       iter.next();
  1558       assert(iter.has_current(), "must have a reloc at java call site");
  1559       relocInfo::relocType typ = iter.reloc()->type();
  1560       if ( typ != relocInfo::static_call_type &&
  1561            typ != relocInfo::opt_virtual_call_type &&
  1562            typ != relocInfo::static_stub_type) {
  1563         return;
  1565       address destination = call->destination();
  1566       if (destination != entry_point) {
  1567         CodeBlob* callee = CodeCache::find_blob(destination);
  1568         // callee == cb seems weird. It means calling interpreter thru stub.
  1569         if (callee == cb || callee->is_adapter_blob()) {
  1570           // static call or optimized virtual
  1571           if (TraceCallFixup) {
  1572             tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1573             moop->print_short_name(tty);
  1574             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1576           call->set_destination_mt_safe(entry_point);
  1577         } else {
  1578           if (TraceCallFixup) {
  1579             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1580             moop->print_short_name(tty);
  1581             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1583           // assert is too strong; the destination could also be a resolve stub.
  1584           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
  1586       } else {
  1587           if (TraceCallFixup) {
  1588             tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1589             moop->print_short_name(tty);
  1590             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1596 IRT_END
  1599 // same as JVM_Arraycopy, but called directly from compiled code
  1600 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
  1601                                                 oopDesc* dest, jint dest_pos,
  1602                                                 jint length,
  1603                                                 JavaThread* thread)) {
  1604 #ifndef PRODUCT
  1605   _slow_array_copy_ctr++;
  1606 #endif
  1607   // Check if we have null pointers
  1608   if (src == NULL || dest == NULL) {
  1609     THROW(vmSymbols::java_lang_NullPointerException());
  1611   // Do the copy.  The casts to arrayOop are necessary for the copy_array API,
  1612   // even though the copy_array API also performs dynamic checks to ensure
  1613   // that src and dest are truly arrays (and are conformable).
  1614   // The copy_array mechanism is awkward and could be removed, but
  1615   // the compilers don't call this function except as a last resort,
  1616   // so it probably doesn't matter.
  1617   Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
  1618                                         (arrayOopDesc*)dest, dest_pos,
  1619                                         length, thread);
  1621 JRT_END
  1623 char* SharedRuntime::generate_class_cast_message(
  1624     JavaThread* thread, const char* objName) {
  1626   // Get target class name from the checkcast instruction
  1627   vframeStream vfst(thread, true);
  1628   assert(!vfst.at_end(), "Java frame must exist");
  1629   Bytecode_checkcast* cc = Bytecode_checkcast_at(
  1630     vfst.method()->bcp_from(vfst.bci()));
  1631   Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
  1632     cc->index(), thread));
  1633   return generate_class_cast_message(objName, targetKlass->external_name());
  1636 char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
  1637                                                         oopDesc* required,
  1638                                                         oopDesc* actual) {
  1639   if (TraceMethodHandles) {
  1640     tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"",
  1641                   thread, required, actual);
  1643   assert(EnableMethodHandles, "");
  1644   oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
  1645   char* message = NULL;
  1646   if (singleKlass != NULL) {
  1647     const char* objName = "argument or return value";
  1648     if (actual != NULL) {
  1649       // be flexible about the junk passed in:
  1650       klassOop ak = (actual->is_klass()
  1651                      ? (klassOop)actual
  1652                      : actual->klass());
  1653       objName = Klass::cast(ak)->external_name();
  1655     Klass* targetKlass = Klass::cast(required->is_klass()
  1656                                      ? (klassOop)required
  1657                                      : java_lang_Class::as_klassOop(required));
  1658     message = generate_class_cast_message(objName, targetKlass->external_name());
  1659   } else {
  1660     // %%% need to get the MethodType string, without messing around too much
  1661     // Get a signature from the invoke instruction
  1662     const char* mhName = "method handle";
  1663     const char* targetType = "the required signature";
  1664     vframeStream vfst(thread, true);
  1665     if (!vfst.at_end()) {
  1666       Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci());
  1667       methodHandle target;
  1669         EXCEPTION_MARK;
  1670         target = call->static_target(THREAD);
  1671         if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
  1673       if (target.not_null()
  1674           && target->is_method_handle_invoke()
  1675           && required == target->method_handle_type()) {
  1676         targetType = target->signature()->as_C_string();
  1679     klassOop kignore; int fignore;
  1680     methodOop actual_method = MethodHandles::decode_method(actual,
  1681                                                           kignore, fignore);
  1682     if (actual_method != NULL) {
  1683       if (methodOopDesc::is_method_handle_invoke_name(actual_method->name()))
  1684         mhName = "$";
  1685       else
  1686         mhName = actual_method->signature()->as_C_string();
  1687       if (mhName[0] == '$')
  1688         mhName = actual_method->signature()->as_C_string();
  1690     message = generate_class_cast_message(mhName, targetType,
  1691                                           " cannot be called as ");
  1693   if (TraceMethodHandles) {
  1694     tty->print_cr("WrongMethodType => message=%s", message);
  1696   return message;
  1699 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
  1700                                                             oopDesc* required) {
  1701   if (required == NULL)  return NULL;
  1702   if (required->klass() == SystemDictionary::Class_klass())
  1703     return required;
  1704   if (required->is_klass())
  1705     return Klass::cast(klassOop(required))->java_mirror();
  1706   return NULL;
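       // A worked example of the message built below (the argument values are
       // hypothetical): generate_class_cast_message("Foo", "the required signature",
       // " cannot be called as ") produces the resource-allocated string
       // "Foo cannot be called as the required signature".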
  1710 char* SharedRuntime::generate_class_cast_message(
  1711     const char* objName, const char* targetKlassName, const char* desc) {
  1712   size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
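         // The +1 leaves room for the NUL terminator written by jio_snprintf below.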
  1714   char* message = NEW_RESOURCE_ARRAY(char, msglen);
  1715   if (NULL == message) {
  1716     // Shouldn't happen, but don't cause even more problems if it does
  1717     message = const_cast<char*>(objName);
  1718   } else {
  1719     jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
  1721   return message;
  1724 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  1725   (void) JavaThread::current()->reguard_stack();
  1726 JRT_END
  1729 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
  1730 #ifndef PRODUCT
  1731 int SharedRuntime::_monitor_enter_ctr=0;
  1732 #endif
  1733 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
  1734   oop obj(_obj);
  1735 #ifndef PRODUCT
  1736   _monitor_enter_ctr++;             // monitor enter slow
  1737 #endif
  1738   if (PrintBiasedLockingStatistics) {
  1739     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  1741   Handle h_obj(THREAD, obj);
  1742   if (UseBiasedLocking) {
  1743     // Retry fast entry if bias is revoked to avoid unnecessary inflation
  1744     ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  1745   } else {
  1746     ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
  1748   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
  1749 JRT_END
  1751 #ifndef PRODUCT
  1752 int SharedRuntime::_monitor_exit_ctr=0;
  1753 #endif
  1754 // Handles the uncommon cases of monitor unlocking in compiled code
  1755 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
  1756    oop obj(_obj);
  1757 #ifndef PRODUCT
  1758   _monitor_exit_ctr++;              // monitor exit slow
  1759 #endif
  1760   Thread* THREAD = JavaThread::current();
  1761   // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore;
  1762   // testing was never able to fire the assert that guarded it, so I have removed it.
  1763   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
  1764 #undef MIGHT_HAVE_PENDING
  1765 #ifdef MIGHT_HAVE_PENDING
  1766   // Save and restore any pending_exception around the exception mark.
  1767   // While the slow_exit must not throw an exception, we could come into
  1768   // this routine with one set.
  1769   oop pending_excep = NULL;
  1770   const char* pending_file;
  1771   int pending_line;
  1772   if (HAS_PENDING_EXCEPTION) {
  1773     pending_excep = PENDING_EXCEPTION;
  1774     pending_file  = THREAD->exception_file();
  1775     pending_line  = THREAD->exception_line();
  1776     CLEAR_PENDING_EXCEPTION;
  1778 #endif /* MIGHT_HAVE_PENDING */
  1781     // Exit must be non-blocking, and therefore no exceptions can be thrown.
  1782     EXCEPTION_MARK;
  1783     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  1786 #ifdef MIGHT_HAVE_PENDING
  1787   if (pending_excep != NULL) {
  1788     THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
  1790 #endif /* MIGHT_HAVE_PENDING */
  1791 JRT_END
  1793 #ifndef PRODUCT
  1795 void SharedRuntime::print_statistics() {
  1796   ttyLocker ttyl;
  1797   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
  1799   if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
  1800   if (_monitor_exit_ctr  ) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
  1801   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
  1803   SharedRuntime::print_ic_miss_histogram();
  1805   if (CountRemovableExceptions) {
  1806     if (_nof_removable_exceptions > 0) {
  1807       Unimplemented(); // this counter is not yet incremented
  1808       tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
  1812   // Dump the JRT_ENTRY counters
  1813   if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
  1814   if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
  1815   if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
  1816   if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
  1817   if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
  1818   if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
  1819   if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
  1821   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
  1822   tty->print_cr("%5d wrong method", _wrong_method_ctr );
  1823   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
  1824   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
  1825   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
  1827   if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
  1828   if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
  1829   if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
  1830   if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
  1831   if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
  1832   if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
  1833   if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
  1834   if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
  1835   if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
  1836   if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
  1837   if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
  1838   if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
  1839   if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
  1840   if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
  1841   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
  1842   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
  1844   AdapterHandlerLibrary::print_statistics();
  1846   if (xtty != NULL)  xtty->tail("statistics");
  1849 inline double percent(int x, int y) {
  1850   return 100.0 * x / MAX2(y, 1);
  1853 class MethodArityHistogram {
  1854  public:
  1855   enum { MAX_ARITY = 256 };
  1856  private:
  1857   static int _arity_histogram[MAX_ARITY];     // histogram of #args
  1858   static int _size_histogram[MAX_ARITY];      // histogram of arg size in words
  1859   static int _max_arity;                      // max. arity seen
  1860   static int _max_size;                       // max. arg size seen
  1862   static void add_method_to_histogram(nmethod* nm) {
  1863     methodOop m = nm->method();
  1864     ArgumentCount args(m->signature());
  1865     int arity   = args.size() + (m->is_static() ? 0 : 1);
  1866     int argsize = m->size_of_parameters();
  1867     arity   = MIN2(arity, MAX_ARITY-1);
  1868     argsize = MIN2(argsize, MAX_ARITY-1);
  1869     int count = nm->method()->compiled_invocation_count();
  1870     _arity_histogram[arity]  += count;
  1871     _size_histogram[argsize] += count;
  1872     _max_arity = MAX2(_max_arity, arity);
  1873     _max_size  = MAX2(_max_size, argsize);
  1876   void print_histogram_helper(int n, int* histo, const char* name) {
  1877     const int N = MIN2(5, n);
  1878     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
  1879     double sum = 0;
  1880     double weighted_sum = 0;
  1881     int i;
  1882     for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
  1883     double rest = sum;
  1884     double percent = sum / 100;
  1885     for (i = 0; i <= N; i++) {
  1886       rest -= histo[i];
  1887       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
  1889     tty->print_cr("rest: %7d (%5.1f%%)", (int)rest, rest / percent);
  1890     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
  1893   void print_histogram() {
  1894     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
  1895     print_histogram_helper(_max_arity, _arity_histogram, "arity");
  1896     tty->print_cr("\nSame for parameter size (in words):");
  1897     print_histogram_helper(_max_size, _size_histogram, "size");
  1898     tty->cr();
  1901  public:
  1902   MethodArityHistogram() {
  1903     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1904     _max_arity = _max_size = 0;
  1905     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
  1906     CodeCache::nmethods_do(add_method_to_histogram);
  1907     print_histogram();
  1909 };
  1911 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
  1912 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
  1913 int MethodArityHistogram::_max_arity;
  1914 int MethodArityHistogram::_max_size;
  1916 void SharedRuntime::print_call_statistics(int comp_total) {
  1917   tty->print_cr("Calls from compiled code:");
  1918   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
  1919   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
  1920   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
  1921   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
  1922   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
  1923   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
  1924   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
  1925   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
  1926   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
  1927   tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
  1928   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
  1929   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
  1930   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
  1931   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
  1932   tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
  1933   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
  1934   tty->cr();
  1935   tty->print_cr("Note 1: counter updates are not MT-safe.");
  1936   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
  1937   tty->print_cr("        %% in nested categories are relative to their category");
  1938   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
  1939   tty->cr();
  1941   MethodArityHistogram h;
  1943 #endif
  1946 // A simple wrapper class around the calling convention information
  1947 // that allows sharing of adapters for the same calling convention.
  1948 class AdapterFingerPrint : public CHeapObj {
  1949  private:
  1950   union {
  1951     int  _compact[3];
  1952     int* _fingerprint;
  1953   } _value;
  1954   int _length; // A negative length indicates the fingerprint is in the compact form;
  1955                // otherwise _value._fingerprint is the array.
  1957   // Remap BasicTypes that are handled equivalently by the adapters.
  1958   // These are correct for the current system but someday it might be
  1959   // necessary to make this mapping platform dependent.
  1960   static BasicType adapter_encoding(BasicType in) {
  1961     assert((~0xf & in) == 0, "must fit in 4 bits");
  1962     switch(in) {
  1963       case T_BOOLEAN:
  1964       case T_BYTE:
  1965       case T_SHORT:
  1966       case T_CHAR:
  1967         // These are all promoted to T_INT in the calling convention
  1968         return T_INT;
  1970       case T_OBJECT:
  1971       case T_ARRAY:
  1972 #ifdef _LP64
  1973         return T_LONG;
  1974 #else
  1975         return T_INT;
  1976 #endif
  1978       case T_INT:
  1979       case T_LONG:
  1980       case T_FLOAT:
  1981       case T_DOUBLE:
  1982       case T_VOID:
  1983         return in;
  1985       default:
  1986         ShouldNotReachHere();
  1987         return T_CONFLICT;
  1991  public:
  1992   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
  1993     // The fingerprint is based on the BasicType signature encoded
  1994     // into an array of ints with four entries per int.
  1995     int* ptr;
  1996     int len = (total_args_passed + 3) >> 2;
  1997     if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
  1998       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
  1999       // Storing the signature encoded as signed chars hits about 98%
  2000       // of the time.
  2001       _length = -len;
  2002       ptr = _value._compact;
  2003     } else {
  2004       _length = len;
  2005       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
  2006       ptr = _value._fingerprint;
  2009     // Now pack the BasicTypes with 4 per int
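           // Worked example (illustrative values only): for the four-entry signature
           // sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID } we get len == 1, so the
           // compact form is used (_length == -1) and the single packed int is
           //   (enc(T_OBJECT) << 12) | (enc(T_INT) << 8) | (enc(T_LONG) << 4) | enc(T_VOID)
           // where enc() is adapter_encoding() above (T_OBJECT encodes as T_LONG on
           // LP64 and as T_INT elsewhere).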
  2010     int sig_index = 0;
  2011     for (int index = 0; index < len; index++) {
  2012       int value = 0;
  2013       for (int byte = 0; byte < 4; byte++) {
  2014         if (sig_index < total_args_passed) {
  2015           value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
  2018       ptr[index] = value;
  2022   ~AdapterFingerPrint() {
  2023     if (_length > 0) {
  2024       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
  2028   int value(int index) {
  2029     if (_length < 0) {
  2030       return _value._compact[index];
  2032     return _value._fingerprint[index];
  2034   int length() {
  2035     if (_length < 0) return -_length;
  2036     return _length;
  2039   bool is_compact() {
  2040     return _length <= 0;
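         // A small worked check of the mixing step below (illustrative only): a
         // compact fingerprint whose only packed int is v hashes to
         // (0 << 8) ^ v ^ (0 >> 5), i.e. to v itself; longer fingerprints fold
         // each subsequent int into the running hash.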
  2043   unsigned int compute_hash() {
  2044     int hash = 0;
  2045     for (int i = 0; i < length(); i++) {
  2046       int v = value(i);
  2047       hash = (hash << 8) ^ v ^ (hash >> 5);
  2049     return (unsigned int)hash;
  2052   const char* as_string() {
  2053     stringStream st;
  2054     for (int i = 0; i < length(); i++) {
  2055       st.print(PTR_FORMAT, value(i));
  2057     return st.as_string();
  2060   bool equals(AdapterFingerPrint* other) {
  2061     if (other->_length != _length) {
  2062       return false;
  2064     if (_length < 0) {
  2065       return _value._compact[0] == other->_value._compact[0] &&
  2066              _value._compact[1] == other->_value._compact[1] &&
  2067              _value._compact[2] == other->_value._compact[2];
  2068     } else {
  2069       for (int i = 0; i < _length; i++) {
  2070         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
  2071           return false;
  2075     return true;
  2077 };
  2080 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
  2081 class AdapterHandlerTable : public BasicHashtable {
  2082   friend class AdapterHandlerTableIterator;
  2084  private:
  2086 #ifndef PRODUCT
  2087   static int _lookups; // number of calls to lookup
  2088   static int _buckets; // number of buckets checked
  2089   static int _equals;  // number of buckets checked with matching hash
  2090   static int _hits;    // number of successful lookups
  2091   static int _compact; // number of equals calls with compact signature
  2092 #endif
  2094   AdapterHandlerEntry* bucket(int i) {
  2095     return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
  2098  public:
  2099   AdapterHandlerTable()
  2100     : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
  2102   // Create a new entry suitable for insertion in the table
  2103   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
  2104     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
  2105     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  2106     return entry;
  2109   // Insert an entry into the table
  2110   void add(AdapterHandlerEntry* entry) {
  2111     int index = hash_to_index(entry->hash());
  2112     add_entry(index, entry);
  2115   void free_entry(AdapterHandlerEntry* entry) {
  2116     entry->deallocate();
  2117     BasicHashtable::free_entry(entry);
  2120   // Find an entry with the same fingerprint if it exists
  2121   AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
  2122     NOT_PRODUCT(_lookups++);
  2123     AdapterFingerPrint fp(total_args_passed, sig_bt);
  2124     unsigned int hash = fp.compute_hash();
  2125     int index = hash_to_index(hash);
  2126     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
  2127       NOT_PRODUCT(_buckets++);
  2128       if (e->hash() == hash) {
  2129         NOT_PRODUCT(_equals++);
  2130         if (fp.equals(e->fingerprint())) {
  2131 #ifndef PRODUCT
  2132           if (fp.is_compact()) _compact++;
  2133           _hits++;
  2134 #endif
  2135           return e;
  2139     return NULL;
  2142 #ifndef PRODUCT
  2143   void print_statistics() {
  2144     ResourceMark rm;
  2145     int longest = 0;
  2146     int empty = 0;
  2147     int total = 0;
  2148     int nonempty = 0;
  2149     for (int index = 0; index < table_size(); index++) {
  2150       int count = 0;
  2151       for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
  2152         count++;
  2154       if (count != 0) nonempty++;
  2155       if (count == 0) empty++;
  2156       if (count > longest) longest = count;
  2157       total += count;
  2159     tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
  2160                   empty, longest, total, total / (double)nonempty);
  2161     tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
  2162                   _lookups, _buckets, _equals, _hits, _compact);
  2164 #endif
  2165 };
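       // Typical lookup/insert pattern against this table (an illustrative sketch;
       // the real flow is in AdapterHandlerLibrary::get_adapter() below):
       //   AdapterHandlerEntry* e = _adapters->lookup(total_args_passed, sig_bt);
       //   if (e == NULL) {
       //     e = _adapters->new_entry(fp, i2c_entry, c2i_entry, c2i_unverified_entry);
       //     _adapters->add(e);
       //   }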
  2168 #ifndef PRODUCT
  2170 int AdapterHandlerTable::_lookups;
  2171 int AdapterHandlerTable::_buckets;
  2172 int AdapterHandlerTable::_equals;
  2173 int AdapterHandlerTable::_hits;
  2174 int AdapterHandlerTable::_compact;
  2176 #endif
  2178 class AdapterHandlerTableIterator : public StackObj {
  2179  private:
  2180   AdapterHandlerTable* _table;
  2181   int _index;
  2182   AdapterHandlerEntry* _current;
  2184   void scan() {
  2185     while (_index < _table->table_size()) {
  2186       AdapterHandlerEntry* a = _table->bucket(_index);
  2187       _index++;
  2188       if (a != NULL) {
  2189         _current = a;
  2190         return;
  2195  public:
  2196   AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
  2197     scan();
  2199   bool has_next() {
  2200     return _current != NULL;
  2202   AdapterHandlerEntry* next() {
  2203     if (_current != NULL) {
  2204       AdapterHandlerEntry* result = _current;
  2205       _current = _current->next();
  2206       if (_current == NULL) scan();
  2207       return result;
  2208     } else {
  2209       return NULL;
  2212 };
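       // A sketch of how callers typically walk the table with this iterator
       // (the loop is illustrative, not code from this file):
       //   AdapterHandlerTableIterator iter(_adapters);
       //   while (iter.has_next()) {
       //     AdapterHandlerEntry* a = iter.next();
       //     ...  // e.g. print or verify 'a'
       //   }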
  2215 // ---------------------------------------------------------------------------
  2216 // Implementation of AdapterHandlerLibrary
  2217 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
  2218 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
  2219 const int AdapterHandlerLibrary_size = 16*K;
  2220 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
  2222 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  2223   // Should be called only when AdapterHandlerLibrary_lock is active.
  2224   if (_buffer == NULL) // Initialize lazily
  2225       _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
  2226   return _buffer;
  2229 void AdapterHandlerLibrary::initialize() {
  2230   if (_adapters != NULL) return;
  2231   _adapters = new AdapterHandlerTable();
  2233   // Create a special handler for abstract methods.  Abstract methods
  2234   // are never compiled so an i2c entry is somewhat meaningless, but
  2235   // fill it in with something appropriate just in case.  Pass the
  2236   // handle_wrong_method stub for the c2i transitions.
  2237   address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
  2238   _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
  2239                                                               StubRoutines::throw_AbstractMethodError_entry(),
  2240                                                               wrong_method, wrong_method);
  2243 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
  2244                                                       address i2c_entry,
  2245                                                       address c2i_entry,
  2246                                                       address c2i_unverified_entry) {
  2247   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  2250 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
  2251   // Use customized signature handler.  Need to lock around updates to
  2252   // the AdapterHandlerTable (it is not safe for concurrent readers
  2253   // and a single writer: this could be fixed if it becomes a
  2254   // problem).
  2256   // Get the address of the ic_miss handlers before we grab the
  2257   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
  2258   // was caused by the initialization of the stubs happening
  2259   // while we held the lock and then notifying jvmti while
  2260   // holding it. This just forces the initialization to be a little
  2261   // earlier.
  2262   address ic_miss = SharedRuntime::get_ic_miss_stub();
  2263   assert(ic_miss != NULL, "must have handler");
  2265   ResourceMark rm;
  2267   NOT_PRODUCT(int insts_size);
  2268   AdapterBlob* B = NULL;
  2269   AdapterHandlerEntry* entry = NULL;
  2270   AdapterFingerPrint* fingerprint = NULL;
  2272     MutexLocker mu(AdapterHandlerLibrary_lock);
  2273     // make sure data structure is initialized
  2274     initialize();
  2276     if (method->is_abstract()) {
  2277       return _abstract_method_handler;
  2280     // Fill in the signature array, for the calling-convention call.
  2281     int total_args_passed = method->size_of_parameters(); // All args on stack
  2283     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  2284     VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
  2285     int i = 0;
  2286     if (!method->is_static())  // Pass in receiver first
  2287       sig_bt[i++] = T_OBJECT;
  2288     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
  2289       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  2290       if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
  2291         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  2293     assert(i == total_args_passed, "");
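           // For example (illustrative only): an instance method with signature
           // (JLjava/lang/String;)I has size_of_parameters() == 4 and is encoded as
           //   sig_bt = { T_OBJECT /* receiver */, T_LONG, T_VOID, T_OBJECT },
           // the T_VOID filler accounting for the second slot of the long.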
  2295     // Lookup method signature's fingerprint
  2296     entry = _adapters->lookup(total_args_passed, sig_bt);
  2298 #ifdef ASSERT
  2299     AdapterHandlerEntry* shared_entry = NULL;
  2300     if (VerifyAdapterSharing && entry != NULL) {
  2301       shared_entry = entry;
  2302       entry = NULL;
  2304 #endif
  2306     if (entry != NULL) {
  2307       return entry;
  2310     // Get a description of the compiled Java calling convention and the largest (VMReg) stack slot used
  2311     int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  2313     // Make a C heap allocated version of the fingerprint to store in the adapter
  2314     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
  2316     // Create I2C & C2I handlers
  2318     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
  2319     if (buf != NULL) {
  2320       CodeBuffer buffer(buf);
  2321       short buffer_locs[20];
  2322       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
  2323                                              sizeof(buffer_locs)/sizeof(relocInfo));
  2324       MacroAssembler _masm(&buffer);
  2326       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
  2327                                                      total_args_passed,
  2328                                                      comp_args_on_stack,
  2329                                                      sig_bt,
  2330                                                      regs,
  2331                                                      fingerprint);
  2333 #ifdef ASSERT
  2334       if (VerifyAdapterSharing) {
  2335         if (shared_entry != NULL) {
  2336           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
  2337                  "code must match");
  2338           // Release the one just created and return the original
  2339           _adapters->free_entry(entry);
  2340           return shared_entry;
  2341         } else  {
  2342           entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
  2345 #endif
  2347       B = AdapterBlob::create(&buffer);
  2348       NOT_PRODUCT(insts_size = buffer.insts_size());
  2350     if (B == NULL) {
  2351       // CodeCache is full, disable compilation
  2352       // Ought to log this but compile log is only per compile thread
  2353       // and we're some nondescript Java thread.
  2354       MutexUnlocker mu(AdapterHandlerLibrary_lock);
  2355       CompileBroker::handle_full_code_cache();
  2356       return NULL; // Out of CodeCache space
  2358     entry->relocate(B->content_begin());
  2359 #ifndef PRODUCT
  2360     // debugging support
  2361     if (PrintAdapterHandlers) {
  2362       tty->cr();
  2363       tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
  2364                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
  2365                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size );
  2366       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
  2367       Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size);
  2369 #endif
  2371     _adapters->add(entry);
  2373   // Outside of the lock
  2374   if (B != NULL) {
  2375     char blob_id[256];
  2376     jio_snprintf(blob_id,
  2377                  sizeof(blob_id),
  2378                  "%s(%s)@" PTR_FORMAT,
  2379                  B->name(),
  2380                  fingerprint->as_string(),
  2381                  B->content_begin());
  2382     Forte::register_stub(blob_id, B->content_begin(), B->content_end());
  2384     if (JvmtiExport::should_post_dynamic_code_generated()) {
  2385       JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
  2388   return entry;
  2391 void AdapterHandlerEntry::relocate(address new_base) {
  2392     ptrdiff_t delta = new_base - _i2c_entry;
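           // All three entry points were generated into one code buffer, so a single
           // delta (computed from the i2c entry) relocates them all, assuming the
           // relative layout is preserved when the code is copied into the blob.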
  2393     _i2c_entry += delta;
  2394     _c2i_entry += delta;
  2395     _c2i_unverified_entry += delta;
  2399 void AdapterHandlerEntry::deallocate() {
  2400   delete _fingerprint;
  2401 #ifdef ASSERT
  2402   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
  2403   if (_saved_sig)  FREE_C_HEAP_ARRAY(BasicType, _saved_sig);
  2404 #endif
  2408 #ifdef ASSERT
  2409 // Capture the code before relocation so that it can be compared
  2410 // against other versions.  If the code is captured after relocation
  2411 // then relative instructions won't be equivalent.
  2412 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
  2413   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
  2414   _code_length = length;
  2415   memcpy(_saved_code, buffer, length);
  2416   _total_args_passed = total_args_passed;
  2417   _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
  2418   memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
  2422 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
  2423   if (length != _code_length) {
  2424     return false;
  2426   for (int i = 0; i < length; i++) {
  2427     if (buffer[i] != _saved_code[i]) {
  2428       return false;
  2431   return true;
  2433 #endif
  2436 // Create a native wrapper for this native method.  The wrapper converts the
  2437 // java compiled calling convention to the native convention, handlizes
  2438 // arguments, and transitions to native.  On return from the native call we
  2439 // transition back to java, blocking if a safepoint is in progress.
  2440 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
  2441   ResourceMark rm;
  2442   nmethod* nm = NULL;
  2444   if (PrintCompilation) {
  2445     ttyLocker ttyl;
  2446     tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
  2447     method->print_short_name(tty);
  2448     if (method->is_static()) {
  2449       tty->print(" (static)");
  2451     tty->cr();
  2454   assert(method->has_native_function(), "must have something valid to call!");
  2457     // perform the work while holding the lock, but perform any printing outside the lock
  2458     MutexLocker mu(AdapterHandlerLibrary_lock);
  2459     // See if somebody beat us to it
  2460     nm = method->code();
  2461     if (nm) {
  2462       return nm;
  2465     ResourceMark rm;
  2467     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
  2468     if (buf != NULL) {
  2469       CodeBuffer buffer(buf);
  2470       double locs_buf[20];
  2471       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  2472       MacroAssembler _masm(&buffer);
  2474       // Fill in the signature array, for the calling-convention call.
  2475       int total_args_passed = method->size_of_parameters();
  2477       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
  2478       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed);
  2479       int i=0;
  2480       if( !method->is_static() )  // Pass in receiver first
  2481         sig_bt[i++] = T_OBJECT;
  2482       SignatureStream ss(method->signature());
  2483       for( ; !ss.at_return_type(); ss.next()) {
  2484         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  2485         if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
  2486           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  2488       assert( i==total_args_passed, "" );
  2489       BasicType ret_type = ss.type();
  2491       // Now get the compiled-Java layout as input arguments
  2492       int comp_args_on_stack;
  2493       comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  2495       // Generate the compiled-to-native wrapper code
  2496       nm = SharedRuntime::generate_native_wrapper(&_masm,
  2497                                                   method,
  2498                                                   total_args_passed,
  2499                                                   comp_args_on_stack,
  2500                                                   sig_bt,regs,
  2501                                                   ret_type);
  2505   // Must unlock before calling set_code
  2507   // Install the generated code.
  2508   if (nm != NULL) {
  2509     method->set_code(method, nm);
  2510     nm->post_compiled_method_load_event();
  2511   } else {
  2512     // CodeCache is full, disable compilation
  2513     CompileBroker::handle_full_code_cache();
  2515   return nm;
  2518 #ifdef HAVE_DTRACE_H
  2519 // Create a dtrace nmethod for this method.  The wrapper converts the
  2520 // java compiled calling convention to the native convention, makes a dummy call
  2521 // (actually nops for the size of the call instruction, which become a trap if the
  2522 // probe is enabled). It then returns to the caller. Since this all looks like a
  2523 // leaf, no thread transition is needed.
  2525 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
  2526   ResourceMark rm;
  2527   nmethod* nm = NULL;
  2529   if (PrintCompilation) {
  2530     ttyLocker ttyl;
  2531     tty->print("---   n%s  ", (method->is_synchronized() ? "s" : " "));
  2532     method->print_short_name(tty);
  2533     if (method->is_static()) {
  2534       tty->print(" (static)");
  2536     tty->cr();
  2540     // perform the work while holding the lock, but perform any printing
  2541     // outside the lock
  2542     MutexLocker mu(AdapterHandlerLibrary_lock);
  2543     // See if somebody beat us to it
  2544     nm = method->code();
  2545     if (nm) {
  2546       return nm;
  2549     ResourceMark rm;
  2551     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
  2552     if (buf != NULL) {
  2553       CodeBuffer buffer(buf);
  2554       // Need a few relocation entries
  2555       double locs_buf[20];
  2556       buffer.insts()->initialize_shared_locs(
  2557         (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  2558       MacroAssembler _masm(&buffer);
  2560       // Generate the compiled-to-native wrapper code
  2561       nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
  2564   return nm;
  2567 // The dtrace method needs to convert a java.lang.String to a utf8 string.
  2568 void SharedRuntime::get_utf(oopDesc* src, address dst) {
  2569   typeArrayOop jlsValue  = java_lang_String::value(src);
  2570   int          jlsOffset = java_lang_String::offset(src);
  2571   int          jlsLen    = java_lang_String::length(src);
  2572   jchar*       jlsPos    = (jlsLen == 0) ? NULL :
  2573                                            jlsValue->char_at_addr(jlsOffset);
  2574   (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
  2576 #endif // def HAVE_DTRACE_H
  2578 // -------------------------------------------------------------------------
  2579 // Java-Java calling convention
  2580 // (what you use when Java calls Java)
  2582 //------------------------------name_for_receiver----------------------------------
  2583 // For a given signature, return the VMReg for parameter 0.
  2584 VMReg SharedRuntime::name_for_receiver() {
  2585   VMRegPair regs;
  2586   BasicType sig_bt = T_OBJECT;
  2587   (void) java_calling_convention(&sig_bt, &regs, 1, true);
  2588   // Return argument 0 register.  In the LP64 build pointers
  2589   // take 2 registers, but the VM wants only the 'main' name.
  2590   return regs.first();
  2593 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) {
  2594   // This method is returning a data structure allocated as a
  2595   // ResourceObject, so do not put any ResourceMarks in here.
  2596   char *s = sig->as_C_string();
  2597   int len = (int)strlen(s);
  2598   s++; len--;                   // Skip opening paren
  2599   char *t = s+len;
  2600   while( *(--t) != ')' ) ;      // Find close paren
  2602   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
  2603   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
  2604   int cnt = 0;
  2605   if (has_receiver) {
  2606     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
  2609   while( s < t ) {
  2610     switch( *s++ ) {            // Switch on signature character
  2611     case 'B': sig_bt[cnt++] = T_BYTE;    break;
  2612     case 'C': sig_bt[cnt++] = T_CHAR;    break;
  2613     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
  2614     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
  2615     case 'I': sig_bt[cnt++] = T_INT;     break;
  2616     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
  2617     case 'S': sig_bt[cnt++] = T_SHORT;   break;
  2618     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
  2619     case 'V': sig_bt[cnt++] = T_VOID;    break;
  2620     case 'L':                   // Oop
  2621       while( *s++ != ';'  ) ;   // Skip signature
  2622       sig_bt[cnt++] = T_OBJECT;
  2623       break;
  2624     case '[': {                 // Array
  2625       do {                      // Skip optional size
  2626         while( *s >= '0' && *s <= '9' ) s++;
  2627       } while( *s++ == '[' );   // Nested arrays?
  2628       // Skip element type
  2629       if( s[-1] == 'L' )
  2630         while( *s++ != ';'  ) ; // Skip signature
  2631       sig_bt[cnt++] = T_ARRAY;
  2632       break;
  2633     }
  2634     default : ShouldNotReachHere();
  2635     }
  2636   }
  2637   assert( cnt < 256, "grow table size" );
  2639   int comp_args_on_stack;
  2640   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
  2642   // the calling convention doesn't count out_preserve_stack_slots so
  2643   // we must add that in to get "true" stack offsets.
  2645   if (comp_args_on_stack) {
  2646     for (int i = 0; i < cnt; i++) {
  2647       VMReg reg1 = regs[i].first();
  2648       if( reg1->is_stack()) {
  2649         // Yuck
  2650         reg1 = reg1->bias(out_preserve_stack_slots());
  2651       }
  2652       VMReg reg2 = regs[i].second();
  2653       if( reg2->is_stack()) {
  2654         // Yuck
  2655         reg2 = reg2->bias(out_preserve_stack_slots());
  2656       }
  2657       regs[i].set_pair(reg2, reg1);
  2658     }
  2659   }
  2661   // results
  2662   *arg_size = cnt;
  2663   return regs;
  2664 }
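       // A sketch of a typical call (names here are hypothetical):
       //   int arg_size;
       //   VMRegPair* regs =
       //     SharedRuntime::find_callee_arguments(m->signature(), !m->is_static(), &arg_size);
       // regs[i].first()/second() then give the compiled-convention location of
       // argument i, with stack slots already biased by out_preserve_stack_slots().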
  2666 // OSR Migration Code
  2667 //
  2668 // This code is used to convert interpreter frames into compiled frames.  It is
  2669 // called from the very start of a compiled OSR nmethod.  A temp array is
  2670 // allocated to hold the interesting bits of the interpreter frame.  All
  2671 // active locks are inflated to allow them to move.  The displaced headers and
  2672 // active interpreter locals are copied into the temp buffer.  Then we return
  2673 // back to the compiled code.  The compiled code then pops the current
  2674 // interpreter frame off the stack and pushes a new compiled frame.  Then it
  2675 // copies the interpreter locals and displaced headers where it wants.
  2676 // Finally it calls back to free the temp buffer.
  2677 //
  2678 // None of this is done at a safepoint, nor is any safepoint or GC allowed while it runs.
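       // Layout of the temp buffer built below (as produced by OSR_migration_begin):
       //   buf[0 .. max_locals-1]                       : copies of the interpreter locals
       //   buf[max_locals .. max_locals + 2*monitors-1] : one (displaced header, object)
       //                                                  pair per active monitor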
  2680 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
  2682 #ifdef IA64
  2683   ShouldNotReachHere(); // NYI
  2684 #endif /* IA64 */
  2686   //
  2687   // This code is dependent on the memory layout of the interpreter local
  2688   // array and the monitors. On all of our platforms the layout is identical
  2689 // so this code is shared. If some platform lays its arrays out
  2690 // differently then this code could move to platform-specific code, or
  2691   // the code here could be modified to copy items one at a time using
  2692   // frame accessor methods and be platform independent.
  2694   frame fr = thread->last_frame();
  2695   assert( fr.is_interpreted_frame(), "" );
  2696   assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
  2698   // Figure out how many monitors are active.
  2699   int active_monitor_count = 0;
  2700   for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
  2701        kptr < fr.interpreter_frame_monitor_begin();
  2702        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
  2703     if( kptr->obj() != NULL ) active_monitor_count++;
  2704   }
  2706   // QQQ we could place the number of active monitors in the array so that
  2707   // compiled code could double-check it.
  2709   methodOop moop = fr.interpreter_frame_method();
  2710   int max_locals = moop->max_locals();
  2711   // Allocate temp buffer, 1 word per local & 2 per active monitor
  2712   int buf_size_words = max_locals + active_monitor_count*2;
  2713   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
  2715   // Copy the locals.  Order is preserved so that loading of longs works.
  2716   // Since there's no GC I can copy the oops blindly.
  2717   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
  2718   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
  2719                        (HeapWord*)&buf[0],
  2720                        max_locals);
  2722   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
  2723   int i = max_locals;
  2724   for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
  2725        kptr2 < fr.interpreter_frame_monitor_begin();
  2726        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
  2727     if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
  2728       BasicLock *lock = kptr2->lock();
  2729       // Inflate so the displaced header becomes position-independent
  2730       if (lock->displaced_header()->is_unlocked())
  2731         ObjectSynchronizer::inflate_helper(kptr2->obj());
  2732       // Now the displaced header is free to move
  2733       buf[i++] = (intptr_t)lock->displaced_header();
  2734       buf[i++] = (intptr_t)kptr2->obj();
  2735     }
  2736   }
  2737   assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
  2739   return buf;
  2740 JRT_END
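       // The compiled OSR entry that called OSR_migration_begin() owns the returned
       // buffer; once the locals and displaced headers have been copied into the new
       // compiled frame it must pass the buffer to OSR_migration_end() below, which
       // frees the C-heap allocation.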
  2742 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
  2743   FREE_C_HEAP_ARRAY(intptr_t,buf);
  2744 JRT_END
  2746 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
  2747   AdapterHandlerTableIterator iter(_adapters);
  2748   while (iter.has_next()) {
  2749     AdapterHandlerEntry* a = iter.next();
  2750     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
  2751   }
  2752   return false;
  2753 }
  2755 void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
  2756   AdapterHandlerTableIterator iter(_adapters);
  2757   while (iter.has_next()) {
  2758     AdapterHandlerEntry* a = iter.next();
  2759     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
  2760       st->print("Adapter for signature: ");
  2761       st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
  2762                    a->fingerprint()->as_string(),
  2763                    a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
  2765       return;
  2766     }
  2767   }
  2768   assert(false, "Should have found handler");
  2769 }
  2771 #ifndef PRODUCT
  2773 void AdapterHandlerLibrary::print_statistics() {
  2774   _adapters->print_statistics();
  2775 }
  2777 #endif /* PRODUCT */
