src/share/vm/runtime/sharedRuntime.cpp

author      jrose
date        Fri, 20 Mar 2009 23:19:36 -0700
changeset   1100:c89f86385056
parent      1063:7bb995fbd3c0
child       1145:e5b0439ef4ae
permissions -rw-r--r--

6814659: separable cleanups and subroutines for 6655638
Summary: preparatory but separable changes for method handles
Reviewed-by: kvn, never

     1 /*
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_sharedRuntime.cpp.incl"
    27 #include <math.h>
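        // DTrace probe declarations for object allocation and method entry/return.
        // Each char*/int pair passes a UTF-8 symbol (class name, method name,
        // signature) together with its byte length; see dtrace_method_entry below.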
    29 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
    30 HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
    31                       char*, int, char*, int, char*, int);
    32 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
    33                       char*, int, char*, int, char*, int);
    35 // Implementation of SharedRuntime
    37 #ifndef PRODUCT
    38 // For statistics
    39 int SharedRuntime::_ic_miss_ctr = 0;
    40 int SharedRuntime::_wrong_method_ctr = 0;
    41 int SharedRuntime::_resolve_static_ctr = 0;
    42 int SharedRuntime::_resolve_virtual_ctr = 0;
    43 int SharedRuntime::_resolve_opt_virtual_ctr = 0;
    44 int SharedRuntime::_implicit_null_throws = 0;
    45 int SharedRuntime::_implicit_div0_throws = 0;
    46 int SharedRuntime::_throw_null_ctr = 0;
    48 int SharedRuntime::_nof_normal_calls = 0;
    49 int SharedRuntime::_nof_optimized_calls = 0;
    50 int SharedRuntime::_nof_inlined_calls = 0;
    51 int SharedRuntime::_nof_megamorphic_calls = 0;
    52 int SharedRuntime::_nof_static_calls = 0;
    53 int SharedRuntime::_nof_inlined_static_calls = 0;
    54 int SharedRuntime::_nof_interface_calls = 0;
    55 int SharedRuntime::_nof_optimized_interface_calls = 0;
    56 int SharedRuntime::_nof_inlined_interface_calls = 0;
    57 int SharedRuntime::_nof_megamorphic_interface_calls = 0;
    58 int SharedRuntime::_nof_removable_exceptions = 0;
    60 int SharedRuntime::_new_instance_ctr=0;
    61 int SharedRuntime::_new_array_ctr=0;
    62 int SharedRuntime::_multi1_ctr=0;
    63 int SharedRuntime::_multi2_ctr=0;
    64 int SharedRuntime::_multi3_ctr=0;
    65 int SharedRuntime::_multi4_ctr=0;
    66 int SharedRuntime::_multi5_ctr=0;
    67 int SharedRuntime::_mon_enter_stub_ctr=0;
    68 int SharedRuntime::_mon_exit_stub_ctr=0;
    69 int SharedRuntime::_mon_enter_ctr=0;
    70 int SharedRuntime::_mon_exit_ctr=0;
    71 int SharedRuntime::_partial_subtype_ctr=0;
    72 int SharedRuntime::_jbyte_array_copy_ctr=0;
    73 int SharedRuntime::_jshort_array_copy_ctr=0;
    74 int SharedRuntime::_jint_array_copy_ctr=0;
    75 int SharedRuntime::_jlong_array_copy_ctr=0;
    76 int SharedRuntime::_oop_array_copy_ctr=0;
    77 int SharedRuntime::_checkcast_array_copy_ctr=0;
    78 int SharedRuntime::_unsafe_array_copy_ctr=0;
    79 int SharedRuntime::_generic_array_copy_ctr=0;
    80 int SharedRuntime::_slow_array_copy_ctr=0;
    81 int SharedRuntime::_find_handler_ctr=0;
    82 int SharedRuntime::_rethrow_ctr=0;
    84 int     SharedRuntime::_ICmiss_index                    = 0;
    85 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
    86 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
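        // Records an inline-cache miss at the given pc. Each distinct call site gets
        // its own slot; once the table is full, every additional site reuses (and
        // resets) the last slot, so only the first maxICmiss_count-1 sites are exact.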
    88 void SharedRuntime::trace_ic_miss(address at) {
    89   for (int i = 0; i < _ICmiss_index; i++) {
    90     if (_ICmiss_at[i] == at) {
    91       _ICmiss_count[i]++;
    92       return;
    93     }
    94   }
    95   int index = _ICmiss_index++;
    96   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
    97   _ICmiss_at[index] = at;
    98   _ICmiss_count[index] = 1;
    99 }
   101 void SharedRuntime::print_ic_miss_histogram() {
   102   if (ICMissHistogram) {
   103     tty->print_cr ("IC Miss Histogram:");
   104     int tot_misses = 0;
   105     for (int i = 0; i < _ICmiss_index; i++) {
   106       tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
   107       tot_misses += _ICmiss_count[i];
   108     }
   109     tty->print_cr ("Total IC misses: %7d", tot_misses);
   110   }
   111 }
   112 #endif // PRODUCT
   114 #ifndef SERIALGC
   116 // G1 write-barrier pre: executed before a pointer store.
   117 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
   118   if (orig == NULL) {
   119     assert(false, "should be optimized out");
   120     return;
   121   }
   122   // store the original value that was in the field reference
   123   thread->satb_mark_queue().enqueue(orig);
   124 JRT_END
   126 // G1 write-barrier post: executed after a pointer store.
   127 JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
   128   thread->dirty_card_queue().enqueue(card_addr);
   129 JRT_END
   131 #endif // !SERIALGC
   134 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
   135   return x * y;
   136 JRT_END
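        // 64-bit division helpers. Note the (y, x) parameter order: the divisor is
        // passed first and the result is x / y (resp. x % y). The min_jlong / -1
        // guard implements the Java rule that this overflowing division yields
        // min_jlong (and remainder 0); on x86, idiv would raise a hardware fault
        // for that operand pair instead.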
   139 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
   140   if (x == min_jlong && y == CONST64(-1)) {
   141     return x;
   142   } else {
   143     return x / y;
   144   }
   145 JRT_END
   148 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
   149   if (x == min_jlong && y == CONST64(-1)) {
   150     return 0;
   151   } else {
   152     return x % y;
   153   }
   154 JRT_END
   157 const juint  float_sign_mask  = 0x7FFFFFFF;
   158 const juint  float_infinity   = 0x7F800000;
   159 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
   160 const julong double_infinity  = CONST64(0x7FF0000000000000);
   162 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat  x, jfloat  y))
   163 #ifdef _WIN64
   164   // 64-bit Windows on amd64 returns the wrong values for
   165   // infinity operands.
   166   union { jfloat f; juint i; } xbits, ybits;
   167   xbits.f = x;
   168   ybits.f = y;
   169   // x Mod Infinity == x unless x is infinity
   170   if ( ((xbits.i & float_sign_mask) != float_infinity) &&
   171        ((ybits.i & float_sign_mask) == float_infinity) ) {
   172     return x;
   173   }
   174 #endif
   175   return ((jfloat)fmod((double)x,(double)y));
   176 JRT_END
   179 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
   180 #ifdef _WIN64
   181   union { jdouble d; julong l; } xbits, ybits;
   182   xbits.d = x;
   183   ybits.d = y;
   184   // x Mod Infinity == x unless x is infinity
   185   if ( ((xbits.l & double_sign_mask) != double_infinity) &&
   186        ((ybits.l & double_sign_mask) == double_infinity) ) {
   187     return x;
   188   }
   189 #endif
   190   return ((jdouble)fmod((double)x,(double)y));
   191 JRT_END
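        // Float/double to integral conversions, following the Java narrowing rules:
        // NaN converts to 0 and out-of-range values saturate to the extremes of the
        // target type, e.g. f2i(NaN) == 0 and f2i(1e20F) == max_jint.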
   194 JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
   195   if (g_isnan(x))
   196     return 0;
   197   if (x >= (jfloat) max_jint)
   198     return max_jint;
   199   if (x <= (jfloat) min_jint)
   200     return min_jint;
   201   return (jint) x;
   202 JRT_END
   205 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
   206   if (g_isnan(x))
   207     return 0;
   208   if (x >= (jfloat) max_jlong)
   209     return max_jlong;
   210   if (x <= (jfloat) min_jlong)
   211     return min_jlong;
   212   return (jlong) x;
   213 JRT_END
   216 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
   217   if (g_isnan(x))
   218     return 0;
   219   if (x >= (jdouble) max_jint)
   220     return max_jint;
   221   if (x <= (jdouble) min_jint)
   222     return min_jint;
   223   return (jint) x;
   224 JRT_END
   227 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
   228   if (g_isnan(x))
   229     return 0;
   230   if (x >= (jdouble) max_jlong)
   231     return max_jlong;
   232   if (x <= (jdouble) min_jlong)
   233     return min_jlong;
   234   return (jlong) x;
   235 JRT_END
   238 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
   239   return (jfloat)x;
   240 JRT_END
   243 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
   244   return (jfloat)x;
   245 JRT_END
   248 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
   249   return (jdouble)x;
   250 JRT_END
    252 // Exception handling across interpreter/compiler boundaries
   253 //
   254 // exception_handler_for_return_address(...) returns the continuation address.
   255 // The continuation address is the entry point of the exception handler of the
    256 // previous frame, as determined by the return address.
   258 address SharedRuntime::raw_exception_handler_for_return_address(address return_address) {
   259   assert(frame::verify_return_pc(return_address), "must be a return pc");
   261   // the fastest case first
   262   CodeBlob* blob = CodeCache::find_blob(return_address);
   263   if (blob != NULL && blob->is_nmethod()) {
   264     nmethod* code = (nmethod*)blob;
   265     assert(code != NULL, "nmethod must be present");
   266     // native nmethods don't have exception handlers
   267     assert(!code->is_native_method(), "no exception handler");
   268     assert(code->header_begin() != code->exception_begin(), "no exception handler");
   269     if (code->is_deopt_pc(return_address)) {
   270       return SharedRuntime::deopt_blob()->unpack_with_exception();
   271     } else {
   272       return code->exception_begin();
   273     }
   274   }
   276   // Entry code
   277   if (StubRoutines::returns_to_call_stub(return_address)) {
   278     return StubRoutines::catch_exception_entry();
   279   }
   280   // Interpreted code
   281   if (Interpreter::contains(return_address)) {
   282     return Interpreter::rethrow_exception_entry();
   283   }
   285   // Compiled code
   286   if (CodeCache::contains(return_address)) {
   287     CodeBlob* blob = CodeCache::find_blob(return_address);
   288     if (blob->is_nmethod()) {
   289       nmethod* code = (nmethod*)blob;
   290       assert(code != NULL, "nmethod must be present");
   291       assert(code->header_begin() != code->exception_begin(), "no exception handler");
   292       return code->exception_begin();
   293     }
   294     if (blob->is_runtime_stub()) {
   295       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
   296     }
   297   }
   298   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
   299 #ifndef PRODUCT
   300   { ResourceMark rm;
   301     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
   302     tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here");
   303     tty->print_cr("b) other problem");
   304   }
   305 #endif // PRODUCT
   306   ShouldNotReachHere();
   307   return NULL;
   308 }
   311 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address))
   312   return raw_exception_handler_for_return_address(return_address);
   313 JRT_END
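        // Called (from the platform trap/signal handler) when a compiled method
        // faults on the safepoint polling page. Returns the runtime stub to continue
        // at; the stub chosen depends on whether the poll is at a return or in a loop.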
   315 address SharedRuntime::get_poll_stub(address pc) {
   316   address stub;
   317   // Look up the code blob
   318   CodeBlob *cb = CodeCache::find_blob(pc);
   320   // Should be an nmethod
   321   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
   323   // Look up the relocation information
   324   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
   325     "safepoint polling: type must be poll" );
   327   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
   328     "Only polling locations are used for safepoint");
   330   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
   331   if (at_poll_return) {
   332     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
   333            "polling page return stub not created yet");
   334     stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin();
   335   } else {
   336     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
   337            "polling page safepoint stub not created yet");
   338     stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin();
   339   }
   340 #ifndef PRODUCT
   341   if( TraceSafepoint ) {
   342     char buf[256];
   343     jio_snprintf(buf, sizeof(buf),
   344                  "... found polling page %s exception at pc = "
   345                  INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
   346                  at_poll_return ? "return" : "loop",
   347                  (intptr_t)pc, (intptr_t)stub);
   348     tty->print_raw_cr(buf);
   349   }
   350 #endif // PRODUCT
   351   return stub;
   352 }
   355 oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
   356   assert(caller.is_interpreted_frame(), "");
   357   int args_size = ArgumentSizeComputer(sig).size() + 1;
   358   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
   359   oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
   360   assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
   361   return result;
   362 }
   365 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
   366   if (JvmtiExport::can_post_exceptions()) {
   367     vframeStream vfst(thread, true);
   368     methodHandle method = methodHandle(thread, vfst.method());
   369     address bcp = method()->bcp_from(vfst.bci());
   370     JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
   371   }
   372   Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
   373 }
   375 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
   376   Handle h_exception = Exceptions::new_exception(thread, name, message);
   377   throw_and_post_jvmti_exception(thread, h_exception);
   378 }
   380 // The interpreter code to call this tracing function is only
   381 // called/generated when TraceRedefineClasses has the right bits
   382 // set. Since obsolete methods are never compiled, we don't have
   383 // to modify the compilers to generate calls to this function.
   384 //
   385 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
   386     JavaThread* thread, methodOopDesc* method))
   387   assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");
   389   if (method->is_obsolete()) {
   390     // We are calling an obsolete method, but this is not necessarily
   391     // an error. Our method could have been redefined just after we
   392     // fetched the methodOop from the constant pool.
   394     // RC_TRACE macro has an embedded ResourceMark
   395     RC_TRACE_WITH_THREAD(0x00001000, thread,
   396                          ("calling obsolete method '%s'",
   397                           method->name_and_sig_as_C_string()));
   398     if (RC_TRACE_ENABLED(0x00002000)) {
   399       // this option is provided to debug calls to obsolete methods
   400       guarantee(false, "faulting at call to an obsolete method.");
   401     }
   402   }
   403   return 0;
   404 JRT_END
   406 // ret_pc points into caller; we are returning caller's exception handler
   407 // for given exception
   408 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
   409                                                     bool force_unwind, bool top_frame_only) {
   410   assert(nm != NULL, "must exist");
   411   ResourceMark rm;
   413   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
   414   // determine handler bci, if any
   415   EXCEPTION_MARK;
   417   int handler_bci = -1;
   418   int scope_depth = 0;
   419   if (!force_unwind) {
   420     int bci = sd->bci();
   421     do {
   422       bool skip_scope_increment = false;
   423       // exception handler lookup
   424       KlassHandle ek (THREAD, exception->klass());
   425       handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
   426       if (HAS_PENDING_EXCEPTION) {
   427         // We threw an exception while trying to find the exception handler.
   428         // Transfer the new exception to the exception handle which will
   429         // be set into thread local storage, and do another lookup for an
   430         // exception handler for this exception, this time starting at the
   431         // BCI of the exception handler which caused the exception to be
   432         // thrown (bugs 4307310 and 4546590). Set "exception" reference
   433         // argument to ensure that the correct exception is thrown (4870175).
   434         exception = Handle(THREAD, PENDING_EXCEPTION);
   435         CLEAR_PENDING_EXCEPTION;
   436         if (handler_bci >= 0) {
   437           bci = handler_bci;
   438           handler_bci = -1;
   439           skip_scope_increment = true;
   440         }
   441       }
   442       if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
   443         sd = sd->sender();
   444         if (sd != NULL) {
   445           bci = sd->bci();
   446         }
   447         ++scope_depth;
   448       }
   449     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
   450   }
   452   // found handling method => lookup exception handler
   453   int catch_pco = ret_pc - nm->instructions_begin();
   455   ExceptionHandlerTable table(nm);
   456   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
   457   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
   458     // Allow abbreviated catch tables.  The idea is to allow a method
   459     // to materialize its exceptions without committing to the exact
   460     // routing of exceptions.  In particular this is needed for adding
    461     // a synthetic handler to unlock monitors when inlining
    462     // synchronized methods since the unlock path isn't represented in
   463     // the bytecodes.
   464     t = table.entry_for(catch_pco, -1, 0);
   465   }
   467 #ifdef COMPILER1
   468   if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) {
   469     // Exception is not handled by this frame so unwind.  Note that
   470     // this is not the same as how C2 does this.  C2 emits a table
   471     // entry that dispatches to the unwind code in the nmethod.
   472     return NULL;
   473   }
   474 #endif /* COMPILER1 */
   477   if (t == NULL) {
   478     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
   479     tty->print_cr("   Exception:");
   480     exception->print();
   481     tty->cr();
   482     tty->print_cr(" Compiled exception table :");
   483     table.print();
   484     nm->print_code();
   485     guarantee(false, "missing exception handler");
   486     return NULL;
   487   }
   489   return nm->instructions_begin() + t->pco();
   490 }
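        // Entry points for exceptions raised at call sites in compiled code. Each
        // fabricates the exception, posts the JVMTI exception event if enabled
        // (via throw_and_post_jvmti_exception), and throws.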
   492 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
   493   // These errors occur only at call sites
   494   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
   495 JRT_END
   497 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
   498   // These errors occur only at call sites
   499   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
   500 JRT_END
   502 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
   503   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
   504 JRT_END
   506 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
   507   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   508 JRT_END
   510 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
   511   // This entry point is effectively only used for NullPointerExceptions which occur at inline
   512   // cache sites (when the callee activation is not yet set up) so we are at a call site
   513   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   514 JRT_END
   516 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
   517   // We avoid using the normal exception construction in this case because
   518   // it performs an upcall to Java, and we're already out of stack space.
   519   klassOop k = SystemDictionary::StackOverflowError_klass();
   520   oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
   521   Handle exception (thread, exception_oop);
   522   if (StackTraceInThrowable) {
   523     java_lang_Throwable::fill_in_stack_trace(exception);
   524   }
   525   throw_and_post_jvmti_exception(thread, exception);
   526 JRT_END
   528 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
   529                                                            address pc,
   530                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
   531 {
   532   address target_pc = NULL;
   534   if (Interpreter::contains(pc)) {
   535 #ifdef CC_INTERP
   536     // C++ interpreter doesn't throw implicit exceptions
   537     ShouldNotReachHere();
   538 #else
   539     switch (exception_kind) {
   540       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
   541       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
   542       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
   543       default:                      ShouldNotReachHere();
   544     }
   545 #endif // !CC_INTERP
   546   } else {
   547     switch (exception_kind) {
   548       case STACK_OVERFLOW: {
   549         // Stack overflow only occurs upon frame setup; the callee is
   550         // going to be unwound. Dispatch to a shared runtime stub
   551         // which will cause the StackOverflowError to be fabricated
   552         // and processed.
   553         // For stack overflow in deoptimization blob, cleanup thread.
   554         if (thread->deopt_mark() != NULL) {
   555           Deoptimization::cleanup_deopt_info(thread, NULL);
   556         }
   557         return StubRoutines::throw_StackOverflowError_entry();
   558       }
   560       case IMPLICIT_NULL: {
   561         if (VtableStubs::contains(pc)) {
   562           // We haven't yet entered the callee frame. Fabricate an
   563           // exception and begin dispatching it in the caller. Since
   564           // the caller was at a call site, it's safe to destroy all
   565           // caller-saved registers, as these entry points do.
   566           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
   568           // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
   569           if (vt_stub == NULL) return NULL;
   571           if (vt_stub->is_abstract_method_error(pc)) {
   572             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
   573             return StubRoutines::throw_AbstractMethodError_entry();
   574           } else {
   575             return StubRoutines::throw_NullPointerException_at_call_entry();
   576           }
   577         } else {
   578           CodeBlob* cb = CodeCache::find_blob(pc);
   580           // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
   581           if (cb == NULL) return NULL;
   583           // Exception happened in CodeCache. Must be either:
   584           // 1. Inline-cache check in C2I handler blob,
   585           // 2. Inline-cache check in nmethod, or
    586           // 3. Implicit null exception in nmethod
   588           if (!cb->is_nmethod()) {
   589             guarantee(cb->is_adapter_blob(),
   590                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
   591             // There is no handler here, so we will simply unwind.
   592             return StubRoutines::throw_NullPointerException_at_call_entry();
   593           }
   595           // Otherwise, it's an nmethod.  Consult its exception handlers.
   596           nmethod* nm = (nmethod*)cb;
   597           if (nm->inlinecache_check_contains(pc)) {
   598             // exception happened inside inline-cache check code
   599             // => the nmethod is not yet active (i.e., the frame
   600             // is not set up yet) => use return address pushed by
   601             // caller => don't push another return address
   602             return StubRoutines::throw_NullPointerException_at_call_entry();
   603           }
   605 #ifndef PRODUCT
   606           _implicit_null_throws++;
   607 #endif
   608           target_pc = nm->continuation_for_implicit_exception(pc);
   609           guarantee(target_pc != 0, "must have a continuation point");
   610         }
   612         break; // fall through
   613       }
   616       case IMPLICIT_DIVIDE_BY_ZERO: {
   617         nmethod* nm = CodeCache::find_nmethod(pc);
   618         guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
   619 #ifndef PRODUCT
   620         _implicit_div0_throws++;
   621 #endif
   622         target_pc = nm->continuation_for_implicit_exception(pc);
   623         guarantee(target_pc != 0, "must have a continuation point");
   624         break; // fall through
   625       }
   627       default: ShouldNotReachHere();
   628     }
   630     guarantee(target_pc != NULL, "must have computed destination PC for implicit exception");
   631     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
   633     // for AbortVMOnException flag
   634     NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
   635     if (exception_kind == IMPLICIT_NULL) {
   636       Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   637     } else {
   638       Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   639     }
   640     return target_pc;
   641   }
   643   ShouldNotReachHere();
   644   return NULL;
   645 }
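        // Fallback entry installed for native methods whose implementation has not
        // been linked yet; invoking it raises UnsatisfiedLinkError.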
   648 JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
   649 {
   650   THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
   651 }
   652 JNI_END
   655 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
   656   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
   657 }
   660 #ifndef PRODUCT
   661 JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
   662   const frame f = thread->last_frame();
   663   assert(f.is_interpreted_frame(), "must be an interpreted frame");
   664 #ifndef PRODUCT
   665   methodHandle mh(THREAD, f.interpreter_frame_method());
   666   BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
   667 #endif // !PRODUCT
   668   return preserve_this_value;
   669 JRT_END
   670 #endif // !PRODUCT
   673 JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
   674   os::yield_all(attempts);
   675 JRT_END
   678 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   679   assert(obj->is_oop(), "must be a valid oop");
   680   assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
   681   instanceKlass::register_finalizer(instanceOop(obj), CHECK);
   682 JRT_END
   685 jlong SharedRuntime::get_java_tid(Thread* thread) {
   686   if (thread != NULL) {
   687     if (thread->is_Java_thread()) {
   688       oop obj = ((JavaThread*)thread)->threadObj();
   689       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
   690     }
   691   }
   692   return 0;
   693 }
   695 /**
   696  * This function ought to be a void function, but cannot be because
   697  * it gets turned into a tail-call on sparc, which runs into dtrace bug
   698  * 6254741.  Once that is fixed we can remove the dummy return value.
   699  */
   700 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
   701   return dtrace_object_alloc_base(Thread::current(), o);
   702 }
   704 int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
   705   assert(DTraceAllocProbes, "wrong call");
   706   Klass* klass = o->blueprint();
   707   int size = o->size();
   708   symbolOop name = klass->name();
   709   HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
   710                    name->bytes(), name->utf8_length(), size * HeapWordSize);
   711   return 0;
   712 }
   714 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
   715     JavaThread* thread, methodOopDesc* method))
   716   assert(DTraceMethodProbes, "wrong call");
   717   symbolOop kname = method->klass_name();
   718   symbolOop name = method->name();
   719   symbolOop sig = method->signature();
   720   HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
   721       kname->bytes(), kname->utf8_length(),
   722       name->bytes(), name->utf8_length(),
   723       sig->bytes(), sig->utf8_length());
   724   return 0;
   725 JRT_END
   727 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
   728     JavaThread* thread, methodOopDesc* method))
   729   assert(DTraceMethodProbes, "wrong call");
   730   symbolOop kname = method->klass_name();
   731   symbolOop name = method->name();
   732   symbolOop sig = method->signature();
   733   HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
   734       kname->bytes(), kname->utf8_length(),
   735       name->bytes(), name->utf8_length(),
   736       sig->bytes(), sig->utf8_length());
   737   return 0;
   738 JRT_END
    741 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
    742 // for a call currently in progress, i.e., arguments have been pushed on the
    743 // stack but the callee has not been invoked yet.  Used by: resolve virtual/static,
   744 // vtable updates, etc.  Caller frame must be compiled.
   745 Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
   746   ResourceMark rm(THREAD);
   748   // last java frame on stack (which includes native call frames)
    749   vframeStream vfst(thread, true);  // Do not skip any javaCalls
   751   return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
   752 }
   755 // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode
    756 // for a call currently in progress, i.e., arguments have been pushed on the
    757 // stack but the callee has not been invoked yet.  Caller frame must be compiled.
   758 Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
   759                                               vframeStream& vfst,
   760                                               Bytecodes::Code& bc,
   761                                               CallInfo& callinfo, TRAPS) {
   762   Handle receiver;
   763   Handle nullHandle;  //create a handy null handle for exception returns
   765   assert(!vfst.at_end(), "Java frame must exist");
   767   // Find caller and bci from vframe
   768   methodHandle caller (THREAD, vfst.method());
   769   int          bci    = vfst.bci();
   771   // Find bytecode
   772   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
   773   bc = bytecode->adjusted_invoke_code();
   774   int bytecode_index = bytecode->index();
   776   // Find receiver for non-static call
   777   if (bc != Bytecodes::_invokestatic) {
    778     // This register map must be updated since we need to find the receiver for
   779     // compiled frames. The receiver might be in a register.
   780     RegisterMap reg_map2(thread);
   781     frame stubFrame   = thread->last_frame();
   782     // Caller-frame is a compiled frame
   783     frame callerFrame = stubFrame.sender(&reg_map2);
   785     methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
   786     if (callee.is_null()) {
   787       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
   788     }
   789     // Retrieve from a compiled argument list
   790     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
   792     if (receiver.is_null()) {
   793       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
   794     }
   795   }
   797   // Resolve method. This is parameterized by bytecode.
   798   constantPoolHandle constants (THREAD, caller->constants());
   799   assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
   800   LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
   802 #ifdef ASSERT
   803   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
   804   if (bc != Bytecodes::_invokestatic) {
   805     assert(receiver.not_null(), "should have thrown exception");
   806     KlassHandle receiver_klass (THREAD, receiver->klass());
   807     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
   808                             // klass is already loaded
   809     KlassHandle static_receiver_klass (THREAD, rk);
   810     assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
   811     if (receiver_klass->oop_is_instance()) {
   812       if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
   813         tty->print_cr("ERROR: Klass not yet initialized!!");
   814         receiver_klass.print();
   815       }
   816       assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
   817     }
   818   }
   819 #endif
   821   return receiver;
   822 }
   824 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
   825   ResourceMark rm(THREAD);
   826   // We need first to check if any Java activations (compiled, interpreted)
   827   // exist on the stack since last JavaCall.  If not, we need
   828   // to get the target method from the JavaCall wrapper.
   829   vframeStream vfst(thread, true);  // Do not skip any javaCalls
   830   methodHandle callee_method;
   831   if (vfst.at_end()) {
   832     // No Java frames were found on stack since we did the JavaCall.
   833     // Hence the stack can only contain an entry_frame.  We need to
   834     // find the target method from the stub frame.
   835     RegisterMap reg_map(thread, false);
   836     frame fr = thread->last_frame();
   837     assert(fr.is_runtime_frame(), "must be a runtimeStub");
   838     fr = fr.sender(&reg_map);
   839     assert(fr.is_entry_frame(), "must be");
   840     // fr is now pointing to the entry frame.
   841     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
   842     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
   843   } else {
   844     Bytecodes::Code bc;
   845     CallInfo callinfo;
   846     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
   847     callee_method = callinfo.selected_method();
   848   }
   849   assert(callee_method()->is_method(), "must be");
   850   return callee_method;
   851 }
   853 // Resolves a call.
   854 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
   855                                            bool is_virtual,
   856                                            bool is_optimized, TRAPS) {
   857   methodHandle callee_method;
   858   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
   859   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
   860     int retry_count = 0;
   861     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
   862            callee_method->method_holder() != SystemDictionary::object_klass()) {
   863       // If has a pending exception then there is no need to re-try to
   864       // resolve this method.
   865       // If the method has been redefined, we need to try again.
   866       // Hack: we have no way to update the vtables of arrays, so don't
   867       // require that java.lang.Object has been updated.
    869       // It is very unlikely that a method is redefined more than 100 times
    870       // in the middle of resolution; looping here more than 100 times
    871       // would indicate a bug.
   872       guarantee((retry_count++ < 100),
   873                 "Could not resolve to latest version of redefined method");
   874       // method is redefined in the middle of resolve so re-try.
   875       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
   876     }
   877   }
   878   return callee_method;
   879 }
   881 // Resolves a call.  The compilers generate code for calls that go here
   882 // and are patched with the real destination of the call.
   883 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
   884                                            bool is_virtual,
   885                                            bool is_optimized, TRAPS) {
   887   ResourceMark rm(thread);
   888   RegisterMap cbl_map(thread, false);
   889   frame caller_frame = thread->last_frame().sender(&cbl_map);
   891   CodeBlob* cb = caller_frame.cb();
   892   guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod");
   893   // make sure caller is not getting deoptimized
   894   // and removed before we are done with it.
   895   // CLEANUP - with lazy deopt shouldn't need this lock
   896   nmethodLocker caller_lock((nmethod*)cb);
   899   // determine call info & receiver
   900   // note: a) receiver is NULL for static calls
   901   //       b) an exception is thrown if receiver is NULL for non-static calls
   902   CallInfo call_info;
   903   Bytecodes::Code invoke_code = Bytecodes::_illegal;
   904   Handle receiver = find_callee_info(thread, invoke_code,
   905                                      call_info, CHECK_(methodHandle()));
   906   methodHandle callee_method = call_info.selected_method();
   908   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
   909          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
   911 #ifndef PRODUCT
   912   // tracing/debugging/statistics
   913   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
   914                 (is_virtual) ? (&_resolve_virtual_ctr) :
   915                                (&_resolve_static_ctr);
   916   Atomic::inc(addr);
   918   if (TraceCallFixup) {
   919     ResourceMark rm(thread);
   920     tty->print("resolving %s%s (%s) call to",
   921       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
   922       Bytecodes::name(invoke_code));
   923     callee_method->print_short_name(tty);
   924     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   925   }
   926 #endif
   928   // Compute entry points. This might require generation of C2I converter
   929   // frames, so we cannot be holding any locks here. Furthermore, the
   930   // computation of the entry points is independent of patching the call.  We
   931   // always return the entry-point, but we only patch the stub if the call has
   932   // not been deoptimized.  Return values: For a virtual call this is an
   933   // (cached_oop, destination address) pair. For a static call/optimized
   934   // virtual this is just a destination address.
   936   StaticCallInfo static_call_info;
   937   CompiledICInfo virtual_call_info;
   940   // Make sure the callee nmethod does not get deoptimized and removed before
   941   // we are done patching the code.
   942   nmethod* nm = callee_method->code();
   943   nmethodLocker nl_callee(nm);
   944 #ifdef ASSERT
   945   address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below
   946 #endif
   948   if (is_virtual) {
   949     assert(receiver.not_null(), "sanity check");
   950     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
   951     KlassHandle h_klass(THREAD, receiver->klass());
   952     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
   953                      is_optimized, static_bound, virtual_call_info,
   954                      CHECK_(methodHandle()));
   955   } else {
   956     // static call
   957     CompiledStaticCall::compute_entry(callee_method, static_call_info);
   958   }
   960   // grab lock, check for deoptimization and potentially patch caller
   961   {
   962     MutexLocker ml_patch(CompiledIC_lock);
    964     // Now that we are ready to patch: if the methodOop was redefined then
    965     // don't update the call site and let the caller retry.
   967     if (!callee_method->is_old()) {
   968 #ifdef ASSERT
   969       // We must not try to patch to jump to an already unloaded method.
   970       if (dest_entry_point != 0) {
   971         assert(CodeCache::find_blob(dest_entry_point) != NULL,
   972                "should not unload nmethod while locked");
   973       }
   974 #endif
   975       if (is_virtual) {
   976         CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
   977         if (inline_cache->is_clean()) {
   978           inline_cache->set_to_monomorphic(virtual_call_info);
   979         }
   980       } else {
   981         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
   982         if (ssc->is_clean()) ssc->set(static_call_info);
   983       }
   984     }
   986   } // unlock CompiledIC_lock
   988   return callee_method;
   989 }
   992 // Inline caches exist only in compiled code
   993 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
   994 #ifdef ASSERT
   995   RegisterMap reg_map(thread, false);
   996   frame stub_frame = thread->last_frame();
   997   assert(stub_frame.is_runtime_frame(), "sanity check");
   998   frame caller_frame = stub_frame.sender(&reg_map);
   999   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
  1000 #endif /* ASSERT */
  1002   methodHandle callee_method;
  1003   JRT_BLOCK
  1004     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
  1005     // Return methodOop through TLS
  1006     thread->set_vm_result(callee_method());
  1007   JRT_BLOCK_END
  1008   // return compiled code entry point after potential safepoints
  1009   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1010   return callee_method->verified_code_entry();
  1011 JRT_END
  1014 // Handle call site that has been made non-entrant
  1015 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  1016   // 6243940 We might end up in here if the callee is deoptimized
  1017   // as we race to call it.  We don't want to take a safepoint if
  1018   // the caller was interpreted because the caller frame will look
  1019   // interpreted to the stack walkers and arguments are now
  1020   // "compiled" so it is much better to make this transition
  1021   // invisible to the stack walking code. The i2c path will
  1022   // place the callee method in the callee_target. It is stashed
  1023   // there because if we try and find the callee by normal means a
   1024   // safepoint is possible and we could have trouble gc'ing the compiled args.
  1025   RegisterMap reg_map(thread, false);
  1026   frame stub_frame = thread->last_frame();
  1027   assert(stub_frame.is_runtime_frame(), "sanity check");
  1028   frame caller_frame = stub_frame.sender(&reg_map);
  1029   if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) {
  1030     methodOop callee = thread->callee_target();
  1031     guarantee(callee != NULL && callee->is_method(), "bad handshake");
  1032     thread->set_vm_result(callee);
  1033     thread->set_callee_target(NULL);
   1034     return callee->get_c2i_entry();
   1035   }
   1037   // Must be compiled to compiled path which is safe to stackwalk
  1038   methodHandle callee_method;
  1039   JRT_BLOCK
  1040     // Force resolving of caller (if we called from compiled frame)
  1041     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
  1042     thread->set_vm_result(callee_method());
  1043   JRT_BLOCK_END
  1044   // return compiled code entry point after potential safepoints
  1045   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1046   return callee_method->verified_code_entry();
  1047 JRT_END
  1050 // resolve a static call and patch code
  1051 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  1052   methodHandle callee_method;
  1053   JRT_BLOCK
  1054     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
  1055     thread->set_vm_result(callee_method());
  1056   JRT_BLOCK_END
  1057   // return compiled code entry point after potential safepoints
  1058   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1059   return callee_method->verified_code_entry();
  1060 JRT_END
  1063 // resolve virtual call and update inline cache to monomorphic
  1064 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  1065   methodHandle callee_method;
  1066   JRT_BLOCK
  1067     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
  1068     thread->set_vm_result(callee_method());
  1069   JRT_BLOCK_END
  1070   // return compiled code entry point after potential safepoints
  1071   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1072   return callee_method->verified_code_entry();
  1073 JRT_END
  1076 // Resolve a virtual call that can be statically bound (e.g., always
  1077 // monomorphic, so it has no inline cache).  Patch code to resolved target.
  1078 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  1079   methodHandle callee_method;
  1080   JRT_BLOCK
  1081     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
  1082     thread->set_vm_result(callee_method());
  1083   JRT_BLOCK_END
  1084   // return compiled code entry point after potential safepoints
  1085   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1086   return callee_method->verified_code_entry();
  1087 JRT_END
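        // Handles an inline-cache miss: re-resolves the callee and, under the
        // CompiledIC_lock, transitions the cache (clean -> monomorphic ->
        // megamorphic) based on the receiver actually observed. Statically
        // bindable sites are redirected through reresolve_call_site instead.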
  1093 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  1094   ResourceMark rm(thread);
  1095   CallInfo call_info;
  1096   Bytecodes::Code bc;
  1098   // receiver is NULL for static calls. An exception is thrown for NULL
  1099   // receivers for non-static calls
  1100   Handle receiver = find_callee_info(thread, bc, call_info,
  1101                                      CHECK_(methodHandle()));
  1102   // Compiler1 can produce virtual call sites that can actually be statically bound
   1103   // If we fell through to below we would think that the site was going megamorphic
   1104   // when in fact the site can never miss. Worse, because we'd think it was megamorphic
   1105   // we'd try to do a vtable dispatch; however, methods that can be statically bound
   1106   // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
   1107   // reresolution of the call site (as if we did a handle_wrong_method and not a
   1108   // plain ic_miss) and the site will be converted to an optimized virtual call site
  1109   // never to miss again. I don't believe C2 will produce code like this but if it
  1110   // did this would still be the correct thing to do for it too, hence no ifdef.
  1111   //
  1112   if (call_info.resolved_method()->can_be_statically_bound()) {
  1113     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
  1114     if (TraceCallFixup) {
  1115       RegisterMap reg_map(thread, false);
  1116       frame caller_frame = thread->last_frame().sender(&reg_map);
  1117       ResourceMark rm(thread);
  1118       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
  1119       callee_method->print_short_name(tty);
   1120       tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
   1121       tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1122     }
   1123     return callee_method;
   1124   }
  1126   methodHandle callee_method = call_info.selected_method();
  1128   bool should_be_mono = false;
  1130 #ifndef PRODUCT
  1131   Atomic::inc(&_ic_miss_ctr);
  1133   // Statistics & Tracing
  1134   if (TraceCallFixup) {
  1135     ResourceMark rm(thread);
  1136     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
  1137     callee_method->print_short_name(tty);
   1138     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1139   }
   1141   if (ICMissHistogram) {
  1142     MutexLocker m(VMStatistic_lock);
  1143     RegisterMap reg_map(thread, false);
  1144     frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
  1145     // produce statistics under the lock
   1146     trace_ic_miss(f.pc());
   1147   }
   1148 #endif
  1150   // install an event collector so that when a vtable stub is created the
  1151   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  1152   // event can't be posted when the stub is created as locks are held
  1153   // - instead the event will be deferred until the event collector goes
  1154   // out of scope.
  1155   JvmtiDynamicCodeEventCollector event_collector;
  1157   // Update inline cache to megamorphic. Skip update if caller has been
  1158   // made non-entrant or we are called from interpreted.
  1159   { MutexLocker ml_patch (CompiledIC_lock);
  1160     RegisterMap reg_map(thread, false);
  1161     frame caller_frame = thread->last_frame().sender(&reg_map);
  1162     CodeBlob* cb = caller_frame.cb();
  1163     if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
  1164       // Not a non-entrant nmethod, so find inline_cache
  1165       CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
  1166       bool should_be_mono = false;
  1167       if (inline_cache->is_optimized()) {
  1168         if (TraceCallFixup) {
  1169           ResourceMark rm(thread);
  1170           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
  1171           callee_method->print_short_name(tty);
   1172           tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1173         }
   1174         should_be_mono = true;
  1175       } else {
  1176         compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
  1177         if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
  1179           if (receiver()->klass() == ic_oop->holder_klass()) {
  1180             // This isn't a real miss. We must have seen that compiled code
  1181             // is now available and we want the call site converted to a
  1182             // monomorphic compiled call site.
  1183             // We can't assert for callee_method->code() != NULL because it
  1184             // could have been deoptimized in the meantime
  1185             if (TraceCallFixup) {
  1186               ResourceMark rm(thread);
  1187               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
  1188               callee_method->print_short_name(tty);
   1189               tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
   1190             }
   1191             should_be_mono = true;
   1192           }
   1193         }
   1194       }
   1196       if (should_be_mono) {
  1198         // We have a path that was monomorphic but was going interpreted
  1199         // and now we have (or had) a compiled entry. We correct the IC
  1200         // by using a new icBuffer.
  1201         CompiledICInfo info;
  1202         KlassHandle receiver_klass(THREAD, receiver()->klass());
  1203         inline_cache->compute_monomorphic_entry(callee_method,
  1204                                                 receiver_klass,
  1205                                                 inline_cache->is_optimized(),
  1206                                                 false,
  1207                                                 info, CHECK_(methodHandle()));
  1208         inline_cache->set_to_monomorphic(info);
  1209       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
  1210         // Change to megamorphic
  1211         inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
   1212       } else {
   1213         // Either clean or megamorphic
   1214       }
   1215     }
   1216   } // Release CompiledIC_lock
   1218   return callee_method;
   1219 }
  1221 //
  1222 // Resets a call-site in compiled code so it will get resolved again.
   1223 // This routine handles virtual call sites, optimized virtual call
   1224 // sites, and static call sites. Typically used to change a call site's
   1225 // destination from compiled to interpreted.
  1226 //
  1227 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  1228   ResourceMark rm(thread);
  1229   RegisterMap reg_map(thread, false);
  1230   frame stub_frame = thread->last_frame();
  1231   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  1232   frame caller = stub_frame.sender(&reg_map);
  1234   // Do nothing if the frame isn't a live compiled frame.
  1235   // nmethod could be deoptimized by the time we get here
  1236   // so no update to the caller is needed.
  1238   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
  1240     address pc = caller.pc();
  1241     Events::log("update call-site at pc " INTPTR_FORMAT, pc);
  1243     // Default call_addr is the location of the "basic" call.
   1244     // Determine the address of the call we are re-resolving. With
  1245     // Inline Caches we will always find a recognizable call.
  1246     // With Inline Caches disabled we may or may not find a
  1247     // recognizable call. We will always find a call for static
  1248     // calls and for optimized virtual calls. For vanilla virtual
  1249     // calls it depends on the state of the UseInlineCaches switch.
  1250     //
  1251     // With Inline Caches disabled we can get here for a virtual call
  1252     // for two reasons:
  1253     //   1 - calling an abstract method. The vtable for abstract methods
   1254     //       will run us through handle_wrong_method and we will eventually
   1255     //       end up in the interpreter to throw the AbstractMethodError.
  1256     //   2 - a racing deoptimization. We could be doing a vanilla vtable
  1257     //       call and between the time we fetch the entry address and
  1258     //       we jump to it the target gets deoptimized. Similar to 1
   1259     //       we will wind up in the interpreter (through a c2i with c2).
  1260     //
   1261     address call_addr = NULL;
   1262     {
   1263       // Get call instruction under lock because another thread may be
  1264       // busy patching it.
  1265       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1266       // Location of call instruction
  1267       if (NativeCall::is_call_before(pc)) {
  1268         NativeCall *ncall = nativeCall_before(pc);
   1269         call_addr = ncall->instruction_address();
   1270       }
   1271     }
   1273     // Check for static or virtual call
  1274     bool is_static_call = false;
  1275     nmethod* caller_nm = CodeCache::find_nmethod(pc);
  1276     // Make sure nmethod doesn't get deoptimized and removed until
  1277     // this is done with it.
  1278     // CLEANUP - with lazy deopt shouldn't need this lock
  1279     nmethodLocker nmlock(caller_nm);
  1281     if (call_addr != NULL) {
  1282       RelocIterator iter(caller_nm, call_addr, call_addr+1);
  1283       int ret = iter.next(); // Get item
  1284       if (ret) {
  1285         assert(iter.addr() == call_addr, "must find call");
  1286         if (iter.type() == relocInfo::static_call_type) {
  1287           is_static_call = true;
  1288         } else {
  1289           assert(iter.type() == relocInfo::virtual_call_type ||
  1290                  iter.type() == relocInfo::opt_virtual_call_type
  1291                 , "unexpected relocInfo. type");
  1293       } else {
  1294         assert(!UseInlineCaches, "relocation info. must exist for this address");
  1295       }
  1297       // Cleaning the inline cache will force a new resolve. This is more robust
  1298       // than directly setting it to the new destination, since resolving of calls
  1299       // is always done through the same code path. (experience shows that it
  1300       // leads to very-hard-to-track-down bugs if an inline cache gets updated
  1301       // to a wrong method). It should not be performance critical, since the
  1302       // resolve is only done once.
  1304       MutexLocker ml(CompiledIC_lock);
  1305       //
  1306       // We do not patch the call site if the nmethod has been made non-entrant
  1307       // as it is a waste of time
  1308       //
  1309       if (caller_nm->is_in_use()) {
  1310         if (is_static_call) {
  1311           CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
  1312           ssc->set_to_clean();
  1313         } else {
  1314           // compiled, dispatched call (which used to call an interpreted method)
  1315           CompiledIC* inline_cache = CompiledIC_at(call_addr);
  1316           inline_cache->set_to_clean();
  1317         }
  1318       }
  1319     }
  1320   }
  1323   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
  1326 #ifndef PRODUCT
  1327   Atomic::inc(&_wrong_method_ctr);
  1329   if (TraceCallFixup) {
  1330     ResourceMark rm(thread);
  1331     tty->print("handle_wrong_method reresolving call to");
  1332     callee_method->print_short_name(tty);
  1333     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1335 #endif
  1337   return callee_method;
  1338 }
  1340 // ---------------------------------------------------------------------------
  1341 // We are calling the interpreter via a c2i. Normally this would mean that
  1342 // we were called by a compiled method. However we could have lost a race
  1343 // where we went int -> i2c -> c2i and so the caller could in fact be
  1344 // interpreted. If the caller is compiled we attempt to patch the caller
  1345 // so that it no longer calls into the interpreter.
  1346 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  1347   methodOop moop(method);
  1349   address entry_point = moop->from_compiled_entry();
  1351   // It's possible that deoptimization can occur at a call site which hasn't
  1352   // been resolved yet, in which case this function will be called from
  1353   // an nmethod that has been patched for deopt and we can ignore the
  1354   // request for a fixup.
  1355   // Also it is possible that we lost a race and from_compiled_entry
  1356   // is now back to the i2c; in that case we don't need to patch, and if
  1357   // we did we'd leap into space because the call site needs to use the
  1358   // "to interpreter" stub in order to load up the methodOop. Don't
  1359   // ask me how I know this...
  1360   //
  1362   CodeBlob* cb = CodeCache::find_blob(caller_pc);
  1363   if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
  1364     return;
  1365   }
  1367   // There is a benign race here. We could be attempting to patch to a compiled
  1368   // entry point at the same time the callee is being deoptimized. If that is
  1369   // the case then entry_point may in fact point to a c2i and we'd patch the
  1370   // call site with the same old data. clear_code will set code() to NULL
  1371   // at the end of it. If we happen to see that NULL then we can skip trying
  1372   // to patch. If we hit the window where the callee has a c2i in the
  1373   // from_compiled_entry and the NULL isn't present yet then we lose the race
  1374 // and patch the code with the same old data. Such is life.
  1376   if (moop->code() == NULL) return;
  1378   if (((nmethod*)cb)->is_in_use()) {
  1380     // Expect to find a native call there (unless it was a vtable dispatch with inline caches disabled)
  1381     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1382     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
  1383       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
  1384       //
  1385       // bug 6281185. We might get here after resolving a call site to a vanilla
  1386       // virtual call. Because the resolvee uses the verified entry it may then
  1387       // see compiled code and attempt to patch the site by calling us. This would
  1388       // then incorrectly convert the call site to optimized and it's downhill from
  1389       // there. If you're lucky you'll get the assert in the bugid, if not you've
  1390       // just made a call site that could be megamorphic into a monomorphic site
  1391       // for the rest of its life! Just another racing bug in the life of
  1392       // fixup_callers_callsite ...
  1393       //
  1394       RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address());
  1395       iter.next();
  1396       assert(iter.has_current(), "must have a reloc at java call site");
  1397       relocInfo::relocType typ = iter.reloc()->type();
  1398       if ( typ != relocInfo::static_call_type &&
  1399            typ != relocInfo::opt_virtual_call_type &&
  1400            typ != relocInfo::static_stub_type) {
  1401         return;
  1402       }
  1403       address destination = call->destination();
  1404       if (destination != entry_point) {
  1405         CodeBlob* callee = CodeCache::find_blob(destination);
  1406         // callee == cb seems weird. It means calling the interpreter through a stub.
  1407         if (callee == cb || callee->is_adapter_blob()) {
  1408           // static call or optimized virtual
  1409           if (TraceCallFixup) {
  1410             tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1411             moop->print_short_name(tty);
  1412             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1414           call->set_destination_mt_safe(entry_point);
  1415         } else {
  1416           if (TraceCallFixup) {
  1417             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1418             moop->print_short_name(tty);
  1419             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1421           // The assert below is too strong; the destination could also be a resolve destination.
  1422           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
  1423         }
  1424       } else {
  1425           if (TraceCallFixup) {
  1426             tty->print("already patched  callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1427             moop->print_short_name(tty);
  1428             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1434 IRT_END
  1437 // same as JVM_Arraycopy, but called directly from compiled code
  1438 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
  1439                                                 oopDesc* dest, jint dest_pos,
  1440                                                 jint length,
  1441                                                 JavaThread* thread)) {
  1442 #ifndef PRODUCT
  1443   _slow_array_copy_ctr++;
  1444 #endif
  1445   // Check if we have null pointers
  1446   if (src == NULL || dest == NULL) {
  1447     THROW(vmSymbols::java_lang_NullPointerException());
  1448   }
  1449   // Do the copy.  The casts to arrayOop are required by the copy_array API,
  1450   // even though the copy_array API also performs dynamic checks to ensure
  1451   // that src and dest are truly arrays (and are conformable).
  1452   // The copy_array mechanism is awkward and could be removed, but
  1453   // the compilers don't call this function except as a last resort,
  1454   // so it probably doesn't matter.
  1455   Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
  1456                                         (arrayOopDesc*)dest, dest_pos,
  1457                                         length, thread);
  1458 }
  1459 JRT_END
  1461 char* SharedRuntime::generate_class_cast_message(
  1462     JavaThread* thread, const char* objName) {
  1464   // Get target class name from the checkcast instruction
  1465   vframeStream vfst(thread, true);
  1466   assert(!vfst.at_end(), "Java frame must exist");
  1467   Bytecode_checkcast* cc = Bytecode_checkcast_at(
  1468     vfst.method()->bcp_from(vfst.bci()));
  1469   Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
  1470     cc->index(), thread));
  1471   return generate_class_cast_message(objName, targetKlass->external_name());
  1472 }
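       // For example, a failed checkcast of a java.lang.String to a java.lang.Thread
       // produces: "java.lang.String cannot be cast to java.lang.Thread".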
  1474 char* SharedRuntime::generate_class_cast_message(
  1475     const char* objName, const char* targetKlassName) {
  1476   const char* desc = " cannot be cast to ";
  1477   size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
  1479   char* message = NEW_RESOURCE_ARRAY(char, msglen);
  1480   if (NULL == message) {
  1481     // Shouldn't happen, but don't cause even more problems if it does
  1482     message = const_cast<char*>(objName);
  1483   } else {
  1484     jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
  1485   }
  1486   return message;
  1487 }
  1489 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  1490   (void) JavaThread::current()->reguard_stack();
  1491 JRT_END
  1494 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
  1495 #ifndef PRODUCT
  1496 int SharedRuntime::_monitor_enter_ctr=0;
  1497 #endif
  1498 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
  1499   oop obj(_obj);
  1500 #ifndef PRODUCT
  1501   _monitor_enter_ctr++;             // monitor enter slow
  1502 #endif
  1503   if (PrintBiasedLockingStatistics) {
  1504     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  1505   }
  1506   Handle h_obj(THREAD, obj);
  1507   if (UseBiasedLocking) {
  1508     // Retry fast entry if bias is revoked to avoid unnecessary inflation
  1509     ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  1510   } else {
  1511     ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
  1512   }
  1513   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
  1514 JRT_END
  1516 #ifndef PRODUCT
  1517 int SharedRuntime::_monitor_exit_ctr=0;
  1518 #endif
  1519 // Handles the uncommon cases of monitor unlocking in compiled code
  1520 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
  1521    oop obj(_obj);
  1522 #ifndef PRODUCT
  1523   _monitor_exit_ctr++;              // monitor exit slow
  1524 #endif
  1525   Thread* THREAD = JavaThread::current();
  1526   // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
  1527   // testing was never able to fire the assert that guarded it, so I have removed it.
  1528   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
  1529 #undef MIGHT_HAVE_PENDING
  1530 #ifdef MIGHT_HAVE_PENDING
  1531   // Save and restore any pending_exception around the exception mark.
  1532   // While the slow_exit must not throw an exception, we could come into
  1533   // this routine with one set.
  1534   oop pending_excep = NULL;
  1535   const char* pending_file;
  1536   int pending_line;
  1537   if (HAS_PENDING_EXCEPTION) {
  1538     pending_excep = PENDING_EXCEPTION;
  1539     pending_file  = THREAD->exception_file();
  1540     pending_line  = THREAD->exception_line();
  1541     CLEAR_PENDING_EXCEPTION;
  1542   }
  1543 #endif /* MIGHT_HAVE_PENDING */
  1545   {
  1546     // Exit must be non-blocking, and therefore no exceptions can be thrown.
  1547     EXCEPTION_MARK;
  1548     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  1549   }
  1551 #ifdef MIGHT_HAVE_PENDING
  1552   if (pending_excep != NULL) {
  1553     THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
  1554   }
  1555 #endif /* MIGHT_HAVE_PENDING */
  1556 JRT_END
  1558 #ifndef PRODUCT
  1560 void SharedRuntime::print_statistics() {
  1561   ttyLocker ttyl;
  1562   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
  1564   if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
  1565   if (_monitor_exit_ctr  ) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
  1566   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
  1568   SharedRuntime::print_ic_miss_histogram();
  1570   if (CountRemovableExceptions) {
  1571     if (_nof_removable_exceptions > 0) {
  1572       Unimplemented(); // this counter is not yet incremented
  1573       tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
  1577   // Dump the JRT_ENTRY counters
  1578   if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
  1579   if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
  1580   if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
  1581   if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
  1582   if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
  1583   if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
  1584   if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
  1586   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
  1587   tty->print_cr("%5d wrong method", _wrong_method_ctr );
  1588   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
  1589   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
  1590   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
  1592   if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
  1593   if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
  1594   if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
  1595   if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
  1596   if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
  1597   if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
  1598   if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
  1599   if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
  1600   if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
  1601   if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
  1602   if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
  1603   if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
  1604   if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
  1605   if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
  1606   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
  1607   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
  1609   if (xtty != NULL)  xtty->tail("statistics");
  1610 }
  1612 inline double percent(int x, int y) {
  1613   return 100.0 * x / MAX2(y, 1);
  1614 }
  1616 class MethodArityHistogram {
  1617  public:
  1618   enum { MAX_ARITY = 256 };
  1619  private:
  1620   static int _arity_histogram[MAX_ARITY];     // histogram of #args
  1621   static int _size_histogram[MAX_ARITY];      // histogram of arg size in words
  1622   static int _max_arity;                      // max. arity seen
  1623   static int _max_size;                       // max. arg size seen
  1625   static void add_method_to_histogram(nmethod* nm) {
  1626     methodOop m = nm->method();
  1627     ArgumentCount args(m->signature());
  1628     int arity   = args.size() + (m->is_static() ? 0 : 1);
  1629     int argsize = m->size_of_parameters();
  1630     arity   = MIN2(arity, MAX_ARITY-1);
  1631     argsize = MIN2(argsize, MAX_ARITY-1);
  1632     int count = nm->method()->compiled_invocation_count();
  1633     _arity_histogram[arity]  += count;
  1634     _size_histogram[argsize] += count;
  1635     _max_arity = MAX2(_max_arity, arity);
  1636     _max_size  = MAX2(_max_size, argsize);
  1637   }
  1639   void print_histogram_helper(int n, int* histo, const char* name) {
  1640     const int N = MIN2(5, n);
  1641     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
  1642     double sum = 0;
  1643     double weighted_sum = 0;
  1644     int i;
  1645     for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
  1646     double rest = sum;
  1647     double percent = sum / 100;
  1648     for (i = 0; i <= N; i++) {
  1649       rest -= histo[i];
  1650       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
  1652     tty->print_cr("rest: %7d (%5.1f%%))", (int)rest, rest / percent);
  1653     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
  1656   void print_histogram() {
  1657     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
  1658     print_histogram_helper(_max_arity, _arity_histogram, "arity");
  1659     tty->print_cr("\nSame for parameter size (in words):");
  1660     print_histogram_helper(_max_size, _size_histogram, "size");
  1661     tty->cr();
  1662   }
  1664  public:
  1665   MethodArityHistogram() {
  1666     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1667     _max_arity = _max_size = 0;
  1668     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
  1669     CodeCache::nmethods_do(add_method_to_histogram);
  1670     print_histogram();
  1671   }
  1672 };
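       // Constructing a MethodArityHistogram (as print_call_statistics does below)
       // takes the CodeCache lock, folds every nmethod's compiled invocation count
       // into the two tables, and prints both histograms as a side effect.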
  1674 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
  1675 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
  1676 int MethodArityHistogram::_max_arity;
  1677 int MethodArityHistogram::_max_size;
  1679 void SharedRuntime::print_call_statistics(int comp_total) {
  1680   tty->print_cr("Calls from compiled code:");
  1681   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
  1682   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
  1683   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
  1684   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
  1685   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
  1686   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
  1687   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
  1688   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
  1689   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
  1690   tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
  1691   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
  1692   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
  1693   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
  1694   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
  1695   tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
  1696   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
  1697   tty->cr();
  1698   tty->print_cr("Note 1: counter updates are not MT-safe.");
  1699   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
  1700   tty->print_cr("        %% in nested categories are relative to their category");
  1701   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
  1702   tty->cr();
  1704   MethodArityHistogram h;
  1705 }
  1706 #endif
  1709 // ---------------------------------------------------------------------------
  1710 // Implementation of AdapterHandlerLibrary
  1711 const char* AdapterHandlerEntry::name = "I2C/C2I adapters";
  1712 GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL;
  1713 GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL;
  1714 const int AdapterHandlerLibrary_size = 16*K;
  1715 u_char                   AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32];
  1717 void AdapterHandlerLibrary::initialize() {
  1718   if (_fingerprints != NULL) return;
  1719   _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true);
  1720   _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true);
  1721   // Index 0 reserved for the slow path handler
  1722   _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
  1723   _handlers->append(NULL);
  1725   // Create a special handler for abstract methods.  Abstract methods
  1726   // are never compiled so an i2c entry is somewhat meaningless, but
  1727 // fill it in with something appropriate just in case.  Pass the
  1728 // handle-wrong-method stub for the c2i transitions.
  1729   address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
  1730   _fingerprints->append(0/*the never-allowed 0 fingerprint*/);
  1731   assert(_handlers->length() == AbstractMethodHandler, "in wrong slot");
  1732   _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(),
  1733                                             wrong_method, wrong_method));
  1734 }
  1736 int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
  1737   // Use customized signature handler.  Need to lock around updates to the
  1738   // _fingerprints array (it is not safe for concurrent readers and a single
  1739   // writer: this can be fixed if it becomes a problem).
  1741   // Get the address of the ic_miss handlers before we grab the
  1742   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
  1743   // was caused by the initialization of the stubs happening
  1744   // while we held the lock and then notifying jvmti while
  1745   // holding it. This just forces the initialization to be a little
  1746   // earlier.
  1747   address ic_miss = SharedRuntime::get_ic_miss_stub();
  1748   assert(ic_miss != NULL, "must have handler");
  1750   int result;
  1751   BufferBlob *B = NULL;
  1752   uint64_t fingerprint;
  1753   {
  1754     MutexLocker mu(AdapterHandlerLibrary_lock);
  1755     // make sure data structure is initialized
  1756     initialize();
  1758     if (method->is_abstract()) {
  1759       return AbstractMethodHandler;
  1760     }
  1762     // Lookup method signature's fingerprint
  1763     fingerprint = Fingerprinter(method).fingerprint();
  1764     assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" );
  1765     // Fingerprints are small fixed-size condensed representations of
  1766     // signatures.  If the signature is too large, it won't fit in a
  1767     // fingerprint.  Signatures which cannot support a fingerprint get a new i2c
  1768     // adapter gen'd each time, instead of searching the cache for one.  This -1
  1769     // game can be avoided if I compared signatures instead of using
  1770     // fingerprints.  However, -1 fingerprints are very rare.
  1771     if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint
  1772       // Turns out i2c adapters do not care what the return value is.  Mask it
  1773       // out so signatures that only differ in return type will share the same
  1774       // adapter.
  1775       fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size);
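             // E.g. "int f(int)" and "void g(int)" differ only in the result bits
             // of their fingerprints, so after masking they share one adapter pair.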
  1776       // Search for a prior existing i2c/c2i adapter
  1777       int index = _fingerprints->find(fingerprint);
  1778       if( index >= 0 ) return index; // Found existing handlers?
  1779     } else {
  1780       // Annoyingly, I end up adding -1 fingerprints to the array of handlers,
  1781       // because I need a unique handler index.  It cannot be scanned for
  1782       // because all -1's look alike.  Instead, the matching index is passed out
  1783       // and immediately used to collect the 2 return values (the c2i and i2c
  1784     // adapters).
  1785     }
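           // (So a -1, i.e. unfingerprintable, signature always falls through to
           // the generation code below and gets a fresh slot appended at the end.)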
  1787     // Create I2C & C2I handlers
  1788     ResourceMark rm;
  1789     // Improve alignment slightly
  1790     u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
  1791     CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
  1792     short buffer_locs[20];
  1793     buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
  1794                                            sizeof(buffer_locs)/sizeof(relocInfo));
  1795     MacroAssembler _masm(&buffer);
  1797     // Fill in the signature array, for the calling-convention call.
  1798     int total_args_passed = method->size_of_parameters(); // All args on stack
  1800     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
  1801     VMRegPair  * regs   = NEW_RESOURCE_ARRAY(VMRegPair  ,total_args_passed);
  1802     int i=0;
  1803     if( !method->is_static() )  // Pass in receiver first
  1804       sig_bt[i++] = T_OBJECT;
  1805     for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
  1806       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  1807       if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
  1808         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  1809     }
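           // E.g. an instance method with signature (IJ)V yields
           // sig_bt = { T_OBJECT, T_INT, T_LONG, T_VOID } and total_args_passed == 4,
           // since size_of_parameters() counts Java slots, not declared parameters.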
  1810     assert( i==total_args_passed, "" );
  1812     // Now get the re-packed compiled-Java layout.
  1813     int comp_args_on_stack;
  1815     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
  1816     comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  1818     AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
  1819                                                                         total_args_passed,
  1820                                                                         comp_args_on_stack,
  1821                                                                         sig_bt,
  1822                                                                         regs);
  1824     B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
  1825     if (B == NULL) {
  1826       // CodeCache is full, disable compilation
  1827       // Ought to log this but compile log is only per compile thread
  1828       // and we're some nondescript Java thread.
  1829       UseInterpreter = true;
  1830       if (UseCompiler || AlwaysCompileLoopMethods ) {
  1831 #ifndef PRODUCT
  1832         warning("CodeCache is full. Compiler has been disabled");
  1833         if (CompileTheWorld || ExitOnFullCodeCache) {
  1834           before_exit(JavaThread::current());
  1835           exit_globals(); // will delete tty
  1836           vm_direct_exit(CompileTheWorld ? 0 : 1);
  1837         }
  1838 #endif
  1839         UseCompiler               = false;
  1840         AlwaysCompileLoopMethods  = false;
  1841       }
  1842       return 0; // Out of CodeCache space (_handlers[0] == NULL)
  1843     }
  1844     entry->relocate(B->instructions_begin());
  1845 #ifndef PRODUCT
  1846     // debugging support
  1847     if (PrintAdapterHandlers) {
  1848       tty->cr();
  1849       tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)",
  1850                     _handlers->length(), (method->is_static() ? "static" : "receiver"),
  1851                     method->signature()->as_C_string(), fingerprint, buffer.code_size() );
  1852       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
  1853       Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size());
  1854     }
  1855 #endif
  1857     // add handlers to library
  1858     _fingerprints->append(fingerprint);
  1859     _handlers->append(entry);
  1860     // set handler index
  1861     assert(_fingerprints->length() == _handlers->length(), "sanity check");
  1862     result = _fingerprints->length() - 1;
  1863   }
  1864   // Outside of the lock
  1865   if (B != NULL) {
  1866     char blob_id[256];
  1867     jio_snprintf(blob_id,
  1868                  sizeof(blob_id),
  1869                  "%s(" PTR64_FORMAT ")@" PTR_FORMAT,
  1870                  AdapterHandlerEntry::name,
  1871                  fingerprint,
  1872                  B->instructions_begin());
  1873     VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
  1874     Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end());
  1876     if (JvmtiExport::should_post_dynamic_code_generated()) {
  1877       JvmtiExport::post_dynamic_code_generated(blob_id,
  1878                                                B->instructions_begin(),
  1879                                                B->instructions_end());
  1880     }
  1881   }
  1882   return result;
  1883 }
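       // relocate() below rebases the entry points recorded by generate_i2c2i_adapters
       // once the code has been copied out of the shared _buffer into its permanent
       // BufferBlob; computing the delta from _i2c_entry relies on the i2c entry
       // being the first code emitted into the buffer.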
  1885 void AdapterHandlerEntry::relocate(address new_base) {
  1886     ptrdiff_t delta = new_base - _i2c_entry;
  1887     _i2c_entry += delta;
  1888     _c2i_entry += delta;
  1889     _c2i_unverified_entry += delta;
  1890 }
  1892 // Create a native wrapper for this native method.  The wrapper converts the
  1893 // java compiled calling convention to the native convention, handlizes
  1894 // arguments, and transitions to native.  On return from the native code we
  1895 // transition back to Java, blocking if a safepoint is in progress.
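       // (Here "handlizes" means raw oop arguments are wrapped in handles so they
       // remain valid across a GC; the transition is the usual switch between the
       // _thread_in_Java and _thread_in_native states.)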
  1896 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
  1897   ResourceMark rm;
  1898   nmethod* nm = NULL;
  1900   if (PrintCompilation) {
  1901     ttyLocker ttyl;
  1902     tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
  1903     method->print_short_name(tty);
  1904     if (method->is_static()) {
  1905       tty->print(" (static)");
  1907     tty->cr();
  1910   assert(method->has_native_function(), "must have something valid to call!");
  1913     // perform the work while holding the lock, but perform any printing outside the lock
  1914     MutexLocker mu(AdapterHandlerLibrary_lock);
  1915     // See if somebody beat us to it
  1916     nm = method->code();
  1917     if (nm) {
  1918       return nm;
  1919     }
  1921     // Improve alignment slightly
  1922     u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
  1923     CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
  1924     // Need a few relocation entries
  1925     double locs_buf[20];
  1926     buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  1927     MacroAssembler _masm(&buffer);
  1929     // Fill in the signature array, for the calling-convention call.
  1930     int total_args_passed = method->size_of_parameters();
  1932     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
  1933     VMRegPair  * regs   = NEW_RESOURCE_ARRAY(VMRegPair  ,total_args_passed);
  1934     int i=0;
  1935     if( !method->is_static() )  // Pass in receiver first
  1936       sig_bt[i++] = T_OBJECT;
  1937     SignatureStream ss(method->signature());
  1938     for( ; !ss.at_return_type(); ss.next()) {
  1939       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  1940       if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
  1941         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  1942     }
  1943     assert( i==total_args_passed, "" );
  1944     BasicType ret_type = ss.type();
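           // Unlike the i2c/c2i case above, the SignatureStream is declared outside
           // the loop so that, once the loop stops at_return_type(), ss.type() can be
           // read off as the wrapper's return type.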
  1946     // Now get the compiled-Java layout as input arguments
  1947     int comp_args_on_stack;
  1948     comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  1950     // Generate the compiled-to-native wrapper code
  1951     nm = SharedRuntime::generate_native_wrapper(&_masm,
  1952                                                 method,
  1953                                                 total_args_passed,
  1954                                                 comp_args_on_stack,
  1955                                                 sig_bt,regs,
  1956                                                 ret_type);
  1957   }
  1959   // Must unlock before calling set_code
  1960   // Install the generated code.
  1961   if (nm != NULL) {
  1962     method->set_code(method, nm);
  1963     nm->post_compiled_method_load_event();
  1964   } else {
  1965     // CodeCache is full, disable compilation
  1966     // Ought to log this but compile log is only per compile thread
  1967     // and we're some nondescript Java thread.
  1968     UseInterpreter = true;
  1969     if (UseCompiler || AlwaysCompileLoopMethods ) {
  1970 #ifndef PRODUCT
  1971       warning("CodeCache is full. Compiler has been disabled");
  1972       if (CompileTheWorld || ExitOnFullCodeCache) {
  1973         before_exit(JavaThread::current());
  1974         exit_globals(); // will delete tty
  1975         vm_direct_exit(CompileTheWorld ? 0 : 1);
  1976       }
  1977 #endif
  1978       UseCompiler               = false;
  1979       AlwaysCompileLoopMethods  = false;
  1980     }
  1981   }
  1982   return nm;
  1983 }
  1985 #ifdef HAVE_DTRACE_H
  1986 // Create a dtrace nmethod for this method.  The wrapper converts the
  1987 // java compiled calling convention to the native convention, makes a dummy call
  1988 // (actually nops for the size of the call instruction, which become a trap if
  1989 // the probe is enabled), then returns to the caller. Since this all looks like a
  1990 // leaf, no thread transition is needed.
  1992 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
  1993   ResourceMark rm;
  1994   nmethod* nm = NULL;
  1996   if (PrintCompilation) {
  1997     ttyLocker ttyl;
  1998     tty->print("---   n%s  ");
  1999     method->print_short_name(tty);
  2000     if (method->is_static()) {
  2001       tty->print(" (static)");
  2003     tty->cr();
  2007     // perform the work while holding the lock, but perform any printing
  2008     // outside the lock
  2009     MutexLocker mu(AdapterHandlerLibrary_lock);
  2010     // See if somebody beat us to it
  2011     nm = method->code();
  2012     if (nm) {
  2013       return nm;
  2014     }
  2016     // Improve alignment slightly
  2017     u_char* buf = (u_char*)
  2018         (((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1));
  2019     CodeBuffer buffer(buf, AdapterHandlerLibrary_size);
  2020     // Need a few relocation entries
  2021     double locs_buf[20];
  2022     buffer.insts()->initialize_shared_locs(
  2023         (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  2024     MacroAssembler _masm(&buffer);
  2026     // Generate the compiled-to-native wrapper code
  2027     nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
  2028   }
  2029   return nm;
  2030 }
  2032 // The dtrace method needs to convert a java.lang.String to a utf8 string.
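       // (UNICODE::as_utf8 is handed max_dtrace_string_size as its output limit
       // below, so longer strings are presumably truncated to fit the probe's
       // fixed-size buffer.)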
  2033 void SharedRuntime::get_utf(oopDesc* src, address dst) {
  2034   typeArrayOop jlsValue  = java_lang_String::value(src);
  2035   int          jlsOffset = java_lang_String::offset(src);
  2036   int          jlsLen    = java_lang_String::length(src);
  2037   jchar*       jlsPos    = (jlsLen == 0) ? NULL :
  2038                                            jlsValue->char_at_addr(jlsOffset);
  2039   (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
  2040 }
  2041 #endif // ndef HAVE_DTRACE_H
  2043 // -------------------------------------------------------------------------
  2044 // Java-Java calling convention
  2045 // (what you use when Java calls Java)
  2047 //------------------------------name_for_receiver----------------------------------
  2048 // For a given signature, return the VMReg for parameter 0.
  2049 VMReg SharedRuntime::name_for_receiver() {
  2050   VMRegPair regs;
  2051   BasicType sig_bt = T_OBJECT;
  2052   (void) java_calling_convention(&sig_bt, &regs, 1, true);
  2053   // Return argument 0 register.  In the LP64 build pointers
  2054   // take 2 registers, but the VM wants only the 'main' name.
  2055   return regs.first();
  2056 }
  2058 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) {
  2059   // This method returns a data structure allocated as a
  2060   // ResourceObject, so do not put any ResourceMarks in here.
  2061   char *s = sig->as_C_string();
  2062   int len = (int)strlen(s);
  2063   s++; len--;                   // Skip opening paren
  2064   char *t = s+len;
  2065   while( *(--t) != ')' ) ;      // Find close paren
  2067   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
  2068   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
  2069   int cnt = 0;
  2070   if (!is_static) {
  2071     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
  2072   }
  2074   while( s < t ) {
  2075     switch( *s++ ) {            // Switch on signature character
  2076     case 'B': sig_bt[cnt++] = T_BYTE;    break;
  2077     case 'C': sig_bt[cnt++] = T_CHAR;    break;
  2078     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
  2079     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
  2080     case 'I': sig_bt[cnt++] = T_INT;     break;
  2081     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
  2082     case 'S': sig_bt[cnt++] = T_SHORT;   break;
  2083     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
  2084     case 'V': sig_bt[cnt++] = T_VOID;    break;
  2085     case 'L':                   // Oop
  2086       while( *s++ != ';'  ) ;   // Skip signature
  2087       sig_bt[cnt++] = T_OBJECT;
  2088       break;
  2089     case '[': {                 // Array
  2090       do {                      // Skip optional size
  2091         while( *s >= '0' && *s <= '9' ) s++;
  2092       } while( *s++ == '[' );   // Nested arrays?
  2093       // Skip element type
  2094       if( s[-1] == 'L' )
  2095         while( *s++ != ';'  ) ; // Skip signature
  2096       sig_bt[cnt++] = T_ARRAY;
  2097       break;
  2098     }
  2099     default : ShouldNotReachHere();
  2100     }
  2101   }
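         // Worked example: a static method with signature (I[Ljava/lang/String;J)V
         // produces sig_bt = { T_INT, T_ARRAY, T_LONG, T_VOID } and cnt == 4; arrays
         // collapse to T_ARRAY (their element signature is skipped) and the return
         // type after ')' is never visited.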
  2102   assert( cnt < 256, "grow table size" );
  2104   int comp_args_on_stack;
  2105   comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
  2107   // the calling convention doesn't count out_preserve_stack_slots so
  2108   // we must add that in to get "true" stack offsets.
  2110   if (comp_args_on_stack) {
  2111     for (int i = 0; i < cnt; i++) {
  2112       VMReg reg1 = regs[i].first();
  2113       if( reg1->is_stack()) {
  2114         // Yuck
  2115         reg1 = reg1->bias(out_preserve_stack_slots());
  2116       }
  2117       VMReg reg2 = regs[i].second();
  2118       if( reg2->is_stack()) {
  2119         // Yuck
  2120         reg2 = reg2->bias(out_preserve_stack_slots());
  2121       }
  2122       regs[i].set_pair(reg2, reg1);
  2123     }
  2124   }
  2126   // results
  2127   *arg_size = cnt;
  2128   return regs;
  2129 }
  2131 // OSR Migration Code
  2132 //
  2133 // This code is used to convert interpreter frames into compiled frames.  It is
  2134 // called from very start of a compiled OSR nmethod.  A temp array is
  2135 // allocated to hold the interesting bits of the interpreter frame.  All
  2136 // active locks are inflated to allow them to move.  The displaced headers and
  2137 // active interpreter locals are copied into the temp buffer.  Then we return
  2138 // back to the compiled code.  The compiled code then pops the current
  2139 // interpreter frame off the stack and pushes a new compiled frame.  Then it
  2140 // copies the interpreter locals and displaced headers where it wants.
  2141 // Finally it calls back to free the temp buffer.
  2142 //
  2143 // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed.
  2145 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
  2147 #ifdef IA64
  2148   ShouldNotReachHere(); // NYI
  2149 #endif /* IA64 */
  2151   //
  2152   // This code is dependent on the memory layout of the interpreter local
  2153   // array and the monitors. On all of our platforms the layout is identical
  2154 // so this code is shared. If some platform lays its arrays out
  2155   // differently then this code could move to platform specific code or
  2156   // the code here could be modified to copy items one at a time using
  2157   // frame accessor methods and be platform independent.
  2159   frame fr = thread->last_frame();
  2160   assert( fr.is_interpreted_frame(), "" );
  2161   assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
  2163   // Figure out how many monitors are active.
  2164   int active_monitor_count = 0;
  2165   for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
  2166        kptr < fr.interpreter_frame_monitor_begin();
  2167        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
  2168     if( kptr->obj() != NULL ) active_monitor_count++;
  2169   }
  2171   // QQQ we could place the number of active monitors in the array so that compiled code
  2172   // could double check it.
  2174   methodOop moop = fr.interpreter_frame_method();
  2175   int max_locals = moop->max_locals();
  2176   // Allocate temp buffer, 1 word per local & 2 per active monitor
  2177   int buf_size_words = max_locals + active_monitor_count*2;
  2178   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
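         // Resulting layout (a sketch): buf[0 .. max_locals-1] holds the locals
         // (buf[i] is local number max_locals-1-i), followed by one
         // (displaced header, object) pair for each active monitor.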
  2180   // Copy the locals.  Order is preserved so that loading of longs works.
  2181   // Since there's no GC I can copy the oops blindly.
  2182   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
  2183   if (TaggedStackInterpreter) {
  2184     for (int i = 0; i < max_locals; i++) {
  2185       // copy only each local separately to the buffer avoiding the tag
  2186       buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1);
  2187     }
  2188   } else {
  2189     Copy::disjoint_words(
  2190                        (HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
  2191                        (HeapWord*)&buf[0],
  2192                        max_locals);
  2193   }
  2195   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
  2196   int i = max_locals;
  2197   for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
  2198        kptr2 < fr.interpreter_frame_monitor_begin();
  2199        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
  2200     if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
  2201       BasicLock *lock = kptr2->lock();
  2202       // Inflate so the displaced header becomes position-independent
  2203       if (lock->displaced_header()->is_unlocked())
  2204         ObjectSynchronizer::inflate_helper(kptr2->obj());
  2205       // Now the displaced header is free to move
  2206       buf[i++] = (intptr_t)lock->displaced_header();
  2207       buf[i++] = (intptr_t)kptr2->obj();
  2208     }
  2209   }
  2210   assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
  2212   return buf;
  2213 JRT_END
  2215 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
  2216   FREE_C_HEAP_ARRAY(intptr_t,buf);
  2217 JRT_END
  2219 #ifndef PRODUCT
  2220 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
  2222   if (_handlers == NULL) return false;
  2224   for (int i = 0 ; i < _handlers->length() ; i++) {
  2225     AdapterHandlerEntry* a = get_entry(i);
  2226     if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
  2227   }
  2228   return false;
  2229 }
  2231 void AdapterHandlerLibrary::print_handler(CodeBlob* b) {
  2233   for (int i = 0 ; i < _handlers->length() ; i++) {
  2234     AdapterHandlerEntry* a = get_entry(i);
  2235     if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) {
  2236       tty->print("Adapter for signature: ");
  2237       // Fingerprinter::print(_fingerprints->at(i));
  2238       tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i));
  2239       tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
  2240                     a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
  2242       return;
  2243     }
  2244   }
  2245   assert(false, "Should have found handler");
  2246 }
  2247 #endif /* PRODUCT */
