src/share/vm/runtime/sharedRuntime.cpp

author:      stefank
date:        Wed, 01 Dec 2010 15:04:06 +0100
changeset:   2325:c760f78e0a53
parent:      2314:f95d63e2154a
child:       2462:8012aa3ccede
permissions: -rw-r--r--

7003125: precompiled.hpp is included when precompiled headers are not used
Summary: Added an ifndef DONT_USE_PRECOMPILED_HEADER to precompiled.hpp. Set up DONT_USE_PRECOMPILED_HEADER when compiling with Sun Studio or when the user specifies USE_PRECOMPILED_HEADER=0. Fixed broken include dependencies.
Reviewed-by: coleenp, kvn
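
A minimal sketch of the guard described in the summary (illustrative only; the
actual contents of precompiled.hpp are not shown here):

    #ifndef DONT_USE_PRECOMPILED_HEADER
    // ... the precompiled include set ...
    #endif // DONT_USE_PRECOMPILED_HEADER

With DONT_USE_PRECOMPILED_HEADER defined (Sun Studio builds, or builds with
USE_PRECOMPILED_HEADER=0), precompiled.hpp expands to nothing, so each .cpp
file must state its own dependencies explicitly; that is why the include list
below is exhaustive.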

     1 /*
     2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/systemDictionary.hpp"
    27 #include "classfile/vmSymbols.hpp"
    28 #include "code/compiledIC.hpp"
    29 #include "code/scopeDesc.hpp"
    30 #include "code/vtableStubs.hpp"
    31 #include "compiler/abstractCompiler.hpp"
    32 #include "compiler/compileBroker.hpp"
    33 #include "compiler/compilerOracle.hpp"
    34 #include "interpreter/interpreter.hpp"
    35 #include "interpreter/interpreterRuntime.hpp"
    36 #include "memory/gcLocker.inline.hpp"
    37 #include "memory/universe.inline.hpp"
    38 #include "oops/oop.inline.hpp"
    39 #include "prims/forte.hpp"
    40 #include "prims/jvmtiExport.hpp"
    41 #include "prims/jvmtiRedefineClassesTrace.hpp"
    42 #include "prims/methodHandles.hpp"
    43 #include "prims/nativeLookup.hpp"
    44 #include "runtime/arguments.hpp"
    45 #include "runtime/biasedLocking.hpp"
    46 #include "runtime/handles.inline.hpp"
    47 #include "runtime/init.hpp"
    48 #include "runtime/interfaceSupport.hpp"
    49 #include "runtime/javaCalls.hpp"
    50 #include "runtime/sharedRuntime.hpp"
    51 #include "runtime/stubRoutines.hpp"
    52 #include "runtime/vframe.hpp"
    53 #include "runtime/vframeArray.hpp"
    54 #include "utilities/copy.hpp"
    55 #include "utilities/dtrace.hpp"
    56 #include "utilities/events.hpp"
    57 #include "utilities/hashtable.inline.hpp"
    58 #include "utilities/xmlstream.hpp"
    59 #ifdef TARGET_ARCH_x86
    60 # include "nativeInst_x86.hpp"
    61 # include "vmreg_x86.inline.hpp"
    62 #endif
    63 #ifdef TARGET_ARCH_sparc
    64 # include "nativeInst_sparc.hpp"
    65 # include "vmreg_sparc.inline.hpp"
    66 #endif
    67 #ifdef TARGET_ARCH_zero
    68 # include "nativeInst_zero.hpp"
    69 # include "vmreg_zero.inline.hpp"
    70 #endif
    71 #ifdef COMPILER1
    72 #include "c1/c1_Runtime1.hpp"
    73 #endif
    75 #include <math.h>
    77 HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t);
    78 HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int,
    79                       char*, int, char*, int, char*, int);
    80 HS_DTRACE_PROBE_DECL7(hotspot, method__return, int,
    81                       char*, int, char*, int, char*, int);
    83 // Implementation of SharedRuntime
    85 #ifndef PRODUCT
    86 // For statistics
    87 int SharedRuntime::_ic_miss_ctr = 0;
    88 int SharedRuntime::_wrong_method_ctr = 0;
    89 int SharedRuntime::_resolve_static_ctr = 0;
    90 int SharedRuntime::_resolve_virtual_ctr = 0;
    91 int SharedRuntime::_resolve_opt_virtual_ctr = 0;
    92 int SharedRuntime::_implicit_null_throws = 0;
    93 int SharedRuntime::_implicit_div0_throws = 0;
    94 int SharedRuntime::_throw_null_ctr = 0;
    96 int SharedRuntime::_nof_normal_calls = 0;
    97 int SharedRuntime::_nof_optimized_calls = 0;
    98 int SharedRuntime::_nof_inlined_calls = 0;
    99 int SharedRuntime::_nof_megamorphic_calls = 0;
   100 int SharedRuntime::_nof_static_calls = 0;
   101 int SharedRuntime::_nof_inlined_static_calls = 0;
   102 int SharedRuntime::_nof_interface_calls = 0;
   103 int SharedRuntime::_nof_optimized_interface_calls = 0;
   104 int SharedRuntime::_nof_inlined_interface_calls = 0;
   105 int SharedRuntime::_nof_megamorphic_interface_calls = 0;
   106 int SharedRuntime::_nof_removable_exceptions = 0;
   108 int SharedRuntime::_new_instance_ctr=0;
   109 int SharedRuntime::_new_array_ctr=0;
   110 int SharedRuntime::_multi1_ctr=0;
   111 int SharedRuntime::_multi2_ctr=0;
   112 int SharedRuntime::_multi3_ctr=0;
   113 int SharedRuntime::_multi4_ctr=0;
   114 int SharedRuntime::_multi5_ctr=0;
   115 int SharedRuntime::_mon_enter_stub_ctr=0;
   116 int SharedRuntime::_mon_exit_stub_ctr=0;
   117 int SharedRuntime::_mon_enter_ctr=0;
   118 int SharedRuntime::_mon_exit_ctr=0;
   119 int SharedRuntime::_partial_subtype_ctr=0;
   120 int SharedRuntime::_jbyte_array_copy_ctr=0;
   121 int SharedRuntime::_jshort_array_copy_ctr=0;
   122 int SharedRuntime::_jint_array_copy_ctr=0;
   123 int SharedRuntime::_jlong_array_copy_ctr=0;
   124 int SharedRuntime::_oop_array_copy_ctr=0;
   125 int SharedRuntime::_checkcast_array_copy_ctr=0;
   126 int SharedRuntime::_unsafe_array_copy_ctr=0;
   127 int SharedRuntime::_generic_array_copy_ctr=0;
   128 int SharedRuntime::_slow_array_copy_ctr=0;
   129 int SharedRuntime::_find_handler_ctr=0;
   130 int SharedRuntime::_rethrow_ctr=0;
   132 int     SharedRuntime::_ICmiss_index                    = 0;
   133 int     SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count];
   134 address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count];
   136 void SharedRuntime::trace_ic_miss(address at) {
   137   for (int i = 0; i < _ICmiss_index; i++) {
   138     if (_ICmiss_at[i] == at) {
   139       _ICmiss_count[i]++;
   140       return;
   141     }
   142   }
   143   int index = _ICmiss_index++;
   144   if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1;
   145   _ICmiss_at[index] = at;
   146   _ICmiss_count[index] = 1;
   147 }
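        // Note: once the table fills up, _ICmiss_index is pinned at
        // maxICmiss_count - 1, so later first-time miss sites all share (and
        // overwrite) the final slot rather than growing the table.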
   149 void SharedRuntime::print_ic_miss_histogram() {
   150   if (ICMissHistogram) {
   151     tty->print_cr ("IC Miss Histogram:");
   152     int tot_misses = 0;
   153     for (int i = 0; i < _ICmiss_index; i++) {
   154       tty->print_cr("  at: " INTPTR_FORMAT "  nof: %d", _ICmiss_at[i], _ICmiss_count[i]);
   155       tot_misses += _ICmiss_count[i];
   156     }
   157     tty->print_cr ("Total IC misses: %7d", tot_misses);
   158   }
   159 }
   160 #endif // PRODUCT
   162 #ifndef SERIALGC
   164 // G1 write-barrier pre: executed before a pointer store.
   165 JRT_LEAF(void, SharedRuntime::g1_wb_pre(oopDesc* orig, JavaThread *thread))
   166   if (orig == NULL) {
   167     assert(false, "should be optimized out");
   168     return;
   169   }
   170   assert(orig->is_oop(true /* ignore mark word */), "Error");
   171   // store the original value that was in the field reference
   172   thread->satb_mark_queue().enqueue(orig);
   173 JRT_END
   175 // G1 write-barrier post: executed after a pointer store.
   176 JRT_LEAF(void, SharedRuntime::g1_wb_post(void* card_addr, JavaThread* thread))
   177   thread->dirty_card_queue().enqueue(card_addr);
   178 JRT_END
   180 #endif // !SERIALGC
   183 JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x))
   184   return x * y;
   185 JRT_END
   188 JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x))
   189   if (x == min_jlong && y == CONST64(-1)) {
   190     return x;
   191   } else {
   192     return x / y;
   193   }
   194 JRT_END
   197 JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x))
   198   if (x == min_jlong && y == CONST64(-1)) {
   199     return 0;
   200   } else {
   201     return x % y;
   202   }
   203 JRT_END
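        // Worked example: the JLS (15.17.2/3) requires overflow to wrap, while
        // the hardware divide instruction would trap on it, hence the guards
        // above. Note the swapped parameter order (y first, x second), so:
        //   ldiv(CONST64(-1), min_jlong) computes min_jlong / -1 == min_jlong
        //   lrem(CONST64(-1), min_jlong) computes min_jlong % -1 == 0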
   206 const juint  float_sign_mask  = 0x7FFFFFFF;
   207 const juint  float_infinity   = 0x7F800000;
   208 const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF);
   209 const julong double_infinity  = CONST64(0x7FF0000000000000);
   211 JRT_LEAF(jfloat, SharedRuntime::frem(jfloat  x, jfloat  y))
   212 #ifdef _WIN64
   213   // 64-bit Windows on amd64 returns the wrong values for
   214   // infinity operands.
   215   union { jfloat f; juint i; } xbits, ybits;
   216   xbits.f = x;
   217   ybits.f = y;
   218   // x Mod Infinity == x unless x is infinity
   219   if ( ((xbits.i & float_sign_mask) != float_infinity) &&
   220        ((ybits.i & float_sign_mask) == float_infinity) ) {
   221     return x;
   222   }
   223 #endif
   224   return ((jfloat)fmod((double)x,(double)y));
   225 JRT_END
   228 JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y))
   229 #ifdef _WIN64
   230   union { jdouble d; julong l; } xbits, ybits;
   231   xbits.d = x;
   232   ybits.d = y;
   233   // x Mod Infinity == x unless x is infinity
   234   if ( ((xbits.l & double_sign_mask) != double_infinity) &&
   235        ((ybits.l & double_sign_mask) == double_infinity) ) {
   236     return x;
   237   }
   238 #endif
   239   return ((jdouble)fmod((double)x,(double)y));
   240 JRT_END
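        // Worked example for the _WIN64 path above: fmod there mishandles
        // infinite operands, so when x is finite and y is infinite the result
        // is returned directly, matching Java semantics:
        //   frem(3.0f, +Infinity) == 3.0f      // x % Infinity == x
        //   frem(+Infinity, 3.0f)              // falls through to fmod (NaN)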
   242 #ifdef __SOFTFP__
   243 JRT_LEAF(jfloat, SharedRuntime::fadd(jfloat x, jfloat y))
   244   return x + y;
   245 JRT_END
   247 JRT_LEAF(jfloat, SharedRuntime::fsub(jfloat x, jfloat y))
   248   return x - y;
   249 JRT_END
   251 JRT_LEAF(jfloat, SharedRuntime::fmul(jfloat x, jfloat y))
   252   return x * y;
   253 JRT_END
   255 JRT_LEAF(jfloat, SharedRuntime::fdiv(jfloat x, jfloat y))
   256   return x / y;
   257 JRT_END
   259 JRT_LEAF(jdouble, SharedRuntime::dadd(jdouble x, jdouble y))
   260   return x + y;
   261 JRT_END
   263 JRT_LEAF(jdouble, SharedRuntime::dsub(jdouble x, jdouble y))
   264   return x - y;
   265 JRT_END
   267 JRT_LEAF(jdouble, SharedRuntime::dmul(jdouble x, jdouble y))
   268   return x * y;
   269 JRT_END
   271 JRT_LEAF(jdouble, SharedRuntime::ddiv(jdouble x, jdouble y))
   272   return x / y;
   273 JRT_END
   275 JRT_LEAF(jfloat, SharedRuntime::i2f(jint x))
   276   return (jfloat)x;
   277 JRT_END
   279 JRT_LEAF(jdouble, SharedRuntime::i2d(jint x))
   280   return (jdouble)x;
   281 JRT_END
   283 JRT_LEAF(jdouble, SharedRuntime::f2d(jfloat x))
   284   return (jdouble)x;
   285 JRT_END
   287 JRT_LEAF(int,  SharedRuntime::fcmpl(float x, float y))
   288   return x>y ? 1 : (x==y ? 0 : -1);  /* x<y or is_nan*/
   289 JRT_END
   291 JRT_LEAF(int,  SharedRuntime::fcmpg(float x, float y))
   292   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
   293 JRT_END
   295 JRT_LEAF(int,  SharedRuntime::dcmpl(double x, double y))
   296   return x>y ? 1 : (x==y ? 0 : -1); /* x<y or is_nan */
   297 JRT_END
   299 JRT_LEAF(int,  SharedRuntime::dcmpg(double x, double y))
   300   return x<y ? -1 : (x==y ? 0 : 1);  /* x>y or is_nan */
   301 JRT_END
   303 // Functions to return the opposite of the aeabi functions for nan.
   304 JRT_LEAF(int, SharedRuntime::unordered_fcmplt(float x, float y))
   305   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   306 JRT_END
   308 JRT_LEAF(int, SharedRuntime::unordered_dcmplt(double x, double y))
   309   return (x < y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   310 JRT_END
   312 JRT_LEAF(int, SharedRuntime::unordered_fcmple(float x, float y))
   313   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   314 JRT_END
   316 JRT_LEAF(int, SharedRuntime::unordered_dcmple(double x, double y))
   317   return (x <= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   318 JRT_END
   320 JRT_LEAF(int, SharedRuntime::unordered_fcmpge(float x, float y))
   321   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   322 JRT_END
   324 JRT_LEAF(int, SharedRuntime::unordered_dcmpge(double x, double y))
   325   return (x >= y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   326 JRT_END
   328 JRT_LEAF(int, SharedRuntime::unordered_fcmpgt(float x, float y))
   329   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   330 JRT_END
   332 JRT_LEAF(int, SharedRuntime::unordered_dcmpgt(double x, double y))
   333   return (x > y) ? 1 : ((g_isnan(x) || g_isnan(y)) ? 1 : 0);
   334 JRT_END
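        // For NaN inputs the AEABI helpers (__aeabi_fcmplt and friends) answer
        // 0, i.e. "relation does not hold"; the unordered_* variants above
        // answer 1 instead, e.g. unordered_fcmplt(NaN, 0.0f) == 1 even though
        // NaN < 0.0f is false.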
   336 // Intrinsics make gcc generate code for these.
   337 float  SharedRuntime::fneg(float f)   {
   338   return -f;
   339 }
   341 double SharedRuntime::dneg(double f)  {
   342   return -f;
   343 }
   345 #endif // __SOFTFP__
   347 #if defined(__SOFTFP__) || defined(E500V2)
   348 // Intrinsics make gcc generate code for these.
   349 double SharedRuntime::dabs(double f)  {
   350   return (f <= (double)0.0) ? (double)0.0 - f : f;
   351 }
   353 #endif
   355 #if defined(__SOFTFP__) || defined(PPC)
   356 double SharedRuntime::dsqrt(double f) {
   357   return sqrt(f);
   358 }
   359 #endif
   361 JRT_LEAF(jint, SharedRuntime::f2i(jfloat  x))
   362   if (g_isnan(x))
   363     return 0;
   364   if (x >= (jfloat) max_jint)
   365     return max_jint;
   366   if (x <= (jfloat) min_jint)
   367     return min_jint;
   368   return (jint) x;
   369 JRT_END
   372 JRT_LEAF(jlong, SharedRuntime::f2l(jfloat  x))
   373   if (g_isnan(x))
   374     return 0;
   375   if (x >= (jfloat) max_jlong)
   376     return max_jlong;
   377   if (x <= (jfloat) min_jlong)
   378     return min_jlong;
   379   return (jlong) x;
   380 JRT_END
   383 JRT_LEAF(jint, SharedRuntime::d2i(jdouble x))
   384   if (g_isnan(x))
   385     return 0;
   386   if (x >= (jdouble) max_jint)
   387     return max_jint;
   388   if (x <= (jdouble) min_jint)
   389     return min_jint;
   390   return (jint) x;
   391 JRT_END
   394 JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x))
   395   if (g_isnan(x))
   396     return 0;
   397   if (x >= (jdouble) max_jlong)
   398     return max_jlong;
   399   if (x <= (jdouble) min_jlong)
   400     return min_jlong;
   401   return (jlong) x;
   402 JRT_END
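        // The four conversions above implement the JLS 5.1.3 narrowing rules
        // rather than the raw hardware behavior (x86, for instance, yields the
        // "integer indefinite" value 0x80000000 for NaN or out-of-range
        // inputs). For example:
        //   f2i(NaN)   == 0
        //   f2i(1e20f) == max_jint
        //   d2l(-1e30) == min_jlong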
   405 JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x))
   406   return (jfloat)x;
   407 JRT_END
   410 JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x))
   411   return (jfloat)x;
   412 JRT_END
   415 JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x))
   416   return (jdouble)x;
   417 JRT_END
   419 // Exception handling across interpreter/compiler boundaries
   420 //
   421 // exception_handler_for_return_address(...) returns the continuation address.
   422 // The continuation address is the entry point of the exception handler of the
   423 // previous frame depending on the return address.
   425 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) {
   426   assert(frame::verify_return_pc(return_address), "must be a return pc");
   428   // Reset MethodHandle flag.
   429   thread->set_is_method_handle_return(false);
   431   // the fastest case first
   432   CodeBlob* blob = CodeCache::find_blob(return_address);
   433   if (blob != NULL && blob->is_nmethod()) {
   434     nmethod* code = (nmethod*)blob;
   435     assert(code != NULL, "nmethod must be present");
   436     // Check if the return address is a MethodHandle call site.
   437     thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
   438     // native nmethods don't have exception handlers
   439     assert(!code->is_native_method(), "no exception handler");
   440     assert(code->header_begin() != code->exception_begin(), "no exception handler");
   441     if (code->is_deopt_pc(return_address)) {
   442       return SharedRuntime::deopt_blob()->unpack_with_exception();
   443     } else {
   444       return code->exception_begin();
   445     }
   446   }
   448   // Entry code
   449   if (StubRoutines::returns_to_call_stub(return_address)) {
   450     return StubRoutines::catch_exception_entry();
   451   }
   452   // Interpreted code
   453   if (Interpreter::contains(return_address)) {
   454     return Interpreter::rethrow_exception_entry();
   455   }
   457   // Compiled code
   458   if (CodeCache::contains(return_address)) {
   459     CodeBlob* blob = CodeCache::find_blob(return_address);
   460     if (blob->is_nmethod()) {
   461       nmethod* code = (nmethod*)blob;
   462       assert(code != NULL, "nmethod must be present");
   463       // Check if the return address is a MethodHandle call site.
   464       thread->set_is_method_handle_return(code->is_method_handle_return(return_address));
   465       assert(code->header_begin() != code->exception_begin(), "no exception handler");
   466       return code->exception_begin();
   467     }
   468     if (blob->is_runtime_stub()) {
   469       ShouldNotReachHere();   // callers are responsible for skipping runtime stub frames
   470     }
   471   }
   472   guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!");
   473 #ifndef PRODUCT
   474   { ResourceMark rm;
   475     tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address);
   476     tty->print_cr("a) exception happened in (new?) code stubs/buffers that are not handled here");
   477     tty->print_cr("b) other problem");
   478   }
   479 #endif // PRODUCT
   480   ShouldNotReachHere();
   481   return NULL;
   482 }
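        // Note on the lookup order above: the common case is a compiled return
        // address, so the nmethod check runs first (with a special case for
        // deopt PCs); then the call stub that entry frames return to, then
        // interpreted code, then a second code-cache walk. Runtime stubs and
        // vtable stubs are deliberately not handled here: callers must skip
        // runtime stub frames, and vtable-stub faults are handled earlier, as
        // the guarantee() above asserts.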
   485 JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(JavaThread* thread, address return_address))
   486   return raw_exception_handler_for_return_address(thread, return_address);
   487 JRT_END
   490 address SharedRuntime::get_poll_stub(address pc) {
   491   address stub;
   492   // Look up the code blob
   493   CodeBlob *cb = CodeCache::find_blob(pc);
   495   // Should be an nmethod
   496   assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" );
   498   // Look up the relocation information
   499   assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc),
   500     "safepoint polling: type must be poll" );
   502   assert( ((NativeInstruction*)pc)->is_safepoint_poll(),
   503     "Only polling locations are used for safepoint");
   505   bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc);
   506   if (at_poll_return) {
   507     assert(SharedRuntime::polling_page_return_handler_blob() != NULL,
   508            "polling page return stub not created yet");
   509     stub = SharedRuntime::polling_page_return_handler_blob()->entry_point();
   510   } else {
   511     assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL,
   512            "polling page safepoint stub not created yet");
   513     stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point();
   514   }
   515 #ifndef PRODUCT
   516   if( TraceSafepoint ) {
   517     char buf[256];
   518     jio_snprintf(buf, sizeof(buf),
   519                  "... found polling page %s exception at pc = "
   520                  INTPTR_FORMAT ", stub =" INTPTR_FORMAT,
   521                  at_poll_return ? "return" : "loop",
   522                  (intptr_t)pc, (intptr_t)stub);
   523     tty->print_raw_cr(buf);
   524   }
   525 #endif // PRODUCT
   526   return stub;
   527 }
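        // Context: compiled code polls the safepoint polling page at returns
        // and at loop back-edges; the resulting fault funnels through here to
        // pick a handler stub. The two stubs differ in the state they must
        // preserve: a poll at a return has the method's return value live,
        // which the return-handler stub saves and restores around the
        // safepoint.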
   530 oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) {
   531   assert(caller.is_interpreted_frame(), "");
   532   int args_size = ArgumentSizeComputer(sig).size() + 1;
   533   assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
   534   oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
   535   assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
   536   return result;
   537 }
   540 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) {
   541   if (JvmtiExport::can_post_on_exceptions()) {
   542     vframeStream vfst(thread, true);
   543     methodHandle method = methodHandle(thread, vfst.method());
   544     address bcp = method()->bcp_from(vfst.bci());
   545     JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception());
   546   }
   547   Exceptions::_throw(thread, __FILE__, __LINE__, h_exception);
   548 }
   550 void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) {
   551   Handle h_exception = Exceptions::new_exception(thread, name, message);
   552   throw_and_post_jvmti_exception(thread, h_exception);
   553 }
   555 // The interpreter code to call this tracing function is only
   556 // called/generated when TraceRedefineClasses has the right bits
   557 // set. Since obsolete methods are never compiled, we don't have
   558 // to modify the compilers to generate calls to this function.
   559 //
   560 JRT_LEAF(int, SharedRuntime::rc_trace_method_entry(
   561     JavaThread* thread, methodOopDesc* method))
   562   assert(RC_TRACE_IN_RANGE(0x00001000, 0x00002000), "wrong call");
   564   if (method->is_obsolete()) {
   565     // We are calling an obsolete method, but this is not necessarily
   566     // an error. Our method could have been redefined just after we
   567     // fetched the methodOop from the constant pool.
   569     // RC_TRACE macro has an embedded ResourceMark
   570     RC_TRACE_WITH_THREAD(0x00001000, thread,
   571                          ("calling obsolete method '%s'",
   572                           method->name_and_sig_as_C_string()));
   573     if (RC_TRACE_ENABLED(0x00002000)) {
   574       // this option is provided to debug calls to obsolete methods
   575       guarantee(false, "faulting at call to an obsolete method.");
   576     }
   577   }
   578   return 0;
   579 JRT_END
   581 // ret_pc points into caller; we are returning caller's exception handler
   582 // for given exception
   583 address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception,
   584                                                     bool force_unwind, bool top_frame_only) {
   585   assert(nm != NULL, "must exist");
   586   ResourceMark rm;
   588   ScopeDesc* sd = nm->scope_desc_at(ret_pc);
   589   // determine handler bci, if any
   590   EXCEPTION_MARK;
   592   int handler_bci = -1;
   593   int scope_depth = 0;
   594   if (!force_unwind) {
   595     int bci = sd->bci();
   596     do {
   597       bool skip_scope_increment = false;
   598       // exception handler lookup
   599       KlassHandle ek (THREAD, exception->klass());
   600       handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD);
   601       if (HAS_PENDING_EXCEPTION) {
   602         // We threw an exception while trying to find the exception handler.
   603         // Transfer the new exception to the exception handle which will
   604         // be set into thread local storage, and do another lookup for an
   605         // exception handler for this exception, this time starting at the
   606         // BCI of the exception handler which caused the exception to be
   607         // thrown (bugs 4307310 and 4546590). Set "exception" reference
   608         // argument to ensure that the correct exception is thrown (4870175).
   609         exception = Handle(THREAD, PENDING_EXCEPTION);
   610         CLEAR_PENDING_EXCEPTION;
   611         if (handler_bci >= 0) {
   612           bci = handler_bci;
   613           handler_bci = -1;
   614           skip_scope_increment = true;
   615         }
   616       }
   617       if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) {
   618         sd = sd->sender();
   619         if (sd != NULL) {
   620           bci = sd->bci();
   621         }
   622         ++scope_depth;
   623       }
   624     } while (!top_frame_only && handler_bci < 0 && sd != NULL);
   625   }
   627   // found handling method => lookup exception handler
   628   int catch_pco = ret_pc - nm->code_begin();
   630   ExceptionHandlerTable table(nm);
   631   HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth);
   632   if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) {
   633     // Allow abbreviated catch tables.  The idea is to allow a method
   634     // to materialize its exceptions without committing to the exact
   635     // routing of exceptions.  In particular this is needed for adding
   636     // a synthetic handler to unlock monitors when inlining
   637     // synchronized methods since the unlock path isn't represented in
   638     // the bytecodes.
   639     t = table.entry_for(catch_pco, -1, 0);
   640   }
   642 #ifdef COMPILER1
   643   if (t == NULL && nm->is_compiled_by_c1()) {
   644     assert(nm->unwind_handler_begin() != NULL, "");
   645     return nm->unwind_handler_begin();
   646   }
   647 #endif
   649   if (t == NULL) {
   650     tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci);
   651     tty->print_cr("   Exception:");
   652     exception->print();
   653     tty->cr();
   654     tty->print_cr(" Compiled exception table :");
   655     table.print();
   656     nm->print_code();
   657     guarantee(false, "missing exception handler");
   658     return NULL;
   659   }
   661   return nm->code_begin() + t->pco();
   662 }
   664 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
   665   // These errors occur only at call sites
   666   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
   667 JRT_END
   669 JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
   670   // These errors occur only at call sites
   671   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
   672 JRT_END
   674 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
   675   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
   676 JRT_END
   678 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread))
   679   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   680 JRT_END
   682 JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread))
   683   // This entry point is effectively only used for NullPointerExceptions which occur at inline
   684   // cache sites (when the callee activation is not yet set up) so we are at a call site
   685   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
   686 JRT_END
   688 JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread))
   689   // We avoid using the normal exception construction in this case because
   690   // it performs an upcall to Java, and we're already out of stack space.
   691   klassOop k = SystemDictionary::StackOverflowError_klass();
   692   oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK);
   693   Handle exception (thread, exception_oop);
   694   if (StackTraceInThrowable) {
   695     java_lang_Throwable::fill_in_stack_trace(exception);
   696   }
   697   throw_and_post_jvmti_exception(thread, exception);
   698 JRT_END
   700 address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread,
   701                                                            address pc,
   702                                                            SharedRuntime::ImplicitExceptionKind exception_kind)
   703 {
   704   address target_pc = NULL;
   706   if (Interpreter::contains(pc)) {
   707 #ifdef CC_INTERP
   708     // C++ interpreter doesn't throw implicit exceptions
   709     ShouldNotReachHere();
   710 #else
   711     switch (exception_kind) {
   712       case IMPLICIT_NULL:           return Interpreter::throw_NullPointerException_entry();
   713       case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry();
   714       case STACK_OVERFLOW:          return Interpreter::throw_StackOverflowError_entry();
   715       default:                      ShouldNotReachHere();
   716     }
   717 #endif // !CC_INTERP
   718   } else {
   719     switch (exception_kind) {
   720       case STACK_OVERFLOW: {
   721         // Stack overflow only occurs upon frame setup; the callee is
   722         // going to be unwound. Dispatch to a shared runtime stub
   723         // which will cause the StackOverflowError to be fabricated
   724         // and processed.
   725         // For stack overflow in deoptimization blob, cleanup thread.
   726         if (thread->deopt_mark() != NULL) {
   727           Deoptimization::cleanup_deopt_info(thread, NULL);
   728         }
   729         return StubRoutines::throw_StackOverflowError_entry();
   730       }
   732       case IMPLICIT_NULL: {
   733         if (VtableStubs::contains(pc)) {
   734           // We haven't yet entered the callee frame. Fabricate an
   735           // exception and begin dispatching it in the caller. Since
   736           // the caller was at a call site, it's safe to destroy all
   737           // caller-saved registers, as these entry points do.
   738           VtableStub* vt_stub = VtableStubs::stub_containing(pc);
   740           // If vt_stub is NULL, then return NULL to signal handler to report the SEGV error.
   741           if (vt_stub == NULL) return NULL;
   743           if (vt_stub->is_abstract_method_error(pc)) {
   744             assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs");
   745             return StubRoutines::throw_AbstractMethodError_entry();
   746           } else {
   747             return StubRoutines::throw_NullPointerException_at_call_entry();
   748           }
   749         } else {
   750           CodeBlob* cb = CodeCache::find_blob(pc);
   752           // If code blob is NULL, then return NULL to signal handler to report the SEGV error.
   753           if (cb == NULL) return NULL;
   755           // Exception happened in CodeCache. Must be either:
   756           // 1. Inline-cache check in C2I handler blob,
   757           // 2. Inline-cache check in nmethod, or
   758           // 3. Implicit null exception in nmethod
   760           if (!cb->is_nmethod()) {
   761             guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(),
   762                       "exception happened outside interpreter, nmethods and vtable stubs (1)");
   763             // There is no handler here, so we will simply unwind.
   764             return StubRoutines::throw_NullPointerException_at_call_entry();
   765           }
   767           // Otherwise, it's an nmethod.  Consult its exception handlers.
   768           nmethod* nm = (nmethod*)cb;
   769           if (nm->inlinecache_check_contains(pc)) {
   770             // exception happened inside inline-cache check code
   771             // => the nmethod is not yet active (i.e., the frame
   772             // is not set up yet) => use return address pushed by
   773             // caller => don't push another return address
   774             return StubRoutines::throw_NullPointerException_at_call_entry();
   775           }
   777 #ifndef PRODUCT
   778           _implicit_null_throws++;
   779 #endif
   780           target_pc = nm->continuation_for_implicit_exception(pc);
   781           // If there's an unexpected fault, target_pc might be NULL,
   782           // in which case we want to fall through into the normal
   783           // error handling code.
   784         }
   786         break; // fall through
   787       }
   790       case IMPLICIT_DIVIDE_BY_ZERO: {
   791         nmethod* nm = CodeCache::find_nmethod(pc);
   792         guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions");
   793 #ifndef PRODUCT
   794         _implicit_div0_throws++;
   795 #endif
   796         target_pc = nm->continuation_for_implicit_exception(pc);
   797         // If there's an unexpected fault, target_pc might be NULL,
   798         // in which case we want to fall through into the normal
   799         // error handling code.
   800         break; // fall through
   801       }
   803       default: ShouldNotReachHere();
   804     }
   806     assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind");
   808     // for AbortVMOnException flag
   809     NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException"));
   810     if (exception_kind == IMPLICIT_NULL) {
   811       Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   812     } else {
   813       Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc);
   814     }
   815     return target_pc;
   816   }
   818   ShouldNotReachHere();
   819   return NULL;
   820 }
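        // Summary: in the interpreter, implicit exceptions dispatch straight
        // to the interpreter's throw entries. In compiled code, STACK_OVERFLOW
        // always goes to a shared stub, while IMPLICIT_NULL and
        // IMPLICIT_DIVIDE_BY_ZERO ask the containing nmethod for a
        // continuation PC; a NULL result here tells the signal handler to
        // treat the fault as a genuine crash.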
   823 JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...))
   824 {
   825   THROW(vmSymbols::java_lang_UnsatisfiedLinkError());
   826 }
   827 JNI_END
   830 address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() {
   831   return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error);
   832 }
   835 #ifndef PRODUCT
   836 JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2))
   837   const frame f = thread->last_frame();
   838   assert(f.is_interpreted_frame(), "must be an interpreted frame");
   839 #ifndef PRODUCT
   840   methodHandle mh(THREAD, f.interpreter_frame_method());
   841   BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2);
   842 #endif // !PRODUCT
   843   return preserve_this_value;
   844 JRT_END
   845 #endif // !PRODUCT
   848 JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts))
   849   os::yield_all(attempts);
   850 JRT_END
   853 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj))
   854   assert(obj->is_oop(), "must be a valid oop");
   855   assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise");
   856   instanceKlass::register_finalizer(instanceOop(obj), CHECK);
   857 JRT_END
   860 jlong SharedRuntime::get_java_tid(Thread* thread) {
   861   if (thread != NULL) {
   862     if (thread->is_Java_thread()) {
   863       oop obj = ((JavaThread*)thread)->threadObj();
   864       return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj);
   865     }
   866   }
   867   return 0;
   868 }
   870 /**
   871  * This function ought to be a void function, but cannot be because
   872  * it gets turned into a tail-call on sparc, which runs into dtrace bug
   873  * 6254741.  Once that is fixed we can remove the dummy return value.
   874  */
   875 int SharedRuntime::dtrace_object_alloc(oopDesc* o) {
   876   return dtrace_object_alloc_base(Thread::current(), o);
   877 }
   879 int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) {
   880   assert(DTraceAllocProbes, "wrong call");
   881   Klass* klass = o->blueprint();
   882   int size = o->size();
   883   symbolOop name = klass->name();
   884   HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread),
   885                    name->bytes(), name->utf8_length(), size * HeapWordSize);
   886   return 0;
   887 }
   889 JRT_LEAF(int, SharedRuntime::dtrace_method_entry(
   890     JavaThread* thread, methodOopDesc* method))
   891   assert(DTraceMethodProbes, "wrong call");
   892   symbolOop kname = method->klass_name();
   893   symbolOop name = method->name();
   894   symbolOop sig = method->signature();
   895   HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread),
   896       kname->bytes(), kname->utf8_length(),
   897       name->bytes(), name->utf8_length(),
   898       sig->bytes(), sig->utf8_length());
   899   return 0;
   900 JRT_END
   902 JRT_LEAF(int, SharedRuntime::dtrace_method_exit(
   903     JavaThread* thread, methodOopDesc* method))
   904   assert(DTraceMethodProbes, "wrong call");
   905   symbolOop kname = method->klass_name();
   906   symbolOop name = method->name();
   907   symbolOop sig = method->signature();
   908   HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread),
   909       kname->bytes(), kname->utf8_length(),
   910       name->bytes(), name->utf8_length(),
   911       sig->bytes(), sig->utf8_length());
   912   return 0;
   913 JRT_END
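        // These probes are gated by the DTraceMethodProbes flag (see the
        // asserts above). DTrace renders the double underscore as a dash, so
        // from a script they appear as method-entry / method-return; an
        // illustrative one-liner, assuming a DTrace-capable platform:
        //   dtrace -n 'hotspot$target:::method-entry { @[pid] = count(); }' -p <pid>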
   916 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
   917 // for a call currently in progress, i.e., arguments have been pushed on the
   918 // stack but the callee has not been invoked yet.  Used by: resolve
   919 // virtual/static, vtable updates, etc.  Caller frame must be compiled.
   920 Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) {
   921   ResourceMark rm(THREAD);
   923   // last java frame on stack (which includes native call frames)
   924   vframeStream vfst(thread, true);  // Do not skip any javaCalls
   926   return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle()));
   927 }
   930 // Finds the receiver, CallInfo (i.e. receiver method), and calling bytecode
   931 // for a call currently in progress, i.e., arguments have been pushed on the
   932 // stack but the callee has not been invoked yet.  Caller frame must be compiled.
   933 Handle SharedRuntime::find_callee_info_helper(JavaThread* thread,
   934                                               vframeStream& vfst,
   935                                               Bytecodes::Code& bc,
   936                                               CallInfo& callinfo, TRAPS) {
   937   Handle receiver;
   938   Handle nullHandle;  //create a handy null handle for exception returns
   940   assert(!vfst.at_end(), "Java frame must exist");
   942   // Find caller and bci from vframe
   943   methodHandle caller (THREAD, vfst.method());
   944   int          bci    = vfst.bci();
   946   // Find bytecode
   947   Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci);
   948   bc = bytecode->java_code();
   949   int bytecode_index = bytecode->index();
   951   // Find receiver for non-static call
   952   if (bc != Bytecodes::_invokestatic) {
   953     // This register map must be updated since we need to find the receiver
   954     // for compiled frames. The receiver might be in a register.
   955     RegisterMap reg_map2(thread);
   956     frame stubFrame   = thread->last_frame();
   957     // Caller-frame is a compiled frame
   958     frame callerFrame = stubFrame.sender(&reg_map2);
   960     methodHandle callee = bytecode->static_target(CHECK_(nullHandle));
   961     if (callee.is_null()) {
   962       THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle);
   963     }
   964     // Retrieve from a compiled argument list
   965     receiver = Handle(THREAD, callerFrame.retrieve_receiver(&reg_map2));
   967     if (receiver.is_null()) {
   968       THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle);
   969     }
   970   }
   972   // Resolve method. This is parameterized by bytecode.
   973   constantPoolHandle constants (THREAD, caller->constants());
   974   assert (receiver.is_null() || receiver->is_oop(), "wrong receiver");
   975   LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle));
   977 #ifdef ASSERT
   978   // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls
   979   if (bc != Bytecodes::_invokestatic && bc != Bytecodes::_invokedynamic) {
   980     assert(receiver.not_null(), "should have thrown exception");
   981     KlassHandle receiver_klass (THREAD, receiver->klass());
   982     klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle));
   983                             // klass is already loaded
   984     KlassHandle static_receiver_klass (THREAD, rk);
   985     assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass");
   986     if (receiver_klass->oop_is_instance()) {
   987       if (instanceKlass::cast(receiver_klass())->is_not_initialized()) {
   988         tty->print_cr("ERROR: Klass not yet initialized!!");
   989         receiver_klass.print();
   990       }
   991       assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized");
   992     }
   993   }
   994 #endif
   996   return receiver;
   997 }
   999 methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) {
  1000   ResourceMark rm(THREAD);
  1001   // We first need to check whether any Java activations (compiled or
  1002   // interpreted) exist on the stack since the last JavaCall.  If not, we
  1003   // need to get the target method from the JavaCall wrapper.
  1004   vframeStream vfst(thread, true);  // Do not skip any javaCalls
  1005   methodHandle callee_method;
  1006   if (vfst.at_end()) {
  1007     // No Java frames were found on stack since we did the JavaCall.
  1008     // Hence the stack can only contain an entry_frame.  We need to
  1009     // find the target method from the stub frame.
  1010     RegisterMap reg_map(thread, false);
  1011     frame fr = thread->last_frame();
  1012     assert(fr.is_runtime_frame(), "must be a runtimeStub");
  1013     fr = fr.sender(&reg_map);
  1014     assert(fr.is_entry_frame(), "must be");
  1015     // fr is now pointing to the entry frame.
  1016     callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method());
  1017     assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??");
  1018   } else {
  1019     Bytecodes::Code bc;
  1020     CallInfo callinfo;
  1021     find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle()));
  1022     callee_method = callinfo.selected_method();
  1023   }
  1024   assert(callee_method()->is_method(), "must be");
  1025   return callee_method;
  1026 }
  1028 // Resolves a call.
  1029 methodHandle SharedRuntime::resolve_helper(JavaThread *thread,
  1030                                            bool is_virtual,
  1031                                            bool is_optimized, TRAPS) {
  1032   methodHandle callee_method;
  1033   callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  1034   if (JvmtiExport::can_hotswap_or_post_breakpoint()) {
  1035     int retry_count = 0;
  1036     while (!HAS_PENDING_EXCEPTION && callee_method->is_old() &&
  1037            callee_method->method_holder() != SystemDictionary::Object_klass()) {
  1038       // If there is a pending exception then there is no need to re-try
  1039       // to resolve this method.
  1040       // If the method has been redefined, we need to try again.
  1041       // Hack: we have no way to update the vtables of arrays, so don't
  1042       // require that java.lang.Object has been updated.
  1044       // It is very unlikely that a method is redefined more than 100 times
  1045       // in the middle of resolve; if we loop here more than 100 times,
  1046       // there is likely a bug.
  1047       guarantee((retry_count++ < 100),
  1048                 "Could not resolve to latest version of redefined method");
  1049       // method is redefined in the middle of resolve so re-try.
  1050       callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD);
  1051     }
  1052   }
  1053   return callee_method;
  1054 }
  1056 // Resolves a call.  The compilers generate code for calls that go here
  1057 // and are patched with the real destination of the call.
  1058 methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
  1059                                            bool is_virtual,
  1060                                            bool is_optimized, TRAPS) {
  1062   ResourceMark rm(thread);
  1063   RegisterMap cbl_map(thread, false);
  1064   frame caller_frame = thread->last_frame().sender(&cbl_map);
  1066   CodeBlob* caller_cb = caller_frame.cb();
  1067   guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
  1068   nmethod* caller_nm = caller_cb->as_nmethod_or_null();
  1069   // make sure caller is not getting deoptimized
  1070   // and removed before we are done with it.
  1071   // CLEANUP - with lazy deopt shouldn't need this lock
  1072   nmethodLocker caller_lock(caller_nm);
  1075   // determine call info & receiver
  1076   // note: a) receiver is NULL for static calls
  1077   //       b) an exception is thrown if receiver is NULL for non-static calls
  1078   CallInfo call_info;
  1079   Bytecodes::Code invoke_code = Bytecodes::_illegal;
  1080   Handle receiver = find_callee_info(thread, invoke_code,
  1081                                      call_info, CHECK_(methodHandle()));
  1082   methodHandle callee_method = call_info.selected_method();
  1084   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) ||
  1085          ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode");
  1087 #ifndef PRODUCT
  1088   // tracing/debugging/statistics
  1089   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
  1090                 (is_virtual) ? (&_resolve_virtual_ctr) :
  1091                                (&_resolve_static_ctr);
  1092   Atomic::inc(addr);
  1094   if (TraceCallFixup) {
  1095     ResourceMark rm(thread);
  1096     tty->print("resolving %s%s (%s) call to",
  1097       (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static",
  1098       Bytecodes::name(invoke_code));
  1099     callee_method->print_short_name(tty);
  1100     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1101   }
  1102 #endif
  1104   // JSR 292
  1105   // If the resolved method is a MethodHandle invoke target the call
  1106   // site must be a MethodHandle call site.
  1107   if (callee_method->is_method_handle_invoke()) {
  1108     assert(caller_nm->is_method_handle_return(caller_frame.pc()), "must be MH call site");
  1109   }
  1111   // Compute entry points. This might require generation of C2I converter
  1112   // frames, so we cannot be holding any locks here. Furthermore, the
  1113   // computation of the entry points is independent of patching the call.  We
  1114   // always return the entry-point, but we only patch the stub if the call has
  1115   // not been deoptimized.  Return values: for a virtual call this is a
  1116   // (cached_oop, destination address) pair. For a static call/optimized
  1117   // virtual this is just a destination address.
  1119   StaticCallInfo static_call_info;
  1120   CompiledICInfo virtual_call_info;
  1122   // Make sure the callee nmethod does not get deoptimized and removed before
  1123   // we are done patching the code.
  1124   nmethod* callee_nm = callee_method->code();
  1125   nmethodLocker nl_callee(callee_nm);
  1126 #ifdef ASSERT
  1127   address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
  1128 #endif
  1130   if (is_virtual) {
  1131     assert(receiver.not_null(), "sanity check");
  1132     bool static_bound = call_info.resolved_method()->can_be_statically_bound();
  1133     KlassHandle h_klass(THREAD, receiver->klass());
  1134     CompiledIC::compute_monomorphic_entry(callee_method, h_klass,
  1135                      is_optimized, static_bound, virtual_call_info,
  1136                      CHECK_(methodHandle()));
  1137   } else {
  1138     // static call
  1139     CompiledStaticCall::compute_entry(callee_method, static_call_info);
  1140   }
  1142   // grab lock, check for deoptimization and potentially patch caller
  1143   {
  1144     MutexLocker ml_patch(CompiledIC_lock);
  1146     // Now that we are ready to patch: if the methodOop was redefined then
  1147     // don't update the call site; let the caller retry.
  1149     if (!callee_method->is_old()) {
  1150 #ifdef ASSERT
  1151       // We must not try to patch to jump to an already unloaded method.
  1152       if (dest_entry_point != 0) {
  1153         assert(CodeCache::find_blob(dest_entry_point) != NULL,
  1154                "should not unload nmethod while locked");
  1155       }
  1156 #endif
  1157       if (is_virtual) {
  1158         CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
  1159         if (inline_cache->is_clean()) {
  1160           inline_cache->set_to_monomorphic(virtual_call_info);
  1161         }
  1162       } else {
  1163         CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc());
  1164         if (ssc->is_clean()) ssc->set(static_call_info);
  1165       }
  1166     }
  1168   } // unlock CompiledIC_lock
  1170   return callee_method;
  1171 }
  1174 // Inline caches exist only in compiled code
  1175 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread))
  1176 #ifdef ASSERT
  1177   RegisterMap reg_map(thread, false);
  1178   frame stub_frame = thread->last_frame();
  1179   assert(stub_frame.is_runtime_frame(), "sanity check");
  1180   frame caller_frame = stub_frame.sender(&reg_map);
  1181   assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame");
  1182 #endif /* ASSERT */
  1184   methodHandle callee_method;
  1185   JRT_BLOCK
  1186     callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL);
  1187     // Return methodOop through TLS
  1188     thread->set_vm_result(callee_method());
  1189   JRT_BLOCK_END
  1190   // return compiled code entry point after potential safepoints
  1191   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1192   return callee_method->verified_code_entry();
  1193 JRT_END
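        // For context: a compiled inline cache starts out clean, is patched
        // monomorphic by resolve_sub_helper() above, and lands here when the
        // cached receiver klass no longer matches. handle_ic_miss_helper()
        // (below) then either re-patches the site (a false miss, or a target
        // that can be statically bound) or converts it to megamorphic
        // vtable/itable dispatch.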
  1196 // Handle call site that has been made non-entrant
  1197 JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread))
  1198   // 6243940 We might end up in here if the callee is deoptimized
  1199   // as we race to call it.  We don't want to take a safepoint if
  1200   // the caller was interpreted because the caller frame will look
  1201   // interpreted to the stack walkers and arguments are now
  1202   // "compiled" so it is much better to make this transition
  1203   // invisible to the stack walking code. The i2c path will
  1204   // place the callee method in the callee_target. It is stashed
  1205   // there because if we try to find the callee by normal means a
  1206   // safepoint is possible and we would have trouble gc'ing the compiled args.
  1207   RegisterMap reg_map(thread, false);
  1208   frame stub_frame = thread->last_frame();
  1209   assert(stub_frame.is_runtime_frame(), "sanity check");
  1210   frame caller_frame = stub_frame.sender(&reg_map);
  1212   // MethodHandle invokes don't have a CompiledIC and should always
  1213   // simply redispatch to the callee_target.
  1214   address   sender_pc = caller_frame.pc();
  1215   CodeBlob* sender_cb = caller_frame.cb();
  1216   nmethod*  sender_nm = sender_cb->as_nmethod_or_null();
  1217   bool is_mh_invoke_via_adapter = false;  // Direct c2c call or via adapter?
  1218   if (sender_nm != NULL && sender_nm->is_method_handle_return(sender_pc)) {
  1219     // If the callee_target is set, then we have come here via an i2c
  1220     // adapter.
  1221     methodOop callee = thread->callee_target();
  1222     if (callee != NULL) {
  1223       assert(callee->is_method(), "sanity");
  1224       is_mh_invoke_via_adapter = true;
  1225     }
  1226   }
  1228   if (caller_frame.is_interpreted_frame() ||
  1229       caller_frame.is_entry_frame()       ||
  1230       is_mh_invoke_via_adapter) {
  1231     methodOop callee = thread->callee_target();
  1232     guarantee(callee != NULL && callee->is_method(), "bad handshake");
  1233     thread->set_vm_result(callee);
  1234     thread->set_callee_target(NULL);
  1235     return callee->get_c2i_entry();
  1236   }
  1238   // Must be compiled to compiled path which is safe to stackwalk
  1239   methodHandle callee_method;
  1240   JRT_BLOCK
  1241     // Force resolving of caller (if we called from compiled frame)
  1242     callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL);
  1243     thread->set_vm_result(callee_method());
  1244   JRT_BLOCK_END
  1245   // return compiled code entry point after potential safepoints
  1246   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1247   return callee_method->verified_code_entry();
  1248 JRT_END
  1251 // resolve a static call and patch code
  1252 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread ))
  1253   methodHandle callee_method;
  1254   JRT_BLOCK
  1255     callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL);
  1256     thread->set_vm_result(callee_method());
  1257   JRT_BLOCK_END
  1258   // return compiled code entry point after potential safepoints
  1259   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1260   return callee_method->verified_code_entry();
  1261 JRT_END
  1264 // resolve virtual call and update inline cache to monomorphic
  1265 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread ))
  1266   methodHandle callee_method;
  1267   JRT_BLOCK
  1268     callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL);
  1269     thread->set_vm_result(callee_method());
  1270   JRT_BLOCK_END
  1271   // return compiled code entry point after potential safepoints
  1272   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1273   return callee_method->verified_code_entry();
  1274 JRT_END
  1277 // Resolve a virtual call that can be statically bound (e.g., always
  1278 // monomorphic, so it has no inline cache).  Patch code to resolved target.
  1279 JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread))
  1280   methodHandle callee_method;
  1281   JRT_BLOCK
  1282     callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL);
  1283     thread->set_vm_result(callee_method());
  1284   JRT_BLOCK_END
  1285   // return compiled code entry point after potential safepoints
  1286   assert(callee_method->verified_code_entry() != NULL, " Jump to zero!");
  1287   return callee_method->verified_code_entry();
  1288 JRT_END
  1294 methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) {
  1295   ResourceMark rm(thread);
  1296   CallInfo call_info;
  1297   Bytecodes::Code bc;
  1299   // receiver is NULL for static calls. An exception is thrown for NULL
  1300   // receivers for non-static calls
  1301   Handle receiver = find_callee_info(thread, bc, call_info,
  1302                                      CHECK_(methodHandle()));
  1303   // Compiler1 can produce virtual call sites that can actually be statically
  1304   // bound. If we fell through to below we would think that the site was going
  1305   // megamorphic when in fact it can never miss. Worse, because we'd think it
  1306   // was megamorphic we'd try to do a vtable dispatch; however, methods that can
  1307   // be statically bound don't have vtable entries (vtable_index < 0) and we'd
  1308   // blow up. So we force a reresolution of the call site (as if we did a
  1309   // handle_wrong_method and not a plain ic_miss) and the site will be converted
  1310   // to an optimized virtual call site, never to miss again. I don't believe C2
  1311   // will produce code like this, but if it did this would still be the correct
  1312   // thing to do, hence no ifdef.
  1313   if (call_info.resolved_method()->can_be_statically_bound()) {
  1314     methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle()));
  1315     if (TraceCallFixup) {
  1316       RegisterMap reg_map(thread, false);
  1317       frame caller_frame = thread->last_frame().sender(&reg_map);
  1318       ResourceMark rm(thread);
  1319       tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc));
  1320       callee_method->print_short_name(tty);
  1321       tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc());
  1322       tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1323     }
  1324     return callee_method;
  1325   }
  1327   methodHandle callee_method = call_info.selected_method();
  1331 #ifndef PRODUCT
  1332   Atomic::inc(&_ic_miss_ctr);
  1334   // Statistics & Tracing
  1335   if (TraceCallFixup) {
  1336     ResourceMark rm(thread);
  1337     tty->print("IC miss (%s) call to", Bytecodes::name(bc));
  1338     callee_method->print_short_name(tty);
  1339     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1340   }
  1342   if (ICMissHistogram) {
  1343     MutexLocker m(VMStatistic_lock);
  1344     RegisterMap reg_map(thread, false);
  1345     frame f = thread->last_frame().real_sender(&reg_map);// skip runtime stub
  1346     // produce statistics under the lock
  1347     trace_ic_miss(f.pc());
  1348   }
  1349 #endif
  1351   // install an event collector so that when a vtable stub is created the
  1352   // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The
  1353   // event can't be posted when the stub is created as locks are held
  1354   // - instead the event will be deferred until the event collector goes
  1355   // out of scope.
  1356   JvmtiDynamicCodeEventCollector event_collector;
  1358   // Update inline cache to megamorphic. Skip update if caller has been
  1359   // made non-entrant or we are called from interpreted.
  1360   { MutexLocker ml_patch (CompiledIC_lock);
  1361     RegisterMap reg_map(thread, false);
  1362     frame caller_frame = thread->last_frame().sender(&reg_map);
  1363     CodeBlob* cb = caller_frame.cb();
  1364     if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
  1365       // Not a non-entrant nmethod, so find inline_cache
  1366       CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc());
  1367       bool should_be_mono = false;
  1368       if (inline_cache->is_optimized()) {
  1369         if (TraceCallFixup) {
  1370           ResourceMark rm(thread);
  1371           tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc));
  1372           callee_method->print_short_name(tty);
  1373           tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1374         }
  1375         should_be_mono = true;
  1376       } else {
  1377         compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop();
  1378         if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) {
  1380           if (receiver()->klass() == ic_oop->holder_klass()) {
  1381             // This isn't a real miss. We must have seen that compiled code
  1382             // is now available and we want the call site converted to a
  1383             // monomorphic compiled call site.
  1384             // We can't assert for callee_method->code() != NULL because it
  1385             // could have been deoptimized in the meantime
  1386             if (TraceCallFixup) {
  1387               ResourceMark rm(thread);
  1388               tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc));
  1389               callee_method->print_short_name(tty);
  1390               tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1391             }
  1392             should_be_mono = true;
  1393           }
  1394         }
  1395       }
  1397       if (should_be_mono) {
  1399         // We have a path that was monomorphic but was going interpreted
  1400         // and now we have (or had) a compiled entry. We correct the IC
  1401         // by using a new icBuffer.
  1402         CompiledICInfo info;
  1403         KlassHandle receiver_klass(THREAD, receiver()->klass());
  1404         inline_cache->compute_monomorphic_entry(callee_method,
  1405                                                 receiver_klass,
  1406                                                 inline_cache->is_optimized(),
  1407                                                 false,
  1408                                                 info, CHECK_(methodHandle()));
  1409         inline_cache->set_to_monomorphic(info);
  1410       } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
  1411         // Change to megamorphic
  1412         inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
  1413       } else {
  1414         // Either clean or megamorphic
  1415       }
  1416     }
  1417   } // Release CompiledIC_lock
  1419   return callee_method;
  1420 }
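
// A minimal, self-contained sketch (illustrative only, not VM code) of the
// inline-cache transitions handle_ic_miss_helper performs above: a site that
// should be monomorphic is repaired in place, while a real miss on a
// monomorphic site escalates it to megamorphic; clean and megamorphic sites
// are left alone.
#if 0
enum ICState { IC_CLEAN, IC_MONOMORPHIC, IC_MEGAMORPHIC };

static ICState next_ic_state(ICState current, bool should_be_mono) {
  if (should_be_mono)            return IC_MONOMORPHIC; // false miss: repair the site
  if (current == IC_MONOMORPHIC) return IC_MEGAMORPHIC; // genuine miss: give up on one target
  return current;                                       // clean or megamorphic: no change
}
#endif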
  1422 //
  1423 // Resets a call-site in compiled code so it will get resolved again.
  1424 // This routine handles virtual call sites, optimized virtual call
  1425 // sites, and static call sites. Typically used to change a call site's
  1426 // destination from compiled to interpreted.
  1427 //
  1428 methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) {
  1429   ResourceMark rm(thread);
  1430   RegisterMap reg_map(thread, false);
  1431   frame stub_frame = thread->last_frame();
  1432   assert(stub_frame.is_runtime_frame(), "must be a runtimeStub");
  1433   frame caller = stub_frame.sender(&reg_map);
  1435   // Do nothing if the frame isn't a live compiled frame.
  1436   // nmethod could be deoptimized by the time we get here
  1437   // so no update to the caller is needed.
  1439   if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) {
  1441     address pc = caller.pc();
  1442     Events::log("update call-site at pc " INTPTR_FORMAT, pc);
  1444     // Default call_addr is the location of the "basic" call.
  1445     // Determine the address of the call we are reresolving. With
  1446     // Inline Caches we will always find a recognizable call.
  1447     // With Inline Caches disabled we may or may not find a
  1448     // recognizable call. We will always find a call for static
  1449     // calls and for optimized virtual calls. For vanilla virtual
  1450     // calls it depends on the state of the UseInlineCaches switch.
  1451     //
  1452     // With Inline Caches disabled we can get here for a virtual call
  1453     // for two reasons:
  1454     //   1 - calling an abstract method. The vtable for abstract methods
  1455     //       will run us thru handle_wrong_method and we will eventually
  1456     //       end up in the interpreter to throw the AbstractMethodError.
  1457     //   2 - a racing deoptimization. We could be doing a vanilla vtable
  1458     //       call and between the time we fetch the entry address and
  1459     //       we jump to it the target gets deoptimized. Similar to 1
  1460     //       we will wind up in the interpreter (thru a c2i with c2).
  1461     //
  1462     address call_addr = NULL;
  1463     {
  1464       // Get call instruction under lock because another thread may be
  1465       // busy patching it.
  1466       MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1467       // Location of call instruction
  1468       if (NativeCall::is_call_before(pc)) {
  1469         NativeCall *ncall = nativeCall_before(pc);
  1470         call_addr = ncall->instruction_address();
  1471       }
  1472     }
  1474     // Check for static or virtual call
  1475     bool is_static_call = false;
  1476     nmethod* caller_nm = CodeCache::find_nmethod(pc);
  1477     // Make sure nmethod doesn't get deoptimized and removed until
  1478     // this is done with it.
  1479     // CLEANUP - with lazy deopt shouldn't need this lock
  1480     nmethodLocker nmlock(caller_nm);
  1482     if (call_addr != NULL) {
  1483       RelocIterator iter(caller_nm, call_addr, call_addr+1);
  1484       int ret = iter.next(); // Get item
  1485       if (ret) {
  1486         assert(iter.addr() == call_addr, "must find call");
  1487         if (iter.type() == relocInfo::static_call_type) {
  1488           is_static_call = true;
  1489         } else {
  1490           assert(iter.type() == relocInfo::virtual_call_type ||
  1491                  iter.type() == relocInfo::opt_virtual_call_type
  1492                 , "unexpected relocInfo. type");
  1493         }
  1494       } else {
  1495         assert(!UseInlineCaches, "relocation info. must exist for this address");
  1496       }
  1498       // Cleaning the inline cache will force a new resolve. This is more robust
  1499       // than directly setting it to the new destination, since resolving of calls
  1500       // is always done through the same code path. (Experience shows that it
  1501       // leads to very hard-to-track-down bugs if an inline cache gets updated
  1502       // to a wrong method.) It should not be performance critical, since the
  1503       // resolve is only done once.
  1505       MutexLocker ml(CompiledIC_lock);
  1506       //
  1507       // We do not patch the call site if the nmethod has been made non-entrant
  1508       // as it is a waste of time
  1509       //
  1510       if (caller_nm->is_in_use()) {
  1511         if (is_static_call) {
  1512           CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
  1513           ssc->set_to_clean();
  1514         } else {
  1515           // compiled, dispatched call (which used to call an interpreted method)
  1516           CompiledIC* inline_cache = CompiledIC_at(call_addr);
  1517           inline_cache->set_to_clean();
  1518         }
  1519       }
  1520     }
  1521   }
  1524   methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle()));
  1527 #ifndef PRODUCT
  1528   Atomic::inc(&_wrong_method_ctr);
  1530   if (TraceCallFixup) {
  1531     ResourceMark rm(thread);
  1532     tty->print("handle_wrong_method reresolving call to");
  1533     callee_method->print_short_name(tty);
  1534     tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code());
  1535   }
  1536 #endif
  1538   return callee_method;
  1539 }
  1541 // ---------------------------------------------------------------------------
  1542 // We are calling the interpreter via a c2i. Normally this would mean that
  1543 // we were called by a compiled method. However we could have lost a race
  1544 // where we went int -> i2c -> c2i and so the caller could in fact be
  1545 // interpreted. If the caller is compiled we attempt to patch the caller
  1546 // so it no longer calls into the interpreter.
  1547 IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc))
  1548   methodOop moop(method);
  1550   address entry_point = moop->from_compiled_entry();
  1552   // It's possible that deoptimization can occur at a call site which hasn't
  1553   // been resolved yet, in which case this function will be called from
  1554   // an nmethod that has been patched for deopt and we can ignore the
  1555   // request for a fixup.
  1556   // Also it is possible that we lost a race in that from_compiled_entry
  1557   // is now back to the i2c; in that case we don't need to patch, and if
  1558   // we did we'd leap into space, because the callsite needs to use the
  1559   // "to interpreter" stub in order to load up the methodOop. Don't
  1560   // ask me how I know this...
  1562   CodeBlob* cb = CodeCache::find_blob(caller_pc);
  1563   if (!cb->is_nmethod() || entry_point == moop->get_c2i_entry()) {
  1564     return;
  1565   }
  1567   // The check above makes sure this is an nmethod.
  1568   nmethod* nm = cb->as_nmethod_or_null();
  1569   assert(nm, "must be");
  1571   // Don't fixup MethodHandle call sites as c2i/i2c adapters are used
  1572   // to implement MethodHandle actions.
  1573   if (nm->is_method_handle_return(caller_pc)) {
  1574     return;
  1575   }
  1577   // There is a benign race here. We could be attempting to patch to a compiled
  1578   // entry point at the same time the callee is being deoptimized. If that is
  1579   // the case then entry_point may in fact point to a c2i and we'd patch the
  1580   // call site with the same old data. clear_code will set code() to NULL
  1581   // at the end of it. If we happen to see that NULL then we can skip trying
  1582   // to patch. If we hit the window where the callee has a c2i in the
  1583   // from_compiled_entry and the NULL isn't present yet then we lose the race
  1584   // and patch the code with the same old data. Such is life.
  1586   if (moop->code() == NULL) return;
  1588   if (nm->is_in_use()) {
  1590     // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
  1591     MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag);
  1592     if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) {
  1593       NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset);
  1594       //
  1595       // bug 6281185. We might get here after resolving a call site to a vanilla
  1596       // virtual call. Because the resolvee uses the verified entry it may then
  1597       // see compiled code and attempt to patch the site by calling us. This would
  1598       // then incorrectly convert the call site to optimized and it's downhill from
  1599       // there. If you're lucky you'll get the assert in the bugid, if not you've
  1600       // just made a call site that could be megamorphic into a monomorphic site
  1601       // for the rest of its life! Just another racing bug in the life of
  1602       // fixup_callers_callsite ...
  1603       //
  1604       RelocIterator iter(nm, call->instruction_address(), call->next_instruction_address());
  1605       iter.next();
  1606       assert(iter.has_current(), "must have a reloc at java call site");
  1607       relocInfo::relocType typ = iter.reloc()->type();
  1608       if ( typ != relocInfo::static_call_type &&
  1609            typ != relocInfo::opt_virtual_call_type &&
  1610            typ != relocInfo::static_stub_type) {
  1611         return;
  1612       }
  1613       address destination = call->destination();
  1614       if (destination != entry_point) {
  1615         CodeBlob* callee = CodeCache::find_blob(destination);
  1616         // callee == cb seems weird. It means calling interpreter thru stub.
  1617         if (callee == cb || callee->is_adapter_blob()) {
  1618           // static call or optimized virtual
  1619           if (TraceCallFixup) {
  1620             tty->print("fixup callsite           at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1621             moop->print_short_name(tty);
  1622             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1623           }
  1624           call->set_destination_mt_safe(entry_point);
  1625         } else {
  1626           if (TraceCallFixup) {
  1627             tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1628             moop->print_short_name(tty);
  1629             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1630           }
  1631           // The assert below is too strong; the destination could also be a resolve stub.
  1632           // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be");
  1633         }
  1634       } else {
  1635           if (TraceCallFixup) {
  1636             tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc);
  1637             moop->print_short_name(tty);
  1638             tty->print_cr(" to " INTPTR_FORMAT, entry_point);
  1639           }
  1640       }
  1641     }
  1642   }
  1644 IRT_END
  1647 // same as JVM_Arraycopy, but called directly from compiled code
  1648 JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src,  jint src_pos,
  1649                                                 oopDesc* dest, jint dest_pos,
  1650                                                 jint length,
  1651                                                 JavaThread* thread)) {
  1652 #ifndef PRODUCT
  1653   _slow_array_copy_ctr++;
  1654 #endif
  1655   // Check if we have null pointers
  1656   if (src == NULL || dest == NULL) {
  1657     THROW(vmSymbols::java_lang_NullPointerException());
  1658   }
  1659   // Do the copy.  The casts to arrayOop are necessary to the copy_array API,
  1660   // even though the copy_array API also performs dynamic checks to ensure
  1661   // that src and dest are truly arrays (and are conformable).
  1662   // The copy_array mechanism is awkward and could be removed, but
  1663   // the compilers don't call this function except as a last resort,
  1664   // so it probably doesn't matter.
  1665   Klass::cast(src->klass())->copy_array((arrayOopDesc*)src,  src_pos,
  1666                                         (arrayOopDesc*)dest, dest_pos,
  1667                                         length, thread);
  1669 JRT_END
  1671 char* SharedRuntime::generate_class_cast_message(
  1672     JavaThread* thread, const char* objName) {
  1674   // Get target class name from the checkcast instruction
  1675   vframeStream vfst(thread, true);
  1676   assert(!vfst.at_end(), "Java frame must exist");
  1677   Bytecode_checkcast* cc = Bytecode_checkcast_at(
  1678     vfst.method()->bcp_from(vfst.bci()));
  1679   Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at(
  1680     cc->index(), thread));
  1681   return generate_class_cast_message(objName, targetKlass->external_name());
  1682 }
  1684 char* SharedRuntime::generate_wrong_method_type_message(JavaThread* thread,
  1685                                                         oopDesc* required,
  1686                                                         oopDesc* actual) {
  1687   if (TraceMethodHandles) {
  1688     tty->print_cr("WrongMethodType thread="PTR_FORMAT" req="PTR_FORMAT" act="PTR_FORMAT"",
  1689                   thread, required, actual);
  1690   }
  1691   assert(EnableMethodHandles, "");
  1692   oop singleKlass = wrong_method_type_is_for_single_argument(thread, required);
  1693   char* message = NULL;
  1694   if (singleKlass != NULL) {
  1695     const char* objName = "argument or return value";
  1696     if (actual != NULL) {
  1697       // be flexible about the junk passed in:
  1698       klassOop ak = (actual->is_klass()
  1699                      ? (klassOop)actual
  1700                      : actual->klass());
  1701       objName = Klass::cast(ak)->external_name();
  1702     }
  1703     Klass* targetKlass = Klass::cast(required->is_klass()
  1704                                      ? (klassOop)required
  1705                                      : java_lang_Class::as_klassOop(required));
  1706     message = generate_class_cast_message(objName, targetKlass->external_name());
  1707   } else {
  1708     // %%% need to get the MethodType string, without messing around too much
  1709     // Get a signature from the invoke instruction
  1710     const char* mhName = "method handle";
  1711     const char* targetType = "the required signature";
  1712     vframeStream vfst(thread, true);
  1713     if (!vfst.at_end()) {
  1714       Bytecode_invoke* call = Bytecode_invoke_at(vfst.method(), vfst.bci());
  1715       methodHandle target;
  1716       {
  1717         EXCEPTION_MARK;
  1718         target = call->static_target(THREAD);
  1719         if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; }
  1720       }
  1721       if (target.not_null()
  1722           && target->is_method_handle_invoke()
  1723           && required == target->method_handle_type()) {
  1724         targetType = target->signature()->as_C_string();
  1725       }
  1726     }
  1727     klassOop kignore; int fignore;
  1728     methodOop actual_method = MethodHandles::decode_method(actual,
  1729                                                           kignore, fignore);
  1730     if (actual_method != NULL) {
  1731       if (methodOopDesc::is_method_handle_invoke_name(actual_method->name()))
  1732         mhName = "$";
  1733       else
  1734         mhName = actual_method->signature()->as_C_string();
  1735       if (mhName[0] == '$')
  1736         mhName = actual_method->signature()->as_C_string();
  1737     }
  1738     message = generate_class_cast_message(mhName, targetType,
  1739                                           " cannot be called as ");
  1740   }
  1741   if (TraceMethodHandles) {
  1742     tty->print_cr("WrongMethodType => message=%s", message);
  1743   }
  1744   return message;
  1745 }
  1747 oop SharedRuntime::wrong_method_type_is_for_single_argument(JavaThread* thr,
  1748                                                             oopDesc* required) {
  1749   if (required == NULL)  return NULL;
  1750   if (required->klass() == SystemDictionary::Class_klass())
  1751     return required;
  1752   if (required->is_klass())
  1753     return Klass::cast(klassOop(required))->java_mirror();
  1754   return NULL;
  1755 }
  1758 char* SharedRuntime::generate_class_cast_message(
  1759     const char* objName, const char* targetKlassName, const char* desc) {
  1760   size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1;
  1762   char* message = NEW_RESOURCE_ARRAY(char, msglen);
  1763   if (NULL == message) {
  1764     // Shouldn't happen, but don't cause even more problems if it does
  1765     message = const_cast<char*>(objName);
  1766   } else {
  1767     jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName);
  1768   }
  1769   return message;
  1770 }
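
// For example (with hypothetical arguments), objName "java.lang.String", a
// desc of " cannot be cast to " and targetKlassName "java.lang.Integer"
// produce the message:
//   "java.lang.String cannot be cast to java.lang.Integer"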
  1772 JRT_LEAF(void, SharedRuntime::reguard_yellow_pages())
  1773   (void) JavaThread::current()->reguard_stack();
  1774 JRT_END
  1777 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
  1778 #ifndef PRODUCT
  1779 int SharedRuntime::_monitor_enter_ctr=0;
  1780 #endif
  1781 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
  1782   oop obj(_obj);
  1783 #ifndef PRODUCT
  1784   _monitor_enter_ctr++;             // monitor enter slow
  1785 #endif
  1786   if (PrintBiasedLockingStatistics) {
  1787     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  1788   }
  1789   Handle h_obj(THREAD, obj);
  1790   if (UseBiasedLocking) {
  1791     // Retry fast entry if bias is revoked to avoid unnecessary inflation
  1792     ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK);
  1793   } else {
  1794     ObjectSynchronizer::slow_enter(h_obj, lock, CHECK);
  1795   }
  1796   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
  1797 JRT_END
  1799 #ifndef PRODUCT
  1800 int SharedRuntime::_monitor_exit_ctr=0;
  1801 #endif
  1802 // Handles the uncommon cases of monitor unlocking in compiled code
  1803 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
  1804    oop obj(_obj);
  1805 #ifndef PRODUCT
  1806   _monitor_exit_ctr++;              // monitor exit slow
  1807 #endif
  1808   Thread* THREAD = JavaThread::current();
  1809   // I'm not convinced we need the code guarded by MIGHT_HAVE_PENDING anymore;
  1810   // testing was never able to fire the assert that guarded it, so I have removed it.
  1811   assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?");
  1812 #undef MIGHT_HAVE_PENDING
  1813 #ifdef MIGHT_HAVE_PENDING
  1814   // Save and restore any pending_exception around the exception mark.
  1815   // While the slow_exit must not throw an exception, we could come into
  1816   // this routine with one set.
  1817   oop pending_excep = NULL;
  1818   const char* pending_file;
  1819   int pending_line;
  1820   if (HAS_PENDING_EXCEPTION) {
  1821     pending_excep = PENDING_EXCEPTION;
  1822     pending_file  = THREAD->exception_file();
  1823     pending_line  = THREAD->exception_line();
  1824     CLEAR_PENDING_EXCEPTION;
  1825   }
  1826 #endif /* MIGHT_HAVE_PENDING */
  1828   {
  1829     // Exit must be non-blocking, and therefore no exceptions can be thrown.
  1830     EXCEPTION_MARK;
  1831     ObjectSynchronizer::slow_exit(obj, lock, THREAD);
  1832   }
  1834 #ifdef MIGHT_HAVE_PENDING
  1835   if (pending_excep != NULL) {
  1836     THREAD->set_pending_exception(pending_excep, pending_file, pending_line);
  1837   }
  1838 #endif /* MIGHT_HAVE_PENDING */
  1839 JRT_END
  1841 #ifndef PRODUCT
  1843 void SharedRuntime::print_statistics() {
  1844   ttyLocker ttyl;
  1845   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");
  1847   if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
  1848   if (_monitor_exit_ctr  ) tty->print_cr("%5d monitor exit slow",   _monitor_exit_ctr);
  1849   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);
  1851   SharedRuntime::print_ic_miss_histogram();
  1853   if (CountRemovableExceptions) {
  1854     if (_nof_removable_exceptions > 0) {
  1855       Unimplemented(); // this counter is not yet incremented
  1856       tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions);
  1857     }
  1858   }
  1860   // Dump the JRT_ENTRY counters
  1861   if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr);
  1862   if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr);
  1863   if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr);
  1864   if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr);
  1865   if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr);
  1866   if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr);
  1867   if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr);
  1869   tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr );
  1870   tty->print_cr("%5d wrong method", _wrong_method_ctr );
  1871   tty->print_cr("%5d unresolved static call site", _resolve_static_ctr );
  1872   tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr );
  1873   tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr );
  1875   if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr );
  1876   if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr );
  1877   if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr );
  1878   if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr );
  1879   if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr );
  1880   if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr );
  1881   if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr );
  1882   if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr );
  1883   if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr );
  1884   if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr );
  1885   if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr );
  1886   if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr );
  1887   if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr );
  1888   if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr );
  1889   if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr );
  1890   if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr );
  1892   AdapterHandlerLibrary::print_statistics();
  1894   if (xtty != NULL)  xtty->tail("statistics");
  1895 }
  1897 inline double percent(int x, int y) {
  1898   return 100.0 * x / MAX2(y, 1);
  1899 }
  1901 class MethodArityHistogram {
  1902  public:
  1903   enum { MAX_ARITY = 256 };
  1904  private:
  1905   static int _arity_histogram[MAX_ARITY];     // histogram of #args
  1906   static int _size_histogram[MAX_ARITY];      // histogram of arg size in words
  1907   static int _max_arity;                      // max. arity seen
  1908   static int _max_size;                       // max. arg size seen
  1910   static void add_method_to_histogram(nmethod* nm) {
  1911     methodOop m = nm->method();
  1912     ArgumentCount args(m->signature());
  1913     int arity   = args.size() + (m->is_static() ? 0 : 1);
  1914     int argsize = m->size_of_parameters();
  1915     arity   = MIN2(arity, MAX_ARITY-1);
  1916     argsize = MIN2(argsize, MAX_ARITY-1);
  1917     int count = nm->method()->compiled_invocation_count();
  1918     _arity_histogram[arity]  += count;
  1919     _size_histogram[argsize] += count;
  1920     _max_arity = MAX2(_max_arity, arity);
  1921     _max_size  = MAX2(_max_size, argsize);
  1922   }
  1924   void print_histogram_helper(int n, int* histo, const char* name) {
  1925     const int N = MIN2(5, n);
  1927     double sum = 0;
  1928     double weighted_sum = 0;
  1929     int i;
  1930     for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; }
  1931     double rest = sum;
  1932     double percent = sum / 100;
  1933     for (i = 0; i <= N; i++) {
  1934       rest -= histo[i];
  1935       tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent);
  1936     }
  1937     tty->print_cr("rest: %7d (%5.1f%%)", (int)rest, rest / percent);
  1938     tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n);
  1939   }
  1941   void print_histogram() {
  1942     tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):");
  1943     print_histogram_helper(_max_arity, _arity_histogram, "arity");
  1944     tty->print_cr("\nSame for parameter size (in words):");
  1945     print_histogram_helper(_max_size, _size_histogram, "size");
  1946     tty->cr();
  1947   }
  1949  public:
  1950   MethodArityHistogram() {
  1951     MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  1952     _max_arity = _max_size = 0;
  1953     for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram [i] = 0;
  1954     CodeCache::nmethods_do(add_method_to_histogram);
  1955     print_histogram();
  1956   }
  1957 };
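
// Worked example (hypothetical method): for a virtual method with signature
// "(JD)V", ArgumentCount reports two declared arguments, plus one for the
// receiver, giving arity 3. size_of_parameters() counts Java slots, so the
// argument size is 1 (receiver) + 2 (long) + 2 (double) = 5 words. Each
// histogram bucket is then weighted by compiled_invocation_count().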
  1959 int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY];
  1960 int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY];
  1961 int MethodArityHistogram::_max_arity;
  1962 int MethodArityHistogram::_max_size;
  1964 void SharedRuntime::print_call_statistics(int comp_total) {
  1965   tty->print_cr("Calls from compiled code:");
  1966   int total  = _nof_normal_calls + _nof_interface_calls + _nof_static_calls;
  1967   int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls;
  1968   int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls;
  1969   tty->print_cr("\t%9d   (%4.1f%%) total non-inlined   ", total, percent(total, total));
  1970   tty->print_cr("\t%9d   (%4.1f%%) virtual calls       ", _nof_normal_calls, percent(_nof_normal_calls, total));
  1971   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls));
  1972   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls));
  1973   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_c, percent(mono_c, _nof_normal_calls));
  1974   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls));
  1975   tty->print_cr("\t%9d   (%4.1f%%) interface calls     ", _nof_interface_calls, percent(_nof_interface_calls, total));
  1976   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls));
  1977   tty->print_cr("\t  %9d  (%3.0f%%)   optimized        ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls));
  1978   tty->print_cr("\t  %9d  (%3.0f%%)   monomorphic      ", mono_i, percent(mono_i, _nof_interface_calls));
  1979   tty->print_cr("\t  %9d  (%3.0f%%)   megamorphic      ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls));
  1980   tty->print_cr("\t%9d   (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total));
  1981   tty->print_cr("\t  %9d  (%3.0f%%)   inlined          ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls));
  1982   tty->cr();
  1983   tty->print_cr("Note 1: counter updates are not MT-safe.");
  1984   tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;");
  1985   tty->print_cr("        %% in nested categories are relative to their category");
  1986   tty->print_cr("        (and thus add up to more than 100%% with inlining)");
  1987   tty->cr();
  1989   MethodArityHistogram h;
  1990 }
  1991 #endif
  1994 // A simple wrapper class around the calling convention information
  1995 // that allows sharing of adapters for the same calling convention.
  1996 class AdapterFingerPrint : public CHeapObj {
  1997  private:
  1998   union {
  1999     int  _compact[3];
  2000     int* _fingerprint;
  2001   } _value;
  2002   int _length; // A negative length indicates the fingerprint is in the compact form;
  2003                // otherwise _value._fingerprint is the array.
  2005   // Remap BasicTypes that are handled equivalently by the adapters.
  2006   // These are correct for the current system but someday it might be
  2007   // necessary to make this mapping platform dependent.
  2008   static BasicType adapter_encoding(BasicType in) {
  2009     assert((~0xf & in) == 0, "must fit in 4 bits");
  2010     switch(in) {
  2011       case T_BOOLEAN:
  2012       case T_BYTE:
  2013       case T_SHORT:
  2014       case T_CHAR:
  2015         // These are all promoted to T_INT in the calling convention
  2016         return T_INT;
  2018       case T_OBJECT:
  2019       case T_ARRAY:
  2020 #ifdef _LP64
  2021         return T_LONG;
  2022 #else
  2023         return T_INT;
  2024 #endif
  2026       case T_INT:
  2027       case T_LONG:
  2028       case T_FLOAT:
  2029       case T_DOUBLE:
  2030       case T_VOID:
  2031         return in;
  2033       default:
  2034         ShouldNotReachHere();
  2035         return T_CONFLICT;
  2036     }
  2037   }
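
  // For example (hypothetical signature): an instance method (ZLjava/lang/Object;J)V
  // has sig_bt = { T_OBJECT (receiver), T_BOOLEAN, T_OBJECT, T_LONG, T_VOID (filler) },
  // which encodes on LP64 as T_LONG, T_INT, T_LONG, T_LONG, T_VOID before packing.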
  2039  public:
  2040   AdapterFingerPrint(int total_args_passed, BasicType* sig_bt) {
  2041     // The fingerprint is based on the BasicType signature encoded
  2042     // into an array of ints with four entries per int.
  2043     int* ptr;
  2044     int len = (total_args_passed + 3) >> 2;
  2045     if (len <= (int)(sizeof(_value._compact) / sizeof(int))) {
  2046       _value._compact[0] = _value._compact[1] = _value._compact[2] = 0;
  2047       // Storing the signature encoded as signed chars hits about 98%
  2048       // of the time.
  2049       _length = -len;
  2050       ptr = _value._compact;
  2051     } else {
  2052       _length = len;
  2053       _value._fingerprint = NEW_C_HEAP_ARRAY(int, _length);
  2054       ptr = _value._fingerprint;
  2055     }
  2057     // Now pack the BasicTypes with 4 per int
  2058     int sig_index = 0;
  2059     for (int index = 0; index < len; index++) {
  2060       int value = 0;
  2061       for (int byte = 0; byte < 4; byte++) {
  2062         if (sig_index < total_args_passed) {
  2063           value = (value << 4) | adapter_encoding(sig_bt[sig_index++]);
  2064         }
  2065       }
  2066       ptr[index] = value;
  2067     }
  2068   }
  2070   ~AdapterFingerPrint() {
  2071     if (_length > 0) {
  2072       FREE_C_HEAP_ARRAY(int, _value._fingerprint);
  2073     }
  2074   }
  2076   int value(int index) {
  2077     if (_length < 0) {
  2078       return _value._compact[index];
  2079     }
  2080     return _value._fingerprint[index];
  2081   }
  2082   int length() {
  2083     if (_length < 0) return -_length;
  2084     return _length;
  2085   }
  2087   bool is_compact() {
  2088     return _length <= 0;
  2089   }
  2091   unsigned int compute_hash() {
  2092     int hash = 0;
  2093     for (int i = 0; i < length(); i++) {
  2094       int v = value(i);
  2095       hash = (hash << 8) ^ v ^ (hash >> 5);
  2096     }
  2097     return (unsigned int)hash;
  2098   }
  2100   const char* as_string() {
  2101     stringStream st;
  2102     for (int i = 0; i < length(); i++) {
  2103       st.print(PTR_FORMAT, value(i));
  2104     }
  2105     return st.as_string();
  2106   }
  2108   bool equals(AdapterFingerPrint* other) {
  2109     if (other->_length != _length) {
  2110       return false;
  2111     }
  2112     if (_length < 0) {
  2113       return _value._compact[0] == other->_value._compact[0] &&
  2114              _value._compact[1] == other->_value._compact[1] &&
  2115              _value._compact[2] == other->_value._compact[2];
  2116     } else {
  2117       for (int i = 0; i < _length; i++) {
  2118         if (_value._fingerprint[i] != other->_value._fingerprint[i]) {
  2119           return false;
  2120         }
  2121       }
  2122     }
  2123     return true;
  2124   }
  2125 };
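
// A self-contained sketch (illustrative only, not part of the VM) of the
// packing and hashing scheme above: 4-bit type encodings are packed four per
// int, and the resulting words are mixed with the same shift/xor step that
// compute_hash() uses.
#if 0
#include <vector>

// Pack 4-bit codes four per int, mirroring the AdapterFingerPrint constructor.
static std::vector<int> pack_codes(const std::vector<int>& codes) {
  size_t len = (codes.size() + 3) >> 2;
  std::vector<int> words(len, 0);
  size_t idx = 0;
  for (size_t w = 0; w < len; w++) {
    int value = 0;
    for (int b = 0; b < 4; b++) {
      if (idx < codes.size()) {
        value = (value << 4) | (codes[idx++] & 0xf);
      }
    }
    words[w] = value;
  }
  return words;
}

// Same mixing step as AdapterFingerPrint::compute_hash().
static unsigned int hash_words(const std::vector<int>& words) {
  int hash = 0;
  for (size_t i = 0; i < words.size(); i++) {
    hash = (hash << 8) ^ words[i] ^ (hash >> 5);
  }
  return (unsigned int)hash;
}
#endif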
  2128 // A hashtable mapping from AdapterFingerPrints to AdapterHandlerEntries
  2129 class AdapterHandlerTable : public BasicHashtable {
  2130   friend class AdapterHandlerTableIterator;
  2132  private:
  2134 #ifndef PRODUCT
  2135   static int _lookups; // number of calls to lookup
  2136   static int _buckets; // number of buckets checked
  2137   static int _equals;  // number of buckets checked with matching hash
  2138   static int _hits;    // number of successful lookups
  2139   static int _compact; // number of equals calls with compact signature
  2140 #endif
  2142   AdapterHandlerEntry* bucket(int i) {
  2143     return (AdapterHandlerEntry*)BasicHashtable::bucket(i);
  2144   }
  2146  public:
  2147   AdapterHandlerTable()
  2148     : BasicHashtable(293, sizeof(AdapterHandlerEntry)) { }
  2150   // Create a new entry suitable for insertion in the table
  2151   AdapterHandlerEntry* new_entry(AdapterFingerPrint* fingerprint, address i2c_entry, address c2i_entry, address c2i_unverified_entry) {
  2152     AdapterHandlerEntry* entry = (AdapterHandlerEntry*)BasicHashtable::new_entry(fingerprint->compute_hash());
  2153     entry->init(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  2154     return entry;
  2155   }
  2157   // Insert an entry into the table
  2158   void add(AdapterHandlerEntry* entry) {
  2159     int index = hash_to_index(entry->hash());
  2160     add_entry(index, entry);
  2161   }
  2163   void free_entry(AdapterHandlerEntry* entry) {
  2164     entry->deallocate();
  2165     BasicHashtable::free_entry(entry);
  2166   }
  2168   // Find an entry with the same fingerprint if it exists
  2169   AdapterHandlerEntry* lookup(int total_args_passed, BasicType* sig_bt) {
  2170     NOT_PRODUCT(_lookups++);
  2171     AdapterFingerPrint fp(total_args_passed, sig_bt);
  2172     unsigned int hash = fp.compute_hash();
  2173     int index = hash_to_index(hash);
  2174     for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
  2175       NOT_PRODUCT(_buckets++);
  2176       if (e->hash() == hash) {
  2177         NOT_PRODUCT(_equals++);
  2178         if (fp.equals(e->fingerprint())) {
  2179 #ifndef PRODUCT
  2180           if (fp.is_compact()) _compact++;
  2181           _hits++;
  2182 #endif
  2183           return e;
  2184         }
  2185       }
  2186     }
  2187     return NULL;
  2188   }
  2190 #ifndef PRODUCT
  2191   void print_statistics() {
  2192     ResourceMark rm;
  2193     int longest = 0;
  2194     int empty = 0;
  2195     int total = 0;
  2196     int nonempty = 0;
  2197     for (int index = 0; index < table_size(); index++) {
  2198       int count = 0;
  2199       for (AdapterHandlerEntry* e = bucket(index); e != NULL; e = e->next()) {
  2200         count++;
  2201       }
  2202       if (count != 0) nonempty++;
  2203       if (count == 0) empty++;
  2204       if (count > longest) longest = count;
  2205       total += count;
  2206     }
  2207     tty->print_cr("AdapterHandlerTable: empty %d longest %d total %d average %f",
  2208                   empty, longest, total, total / (double)nonempty);
  2209     tty->print_cr("AdapterHandlerTable: lookups %d buckets %d equals %d hits %d compact %d",
  2210                   _lookups, _buckets, _equals, _hits, _compact);
  2211   }
  2212 #endif
  2213 };
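
// Typical lookup-or-create flow against this table (a sketch of how
// AdapterHandlerLibrary::get_adapter drives it below; illustrative only,
// variable names hypothetical and locking elided):
#if 0
  AdapterHandlerEntry* e = _adapters->lookup(total_args_passed, sig_bt);
  if (e == NULL) {
    // First time this fingerprint is seen: generate adapters, then publish.
    e = _adapters->new_entry(new AdapterFingerPrint(total_args_passed, sig_bt),
                             i2c_entry, c2i_entry, c2i_unverified_entry);
    _adapters->add(e);
  }
#endif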
  2216 #ifndef PRODUCT
  2218 int AdapterHandlerTable::_lookups;
  2219 int AdapterHandlerTable::_buckets;
  2220 int AdapterHandlerTable::_equals;
  2221 int AdapterHandlerTable::_hits;
  2222 int AdapterHandlerTable::_compact;
  2224 #endif
  2226 class AdapterHandlerTableIterator : public StackObj {
  2227  private:
  2228   AdapterHandlerTable* _table;
  2229   int _index;
  2230   AdapterHandlerEntry* _current;
  2232   void scan() {
  2233     while (_index < _table->table_size()) {
  2234       AdapterHandlerEntry* a = _table->bucket(_index);
  2235       _index++;
  2236       if (a != NULL) {
  2237         _current = a;
  2238         return;
  2239       }
  2240     }
  2241   }
  2243  public:
  2244   AdapterHandlerTableIterator(AdapterHandlerTable* table): _table(table), _index(0), _current(NULL) {
  2245     scan();
  2246   }
  2247   bool has_next() {
  2248     return _current != NULL;
  2249   }
  2250   AdapterHandlerEntry* next() {
  2251     if (_current != NULL) {
  2252       AdapterHandlerEntry* result = _current;
  2253       _current = _current->next();
  2254       if (_current == NULL) scan();
  2255       return result;
  2256     } else {
  2257       return NULL;
  2258     }
  2259   }
  2260 };
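
// Walking every live entry with the iterator above (illustrative sketch;
// _adapters is the table defined just below):
#if 0
  AdapterHandlerTableIterator iter(_adapters);
  while (iter.has_next()) {
    AdapterHandlerEntry* entry = iter.next();
    // ... inspect or print the entry ...
  }
#endif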
  2263 // ---------------------------------------------------------------------------
  2264 // Implementation of AdapterHandlerLibrary
  2265 AdapterHandlerTable* AdapterHandlerLibrary::_adapters = NULL;
  2266 AdapterHandlerEntry* AdapterHandlerLibrary::_abstract_method_handler = NULL;
  2267 const int AdapterHandlerLibrary_size = 16*K;
  2268 BufferBlob* AdapterHandlerLibrary::_buffer = NULL;
  2270 BufferBlob* AdapterHandlerLibrary::buffer_blob() {
  2271   // Should be called only when AdapterHandlerLibrary_lock is active.
  2272   if (_buffer == NULL) // Initialize lazily
  2273       _buffer = BufferBlob::create("adapters", AdapterHandlerLibrary_size);
  2274   return _buffer;
  2275 }
  2277 void AdapterHandlerLibrary::initialize() {
  2278   if (_adapters != NULL) return;
  2279   _adapters = new AdapterHandlerTable();
  2281   // Create a special handler for abstract methods.  Abstract methods
  2282   // are never compiled so an i2c entry is somewhat meaningless, but
  2283   // fill it in with something appropriate just in case.  Pass the
  2284   // handle-wrong-method stub for the c2i transitions.
  2285   address wrong_method = SharedRuntime::get_handle_wrong_method_stub();
  2286   _abstract_method_handler = AdapterHandlerLibrary::new_entry(new AdapterFingerPrint(0, NULL),
  2287                                                               StubRoutines::throw_AbstractMethodError_entry(),
  2288                                                               wrong_method, wrong_method);
  2289 }
  2291 AdapterHandlerEntry* AdapterHandlerLibrary::new_entry(AdapterFingerPrint* fingerprint,
  2292                                                       address i2c_entry,
  2293                                                       address c2i_entry,
  2294                                                       address c2i_unverified_entry) {
  2295   return _adapters->new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
  2296 }
  2298 AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
  2299   // Use customized signature handler.  Need to lock around updates to
  2300   // the AdapterHandlerTable (it is not safe for concurrent readers
  2301   // and a single writer: this could be fixed if it becomes a
  2302   // problem).
  2304   // Get the address of the ic_miss handlers before we grab the
  2305   // AdapterHandlerLibrary_lock. This fixes bug 6236259 which
  2306   // was caused by the initialization of the stubs happening
  2307   // while we held the lock and then notifying jvmti while
  2308   // holding it. This just forces the initialization to be a little
  2309   // earlier.
  2310   address ic_miss = SharedRuntime::get_ic_miss_stub();
  2311   assert(ic_miss != NULL, "must have handler");
  2313   ResourceMark rm;
  2315   NOT_PRODUCT(int insts_size);
  2316   AdapterBlob* B = NULL;
  2317   AdapterHandlerEntry* entry = NULL;
  2318   AdapterFingerPrint* fingerprint = NULL;
  2319   {
  2320     MutexLocker mu(AdapterHandlerLibrary_lock);
  2321     // make sure data structure is initialized
  2322     initialize();
  2324     if (method->is_abstract()) {
  2325       return _abstract_method_handler;
  2326     }
  2328     // Fill in the signature array, for the calling-convention call.
  2329     int total_args_passed = method->size_of_parameters(); // All args on stack
  2331     BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, total_args_passed);
  2332     VMRegPair* regs   = NEW_RESOURCE_ARRAY(VMRegPair, total_args_passed);
  2333     int i = 0;
  2334     if (!method->is_static())  // Pass in receiver first
  2335       sig_bt[i++] = T_OBJECT;
  2336     for (SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) {
  2337       sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  2338       if (ss.type() == T_LONG || ss.type() == T_DOUBLE)
  2339         sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  2340     }
  2341     assert(i == total_args_passed, "");
  2343     // Lookup method signature's fingerprint
  2344     entry = _adapters->lookup(total_args_passed, sig_bt);
  2346 #ifdef ASSERT
  2347     AdapterHandlerEntry* shared_entry = NULL;
  2348     if (VerifyAdapterSharing && entry != NULL) {
  2349       shared_entry = entry;
  2350       entry = NULL;
  2351     }
  2352 #endif
  2354     if (entry != NULL) {
  2355       return entry;
  2356     }
  2358     // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage
  2359     int comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  2361     // Make a C heap allocated version of the fingerprint to store in the adapter
  2362     fingerprint = new AdapterFingerPrint(total_args_passed, sig_bt);
  2364     // Create I2C & C2I handlers
  2366     BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache
  2367     if (buf != NULL) {
  2368       CodeBuffer buffer(buf);
  2369       short buffer_locs[20];
  2370       buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs,
  2371                                              sizeof(buffer_locs)/sizeof(relocInfo));
  2372       MacroAssembler _masm(&buffer);
  2374       entry = SharedRuntime::generate_i2c2i_adapters(&_masm,
  2375                                                      total_args_passed,
  2376                                                      comp_args_on_stack,
  2377                                                      sig_bt,
  2378                                                      regs,
  2379                                                      fingerprint);
  2381 #ifdef ASSERT
  2382       if (VerifyAdapterSharing) {
  2383         if (shared_entry != NULL) {
  2384           assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt),
  2385                  "code must match");
  2386           // Release the one just created and return the original
  2387           _adapters->free_entry(entry);
  2388           return shared_entry;
  2389         } else  {
  2390           entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt);
  2391         }
  2392       }
  2393 #endif
  2395       B = AdapterBlob::create(&buffer);
  2396       NOT_PRODUCT(insts_size = buffer.insts_size());
  2397     }
  2398     if (B == NULL) {
  2399       // CodeCache is full, disable compilation
  2400       // Ought to log this, but the compile log is only per compile thread
  2401       // and we're some nondescript Java thread.
  2402       MutexUnlocker mu(AdapterHandlerLibrary_lock);
  2403       CompileBroker::handle_full_code_cache();
  2404       return NULL; // Out of CodeCache space
  2405     }
  2406     entry->relocate(B->content_begin());
  2407 #ifndef PRODUCT
  2408     // debugging support
  2409     if (PrintAdapterHandlers) {
  2410       tty->cr();
  2411       tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)",
  2412                     _adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
  2413                     method->signature()->as_C_string(), fingerprint->as_string(), insts_size );
  2414       tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry());
  2415       Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size);
  2416     }
  2417 #endif
  2419     _adapters->add(entry);
  2420   }
  2421   // Outside of the lock
  2422   if (B != NULL) {
  2423     char blob_id[256];
  2424     jio_snprintf(blob_id,
  2425                  sizeof(blob_id),
  2426                  "%s(%s)@" PTR_FORMAT,
  2427                  B->name(),
  2428                  fingerprint->as_string(),
  2429                  B->content_begin());
  2430     Forte::register_stub(blob_id, B->content_begin(), B->content_end());
  2432     if (JvmtiExport::should_post_dynamic_code_generated()) {
  2433       JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end());
  2434     }
  2435   }
  2436   return entry;
  2437 }
  2439 void AdapterHandlerEntry::relocate(address new_base) {
  2440     ptrdiff_t delta = new_base - _i2c_entry;
  2441     _i2c_entry += delta;
  2442     _c2i_entry += delta;
  2443     _c2i_unverified_entry += delta;
  2444 }
  2447 void AdapterHandlerEntry::deallocate() {
  2448   delete _fingerprint;
  2449 #ifdef ASSERT
  2450   if (_saved_code) FREE_C_HEAP_ARRAY(unsigned char, _saved_code);
  2451   if (_saved_sig)  FREE_C_HEAP_ARRAY(BasicType, _saved_sig);
  2452 #endif
  2453 }
  2456 #ifdef ASSERT
  2457 // Capture the code before relocation so that it can be compared
  2458 // against other versions.  If the code is captured after relocation
  2459 // then relative instructions won't be equivalent.
  2460 void AdapterHandlerEntry::save_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
  2461   _saved_code = NEW_C_HEAP_ARRAY(unsigned char, length);
  2462   _code_length = length;
  2463   memcpy(_saved_code, buffer, length);
  2464   _total_args_passed = total_args_passed;
  2465   _saved_sig = NEW_C_HEAP_ARRAY(BasicType, _total_args_passed);
  2466   memcpy(_saved_sig, sig_bt, _total_args_passed * sizeof(BasicType));
  2467 }
  2470 bool AdapterHandlerEntry::compare_code(unsigned char* buffer, int length, int total_args_passed, BasicType* sig_bt) {
  2471   if (length != _code_length) {
  2472     return false;
  2473   }
  2474   for (int i = 0; i < length; i++) {
  2475     if (buffer[i] != _saved_code[i]) {
  2476       return false;
  2477     }
  2478   }
  2479   return true;
  2480 }
  2481 #endif
  2484 // Create a native wrapper for this native method.  The wrapper converts the
  2485 // java compiled calling convention to the native convention, handlizes
  2486 // java compiled calling convention to the native convention, handlizes
  2487 // arguments, and transitions to native.  On return from the native call we
  2488 // transition back to java, blocking if a safepoint is in progress.
  2488 nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
  2489   ResourceMark rm;
  2490   nmethod* nm = NULL;
  2492   if (PrintCompilation) {
  2493     ttyLocker ttyl;
  2494     tty->print("---   n%s ", (method->is_synchronized() ? "s" : " "));
  2495     method->print_short_name(tty);
  2496     if (method->is_static()) {
  2497       tty->print(" (static)");
  2498     }
  2499     tty->cr();
  2500   }
  2502   assert(method->has_native_function(), "must have something valid to call!");
  2504   {
  2505     // perform the work while holding the lock, but perform any printing outside the lock
  2506     MutexLocker mu(AdapterHandlerLibrary_lock);
  2507     // See if somebody beat us to it
  2508     nm = method->code();
  2509     if (nm) {
  2510       return nm;
  2511     }
  2513     ResourceMark rm;
  2515     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
  2516     if (buf != NULL) {
  2517       CodeBuffer buffer(buf);
  2518       double locs_buf[20];
  2519       buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  2520       MacroAssembler _masm(&buffer);
  2522       // Fill in the signature array, for the calling-convention call.
  2523       int total_args_passed = method->size_of_parameters();
  2525       BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed);
  2526       VMRegPair*   regs = NEW_RESOURCE_ARRAY(VMRegPair,total_args_passed);
  2527       int i=0;
  2528       if( !method->is_static() )  // Pass in receiver first
  2529         sig_bt[i++] = T_OBJECT;
  2530       SignatureStream ss(method->signature());
  2531       for( ; !ss.at_return_type(); ss.next()) {
  2532         sig_bt[i++] = ss.type();  // Collect remaining bits of signature
  2533         if( ss.type() == T_LONG || ss.type() == T_DOUBLE )
  2534           sig_bt[i++] = T_VOID;   // Longs & doubles take 2 Java slots
  2535       }
  2536       assert( i==total_args_passed, "" );
  2537       BasicType ret_type = ss.type();
  2539       // Now get the compiled-Java layout as input arguments
  2540       int comp_args_on_stack;
  2541       comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false);
  2543       // Generate the compiled-to-native wrapper code
  2544       nm = SharedRuntime::generate_native_wrapper(&_masm,
  2545                                                   method,
  2546                                                   total_args_passed,
  2547                                                   comp_args_on_stack,
  2548                                                   sig_bt,regs,
  2549                                                   ret_type);
  2550     }
  2551   }
  2553   // Must unlock before calling set_code
  2555   // Install the generated code.
  2556   if (nm != NULL) {
  2557     method->set_code(method, nm);
  2558     nm->post_compiled_method_load_event();
  2559   } else {
  2560     // CodeCache is full, disable compilation
  2561     CompileBroker::handle_full_code_cache();
  2562   }
  2563   return nm;
  2564 }
  2566 #ifdef HAVE_DTRACE_H
  2567 // Create a dtrace nmethod for this method.  The wrapper converts the
  2568 // java compiled calling convention to the native convention, makes a dummy call
  2569 // (actually nops for the size of the call instruction, which become a trap if the
  2570 // probe is enabled). It then returns to the caller. Since this all looks like a
  2571 // leaf call, no thread transition is needed.
  2573 nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) {
  2574   ResourceMark rm;
  2575   nmethod* nm = NULL;
  2577   if (PrintCompilation) {
  2578     ttyLocker ttyl;
  2579     tty->print("---   n%s  ", (method->is_synchronized() ? "s" : " "));
  2580     method->print_short_name(tty);
  2581     if (method->is_static()) {
  2582       tty->print(" (static)");
  2583     }
  2584     tty->cr();
  2585   }
  2587   {
  2588     // perform the work while holding the lock, but perform any printing
  2589     // outside the lock
  2590     MutexLocker mu(AdapterHandlerLibrary_lock);
  2591     // See if somebody beat us to it
  2592     nm = method->code();
  2593     if (nm) {
  2594       return nm;
  2595     }
  2597     ResourceMark rm;
  2599     BufferBlob*  buf = buffer_blob(); // the temporary code buffer in CodeCache
  2600     if (buf != NULL) {
  2601       CodeBuffer buffer(buf);
  2602       // Need a few relocation entries
  2603       double locs_buf[20];
  2604       buffer.insts()->initialize_shared_locs(
  2605         (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo));
  2606       MacroAssembler _masm(&buffer);
  2608       // Generate the compiled-to-native wrapper code
  2609       nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method);
  2610     }
  2611   }
  2612   return nm;
  2613 }
  2615 // The dtrace method needs to convert a java.lang.String to a UTF-8 string.
  2616 void SharedRuntime::get_utf(oopDesc* src, address dst) {
  2617   typeArrayOop jlsValue  = java_lang_String::value(src);
  2618   int          jlsOffset = java_lang_String::offset(src);
  2619   int          jlsLen    = java_lang_String::length(src);
  2620   jchar*       jlsPos    = (jlsLen == 0) ? NULL :
  2621                                            jlsValue->char_at_addr(jlsOffset);
  2622   (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size);
  2623 }
  2624 #endif // ndef HAVE_DTRACE_H
  2626 // -------------------------------------------------------------------------
  2627 // Java-Java calling convention
  2628 // (what you use when Java calls Java)
  2630 //------------------------------name_for_receiver----------------------------------
  2631 // For a given signature, return the VMReg for parameter 0.
  2632 VMReg SharedRuntime::name_for_receiver() {
  2633   VMRegPair regs;
  2634   BasicType sig_bt = T_OBJECT;
  2635   (void) java_calling_convention(&sig_bt, &regs, 1, true);
  2636   // Return argument 0 register.  In the LP64 build pointers
  2637   // take 2 registers, but the VM wants only the 'main' name.
  2638   return regs.first();
  2639 }
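// For example (platform-dependent; given only as an illustration): on AMD64
// the Java calling convention passes the receiver in j_rarg0, so
// name_for_receiver() returns that register's VMReg name, and the second
// register of the LP64 pair is dropped.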
  2641 VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool has_receiver, int* arg_size) {
  2642   // This method returns a data structure allocated as a
  2643   // ResourceObject, so do not put any ResourceMarks in here.
  2644   char *s = sig->as_C_string();
  2645   int len = (int)strlen(s);
  2646   s++; len--;                   // Skip opening paren
  2647   char *t = s+len;
  2648   while( *(--t) != ')' ) ;      // Find close paren
  2650   BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 );
  2651   VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 );
  2652   int cnt = 0;
  2653   if (has_receiver) {
  2654     sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature
  2655   }
  2657   while( s < t ) {
  2658     switch( *s++ ) {            // Switch on signature character
  2659     case 'B': sig_bt[cnt++] = T_BYTE;    break;
  2660     case 'C': sig_bt[cnt++] = T_CHAR;    break;
  2661     case 'D': sig_bt[cnt++] = T_DOUBLE;  sig_bt[cnt++] = T_VOID; break;
  2662     case 'F': sig_bt[cnt++] = T_FLOAT;   break;
  2663     case 'I': sig_bt[cnt++] = T_INT;     break;
  2664     case 'J': sig_bt[cnt++] = T_LONG;    sig_bt[cnt++] = T_VOID; break;
  2665     case 'S': sig_bt[cnt++] = T_SHORT;   break;
  2666     case 'Z': sig_bt[cnt++] = T_BOOLEAN; break;
  2667     case 'V': sig_bt[cnt++] = T_VOID;    break;
  2668     case 'L':                   // Oop
  2669       while( *s++ != ';'  ) ;   // Skip signature
  2670       sig_bt[cnt++] = T_OBJECT;
  2671       break;
  2672     case '[': {                 // Array
  2673       do {                      // Skip optional size
  2674         while( *s >= '0' && *s <= '9' ) s++;
  2675       } while( *s++ == '[' );   // Nested arrays?
  2676       // Skip element type
  2677       if( s[-1] == 'L' )
  2678         while( *s++ != ';'  ) ; // Skip signature
  2679       sig_bt[cnt++] = T_ARRAY;
  2680       break;
  2681     }
  2682     default : ShouldNotReachHere();
  2683     }
  2684   }
  2685   assert( cnt < 256, "grow table size" );
  2687   int comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true);
  2690   // The calling convention doesn't count out_preserve_stack_slots, so
  2691   // we must add that in to get "true" stack offsets.
  2693   if (comp_args_on_stack) {
  2694     for (int i = 0; i < cnt; i++) {
  2695       VMReg reg1 = regs[i].first();
  2696       if( reg1->is_stack()) {
  2697         // Yuck
  2698         reg1 = reg1->bias(out_preserve_stack_slots());
  2699       }
  2700       VMReg reg2 = regs[i].second();
  2701       if( reg2->is_stack()) {
  2702         // Yuck
  2703         reg2 = reg2->bias(out_preserve_stack_slots());
  2704       }
  2705       regs[i].set_pair(reg2, reg1);
  2706     }
  2707   }
  2709   // results
  2710   *arg_size = cnt;
  2711   return regs;
  2712 }
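// For example (illustration only): for the signature "(IJLjava/lang/String;)V"
// with has_receiver == true, the parse loop above produces
//
//   sig_bt = { T_OBJECT,        // receiver, prepended before the signature
//              T_INT,           // I
//              T_LONG, T_VOID,  // J -- longs take a second, T_VOID slot
//              T_OBJECT }       // Ljava/lang/String;
//
// so *arg_size is 5.  Note that the return type after ')' is never examined.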
  2714 // OSR Migration Code
  2715 //
  2716 // This code is used to convert interpreter frames into compiled frames.  It
  2717 // is called from the very start of a compiled OSR nmethod.  A temp array is
  2718 // allocated to hold the interesting bits of the interpreter frame.  All
  2719 // active locks are inflated to allow them to move.  The displaced headers and
  2720 // active interpreter locals are copied into the temp buffer.  Then we return
  2721 // back to the compiled code.  The compiled code then pops the current
  2722 // interpreter frame off the stack and pushes a new compiled frame.  Then it
  2723 // copies the interpreter locals and displaced headers where it wants.
  2724 // Finally it calls back to free the temp buffer.
  2725 //
  2726 // All of this is done NOT at any safepoint, nor is any safepoint or GC allowed.
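// Layout of the temp buffer built below (a sketch, one machine word per slot;
// the index arithmetic is implied by the copy loops rather than spelled out
// in this file):
//
//   buf[0 .. max_locals-1]       raw copy of the interpreter locals; memory
//                                order is preserved, so buf[0] holds local
//                                number (max_locals - 1)
//   buf[max_locals + 2*k + 0]    displaced mark word of active monitor k
//   buf[max_locals + 2*k + 1]    owner oop of active monitor k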
  2728 JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) )
  2730 #ifdef IA64
  2731   ShouldNotReachHere(); // NYI
  2732 #endif /* IA64 */
  2734   //
  2735   // This code is dependent on the memory layout of the interpreter local
  2736   // array and the monitors. On all of our platforms the layout is identical
  2737 // so this code is shared. If some platform lays its arrays out
  2738   // differently then this code could move to platform specific code or
  2739   // the code here could be modified to copy items one at a time using
  2740   // frame accessor methods and be platform independent.
  2742   frame fr = thread->last_frame();
  2743   assert( fr.is_interpreted_frame(), "" );
  2744   assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" );
  2746   // Figure out how many monitors are active.
  2747   int active_monitor_count = 0;
  2748   for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end();
  2749        kptr < fr.interpreter_frame_monitor_begin();
  2750        kptr = fr.next_monitor_in_interpreter_frame(kptr) ) {
  2751     if( kptr->obj() != NULL ) active_monitor_count++;
  2752   }
  2754   // QQQ we could place number of active monitors in the array so that compiled code
  2755   // could double check it.
  2757   methodOop moop = fr.interpreter_frame_method();
  2758   int max_locals = moop->max_locals();
  2759   // Allocate temp buffer, 1 word per local & 2 per active monitor
  2760   int buf_size_words = max_locals + active_monitor_count*2;
  2761   intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words);
  2763   // Copy the locals.  Order is preserved so that loading of longs works.
  2764   // Since there's no GC I can copy the oops blindly.
  2765   assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code");
  2766   Copy::disjoint_words((HeapWord*)fr.interpreter_frame_local_at(max_locals-1),
  2767                        (HeapWord*)&buf[0],
  2768                        max_locals);
  2770   // Inflate locks.  Copy the displaced headers.  Be careful, there can be holes.
  2771   int i = max_locals;
  2772   for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end();
  2773        kptr2 < fr.interpreter_frame_monitor_begin();
  2774        kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) {
  2775     if( kptr2->obj() != NULL) {         // Avoid 'holes' in the monitor array
  2776       BasicLock *lock = kptr2->lock();
  2777       // Inflate so the displaced header becomes position-independent
  2778       if (lock->displaced_header()->is_unlocked())
  2779         ObjectSynchronizer::inflate_helper(kptr2->obj());
  2780       // Now the displaced header is free to move
  2781       buf[i++] = (intptr_t)lock->displaced_header();
  2782       buf[i++] = (intptr_t)kptr2->obj();
  2783     }
  2784   }
  2785   assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
  2787   return buf;
  2788 JRT_END
  2790 JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) )
  2791   FREE_C_HEAP_ARRAY(intptr_t,buf);
  2792 JRT_END
  2794 bool AdapterHandlerLibrary::contains(CodeBlob* b) {
  2795   AdapterHandlerTableIterator iter(_adapters);
  2796   while (iter.has_next()) {
  2797     AdapterHandlerEntry* a = iter.next();
  2798     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) return true;
  2799   }
  2800   return false;
  2801 }
  2803 void AdapterHandlerLibrary::print_handler_on(outputStream* st, CodeBlob* b) {
  2804   AdapterHandlerTableIterator iter(_adapters);
  2805   while (iter.has_next()) {
  2806     AdapterHandlerEntry* a = iter.next();
  2807     if ( b == CodeCache::find_blob(a->get_i2c_entry()) ) {
  2808       st->print("Adapter for signature: ");
  2809       st->print_cr("%s i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT,
  2810                    a->fingerprint()->as_string(),
  2811                    a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry());
  2813       return;
  2814     }
  2815   }
  2816   assert(false, "Should have found handler");
  2817 }
  2819 #ifndef PRODUCT
  2821 void AdapterHandlerLibrary::print_statistics() {
  2822   _adapters->print_statistics();
  2823 }
  2825 #endif /* PRODUCT */