src/share/vm/c1/c1_Runtime1.cpp

author:      aoqi
date:        Thu, 24 May 2018 18:41:44 +0800
changeset:   8856:ac27a9c85bea (Merge)
parent:      8604:04d83ba48607
child:       8865:ffcdff41a92f
permissions: -rw-r--r--
/*
 * Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeBlob.hpp"
#include "code/compiledIC.hpp"
#include "code/pcDesc.hpp"
#include "code/scopeDesc.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/barrierSet.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/compilationPolicy.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/threadCritical.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

// Implementation of StubAssembler

StubAssembler::StubAssembler(CodeBuffer* code, const char * name, int stub_id) : C1_MacroAssembler(code) {
  _name = name;
  _must_gc_arguments = false;
  _frame_size = no_frame_size;
  _num_rt_args = 0;
  _stub_id = stub_id;
}


void StubAssembler::set_info(const char* name, bool must_gc_arguments) {
  _name = name;
  _must_gc_arguments = must_gc_arguments;
}


void StubAssembler::set_frame_size(int size) {
  if (_frame_size == no_frame_size) {
    _frame_size = size;
  }
  assert(_frame_size == size, "can't change the frame size");
}


void StubAssembler::set_num_rt_args(int args) {
  if (_num_rt_args == 0) {
    _num_rt_args = args;
  }
  assert(_num_rt_args == args, "can't change the number of args");
}
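
// Note: set_frame_size() and set_num_rt_args() latch their value on first
// use: every later call must pass the same value or the assert fires, so a
// stub ends up with one fixed frame size and runtime-argument count.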

// Implementation of Runtime1

CodeBlob* Runtime1::_blobs[Runtime1::number_of_ids];
const char *Runtime1::_blob_names[] = {
  RUNTIME1_STUBS(STUB_NAME, LAST_STUB_NAME)
};

#ifndef PRODUCT
// statistics
int Runtime1::_generic_arraycopy_cnt = 0;
int Runtime1::_primitive_arraycopy_cnt = 0;
int Runtime1::_oop_arraycopy_cnt = 0;
int Runtime1::_generic_arraycopystub_cnt = 0;
int Runtime1::_arraycopy_slowcase_cnt = 0;
int Runtime1::_arraycopy_checkcast_cnt = 0;
int Runtime1::_arraycopy_checkcast_attempt_cnt = 0;
int Runtime1::_new_type_array_slowcase_cnt = 0;
int Runtime1::_new_object_array_slowcase_cnt = 0;
int Runtime1::_new_instance_slowcase_cnt = 0;
int Runtime1::_new_multi_array_slowcase_cnt = 0;
int Runtime1::_monitorenter_slowcase_cnt = 0;
int Runtime1::_monitorexit_slowcase_cnt = 0;
int Runtime1::_patch_code_slowcase_cnt = 0;
int Runtime1::_throw_range_check_exception_count = 0;
int Runtime1::_throw_index_exception_count = 0;
int Runtime1::_throw_div0_exception_count = 0;
int Runtime1::_throw_null_pointer_exception_count = 0;
int Runtime1::_throw_class_cast_exception_count = 0;
int Runtime1::_throw_incompatible_class_change_error_count = 0;
int Runtime1::_throw_array_store_exception_count = 0;
int Runtime1::_throw_count = 0;

static int _byte_arraycopy_cnt = 0;
static int _short_arraycopy_cnt = 0;
static int _int_arraycopy_cnt = 0;
static int _long_arraycopy_cnt = 0;
static int _oop_arraycopy_cnt = 0;

address Runtime1::arraycopy_count_address(BasicType type) {
  switch (type) {
  case T_BOOLEAN:
  case T_BYTE:   return (address)&_byte_arraycopy_cnt;
  case T_CHAR:
  case T_SHORT:  return (address)&_short_arraycopy_cnt;
  case T_FLOAT:
  case T_INT:    return (address)&_int_arraycopy_cnt;
  case T_DOUBLE:
  case T_LONG:   return (address)&_long_arraycopy_cnt;
  case T_ARRAY:
  case T_OBJECT: return (address)&_oop_arraycopy_cnt;
  default:
    ShouldNotReachHere();
    return NULL;
  }
}

#endif
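
// The addresses returned by arraycopy_count_address() are handed to the code
// generator so the arraycopy stubs can bump the matching per-type counter on
// each invocation (statistics gathering in non-product builds only).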

// Simple helper to see if the caller of a runtime stub which
// entered the VM has been deoptimized

static bool caller_is_deopted() {
  JavaThread* thread = JavaThread::current();
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);
  assert(caller_frame.is_compiled_frame(), "must be compiled");
  return caller_frame.is_deoptimized_frame();
}

// Stress deoptimization
static void deopt_caller() {
  if (!caller_is_deopted()) {
    JavaThread* thread = JavaThread::current();
    RegisterMap reg_map(thread, false);
    frame runtime_frame = thread->last_frame();
    frame caller_frame = runtime_frame.sender(&reg_map);
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");
  }
}
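
// deopt_caller() is the stress hook used by the DeoptimizeALot checks in
// the allocation entry points below (new_type_array, new_object_array).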

void Runtime1::generate_blob_for(BufferBlob* buffer_blob, StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  ResourceMark rm;
  // create code buffer for code storage
  CodeBuffer code(buffer_blob);

  Compilation::setup_code_buffer(&code, 0);

  // create assembler for code generation
  StubAssembler* sasm = new StubAssembler(&code, name_for(id), id);
  // generate code for runtime stub
  OopMapSet* oop_maps;
  oop_maps = generate_code_for(id, sasm);
  assert(oop_maps == NULL || sasm->frame_size() != no_frame_size,
         "if stub has an oop map it must have a valid frame size");

#ifdef ASSERT
  // Make sure that stubs that need oopmaps have them
  switch (id) {
    // These stubs don't need to have an oopmap
    case dtrace_object_alloc_id:
    case g1_pre_barrier_slow_id:
    case g1_post_barrier_slow_id:
    case slow_subtype_check_id:
    case fpu2long_stub_id:
    case unwind_exception_id:
    case counter_overflow_id:
#if defined(SPARC) || defined(PPC)
    case handle_exception_nofpu_id:  // Unused on sparc
#endif
      break;

    // All other stubs should have oopmaps
    default:
      assert(oop_maps != NULL, "must have an oopmap");
  }
#endif

  // align so printing shows nop's instead of random code at the end (SimpleStubs are aligned)
  sasm->align(BytesPerWord);
  // make sure all code is in code buffer
  sasm->flush();
  // create blob - distinguish a few special cases
  CodeBlob* blob = RuntimeStub::new_runtime_stub(name_for(id),
                                                 &code,
                                                 CodeOffsets::frame_never_safe,
                                                 sasm->frame_size(),
                                                 oop_maps,
                                                 sasm->must_gc_arguments());
  // install blob
  assert(blob != NULL, "blob must exist");
  _blobs[id] = blob;
}

void Runtime1::initialize(BufferBlob* blob) {
  // platform-dependent initialization
  initialize_pd();
  // generate stubs
  for (int id = 0; id < number_of_ids; id++) generate_blob_for(blob, (StubID)id);
  // printing
#ifndef PRODUCT
  if (PrintSimpleStubs) {
    ResourceMark rm;
    for (int id = 0; id < number_of_ids; id++) {
      _blobs[id]->print();
      if (_blobs[id]->oop_maps() != NULL) {
        _blobs[id]->oop_maps()->print();
      }
    }
  }
#endif
}


CodeBlob* Runtime1::blob_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blobs[id];
}


const char* Runtime1::name_for(StubID id) {
  assert(0 <= id && id < number_of_ids, "illegal stub id");
  return _blob_names[id];
}

const char* Runtime1::name_for_address(address entry) {
  for (int id = 0; id < number_of_ids; id++) {
    if (entry == entry_for((StubID)id)) return name_for((StubID)id);
  }

#define FUNCTION_CASE(a, f) \
  if ((intptr_t)a == CAST_FROM_FN_PTR(intptr_t, f))  return #f

  FUNCTION_CASE(entry, os::javaTimeMillis);
  FUNCTION_CASE(entry, os::javaTimeNanos);
  FUNCTION_CASE(entry, SharedRuntime::OSR_migration_end);
  FUNCTION_CASE(entry, SharedRuntime::d2f);
  FUNCTION_CASE(entry, SharedRuntime::d2i);
  FUNCTION_CASE(entry, SharedRuntime::d2l);
  FUNCTION_CASE(entry, SharedRuntime::dcos);
  FUNCTION_CASE(entry, SharedRuntime::dexp);
  FUNCTION_CASE(entry, SharedRuntime::dlog);
  FUNCTION_CASE(entry, SharedRuntime::dlog10);
  FUNCTION_CASE(entry, SharedRuntime::dpow);
  FUNCTION_CASE(entry, SharedRuntime::drem);
  FUNCTION_CASE(entry, SharedRuntime::dsin);
  FUNCTION_CASE(entry, SharedRuntime::dtan);
  FUNCTION_CASE(entry, SharedRuntime::f2i);
  FUNCTION_CASE(entry, SharedRuntime::f2l);
  FUNCTION_CASE(entry, SharedRuntime::frem);
  FUNCTION_CASE(entry, SharedRuntime::l2d);
  FUNCTION_CASE(entry, SharedRuntime::l2f);
  FUNCTION_CASE(entry, SharedRuntime::ldiv);
  FUNCTION_CASE(entry, SharedRuntime::lmul);
  FUNCTION_CASE(entry, SharedRuntime::lrem);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_entry);
  FUNCTION_CASE(entry, SharedRuntime::dtrace_method_exit);
  FUNCTION_CASE(entry, is_instance_of);
  FUNCTION_CASE(entry, trace_block_entry);
#ifdef TRACE_HAVE_INTRINSICS
  FUNCTION_CASE(entry, TRACE_TIME_METHOD);
#endif
  FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32());

#undef FUNCTION_CASE

  // Soft float adds more runtime names.
  return pd_name_for_address(entry);
}
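
// Example (hypothetical debugging use): resolving a runtime call target to a
// printable name while inspecting generated code:
//
//   address entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
//   tty->print_cr("calls %s", Runtime1::name_for_address(entry));  // "calls SharedRuntime::drem"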

JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
  NOT_PRODUCT(_new_instance_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  instanceKlassHandle h(thread, klass);
  h->check_valid_for_instantiation(true, CHECK);
  // make sure klass is initialized
  h->initialize(CHECK);
  // allocate instance and return via TLS
  oop obj = h->allocate_instance(CHECK);
  thread->set_vm_result(obj);
JRT_END

JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
  NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
  // Note: no handle for klass needed since they are not used
  //       anymore after new_typeArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(klass->is_klass(), "not a class");
  BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
  oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
  thread->set_vm_result(obj);
  // This path is pretty rare, but deoptimizing here stresses the
  // deoptimization machinery, so force a deopt under DeoptimizeALot.
  if (DeoptimizeALot) {
    deopt_caller();
  }

JRT_END

JRT_ENTRY(void, Runtime1::new_object_array(JavaThread* thread, Klass* array_klass, jint length))
  NOT_PRODUCT(_new_object_array_slowcase_cnt++;)

  // Note: no handle for klass needed since they are not used
  //       anymore after new_objArray() and no GC can happen before.
  //       (This may have to change if this code changes!)
  assert(array_klass->is_klass(), "not a class");
  Handle holder(THREAD, array_klass->klass_holder()); // keep the klass alive
  Klass* elem_klass = ObjArrayKlass::cast(array_klass)->element_klass();
  objArrayOop obj = oopFactory::new_objArray(elem_klass, length, CHECK);
  thread->set_vm_result(obj);
  // This path is pretty rare, but deoptimizing here stresses the
  // deoptimization machinery, so force a deopt under DeoptimizeALot.
  if (DeoptimizeALot) {
    deopt_caller();
  }
JRT_END

JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, Klass* klass, int rank, jint* dims))
  NOT_PRODUCT(_new_multi_array_slowcase_cnt++;)

  assert(klass->is_klass(), "not a class");
  assert(rank >= 1, "rank must be nonzero");
  Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
  oop obj = ArrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
  thread->set_vm_result(obj);
JRT_END


JRT_ENTRY(void, Runtime1::unimplemented_entry(JavaThread* thread, StubID id))
  tty->print_cr("Runtime1::entry_for(%d) returned unimplemented entry point", id);
JRT_END


JRT_ENTRY(void, Runtime1::throw_array_store_exception(JavaThread* thread, oopDesc* obj))
  ResourceMark rm(thread);
  const char* klass_name = obj->klass()->external_name();
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayStoreException(), klass_name);
JRT_END

// counter_overflow() is called from within C1-compiled methods. The enclosing method is the method
// associated with the top activation record. The method oop of the inlinee (which may have been
// inlined into the enclosing method) is passed as an argument; to make that possible it is
// embedded in the compiled code as a constant.
static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Method* m) {
  nmethod* osr_nm = NULL;
  methodHandle method(THREAD, m);

  RegisterMap map(THREAD, false);
  frame fr =  THREAD->last_frame().sender(&map);
  nmethod* nm = (nmethod*) fr.cb();
  assert(nm != NULL && nm->is_nmethod(), "Sanity check");
  methodHandle enclosing_method(THREAD, nm->method());

  CompLevel level = (CompLevel)nm->comp_level();
  int bci = InvocationEntryBci;
  if (branch_bci != InvocationEntryBci) {
    // Compute destination bci
    address pc = method()->code_base() + branch_bci;
    Bytecodes::Code branch = Bytecodes::code_at(method(), pc);
    int offset = 0;
    switch (branch) {
      case Bytecodes::_if_icmplt: case Bytecodes::_iflt:
      case Bytecodes::_if_icmpgt: case Bytecodes::_ifgt:
      case Bytecodes::_if_icmple: case Bytecodes::_ifle:
      case Bytecodes::_if_icmpge: case Bytecodes::_ifge:
      case Bytecodes::_if_icmpeq: case Bytecodes::_if_acmpeq: case Bytecodes::_ifeq:
      case Bytecodes::_if_icmpne: case Bytecodes::_if_acmpne: case Bytecodes::_ifne:
      case Bytecodes::_ifnull: case Bytecodes::_ifnonnull: case Bytecodes::_goto:
        offset = (int16_t)Bytes::get_Java_u2(pc + 1);
        break;
      case Bytecodes::_goto_w:
        offset = Bytes::get_Java_u4(pc + 1);
        break;
      default: ;
    }
    bci = branch_bci + offset;
  }
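  // Worked example: a backward _goto at branch_bci 10 whose signed 16-bit
  // operand encodes -6 yields bci = 10 + (-6) = 4, i.e. the loop header.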
  assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending");
  osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
  assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions");
  return osr_nm;
}

JRT_BLOCK_ENTRY(address, Runtime1::counter_overflow(JavaThread* thread, int bci, Method* method))
  nmethod* osr_nm;
  JRT_BLOCK
    osr_nm = counter_overflow_helper(thread, bci, method);
    if (osr_nm != NULL) {
      RegisterMap map(thread, false);
      frame fr =  thread->last_frame().sender(&map);
      Deoptimization::deoptimize_frame(thread, fr.id());
    }
  JRT_BLOCK_END
  return NULL;
JRT_END

extern void vm_exit(int code);

// Enter this method from compiled code handler below. This is where we transition
// to VM mode. This is done as a helper routine so that the method called directly
// from compiled code does not have to transition to VM. This allows the entry
// method to see if the nmethod that we have just looked up a handler for has
// been deoptimized while we were in the vm. This simplifies the assembly code
// in the cpu directories.
//
// We are entering here from exception stub (via the entry method below)
// If there is a compiled exception handler in this method, we will continue there;
// otherwise we will unwind the stack and continue at the caller of the top frame method
// Note: we enter in Java using a special JRT wrapper. This wrapper allows us to
// control the area where we can allow a safepoint. After we exit the safepoint area we can
// check to see if the handler we are going to return to is now in a nmethod that has
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
  // Reset method handle flag.
  thread->set_is_method_handle_return(false);

  Handle exception(thread, ex);
  nm = CodeCache::find_nmethod(pc);
  assert(nm != NULL, "this is not an nmethod");
  // Adjust the pc as needed.
  if (nm->is_deopt_pc(pc)) {
    RegisterMap map(thread, false);
    frame exception_frame = thread->last_frame().sender(&map);
    // if the frame isn't deopted then pc must not correspond to the caller of last_frame
    assert(exception_frame.is_deoptimized_frame(), "must be deopted");
    pc = exception_frame.pc();
  }
#ifdef ASSERT
  assert(exception.not_null(), "NULL exceptions should be handled by throw_exception");
  assert(exception->is_oop(), "just checking");
  // Check that exception is a subclass of Throwable, otherwise we have a VerifyError
  if (!(exception->is_a(SystemDictionary::Throwable_klass()))) {
    if (ExitVMOnVerifyError) vm_exit(-1);
    ShouldNotReachHere();
  }
#endif

  // Check the stack guard pages and reenable them if necessary and there is
  // enough space on the stack to do so.  Use fast exceptions only if the guard
  // pages are enabled.
  bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();

  if (JvmtiExport::can_post_on_exceptions()) {
    // To ensure correct notification of exception catches and throws
    // we have to deoptimize here.  If we attempted to notify the
    // catches and throws during this exception lookup it's possible
    // we could deoptimize on the way out of the VM and end back in
    // the interpreter at the throw site.  This would result in double
    // notifications since the interpreter would also notify about
    // these same catches and throws as it unwound the frame.

    RegisterMap reg_map(thread);
    frame stub_frame = thread->last_frame();
    frame caller_frame = stub_frame.sender(&reg_map);

    // We don't really want to deoptimize the nmethod itself since we
    // can actually continue in the exception handler ourselves but I
    // don't see an easy way to have the desired effect.
    Deoptimization::deoptimize_frame(thread, caller_frame.id());
    assert(caller_is_deopted(), "Must be deoptimized");

    return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
  if (guard_pages_enabled) {
    address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
    if (fast_continuation != NULL) {
      // Set flag if return address is a method handle call site.
      thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
      return fast_continuation;
    }
  }

  // If the stack guard pages are enabled, check whether there is a handler in
  // the current method.  Otherwise (guard pages disabled), force an unwind and
  // skip the exception cache update (i.e., just leave continuation==NULL).
  address continuation = NULL;
  if (guard_pages_enabled) {

    // New exception handling mechanism can support inlined methods
    // with exception handlers since the mappings are from PC to PC

    // debugging support
    // tracing
    if (TraceExceptions) {
      ttyLocker ttyl;
      ResourceMark rm;
      tty->print_cr("Exception <%s> (" INTPTR_FORMAT ") thrown in compiled method <%s> at PC " INTPTR_FORMAT " for thread " INTPTR_FORMAT "",
                    exception->print_value_string(), p2i((address)exception()), nm->method()->print_value_string(), p2i(pc), p2i(thread));
    }
    // for AbortVMOnException flag
    NOT_PRODUCT(Exceptions::debug_check_abort(exception));

    // Clear out the exception oop and pc since looking up an
    // exception handler can cause class loading, which might throw an
    // exception and those fields are expected to be clear during
    // normal bytecode execution.
    thread->clear_exception_oop_and_pc();

    Handle original_exception(thread, exception());

    continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
    // If an exception was thrown during exception dispatch, the exception oop may have changed
    thread->set_exception_oop(exception());
    thread->set_exception_pc(pc);

    // The exception cache is used only for non-implicit exceptions.
    // Update the exception cache only if no other exception occurred
    // while computing the compiled exception handler.
    if (continuation != NULL && original_exception() == exception()) {
      nm->add_handler_for_exception_and_pc(exception, pc, continuation);
    }
  }

  thread->set_vm_result(exception());
  // Set flag if return address is a method handle call site.
  thread->set_is_method_handle_return(nm->is_method_handle_return(pc));

  if (TraceExceptions) {
    ttyLocker ttyl;
    ResourceMark rm;
    tty->print_cr("Thread " PTR_FORMAT " continuing at PC " PTR_FORMAT " for exception thrown at PC " PTR_FORMAT,
                  p2i(thread), p2i(continuation), p2i(pc));
  }

  return continuation;
JRT_END

// Enter this method from compiled code only if there is a Java exception handler
// in the method handling the exception.
// We are entering here from exception stub. We don't do a normal VM transition here.
// We do it in a helper. This is so we can check to see if the nmethod we have just
// searched for an exception handler has been deoptimized in the meantime.
address Runtime1::exception_handler_for_pc(JavaThread* thread) {
  oop exception = thread->exception_oop();
  address pc = thread->exception_pc();
  // Still in Java mode
  DEBUG_ONLY(ResetNoHandleMark rnhm);
  nmethod* nm = NULL;
  address continuation = NULL;
  {
    // Enter VM mode by calling the helper
    ResetNoHandleMark rnhm;
    continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
  }
  // Back in JAVA, use no oops DON'T safepoint

  // Now check to see if the nmethod we were called from is now deoptimized.
  // If so we must return to the deopt blob and deoptimize the nmethod
  if (nm != NULL && caller_is_deopted()) {
    continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
  }

  assert(continuation != NULL, "no handler found");
  return continuation;
}

JRT_ENTRY(void, Runtime1::throw_range_check_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_range_check_exception_count++;)
  char message[jintAsStringSize];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArrayIndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_index_exception(JavaThread* thread, int index))
  NOT_PRODUCT(_throw_index_exception_count++;)
  char message[16];
  sprintf(message, "%d", index);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IndexOutOfBoundsException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_div0_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_div0_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
JRT_END


JRT_ENTRY(void, Runtime1::throw_null_pointer_exception(JavaThread* thread))
  NOT_PRODUCT(_throw_null_pointer_exception_count++;)
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException());
JRT_END


JRT_ENTRY(void, Runtime1::throw_class_cast_exception(JavaThread* thread, oopDesc* object))
  NOT_PRODUCT(_throw_class_cast_exception_count++;)
  ResourceMark rm(thread);
  char* message = SharedRuntime::generate_class_cast_message(
    thread, object->klass()->external_name());
  SharedRuntime::throw_and_post_jvmti_exception(
    thread, vmSymbols::java_lang_ClassCastException(), message);
JRT_END


JRT_ENTRY(void, Runtime1::throw_incompatible_class_change_error(JavaThread* thread))
  NOT_PRODUCT(_throw_incompatible_class_change_error_count++;)
  ResourceMark rm(thread);
  SharedRuntime::throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError());
JRT_END

JRT_ENTRY_NO_ASYNC(void, Runtime1::monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorenter_slowcase_cnt++;)
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
  }
  Handle h_obj(thread, obj);
  assert(h_obj()->is_oop(), "must be NULL or an object");
  if (UseBiasedLocking) {
    // Retry fast entry if bias is revoked to avoid unnecessary inflation
    ObjectSynchronizer::fast_enter(h_obj, lock->lock(), true, CHECK);
  } else {
    if (UseFastLocking) {
      // When using fast locking, the compiled code has already tried the fast case
      assert(obj == lock->obj(), "must match");
      ObjectSynchronizer::slow_enter(h_obj, lock->lock(), THREAD);
    } else {
      lock->set_obj(obj);
      ObjectSynchronizer::fast_enter(h_obj, lock->lock(), false, THREAD);
    }
  }
JRT_END


JRT_LEAF(void, Runtime1::monitorexit(JavaThread* thread, BasicObjectLock* lock))
  NOT_PRODUCT(_monitorexit_slowcase_cnt++;)
  assert(thread == JavaThread::current(), "threads must correspond");
  assert(thread->last_Java_sp(), "last_Java_sp must be set");
  // monitorexit is non-blocking (leaf routine) => no exceptions can be thrown
  EXCEPTION_MARK;

  oop obj = lock->obj();
  assert(obj->is_oop(), "must be NULL or an object");
  if (UseFastLocking) {
    // When using fast locking, the compiled code has already tried the fast case
    ObjectSynchronizer::slow_exit(obj, lock->lock(), THREAD);
  } else {
    ObjectSynchronizer::fast_exit(obj, lock->lock(), THREAD);
  }
JRT_END

// Cf. OptoRuntime::deoptimize_caller_frame
JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread))
  // Called from within the owner thread, so no need for safepoint
  RegisterMap reg_map(thread, false);
  frame stub_frame = thread->last_frame();
  assert(stub_frame.is_runtime_frame(), "sanity check");
  frame caller_frame = stub_frame.sender(&reg_map);

  // We are coming from a compiled method; check this is true.
  assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity");

  // Deoptimize the caller frame.
  Deoptimization::deoptimize_frame(thread, caller_frame.id());

  // Return to the now deoptimized frame.
JRT_END

static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
  Bytecode_field field_access(caller, bci);
  // This can be static or non-static field access
  Bytecodes::Code code       = field_access.code();

  // We must load the class, initialize the class and resolve the field
  fieldDescriptor result; // initialize class if needed
  constantPoolHandle constants(THREAD, caller->constants());
  LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
  return result.field_holder();
}

//
// This routine patches sites where a class wasn't loaded or
// initialized at the time the code was generated.  It handles
// references to classes, fields and forcing of initialization.  Most
// of the cases are straightforward and involve simply forcing
// resolution of a class, rewriting the instruction stream with the
// needed constant and replacing the call to this function with the
// patched code.  The case for static fields is more complicated since
// the thread which is in the process of initializing a class can
// access its static fields but other threads can't, so the code
// either has to deoptimize when this case is detected or execute a
// check that the current thread is the initializing thread.  The
// current implementation emits such a check, as described below.
//
// Patches basically look like this:
//
//
// patch_site: jmp patch stub     ;; will be patched
// continue:   ...
//             ...
//             ...
//             ...
//
// They have a stub which looks like this:
//
//             ;; patch body
//             movl <const>, reg           (for class constants)
//        <or> movl [reg1 + <const>], reg  (for field offsets)
//        <or> movl reg, [reg1 + <const>]  (for field offsets)
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: call Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
//
// A normal patch is done by rewriting the patch body, usually a move,
// and then copying it into place over top of the jmp instruction
// being careful to flush caches and doing it in an MP-safe way.  The
// constants following the patch body are used to find various pieces
// of the patch relative to the call site for Runtime1::patch_code.
// The case for getstatic and putstatic is more complicated because
// getstatic and putstatic have special semantics when executing while
// the class is being initialized.  getstatic/putstatic on a class
// which is being_initialized may be executed by the initializing
// thread but other threads have to block when they execute it.  This
// is accomplished in compiled code by executing a test of the current
// thread against the initializing thread of the class.  It's emitted
// as boilerplate in their stub which allows the patched code to be
// executed before it's copied back into the main body of the nmethod.
//
// being_init: get_thread(<tmp reg>)
//             cmpl [reg1 + <init_thread_offset>], <tmp reg>
//             jne patch_stub
//             movl [reg1 + <const>], reg  (for field offsets)  <or>
//             movl reg, [reg1 + <const>]  (for field offsets)
//             jmp continue
//             <being_init offset> <bytes to copy> <bytes to skip>
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
//             jmp patch_site
//
// If the class is being initialized the patch body is rewritten and
// the patch site is rewritten to jump to being_init, instead of
// patch_stub.  Whenever this code is executed it checks the current
// thread against the initializing thread so other threads will enter
// the runtime and end up blocked waiting for the class to finish
// initializing inside the calls to resolve_field below.  The
// initializing thread will continue on its way.  Once the class is
// fully_initialized, the initializing_thread of the class becomes
// NULL, so the next thread to execute this code will fail the test,
// call into patch_code and complete the patching process by copying
// the patch body back into the main part of the nmethod and resume
// executing.
//
//

JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
  NOT_PRODUCT(_patch_code_slowcase_cnt++;)

  ResourceMark rm(thread);
  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  // last java frame on stack
  vframeStream vfst(thread, true);
  assert(!vfst.at_end(), "Java frame must exist");

  methodHandle caller_method(THREAD, vfst.method());
  // Note that caller_method->code() may not be same as caller_code because of OSR's
  // Note also that in the presence of inlining it is not guaranteed
  // that caller_method() == caller_code->method()

  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);

#ifndef PRODUCT
  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
#endif // PRODUCT
  bool deoptimize_for_volatile = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
  bool load_klass_or_mirror_patch_id =
    (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

  if (stub_id == Runtime1::access_field_patching_id) {

    Bytecode_field field_access(caller_method, bci);
    fieldDescriptor result; // initialize class if needed
    Bytecodes::Code code = field_access.code();
    constantPoolHandle constants(THREAD, caller_method->constants());
    LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
    patch_field_offset = result.offset();

    // If we're patching a field which is volatile then at compile time
    // it must not have been known to be volatile, so the generated code
    // isn't correct for a volatile reference.  The nmethod has to be
    // deoptimized so that the code can be regenerated correctly.
    // This check is only needed for access_field_patching since this
    // is the path for patching field offsets.  load_klass is only
    // used for patching references to oops which don't need special
    // handling in the volatile case.
    deoptimize_for_volatile = result.access_flags().is_volatile();

#ifndef PRODUCT
    patch_field_type = result.field_type();
#endif
  } else if (load_klass_or_mirror_patch_id) {
    Klass* k = NULL;
    switch (code) {
      case Bytecodes::_putstatic:
      case Bytecodes::_getstatic:
        { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
          init_klass = KlassHandle(THREAD, klass);
          mirror = Handle(THREAD, klass->java_mirror());
        }
        break;
      case Bytecodes::_new:
        { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(bnew.index(), CHECK);
        }
        break;
      case Bytecodes::_multianewarray:
        { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(mna.index(), CHECK);
        }
        break;
      case Bytecodes::_instanceof:
        { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(io.index(), CHECK);
        }
        break;
      case Bytecodes::_checkcast:
        { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
          k = caller_method->constants()->klass_at(cc.index(), CHECK);
        }
        break;
      case Bytecodes::_anewarray:
        { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
          Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
          k = ek->array_klass(CHECK);
        }
        break;
      case Bytecodes::_ldc:
      case Bytecodes::_ldc_w:
        {
          Bytecode_loadconstant cc(caller_method, bci);
          oop m = cc.resolve_constant(CHECK);
          mirror = Handle(THREAD, m);
        }
        break;
      default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
    }
    // convert to handle
    load_klass = KlassHandle(THREAD, k);
  } else if (stub_id == load_appendix_patching_id) {
    Bytecode_invoke bytecode(caller_method, bci);
    Bytecodes::Code bc = bytecode.invoke_code();

    CallInfo info;
    constantPoolHandle pool(thread, caller_method->constants());
    int index = bytecode.index();
    LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
    appendix = info.resolved_appendix();
    switch (bc) {
      case Bytecodes::_invokehandle: {
        int cache_index = ConstantPool::decode_cpcache_index(index, true);
        assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
        pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
        break;
      }
      case Bytecodes::_invokedynamic: {
        pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
        break;
      }
      default: fatal("unexpected bytecode for load_appendix_patching_id");
    }
  } else {
    ShouldNotReachHere();
  }

  if (deoptimize_for_volatile) {
    // At compile time we assumed the field wasn't volatile but after
    // loading it turns out it was volatile so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
    }
    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump() ) {
        // the jump has not been patched yet
        // The jump destination is the slow case and therefore not part of the stubs
        // (stubs are only for StaticCalls)

        // format of buffer
        //    ....
        //    instr byte 0     <-- copy_buff
        //    instr byte 1
        //    ..
        //    instr byte n-1
        //      n
        //    ....             <-- call destination

        address stub_location = caller_frame.pc() + PatchingStub::patch_info_offset();

#if defined(MIPS32) && defined(_LP64)
        /* Jin: In MIPS64, byte_skip is much larger than on X86; it cannot be
         * contained in a byte:
         *   int bc = 0x20;
         *   int bs = 0x190;
         *   int bi = 0x1b0;
         *
         * To minimize the modification of shared code, the values are divided
         * by 4 when generated.
         * See [mips/c1_CodeStubs_mips.cpp 307] PatchingStub::emit_code().
         */
        int bc = *(unsigned char*) (stub_location - 1) * 4;
        int bs = *(unsigned char*) (stub_location - 2) * 4;
        int bi = *(unsigned char*) (stub_location - 3) * 4;
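        // Worked example with the values above: byte_skip = 0x190 is emitted
        // as 0x190 / 4 = 0x64, which fits in one byte; reading it back here
        // and multiplying by 4 recovers 0x190.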

        int* byte_count = &bc;
        int* byte_skip = &bs;
        int* being_initialized_entry_offset = &bi;
#else
        unsigned char* byte_count = (unsigned char*) (stub_location - 1);
        unsigned char* byte_skip = (unsigned char*) (stub_location - 2);
        unsigned char* being_initialized_entry_offset = (unsigned char*) (stub_location - 3);
#endif
        address copy_buff = stub_location - *byte_skip - *byte_count;
        address being_initialized_entry = stub_location - *being_initialized_entry_offset;

        if (TracePatching) {
          tty->print_cr(" Patching %s at bci %d at address " INTPTR_FORMAT "  (%s)", Bytecodes::name(code), bci,
                        p2i(instr_pc), (stub_id == Runtime1::access_field_patching_id) ? "field" : "klass");
          nmethod* caller_code = CodeCache::find_nmethod(caller_frame.pc());
          assert(caller_code != NULL, "nmethod not found");

          // NOTE we use pc() not original_pc() because we already know they are
          // identical; otherwise we'd never have entered this block of code

          OopMap* map = caller_code->oop_map_for_return_address(caller_frame.pc());
          assert(map != NULL, "null check");
          map->print();
          tty->cr();

          Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
        }
        // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
        bool do_patch = true;
        if (stub_id == Runtime1::access_field_patching_id) {
          // The offset may not be correct if the class was not loaded at code generation time.
          // Set it now.
          NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
          assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
          assert(patch_field_offset >= 0, "illegal offset");
          n_move->add_offset_in_bytes(patch_field_offset);
        } else if (load_klass_or_mirror_patch_id) {
          // If a getstatic or putstatic is referencing a klass which
          // isn't fully initialized, the patch body isn't copied into
          // place until initialization is complete.  In this case the
          // patch site is set up so that any threads besides the
          // initializing thread are forced to come into the VM and
          // block.
          do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
                     InstanceKlass::cast(init_klass())->is_initialized();
          NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
          if (jump->jump_destination() == being_initialized_entry) {
            assert(do_patch == true, "initialization must be complete at this point");
          } else {
            // patch the instruction <move reg, klass>
            NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);

            assert(n_copy->data() == 0 ||
                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
                   "illegal init value");
            if (stub_id == Runtime1::load_klass_patching_id) {
              assert(load_klass() != NULL, "klass not set");
              n_copy->set_data((intx) (load_klass()));
            } else {
              assert(mirror() != NULL, "klass not set");
              // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
              n_copy->set_data(cast_from_oop<intx>(mirror()));
            }

            if (TracePatching) {
              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
            }
          }
        } else if (stub_id == Runtime1::load_appendix_patching_id) {
          NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
          assert(n_copy->data() == 0 ||
                 n_copy->data() == (intptr_t)Universe::non_oop_word(),
                 "illegal init value");
          n_copy->set_data(cast_from_oop<intx>(appendix()));

          if (TracePatching) {
            Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
          }
        } else {
          ShouldNotReachHere();
        }

#if defined(SPARC) || defined(PPC)
        if (load_klass_or_mirror_patch_id ||
            stub_id == Runtime1::load_appendix_patching_id) {
          // Update the location in the nmethod with the proper
          // metadata.  When the code was generated, a NULL was stuffed
          // in the metadata table and that table needs to be updated to
          // have the right value.  On intel the value is kept
          // directly in the instruction instead of in the metadata
          // table, so set_data above effectively updated the value.
          nmethod* nm = CodeCache::find_nmethod(instr_pc);
          assert(nm != NULL, "invalid nmethod_pc");
          RelocIterator mds(nm, copy_buff, copy_buff + 1);
          bool found = false;
          while (mds.next() && !found) {
            if (mds.type() == relocInfo::oop_type) {
              assert(stub_id == Runtime1::load_mirror_patching_id ||
                     stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
              oop_Relocation* r = mds.oop_reloc();
              oop* oop_adr = r->oop_addr();
              *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
              r->fix_oop_relocation();
              found = true;
            } else if (mds.type() == relocInfo::metadata_type) {
              assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
              metadata_Relocation* r = mds.metadata_reloc();
              Metadata** metadata_adr = r->metadata_addr();
              *metadata_adr = load_klass();
              r->fix_metadata_relocation();
              found = true;
            }
          }
          assert(found, "the metadata must exist!");
        }
#endif
        if (do_patch) {
          // replace instructions
          // first replace the tail, then the call
#ifdef ARM
          if ((load_klass_or_mirror_patch_id ||
               stub_id == Runtime1::load_appendix_patching_id) &&
              nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            address addr = NULL;
            assert(nm != NULL, "invalid nmethod_pc");
            RelocIterator mds(nm, copy_buff, copy_buff + 1);
            while (mds.next()) {
              if (mds.type() == relocInfo::oop_type) {
                assert(stub_id == Runtime1::load_mirror_patching_id ||
                       stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
                oop_Relocation* r = mds.oop_reloc();
                addr = (address)r->oop_addr();
                break;
              } else if (mds.type() == relocInfo::metadata_type) {
                assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
                metadata_Relocation* r = mds.metadata_reloc();
                addr = (address)r->metadata_addr();
                break;
              }
            }
            assert(addr != NULL, "metadata relocation must exist");
            copy_buff -= *byte_count;
            NativeMovConstReg* n_copy2 = nativeMovConstReg_at(copy_buff);
            n_copy2->set_pc_relative_offset(addr, instr_pc);
          }
#endif

          for (int i = NativeCall::instruction_size; i < *byte_count; i++) {
            address ptr = copy_buff + i;
            int a_byte = (*ptr) & 0xFF;
            address dst = instr_pc + i;
            *(unsigned char*)dst = (unsigned char) a_byte;
          }
          ICache::invalidate_range(instr_pc, *byte_count);
          NativeGeneralJump::replace_mt_safe(instr_pc, copy_buff);

          if (load_klass_or_mirror_patch_id ||
              stub_id == Runtime1::load_appendix_patching_id) {
            relocInfo::relocType rtype =
              (stub_id == Runtime1::load_klass_patching_id) ?
                                   relocInfo::metadata_type :
                                   relocInfo::oop_type;
            // update relocInfo to metadata
            nmethod* nm = CodeCache::find_nmethod(instr_pc);
            assert(nm != NULL, "invalid nmethod_pc");

            // The old patch site is now a move instruction so update
            // the reloc info so that it will get updated during
            // future GCs.
            RelocIterator iter(nm, (address)instr_pc, (address)(instr_pc + 1));
            relocInfo::change_reloc_info_for_address(&iter, (address) instr_pc,
                                                     relocInfo::none, rtype);
#ifdef SPARC
            // Sparc takes two relocations for a metadata so update the second one.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }

        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && ((mirror.not_null() && mirror()->is_scavengable()) ||
                              (appendix.not_null() && appendix->is_scavengable()))) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
JRT_END
//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
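// As an illustrative sketch (not part of this file), the CPU-specific stub
// generated for a patch site effectively does the following with these
// helpers; the exact register and control-flow details live in the cpu
// directories:
//
//   int deoptimized = Runtime1::move_klass_patching(thread);
//   if (deoptimized) {
//     // The caller frame was deoptimized while we were in the VM:
//     // continue in the deopt blob instead of re-executing the
//     // freshly patched instruction.
//   }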
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}
int Runtime1::move_mirror_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_mirror_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}
int Runtime1::move_appendix_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_appendix_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
}
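// Note: move_klass_patching, move_mirror_patching and move_appendix_patching
// above are identical except for the patching id handed to patch_code().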
//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to know,
// after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling
// a helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//

int Runtime1::access_field_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, access_field_patching_id);
  }
  // Back in Java; use no oops and DON'T safepoint

  // Return true if calling code is deoptimized

  return caller_is_deopted();
JRT_END
JRT_LEAF(void, Runtime1::trace_block_entry(jint block_id))
  // for now we just print out the block id
  tty->print("%d ", block_id);
JRT_END
// Array copy return codes.
enum {
  ac_failed = -1, // arraycopy failed
  ac_ok = 0       // arraycopy succeeded
};
// Below, length is the number of elements copied.
template <class T> int obj_arraycopy_work(oopDesc* src, T* src_addr,
                                          oopDesc* dst, T* dst_addr,
                                          int length) {

  // For performance reasons, we assume we are using a card marking write
  // barrier. The assert will fail if this is not the case.
  // Note that we use the non-virtual inlineable variant of write_ref_array.
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (src == dst) {
    // same object, no check
    bs->write_ref_array_pre(dst_addr, length);
    Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
    bs->write_ref_array((HeapWord*)dst_addr, length);
    return ac_ok;
  } else {
    Klass* bound = ObjArrayKlass::cast(dst->klass())->element_klass();
    Klass* stype = ObjArrayKlass::cast(src->klass())->element_klass();
    if (stype == bound || stype->is_subtype_of(bound)) {
      // Elements are guaranteed to be subtypes, so no check necessary
      bs->write_ref_array_pre(dst_addr, length);
      Copy::conjoint_oops_atomic(src_addr, dst_addr, length);
      bs->write_ref_array((HeapWord*)dst_addr, length);
      return ac_ok;
    }
  }
  return ac_failed;
}
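// If the element types are not statically compatible, the copy is refused
// (ac_failed) and the caller falls back to the checked slow path, which
// performs per-element store checks and may raise an ArrayStoreException.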
// Fast and direct copy of arrays; returning -1 means that an exception
// may be thrown and nothing was copied.
JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int dst_pos, int length))
#ifndef PRODUCT
  _generic_arraycopy_cnt++;        // Slow-path oop array copy
#endif

  if (src == NULL || dst == NULL || src_pos < 0 || dst_pos < 0 || length < 0) return ac_failed;
  if (!dst->is_array() || !src->is_array()) return ac_failed;
  if ((unsigned int) arrayOop(src)->length() < (unsigned int)src_pos + (unsigned int)length) return ac_failed;
  if ((unsigned int) arrayOop(dst)->length() < (unsigned int)dst_pos + (unsigned int)length) return ac_failed;
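  // Note: since src_pos, dst_pos and length were already checked to be
  // non-negative, the unsigned comparisons above also reject pos + length
  // combinations that would overflow a signed int.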
  if (length == 0) return ac_ok;
  if (src->is_typeArray()) {
    Klass* klass_oop = src->klass();
    if (klass_oop != dst->klass()) return ac_failed;
    TypeArrayKlass* klass = TypeArrayKlass::cast(klass_oop);
    const int l2es = klass->log2_element_size();
    const int ihs = klass->array_header_in_bytes() / wordSize;
    char* src_addr = (char*) ((oopDesc**)src + ihs) + (src_pos << l2es);
    char* dst_addr = (char*) ((oopDesc**)dst + ihs) + (dst_pos << l2es);
    // Potential problem: memmove is not guaranteed to be word atomic
    // Revisit in Merlin
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) {
      narrowOop *src_addr  = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr  = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    } else {
      oop *src_addr  = objArrayOop(src)->obj_at_addr<oop>(src_pos);
      oop *dst_addr  = objArrayOop(dst)->obj_at_addr<oop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
    }
  }
  return ac_failed;
JRT_END
JRT_LEAF(void, Runtime1::primitive_arraycopy(HeapWord* src, HeapWord* dst, int length))
#ifndef PRODUCT
  _primitive_arraycopy_cnt++;
#endif

  if (length == 0) return;
  // Not guaranteed to be word atomic, but that doesn't matter
  // for anything but an oop array, which is covered by oop_arraycopy.
  Copy::conjoint_jbytes(src, dst, length);
JRT_END
JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
#ifndef PRODUCT
  _oop_arraycopy_cnt++;
#endif

  if (num == 0) return;
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->has_write_ref_array_opt(), "Barrier set must have ref array opt");
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
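  // Barrier ordering: the pre-barrier (e.g. G1's SATB enqueue) must observe
  // the destination's old values, so it runs before the copy; the
  // card-marking post-barrier runs after.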
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END
JRT_LEAF(int, Runtime1::is_instance_of(oopDesc* mirror, oopDesc* obj))
  // We had to return int instead of bool; otherwise there may be a mismatch
  // between the C calling convention and the Java one.
  // E.g., on x86, GCC may clear only %al when returning a bool false, but
  // the JVM reads the whole %eax as the return value, which may misinterpret
  // the return value as a boolean true.

  assert(mirror != NULL, "should null-check on mirror before calling");
  Klass* k = java_lang_Class::as_Klass(mirror);
  return (k != NULL && obj != NULL && obj->is_a(k)) ? 1 : 0;
JRT_END
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
  ResourceMark rm;

  assert(!TieredCompilation, "incompatible with tiered compilation");

  RegisterMap reg_map(thread, false);
  frame runtime_frame = thread->last_frame();
  frame caller_frame = runtime_frame.sender(&reg_map);

  nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
  assert(nm != NULL, "no more nmethod?");
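  // After a failed predicate the compiled code must not be re-entered, so the
  // nmethod is made not entrant; the trap count recorded below lets a future
  // compile of this method see that a predicate failed here.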
  nm->make_not_entrant();
  methodHandle m(nm->method());
  MethodData* mdo = m->method_data();

  if (mdo == NULL && !HAS_PENDING_EXCEPTION) {
    // Build an MDO.  Ignore errors like OutOfMemory;
    // that simply means we won't have an MDO to update.
    Method::build_interpreter_method_data(m, THREAD);
    if (HAS_PENDING_EXCEPTION) {
      assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
      CLEAR_PENDING_EXCEPTION;
    }
    mdo = m->method_data();
  }

  if (mdo != NULL) {
    mdo->inc_trap_count(Deoptimization::Reason_none);
  }

  if (TracePredicateFailedTraps) {
    stringStream ss1, ss2;
    vframeStream vfst(thread);
    methodHandle inlinee = methodHandle(vfst.method());
    inlinee->print_short_name(&ss1);
    m->print_short_name(&ss2);
    tty->print_cr("Predicate failed trap in method %s at bci %d inlined in %s at pc " INTPTR_FORMAT, ss1.as_string(), vfst.bci(), ss2.as_string(), p2i(caller_frame.pc()));
  }
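  // The caller's compiled frame was built on an assumption that just failed,
  // so it cannot simply resume; deoptimizing it resumes execution in the
  // interpreter at the equivalent bytecode state.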
  Deoptimization::deoptimize_frame(thread, caller_frame.id());

JRT_END
#ifndef PRODUCT
void Runtime1::print_statistics() {
  tty->print_cr("C1 Runtime statistics:");
  tty->print_cr(" _resolve_invoke_virtual_cnt:     %d", SharedRuntime::_resolve_virtual_ctr);
  tty->print_cr(" _resolve_invoke_opt_virtual_cnt: %d", SharedRuntime::_resolve_opt_virtual_ctr);
  tty->print_cr(" _resolve_invoke_static_cnt:      %d", SharedRuntime::_resolve_static_ctr);
  tty->print_cr(" _handle_wrong_method_cnt:        %d", SharedRuntime::_wrong_method_ctr);
  tty->print_cr(" _ic_miss_cnt:                    %d", SharedRuntime::_ic_miss_ctr);
  tty->print_cr(" _generic_arraycopy_cnt:          %d", _generic_arraycopy_cnt);
  tty->print_cr(" _generic_arraycopystub_cnt:      %d", _generic_arraycopystub_cnt);
  tty->print_cr(" _byte_arraycopy_cnt:             %d", _byte_arraycopy_cnt);
  tty->print_cr(" _short_arraycopy_cnt:            %d", _short_arraycopy_cnt);
  tty->print_cr(" _int_arraycopy_cnt:              %d", _int_arraycopy_cnt);
  tty->print_cr(" _long_arraycopy_cnt:             %d", _long_arraycopy_cnt);
  tty->print_cr(" _primitive_arraycopy_cnt:        %d", _primitive_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (C):          %d", Runtime1::_oop_arraycopy_cnt);
  tty->print_cr(" _oop_arraycopy_cnt (stub):       %d", _oop_arraycopy_cnt);
  tty->print_cr(" _arraycopy_slowcase_cnt:         %d", _arraycopy_slowcase_cnt);
  tty->print_cr(" _arraycopy_checkcast_cnt:        %d", _arraycopy_checkcast_cnt);
  tty->print_cr(" _arraycopy_checkcast_attempt_cnt:%d", _arraycopy_checkcast_attempt_cnt);

  tty->print_cr(" _new_type_array_slowcase_cnt:    %d", _new_type_array_slowcase_cnt);
  tty->print_cr(" _new_object_array_slowcase_cnt:  %d", _new_object_array_slowcase_cnt);
  tty->print_cr(" _new_instance_slowcase_cnt:      %d", _new_instance_slowcase_cnt);
  tty->print_cr(" _new_multi_array_slowcase_cnt:   %d", _new_multi_array_slowcase_cnt);
  tty->print_cr(" _monitorenter_slowcase_cnt:      %d", _monitorenter_slowcase_cnt);
  tty->print_cr(" _monitorexit_slowcase_cnt:       %d", _monitorexit_slowcase_cnt);
  tty->print_cr(" _patch_code_slowcase_cnt:        %d", _patch_code_slowcase_cnt);

  tty->print_cr(" _throw_range_check_exception_count:            %d", _throw_range_check_exception_count);
  tty->print_cr(" _throw_index_exception_count:                  %d", _throw_index_exception_count);
  tty->print_cr(" _throw_div0_exception_count:                   %d", _throw_div0_exception_count);
  tty->print_cr(" _throw_null_pointer_exception_count:           %d", _throw_null_pointer_exception_count);
  tty->print_cr(" _throw_class_cast_exception_count:             %d", _throw_class_cast_exception_count);
  tty->print_cr(" _throw_incompatible_class_change_error_count:  %d", _throw_incompatible_class_change_error_count);
  tty->print_cr(" _throw_array_store_exception_count:            %d", _throw_array_store_exception_count);
  tty->print_cr(" _throw_count:                                  %d", _throw_count);

  SharedRuntime::print_ic_miss_histogram();
  tty->cr();
}
#endif // PRODUCT
