src/share/vm/runtime/deoptimization.cpp

author       simonis
date         Wed, 24 Sep 2014 12:19:07 -0700
changeset    7553 f43fad8786fc
parent       7420 793204f5528a
child        7535 7ae4e26cb1e0
child        7598 ddce0b7cee93
permissions  -rw-r--r--

8058345: Refactor native stack printing from vmError.cpp to debug.cpp to make it available in gdb as well
Summary: Also fix stack trace on x86 to enable walking of runtime stubs and native wrappers
Reviewed-by: kvn
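(Context for the listing below: per the summary, the native stack printing code moves from vmError.cpp into debug.cpp so it can also be invoked from a debugger attached to a live VM; deoptimization.cpp is only one of the files touched by the changeset and does not itself contain that helper. As a rough, assumed usage sketch — the helper name and arguments come from the debug.cpp side of the change, not from this file — one would attach gdb and do something like `call pns($sp, $rbp, $pc)` to print a mixed native/Java stack.)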

     1 /*
     2  * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "classfile/systemDictionary.hpp"
    27 #include "code/debugInfoRec.hpp"
    28 #include "code/nmethod.hpp"
    29 #include "code/pcDesc.hpp"
    30 #include "code/scopeDesc.hpp"
    31 #include "interpreter/bytecode.hpp"
    32 #include "interpreter/interpreter.hpp"
    33 #include "interpreter/oopMapCache.hpp"
    34 #include "memory/allocation.inline.hpp"
    35 #include "memory/oopFactory.hpp"
    36 #include "memory/resourceArea.hpp"
    37 #include "oops/method.hpp"
    38 #include "oops/oop.inline.hpp"
    39 #include "prims/jvmtiThreadState.hpp"
    40 #include "runtime/biasedLocking.hpp"
    41 #include "runtime/compilationPolicy.hpp"
    42 #include "runtime/deoptimization.hpp"
    43 #include "runtime/interfaceSupport.hpp"
    44 #include "runtime/sharedRuntime.hpp"
    45 #include "runtime/signature.hpp"
    46 #include "runtime/stubRoutines.hpp"
    47 #include "runtime/thread.hpp"
    48 #include "runtime/vframe.hpp"
    49 #include "runtime/vframeArray.hpp"
    50 #include "runtime/vframe_hp.hpp"
    51 #include "utilities/events.hpp"
    52 #include "utilities/xmlstream.hpp"
    53 #ifdef TARGET_ARCH_x86
    54 # include "vmreg_x86.inline.hpp"
    55 #endif
    56 #ifdef TARGET_ARCH_sparc
    57 # include "vmreg_sparc.inline.hpp"
    58 #endif
    59 #ifdef TARGET_ARCH_zero
    60 # include "vmreg_zero.inline.hpp"
    61 #endif
    62 #ifdef TARGET_ARCH_arm
    63 # include "vmreg_arm.inline.hpp"
    64 #endif
    65 #ifdef TARGET_ARCH_ppc
    66 # include "vmreg_ppc.inline.hpp"
    67 #endif
    68 #ifdef COMPILER2
    69 #ifdef TARGET_ARCH_MODEL_x86_32
    70 # include "adfiles/ad_x86_32.hpp"
    71 #endif
    72 #ifdef TARGET_ARCH_MODEL_x86_64
    73 # include "adfiles/ad_x86_64.hpp"
    74 #endif
    75 #ifdef TARGET_ARCH_MODEL_sparc
    76 # include "adfiles/ad_sparc.hpp"
    77 #endif
    78 #ifdef TARGET_ARCH_MODEL_zero
    79 # include "adfiles/ad_zero.hpp"
    80 #endif
    81 #ifdef TARGET_ARCH_MODEL_arm
    82 # include "adfiles/ad_arm.hpp"
    83 #endif
    84 #ifdef TARGET_ARCH_MODEL_ppc_32
    85 # include "adfiles/ad_ppc_32.hpp"
    86 #endif
    87 #ifdef TARGET_ARCH_MODEL_ppc_64
    88 # include "adfiles/ad_ppc_64.hpp"
    89 #endif
    90 #endif // COMPILER2
    92 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
    94 bool DeoptimizationMarker::_is_active = false;
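       // A note on the UnrollBlock built below: it packages everything the
       // deoptimization blob needs to replace the deoptimized compiled frame with
       // skeletal interpreter frames -- the size of the frame being removed, the
       // sizes and return pcs of the frames to push, the adjustment applied to the
       // caller's frame, and a small per-register scratch area.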
    96 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
    97                                          int  caller_adjustment,
    98                                          int  caller_actual_parameters,
    99                                          int  number_of_frames,
   100                                          intptr_t* frame_sizes,
   101                                          address* frame_pcs,
   102                                          BasicType return_type) {
   103   _size_of_deoptimized_frame = size_of_deoptimized_frame;
   104   _caller_adjustment         = caller_adjustment;
   105   _caller_actual_parameters  = caller_actual_parameters;
   106   _number_of_frames          = number_of_frames;
   107   _frame_sizes               = frame_sizes;
   108   _frame_pcs                 = frame_pcs;
   109   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2, mtCompiler);
   110   _return_type               = return_type;
   111   _initial_info              = 0;
   112   // PD (x86 only)
   113   _counter_temp              = 0;
   114   _unpack_kind               = 0;
   115   _sender_sp_temp            = 0;
   117   _total_frame_sizes         = size_of_frames();
   118 }
   121 Deoptimization::UnrollBlock::~UnrollBlock() {
   122   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes, mtCompiler);
   123   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs, mtCompiler);
   124   FREE_C_HEAP_ARRAY(intptr_t, _register_block, mtCompiler);
   125 }
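       // The register block freed above is laid out as two intptr_t slots per
       // register (reg_count * 2 in the constructor); value_addr_at() below hands
       // out the address of the first slot of the pair for a given register number.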
   128 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
   129   assert(register_number < RegisterMap::reg_count, "checking register number");
   130   return &_register_block[register_number * 2];
   131 }
   135 int Deoptimization::UnrollBlock::size_of_frames() const {
   136   // Account first for the adjustment of the initial frame
   137   int result = _caller_adjustment;
   138   for (int index = 0; index < number_of_frames(); index++) {
   139     result += frame_sizes()[index];
   140   }
   141   return result;
   142 }
   145 void Deoptimization::UnrollBlock::print() {
   146   ttyLocker ttyl;
   147   tty->print_cr("UnrollBlock");
   148   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
   149   tty->print(   "  frame_sizes: ");
   150   for (int index = 0; index < number_of_frames(); index++) {
   151     tty->print("%d ", frame_sizes()[index]);
   152   }
   153   tty->cr();
   154 }
   157 // In order to make fetch_unroll_info work properly with escape
   158 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
   159 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
   160 // of previously eliminated objects occurs in realloc_objects, which is
   161 // called from the method fetch_unroll_info_helper below.
   162 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
   163   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
   164   // but makes the entry a little slower. There is however a little dance we have to
   165   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
   167   // fetch_unroll_info() is called at the beginning of the deoptimization
   168   // handler. Note this fact before we start generating temporary frames
   169   // that can confuse an asynchronous stack walker. This counter is
   170   // decremented at the end of unpack_frames().
   171   thread->inc_in_deopt_handler();
   173   return fetch_unroll_info_helper(thread);
   174 JRT_END
   177 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
   178 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {
   180   // Note: there is a safepoint safety issue here. No matter whether we enter
   181   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
   182   // the vframeArray is created.
   183   //
   185   // Allocate our special deoptimization ResourceMark
   186   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
   187   assert(thread->deopt_mark() == NULL, "Pending deopt!");
   188   thread->set_deopt_mark(dmark);
   190   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
   191   RegisterMap map(thread, true);
   192   RegisterMap dummy_map(thread, false);
   193   // Now get the deoptee with a valid map
   194   frame deoptee = stub_frame.sender(&map);
   195   // Set the deoptee nmethod
   196   assert(thread->deopt_nmethod() == NULL, "Pending deopt!");
   197   thread->set_deopt_nmethod(deoptee.cb()->as_nmethod_or_null());
   199   if (VerifyStack) {
   200     thread->validate_frame_layout();
   201   }
   203   // Create a growable array of VFrames where each VFrame represents an inlined
   204   // Java frame.  This storage is allocated with the usual system arena.
   205   assert(deoptee.is_compiled_frame(), "Wrong frame type");
   206   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
   207   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
   208   while (!vf->is_top()) {
   209     assert(vf->is_compiled_frame(), "Wrong frame type");
   210     chunk->push(compiledVFrame::cast(vf));
   211     vf = vf->sender();
   212   }
   213   assert(vf->is_compiled_frame(), "Wrong frame type");
   214   chunk->push(compiledVFrame::cast(vf));
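       // The chunk is ordered innermost scope first: chunk->at(0) is the vframe at
       // the deoptimization pc, and the last element is the outermost (root) scope
       // of the compiled method.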
   216   bool realloc_failures = false;
   218 #ifdef COMPILER2
   219   // Reallocate the non-escaping objects and restore their fields. Then
   220   // relock objects if synchronization on them was eliminated.
   221   if (DoEscapeAnalysis || EliminateNestedLocks) {
   222     if (EliminateAllocations) {
   223       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
   224       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
   226       // The flag return_oop() indicates call sites which return oop
   227       // in compiled code. Such sites include java method calls,
   228       // runtime calls (for example, used to allocate new objects/arrays
   229       // on slow code path) and any other calls generated in compiled code.
   230       // It is not guaranteed that we can get such information here only
   231       // by analyzing bytecode in deoptimized frames. This is why this flag
   232       // is set during method compilation (see Compile::Process_OopMap_Node()).
   233       // If the previous frame was popped, we don't have a result.
   234       bool save_oop_result = chunk->at(0)->scope()->return_oop() && !thread->popframe_forcing_deopt_reexecution();
   235       Handle return_value;
   236       if (save_oop_result) {
   237         // Reallocation may trigger GC. If deoptimization happened on return from
   238         // call which returns oop we need to save it since it is not in oopmap.
   239         oop result = deoptee.saved_oop_result(&map);
   240         assert(result == NULL || result->is_oop(), "must be oop");
   241         return_value = Handle(thread, result);
   242         assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
   243         if (TraceDeoptimization) {
   244           ttyLocker ttyl;
   245           tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
   246         }
   247       }
   248       if (objects != NULL) {
   249         JRT_BLOCK
   250           realloc_failures = realloc_objects(thread, &deoptee, objects, THREAD);
   251         JRT_END
   252         reassign_fields(&deoptee, &map, objects, realloc_failures);
   253 #ifndef PRODUCT
   254         if (TraceDeoptimization) {
   255           ttyLocker ttyl;
   256           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
   257           print_objects(objects, realloc_failures);
   258         }
   259 #endif
   260       }
   261       if (save_oop_result) {
   262         // Restore result.
   263         deoptee.set_saved_oop_result(&map, return_value());
   264       }
   265     }
   266     if (EliminateLocks) {
   267 #ifndef PRODUCT
   268       bool first = true;
   269 #endif
   270       for (int i = 0; i < chunk->length(); i++) {
   271         compiledVFrame* cvf = chunk->at(i);
   272         assert (cvf->scope() != NULL,"expect only compiled java frames");
   273         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
   274         if (monitors->is_nonempty()) {
   275           relock_objects(monitors, thread, realloc_failures);
   276 #ifndef PRODUCT
   277           if (TraceDeoptimization) {
   278             ttyLocker ttyl;
   279             for (int j = 0; j < monitors->length(); j++) {
   280               MonitorInfo* mi = monitors->at(j);
   281               if (mi->eliminated()) {
   282                 if (first) {
   283                   first = false;
   284                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
   285                 }
   286                 if (mi->owner_is_scalar_replaced()) {
   287                   Klass* k = java_lang_Class::as_Klass(mi->owner_klass());
   288                   tty->print_cr("     failed reallocation for klass %s", k->external_name());
   289                 } else {
   290                   tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
   291                 }
   292               }
   293             }
   294           }
   295 #endif
   296         }
   297       }
   298     }
   299   }
   300 #endif // COMPILER2
   301   // Ensure that no safepoint is taken after pointers have been stored
   302   // in fields of rematerialized objects.  If a safepoint occurs from here on
   303   // out the java state residing in the vframeArray will be missed.
   304   No_Safepoint_Verifier no_safepoint;
   306   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk, realloc_failures);
   307 #ifdef COMPILER2
   308   if (realloc_failures) {
   309     pop_frames_failed_reallocs(thread, array);
   310   }
   311 #endif
   313   assert(thread->vframe_array_head() == NULL, "Pending deopt!");
   314   thread->set_vframe_array_head(array);
   316   // Now that the vframeArray has been created if we have any deferred local writes
   317   // added by jvmti then we can free up that structure as the data is now in the
   318   // vframeArray
   320   if (thread->deferred_locals() != NULL) {
   321     GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
   322     int i = 0;
   323     do {
   324       // Because of inlining we could have multiple vframes for a single frame
   325       // and several of the vframes could have deferred writes. Find them all.
   326       if (list->at(i)->id() == array->original().id()) {
   327         jvmtiDeferredLocalVariableSet* dlv = list->at(i);
   328         list->remove_at(i);
   329         // individual jvmtiDeferredLocalVariableSet are CHeapObj's
   330         delete dlv;
   331       } else {
   332         i++;
   333       }
   334     } while ( i < list->length() );
   335     if (list->length() == 0) {
   336       thread->set_deferred_locals(NULL);
   337       // free the list and elements back to C heap.
   338       delete list;
   339     }
   341   }
   343 #ifndef SHARK
   344   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
   345   CodeBlob* cb = stub_frame.cb();
   346   // Verify we have the right vframeArray
   347   assert(cb->frame_size() >= 0, "Unexpected frame size");
   348   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
   350   // If the deopt call site is a MethodHandle invoke call site we have
   351   // to adjust the unpack_sp.
   352   nmethod* deoptee_nm = deoptee.cb()->as_nmethod_or_null();
   353   if (deoptee_nm != NULL && deoptee_nm->is_method_handle_return(deoptee.pc()))
   354     unpack_sp = deoptee.unextended_sp();
   356 #ifdef ASSERT
   357   assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
   358 #endif
   359 #else
   360   intptr_t* unpack_sp = stub_frame.sender(&dummy_map).unextended_sp();
   361 #endif // !SHARK
   363   // This is a guarantee instead of an assert because if vframe doesn't match
   364   // we will unpack the wrong deoptimized frame and wind up in strange places
   365   // where it will be very difficult to figure out what went wrong. Better
   366   // to die an early death here than some very obscure death later when the
   367   // trail is cold.
   368   // Note: on ia64 this guarantee can be fooled by frames with no memory stack
   369   // in that it will fail to detect a problem when there is one. This needs
   370   // more work in tiger timeframe.
   371   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
   373   int number_of_frames = array->frames();
   375   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
   376   // virtual activation, which is the reverse of the elements in the vframes array.
   377   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames, mtCompiler);
   378   // +1 because we always have an interpreter return address for the final slot.
   379   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1, mtCompiler);
   380   int popframe_extra_args = 0;
   381   // Create an interpreter return address for the stub to use as its return
   382   // address so the skeletal frames are perfectly walkable
   383   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
   385   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
   386   // activation be put back on the expression stack of the caller for reexecution
   387   if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
   388     popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
   389   }
   391   // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
   392   // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
   393   // than simply use array->sender.pc(). This requires us to walk the current set of frames
   394   //
   395   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
   396   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
   398   // It's possible that the number of parameters at the call site is
   399   // different from the number of arguments in the callee when method
   400   // handles are used.  If the caller is interpreted get the real
   401   // value so that the proper amount of space can be added to its
   402   // frame.
   403   bool caller_was_method_handle = false;
   404   if (deopt_sender.is_interpreted_frame()) {
   405     methodHandle method = deopt_sender.interpreter_frame_method();
   406     Bytecode_invoke cur = Bytecode_invoke_check(method, deopt_sender.interpreter_frame_bci());
   407     if (cur.is_invokedynamic() || cur.is_invokehandle()) {
   408       // Method handle invokes may involve fairly arbitrary chains of
   409       // calls so it's impossible to know how much actual space the
   410       // caller has for locals.
   411       caller_was_method_handle = true;
   412     }
   413   }
   415   //
   416   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
   417   // frame_sizes/frame_pcs[1] next oldest frame (int)
   418   // frame_sizes/frame_pcs[n] youngest frame (int)
   419   //
   420   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
   421   // owns the space for the return address to its caller).  Confusing ain't it.
   422   //
   423   // The vframe array can address vframes with indices running from
   424   // 0.._frames-1. Index  0 is the youngest frame and _frames - 1 is the oldest (root) frame.
   425   // When we create the skeletal frames we need the oldest frame to be in the zero slot
   426   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
   427   // so things look a little strange in this loop.
   428   //
   429   int callee_parameters = 0;
   430   int callee_locals = 0;
   431   for (int index = 0; index < array->frames(); index++ ) {
   432     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
   433     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
   434     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
   435     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
   436                                                                                                     callee_locals,
   437                                                                                                     index == 0,
   438                                                                                                     popframe_extra_args);
   439     // This pc doesn't have to be perfect just good enough to identify the frame
   440     // as interpreted so the skeleton frame will be walkable
   441     // The correct pc will be set when the skeleton frame is completely filled out
   442     // The final pc we store in the loop is wrong and will be overwritten below
   443     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
   445     callee_parameters = array->element(index)->method()->size_of_parameters();
   446     callee_locals = array->element(index)->method()->max_locals();
   447     popframe_extra_args = 0;
   448   }
   450   // Compute whether the root vframe returns a float or double value.
   451   BasicType return_type;
   452   {
   453     HandleMark hm;
   454     methodHandle method(thread, array->element(0)->method());
   455     Bytecode_invoke invoke = Bytecode_invoke_check(method, array->element(0)->bci());
   456     return_type = invoke.is_valid() ? invoke.result_type() : T_ILLEGAL;
   457   }
   459   // Compute information for handling adapters and adjusting the frame size of the caller.
   460   int caller_adjustment = 0;
   462   // Compute the amount the oldest interpreter frame will have to adjust
   463   // its caller's stack by. If the caller is a compiled frame then
   464   // we pretend that the callee has no parameters so that the
   465   // extension counts for the full amount of locals and not just
   466   // locals-parms. This is because without a c2i adapter the parm
   467   // area as created by the compiled frame will not be usable by
   468   // the interpreter. (Depending on the calling convention there
   469   // may not even be enough space).
   471   // QQQ I'd rather see this pushed down into last_frame_adjust
   472   // and have it take the sender (aka caller).
   474   if (deopt_sender.is_compiled_frame() || caller_was_method_handle) {
   475     caller_adjustment = last_frame_adjust(0, callee_locals);
   476   } else if (callee_locals > callee_parameters) {
   477     // The caller frame may need extending to accommodate
   478     // non-parameter locals of the first unpacked interpreted frame.
   479     // Compute that adjustment.
   480     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
   481   }
   483   // If the sender is deoptimized then we must retrieve the address of the handler
   484   // since the frame will "magically" show the original pc before the deopt
   485   // and we'd undo the deopt.
   487   frame_pcs[0] = deopt_sender.raw_pc();
   489 #ifndef SHARK
   490   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
   491 #endif // SHARK
   493   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
   494                                       caller_adjustment * BytesPerWord,
   495                                       caller_was_method_handle ? 0 : callee_parameters,
   496                                       number_of_frames,
   497                                       frame_sizes,
   498                                       frame_pcs,
   499                                       return_type);
   500   // On some platforms, we need a way to pass some platform dependent
   501   // information to the unpacking code so the skeletal frames come out
   502   // correct (initial fp value, unextended sp, ...)
   503   info->set_initial_info((intptr_t) array->sender().initial_deoptimization_info());
   505   if (array->frames() > 1) {
   506     if (VerifyStack && TraceDeoptimization) {
   507       ttyLocker ttyl;
   508       tty->print_cr("Deoptimizing method containing inlining");
   509     }
   510   }
   512   array->set_unroll_block(info);
   513   return info;
   514 }
   516 // Called to clean up deoptimization data structures, both in the normal case
   517 // after unpacking to the stack and when a stack overflow error occurs
   518 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
   519                                         vframeArray *array) {
   521   // Get array if coming from exception
   522   if (array == NULL) {
   523     array = thread->vframe_array_head();
   524   }
   525   thread->set_vframe_array_head(NULL);
   527   // Free the previous UnrollBlock
   528   vframeArray* old_array = thread->vframe_array_last();
   529   thread->set_vframe_array_last(array);
   531   if (old_array != NULL) {
   532     UnrollBlock* old_info = old_array->unroll_block();
   533     old_array->set_unroll_block(NULL);
   534     delete old_info;
   535     delete old_array;
   536   }
   538   // Deallocate any resources created in this routine and any ResourceObjs allocated
   539   // inside the vframeArray (StackValueCollections)
   541   delete thread->deopt_mark();
   542   thread->set_deopt_mark(NULL);
   543   thread->set_deopt_nmethod(NULL);
   546   if (JvmtiExport::can_pop_frame()) {
   547 #ifndef CC_INTERP
   548     // Regardless of whether we entered this routine with the pending
   549     // popframe condition bit set, we should always clear it now
   550     thread->clear_popframe_condition();
   551 #else
   552     // C++ interpreter will clear has_pending_popframe when it enters
   553     // with method_resume. For deopt_resume2 we clear it now.
   554     if (thread->popframe_forcing_deopt_reexecution())
   555         thread->clear_popframe_condition();
   556 #endif /* CC_INTERP */
   557   }
   559   // unpack_frames() is called at the end of the deoptimization handler
   560   // and (in C2) at the end of the uncommon trap handler. Note this fact
   561   // so that an asynchronous stack walker can work again. This counter is
   562   // incremented at the beginning of fetch_unroll_info() and (in C2) at
   563   // the beginning of uncommon_trap().
   564   thread->dec_in_deopt_handler();
   565 }
   568 // Return BasicType of value being returned
   569 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
   571   // We are already active in the special DeoptResourceMark; any ResourceObj's we
   572   // allocate will be freed at the end of the routine.
   574   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
   575   // but makes the entry a little slower. There is however a little dance we have to
   576   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
   577   ResetNoHandleMark rnhm; // No-op in release/product versions
   578   HandleMark hm;
   580   frame stub_frame = thread->last_frame();
   582   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
   583   // must point to the vframeArray for the unpack frame.
   584   vframeArray* array = thread->vframe_array_head();
   586 #ifndef PRODUCT
   587   if (TraceDeoptimization) {
   588     ttyLocker ttyl;
   589     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
   590   }
   591 #endif
   592   Events::log(thread, "DEOPT UNPACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT " mode %d",
   593               stub_frame.pc(), stub_frame.sp(), exec_mode);
   595   UnrollBlock* info = array->unroll_block();
   597   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
   598   array->unpack_to_stack(stub_frame, exec_mode, info->caller_actual_parameters());
   600   BasicType bt = info->return_type();
   602   // If we have an exception pending, claim that the return type is an oop
   603   // so the deopt_blob does not overwrite the exception_oop.
   605   if (exec_mode == Unpack_exception)
   606     bt = T_OBJECT;
   608   // Cleanup thread deopt data
   609   cleanup_deopt_info(thread, array);
   611 #ifndef PRODUCT
   612   if (VerifyStack) {
   613     ResourceMark res_mark;
   615     thread->validate_frame_layout();
   617     // Verify that the just-unpacked frames match the interpreter's
   618     // notions of expression stack and locals
   619     vframeArray* cur_array = thread->vframe_array_last();
   620     RegisterMap rm(thread, false);
   621     rm.set_include_argument_oops(false);
   622     bool is_top_frame = true;
   623     int callee_size_of_parameters = 0;
   624     int callee_max_locals = 0;
   625     for (int i = 0; i < cur_array->frames(); i++) {
   626       vframeArrayElement* el = cur_array->element(i);
   627       frame* iframe = el->iframe();
   628       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
   630       // Get the oop map for this bci
   631       InterpreterOopMap mask;
   632       int cur_invoke_parameter_size = 0;
   633       bool try_next_mask = false;
   634       int next_mask_expression_stack_size = -1;
   635       int top_frame_expression_stack_adjustment = 0;
   636       methodHandle mh(thread, iframe->interpreter_frame_method());
   637       OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
   638       BytecodeStream str(mh);
   639       str.set_start(iframe->interpreter_frame_bci());
   640       int max_bci = mh->code_size();
   641       // Get to the next bytecode if possible
   642       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
   643       // Check to see if we can grab the number of outgoing arguments
   644       // at an uncommon trap for an invoke (where the compiler
   645       // generates debug info before the invoke has executed)
   646       Bytecodes::Code cur_code = str.next();
   647       if (cur_code == Bytecodes::_invokevirtual   ||
   648           cur_code == Bytecodes::_invokespecial   ||
   649           cur_code == Bytecodes::_invokestatic    ||
   650           cur_code == Bytecodes::_invokeinterface ||
   651           cur_code == Bytecodes::_invokedynamic) {
   652         Bytecode_invoke invoke(mh, iframe->interpreter_frame_bci());
   653         Symbol* signature = invoke.signature();
   654         ArgumentSizeComputer asc(signature);
   655         cur_invoke_parameter_size = asc.size();
   656         if (invoke.has_receiver()) {
   657           // Add in receiver
   658           ++cur_invoke_parameter_size;
   659         }
   660         if (i != 0 && !invoke.is_invokedynamic() && MethodHandles::has_member_arg(invoke.klass(), invoke.name())) {
   661           callee_size_of_parameters++;
   662         }
   663       }
   664       if (str.bci() < max_bci) {
   665         Bytecodes::Code bc = str.next();
   666         if (bc >= 0) {
   667           // The interpreter oop map generator reports results before
   668           // the current bytecode has executed except in the case of
   669           // calls. It seems to be hard to tell whether the compiler
   670           // has emitted debug information matching the "state before"
   671           // a given bytecode or the state after, so we try both
   672           switch (cur_code) {
   673             case Bytecodes::_invokevirtual:
   674             case Bytecodes::_invokespecial:
   675             case Bytecodes::_invokestatic:
   676             case Bytecodes::_invokeinterface:
   677             case Bytecodes::_invokedynamic:
   678             case Bytecodes::_athrow:
   679               break;
   680             default: {
   681               InterpreterOopMap next_mask;
   682               OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
   683               next_mask_expression_stack_size = next_mask.expression_stack_size();
   684               // Need to subtract off the size of the result type of
   685               // the bytecode because this is not described in the
   686               // debug info but returned to the interpreter in the TOS
   687               // caching register
   688               BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
   689               if (bytecode_result_type != T_ILLEGAL) {
   690                 top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
   691               }
   692               assert(top_frame_expression_stack_adjustment >= 0, "");
   693               try_next_mask = true;
   694               break;
   695             }
   696           }
   697         }
   698       }
   700       // Verify stack depth and oops in frame
   701       // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
   702       if (!(
   703             /* SPARC */
   704             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
   705             /* x86 */
   706             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
   707             (try_next_mask &&
   708              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
   709                                                                     top_frame_expression_stack_adjustment))) ||
   710             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
   711             (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute || el->should_reexecute()) &&
   712              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
   713             )) {
   714         ttyLocker ttyl;
   716         // Print out some information that will help us debug the problem
   717         tty->print_cr("Wrong number of expression stack elements during deoptimization");
   718         tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
   719         tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
   720                       iframe->interpreter_frame_expression_stack_size());
   721         tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
   722         tty->print_cr("  try_next_mask = %d", try_next_mask);
   723         tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
   724         tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
   725         tty->print_cr("  callee_max_locals = %d", callee_max_locals);
   726         tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
   727         tty->print_cr("  exec_mode = %d", exec_mode);
   728         tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
   729         tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
   730         tty->print_cr("  Interpreted frames:");
   731         for (int k = 0; k < cur_array->frames(); k++) {
   732           vframeArrayElement* el = cur_array->element(k);
   733           tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
   734         }
   735         cur_array->print_on_2(tty);
   736         guarantee(false, "wrong number of expression stack elements during deopt");
   737       }
   738       VerifyOopClosure verify;
   739       iframe->oops_interpreted_do(&verify, NULL, &rm, false);
   740       callee_size_of_parameters = mh->size_of_parameters();
   741       callee_max_locals = mh->max_locals();
   742       is_top_frame = false;
   743     }
   744   }
   745 #endif /* !PRODUCT */
   748   return bt;
   749 JRT_END
   752 int Deoptimization::deoptimize_dependents() {
   753   Threads::deoptimized_wrt_marked_nmethods();
   754   return 0;
   755 }
   758 #ifdef COMPILER2
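       // Reallocate on the Java heap the objects whose allocations were eliminated
       // by escape analysis, so that their fields can be refilled from the debug
       // info. An exception that was already pending on entry is saved and
       // re-installed afterwards, unless one of the reallocations itself fails.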
   759 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
   760   Handle pending_exception(thread->pending_exception());
   761   const char* exception_file = thread->exception_file();
   762   int exception_line = thread->exception_line();
   763   thread->clear_pending_exception();
   765   bool failures = false;
   767   for (int i = 0; i < objects->length(); i++) {
   768     assert(objects->at(i)->is_object(), "invalid debug information");
   769     ObjectValue* sv = (ObjectValue*) objects->at(i);
   771     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
   772     oop obj = NULL;
   774     if (k->oop_is_instance()) {
   775       InstanceKlass* ik = InstanceKlass::cast(k());
   776       obj = ik->allocate_instance(THREAD);
   777     } else if (k->oop_is_typeArray()) {
   778       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
   779       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
   780       int len = sv->field_size() / type2size[ak->element_type()];
   781       obj = ak->allocate(len, THREAD);
   782     } else if (k->oop_is_objArray()) {
   783       ObjArrayKlass* ak = ObjArrayKlass::cast(k());
   784       obj = ak->allocate(sv->field_size(), THREAD);
   785     }
   787     if (obj == NULL) {
   788       failures = true;
   789     }
   791     assert(sv->value().is_null(), "redundant reallocation");
   792     assert(obj != NULL || HAS_PENDING_EXCEPTION, "allocation should succeed or we should get an exception");
   793     CLEAR_PENDING_EXCEPTION;
   794     sv->set_value(obj);
   795   }
   797   if (failures) {
   798     THROW_OOP_(Universe::out_of_memory_error_realloc_objects(), failures);
   799   } else if (pending_exception.not_null()) {
   800     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
   801   }
   803   return failures;
   804 }
   806 // This assumes that the fields are stored in ObjectValue in the same order
   807 // they are yielded by do_nonstatic_fields.
   808 class FieldReassigner: public FieldClosure {
   809   frame* _fr;
   810   RegisterMap* _reg_map;
   811   ObjectValue* _sv;
   812   InstanceKlass* _ik;
   813   oop _obj;
   815   int _i;
   816 public:
   817   FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
   818     _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}
   820   int i() const { return _i; }
   823   void do_field(fieldDescriptor* fd) {
   824     intptr_t val;
   825     StackValue* value =
   826       StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
   827     int offset = fd->offset();
   828     switch (fd->field_type()) {
   829     case T_OBJECT: case T_ARRAY:
   830       assert(value->type() == T_OBJECT, "Agreement.");
   831       _obj->obj_field_put(offset, value->get_obj()());
   832       break;
   834     case T_LONG: case T_DOUBLE: {
   835       assert(value->type() == T_INT, "Agreement.");
   836       StackValue* low =
   837         StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
   838 #ifdef _LP64
   839       jlong res = (jlong)low->get_int();
   840 #else
   841 #ifdef SPARC
   842       // For SPARC we have to swap high and low words.
   843       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
   844 #else
   845       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
   846 #endif //SPARC
   847 #endif
   848       _obj->long_field_put(offset, res);
   849       break;
   850     }
   851     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
   852     case T_INT: case T_FLOAT: // 4 bytes.
   853       assert(value->type() == T_INT, "Agreement.");
   854       val = value->get_int();
   855       _obj->int_field_put(offset, (jint)*((jint*)&val));
   856       break;
   858     case T_SHORT: case T_CHAR: // 2 bytes
   859       assert(value->type() == T_INT, "Agreement.");
   860       val = value->get_int();
   861       _obj->short_field_put(offset, (jshort)*((jint*)&val));
   862       break;
   864     case T_BOOLEAN: case T_BYTE: // 1 byte
   865       assert(value->type() == T_INT, "Agreement.");
   866       val = value->get_int();
   867       _obj->bool_field_put(offset, (jboolean)*((jint*)&val));
   868       break;
   870     default:
   871       ShouldNotReachHere();
   872     }
   873     _i++;
   874   }
   875 };
   877 // restore elements of an eliminated type array
   878 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
   879   int index = 0;
   880   intptr_t val;
   882   for (int i = 0; i < sv->field_size(); i++) {
   883     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
   884     switch(type) {
   885     case T_LONG: case T_DOUBLE: {
   886       assert(value->type() == T_INT, "Agreement.");
   887       StackValue* low =
   888         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
   889 #ifdef _LP64
   890       jlong res = (jlong)low->get_int();
   891 #else
   892 #ifdef SPARC
   893       // For SPARC we have to swap high and low words.
   894       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
   895 #else
   896       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
   897 #endif //SPARC
   898 #endif
   899       obj->long_at_put(index, res);
   900       break;
   901     }
   903     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
   904     case T_INT: case T_FLOAT: // 4 bytes.
   905       assert(value->type() == T_INT, "Agreement.");
   906       val = value->get_int();
   907       obj->int_at_put(index, (jint)*((jint*)&val));
   908       break;
   910     case T_SHORT: case T_CHAR: // 2 bytes
   911       assert(value->type() == T_INT, "Agreement.");
   912       val = value->get_int();
   913       obj->short_at_put(index, (jshort)*((jint*)&val));
   914       break;
   916     case T_BOOLEAN: case T_BYTE: // 1 byte
   917       assert(value->type() == T_INT, "Agreement.");
   918       val = value->get_int();
   919       obj->bool_at_put(index, (jboolean)*((jint*)&val));
   920       break;
   922       default:
   923         ShouldNotReachHere();
   924     }
   925     index++;
   926   }
   927 }
   930 // restore fields of an eliminated object array
   931 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
   932   for (int i = 0; i < sv->field_size(); i++) {
   933     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
   934     assert(value->type() == T_OBJECT, "object element expected");
   935     obj->obj_at_put(i, value->get_obj()());
   936   }
   937 }
   940 // restore fields of all eliminated objects and arrays
   941 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   942   for (int i = 0; i < objects->length(); i++) {
   943     ObjectValue* sv = (ObjectValue*) objects->at(i);
   944     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
   945     Handle obj = sv->value();
   946     assert(obj.not_null() || realloc_failures, "reallocation was missed");
   947     if (obj.is_null()) {
   948       continue;
   949     }
   951     if (k->oop_is_instance()) {
   952       InstanceKlass* ik = InstanceKlass::cast(k());
   953       FieldReassigner reassign(fr, reg_map, sv, obj());
   954       ik->do_nonstatic_fields(&reassign);
   955     } else if (k->oop_is_typeArray()) {
   956       TypeArrayKlass* ak = TypeArrayKlass::cast(k());
   957       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
   958     } else if (k->oop_is_objArray()) {
   959       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
   960     }
   961   }
   962 }
   965 // relock objects for which synchronization was eliminated
   966 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread, bool realloc_failures) {
   967   for (int i = 0; i < monitors->length(); i++) {
   968     MonitorInfo* mon_info = monitors->at(i);
   969     if (mon_info->eliminated()) {
   970       assert(!mon_info->owner_is_scalar_replaced() || realloc_failures, "reallocation was missed");
   971       if (!mon_info->owner_is_scalar_replaced()) {
   972         Handle obj = Handle(mon_info->owner());
   973         markOop mark = obj->mark();
   974         if (UseBiasedLocking && mark->has_bias_pattern()) {
   975           // Newly allocated objects may have the mark set to anonymously biased.
   976           // Also the deoptimized method may have called methods with synchronization
   977           // where the thread-local object is bias locked to the current thread.
   978           assert(mark->is_biased_anonymously() ||
   979                  mark->biased_locker() == thread, "should be locked to current thread");
   980           // Reset mark word to unbiased prototype.
   981           markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
   982           obj->set_mark(unbiased_prototype);
   983         }
   984         BasicLock* lock = mon_info->lock();
   985         ObjectSynchronizer::slow_enter(obj, lock, thread);
   986         assert(mon_info->owner()->is_locked(), "object must be locked now");
   987       }
   988     }
   989   }
   990 }
   993 #ifndef PRODUCT
   994 // print information about reallocated objects
   995 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects, bool realloc_failures) {
   996   fieldDescriptor fd;
   998   for (int i = 0; i < objects->length(); i++) {
   999     ObjectValue* sv = (ObjectValue*) objects->at(i);
  1000     KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
  1001     Handle obj = sv->value();
  1003     tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
  1004     k->print_value();
  1005     assert(obj.not_null() || realloc_failures, "reallocation was missed");
  1006     if (obj.is_null()) {
  1007       tty->print(" allocation failed");
  1008     } else {
  1009       tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
  1010     }
  1011     tty->cr();
  1013     if (Verbose && !obj.is_null()) {
  1014       k->oop_print_on(obj(), tty);
  1015     }
  1016   }
  1017 }
  1018 #endif
  1019 #endif // COMPILER2
  1021 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk, bool realloc_failures) {
  1022   Events::log(thread, "DEOPT PACKING pc=" INTPTR_FORMAT " sp=" INTPTR_FORMAT, fr.pc(), fr.sp());
  1024 #ifndef PRODUCT
  1025   if (TraceDeoptimization) {
  1026     ttyLocker ttyl;
  1027     tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
  1028     fr.print_on(tty);
  1029     tty->print_cr("     Virtual frames (innermost first):");
  1030     for (int index = 0; index < chunk->length(); index++) {
  1031       compiledVFrame* vf = chunk->at(index);
  1032       tty->print("       %2d - ", index);
  1033       vf->print_value();
  1034       int bci = chunk->at(index)->raw_bci();
  1035       const char* code_name;
  1036       if (bci == SynchronizationEntryBCI) {
  1037         code_name = "sync entry";
  1038       } else {
  1039         Bytecodes::Code code = vf->method()->code_at(bci);
  1040         code_name = Bytecodes::name(code);
  1041       }
  1042       tty->print(" - %s", code_name);
  1043       tty->print_cr(" @ bci %d ", bci);
  1044       if (Verbose) {
  1045         vf->print();
  1046         tty->cr();
  1047       }
  1048     }
  1049   }
  1050 #endif
  1052   // Register map for next frame (used for stack crawl).  We capture
  1053   // the state of the deopt'ing frame's caller.  Thus if we need to
  1054   // stuff a C2I adapter we can properly fill in the callee-save
  1055   // register locations.
  1056   frame caller = fr.sender(reg_map);
  1057   int frame_size = caller.sp() - fr.sp();
  1059   frame sender = caller;
  1061   // Since the Java thread being deoptimized will eventually adjust its own stack,
  1062   // the vframeArray containing the unpacking information is allocated in the C heap.
  1063   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
  1064   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr, realloc_failures);
  1066   // Compare the vframeArray to the collected vframes
  1067   assert(array->structural_compare(thread, chunk), "just checking");
  1069 #ifndef PRODUCT
  1070   if (TraceDeoptimization) {
  1071     ttyLocker ttyl;
  1072     tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
  1073   }
  1074 #endif // PRODUCT
  1076   return array;
  1077 }
  1079 #ifdef COMPILER2
  1080 void Deoptimization::pop_frames_failed_reallocs(JavaThread* thread, vframeArray* array) {
  1081   // Reallocation of some scalar replaced objects failed. Record
  1082   // that we need to pop all the interpreter frames for the
  1083   // deoptimized compiled frame.
  1084   assert(thread->frames_to_pop_failed_realloc() == 0, "missed frames to pop?");
  1085   thread->set_frames_to_pop_failed_realloc(array->frames());
  1086   // Unlock all monitors here otherwise the interpreter will see a
  1087   // mix of locked and unlocked monitors (because of failed
  1088   // reallocations of synchronized objects) and be confused.
  1089   for (int i = 0; i < array->frames(); i++) {
  1090     MonitorChunk* monitors = array->element(i)->monitors();
  1091     if (monitors != NULL) {
  1092       for (int j = 0; j < monitors->number_of_monitors(); j++) {
  1093         BasicObjectLock* src = monitors->at(j);
  1094         if (src->obj() != NULL) {
  1095           ObjectSynchronizer::fast_exit(src->obj(), src->lock(), thread);
  1096         }
  1097       }
  1098       array->element(i)->free_monitors(thread);
  1099 #ifdef ASSERT
  1100       array->element(i)->set_removed_monitors();
  1101 #endif
  1102     }
  1103   }
  1104 }
  1105 #endif
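       // Collect from one compiled scope the owners of monitors whose locking was
       // not eliminated, so that their biases can be revoked before the frame is
       // deoptimized.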
  1107 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
  1108   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
  1109   for (int i = 0; i < monitors->length(); i++) {
  1110     MonitorInfo* mon_info = monitors->at(i);
  1111     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
  1112       objects_to_revoke->append(Handle(mon_info->owner()));
  1113     }
  1114   }
  1115 }
  1118 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
  1119   if (!UseBiasedLocking) {
  1120     return;
  1121   }
  1123   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  1125   // Unfortunately we don't have a RegisterMap available in most of
  1126   // the places we want to call this routine so we need to walk the
  1127   // stack again to update the register map.
  1128   if (map == NULL || !map->update_map()) {
  1129     StackFrameStream sfs(thread, true);
  1130     bool found = false;
  1131     while (!found && !sfs.is_done()) {
  1132       frame* cur = sfs.current();
  1133       sfs.next();
  1134       found = cur->id() == fr.id();
  1135     }
  1136     assert(found, "frame to be deoptimized not found on target thread's stack");
  1137     map = sfs.register_map();
  1138   }
  1140   vframe* vf = vframe::new_vframe(&fr, map, thread);
  1141   compiledVFrame* cvf = compiledVFrame::cast(vf);
  1142   // Revoke monitors' biases in all scopes
  1143   while (!cvf->is_top()) {
  1144     collect_monitors(cvf, objects_to_revoke);
  1145     cvf = compiledVFrame::cast(cvf->sender());
  1146   }
  1147   collect_monitors(cvf, objects_to_revoke);
  1149   if (SafepointSynchronize::is_at_safepoint()) {
  1150     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  1151   } else {
  1152     BiasedLocking::revoke(objects_to_revoke);
  1153   }
  1154 }
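       // Variant used when a whole code blob is being deoptimized: at a safepoint,
       // walk every Java thread's stack and revoke the biases of all monitors held
       // by frames currently executing in this blob.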
  1157 void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
  1158   if (!UseBiasedLocking) {
  1159     return;
  1160   }
  1162   assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
  1163   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
  1164   for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
  1165     if (jt->has_last_Java_frame()) {
  1166       StackFrameStream sfs(jt, true);
  1167       while (!sfs.is_done()) {
  1168         frame* cur = sfs.current();
  1169         if (cb->contains(cur->pc())) {
  1170           vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
  1171           compiledVFrame* cvf = compiledVFrame::cast(vf);
  1172           // Revoke monitors' biases in all scopes
  1173           while (!cvf->is_top()) {
  1174             collect_monitors(cvf, objects_to_revoke);
  1175             cvf = compiledVFrame::cast(cvf->sender());
  1176           }
  1177           collect_monitors(cvf, objects_to_revoke);
  1178         }
  1179         sfs.next();
  1180       }
  1181     }
  1182   }
  1183   BiasedLocking::revoke_at_safepoint(objects_to_revoke);
  1184 }
  1187 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  1188   assert(fr.can_be_deoptimized(), "checking frame type");
  1190   gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);
  1192   // Patch the nmethod so that when execution returns to it we will
  1193   // deopt the execution state and return to the interpreter.
  1194   fr.deoptimize(thread);
  1195 }
  1197 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
  1198   // Deoptimize only if the frame comes from compiled code.
  1199   // Do not deoptimize the frame which is already patched
  1200   // during the execution of the loops below.
  1201   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
  1202     return;
  1203   }
  1204   ResourceMark rm;
  1205   DeoptimizationMarker dm;
  1206   if (UseBiasedLocking) {
  1207     revoke_biases_of_monitors(thread, fr, map);
  1208   }
  1209   deoptimize_single_frame(thread, fr);
  1210 }
  1214 void Deoptimization::deoptimize_frame_internal(JavaThread* thread, intptr_t* id) {
  1215   assert(thread == Thread::current() || SafepointSynchronize::is_at_safepoint(),
  1216          "can only deoptimize other thread at a safepoint");
  1217   // Compute frame and register map based on thread and sp.
  1218   RegisterMap reg_map(thread, UseBiasedLocking);
  1219   frame fr = thread->last_frame();
  1220   while (fr.id() != id) {
  1221     fr = fr.sender(&reg_map);
  1222   }
  1223   deoptimize(thread, fr, &reg_map);
  1224 }
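       // Deoptimize the frame identified by 'id'. Deoptimizing a frame that belongs
       // to another thread must happen in a VM operation (at a safepoint), which is
       // why the non-current-thread case goes through VMThread::execute below.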
  1227 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  1228   if (thread == Thread::current()) {
  1229     Deoptimization::deoptimize_frame_internal(thread, id);
  1230   } else {
  1231     VM_DeoptimizeFrame deopt(thread, id);
  1232     VMThread::execute(&deopt);
  1233   }
  1234 }
  1237 // JVMTI PopFrame support
  1238 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
  1240   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
  1242 JRT_END
  1245 #if defined(COMPILER2) || defined(SHARK)
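       // Eagerly resolve the class (or, for a signature entry, every class named in
       // the signature) referenced by the given constant pool index; used from the
       // uncommon trap path when the trap involves an unresolved constant pool entry.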
  1246 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  1247   // in case of an unresolved klass entry, load the class.
  1248   if (constant_pool->tag_at(index).is_unresolved_klass()) {
  1249     Klass* tk = constant_pool->klass_at(index, CHECK);
  1250     return;
  1253   if (!constant_pool->tag_at(index).is_symbol()) return;
  1255   Handle class_loader (THREAD, constant_pool->pool_holder()->class_loader());
  1256   Symbol*  symbol  = constant_pool->symbol_at(index);
  1258   // class name?
  1259   if (symbol->byte_at(0) != '(') {
  1260     Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
  1261     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
  1262     return;
  1265   // then it must be a signature!
  1266   ResourceMark rm(THREAD);
  1267   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
  1268     if (ss.is_object()) {
  1269       Symbol* class_name = ss.as_symbol(CHECK);
  1270       Handle protection_domain (THREAD, constant_pool->pool_holder()->protection_domain());
  1271       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
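      // Editor's sketch (hypothetical signature, illustration only; assumes a
      // TRAPS context): only the object-typed entries of a signature reach
      // resolve_or_null(); primitive and void entries are skipped by the loop
      // above.
#if 0
      Symbol* sig = SymbolTable::new_symbol("(Ljava/lang/String;I)V", CHECK);
      for (SignatureStream ss(sig); !ss.is_done(); ss.next()) {
        if (ss.is_object()) {
          // visits java/lang/String only; T_INT and T_VOID are not objects
          tty->print_cr("would resolve %s", ss.as_symbol(CHECK)->as_C_string());
        }
      }
#endif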
  1277 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  1278   EXCEPTION_MARK;
  1279   load_class_by_index(constant_pool, index, THREAD);
  1280   if (HAS_PENDING_EXCEPTION) {
  1281     // Exception happened during classloading. We ignore the exception here, since it
  1282     // is going to be rethrown anyway: the current activation is going to be deoptimized and
  1283     // the interpreter will re-execute the bytecode.
  1284     CLEAR_PENDING_EXCEPTION;
  1285     // Class loading called java code which may have caused a stack
  1286     // overflow. If the exception was thrown right before the return
  1287     // to the runtime the stack is no longer guarded. Reguard the
  1288     // stack otherwise if we return to the uncommon trap blob and the
  1289     // stack bang causes a stack overflow we crash.
  1290     assert(THREAD->is_Java_thread(), "only a java thread can be here");
  1291     JavaThread* thread = (JavaThread*)THREAD;
  1292     bool guard_pages_enabled = thread->stack_yellow_zone_enabled();
  1293     if (!guard_pages_enabled) guard_pages_enabled = thread->reguard_stack();
  1294     assert(guard_pages_enabled, "stack banging in uncommon trap blob may cause crash");
  1298 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  1299   HandleMark hm;
  1301   // uncommon_trap() is called at the beginning of the uncommon trap
  1302   // handler. Note this fact before we start generating temporary frames
  1303   // that can confuse an asynchronous stack walker. This counter is
  1304   // decremented at the end of unpack_frames().
  1305   thread->inc_in_deopt_handler();
  1307   // We need to update the map if we have biased locking.
  1308   RegisterMap reg_map(thread, UseBiasedLocking);
  1309   frame stub_frame = thread->last_frame();
  1310   frame fr = stub_frame.sender(&reg_map);
  1311   // Make sure the calling nmethod is not getting deoptimized and removed
  1312   // before we are done with it.
  1313   nmethodLocker nl(fr.pc());
  1315   // Log a message
  1316   Events::log(thread, "Uncommon trap: trap_request=" PTR32_FORMAT " fr.pc=" INTPTR_FORMAT,
  1317               trap_request, fr.pc());
  1320     ResourceMark rm;
  1322     // Revoke biases of any monitors in the frame to ensure we can migrate them
  1323     revoke_biases_of_monitors(thread, fr, &reg_map);
  1325     DeoptReason reason = trap_request_reason(trap_request);
  1326     DeoptAction action = trap_request_action(trap_request);
  1327     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
  1329     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
  1330     compiledVFrame* cvf = compiledVFrame::cast(vf);
  1332     nmethod* nm = cvf->code();
  1334     ScopeDesc*      trap_scope  = cvf->scope();
  1335     methodHandle    trap_method = trap_scope->method();
  1336     int             trap_bci    = trap_scope->bci();
  1337     Bytecodes::Code trap_bc     = trap_method->java_code_at(trap_bci);
  1339     // Record this event in the histogram.
  1340     gather_statistics(reason, action, trap_bc);
  1342     // Ensure that we can record deopt. history:
  1343     // Need MDO to record RTM code generation state.
  1344     bool create_if_missing = ProfileTraps RTM_OPT_ONLY( || UseRTMLocking );
  1346     MethodData* trap_mdo =
  1347       get_method_data(thread, trap_method, create_if_missing);
  1349     // Log a message
  1350     Events::log_deopt_message(thread, "Uncommon trap: reason=%s action=%s pc=" INTPTR_FORMAT " method=%s @ %d",
  1351                               trap_reason_name(reason), trap_action_name(action), fr.pc(),
  1352                               trap_method->name_and_sig_as_C_string(), trap_bci);
  1354     // Print a bunch of diagnostics, if requested.
  1355     if (TraceDeoptimization || LogCompilation) {
  1356       ResourceMark rm;
  1357       ttyLocker ttyl;
  1358       char buf[100];
  1359       if (xtty != NULL) {
  1360         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
  1361                          os::current_thread_id(),
  1362                          format_trap_request(buf, sizeof(buf), trap_request));
  1363         nm->log_identity(xtty);
  1365       Symbol* class_name = NULL;
  1366       bool unresolved = false;
  1367       if (unloaded_class_index >= 0) {
  1368         constantPoolHandle constants (THREAD, trap_method->constants());
  1369         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
  1370           class_name = constants->klass_name_at(unloaded_class_index);
  1371           unresolved = true;
  1372           if (xtty != NULL)
  1373             xtty->print(" unresolved='1'");
  1374         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
  1375           class_name = constants->symbol_at(unloaded_class_index);
  1377         if (xtty != NULL)
  1378           xtty->name(class_name);
  1380       if (xtty != NULL && trap_mdo != NULL) {
  1381         // Dump the relevant MDO state.
  1382         // This is the deopt count for the current reason, any previous
  1383         // reasons or recompiles seen at this point.
  1384         int dcnt = trap_mdo->trap_count(reason);
  1385         if (dcnt != 0)
  1386           xtty->print(" count='%d'", dcnt);
  1387         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
  1388         int dos = (pdata == NULL)? 0: pdata->trap_state();
  1389         if (dos != 0) {
  1390           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
  1391           if (trap_state_is_recompiled(dos)) {
  1392             int recnt2 = trap_mdo->overflow_recompile_count();
  1393             if (recnt2 != 0)
  1394               xtty->print(" recompiles2='%d'", recnt2);
  1398       if (xtty != NULL) {
  1399         xtty->stamp();
  1400         xtty->end_head();
  1402       if (TraceDeoptimization) {  // make noise on the tty
  1403         tty->print("Uncommon trap occurred in");
  1404         nm->method()->print_short_name(tty);
  1405         tty->print(" (@" INTPTR_FORMAT ") thread=" UINTX_FORMAT " reason=%s action=%s unloaded_class_index=%d",
  1406                    fr.pc(),
  1407                    os::current_thread_id(),
  1408                    trap_reason_name(reason),
  1409                    trap_action_name(action),
  1410                    unloaded_class_index);
  1411         if (class_name != NULL) {
  1412           tty->print(unresolved ? " unresolved class: " : " symbol: ");
  1413           class_name->print_symbol_on(tty);
  1415         tty->cr();
  1417       if (xtty != NULL) {
  1418         // Log the precise location of the trap.
  1419         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
  1420           xtty->begin_elem("jvms bci='%d'", sd->bci());
  1421           xtty->method(sd->method());
  1422           xtty->end_elem();
  1423           if (sd->is_top())  break;
  1425         xtty->tail("uncommon_trap");
  1428     // (End diagnostic printout.)
  1430     // Load class if necessary
  1431     if (unloaded_class_index >= 0) {
  1432       constantPoolHandle constants(THREAD, trap_method->constants());
  1433       load_class_by_index(constants, unloaded_class_index);
  1436     // Flush the nmethod if necessary and desirable.
  1437     //
  1438     // We need to avoid situations where we are re-flushing the nmethod
  1439     // because of a hot deoptimization site.  Repeated flushes at the same
  1440     // point need to be detected by the compiler and avoided.  If the compiler
  1441     // cannot avoid them (or has a bug and "refuses" to avoid them), this
  1442     // module must take measures to avoid an infinite cycle of recompilation
  1443     // and deoptimization.  There are several such measures:
  1444     //
  1445     //   1. If a recompilation is ordered a second time at some site X
  1446     //   and for the same reason R, the action is adjusted to 'reinterpret',
  1447     //   to give the interpreter time to exercise the method more thoroughly.
  1448     //   If this happens, the method's overflow_recompile_count is incremented.
  1449     //
  1450     //   2. If the compiler fails to reduce the deoptimization rate, then
  1451     //   the method's overflow_recompile_count will begin to exceed the set
  1452     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
  1453     //   is adjusted to 'make_not_compilable', and the method is abandoned
  1454     //   to the interpreter.  This is a performance hit for hot methods,
  1455     //   but is better than a disastrous infinite cycle of recompilations.
  1456     //   (Actually, only the method containing the site X is abandoned.)
  1457     //
  1458     //   3. In parallel with the previous measures, if the total number of
  1459     //   recompilations of a method exceeds the much larger set limit
  1460     //   PerMethodRecompilationCutoff, the method is abandoned.
  1461     //   This should only happen if the method is very large and has
  1462     //   many "lukewarm" deoptimizations.  The code which enforces this
  1463     //   limit is elsewhere (class nmethod, class Method).
  1464     //
  1465     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
  1466     // to recompile at each bytecode independently of the per-BCI cutoff.
  1467     //
  1468     // The decision to update code is up to the compiler, and is encoded
  1469     // in the Action_xxx code.  If the compiler requests Action_none
  1470     // no trap state is changed, no compiled code is changed, and the
  1471     // computation suffers along in the interpreter.
  1472     //
  1473     // The other action codes specify various tactics for decompilation
  1474     // and recompilation.  Action_maybe_recompile is the loosest, and
  1475     // allows the compiled code to stay around until enough traps are seen,
  1476     // and until the compiler gets around to recompiling the trapping method.
  1477     //
  1478     // The other actions cause immediate removal of the present code.
  1480     bool update_trap_state = true;
  1481     bool make_not_entrant = false;
  1482     bool make_not_compilable = false;
  1483     bool reprofile = false;
  1484     switch (action) {
  1485     case Action_none:
  1486       // Keep the old code.
  1487       update_trap_state = false;
  1488       break;
  1489     case Action_maybe_recompile:
  1490       // Do not need to invalidate the present code, but we can
  1491       // initiate another compilation.
  1492       // Start compiler without (necessarily) invalidating the nmethod.
  1493       // The system will tolerate the old code, but new code should be
  1494       // generated when possible.
  1495       break;
  1496     case Action_reinterpret:
  1497       // Go back into the interpreter for a while, and then consider
  1498       // recompiling from scratch.
  1499       make_not_entrant = true;
  1500       // Reset invocation counter for outermost method.
  1501       // This will allow the interpreter to exercise the bytecodes
  1502       // for a while before recompiling.
  1503       // By contrast, Action_make_not_entrant is immediate.
  1504       //
  1505       // Note that the compiler will track null_check, null_assert,
  1506       // range_check, and class_check events and log them as if they
  1507       // had been traps taken from compiled code.  This will update
  1508       // the MDO trap history so that the next compilation will
  1509       // properly detect hot trap sites.
  1510       reprofile = true;
  1511       break;
  1512     case Action_make_not_entrant:
  1513       // Request immediate recompilation, and get rid of the old code.
  1514       // Make them not entrant, so next time they are called they get
  1515       // recompiled.  Unloaded classes are loaded now so recompile before next
  1516       // time they are called.  Same for uninitialized.  The interpreter will
  1517       // link the missing class, if any.
  1518       make_not_entrant = true;
  1519       break;
  1520     case Action_make_not_compilable:
  1521       // Give up on compiling this method at all.
  1522       make_not_entrant = true;
  1523       make_not_compilable = true;
  1524       break;
  1525     default:
  1526       ShouldNotReachHere();
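    // Editor's summary (addition, not in the original sources) of the flags
    // the switch above selects; update_trap_state remains true for every
    // action except Action_none:
    //
    //   action               make_not_entrant  make_not_compilable  reprofile
    //   none                        -                  -                -
    //   maybe_recompile             -                  -                -
    //   reinterpret                yes                 -               yes
    //   make_not_entrant           yes                 -                -
    //   make_not_compilable        yes                yes               -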
  1529     // Setting +ProfileTraps fixes the following, on all platforms:
  1530     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
  1531     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
  1532     // recompile relies on a MethodData* to record heroic opt failures.
  1534     // Whether the interpreter is producing MDO data or not, we also need
  1535     // to use the MDO to detect hot deoptimization points and control
  1536     // aggressive optimization.
  1537     bool inc_recompile_count = false;
  1538     ProfileData* pdata = NULL;
  1539     if (ProfileTraps && update_trap_state && trap_mdo != NULL) {
  1540       assert(trap_mdo == get_method_data(thread, trap_method, false), "sanity");
  1541       uint this_trap_count = 0;
  1542       bool maybe_prior_trap = false;
  1543       bool maybe_prior_recompile = false;
  1544       pdata = query_update_method_data(trap_mdo, trap_bci, reason,
  1545                                    nm->method(),
  1546                                    //outputs:
  1547                                    this_trap_count,
  1548                                    maybe_prior_trap,
  1549                                    maybe_prior_recompile);
  1550       // Because the interpreter also counts null, div0, range, and class
  1551       // checks, these traps from compiled code are double-counted.
  1552       // This is harmless; it just means that the PerXTrapLimit values
  1553       // are in effect a little smaller than they look.
  1555       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  1556       if (per_bc_reason != Reason_none) {
  1557         // Now take action based on the partially known per-BCI history.
  1558         if (maybe_prior_trap
  1559             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
  1560           // If there are too many traps at this BCI, force a recompile.
  1561           // This will allow the compiler to see the limit overflow, and
  1562           // take corrective action, if possible.  The compiler generally
  1563           // does not use the exact PerBytecodeTrapLimit value, but instead
  1564           // changes its tactics if it sees any traps at all.  This provides
  1565           // a little hysteresis, delaying a recompile until a trap happens
  1566           // several times.
  1567           //
  1568           // Actually, since there is only one bit of counter per BCI,
  1569           // the possible per-BCI counts are {0,1,(per-method count)}.
  1570           // This produces accurate results if in fact there is only
  1571           // one hot trap site, but begins to get fuzzy if there are
  1572           // many sites.  For example, if there are ten sites each
  1573           // trapping two or more times, they each get the blame for
  1574           // all of their traps.
  1575           make_not_entrant = true;
  1578         // Detect repeated recompilation at the same BCI, and enforce a limit.
  1579         if (make_not_entrant && maybe_prior_recompile) {
  1580           // More than one recompile at this point.
  1581           inc_recompile_count = maybe_prior_trap;
  1583       } else {
  1584         // For reasons which are not recorded per-bytecode, we simply
  1585         // force recompiles unconditionally.
  1586         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
  1587         make_not_entrant = true;
  1590       // Go back to the compiler if there are too many traps in this method.
  1591       if (this_trap_count >= per_method_trap_limit(reason)) {
  1592         // If there are too many traps in this method, force a recompile.
  1593         // This will allow the compiler to see the limit overflow, and
  1594         // take corrective action, if possible.
  1595         // (This condition is an unlikely backstop only, because the
  1596         // PerBytecodeTrapLimit is more likely to take effect first,
  1597         // if it is applicable.)
  1598         make_not_entrant = true;
  1601       // Here's more hysteresis:  If there has been a recompile at
  1602       // this trap point already, run the method in the interpreter
  1603       // for a while to exercise it more thoroughly.
  1604       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
  1605         reprofile = true;
  1610     // Take requested actions on the method:
  1612     // Recompile
  1613     if (make_not_entrant) {
  1614       if (!nm->make_not_entrant()) {
  1615         return; // the call did not change nmethod's state
  1618       if (pdata != NULL) {
  1619         // Record the recompilation event, if any.
  1620         int tstate0 = pdata->trap_state();
  1621         int tstate1 = trap_state_set_recompiled(tstate0, true);
  1622         if (tstate1 != tstate0)
  1623           pdata->set_trap_state(tstate1);
  1626 #if INCLUDE_RTM_OPT
  1627       // Restart collecting RTM locking abort statistic if the method
  1628       // is recompiled for a reason other than RTM state change.
  1629       // Assume that in new recompiled code the statistic could be different,
  1630       // for example, due to different inlining.
  1631       if ((reason != Reason_rtm_state_change) && (trap_mdo != NULL) &&
  1632           UseRTMDeopt && (nm->rtm_state() != ProfileRTM)) {
  1633         trap_mdo->atomic_set_rtm_state(ProfileRTM);
  1635 #endif
  1638     if (inc_recompile_count) {
  1639       trap_mdo->inc_overflow_recompile_count();
  1640       if ((uint)trap_mdo->overflow_recompile_count() >
  1641           (uint)PerBytecodeRecompilationCutoff) {
  1642         // Give up on the method containing the bad BCI.
  1643         if (trap_method() == nm->method()) {
  1644           make_not_compilable = true;
  1645         } else {
  1646           trap_method->set_not_compilable(CompLevel_full_optimization, true, "overflow_recompile_count > PerBytecodeRecompilationCutoff");
  1647           // But give grace to the enclosing nm->method().
  1652     // Reprofile
  1653     if (reprofile) {
  1654       CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
  1657     // Give up compiling
  1658     if (make_not_compilable && !nm->method()->is_not_compilable(CompLevel_full_optimization)) {
  1659       assert(make_not_entrant, "consistent");
  1660       nm->method()->set_not_compilable(CompLevel_full_optimization);
  1663   } // Free marked resources
  1666 JRT_END
  1668 MethodData*
  1669 Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
  1670                                 bool create_if_missing) {
  1671   Thread* THREAD = thread;
  1672   MethodData* mdo = m()->method_data();
  1673   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
  1674     // Build an MDO.  Ignore errors like OutOfMemory;
  1675     // that simply means we won't have an MDO to update.
  1676     Method::build_interpreter_method_data(m, THREAD);
  1677     if (HAS_PENDING_EXCEPTION) {
  1678       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
  1679       CLEAR_PENDING_EXCEPTION;
  1681     mdo = m()->method_data();
  1683   return mdo;
  1686 ProfileData*
  1687 Deoptimization::query_update_method_data(MethodData* trap_mdo,
  1688                                          int trap_bci,
  1689                                          Deoptimization::DeoptReason reason,
  1690                                          Method* compiled_method,
  1691                                          //outputs:
  1692                                          uint& ret_this_trap_count,
  1693                                          bool& ret_maybe_prior_trap,
  1694                                          bool& ret_maybe_prior_recompile) {
  1695   uint prior_trap_count = trap_mdo->trap_count(reason);
  1696   uint this_trap_count  = trap_mdo->inc_trap_count(reason);
  1698   // If the runtime cannot find a place to store trap history,
  1699   // it is estimated based on the general condition of the method.
  1700   // If the method has ever been recompiled, or has ever incurred
  1701   // a trap with the present reason, then this BCI is assumed
  1702   // (pessimistically) to be the culprit.
  1703   bool maybe_prior_trap      = (prior_trap_count != 0);
  1704   bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
  1705   ProfileData* pdata = NULL;
  1708   // For reasons which are recorded per bytecode, we check per-BCI data.
  1709   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  1710   if (per_bc_reason != Reason_none) {
  1711     // Find the profile data for this BCI.  If there isn't one,
  1712     // try to allocate one from the MDO's set of spares.
  1713     // This will let us detect a repeated trap at this point.
  1714     pdata = trap_mdo->allocate_bci_to_data(trap_bci, reason_is_speculate(reason) ? compiled_method : NULL);
  1716     if (pdata != NULL) {
  1717       if (reason_is_speculate(reason) && !pdata->is_SpeculativeTrapData()) {
  1718         if (LogCompilation && xtty != NULL) {
  1719           ttyLocker ttyl;
  1720           // no more room for speculative traps in this MDO
  1721           xtty->elem("speculative_traps_oom");
  1724       // Query the trap state of this profile datum.
  1725       int tstate0 = pdata->trap_state();
  1726       if (!trap_state_has_reason(tstate0, per_bc_reason))
  1727         maybe_prior_trap = false;
  1728       if (!trap_state_is_recompiled(tstate0))
  1729         maybe_prior_recompile = false;
  1731       // Update the trap state of this profile datum.
  1732       int tstate1 = tstate0;
  1733       // Record the reason.
  1734       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
  1735       // Store the updated state on the MDO, for next time.
  1736       if (tstate1 != tstate0)
  1737         pdata->set_trap_state(tstate1);
  1738     } else {
  1739       if (LogCompilation && xtty != NULL) {
  1740         ttyLocker ttyl;
  1741         // Missing MDP?  Leave a small complaint in the log.
  1742         xtty->elem("missing_mdp bci='%d'", trap_bci);
  1747   // Return results:
  1748   ret_this_trap_count = this_trap_count;
  1749   ret_maybe_prior_trap = maybe_prior_trap;
  1750   ret_maybe_prior_recompile = maybe_prior_recompile;
  1751   return pdata;
  1754 void
  1755 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  1756   ResourceMark rm;
  1757   // Ignored outputs:
  1758   uint ignore_this_trap_count;
  1759   bool ignore_maybe_prior_trap;
  1760   bool ignore_maybe_prior_recompile;
  1761   assert(!reason_is_speculate(reason), "reason speculate only used by compiler");
  1762   query_update_method_data(trap_mdo, trap_bci,
  1763                            (DeoptReason)reason,
  1764                            NULL,
  1765                            ignore_this_trap_count,
  1766                            ignore_maybe_prior_trap,
  1767                            ignore_maybe_prior_recompile);
  1770 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
  1772   // Still in Java; no safepoints yet
  1774     // This enters VM and may safepoint
  1775     uncommon_trap_inner(thread, trap_request);
  1777   return fetch_unroll_info_helper(thread);
  1780 // Local derived constants.
  1781 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
  1782 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
  1783 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
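// Editor's sketch of the split above, assuming (purely for illustration) an
// 8-bit trap_mask; the real width is whatever DataLayout::trap_bits gives.
#if 0
  const int mask          = 0xFF;               // stand-in for DataLayout::trap_mask
  const int reason_mask   = mask >> 1;          // 0x7F: low bits encode the DeoptReason
  const int recompile_bit = mask - reason_mask; // 0x80: the remaining top bit
#endif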
  1785 //---------------------------trap_state_reason---------------------------------
  1786 Deoptimization::DeoptReason
  1787 Deoptimization::trap_state_reason(int trap_state) {
  1788   // This assert provides the link between the width of DataLayout::trap_bits
  1789   // and the encoding of "recorded" reasons.  It ensures there are enough
  1790   // bits to store all needed reasons in the per-BCI MDO profile.
  1791   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  1792   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1793   trap_state -= recompile_bit;
  1794   if (trap_state == DS_REASON_MASK) {
  1795     return Reason_many;
  1796   } else {
  1797     assert((int)Reason_none == 0, "state=0 => Reason_none");
  1798     return (DeoptReason)trap_state;
  1801 //-------------------------trap_state_has_reason-------------------------------
  1802 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  1803   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
  1804   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  1805   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1806   trap_state -= recompile_bit;
  1807   if (trap_state == DS_REASON_MASK) {
  1808     return -1;  // true, unspecifically (bottom of state lattice)
  1809   } else if (trap_state == reason) {
  1810     return 1;   // true, definitely
  1811   } else if (trap_state == 0) {
  1812     return 0;   // false, definitely (top of state lattice)
  1813   } else {
  1814     return 0;   // false, definitely
  1817 //-------------------------trap_state_add_reason-------------------------------
  1818 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
  1819   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
  1820   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1821   trap_state -= recompile_bit;
  1822   if (trap_state == DS_REASON_MASK) {
  1823     return trap_state + recompile_bit;     // already at state lattice bottom
  1824   } else if (trap_state == reason) {
  1825     return trap_state + recompile_bit;     // the condition is already true
  1826   } else if (trap_state == 0) {
  1827     return reason + recompile_bit;          // no condition has yet been true
  1828   } else {
  1829     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
  1832 //-----------------------trap_state_is_recompiled------------------------------
  1833 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  1834   return (trap_state & DS_RECOMPILE_BIT) != 0;
  1836 //-----------------------trap_state_set_recompiled-----------------------------
  1837 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
  1838   if (z)  return trap_state |  DS_RECOMPILE_BIT;
  1839   else    return trap_state & ~DS_RECOMPILE_BIT;
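// Editor's sketch (illustration only) of how the helpers above move a per-BCI
// trap_state through the lattice; Reason_null_check and Reason_range_check are
// just example reasons.
#if 0
  int s = 0;                                              // no traps recorded yet
  s = Deoptimization::trap_state_add_reason(s, Deoptimization::Reason_null_check);
  // s now decodes to Reason_null_check
  s = Deoptimization::trap_state_add_reason(s, Deoptimization::Reason_null_check);
  // same reason again: s is unchanged
  s = Deoptimization::trap_state_add_reason(s, Deoptimization::Reason_range_check);
  // a second, different reason: s falls to DS_REASON_MASK, i.e. Reason_many
  s = Deoptimization::trap_state_set_recompiled(s, true);
  // the recompile bit is kept apart and survives further add_reason calls
#endif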
  1841 //---------------------------format_trap_state---------------------------------
  1842 // This is used for debugging and diagnostics, including LogFile output.
  1843 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
  1844                                               int trap_state) {
  1845   DeoptReason reason      = trap_state_reason(trap_state);
  1846   bool        recomp_flag = trap_state_is_recompiled(trap_state);
  1847   // Re-encode the state from its decoded components.
  1848   int decoded_state = 0;
  1849   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
  1850     decoded_state = trap_state_add_reason(decoded_state, reason);
  1851   if (recomp_flag)
  1852     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  1853   // If the state re-encodes properly, format it symbolically.
  1854   // Because this routine is used for debugging and diagnostics,
  1855   // be robust even if the state is a strange value.
  1856   size_t len;
  1857   if (decoded_state != trap_state) {
  1858     // Random buggy state that doesn't decode??
  1859     len = jio_snprintf(buf, buflen, "#%d", trap_state);
  1860   } else {
  1861     len = jio_snprintf(buf, buflen, "%s%s",
  1862                        trap_reason_name(reason),
  1863                        recomp_flag ? " recompiled" : "");
  1865   if (len >= buflen)
  1866     buf[buflen-1] = '\0';
  1867   return buf;
  1871 //--------------------------------statics--------------------------------------
  1872 Deoptimization::DeoptAction Deoptimization::_unloaded_action
  1873   = Deoptimization::Action_reinterpret;
  1874 const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
  1875   // Note:  Keep this in sync. with enum DeoptReason.
  1876   "none",
  1877   "null_check",
  1878   "null_assert",
  1879   "range_check",
  1880   "class_check",
  1881   "array_check",
  1882   "intrinsic",
  1883   "bimorphic",
  1884   "unloaded",
  1885   "uninitialized",
  1886   "unreached",
  1887   "unhandled",
  1888   "constraint",
  1889   "div0_check",
  1890   "age",
  1891   "predicate",
  1892   "loop_limit_check",
  1893   "speculate_class_check",
  1894   "rtm_state_change",
  1895   "unstable_if"
  1896 };
  1897 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
  1898   // Note:  Keep this in sync. with enum DeoptAction.
  1899   "none",
  1900   "maybe_recompile",
  1901   "reinterpret",
  1902   "make_not_entrant",
  1903   "make_not_compilable"
  1904 };
  1906 const char* Deoptimization::trap_reason_name(int reason) {
  1907   if (reason == Reason_many)  return "many";
  1908   if ((uint)reason < Reason_LIMIT)
  1909     return _trap_reason_name[reason];
  1910   static char buf[20];
  1911   sprintf(buf, "reason%d", reason);
  1912   return buf;
  1914 const char* Deoptimization::trap_action_name(int action) {
  1915   if ((uint)action < Action_LIMIT)
  1916     return _trap_action_name[action];
  1917   static char buf[20];
  1918   sprintf(buf, "action%d", action);
  1919   return buf;
  1922 // This is used for debugging and diagnostics, including LogFile output.
  1923 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
  1924                                                 int trap_request) {
  1925   jint unloaded_class_index = trap_request_index(trap_request);
  1926   const char* reason = trap_reason_name(trap_request_reason(trap_request));
  1927   const char* action = trap_action_name(trap_request_action(trap_request));
  1928   size_t len;
  1929   if (unloaded_class_index < 0) {
  1930     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
  1931                        reason, action);
  1932   } else {
  1933     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
  1934                        reason, action, unloaded_class_index);
  1936   if (len >= buflen)
  1937     buf[buflen-1] = '\0';
  1938   return buf;
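// Editor's note: typical results of the formatter above look like
// "reason='null_check' action='make_not_entrant'" when no class index is
// attached, and "reason='unloaded' action='reinterpret' index='13'" for an
// unloaded-class trap (the index value is made up for illustration).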
  1941 juint Deoptimization::_deoptimization_hist
  1942         [Deoptimization::Reason_LIMIT]
  1943     [1 + Deoptimization::Action_LIMIT]
  1944         [Deoptimization::BC_CASE_LIMIT]
  1945   = {0};
  1947 enum {
  1948   LSB_BITS = 8,
  1949   LSB_MASK = right_n_bits(LSB_BITS)
  1950 };
  1952 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
  1953                                        Bytecodes::Code bc) {
  1954   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  1955   assert(action >= 0 && action < Action_LIMIT, "oob");
  1956   _deoptimization_hist[Reason_none][0][0] += 1;  // total
  1957   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  1958   juint* cases = _deoptimization_hist[reason][1+action];
  1959   juint* bc_counter_addr = NULL;
  1960   juint  bc_counter      = 0;
  1961   // Look for an unused counter, or an exact match to this BC.
  1962   if (bc != Bytecodes::_illegal) {
  1963     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
  1964       juint* counter_addr = &cases[bc_case];
  1965       juint  counter = *counter_addr;
  1966       if ((counter == 0 && bc_counter_addr == NULL)
  1967           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
  1968         // this counter is either free or is already devoted to this BC
  1969         bc_counter_addr = counter_addr;
  1970         bc_counter = counter | bc;
  1974   if (bc_counter_addr == NULL) {
  1975     // Overflow, or no given bytecode.
  1976     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
  1977     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  1979   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
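// Editor's sketch of the counter packing used above (illustration only): the
// low LSB_BITS remember which bytecode a cell is devoted to and the bits
// above them hold the occurrence count.
#if 0
  juint counter = 0;
  counter = (counter & ~LSB_MASK) | Bytecodes::_getfield; // claim the cell for getfield
  counter += (1 << LSB_BITS);                             // one more occurrence
  Bytecodes::Code decoded = (Bytecodes::Code)(counter & LSB_MASK); // == _getfield
  juint occurrences = counter >> LSB_BITS;                         // == 1 here
#endif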
  1982 jint Deoptimization::total_deoptimization_count() {
  1983   return _deoptimization_hist[Reason_none][0][0];
  1986 jint Deoptimization::deoptimization_count(DeoptReason reason) {
  1987   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  1988   return _deoptimization_hist[reason][0][0];
  1991 void Deoptimization::print_statistics() {
  1992   juint total = total_deoptimization_count();
  1993   juint account = total;
  1994   if (total != 0) {
  1995     ttyLocker ttyl;
  1996     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
  1997     tty->print_cr("Deoptimization traps recorded:");
  1998     #define PRINT_STAT_LINE(name, r) \
  1999       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
  2000     PRINT_STAT_LINE("total", total);
  2001     // For each non-zero entry in the histogram, print the reason,
  2002     // the action, and (if specifically known) the type of bytecode.
  2003     for (int reason = 0; reason < Reason_LIMIT; reason++) {
  2004       for (int action = 0; action < Action_LIMIT; action++) {
  2005         juint* cases = _deoptimization_hist[reason][1+action];
  2006         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
  2007           juint counter = cases[bc_case];
  2008           if (counter != 0) {
  2009             char name[1*K];
  2010             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
  2011             if (bc_case == BC_CASE_LIMIT && (int)bc == 0)
  2012               bc = Bytecodes::_illegal;
  2013             sprintf(name, "%s/%s/%s",
  2014                     trap_reason_name(reason),
  2015                     trap_action_name(action),
  2016                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
  2017             juint r = counter >> LSB_BITS;
  2018             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
  2019             account -= r;
  2024     if (account != 0) {
  2025       PRINT_STAT_LINE("unaccounted", account);
  2027     #undef PRINT_STAT_LINE
  2028     if (xtty != NULL)  xtty->tail("statistics");
  2031 #else // COMPILER2 || SHARK
  2034 // Stubs for C1 only system.
  2035 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  2036   return false;
  2039 const char* Deoptimization::trap_reason_name(int reason) {
  2040   return "unknown";
  2043 void Deoptimization::print_statistics() {
  2044   // no output
  2047 void
  2048 Deoptimization::update_method_data_from_interpreter(MethodData* trap_mdo, int trap_bci, int reason) {
  2049   // no update
  2052 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  2053   return 0;
  2056 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
  2057                                        Bytecodes::Code bc) {
  2058   // no update
  2061 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
  2062                                               int trap_state) {
  2063   jio_snprintf(buf, buflen, "#%d", trap_state);
  2064   return buf;
  2067 #endif // COMPILER2 || SHARK
