src/share/vm/runtime/deoptimization.cpp

author:      cfang
date:        Tue, 12 Jan 2010 14:37:35 -0800
changeset:   1607:b2b6a9bf6238
parent:      1279:bd02caa94611
child:       1635:ba263cfb7611
permissions: -rw-r--r--

6894779: Loop Predication for Loop Optimizer in C2
Summary: Loop predication implementation
Reviewed-by: never, kvn

     1 /*
     2  * Copyright 1997-2009 Sun Microsystems, Inc.  All Rights Reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    20  * CA 95054 USA or visit www.sun.com if you need additional information or
    21  * have any questions.
    22  *
    23  */
    25 #include "incls/_precompiled.incl"
    26 #include "incls/_deoptimization.cpp.incl"
    28 bool DeoptimizationMarker::_is_active = false;
    30 Deoptimization::UnrollBlock::UnrollBlock(int  size_of_deoptimized_frame,
    31                                          int  caller_adjustment,
    32                                          int  number_of_frames,
    33                                          intptr_t* frame_sizes,
    34                                          address* frame_pcs,
    35                                          BasicType return_type) {
    36   _size_of_deoptimized_frame = size_of_deoptimized_frame;
    37   _caller_adjustment         = caller_adjustment;
    38   _number_of_frames          = number_of_frames;
    39   _frame_sizes               = frame_sizes;
    40   _frame_pcs                 = frame_pcs;
    41   _register_block            = NEW_C_HEAP_ARRAY(intptr_t, RegisterMap::reg_count * 2);
    42   _return_type               = return_type;
    43   // PD (x86 only)
    44   _counter_temp              = 0;
    45   _initial_fp                = 0;
    46   _unpack_kind               = 0;
    47   _sender_sp_temp            = 0;
    49   _total_frame_sizes         = size_of_frames();
    50 }
    53 Deoptimization::UnrollBlock::~UnrollBlock() {
    54   FREE_C_HEAP_ARRAY(intptr_t, _frame_sizes);
    55   FREE_C_HEAP_ARRAY(intptr_t, _frame_pcs);
    56   FREE_C_HEAP_ARRAY(intptr_t, _register_block);
    57 }
    60 intptr_t* Deoptimization::UnrollBlock::value_addr_at(int register_number) const {
    61   assert(register_number < RegisterMap::reg_count, "checking register number");
    62   return &_register_block[register_number * 2];
    63 }
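        // Illustration (hypothetical register count), to make the indexing above
        // concrete: the constructor sized _register_block at reg_count * 2 words,
        // so with RegisterMap::reg_count == 16 the block holds 32 intptr_t slots
        // and value_addr_at(3) returns &_register_block[6] -- the first of the
        // two words reserved for register 3.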
    67 int Deoptimization::UnrollBlock::size_of_frames() const {
     68   // Account first for the adjustment of the initial frame
    69   int result = _caller_adjustment;
    70   for (int index = 0; index < number_of_frames(); index++) {
    71     result += frame_sizes()[index];
    72   }
    73   return result;
    74 }
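        // Illustration (hypothetical values), to show what the sum above yields:
        // with _caller_adjustment == 16 and three unpacked frames whose sizes are
        // { 96, 80, 120 } bytes, size_of_frames() returns 16 + 96 + 80 + 120 == 312.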
    77 void Deoptimization::UnrollBlock::print() {
    78   ttyLocker ttyl;
    79   tty->print_cr("UnrollBlock");
    80   tty->print_cr("  size_of_deoptimized_frame = %d", _size_of_deoptimized_frame);
    81   tty->print(   "  frame_sizes: ");
    82   for (int index = 0; index < number_of_frames(); index++) {
    83     tty->print("%d ", frame_sizes()[index]);
    84   }
    85   tty->cr();
    86 }
    89 // In order to make fetch_unroll_info work properly with escape
     90 // analysis, the method was changed from JRT_LEAF to JRT_BLOCK_ENTRY and
    91 // ResetNoHandleMark and HandleMark were removed from it. The actual reallocation
    92 // of previously eliminated objects occurs in realloc_objects, which is
    93 // called from the method fetch_unroll_info_helper below.
    94 JRT_BLOCK_ENTRY(Deoptimization::UnrollBlock*, Deoptimization::fetch_unroll_info(JavaThread* thread))
    95   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
    96   // but makes the entry a little slower. There is however a little dance we have to
    97   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
    99   // fetch_unroll_info() is called at the beginning of the deoptimization
   100   // handler. Note this fact before we start generating temporary frames
   101   // that can confuse an asynchronous stack walker. This counter is
   102   // decremented at the end of unpack_frames().
   103   thread->inc_in_deopt_handler();
   105   return fetch_unroll_info_helper(thread);
   106 JRT_END
   109 // This is factored, since it is both called from a JRT_LEAF (deoptimization) and a JRT_ENTRY (uncommon_trap)
   110 Deoptimization::UnrollBlock* Deoptimization::fetch_unroll_info_helper(JavaThread* thread) {
   112   // Note: there is a safepoint safety issue here. No matter whether we enter
   113   // via vanilla deopt or uncommon trap we MUST NOT stop at a safepoint once
   114   // the vframeArray is created.
   115   //
   117   // Allocate our special deoptimization ResourceMark
   118   DeoptResourceMark* dmark = new DeoptResourceMark(thread);
   119   assert(thread->deopt_mark() == NULL, "Pending deopt!");
   120   thread->set_deopt_mark(dmark);
   122   frame stub_frame = thread->last_frame(); // Makes stack walkable as side effect
   123   RegisterMap map(thread, true);
   124   RegisterMap dummy_map(thread, false);
   125   // Now get the deoptee with a valid map
   126   frame deoptee = stub_frame.sender(&map);
   128   // Create a growable array of VFrames where each VFrame represents an inlined
   129   // Java frame.  This storage is allocated with the usual system arena.
   130   assert(deoptee.is_compiled_frame(), "Wrong frame type");
   131   GrowableArray<compiledVFrame*>* chunk = new GrowableArray<compiledVFrame*>(10);
   132   vframe* vf = vframe::new_vframe(&deoptee, &map, thread);
   133   while (!vf->is_top()) {
   134     assert(vf->is_compiled_frame(), "Wrong frame type");
   135     chunk->push(compiledVFrame::cast(vf));
   136     vf = vf->sender();
   137   }
   138   assert(vf->is_compiled_frame(), "Wrong frame type");
   139   chunk->push(compiledVFrame::cast(vf));
   141 #ifdef COMPILER2
   142   // Reallocate the non-escaping objects and restore their fields. Then
   143   // relock objects if synchronization on them was eliminated.
   144   if (DoEscapeAnalysis) {
   145     if (EliminateAllocations) {
   146       assert (chunk->at(0)->scope() != NULL,"expect only compiled java frames");
   147       GrowableArray<ScopeValue*>* objects = chunk->at(0)->scope()->objects();
   148       bool reallocated = false;
   149       if (objects != NULL) {
   150         JRT_BLOCK
   151           reallocated = realloc_objects(thread, &deoptee, objects, THREAD);
   152         JRT_END
   153       }
   154       if (reallocated) {
   155         reassign_fields(&deoptee, &map, objects);
   156 #ifndef PRODUCT
   157         if (TraceDeoptimization) {
   158           ttyLocker ttyl;
   159           tty->print_cr("REALLOC OBJECTS in thread " INTPTR_FORMAT, thread);
   160           print_objects(objects);
    161         }
   162 #endif
   163       }
   164     }
   165     if (EliminateLocks) {
   166 #ifndef PRODUCT
   167       bool first = true;
   168 #endif
   169       for (int i = 0; i < chunk->length(); i++) {
   170         compiledVFrame* cvf = chunk->at(i);
   171         assert (cvf->scope() != NULL,"expect only compiled java frames");
   172         GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
   173         if (monitors->is_nonempty()) {
   174           relock_objects(monitors, thread);
   175 #ifndef PRODUCT
   176           if (TraceDeoptimization) {
   177             ttyLocker ttyl;
   178             for (int j = 0; j < monitors->length(); j++) {
   179               MonitorInfo* mi = monitors->at(j);
   180               if (mi->eliminated()) {
   181                 if (first) {
   182                   first = false;
   183                   tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
   184                 }
   185                 tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
   186               }
   187             }
   188           }
   189 #endif
   190         }
   191       }
   192     }
   193   }
   194 #endif // COMPILER2
   195   // Ensure that no safepoint is taken after pointers have been stored
   196   // in fields of rematerialized objects.  If a safepoint occurs from here on
   197   // out the java state residing in the vframeArray will be missed.
   198   No_Safepoint_Verifier no_safepoint;
   200   vframeArray* array = create_vframeArray(thread, deoptee, &map, chunk);
    202   assert(thread->vframe_array_head() == NULL, "Pending deopt!");
   203   thread->set_vframe_array_head(array);
    205   // Now that the vframeArray has been created, if we have any deferred local writes
   206   // added by jvmti then we can free up that structure as the data is now in the
   207   // vframeArray
   209   if (thread->deferred_locals() != NULL) {
   210     GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
   211     int i = 0;
   212     do {
   213       // Because of inlining we could have multiple vframes for a single frame
   214       // and several of the vframes could have deferred writes. Find them all.
   215       if (list->at(i)->id() == array->original().id()) {
   216         jvmtiDeferredLocalVariableSet* dlv = list->at(i);
   217         list->remove_at(i);
   218         // individual jvmtiDeferredLocalVariableSet are CHeapObj's
   219         delete dlv;
   220       } else {
   221         i++;
   222       }
   223     } while ( i < list->length() );
   224     if (list->length() == 0) {
   225       thread->set_deferred_locals(NULL);
   226       // free the list and elements back to C heap.
   227       delete list;
   228     }
   230   }
   232   // Compute the caller frame based on the sender sp of stub_frame and stored frame sizes info.
   233   CodeBlob* cb = stub_frame.cb();
   234   // Verify we have the right vframeArray
   235   assert(cb->frame_size() >= 0, "Unexpected frame size");
   236   intptr_t* unpack_sp = stub_frame.sp() + cb->frame_size();
   238 #ifdef ASSERT
   239   assert(cb->is_deoptimization_stub() || cb->is_uncommon_trap_stub(), "just checking");
   240   Events::log("fetch unroll sp " INTPTR_FORMAT, unpack_sp);
   241 #endif
   242   // This is a guarantee instead of an assert because if vframe doesn't match
   243   // we will unpack the wrong deoptimized frame and wind up in strange places
   244   // where it will be very difficult to figure out what went wrong. Better
   245   // to die an early death here than some very obscure death later when the
   246   // trail is cold.
   247   // Note: on ia64 this guarantee can be fooled by frames with no memory stack
   248   // in that it will fail to detect a problem when there is one. This needs
   249   // more work in tiger timeframe.
   250   guarantee(array->unextended_sp() == unpack_sp, "vframe_array_head must contain the vframeArray to unpack");
   252   int number_of_frames = array->frames();
   254   // Compute the vframes' sizes.  Note that frame_sizes[] entries are ordered from outermost to innermost
   255   // virtual activation, which is the reverse of the elements in the vframes array.
   256   intptr_t* frame_sizes = NEW_C_HEAP_ARRAY(intptr_t, number_of_frames);
   257   // +1 because we always have an interpreter return address for the final slot.
   258   address* frame_pcs = NEW_C_HEAP_ARRAY(address, number_of_frames + 1);
   259   int callee_parameters = 0;
   260   int callee_locals = 0;
   261   int popframe_extra_args = 0;
   262   // Create an interpreter return address for the stub to use as its return
   263   // address so the skeletal frames are perfectly walkable
   264   frame_pcs[number_of_frames] = Interpreter::deopt_entry(vtos, 0);
   266   // PopFrame requires that the preserved incoming arguments from the recently-popped topmost
   267   // activation be put back on the expression stack of the caller for reexecution
   268   if (JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
   269     popframe_extra_args = in_words(thread->popframe_preserved_args_size_in_words());
   270   }
   272   //
   273   // frame_sizes/frame_pcs[0] oldest frame (int or c2i)
   274   // frame_sizes/frame_pcs[1] next oldest frame (int)
   275   // frame_sizes/frame_pcs[n] youngest frame (int)
   276   //
   277   // Now a pc in frame_pcs is actually the return address to the frame's caller (a frame
    278   // owns the space for the return address to its caller).  Confusing, ain't it.
   279   //
   280   // The vframe array can address vframes with indices running from
    281   // 0.._frames-1. Index 0 is the youngest frame and _frames - 1 is the oldest (root) frame.
   282   // When we create the skeletal frames we need the oldest frame to be in the zero slot
   283   // in the frame_sizes/frame_pcs so the assembly code can do a trivial walk.
    284   // So things look a little strange in this loop.
   285   //
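          // Illustration (three inlined frames, hypothetical sizes): if the vframe
          // array holds C (youngest, index 0), B (index 1) and A (oldest, index 2),
          // the loop below fills
          //   frame_sizes[0] = on-stack size of A (oldest)
          //   frame_sizes[1] = on-stack size of B
          //   frame_sizes[2] = on-stack size of C (youngest)
          // frame_pcs[1..2] get placeholder interpreter pcs, frame_pcs[3] was set
          // above to Interpreter::deopt_entry(vtos, 0), and frame_pcs[0] is
          // overwritten after the loop with the real return address into A's caller.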
   286   for (int index = 0; index < array->frames(); index++ ) {
   287     // frame[number_of_frames - 1 ] = on_stack_size(youngest)
   288     // frame[number_of_frames - 2 ] = on_stack_size(sender(youngest))
   289     // frame[number_of_frames - 3 ] = on_stack_size(sender(sender(youngest)))
   290     frame_sizes[number_of_frames - 1 - index] = BytesPerWord * array->element(index)->on_stack_size(callee_parameters,
   291                                                                                                     callee_locals,
   292                                                                                                     index == 0,
   293                                                                                                     popframe_extra_args);
    294     // This pc doesn't have to be perfect, just good enough to identify the frame
    295     // as interpreted so the skeleton frame will be walkable.
    296     // The correct pc will be set when the skeleton frame is completely filled out.
    297     // The final pc we store in the loop is wrong and will be overwritten below.
   298     frame_pcs[number_of_frames - 1 - index ] = Interpreter::deopt_entry(vtos, 0) - frame::pc_return_offset;
   300     callee_parameters = array->element(index)->method()->size_of_parameters();
   301     callee_locals = array->element(index)->method()->max_locals();
   302     popframe_extra_args = 0;
   303   }
   305   // Compute whether the root vframe returns a float or double value.
   306   BasicType return_type;
   307   {
   308     HandleMark hm;
   309     methodHandle method(thread, array->element(0)->method());
   310     Bytecode_invoke* invoke = Bytecode_invoke_at_check(method, array->element(0)->bci());
   311     return_type = (invoke != NULL) ? invoke->result_type(thread) : T_ILLEGAL;
   312   }
   314   // Compute information for handling adapters and adjusting the frame size of the caller.
   315   int caller_adjustment = 0;
   317   // Find the current pc for sender of the deoptee. Since the sender may have been deoptimized
   318   // itself since the deoptee vframeArray was created we must get a fresh value of the pc rather
   319   // than simply use array->sender.pc(). This requires us to walk the current set of frames
   320   //
   321   frame deopt_sender = stub_frame.sender(&dummy_map); // First is the deoptee frame
   322   deopt_sender = deopt_sender.sender(&dummy_map);     // Now deoptee caller
   324   // Compute the amount the oldest interpreter frame will have to adjust
   325   // its caller's stack by. If the caller is a compiled frame then
   326   // we pretend that the callee has no parameters so that the
   327   // extension counts for the full amount of locals and not just
   328   // locals-parms. This is because without a c2i adapter the parm
   329   // area as created by the compiled frame will not be usable by
   330   // the interpreter. (Depending on the calling convention there
   331   // may not even be enough space).
   333   // QQQ I'd rather see this pushed down into last_frame_adjust
   334   // and have it take the sender (aka caller).
   336   if (deopt_sender.is_compiled_frame()) {
   337     caller_adjustment = last_frame_adjust(0, callee_locals);
   338   } else if (callee_locals > callee_parameters) {
   339     // The caller frame may need extending to accommodate
   340     // non-parameter locals of the first unpacked interpreted frame.
   341     // Compute that adjustment.
   342     caller_adjustment = last_frame_adjust(callee_parameters, callee_locals);
   343   }
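          // Illustration (hypothetical counts): if the oldest unpacked method has
          // callee_parameters == 2 and callee_locals == 5, a compiled caller gets
          // caller_adjustment = last_frame_adjust(0, 5) -- extend for all five
          // locals since its outgoing-args area is unusable -- while an already
          // interpreted caller only needs last_frame_adjust(2, 5), covering the
          // three non-parameter locals.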
    346   // If the sender is deoptimized then we must retrieve the address of the handler
   347   // since the frame will "magically" show the original pc before the deopt
   348   // and we'd undo the deopt.
   350   frame_pcs[0] = deopt_sender.raw_pc();
   352   assert(CodeCache::find_blob_unsafe(frame_pcs[0]) != NULL, "bad pc");
   354   UnrollBlock* info = new UnrollBlock(array->frame_size() * BytesPerWord,
   355                                       caller_adjustment * BytesPerWord,
   356                                       number_of_frames,
   357                                       frame_sizes,
   358                                       frame_pcs,
   359                                       return_type);
   360 #if defined(IA32) || defined(AMD64)
   361   // We need a way to pass fp to the unpacking code so the skeletal frames
   362   // come out correct. This is only needed for x86 because of c2 using ebp
   363   // as an allocatable register. So this update is useless (and harmless)
   364   // on the other platforms. It would be nice to do this in a different
   365   // way but even the old style deoptimization had a problem with deriving
   366   // this value. NEEDS_CLEANUP
   367   // Note: now that c1 is using c2's deopt blob we must do this on all
   368   // x86 based platforms
   369   intptr_t** fp_addr = (intptr_t**) (((address)info) + info->initial_fp_offset_in_bytes());
   370   *fp_addr = array->sender().fp(); // was adapter_caller
   371 #endif /* IA32 || AMD64 */
   373   if (array->frames() > 1) {
   374     if (VerifyStack && TraceDeoptimization) {
   375       tty->print_cr("Deoptimizing method containing inlining");
   376     }
   377   }
   379   array->set_unroll_block(info);
   380   return info;
   381 }
   383 // Called to cleanup deoptimization data structures in normal case
   384 // after unpacking to stack and when stack overflow error occurs
   385 void Deoptimization::cleanup_deopt_info(JavaThread *thread,
   386                                         vframeArray *array) {
   388   // Get array if coming from exception
   389   if (array == NULL) {
   390     array = thread->vframe_array_head();
   391   }
   392   thread->set_vframe_array_head(NULL);
   394   // Free the previous UnrollBlock
   395   vframeArray* old_array = thread->vframe_array_last();
   396   thread->set_vframe_array_last(array);
   398   if (old_array != NULL) {
   399     UnrollBlock* old_info = old_array->unroll_block();
   400     old_array->set_unroll_block(NULL);
   401     delete old_info;
   402     delete old_array;
   403   }
    405   // Deallocate any resources created in this routine and any ResourceObjs allocated
   406   // inside the vframeArray (StackValueCollections)
   408   delete thread->deopt_mark();
   409   thread->set_deopt_mark(NULL);
   412   if (JvmtiExport::can_pop_frame()) {
   413 #ifndef CC_INTERP
   414     // Regardless of whether we entered this routine with the pending
   415     // popframe condition bit set, we should always clear it now
   416     thread->clear_popframe_condition();
   417 #else
    418     // C++ interpreter will clear has_pending_popframe when it enters
   419     // with method_resume. For deopt_resume2 we clear it now.
   420     if (thread->popframe_forcing_deopt_reexecution())
   421         thread->clear_popframe_condition();
   422 #endif /* CC_INTERP */
   423   }
   425   // unpack_frames() is called at the end of the deoptimization handler
   426   // and (in C2) at the end of the uncommon trap handler. Note this fact
   427   // so that an asynchronous stack walker can work again. This counter is
   428   // incremented at the beginning of fetch_unroll_info() and (in C2) at
   429   // the beginning of uncommon_trap().
   430   thread->dec_in_deopt_handler();
   431 }
   434 // Return BasicType of value being returned
   435 JRT_LEAF(BasicType, Deoptimization::unpack_frames(JavaThread* thread, int exec_mode))
    437   // We are already active in the special DeoptResourceMark; any ResourceObjs we
   438   // allocate will be freed at the end of the routine.
   440   // It is actually ok to allocate handles in a leaf method. It causes no safepoints,
   441   // but makes the entry a little slower. There is however a little dance we have to
   442   // do in debug mode to get around the NoHandleMark code in the JRT_LEAF macro
   443   ResetNoHandleMark rnhm; // No-op in release/product versions
   444   HandleMark hm;
   446   frame stub_frame = thread->last_frame();
   448   // Since the frame to unpack is the top frame of this thread, the vframe_array_head
   449   // must point to the vframeArray for the unpack frame.
   450   vframeArray* array = thread->vframe_array_head();
   452 #ifndef PRODUCT
   453   if (TraceDeoptimization) {
   454     tty->print_cr("DEOPT UNPACKING thread " INTPTR_FORMAT " vframeArray " INTPTR_FORMAT " mode %d", thread, array, exec_mode);
   455   }
   456 #endif
   458   UnrollBlock* info = array->unroll_block();
   460   // Unpack the interpreter frames and any adapter frame (c2 only) we might create.
   461   array->unpack_to_stack(stub_frame, exec_mode);
   463   BasicType bt = info->return_type();
   465   // If we have an exception pending, claim that the return type is an oop
   466   // so the deopt_blob does not overwrite the exception_oop.
   468   if (exec_mode == Unpack_exception)
   469     bt = T_OBJECT;
   471   // Cleanup thread deopt data
   472   cleanup_deopt_info(thread, array);
   474 #ifndef PRODUCT
   475   if (VerifyStack) {
   476     ResourceMark res_mark;
   478     // Verify that the just-unpacked frames match the interpreter's
   479     // notions of expression stack and locals
   480     vframeArray* cur_array = thread->vframe_array_last();
   481     RegisterMap rm(thread, false);
   482     rm.set_include_argument_oops(false);
   483     bool is_top_frame = true;
   484     int callee_size_of_parameters = 0;
   485     int callee_max_locals = 0;
   486     for (int i = 0; i < cur_array->frames(); i++) {
   487       vframeArrayElement* el = cur_array->element(i);
   488       frame* iframe = el->iframe();
   489       guarantee(iframe->is_interpreted_frame(), "Wrong frame type");
   491       // Get the oop map for this bci
   492       InterpreterOopMap mask;
   493       int cur_invoke_parameter_size = 0;
   494       bool try_next_mask = false;
   495       int next_mask_expression_stack_size = -1;
   496       int top_frame_expression_stack_adjustment = 0;
   497       methodHandle mh(thread, iframe->interpreter_frame_method());
   498       OopMapCache::compute_one_oop_map(mh, iframe->interpreter_frame_bci(), &mask);
   499       BytecodeStream str(mh);
   500       str.set_start(iframe->interpreter_frame_bci());
   501       int max_bci = mh->code_size();
   502       // Get to the next bytecode if possible
   503       assert(str.bci() < max_bci, "bci in interpreter frame out of bounds");
   504       // Check to see if we can grab the number of outgoing arguments
   505       // at an uncommon trap for an invoke (where the compiler
   506       // generates debug info before the invoke has executed)
   507       Bytecodes::Code cur_code = str.next();
   508       if (cur_code == Bytecodes::_invokevirtual ||
   509           cur_code == Bytecodes::_invokespecial ||
   510           cur_code == Bytecodes::_invokestatic  ||
   511           cur_code == Bytecodes::_invokeinterface) {
   512         Bytecode_invoke* invoke = Bytecode_invoke_at(mh, iframe->interpreter_frame_bci());
   513         symbolHandle signature(thread, invoke->signature());
   514         ArgumentSizeComputer asc(signature);
   515         cur_invoke_parameter_size = asc.size();
   516         if (cur_code != Bytecodes::_invokestatic) {
   517           // Add in receiver
   518           ++cur_invoke_parameter_size;
   519         }
   520       }
   521       if (str.bci() < max_bci) {
   522         Bytecodes::Code bc = str.next();
   523         if (bc >= 0) {
   524           // The interpreter oop map generator reports results before
   525           // the current bytecode has executed except in the case of
   526           // calls. It seems to be hard to tell whether the compiler
   527           // has emitted debug information matching the "state before"
   528           // a given bytecode or the state after, so we try both
   529           switch (cur_code) {
   530             case Bytecodes::_invokevirtual:
   531             case Bytecodes::_invokespecial:
   532             case Bytecodes::_invokestatic:
   533             case Bytecodes::_invokeinterface:
   534             case Bytecodes::_athrow:
   535               break;
   536             default: {
   537               InterpreterOopMap next_mask;
   538               OopMapCache::compute_one_oop_map(mh, str.bci(), &next_mask);
   539               next_mask_expression_stack_size = next_mask.expression_stack_size();
   540               // Need to subtract off the size of the result type of
   541               // the bytecode because this is not described in the
   542               // debug info but returned to the interpreter in the TOS
   543               // caching register
   544               BasicType bytecode_result_type = Bytecodes::result_type(cur_code);
   545               if (bytecode_result_type != T_ILLEGAL) {
   546                 top_frame_expression_stack_adjustment = type2size[bytecode_result_type];
   547               }
   548               assert(top_frame_expression_stack_adjustment >= 0, "");
   549               try_next_mask = true;
   550               break;
   551             }
   552           }
   553         }
   554       }
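              // Illustration: for a non-call bytecode such as _iadd the next-bci
              // oop map describes the stack after the add, whose int result lives
              // in the TOS caching register rather than on the fabricated frame's
              // stack, so top_frame_expression_stack_adjustment above ends up as
              // type2size[T_INT] == 1; a bytecode with no result leaves it at 0.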
   556       // Verify stack depth and oops in frame
   557       // This assertion may be dependent on the platform we're running on and may need modification (tested on x86 and sparc)
   558       if (!(
   559             /* SPARC */
   560             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_size_of_parameters) ||
   561             /* x86 */
   562             (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + callee_max_locals) ||
   563             (try_next_mask &&
   564              (iframe->interpreter_frame_expression_stack_size() == (next_mask_expression_stack_size -
   565                                                                     top_frame_expression_stack_adjustment))) ||
   566             (is_top_frame && (exec_mode == Unpack_exception) && iframe->interpreter_frame_expression_stack_size() == 0) ||
   567             (is_top_frame && (exec_mode == Unpack_uncommon_trap || exec_mode == Unpack_reexecute) &&
   568              (iframe->interpreter_frame_expression_stack_size() == mask.expression_stack_size() + cur_invoke_parameter_size))
   569             )) {
   570         ttyLocker ttyl;
   572         // Print out some information that will help us debug the problem
   573         tty->print_cr("Wrong number of expression stack elements during deoptimization");
   574         tty->print_cr("  Error occurred while verifying frame %d (0..%d, 0 is topmost)", i, cur_array->frames() - 1);
   575         tty->print_cr("  Fabricated interpreter frame had %d expression stack elements",
   576                       iframe->interpreter_frame_expression_stack_size());
   577         tty->print_cr("  Interpreter oop map had %d expression stack elements", mask.expression_stack_size());
   578         tty->print_cr("  try_next_mask = %d", try_next_mask);
   579         tty->print_cr("  next_mask_expression_stack_size = %d", next_mask_expression_stack_size);
   580         tty->print_cr("  callee_size_of_parameters = %d", callee_size_of_parameters);
   581         tty->print_cr("  callee_max_locals = %d", callee_max_locals);
   582         tty->print_cr("  top_frame_expression_stack_adjustment = %d", top_frame_expression_stack_adjustment);
   583         tty->print_cr("  exec_mode = %d", exec_mode);
   584         tty->print_cr("  cur_invoke_parameter_size = %d", cur_invoke_parameter_size);
   585         tty->print_cr("  Thread = " INTPTR_FORMAT ", thread ID = " UINTX_FORMAT, thread, thread->osthread()->thread_id());
   586         tty->print_cr("  Interpreted frames:");
   587         for (int k = 0; k < cur_array->frames(); k++) {
   588           vframeArrayElement* el = cur_array->element(k);
   589           tty->print_cr("    %s (bci %d)", el->method()->name_and_sig_as_C_string(), el->bci());
   590         }
   591         cur_array->print_on_2(tty);
   592         guarantee(false, "wrong number of expression stack elements during deopt");
   593       }
   594       VerifyOopClosure verify;
   595       iframe->oops_interpreted_do(&verify, &rm, false);
   596       callee_size_of_parameters = mh->size_of_parameters();
   597       callee_max_locals = mh->max_locals();
   598       is_top_frame = false;
   599     }
   600   }
   601 #endif /* !PRODUCT */
   604   return bt;
   605 JRT_END
   608 int Deoptimization::deoptimize_dependents() {
   609   Threads::deoptimized_wrt_marked_nmethods();
   610   return 0;
   611 }
   614 #ifdef COMPILER2
   615 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray<ScopeValue*>* objects, TRAPS) {
   616   Handle pending_exception(thread->pending_exception());
   617   const char* exception_file = thread->exception_file();
   618   int exception_line = thread->exception_line();
   619   thread->clear_pending_exception();
   621   for (int i = 0; i < objects->length(); i++) {
   622     assert(objects->at(i)->is_object(), "invalid debug information");
   623     ObjectValue* sv = (ObjectValue*) objects->at(i);
   625     KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
   626     oop obj = NULL;
   628     if (k->oop_is_instance()) {
   629       instanceKlass* ik = instanceKlass::cast(k());
   630       obj = ik->allocate_instance(CHECK_(false));
   631     } else if (k->oop_is_typeArray()) {
   632       typeArrayKlass* ak = typeArrayKlass::cast(k());
   633       assert(sv->field_size() % type2size[ak->element_type()] == 0, "non-integral array length");
   634       int len = sv->field_size() / type2size[ak->element_type()];
   635       obj = ak->allocate(len, CHECK_(false));
   636     } else if (k->oop_is_objArray()) {
   637       objArrayKlass* ak = objArrayKlass::cast(k());
   638       obj = ak->allocate(sv->field_size(), CHECK_(false));
   639     }
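            // Illustration (hypothetical eliminated array): a scalar-replaced
            // long[3] is described by 3 * type2size[T_LONG] == 6 field entries,
            // so the typeArray branch above recovers len == 6 / 2 == 3 before
            // calling ak->allocate(len, ...).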
   641     assert(obj != NULL, "allocation failed");
   642     assert(sv->value().is_null(), "redundant reallocation");
   643     sv->set_value(obj);
   644   }
   646   if (pending_exception.not_null()) {
   647     thread->set_pending_exception(pending_exception(), exception_file, exception_line);
   648   }
   650   return true;
   651 }
   653 // This assumes that the fields are stored in ObjectValue in the same order
   654 // they are yielded by do_nonstatic_fields.
   655 class FieldReassigner: public FieldClosure {
   656   frame* _fr;
   657   RegisterMap* _reg_map;
   658   ObjectValue* _sv;
   659   instanceKlass* _ik;
   660   oop _obj;
   662   int _i;
   663 public:
   664   FieldReassigner(frame* fr, RegisterMap* reg_map, ObjectValue* sv, oop obj) :
   665     _fr(fr), _reg_map(reg_map), _sv(sv), _obj(obj), _i(0) {}
   667   int i() const { return _i; }
   670   void do_field(fieldDescriptor* fd) {
   671     intptr_t val;
   672     StackValue* value =
   673       StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(i()));
   674     int offset = fd->offset();
   675     switch (fd->field_type()) {
   676     case T_OBJECT: case T_ARRAY:
   677       assert(value->type() == T_OBJECT, "Agreement.");
   678       _obj->obj_field_put(offset, value->get_obj()());
   679       break;
   681     case T_LONG: case T_DOUBLE: {
   682       assert(value->type() == T_INT, "Agreement.");
   683       StackValue* low =
   684         StackValue::create_stack_value(_fr, _reg_map, _sv->field_at(++_i));
   685 #ifdef _LP64
   686       jlong res = (jlong)low->get_int();
   687 #else
   688 #ifdef SPARC
   689       // For SPARC we have to swap high and low words.
   690       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
   691 #else
   692       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
   693 #endif //SPARC
   694 #endif
   695       _obj->long_field_put(offset, res);
   696       break;
   697     }
   698     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
   699     case T_INT: case T_FLOAT: // 4 bytes.
   700       assert(value->type() == T_INT, "Agreement.");
   701       val = value->get_int();
   702       _obj->int_field_put(offset, (jint)*((jint*)&val));
   703       break;
   705     case T_SHORT: case T_CHAR: // 2 bytes
   706       assert(value->type() == T_INT, "Agreement.");
   707       val = value->get_int();
   708       _obj->short_field_put(offset, (jshort)*((jint*)&val));
   709       break;
   711     case T_BOOLEAN: case T_BYTE: // 1 byte
   712       assert(value->type() == T_INT, "Agreement.");
   713       val = value->get_int();
   714       _obj->bool_field_put(offset, (jboolean)*((jint*)&val));
   715       break;
   717     default:
   718       ShouldNotReachHere();
   719     }
   720     _i++;
   721   }
   722 };
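        // Illustration (32-bit case): a T_LONG or T_DOUBLE field is reconstructed
        // from two consecutive 32-bit stack values -- 'value' and the extra 'low'
        // slot fetched above -- glued together with jlong_from(), with the halves
        // swapped on SPARC as the #ifdefs spell out; under _LP64 the single 'low'
        // slot already carries the whole 64-bit value.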
   724 // restore elements of an eliminated type array
   725 void Deoptimization::reassign_type_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, typeArrayOop obj, BasicType type) {
   726   int index = 0;
   727   intptr_t val;
   729   for (int i = 0; i < sv->field_size(); i++) {
   730     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
   731     switch(type) {
   732     case T_LONG: case T_DOUBLE: {
   733       assert(value->type() == T_INT, "Agreement.");
   734       StackValue* low =
   735         StackValue::create_stack_value(fr, reg_map, sv->field_at(++i));
   736 #ifdef _LP64
   737       jlong res = (jlong)low->get_int();
   738 #else
   739 #ifdef SPARC
   740       // For SPARC we have to swap high and low words.
   741       jlong res = jlong_from((jint)low->get_int(), (jint)value->get_int());
   742 #else
   743       jlong res = jlong_from((jint)value->get_int(), (jint)low->get_int());
   744 #endif //SPARC
   745 #endif
   746       obj->long_at_put(index, res);
   747       break;
   748     }
   750     // Have to cast to INT (32 bits) pointer to avoid little/big-endian problem.
   751     case T_INT: case T_FLOAT: // 4 bytes.
   752       assert(value->type() == T_INT, "Agreement.");
   753       val = value->get_int();
   754       obj->int_at_put(index, (jint)*((jint*)&val));
   755       break;
   757     case T_SHORT: case T_CHAR: // 2 bytes
   758       assert(value->type() == T_INT, "Agreement.");
   759       val = value->get_int();
   760       obj->short_at_put(index, (jshort)*((jint*)&val));
   761       break;
   763     case T_BOOLEAN: case T_BYTE: // 1 byte
   764       assert(value->type() == T_INT, "Agreement.");
   765       val = value->get_int();
   766       obj->bool_at_put(index, (jboolean)*((jint*)&val));
   767       break;
   769       default:
   770         ShouldNotReachHere();
   771     }
   772     index++;
   773   }
   774 }
    777 // restore elements of an eliminated object array
   778 void Deoptimization::reassign_object_array_elements(frame* fr, RegisterMap* reg_map, ObjectValue* sv, objArrayOop obj) {
   779   for (int i = 0; i < sv->field_size(); i++) {
   780     StackValue* value = StackValue::create_stack_value(fr, reg_map, sv->field_at(i));
   781     assert(value->type() == T_OBJECT, "object element expected");
   782     obj->obj_at_put(i, value->get_obj()());
   783   }
   784 }
   787 // restore fields of all eliminated objects and arrays
   788 void Deoptimization::reassign_fields(frame* fr, RegisterMap* reg_map, GrowableArray<ScopeValue*>* objects) {
   789   for (int i = 0; i < objects->length(); i++) {
   790     ObjectValue* sv = (ObjectValue*) objects->at(i);
   791     KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
   792     Handle obj = sv->value();
   793     assert(obj.not_null(), "reallocation was missed");
   795     if (k->oop_is_instance()) {
   796       instanceKlass* ik = instanceKlass::cast(k());
   797       FieldReassigner reassign(fr, reg_map, sv, obj());
   798       ik->do_nonstatic_fields(&reassign);
   799     } else if (k->oop_is_typeArray()) {
   800       typeArrayKlass* ak = typeArrayKlass::cast(k());
   801       reassign_type_array_elements(fr, reg_map, sv, (typeArrayOop) obj(), ak->element_type());
   802     } else if (k->oop_is_objArray()) {
   803       reassign_object_array_elements(fr, reg_map, sv, (objArrayOop) obj());
   804     }
   805   }
   806 }
   809 // relock objects for which synchronization was eliminated
   810 void Deoptimization::relock_objects(GrowableArray<MonitorInfo*>* monitors, JavaThread* thread) {
   811   for (int i = 0; i < monitors->length(); i++) {
   812     MonitorInfo* mon_info = monitors->at(i);
   813     if (mon_info->eliminated()) {
   814       assert(mon_info->owner() != NULL, "reallocation was missed");
   815       Handle obj = Handle(mon_info->owner());
   816       markOop mark = obj->mark();
   817       if (UseBiasedLocking && mark->has_bias_pattern()) {
    818         // Newly allocated objects may have the mark set to anonymously biased.
    819         // Also the deoptimized method may have called methods with synchronization
    820         // where the thread-local object is bias-locked to the current thread.
   821         assert(mark->is_biased_anonymously() ||
   822                mark->biased_locker() == thread, "should be locked to current thread");
   823         // Reset mark word to unbiased prototype.
   824         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
   825         obj->set_mark(unbiased_prototype);
   826       }
   827       BasicLock* lock = mon_info->lock();
   828       ObjectSynchronizer::slow_enter(obj, lock, thread);
   829     }
   830     assert(mon_info->owner()->is_locked(), "object must be locked now");
   831   }
   832 }
   835 #ifndef PRODUCT
   836 // print information about reallocated objects
   837 void Deoptimization::print_objects(GrowableArray<ScopeValue*>* objects) {
   838   fieldDescriptor fd;
   840   for (int i = 0; i < objects->length(); i++) {
   841     ObjectValue* sv = (ObjectValue*) objects->at(i);
   842     KlassHandle k(((ConstantOopReadValue*) sv->klass())->value()());
   843     Handle obj = sv->value();
   845     tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
   846     k->as_klassOop()->print_value();
   847     tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
   848     tty->cr();
   850     if (Verbose) {
   851       k->oop_print_on(obj(), tty);
   852     }
   853   }
   854 }
   855 #endif
   856 #endif // COMPILER2
   858 vframeArray* Deoptimization::create_vframeArray(JavaThread* thread, frame fr, RegisterMap *reg_map, GrowableArray<compiledVFrame*>* chunk) {
   860 #ifndef PRODUCT
   861   if (TraceDeoptimization) {
   862     ttyLocker ttyl;
   863     tty->print("DEOPT PACKING thread " INTPTR_FORMAT " ", thread);
   864     fr.print_on(tty);
   865     tty->print_cr("     Virtual frames (innermost first):");
   866     for (int index = 0; index < chunk->length(); index++) {
   867       compiledVFrame* vf = chunk->at(index);
   868       tty->print("       %2d - ", index);
   869       vf->print_value();
   870       int bci = chunk->at(index)->raw_bci();
   871       const char* code_name;
   872       if (bci == SynchronizationEntryBCI) {
   873         code_name = "sync entry";
   874       } else {
   875         Bytecodes::Code code = Bytecodes::code_at(vf->method(), bci);
   876         code_name = Bytecodes::name(code);
   877       }
   878       tty->print(" - %s", code_name);
   879       tty->print_cr(" @ bci %d ", bci);
   880       if (Verbose) {
   881         vf->print();
   882         tty->cr();
   883       }
   884     }
   885   }
   886 #endif
   888   // Register map for next frame (used for stack crawl).  We capture
   889   // the state of the deopt'ing frame's caller.  Thus if we need to
   890   // stuff a C2I adapter we can properly fill in the callee-save
   891   // register locations.
   892   frame caller = fr.sender(reg_map);
   893   int frame_size = caller.sp() - fr.sp();
   895   frame sender = caller;
    897   // Since the Java thread being deoptimized will eventually adjust its own stack,
   898   // the vframeArray containing the unpacking information is allocated in the C heap.
   899   // For Compiler1, the caller of the deoptimized frame is saved for use by unpack_frames().
   900   vframeArray* array = vframeArray::allocate(thread, frame_size, chunk, reg_map, sender, caller, fr);
   902   // Compare the vframeArray to the collected vframes
   903   assert(array->structural_compare(thread, chunk), "just checking");
   904   Events::log("# vframes = %d", (intptr_t)chunk->length());
   906 #ifndef PRODUCT
   907   if (TraceDeoptimization) {
   908     ttyLocker ttyl;
   909     tty->print_cr("     Created vframeArray " INTPTR_FORMAT, array);
   910     if (Verbose) {
   911       int count = 0;
   912       // this used to leak deoptimizedVFrame like it was going out of style!!!
   913       for (int index = 0; index < array->frames(); index++ ) {
   914         vframeArrayElement* e = array->element(index);
   915         e->print(tty);
   917         /*
   918           No printing yet.
   919         array->vframe_at(index)->print_activation(count++);
   920         // better as...
   921         array->print_activation_for(index, count++);
   922         */
   923       }
   924     }
   925   }
   926 #endif // PRODUCT
   928   return array;
   929 }
   932 static void collect_monitors(compiledVFrame* cvf, GrowableArray<Handle>* objects_to_revoke) {
   933   GrowableArray<MonitorInfo*>* monitors = cvf->monitors();
   934   for (int i = 0; i < monitors->length(); i++) {
   935     MonitorInfo* mon_info = monitors->at(i);
   936     if (!mon_info->eliminated() && mon_info->owner() != NULL) {
   937       objects_to_revoke->append(Handle(mon_info->owner()));
   938     }
   939   }
   940 }
   943 void Deoptimization::revoke_biases_of_monitors(JavaThread* thread, frame fr, RegisterMap* map) {
   944   if (!UseBiasedLocking) {
   945     return;
   946   }
   948   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
   950   // Unfortunately we don't have a RegisterMap available in most of
   951   // the places we want to call this routine so we need to walk the
   952   // stack again to update the register map.
   953   if (map == NULL || !map->update_map()) {
   954     StackFrameStream sfs(thread, true);
   955     bool found = false;
   956     while (!found && !sfs.is_done()) {
   957       frame* cur = sfs.current();
   958       sfs.next();
   959       found = cur->id() == fr.id();
   960     }
   961     assert(found, "frame to be deoptimized not found on target thread's stack");
   962     map = sfs.register_map();
   963   }
   965   vframe* vf = vframe::new_vframe(&fr, map, thread);
   966   compiledVFrame* cvf = compiledVFrame::cast(vf);
   967   // Revoke monitors' biases in all scopes
   968   while (!cvf->is_top()) {
   969     collect_monitors(cvf, objects_to_revoke);
   970     cvf = compiledVFrame::cast(cvf->sender());
   971   }
   972   collect_monitors(cvf, objects_to_revoke);
   974   if (SafepointSynchronize::is_at_safepoint()) {
   975     BiasedLocking::revoke_at_safepoint(objects_to_revoke);
   976   } else {
   977     BiasedLocking::revoke(objects_to_revoke);
   978   }
   979 }
   982 void Deoptimization::revoke_biases_of_monitors(CodeBlob* cb) {
   983   if (!UseBiasedLocking) {
   984     return;
   985   }
   987   assert(SafepointSynchronize::is_at_safepoint(), "must only be called from safepoint");
   988   GrowableArray<Handle>* objects_to_revoke = new GrowableArray<Handle>();
   989   for (JavaThread* jt = Threads::first(); jt != NULL ; jt = jt->next()) {
   990     if (jt->has_last_Java_frame()) {
   991       StackFrameStream sfs(jt, true);
   992       while (!sfs.is_done()) {
   993         frame* cur = sfs.current();
   994         if (cb->contains(cur->pc())) {
   995           vframe* vf = vframe::new_vframe(cur, sfs.register_map(), jt);
   996           compiledVFrame* cvf = compiledVFrame::cast(vf);
   997           // Revoke monitors' biases in all scopes
   998           while (!cvf->is_top()) {
   999             collect_monitors(cvf, objects_to_revoke);
  1000             cvf = compiledVFrame::cast(cvf->sender());
   1001           }
   1002           collect_monitors(cvf, objects_to_revoke);
   1003         }
   1004         sfs.next();
   1005       }
   1006     }
   1007   }
   1008   BiasedLocking::revoke_at_safepoint(objects_to_revoke);
   1009 }
  1012 void Deoptimization::deoptimize_single_frame(JavaThread* thread, frame fr) {
  1013   assert(fr.can_be_deoptimized(), "checking frame type");
  1015   gather_statistics(Reason_constraint, Action_none, Bytecodes::_illegal);
  1017   EventMark m("Deoptimization (pc=" INTPTR_FORMAT ", sp=" INTPTR_FORMAT ")", fr.pc(), fr.id());
  1019   // Patch the nmethod so that when execution returns to it we will
  1020   // deopt the execution state and return to the interpreter.
   1021   fr.deoptimize(thread);
   1022 }
  1024 void Deoptimization::deoptimize(JavaThread* thread, frame fr, RegisterMap *map) {
   1025   // Deoptimize only if the frame comes from compiled code.
  1026   // Do not deoptimize the frame which is already patched
  1027   // during the execution of the loops below.
  1028   if (!fr.is_compiled_frame() || fr.is_deoptimized_frame()) {
   1029     return;
   1030   }
  1031   ResourceMark rm;
  1032   DeoptimizationMarker dm;
  1033   if (UseBiasedLocking) {
   1034     revoke_biases_of_monitors(thread, fr, map);
   1035   }
   1036   deoptimize_single_frame(thread, fr);
   1037 }
  1041 void Deoptimization::deoptimize_frame(JavaThread* thread, intptr_t* id) {
  1042   // Compute frame and register map based on thread and sp.
  1043   RegisterMap reg_map(thread, UseBiasedLocking);
  1044   frame fr = thread->last_frame();
  1045   while (fr.id() != id) {
   1046     fr = fr.sender(&reg_map);
   1047   }
   1048   deoptimize(thread, fr, &reg_map);
   1049 }
  1052 // JVMTI PopFrame support
  1053 JRT_LEAF(void, Deoptimization::popframe_preserve_args(JavaThread* thread, int bytes_to_save, void* start_address))
  1055   thread->popframe_preserve_args(in_ByteSize(bytes_to_save), start_address);
  1057 JRT_END
  1060 #ifdef COMPILER2
  1061 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index, TRAPS) {
  1062   // in case of an unresolved klass entry, load the class.
  1063   if (constant_pool->tag_at(index).is_unresolved_klass()) {
  1064     klassOop tk = constant_pool->klass_at(index, CHECK);
   1065     return;
   1066   }
  1068   if (!constant_pool->tag_at(index).is_symbol()) return;
  1070   Handle class_loader (THREAD, instanceKlass::cast(constant_pool->pool_holder())->class_loader());
  1071   symbolHandle symbol (THREAD, constant_pool->symbol_at(index));
  1073   // class name?
  1074   if (symbol->byte_at(0) != '(') {
  1075     Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
  1076     SystemDictionary::resolve_or_null(symbol, class_loader, protection_domain, CHECK);
   1077     return;
   1078   }
  1080   // then it must be a signature!
  1081   for (SignatureStream ss(symbol); !ss.is_done(); ss.next()) {
  1082     if (ss.is_object()) {
  1083       symbolOop s = ss.as_symbol(CHECK);
  1084       symbolHandle class_name (THREAD, s);
  1085       Handle protection_domain (THREAD, Klass::cast(constant_pool->pool_holder())->protection_domain());
   1086       SystemDictionary::resolve_or_null(class_name, class_loader, protection_domain, CHECK);
   1087     }
   1088   }
   1089 }
  1092 void Deoptimization::load_class_by_index(constantPoolHandle constant_pool, int index) {
  1093   EXCEPTION_MARK;
  1094   load_class_by_index(constant_pool, index, THREAD);
  1095   if (HAS_PENDING_EXCEPTION) {
  1096     // Exception happened during classloading. We ignore the exception here, since it
   1097     // is going to be rethrown since the current activation is going to be deoptimized and
  1098     // the interpreter will re-execute the bytecode.
   1099     CLEAR_PENDING_EXCEPTION;
   1100   }
   1101 }
  1103 JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint trap_request)) {
  1104   HandleMark hm;
  1106   // uncommon_trap() is called at the beginning of the uncommon trap
  1107   // handler. Note this fact before we start generating temporary frames
  1108   // that can confuse an asynchronous stack walker. This counter is
  1109   // decremented at the end of unpack_frames().
  1110   thread->inc_in_deopt_handler();
  1112   // We need to update the map if we have biased locking.
  1113   RegisterMap reg_map(thread, UseBiasedLocking);
  1114   frame stub_frame = thread->last_frame();
  1115   frame fr = stub_frame.sender(&reg_map);
  1116   // Make sure the calling nmethod is not getting deoptimized and removed
  1117   // before we are done with it.
  1118   nmethodLocker nl(fr.pc());
   1120   {
   1121     ResourceMark rm;
  1123     // Revoke biases of any monitors in the frame to ensure we can migrate them
  1124     revoke_biases_of_monitors(thread, fr, &reg_map);
  1126     DeoptReason reason = trap_request_reason(trap_request);
  1127     DeoptAction action = trap_request_action(trap_request);
  1128     jint unloaded_class_index = trap_request_index(trap_request); // CP idx or -1
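            // Illustration (hypothetical decode): the single jint trap_request
            // carries all three pieces; one request might unpack as
            //   reason == Reason_class_check, action == Action_maybe_recompile,
            //   unloaded_class_index == -1 (no constant-pool entry to load).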
  1130     Events::log("Uncommon trap occurred @" INTPTR_FORMAT " unloaded_class_index = %d", fr.pc(), (int) trap_request);
  1131     vframe*  vf  = vframe::new_vframe(&fr, &reg_map, thread);
  1132     compiledVFrame* cvf = compiledVFrame::cast(vf);
  1134     nmethod* nm = cvf->code();
  1136     ScopeDesc*      trap_scope  = cvf->scope();
  1137     methodHandle    trap_method = trap_scope->method();
  1138     int             trap_bci    = trap_scope->bci();
  1139     Bytecodes::Code trap_bc     = Bytecode_at(trap_method->bcp_from(trap_bci))->java_code();
  1141     // Record this event in the histogram.
  1142     gather_statistics(reason, action, trap_bc);
  1144     // Ensure that we can record deopt. history:
  1145     bool create_if_missing = ProfileTraps;
  1147     methodDataHandle trap_mdo
  1148       (THREAD, get_method_data(thread, trap_method, create_if_missing));
  1150     // Print a bunch of diagnostics, if requested.
  1151     if (TraceDeoptimization || LogCompilation) {
  1152       ResourceMark rm;
  1153       ttyLocker ttyl;
  1154       char buf[100];
  1155       if (xtty != NULL) {
  1156         xtty->begin_head("uncommon_trap thread='" UINTX_FORMAT"' %s",
  1157                          os::current_thread_id(),
  1158                          format_trap_request(buf, sizeof(buf), trap_request));
   1159         nm->log_identity(xtty);
   1160       }
  1161       symbolHandle class_name;
  1162       bool unresolved = false;
  1163       if (unloaded_class_index >= 0) {
  1164         constantPoolHandle constants (THREAD, trap_method->constants());
  1165         if (constants->tag_at(unloaded_class_index).is_unresolved_klass()) {
  1166           class_name = symbolHandle(THREAD,
  1167             constants->klass_name_at(unloaded_class_index));
  1168           unresolved = true;
  1169           if (xtty != NULL)
  1170             xtty->print(" unresolved='1'");
  1171         } else if (constants->tag_at(unloaded_class_index).is_symbol()) {
  1172           class_name = symbolHandle(THREAD,
   1173             constants->symbol_at(unloaded_class_index));
   1174         }
  1175         if (xtty != NULL)
   1176           xtty->name(class_name);
   1177       }
  1178       if (xtty != NULL && trap_mdo.not_null()) {
  1179         // Dump the relevant MDO state.
  1180         // This is the deopt count for the current reason, any previous
  1181         // reasons or recompiles seen at this point.
  1182         int dcnt = trap_mdo->trap_count(reason);
  1183         if (dcnt != 0)
  1184           xtty->print(" count='%d'", dcnt);
  1185         ProfileData* pdata = trap_mdo->bci_to_data(trap_bci);
  1186         int dos = (pdata == NULL)? 0: pdata->trap_state();
  1187         if (dos != 0) {
  1188           xtty->print(" state='%s'", format_trap_state(buf, sizeof(buf), dos));
  1189           if (trap_state_is_recompiled(dos)) {
  1190             int recnt2 = trap_mdo->overflow_recompile_count();
  1191             if (recnt2 != 0)
   1192               xtty->print(" recompiles2='%d'", recnt2);
   1193           }
   1194         }
   1195       }
  1196       if (xtty != NULL) {
  1197         xtty->stamp();
   1198         xtty->end_head();
   1199       }
  1200       if (TraceDeoptimization) {  // make noise on the tty
  1201         tty->print("Uncommon trap occurred in");
  1202         nm->method()->print_short_name(tty);
  1203         tty->print(" (@" INTPTR_FORMAT ") thread=%d reason=%s action=%s unloaded_class_index=%d",
  1204                    fr.pc(),
  1205                    (int) os::current_thread_id(),
  1206                    trap_reason_name(reason),
  1207                    trap_action_name(action),
  1208                    unloaded_class_index);
  1209         if (class_name.not_null()) {
  1210           tty->print(unresolved ? " unresolved class: " : " symbol: ");
   1211           class_name->print_symbol_on(tty);
   1212         }
   1213         tty->cr();
   1214       }
  1215       if (xtty != NULL) {
  1216         // Log the precise location of the trap.
  1217         for (ScopeDesc* sd = trap_scope; ; sd = sd->sender()) {
  1218           xtty->begin_elem("jvms bci='%d'", sd->bci());
  1219           xtty->method(sd->method());
  1220           xtty->end_elem();
   1221           if (sd->is_top())  break;
   1222         }
   1223         xtty->tail("uncommon_trap");
   1224       }
   1225     }
  1226     // (End diagnostic printout.)
  1228     // Load class if necessary
  1229     if (unloaded_class_index >= 0) {
  1230       constantPoolHandle constants(THREAD, trap_method->constants());
   1231       load_class_by_index(constants, unloaded_class_index);
   1232     }
  1234     // Flush the nmethod if necessary and desirable.
  1235     //
  1236     // We need to avoid situations where we are re-flushing the nmethod
  1237     // because of a hot deoptimization site.  Repeated flushes at the same
  1238     // point need to be detected by the compiler and avoided.  If the compiler
  1239     // cannot avoid them (or has a bug and "refuses" to avoid them), this
  1240     // module must take measures to avoid an infinite cycle of recompilation
  1241     // and deoptimization.  There are several such measures:
  1242     //
  1243     //   1. If a recompilation is ordered a second time at some site X
  1244     //   and for the same reason R, the action is adjusted to 'reinterpret',
  1245     //   to give the interpreter time to exercise the method more thoroughly.
  1246     //   If this happens, the method's overflow_recompile_count is incremented.
  1247     //
  1248     //   2. If the compiler fails to reduce the deoptimization rate, then
  1249     //   the method's overflow_recompile_count will begin to exceed the set
  1250     //   limit PerBytecodeRecompilationCutoff.  If this happens, the action
  1251     //   is adjusted to 'make_not_compilable', and the method is abandoned
  1252     //   to the interpreter.  This is a performance hit for hot methods,
  1253     //   but is better than a disastrous infinite cycle of recompilations.
  1254     //   (Actually, only the method containing the site X is abandoned.)
  1255     //
  1256     //   3. In parallel with the previous measures, if the total number of
  1257     //   recompilations of a method exceeds the much larger set limit
  1258     //   PerMethodRecompilationCutoff, the method is abandoned.
  1259     //   This should only happen if the method is very large and has
  1260     //   many "lukewarm" deoptimizations.  The code which enforces this
  1261     //   limit is elsewhere (class nmethod, class methodOopDesc).
  1262     //
  1263     // Note that the per-BCI 'is_recompiled' bit gives the compiler one chance
  1264     // to recompile at each bytecode independently of the per-BCI cutoff.
  1265     //
  1266     // The decision to update code is up to the compiler, and is encoded
  1267     // in the Action_xxx code.  If the compiler requests Action_none
  1268     // no trap state is changed, no compiled code is changed, and the
  1269     // computation suffers along in the interpreter.
  1270     //
  1271     // The other action codes specify various tactics for decompilation
  1272     // and recompilation.  Action_maybe_recompile is the loosest, and
  1273     // allows the compiled code to stay around until enough traps are seen,
  1274     // and until the compiler gets around to recompiling the trapping method.
  1275     //
  1276     // The other actions cause immediate removal of the present code.
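    // Illustrative summary of the switch below (not an exhaustive policy table):
    //   Action_none                -> keep the old code; do not even update trap state
    //   Action_maybe_recompile     -> tolerate the old code, but let a new compile begin
    //   Action_reinterpret         -> make the nmethod not entrant and reset counters,
    //                                 so the interpreter re-profiles before recompiling
    //   Action_make_not_entrant    -> make the nmethod not entrant; recompile on next call
    //   Action_make_not_compilable -> also mark the method not compilable at all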
  1278     bool update_trap_state = true;
  1279     bool make_not_entrant = false;
  1280     bool make_not_compilable = false;
  1281     bool reset_counters = false;
  1282     switch (action) {
  1283     case Action_none:
  1284       // Keep the old code.
  1285       update_trap_state = false;
  1286       break;
  1287     case Action_maybe_recompile:
  1288       // Do not need to invalidate the present code, but we can
  1289       // initiate another compilation of the method.
  1290       // Start the compiler without (necessarily) invalidating the nmethod.
  1291       // The system will tolerate the old code, but new code should be
  1292       // generated when possible.
  1293       break;
  1294     case Action_reinterpret:
  1295       // Go back into the interpreter for a while, and then consider
  1296       // recompiling from scratch.
  1297       make_not_entrant = true;
  1298       // Reset invocation counter for the outermost method.
  1299       // This will allow the interpreter to exercise the bytecodes
  1300       // for a while before recompiling.
  1301       // By contrast, Action_make_not_entrant is immediate.
  1302       //
  1303       // Note that the compiler will track null_check, null_assert,
  1304       // range_check, and class_check events and log them as if they
  1305       // had been traps taken from compiled code.  This will update
  1306       // the MDO trap history so that the next compilation will
  1307       // properly detect hot trap sites.
  1308       reset_counters = true;
  1309       break;
  1310     case Action_make_not_entrant:
  1311       // Request immediate recompilation, and get rid of the old code.
  1312       // Make the method not entrant, so the next call triggers a
  1313       // recompile.  Any unloaded classes have been loaded by now, so the
  1314       // recompile can happen before the next call.  Same for uninitialized
  1315       // classes.  The interpreter will link the missing class, if any.
  1316       make_not_entrant = true;
  1317       break;
  1318     case Action_make_not_compilable:
  1319       // Give up on compiling this method at all.
  1320       make_not_entrant = true;
  1321       make_not_compilable = true;
  1322       break;
  1323     default:
  1324       ShouldNotReachHere();
  1327     // Setting +ProfileTraps fixes the following, on all platforms:
  1328     // 4852688: ProfileInterpreter is off by default for ia64.  The result is
  1329     // infinite heroic-opt-uncommon-trap/deopt/recompile cycles, since the
  1330     // recompile relies on a methodDataOop to record heroic opt failures.
  1332     // Whether the interpreter is producing MDO data or not, we also need
  1333     // to use the MDO to detect hot deoptimization points and control
  1334     // aggressive optimization.
  1335     if (ProfileTraps && update_trap_state && trap_mdo.not_null()) {
  1336       assert(trap_mdo() == get_method_data(thread, trap_method, false), "sanity");
  1337       uint this_trap_count = 0;
  1338       bool maybe_prior_trap = false;
  1339       bool maybe_prior_recompile = false;
  1340       ProfileData* pdata
  1341         = query_update_method_data(trap_mdo, trap_bci, reason,
  1342                                    //outputs:
  1343                                    this_trap_count,
  1344                                    maybe_prior_trap,
  1345                                    maybe_prior_recompile);
  1346       // Because the interpreter also counts null, div0, range, and class
  1347       // checks, these traps from compiled code are double-counted.
  1348       // This is harmless; it just means that the PerXTrapLimit values
  1349       // are in effect a little smaller than they look.
  1351       DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  1352       if (per_bc_reason != Reason_none) {
  1353         // Now take action based on the partially known per-BCI history.
  1354         if (maybe_prior_trap
  1355             && this_trap_count >= (uint)PerBytecodeTrapLimit) {
  1356           // If there are too many traps at this BCI, force a recompile.
  1357           // This will allow the compiler to see the limit overflow, and
  1358           // take corrective action, if possible.  The compiler generally
  1359           // does not use the exact PerBytecodeTrapLimit value, but instead
  1360           // changes its tactics if it sees any traps at all.  This provides
  1361           // a little hysteresis, delaying a recompile until a trap happens
  1362           // several times.
  1363           //
  1364           // Actually, since there is only one bit of counter per BCI,
  1365           // the possible per-BCI counts are {0,1,(per-method count)}.
  1366           // This produces accurate results if in fact there is only
  1367           // one hot trap site, but begins to get fuzzy if there are
  1368           // many sites.  For example, if there are ten sites each
  1369           // trapping two or more times, they each get the blame for
  1370           // all of their traps.
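          // Illustrative scenario (the limit value is hypothetical): with
          // PerBytecodeTrapLimit=4, a BCI that already shows a prior trap and
          // whose count for this reason has reached 4 forces the nmethod not
          // entrant here, so the next compile sees the saturated per-BCI state
          // and can steer around the trapping optimization.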
  1371           make_not_entrant = true;
  1374         // Detect repeated recompilation at the same BCI, and enforce a limit.
  1375         if (make_not_entrant && maybe_prior_recompile) {
  1376           // More than one recompile at this point.
  1377           trap_mdo->inc_overflow_recompile_count();
  1378           if (maybe_prior_trap
  1379               && ((uint)trap_mdo->overflow_recompile_count()
  1380                   > (uint)PerBytecodeRecompilationCutoff)) {
  1381             // Give up on the method containing the bad BCI.
  1382             if (trap_method() == nm->method()) {
  1383               make_not_compilable = true;
  1384             } else {
  1385               trap_method->set_not_compilable();
  1386               // But give grace to the enclosing nm->method().
  1390       } else {
  1391         // For reasons which are not recorded per-bytecode, we simply
  1392         // force recompiles unconditionally.
  1393         // (Note that PerMethodRecompilationCutoff is enforced elsewhere.)
  1394         make_not_entrant = true;
  1397       // Go back to the compiler if there are too many traps in this method.
  1398       if (this_trap_count >= (uint)PerMethodTrapLimit) {
  1399         // If there are too many traps in this method, force a recompile.
  1400         // This will allow the compiler to see the limit overflow, and
  1401         // take corrective action, if possible.
  1402         // (This condition is an unlikely backstop only, because the
  1403         // PerBytecodeTrapLimit is more likely to take effect first,
  1404         // if it is applicable.)
  1405         make_not_entrant = true;
  1408       // Here's more hysteresis:  If there has been a recompile at
  1409       // this trap point already, run the method in the interpreter
  1410       // for a while to exercise it more thoroughly.
  1411       if (make_not_entrant && maybe_prior_recompile && maybe_prior_trap) {
  1412         reset_counters = true;
  1415       if (make_not_entrant && pdata != NULL) {
  1416         // Record the recompilation event, if any.
  1417         int tstate0 = pdata->trap_state();
  1418         int tstate1 = trap_state_set_recompiled(tstate0, true);
  1419         if (tstate1 != tstate0)
  1420           pdata->set_trap_state(tstate1);
  1424     // Take requested actions on the method:
  1426     // Reset invocation counters
  1427     if (reset_counters) {
  1428       if (nm->is_osr_method())
  1429         reset_invocation_counter(trap_scope, CompileThreshold);
  1430       else
  1431         reset_invocation_counter(trap_scope);
  1434     // Recompile
  1435     if (make_not_entrant) {
  1436       nm->make_not_entrant();
  1439     // Give up compiling
  1440     if (make_not_compilable) {
  1441       assert(make_not_entrant, "consistent");
  1442       nm->method()->set_not_compilable();
  1445   } // Free marked resources
  1448 JRT_END
  1450 methodDataOop
  1451 Deoptimization::get_method_data(JavaThread* thread, methodHandle m,
  1452                                 bool create_if_missing) {
  1453   Thread* THREAD = thread;
  1454   methodDataOop mdo = m()->method_data();
  1455   if (mdo == NULL && create_if_missing && !HAS_PENDING_EXCEPTION) {
  1456     // Build an MDO.  Ignore errors like OutOfMemory;
  1457     // that simply means we won't have an MDO to update.
  1458     methodOopDesc::build_interpreter_method_data(m, THREAD);
  1459     if (HAS_PENDING_EXCEPTION) {
  1460       assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
  1461       CLEAR_PENDING_EXCEPTION;
  1463     mdo = m()->method_data();
  1465   return mdo;
  1468 ProfileData*
  1469 Deoptimization::query_update_method_data(methodDataHandle trap_mdo,
  1470                                          int trap_bci,
  1471                                          Deoptimization::DeoptReason reason,
  1472                                          //outputs:
  1473                                          uint& ret_this_trap_count,
  1474                                          bool& ret_maybe_prior_trap,
  1475                                          bool& ret_maybe_prior_recompile) {
  1476   uint prior_trap_count = trap_mdo->trap_count(reason);
  1477   uint this_trap_count  = trap_mdo->inc_trap_count(reason);
  1479   // If the runtime cannot find a place to store trap history,
  1480   // it is estimated based on the general condition of the method.
  1481   // If the method has ever been recompiled, or has ever incurred
  1482   // a trap with the present reason, then this BCI is assumed
  1483   // (pessimistically) to be the culprit.
  1484   bool maybe_prior_trap      = (prior_trap_count != 0);
  1485   bool maybe_prior_recompile = (trap_mdo->decompile_count() != 0);
  1486   ProfileData* pdata = NULL;
  1489   // For reasons which are recorded per bytecode, we check per-BCI data.
  1490   DeoptReason per_bc_reason = reason_recorded_per_bytecode_if_any(reason);
  1491   if (per_bc_reason != Reason_none) {
  1492     // Find the profile data for this BCI.  If there isn't one,
  1493     // try to allocate one from the MDO's set of spares.
  1494     // This will let us detect a repeated trap at this point.
  1495     pdata = trap_mdo->allocate_bci_to_data(trap_bci);
  1497     if (pdata != NULL) {
  1498       // Query the trap state of this profile datum.
  1499       int tstate0 = pdata->trap_state();
  1500       if (!trap_state_has_reason(tstate0, per_bc_reason))
  1501         maybe_prior_trap = false;
  1502       if (!trap_state_is_recompiled(tstate0))
  1503         maybe_prior_recompile = false;
  1505       // Update the trap state of this profile datum.
  1506       int tstate1 = tstate0;
  1507       // Record the reason.
  1508       tstate1 = trap_state_add_reason(tstate1, per_bc_reason);
  1509       // Store the updated state on the MDO, for next time.
  1510       if (tstate1 != tstate0)
  1511         pdata->set_trap_state(tstate1);
  1512     } else {
  1513       if (LogCompilation && xtty != NULL)
  1514         // Missing MDP?  Leave a small complaint in the log.
  1515         xtty->elem("missing_mdp bci='%d'", trap_bci);
  1519   // Return results:
  1520   ret_this_trap_count = this_trap_count;
  1521   ret_maybe_prior_trap = maybe_prior_trap;
  1522   ret_maybe_prior_recompile = maybe_prior_recompile;
  1523   return pdata;
  1526 void
  1527 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  1528   ResourceMark rm;
  1529   // Ignored outputs:
  1530   uint ignore_this_trap_count;
  1531   bool ignore_maybe_prior_trap;
  1532   bool ignore_maybe_prior_recompile;
  1533   query_update_method_data(trap_mdo, trap_bci,
  1534                            (DeoptReason)reason,
  1535                            ignore_this_trap_count,
  1536                            ignore_maybe_prior_trap,
  1537                            ignore_maybe_prior_recompile);
  1540 void Deoptimization::reset_invocation_counter(ScopeDesc* trap_scope, jint top_count) {
  1541   ScopeDesc* sd = trap_scope;
  1542   for (; !sd->is_top(); sd = sd->sender()) {
  1543     // Reset invocation counters of inlined methods, since they can trigger compilations too.
  1544     sd->method()->invocation_counter()->reset();
  1546   InvocationCounter* c = sd->method()->invocation_counter();
  1547   if (top_count != _no_count) {
  1548     // It was an OSR method, so bump the count higher.
  1549     c->set(c->state(), top_count);
  1550   } else {
  1551     c->reset();
  1553   sd->method()->backedge_counter()->reset();
  1556 Deoptimization::UnrollBlock* Deoptimization::uncommon_trap(JavaThread* thread, jint trap_request) {
  1558   // Still in Java; no safepoints yet
  1560     // This enters VM and may safepoint
  1561     uncommon_trap_inner(thread, trap_request);
  1563   return fetch_unroll_info_helper(thread);
  1566 // Local derived constants.
  1567 // Further breakdown of DataLayout::trap_state, as promised by DataLayout.
  1568 const int DS_REASON_MASK   = DataLayout::trap_mask >> 1;
  1569 const int DS_RECOMPILE_BIT = DataLayout::trap_mask - DS_REASON_MASK;
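// Illustrative layout, assuming DataLayout::trap_mask is a contiguous low-bit
// mask such as 0xFF (the real width is whatever DataLayout::trap_bits says):
//   DS_REASON_MASK   = 0x7F   // low bits: one DeoptReason, or Reason_many when saturated
//   DS_RECOMPILE_BIT = 0x80   // top bit: a recompile has already been requested here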
  1571 //---------------------------trap_state_reason---------------------------------
  1572 Deoptimization::DeoptReason
  1573 Deoptimization::trap_state_reason(int trap_state) {
  1574   // This assert provides the link between the width of DataLayout::trap_bits
  1575   // and the encoding of "recorded" reasons.  It ensures there are enough
  1576   // bits to store all needed reasons in the per-BCI MDO profile.
  1577   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  1578   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1579   trap_state -= recompile_bit;
  1580   if (trap_state == DS_REASON_MASK) {
  1581     return Reason_many;
  1582   } else {
  1583     assert((int)Reason_none == 0, "state=0 => Reason_none");
  1584     return (DeoptReason)trap_state;
  1587 //-------------------------trap_state_has_reason-------------------------------
  1588 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  1589   assert(reason_is_recorded_per_bytecode((DeoptReason)reason), "valid reason");
  1590   assert(DS_REASON_MASK >= Reason_RECORDED_LIMIT, "enough bits");
  1591   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1592   trap_state -= recompile_bit;
  1593   if (trap_state == DS_REASON_MASK) {
  1594     return -1;  // true, unspecifically (bottom of state lattice)
  1595   } else if (trap_state == reason) {
  1596     return 1;   // true, definitely
  1597   } else if (trap_state == 0) {
  1598     return 0;   // false, definitely (top of state lattice)
  1599   } else {
  1600     return 0;   // false, definitely
  1603 //-------------------------trap_state_add_reason-------------------------------
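// Worked example of the reason lattice (values assume the illustrative 0xFF
// layout above; the recompile bit is preserved by every transition):
//   add_reason(0,          null_check)  -> null_check       (first reason recorded)
//   add_reason(null_check, null_check)  -> null_check       (condition already true)
//   add_reason(null_check, range_check) -> DS_REASON_MASK   (Reason_many: lattice bottom)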
  1604 int Deoptimization::trap_state_add_reason(int trap_state, int reason) {
  1605   assert(reason_is_recorded_per_bytecode((DeoptReason)reason) || reason == Reason_many, "valid reason");
  1606   int recompile_bit = (trap_state & DS_RECOMPILE_BIT);
  1607   trap_state -= recompile_bit;
  1608   if (trap_state == DS_REASON_MASK) {
  1609     return trap_state + recompile_bit;     // already at state lattice bottom
  1610   } else if (trap_state == reason) {
  1611     return trap_state + recompile_bit;     // the condition is already true
  1612   } else if (trap_state == 0) {
  1613     return reason + recompile_bit;          // no condition has yet been true
  1614   } else {
  1615     return DS_REASON_MASK + recompile_bit;  // fall to state lattice bottom
  1618 //-----------------------trap_state_is_recompiled------------------------------
  1619 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  1620   return (trap_state & DS_RECOMPILE_BIT) != 0;
  1622 //-----------------------trap_state_set_recompiled-----------------------------
  1623 int Deoptimization::trap_state_set_recompiled(int trap_state, bool z) {
  1624   if (z)  return trap_state |  DS_RECOMPILE_BIT;
  1625   else    return trap_state & ~DS_RECOMPILE_BIT;
  1627 //---------------------------format_trap_state---------------------------------
  1628 // This is used for debugging and diagnostics, including hotspot.log output.
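// Example outputs: "class_check", "class_check recompiled", or "#23" when the
// state value does not re-encode cleanly.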
  1629 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
  1630                                               int trap_state) {
  1631   DeoptReason reason      = trap_state_reason(trap_state);
  1632   bool        recomp_flag = trap_state_is_recompiled(trap_state);
  1633   // Re-encode the state from its decoded components.
  1634   int decoded_state = 0;
  1635   if (reason_is_recorded_per_bytecode(reason) || reason == Reason_many)
  1636     decoded_state = trap_state_add_reason(decoded_state, reason);
  1637   if (recomp_flag)
  1638     decoded_state = trap_state_set_recompiled(decoded_state, recomp_flag);
  1639   // If the state re-encodes properly, format it symbolically.
  1640   // Because this routine is used for debugging and diagnostics,
  1641   // be robust even if the state is a strange value.
  1642   size_t len;
  1643   if (decoded_state != trap_state) {
  1644     // Random buggy state that doesn't decode??
  1645     len = jio_snprintf(buf, buflen, "#%d", trap_state);
  1646   } else {
  1647     len = jio_snprintf(buf, buflen, "%s%s",
  1648                        trap_reason_name(reason),
  1649                        recomp_flag ? " recompiled" : "");
  1651   if (len >= buflen)
  1652     buf[buflen-1] = '\0';
  1653   return buf;
  1657 //--------------------------------statics--------------------------------------
  1658 Deoptimization::DeoptAction Deoptimization::_unloaded_action
  1659   = Deoptimization::Action_reinterpret;
  1660 const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = {
  1661   // Note:  Keep this in sync. with enum DeoptReason.
  1662   "none",
  1663   "null_check",
  1664   "null_assert",
  1665   "range_check",
  1666   "class_check",
  1667   "array_check",
  1668   "intrinsic",
  1669   "unloaded",
  1670   "uninitialized",
  1671   "unreached",
  1672   "unhandled",
  1673   "constraint",
  1674   "div0_check",
  1675   "age",
  1676   "predicate"
  1677 };
  1678 const char* Deoptimization::_trap_action_name[Action_LIMIT] = {
  1679   // Note:  Keep this in sync. with enum DeoptAction.
  1680   "none",
  1681   "maybe_recompile",
  1682   "reinterpret",
  1683   "make_not_entrant",
  1684   "make_not_compilable"
  1685 };
  1687 const char* Deoptimization::trap_reason_name(int reason) {
  1688   if (reason == Reason_many)  return "many";
  1689   if ((uint)reason < Reason_LIMIT)
  1690     return _trap_reason_name[reason];
  1691   static char buf[20];
  1692   sprintf(buf, "reason%d", reason);
  1693   return buf;
  1695 const char* Deoptimization::trap_action_name(int action) {
  1696   if ((uint)action < Action_LIMIT)
  1697     return _trap_action_name[action];
  1698   static char buf[20];
  1699   sprintf(buf, "action%d", action);
  1700   return buf;
  1703 // This is used for debugging and diagnostics, including hotspot.log output.
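// Example outputs: "reason='null_check' action='make_not_entrant'", or with an
// unloaded-class index present, "reason='unloaded' action='reinterpret' index='5'"
// (the index value here is just a placeholder).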
  1704 const char* Deoptimization::format_trap_request(char* buf, size_t buflen,
  1705                                                 int trap_request) {
  1706   jint unloaded_class_index = trap_request_index(trap_request);
  1707   const char* reason = trap_reason_name(trap_request_reason(trap_request));
  1708   const char* action = trap_action_name(trap_request_action(trap_request));
  1709   size_t len;
  1710   if (unloaded_class_index < 0) {
  1711     len = jio_snprintf(buf, buflen, "reason='%s' action='%s'",
  1712                        reason, action);
  1713   } else {
  1714     len = jio_snprintf(buf, buflen, "reason='%s' action='%s' index='%d'",
  1715                        reason, action, unloaded_class_index);
  1717   if (len >= buflen)
  1718     buf[buflen-1] = '\0';
  1719   return buf;
  1722 juint Deoptimization::_deoptimization_hist
  1723         [Deoptimization::Reason_LIMIT]
  1724     [1 + Deoptimization::Action_LIMIT]
  1725         [Deoptimization::BC_CASE_LIMIT]
  1726   = {0};
  1728 enum {
  1729   LSB_BITS = 8,
  1730   LSB_MASK = right_n_bits(LSB_BITS)
  1731 };
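// Each histogram cell packs two fields: the low LSB_BITS bits remember which
// bytecode the cell is devoted to, and the remaining high bits hold the event
// count, so adding (1 << LSB_BITS) below bumps the count without disturbing
// the bytecode tag.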
  1733 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
  1734                                        Bytecodes::Code bc) {
  1735   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  1736   assert(action >= 0 && action < Action_LIMIT, "oob");
  1737   _deoptimization_hist[Reason_none][0][0] += 1;  // total
  1738   _deoptimization_hist[reason][0][0]      += 1;  // per-reason total
  1739   juint* cases = _deoptimization_hist[reason][1+action];
  1740   juint* bc_counter_addr = NULL;
  1741   juint  bc_counter      = 0;
  1742   // Look for an unused counter, or an exact match to this BC.
  1743   if (bc != Bytecodes::_illegal) {
  1744     for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
  1745       juint* counter_addr = &cases[bc_case];
  1746       juint  counter = *counter_addr;
  1747       if ((counter == 0 && bc_counter_addr == NULL)
  1748           || (Bytecodes::Code)(counter & LSB_MASK) == bc) {
  1749         // this counter is either free or is already devoted to this BC
  1750         bc_counter_addr = counter_addr;
  1751         bc_counter = counter | bc;
  1755   if (bc_counter_addr == NULL) {
  1756     // Overflow, or no given bytecode.
  1757     bc_counter_addr = &cases[BC_CASE_LIMIT-1];
  1758     bc_counter = (*bc_counter_addr & ~LSB_MASK);  // clear LSB
  1760   *bc_counter_addr = bc_counter + (1 << LSB_BITS);
  1763 jint Deoptimization::total_deoptimization_count() {
  1764   return _deoptimization_hist[Reason_none][0][0];
  1767 jint Deoptimization::deoptimization_count(DeoptReason reason) {
  1768   assert(reason >= 0 && reason < Reason_LIMIT, "oob");
  1769   return _deoptimization_hist[reason][0][0];
  1772 void Deoptimization::print_statistics() {
  1773   juint total = total_deoptimization_count();
  1774   juint account = total;
  1775   if (total != 0) {
  1776     ttyLocker ttyl;
  1777     if (xtty != NULL)  xtty->head("statistics type='deoptimization'");
  1778     tty->print_cr("Deoptimization traps recorded:");
  1779     #define PRINT_STAT_LINE(name, r) \
  1780       tty->print_cr("  %4d (%4.1f%%) %s", (int)(r), ((r) * 100.0) / total, name);
  1781     PRINT_STAT_LINE("total", total);
  1782     // For each non-zero entry in the histogram, print the reason,
  1783     // the action, and (if specifically known) the type of bytecode.
  1784     for (int reason = 0; reason < Reason_LIMIT; reason++) {
  1785       for (int action = 0; action < Action_LIMIT; action++) {
  1786         juint* cases = _deoptimization_hist[reason][1+action];
  1787         for (int bc_case = 0; bc_case < BC_CASE_LIMIT; bc_case++) {
  1788           juint counter = cases[bc_case];
  1789           if (counter != 0) {
  1790             char name[1*K];
  1791             Bytecodes::Code bc = (Bytecodes::Code)(counter & LSB_MASK);
  1792             if (bc_case == BC_CASE_LIMIT-1 && (int)bc == 0)  // overflow slot, no BC recorded
  1793               bc = Bytecodes::_illegal;
  1794             sprintf(name, "%s/%s/%s",
  1795                     trap_reason_name(reason),
  1796                     trap_action_name(action),
  1797                     Bytecodes::is_defined(bc)? Bytecodes::name(bc): "other");
  1798             juint r = counter >> LSB_BITS;
  1799             tty->print_cr("  %40s: " UINT32_FORMAT " (%.1f%%)", name, r, (r * 100.0) / total);
  1800             account -= r;
  1805     if (account != 0) {
  1806       PRINT_STAT_LINE("unaccounted", account);
  1808     #undef PRINT_STAT_LINE
  1809     if (xtty != NULL)  xtty->tail("statistics");
  1812 #else // COMPILER2
  1815 // Stubs for a C1-only system.
  1816 bool Deoptimization::trap_state_is_recompiled(int trap_state) {
  1817   return false;
  1820 const char* Deoptimization::trap_reason_name(int reason) {
  1821   return "unknown";
  1824 void Deoptimization::print_statistics() {
  1825   // no output
  1828 void
  1829 Deoptimization::update_method_data_from_interpreter(methodDataHandle trap_mdo, int trap_bci, int reason) {
  1830   // no update
  1833 int Deoptimization::trap_state_has_reason(int trap_state, int reason) {
  1834   return 0;
  1837 void Deoptimization::gather_statistics(DeoptReason reason, DeoptAction action,
  1838                                        Bytecodes::Code bc) {
  1839   // no update
  1842 const char* Deoptimization::format_trap_state(char* buf, size_t buflen,
  1843                                               int trap_state) {
  1844   jio_snprintf(buf, buflen, "#%d", trap_state);
  1845   return buf;
  1848 #endif // COMPILER2
