src/share/vm/runtime/vframeArray.cpp

author       hseigel
date         Thu, 26 Sep 2013 10:25:02 -0400
changeset    5784:190899198332
parent       4727:0094485b46c7
child        6680:78bbf4d43a14
permissions  -rw-r--r--

7195622: CheckUnhandledOops has limited usefulness now
Summary: Enable CHECK_UNHANDLED_OOPS in fastdebug builds across all supported platforms.
Reviewed-by: coleenp, hseigel, dholmes, stefank, twisti, ihse, rdurbin
Contributed-by: lois.foltan@oracle.com

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
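
// bci to use when rebuilding the interpreter frame: a deopt taken while hanging
// in the prologue of a synchronized method is recorded with the
// SynchronizationEntryBCI sentinel and maps back to bci 0 (the first bytecode) here.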
int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }
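
// Hand the off-stack monitor chunk back to the owning thread and delete it.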
void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
    MonitorChunk* chunk = _monitors;
    _monitors = NULL;
    jt->remove_monitor_chunk(chunk);
    delete chunk;
  }
}

void vframeArrayElement::fill_in(compiledVFrame* vf) {

// Copy the information from the compiled vframe to the
// interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci    = vf->raw_bci();
  _reexecute = vf->should_reexecute();

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());
      monitor->lock()->move_to(monitor->owner(), dest->lock());
    }
  }

  // Convert the vframe locals and expressions to off-stack
  // values. Because we will not gc, all oops can be converted to
  // intptr_t (i.e. a stack slot) and we are fine. This is
  // good since we are inside a HandleMark and the oops in our
  // collection would go away between packing them here and
  // unpacking them in unpack_on_stack.

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection
  // in order to get the size, then copies the values and
  // converts the types to intptr_t-size slots. Seems like it
  // could do it in place... Still uses less memory than the
  // old way though.

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local.  Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element.  Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

int unpack_counter = 0;
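
// Fill in one skeletal interpreter frame that the deopt blob has pushed for this
// element: compute the bcp and continuation pc, lay out the activation, re-install
// the monitors, and copy the locals and expression stack back onto the stack.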
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         bool is_bottom_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { //reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_nmethod() != NULL, "nmethod should be known");
  guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter             &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. c++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#endif
    } else {
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc  = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame,
                                 is_bottom_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");
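
  // Move each BasicObjectLock from the off-stack monitor chunk back into the
  // monitor area of the freshly laid-out interpreter frame.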

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
  if (ProfileInterpreter) {
    MethodData* mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead stack slot.  Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (TraceDeoptimization && Verbose) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size     %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print("     ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area; don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes.

  _locals = _expressions = NULL;

}
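
// Compute (in words) the size of the interpreter frame that unpack_on_stack will
// build for this element, given the same callee/caller parameters.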
int vframeArrayElement::on_stack_size(int caller_actual_parameters,
                                      int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      bool is_bottom_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      caller_actual_parameters,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame,
                                      is_bottom_frame);
}
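
// Allocate a vframeArray in the C heap with one vframeArrayElement per compiled
// vframe in the chunk, record the frames involved in the deoptimization, and
// pack the chunk into the elements.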
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     mtCompiler);
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map);
  return result;
}
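
// Record the frame size, pack each compiled vframe into its element, and save the
// values of the callee-saved registers so they can be restored during the unwind.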
void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map) {
  // Set owner first, it is used when adding monitor chunks

  _frame_size = frame_size;
  for(int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i));
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for(int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half.  Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack!  This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
        intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      } else {
        //      jint* src = (jint*) reg_map->location(VMReg::Name(i));
        //      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}
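
// Unpack the array: walk the skeletal interpreter frames the deopt blob set up,
// oldest to youngest, and fill each one in from the corresponding element.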
void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  //  This routine fills in the missing data for the skeletal interpreter frames
  //  in the above picture.

  // Find the skeletal interpreter frames to unpack into
  JavaThread* THREAD = JavaThread::current();
  RegisterMap map(THREAD, false);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++ ) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
  // Unpack the frames from the oldest (frames() -1) to the youngest (0)
  frame* caller_frame = &me;
  for (index = frames() - 1; index >= 0 ; index--) {
    vframeArrayElement* elem = element(index);  // caller
    int callee_parameters, callee_locals;
    if (index == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller = elem->method();
      methodHandle callee = element(index - 1)->method();
      Bytecode_invoke inv(caller, elem->bci());
      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
      // NOTE:  Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals     = callee->max_locals();
    }
    elem->unpack_on_stack(caller_actual_parameters,
                          callee_parameters,
                          callee_locals,
                          caller_frame,
                          index == 0,
                          index == frames() - 1,
                          exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(elem->iframe(), this);
    }
    caller_frame = elem->iframe();
    caller_actual_parameters = callee_parameters;
  }
  deallocate_monitor_chunks();
}
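
// Free every element's off-stack monitor chunk now that the monitors have been
// migrated back into the interpreter frames.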
void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++ ) {
     element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT
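
// Debug-only sanity check that this vframeArray was built from the given chunk of
// compiled vframes; the element-wise comparison is currently disabled (see FIXME).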
bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif
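
// Address of the slot holding the saved value of callee-saved register i, used
// when the callee-saved values are restored during the unwind.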
address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}

#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st) {
  st->print_cr(" - sp: " INTPTR_FORMAT, sp());
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames() ; index++ ) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}

#endif
