src/share/vm/runtime/vframeArray.cpp

author:      coleenp
date:        Sat, 01 Sep 2012 13:25:18 -0400
changeset:   4037:da91efe96a93
parent:      3969:1d7922586cf6
child:       4535:9fae07c31641
permissions: -rw-r--r--

6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/vmSymbols.hpp"
#include "interpreter/bytecode.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiThreadState.hpp"
#include "prims/methodHandles.hpp"   // for MethodHandles::has_member_arg, used in unpack_to_stack
#include "runtime/handles.inline.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframeArray.hpp"
#include "runtime/vframe_hp.hpp"
#include "utilities/events.hpp"
#ifdef COMPILER2
#include "opto/runtime.hpp"
#endif
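
// Returns 0 for the special SynchronizationEntryBCI marker (deoptimization while still in
// the prologue of a synchronized method, before any bytecode has executed), otherwise _bci.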
int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }

void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
     MonitorChunk* chunk = _monitors;
     _monitors = NULL;
     jt->remove_monitor_chunk(chunk);
     delete chunk;
  }
}

void vframeArrayElement::fill_in(compiledVFrame* vf) {

// Copy the information from the compiled vframe to the
// interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci    = vf->raw_bci();
  _reexecute = vf->should_reexecute();

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());
      monitor->lock()->move_to(monitor->owner(), dest->lock());
    }
  }
  // Convert the vframe locals and expressions to off-stack
  // values. Because we will not gc, all oops can be converted to
  // intptr_t (i.e. a stack slot) and we are fine. This is
  // good since we are inside a HandleMark and the oops in our
  // collection would go away between packing them here and
  // unpacking them in unpack_on_stack.

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection
  // in order to get the size, then copies the values and
  // converts the types to intptr_t-sized slots. Seems like it
  // could do it in place... Still uses less memory than the
  // old way though.

  StackValueCollection *locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for(index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local.  Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection *exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for(index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch(value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element.  Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}
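
// Counts interpreter frames as they are unpacked; referenced by the
// TraceDeoptimization output at the end of unpack_on_stack().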
int unpack_counter = 0;
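
// Fill in one skeletal interpreter frame with the state captured in this element:
// pick the bytecode pointer and continuation pc, lay out the activation, then
// restore the monitors, expression stack and locals recorded by fill_in().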
void vframeArrayElement::unpack_on_stack(int caller_actual_parameters,
                                         int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { //reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER).  If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  assert(thread->deopt_nmethod() != NULL, "nmethod should be known");
  guarantee(!(thread->deopt_nmethod()->is_compiled_by_c2() &&
              *bcp == Bytecodes::_monitorenter             &&
              exec_mode == Deoptimization::Unpack_exception),
            "shouldn't get exception during monitorenter");

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState *state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. c++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#else
     // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
#endif
    } else {
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc  = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 caller_actual_parameters,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert (!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
  if (ProfileInterpreter) {
    MethodData* mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for(i = 0; i < expressions()->size(); i++) {
    StackValue *value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead stack slot.  Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Unpack the locals
  for(i = 0; i < locals()->size(); i++) {
    StackValue *value = locals()->at(i);
    intptr_t* addr  = iframe()->interpreter_frame_local_at(i);
    switch(value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead location. If it is an oop then we need a NULL to prevent GC from following it
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*Interpreter::stackElementWords,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_jbytes(saved_args,
                            base,
                            popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (TraceDeoptimization && Verbose) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size     %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print("     ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(method(), bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
  }
#endif // PRODUCT
  // The expression stack and locals are in the resource area; don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes.

  _locals = _expressions = NULL;

}
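
// Size (in words) of the interpreter frame that unpack_on_stack() will build for
// this element; the arguments mirror those passed to Interpreter::layout_activation().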
int vframeArrayElement::on_stack_size(int caller_actual_parameters,
                                      int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      caller_actual_parameters,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame);
}
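
// Allocate the vframeArray and its trailing vframeArrayElements in one heap block.
// sizeof(vframeArray) already accounts for one embedded element, hence the
// chunk->length() - 1 in the variable part below.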
vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     mtCompiler);
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map);
  return result;
}

void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map) {
  // Set owner first, it is used when adding monitor chunks

  _frame_size = frame_size;
  for(int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i));
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for(int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half.  Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack!  This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
        intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      } else {
        //      jint* src = (jint*) reg_map->location(VMReg::Name(i));
        //      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
        //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}

void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode, int caller_actual_parameters) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  //  This routine fills in the missing data for the skeletal interpreter frames
  //  in the above picture.

  // Find the skeletal interpreter frames to unpack into
  JavaThread* THREAD = JavaThread::current();
  RegisterMap map(THREAD, false);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++ ) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee
  // Unpack the frames from the oldest (frames() -1) to the youngest (0)
  frame caller_frame = me;
  for (index = frames() - 1; index >= 0 ; index--) {
    vframeArrayElement* elem = element(index);  // caller
    int callee_parameters, callee_locals;
    if (index == 0) {
      callee_parameters = callee_locals = 0;
    } else {
      methodHandle caller = elem->method();
      methodHandle callee = element(index - 1)->method();
      Bytecode_invoke inv(caller, elem->bci());
      // invokedynamic instructions don't have a class but obviously don't have a MemberName appendix.
      // NOTE:  Use machinery here that avoids resolving of any kind.
      const bool has_member_arg =
          !inv.is_invokedynamic() && MethodHandles::has_member_arg(inv.klass(), inv.name());
      callee_parameters = callee->size_of_parameters() + (has_member_arg ? 1 : 0);
      callee_locals     = callee->max_locals();
    }
    elem->unpack_on_stack(caller_actual_parameters,
                          callee_parameters,
                          callee_locals,
                          &caller_frame,
                          index == 0,
                          exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(elem->iframe(), this);
    }
    caller_frame = *elem->iframe();
    caller_actual_parameters = callee_parameters;
  }
  deallocate_monitor_chunks();
}
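
// Free the off-stack monitor chunks created in vframeArrayElement::fill_in(); their
// contents have already been copied back into the interpreter frames by unpack_on_stack().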
void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++ ) {
     element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT

bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif
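
// Address of the slot where the value of callee-saved register i was stashed by fill_in().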
address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}


#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st)  {
  st->print_cr(" - sp: " INTPTR_FORMAT, sp());
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames() ; index++ ) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}


#endif
