/*
 * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_vframeArray.cpp.incl"


int vframeArrayElement::bci(void) const { return (_bci == SynchronizationEntryBCI ? 0 : _bci); }

void vframeArrayElement::free_monitors(JavaThread* jt) {
  if (_monitors != NULL) {
    MonitorChunk* chunk = _monitors;
    _monitors = NULL;
    jt->remove_monitor_chunk(chunk);
    delete chunk;
  }
}

void vframeArrayElement::fill_in(compiledVFrame* vf) {

  // Copy the information from the compiled vframe to the
  // interpreter frame we will be creating to replace vf

  _method = vf->method();
  _bci = vf->raw_bci();
  _reexecute = vf->should_reexecute();

  int index;

  // Get the monitors off-stack

  GrowableArray<MonitorInfo*>* list = vf->monitors();
  if (list->is_empty()) {
    _monitors = NULL;
  } else {

    // Allocate monitor chunk
    _monitors = new MonitorChunk(list->length());
    vf->thread()->add_monitor_chunk(_monitors);

    // Migrate the BasicLocks from the stack to the monitor chunk
    for (index = 0; index < list->length(); index++) {
      MonitorInfo* monitor = list->at(index);
      assert(!monitor->owner_is_scalar_replaced(), "object should be reallocated already");
      assert(monitor->owner() == NULL || (!monitor->owner()->is_unlocked() && !monitor->owner()->has_bias_pattern()), "object must be null or locked, and unbiased");
      BasicObjectLock* dest = _monitors->at(index);
      dest->set_obj(monitor->owner());
      monitor->lock()->move_to(monitor->owner(), dest->lock());
    }
  }

  // Convert the vframe locals and expressions to off-stack
  // values. Because we will not gc, all oops can be converted to
  // intptr_t (i.e. a stack slot) and we are fine. This is
  // good since we are inside a HandleMark and the oops in our
  // collection would go away between packing them here and
  // unpacking them in unpack_on_stack.
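  //
  // Note: StackValue::get_obj() returns a Handle; the extra "()" in
  // value->get_obj()() below dereferences that Handle to obtain the raw oop,
  // which is then stored in an intptr_t-sized slot.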

  // First the locals go off-stack

  // FIXME: this seems silly; it creates a StackValueCollection
  // just to get the size, then copies the values and
  // converts the types to intptr_t-sized slots. Seems like it
  // could do it in place... Still uses less memory than the
  // old way though.

  StackValueCollection* locs = vf->locals();
  _locals = new StackValueCollection(locs->size());
  for (index = 0; index < locs->size(); index++) {
    StackValue* value = locs->at(index);
    switch (value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead local. Will be initialized to null/zero.
        _locals->add( new StackValue());
        break;
      case T_INT:
        _locals->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }

  // Now the expressions off-stack
  // Same silliness as above

  StackValueCollection* exprs = vf->expressions();
  _expressions = new StackValueCollection(exprs->size());
  for (index = 0; index < exprs->size(); index++) {
    StackValue* value = exprs->at(index);
    switch (value->type()) {
      case T_OBJECT:
        assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
        // preserve object type
        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
        break;
      case T_CONFLICT:
        // A dead stack element. Will be initialized to null/zero.
        // This can occur when the compiler emits a state in which stack
        // elements are known to be dead (because of an imminent exception).
        _expressions->add( new StackValue());
        break;
      case T_INT:
        _expressions->add( new StackValue(value->get_int()));
        break;
      default:
        ShouldNotReachHere();
    }
  }
}

int unpack_counter = 0;

void vframeArrayElement::unpack_on_stack(int callee_parameters,
                                         int callee_locals,
                                         frame* caller,
                                         bool is_top_frame,
                                         int exec_mode) {
  JavaThread* thread = (JavaThread*) Thread::current();

  // Look at bci and decide on bcp and continuation pc
  address bcp;
  // C++ interpreter doesn't need a pc since it will figure out what to do when it
  // begins execution
  address pc;
  bool use_next_mdp = false; // true if we should use the mdp associated with the next bci
                             // rather than the one associated with bcp
  if (raw_bci() == SynchronizationEntryBCI) {
    // We are deoptimizing while hanging in prologue code for synchronized method
    bcp = method()->bcp_from(0); // first byte code
    pc  = Interpreter::deopt_entry(vtos, 0); // step = 0 since we don't skip current bytecode
  } else if (should_reexecute()) { // reexecute this bytecode
    assert(is_top_frame, "reexecute allowed only for the top frame");
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_reexecute_entry(method(), bcp);
  } else {
    bcp = method()->bcp_from(bci());
    pc  = Interpreter::deopt_continue_after_entry(method(), bcp, callee_parameters, is_top_frame);
    use_next_mdp = true;
  }
  assert(Bytecodes::is_defined(*bcp), "must be a valid bytecode");

  // Monitorenter and pending exceptions:
  //
  // For Compiler2, there should be no pending exception when deoptimizing at monitorenter
  // because there is no safepoint at the null pointer check (it is either handled explicitly
  // or prior to the monitorenter) and asynchronous exceptions are not made "pending" by the
  // runtime interface for the slow case (see JRT_ENTRY_FOR_MONITORENTER). If an asynchronous
  // exception was processed, the bytecode pointer would have to be extended one bytecode beyond
  // the monitorenter to place it in the proper exception range.
  //
  // For Compiler1, deoptimization can occur while throwing a NullPointerException at monitorenter,
  // in which case bcp should point to the monitorenter since it is within the exception's range.
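  //
  // The assert and the COMPILER2-only guarantee below check these invariants.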

  assert(*bcp != Bytecodes::_monitorenter || is_top_frame, "a _monitorenter must be a top frame");
  // TIERED Must know the compiler of the deoptee QQQ
  COMPILER2_PRESENT(guarantee(*bcp != Bytecodes::_monitorenter || exec_mode != Deoptimization::Unpack_exception,
                              "shouldn't get exception during monitorenter");)

  int popframe_preserved_args_size_in_bytes = 0;
  int popframe_preserved_args_size_in_words = 0;
  if (is_top_frame) {
    JvmtiThreadState* state = thread->jvmti_thread_state();
    if (JvmtiExport::can_pop_frame() &&
        (thread->has_pending_popframe() || thread->popframe_forcing_deopt_reexecution())) {
      if (thread->has_pending_popframe()) {
        // Pop top frame after deoptimization
#ifndef CC_INTERP
        pc = Interpreter::remove_activation_preserving_args_entry();
#else
        // Do an uncommon trap type entry. C++ interpreter will know
        // to pop frame and preserve the args
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
#endif
      } else {
        // Reexecute invoke in top frame
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
        // Note: the PopFrame-related extension of the expression stack size is done in
        // Deoptimization::fetch_unroll_info_helper
        popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
      }
    } else if (JvmtiExport::can_force_early_return() && state != NULL && state->is_earlyret_pending()) {
      // Force early return from top frame after deoptimization
#ifndef CC_INTERP
      pc = Interpreter::remove_activation_early_entry(state->earlyret_tos());
#else
      // TBD: Need to implement ForceEarlyReturn for CC_INTERP (ia64)
#endif
    } else {
      // Possibly override the previous pc computation of the top (youngest) frame
      switch (exec_mode) {
      case Deoptimization::Unpack_deopt:
        // use what we've got
        break;
      case Deoptimization::Unpack_exception:
        // exception is pending
        pc = SharedRuntime::raw_exception_handler_for_return_address(thread, pc);
        // [phh] We're going to end up in some handler or other, so it doesn't
        // matter what mdp we point to.  See exception_handler_for_exception()
        // in interpreterRuntime.cpp.
        break;
      case Deoptimization::Unpack_uncommon_trap:
      case Deoptimization::Unpack_reexecute:
        // redo last byte code
        pc = Interpreter::deopt_entry(vtos, 0);
        use_next_mdp = false;
        break;
      default:
        ShouldNotReachHere();
      }
    }
  }

  // Setup the interpreter frame

  assert(method() != NULL, "method must exist");
  int temps = expressions()->size();

  int locks = monitors() == NULL ?
      0 : monitors()->number_of_monitors();

  Interpreter::layout_activation(method(),
                                 temps + callee_parameters,
                                 popframe_preserved_args_size_in_words,
                                 locks,
                                 callee_parameters,
                                 callee_locals,
                                 caller,
                                 iframe(),
                                 is_top_frame);

  // Update the pc in the frame object and overwrite the temporary pc
  // we placed in the skeletal frame now that we finally know the
  // exact interpreter address we should use.

  _frame.patch_pc(thread, pc);

  assert(!method()->is_synchronized() || locks > 0, "synchronized methods must have monitors");

  BasicObjectLock* top = iframe()->interpreter_frame_monitor_begin();
  for (int index = 0; index < locks; index++) {
    top = iframe()->previous_monitor_in_interpreter_frame(top);
    BasicObjectLock* src = _monitors->at(index);
    top->set_obj(src->obj());
    src->lock()->move_to(src->obj(), top->lock());
  }
  if (ProfileInterpreter) {
    iframe()->interpreter_frame_set_mdx(0); // clear out the mdp.
  }
  iframe()->interpreter_frame_set_bcx((intptr_t)bcp); // cannot use bcp because frame is not initialized yet
  if (ProfileInterpreter) {
    methodDataOop mdo = method()->method_data();
    if (mdo != NULL) {
      int bci = iframe()->interpreter_frame_bci();
      if (use_next_mdp) ++bci;
      address mdp = mdo->bci_to_dp(bci);
      iframe()->interpreter_frame_set_mdp(mdp);
    }
  }

  // Unpack expression stack
  // If this is an intermediate frame (i.e. not top frame) then this
  // only unpacks the part of the expression stack not used by callee
  // as parameters. The callee parameters are unpacked as part of the
  // callee locals.
  int i;
  for (i = 0; i < expressions()->size(); i++) {
    StackValue* value = expressions()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_expression_stack_at(i);
    switch (value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead stack slot. Initialize to null in case it is an oop.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
    if (TaggedStackInterpreter) {
      // Write tag to the stack
      iframe()->interpreter_frame_set_expression_stack_tag(i,
                                  frame::tag_for_basic_type(value->type()));
    }
  }


  // Unpack the locals
  for (i = 0; i < locals()->size(); i++) {
    StackValue* value = locals()->at(i);
    intptr_t*   addr  = iframe()->interpreter_frame_local_at(i);
    switch (value->type()) {
      case T_INT:
        *addr = value->get_int();
        break;
      case T_OBJECT:
        *addr = value->get_int(T_OBJECT);
        break;
      case T_CONFLICT:
        // A dead location.
        // If it is an oop, we need a NULL to prevent GC from following it.
        *addr = NULL_WORD;
        break;
      default:
        ShouldNotReachHere();
    }
    if (TaggedStackInterpreter) {
      // Write tag to stack
      iframe()->interpreter_frame_set_local_tag(i,
                                  frame::tag_for_basic_type(value->type()));
    }
  }

  if (is_top_frame && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution()) {
    // An interpreted frame was popped but it returns to a deoptimized
    // frame. The incoming arguments to the interpreted activation
    // were preserved in thread-local storage by the
    // remove_activation_preserving_args_entry in the interpreter; now
    // we put them back into the just-unpacked interpreter frame.
    // Note that this assumes that the locals arena grows toward lower
    // addresses.
    if (popframe_preserved_args_size_in_words != 0) {
      void* saved_args = thread->popframe_preserved_args();
      assert(saved_args != NULL, "must have been saved by interpreter");
#ifdef ASSERT
      int stack_words = Interpreter::stackElementWords();
      assert(popframe_preserved_args_size_in_words <=
             iframe()->interpreter_frame_expression_stack_size()*stack_words,
             "expression stack size should have been extended");
#endif // ASSERT
      int top_element = iframe()->interpreter_frame_expression_stack_size()-1;
      intptr_t* base;
      if (frame::interpreter_frame_expression_stack_direction() < 0) {
        base = iframe()->interpreter_frame_expression_stack_at(top_element);
      } else {
        base = iframe()->interpreter_frame_expression_stack();
      }
      Copy::conjoint_bytes(saved_args,
                           base,
                           popframe_preserved_args_size_in_bytes);
      thread->popframe_free_preserved_args();
    }
  }

#ifndef PRODUCT
  if (TraceDeoptimization && Verbose) {
    ttyLocker ttyl;
    tty->print_cr("[%d Interpreted Frame]", ++unpack_counter);
    iframe()->print_on(tty);
    RegisterMap map(thread);
    vframe* f = vframe::new_vframe(iframe(), &map, thread);
    f->print();

    tty->print_cr("locals size %d", locals()->size());
    tty->print_cr("expression size %d", expressions()->size());

    method()->print_value();
    tty->cr();
    // method()->print_codes();
  } else if (TraceDeoptimization) {
    tty->print(" ");
    method()->print_value();
    Bytecodes::Code code = Bytecodes::java_code_at(bcp);
    int bci = method()->bci_from(bcp);
    tty->print(" - %s", Bytecodes::name(code));
    tty->print(" @ bci %d ", bci);
    tty->print_cr("sp = " PTR_FORMAT, iframe()->sp());
  }
#endif // PRODUCT

  // The expression stack and locals are in the resource area; don't leave
  // a dangling pointer in the vframeArray we leave around for debug
  // purposes.

  _locals = _expressions = NULL;

}

int vframeArrayElement::on_stack_size(int callee_parameters,
                                      int callee_locals,
                                      bool is_top_frame,
                                      int popframe_extra_stack_expression_els) const {
  assert(method()->max_locals() == locals()->size(), "just checking");
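  // Note: this size computation should mirror the frame layout that
  // unpack_on_stack() requests via Interpreter::layout_activation(); both are
  // driven by the same inputs (expression stack temps plus callee parameters,
  // monitor count, callee parameters/locals, and the top-frame flag).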
  int locks = monitors() == NULL ? 0 : monitors()->number_of_monitors();
  int temps = expressions()->size();
  return Interpreter::size_activation(method(),
                                      temps + callee_parameters,
                                      popframe_extra_stack_expression_els,
                                      locks,
                                      callee_parameters,
                                      callee_locals,
                                      is_top_frame);
}



vframeArray* vframeArray::allocate(JavaThread* thread, int frame_size, GrowableArray<compiledVFrame*>* chunk,
                                   RegisterMap *reg_map, frame sender, frame caller, frame self) {

  // Allocate the vframeArray
  vframeArray * result = (vframeArray*) AllocateHeap(sizeof(vframeArray) + // fixed part
                                                     sizeof(vframeArrayElement) * (chunk->length() - 1), // variable part
                                                     "vframeArray::allocate");
  result->_frames = chunk->length();
  result->_owner_thread = thread;
  result->_sender = sender;
  result->_caller = caller;
  result->_original = self;
  result->set_unroll_block(NULL); // initialize it
  result->fill_in(thread, frame_size, chunk, reg_map);
  return result;
}

void vframeArray::fill_in(JavaThread* thread,
                          int frame_size,
                          GrowableArray<compiledVFrame*>* chunk,
                          const RegisterMap *reg_map) {
  // Set owner first, it is used when adding monitor chunks

  _frame_size = frame_size;
  for (int i = 0; i < chunk->length(); i++) {
    element(i)->fill_in(chunk->at(i));
  }

  // Copy registers for callee-saved registers
  if (reg_map != NULL) {
    for (int i = 0; i < RegisterMap::reg_count; i++) {
#ifdef AMD64
      // The register map has one entry for every int (32-bit value), so
      // 64-bit physical registers have two entries in the map, one for
      // each half.  Ignore the high halves of 64-bit registers, just like
      // frame::oopmapreg_to_location does.
      //
      // [phh] FIXME: this is a temporary hack!  This code *should* work
      // correctly w/o this hack, possibly by changing RegisterMap::pd_location
      // in frame_amd64.cpp and the values of the phantom high half registers
      // in amd64.ad.
      //      if (VMReg::Name(i) < SharedInfo::stack0 && is_even(i)) {
      intptr_t* src = (intptr_t*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
      //      } else {
      //        jint* src = (jint*) reg_map->location(VMReg::Name(i));
      //        _callee_registers[i] = src != NULL ? *src : NULL_WORD;
      //      }
#else
      jint* src = (jint*) reg_map->location(VMRegImpl::as_VMReg(i));
      _callee_registers[i] = src != NULL ? *src : NULL_WORD;
#endif
      if (src == NULL) {
        set_location_valid(i, false);
      } else {
        set_location_valid(i, true);
        jint* dst = (jint*) register_location(i);
        *dst = *src;
      }
    }
  }
}

void vframeArray::unpack_to_stack(frame &unpack_frame, int exec_mode) {
  // stack picture
  //   unpack_frame
  //   [new interpreter frames ] (frames are skeletal but walkable)
  //   caller_frame
  //
  // This routine fills in the missing data for the skeletal interpreter frames
  // in the above picture.
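  //
  // Unpacking is done in two passes: the first loop below records the skeletal
  // frame for each element (youngest to oldest), then the second loop fills
  // them in from the oldest to the youngest so that each frame is unpacked on
  // top of its already-unpacked caller.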

  // Find the skeletal interpreter frames to unpack into
  RegisterMap map(JavaThread::current(), false);
  // Get the youngest frame we will unpack (last to be unpacked)
  frame me = unpack_frame.sender(&map);
  int index;
  for (index = 0; index < frames(); index++) {
    *element(index)->iframe() = me;
    // Get the caller frame (possibly skeletal)
    me = me.sender(&map);
  }

  frame caller_frame = me;

  // Do the unpacking of interpreter frames; the frame at index 0 represents the top activation, so it has no callee

  // Unpack the frames from the oldest (frames() - 1) to the youngest (0)

  for (index = frames() - 1; index >= 0; index--) {
    int callee_parameters = index == 0 ? 0 : element(index-1)->method()->size_of_parameters();
    int callee_locals     = index == 0 ? 0 : element(index-1)->method()->max_locals();
    element(index)->unpack_on_stack(callee_parameters,
                                    callee_locals,
                                    &caller_frame,
                                    index == 0,
                                    exec_mode);
    if (index == frames() - 1) {
      Deoptimization::unwind_callee_save_values(element(index)->iframe(), this);
    }
    caller_frame = *element(index)->iframe();
  }


  deallocate_monitor_chunks();
}

void vframeArray::deallocate_monitor_chunks() {
  JavaThread* jt = JavaThread::current();
  for (int index = 0; index < frames(); index++) {
    element(index)->free_monitors(jt);
  }
}

#ifndef PRODUCT

bool vframeArray::structural_compare(JavaThread* thread, GrowableArray<compiledVFrame*>* chunk) {
  if (owner_thread() != thread) return false;
  int index = 0;
#if 0 // FIXME can't do this comparison

  // Compare only within vframe array.
  for (deoptimizedVFrame* vf = deoptimizedVFrame::cast(vframe_at(first_index())); vf; vf = vf->deoptimized_sender_or_null()) {
    if (index >= chunk->length() || !vf->structural_compare(chunk->at(index))) return false;
    index++;
  }
  if (index != chunk->length()) return false;
#endif

  return true;
}

#endif

address vframeArray::register_location(int i) const {
  assert(0 <= i && i < RegisterMap::reg_count, "index out of bounds");
  return (address) & _callee_registers[i];
}


#ifndef PRODUCT

// Printing

// Note: we cannot have print_on as const, as we allocate inside the method
void vframeArray::print_on_2(outputStream* st) {
  st->print_cr(" - sp: " INTPTR_FORMAT, sp());
  st->print(" - thread: ");
  Thread::current()->print();
  st->print_cr(" - frame size: %d", frame_size());
  for (int index = 0; index < frames(); index++) {
    element(index)->print(st);
  }
}

void vframeArrayElement::print(outputStream* st) {
  st->print_cr(" - interpreter_frame -> sp: " INTPTR_FORMAT, iframe()->sp());
}

void vframeArray::print_value_on(outputStream* st) const {
  st->print_cr("vframeArray [%d] ", frames());
}


#endif