/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_VFRAME_HPP
#define SHARE_VM_RUNTIME_VFRAME_HPP

#include "code/debugInfo.hpp"
#include "code/debugInfoRec.hpp"
#include "code/location.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/stackValue.hpp"
#include "runtime/stackValueCollection.hpp"
#include "utilities/growableArray.hpp"

// vframes are virtual stack frames representing source level activations.
// A single frame may hold several source level activations in the case of
// optimized code. The debugging information stored with the optimized code
// enables us to unfold a frame as a stack of vframes.
// A cVFrame represents an activation of a non-java method.

// The vframe inheritance hierarchy:
// - vframe
//   - javaVFrame
//     - interpretedVFrame
//     - compiledVFrame     ; (used for both compiled Java methods and native stubs)
//   - externalVFrame
//     - entryVFrame        ; special frame created when calling Java from C

// - BasicLock

class vframe: public ResourceObj {
 protected:
  frame        _fr;      // Raw frame behind the virtual frame.
  RegisterMap  _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
  JavaThread*  _thread;  // The thread owning the raw frame.

  vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
  vframe(const frame* fr, JavaThread* thread);

 public:
  // Factory method for creating vframes
  static vframe* new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread);

  // Accessors
  frame     fr() const { return _fr; }
  CodeBlob* cb() const { return _fr.cb(); }
  nmethod*  nm() const {
    assert(cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // ???? Does this need to be a copy?
  frame* frame_pointer() { return &_fr; }
  const RegisterMap* register_map() const { return &_reg_map; }
  JavaThread* thread() const { return _thread; }

  // Returns the sender vframe
  virtual vframe* sender() const;

  // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
  javaVFrame* java_sender() const;

  // Answers if this is the top vframe in the frame, i.e., if the sender vframe
  // is in the caller frame
  virtual bool is_top() const { return true; }

  // Returns top vframe within same frame (see is_top())
  virtual vframe* top() const;

  // Type testing operations
  virtual bool is_entry_frame()       const { return false; }
  virtual bool is_java_frame()        const { return false; }
  virtual bool is_interpreted_frame() const { return false; }
  virtual bool is_compiled_frame()    const { return false; }

#ifndef PRODUCT
  // printing operations
  virtual void print_value() const;
  virtual void print();
#endif
};


class javaVFrame: public vframe {
 public:
  // JVM state
  virtual Method*                      method()      const = 0;
  virtual int                          bci()         const = 0;
  virtual StackValueCollection*        locals()      const = 0;
  virtual StackValueCollection*        expressions() const = 0;
  // the order returned by monitors() is from oldest -> youngest (see bug 4418568)
  virtual GrowableArray<MonitorInfo*>* monitors()    const = 0;

  // Debugging support via JVMTI.
  // NOTE that this is not guaranteed to give correct results for compiled vframes.
  // Deoptimize first if necessary.
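  //
  // Illustrative sketch only (not compiled here) of the intended pattern for a
  // JVMTI-style local-variable update. It assumes the usual HotSpot helpers
  // declared elsewhere (e.g. Deoptimization::deoptimize_frame() and
  // StackValueCollection::set_int_at()); 'slot' and 'new_value' are placeholders.
  //
  //   javaVFrame* jvf = ...;                 // the activation being updated
  //   if (jvf->is_compiled_frame()) {
  //     Deoptimization::deoptimize_frame(jvf->thread(), jvf->fr().id());
  //     // ... re-walk the stack so 'jvf' refers to the now-interpreted frame ...
  //   }
  //   StackValueCollection* locals = jvf->locals();
  //   locals->set_int_at(slot, new_value);
  //   jvf->set_locals(locals);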
  virtual void set_locals(StackValueCollection* values) const = 0;

  // Test operation
  bool is_java_frame() const { return true; }

 protected:
  javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
  javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}

 public:
  // casting
  static javaVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_java_frame(), "must be java frame");
    return (javaVFrame*) vf;
  }

  // Return an array of monitors locked by this frame in the youngest to oldest order
  GrowableArray<MonitorInfo*>* locked_monitors();

  // printing used during stack dumps
  void print_lock_info_on(outputStream* st, int frame_count);
  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }

#ifndef PRODUCT
 public:
  // printing operations
  void print();
  void print_value() const;
  void print_activation(int index) const;

  // verify operations
  virtual void verify() const;

  // Structural compare
  bool structural_compare(javaVFrame* other);
#endif
  friend class vframe;
};

class interpretedVFrame: public javaVFrame {
 public:
  // JVM state
  Method*                      method()      const;
  int                          bci()         const;
  StackValueCollection*        locals()      const;
  StackValueCollection*        expressions() const;
  GrowableArray<MonitorInfo*>* monitors()    const;

  void set_locals(StackValueCollection* values) const;

  // Test operation
  bool is_interpreted_frame() const { return true; }

 protected:
  interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {}

 public:
  // Accessors for Byte Code Pointer
  u_char* bcp() const;
  void set_bcp(u_char* bcp);

  // casting
  static interpretedVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
    return (interpretedVFrame*) vf;
  }

 private:
  static const int bcp_offset;
  intptr_t* locals_addr_at(int offset) const;
  StackValueCollection* stack_data(bool expressions) const;
  // returns where the parameters start relative to the frame pointer
  int start_of_parameters() const;

#ifndef PRODUCT
 public:
  // verify operations
  void verify() const;
#endif
  friend class vframe;
};


class externalVFrame: public vframe {
 protected:
  externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}

#ifndef PRODUCT
 public:
  // printing operations
  void print_value() const;
  void print();
#endif
  friend class vframe;
};

class entryVFrame: public externalVFrame {
 public:
  bool is_entry_frame() const { return true; }

 protected:
  entryVFrame(const frame* fr,
              const RegisterMap* reg_map,
              JavaThread* thread);

 public:
  // casting
  static entryVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
    return (entryVFrame*) vf;
  }

#ifndef PRODUCT
 public:
  // printing
  void print_value() const;
  void print();
#endif
  friend class vframe;
};


// A MonitorInfo is a ResourceObj that describes the pair:
// 1) the owner of the monitor
// 2) the monitor lock
class MonitorInfo : public ResourceObj {
 private:
  oop        _owner;       // the object owning the monitor
  BasicLock* _lock;
  oop        _owner_klass; // klass (mirror) if owner was scalar replaced
  bool       _eliminated;
  bool       _owner_is_scalar_replaced;
 public:
  // Constructor
  MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) {
    if (!owner_is_scalar_replaced) {
      _owner = owner;
      _owner_klass = NULL;
    } else {
      assert(eliminated, "monitor should be eliminated for scalar replaced object");
      _owner = NULL;
      _owner_klass = owner;
    }
    _lock = lock;
    _eliminated = eliminated;
    _owner_is_scalar_replaced = owner_is_scalar_replaced;
  }
  // Accessors
  oop owner() const {
    assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object");
    return _owner;
  }
  oop owner_klass() const {
    assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object");
    return _owner_klass;
  }
  BasicLock* lock()               const { return _lock; }
  bool eliminated()               const { return _eliminated; }
  bool owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; }
};
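
// The vframeStream classes below provide a stream-style iterator over the
// Java-level activations of a thread. A minimal usage sketch (illustrative
// only, not compiled here; assumes the caller holds a ResourceMark and that
// the target thread's stack is walkable, e.g. at a safepoint or for the
// current thread):
//
//   ResourceMark rm;
//   for (vframeStream vfst(thread); !vfst.at_end(); vfst.next()) {
//     Method* m   = vfst.method();
//     int     bci = vfst.bci();
//     // ... inspect m and bci, e.g. m->name_and_sig_as_C_string() ...
//   }
//
// next() walks inlined (compiled) activations before moving to the physical
// sender frame, so each iteration corresponds to one source-level activation.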

class vframeStreamCommon : StackObj {
 protected:
  // common
  frame        _frame;
  JavaThread*  _thread;
  RegisterMap  _reg_map;
  enum { interpreted_mode, compiled_mode, at_end_mode } _mode;

  int _sender_decode_offset;

  // Cached information
  Method* _method;
  int     _bci;

  // Should VM activations be ignored or not
  bool _stop_at_java_call_stub;

  bool fill_in_compiled_inlined_sender();
  void fill_from_compiled_frame(int decode_offset);
  void fill_from_compiled_native_frame();

  void found_bad_method_frame();

  void fill_from_interpreter_frame();
  bool fill_from_frame();

  // Helper routine for security_get_caller_frame
  void skip_prefixed_method_and_wrappers();

 public:
  // Constructor
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }

  // Accessors
  Method*   method()   const { return _method; }
  int       bci()      const { return _bci; }
  intptr_t* frame_id() const { return _frame.id(); }
  address   frame_pc() const { return _frame.pc(); }

  CodeBlob* cb() const { return _frame.cb(); }
  nmethod*  nm() const {
    assert(cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame()       const { return _frame.is_entry_frame(); }

  // Iteration
  void next() {
    // handle frames with inlining
    if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

    // handle general case
    do {
      _frame = _frame.sender(&_reg_map);
    } while (!fill_from_frame());
  }
  void security_next();

  bool at_end() const { return _mode == at_end_mode; }

  // Implements security traversal. Skips 'depth' frames, including
  // special security frames and prefixed native methods.
  void security_get_caller_frame(int depth);

  // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
  // reflection implementation
  void skip_reflection_related_frames();
};

class vframeStream : public vframeStreamCommon {
 public:
  // Constructors
  vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false)
    : vframeStreamCommon(thread) {
    _stop_at_java_call_stub = stop_at_java_call_stub;

    if (!thread->has_last_Java_frame()) {
      _mode = at_end_mode;
      return;
    }

    _frame = _thread->last_frame();
    while (!fill_from_frame()) {
      _frame = _frame.sender(&_reg_map);
    }
  }

  // top_frame may not be at safepoint, start with sender
  vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
};


inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
  if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
    return false;
  }
  fill_from_compiled_frame(_sender_decode_offset);
  return true;
}


inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
    // or if we read some at other crazy offset,
    // we will decode garbage and make wild references into the heap,
    // leading to crashes in product mode.
    // (This isn't airtight, of course, since there are internal
    // offsets which are also crazy.)
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode. (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}

// The native frames are handled specially. We do not rely on ScopeDesc info
// since the pc might not be exact due to the _last_native_pc trick.
inline void vframeStreamCommon::fill_from_compiled_native_frame() {
  _mode = compiled_mode;
  _sender_decode_offset = DebugInformationRecorder::serialized_null;
  _method = nm()->method();
  _bci = 0;
}

inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_nmethod()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be imprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting). If however the thread is safepoint safe, this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.

        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety;
        // if state were, say, in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc, since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java) {
          // This will get a method, a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // we could produce a bad stack chain.
          // However, until we see evidence that allowing this causes
          // frames bad enough to produce segvs or assertion failures,
          // we don't do it: while we may get a bad call chain, the
          // probability is much higher (by several orders of magnitude)
          // that we get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  return false;
}


inline void vframeStreamCommon::fill_from_interpreter_frame() {
  Method*  method = _frame.interpreter_frame_method();
  intptr_t bcx    = _frame.interpreter_frame_bcx();
  int      bci    = method->validate_bci_from_bcx(bcx);
  // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
  if (bci < 0) {
    found_bad_method_frame();
    bci = 0;  // pretend it's on the point of entering
  }
  _mode   = interpreted_mode;
  _method = method;
  _bci    = bci;
}

#endif // SHARE_VM_RUNTIME_VFRAME_HPP