Tue, 09 Oct 2012 10:09:34 -0700
7197424: update copyright year to match last edit in jdk8 hotspot repository
Summary: Update copyright year to 2012 for relevant files
Reviewed-by: dholmes, coleenp
duke@435 | 1 | /* |
coleenp@4037 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_RUNTIME_VFRAME_HPP |
stefank@2314 | 26 | #define SHARE_VM_RUNTIME_VFRAME_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "code/debugInfo.hpp" |
stefank@2314 | 29 | #include "code/debugInfoRec.hpp" |
stefank@2314 | 30 | #include "code/location.hpp" |
stefank@2314 | 31 | #include "oops/oop.hpp" |
stefank@2314 | 32 | #include "runtime/frame.hpp" |
stefank@2314 | 33 | #include "runtime/frame.inline.hpp" |
stefank@2314 | 34 | #include "runtime/stackValue.hpp" |
stefank@2314 | 35 | #include "runtime/stackValueCollection.hpp" |
stefank@2314 | 36 | #include "utilities/growableArray.hpp" |
stefank@2314 | 37 | |
duke@435 | 38 | // vframes are virtual stack frames representing source level activations. |
duke@435 | 39 | // A single frame may hold several source level activations in the case of |
duke@435 | 40 | // optimized code. The debugging information stored with the optimized code enables |
duke@435 | 41 | // us to unfold a frame as a stack of vframes. |
duke@435 | 42 | // A cVFrame represents an activation of a non-java method. |
duke@435 | 43 | |
duke@435 | 44 | // The vframe inheritance hierarchy: |
duke@435 | 45 | // - vframe |
duke@435 | 46 | // - javaVFrame |
duke@435 | 47 | // - interpretedVFrame |
duke@435 | 48 | // - compiledVFrame ; (used for both compiled Java methods and native stubs) |
duke@435 | 49 | // - externalVFrame |
duke@435 | 50 | // - entryVFrame ; special frame created when calling Java from C |
duke@435 | 51 | |
duke@435 | 52 | // - BasicLock |
duke@435 | 53 | |
// Base class of all virtual stack frames. A vframe is resource-allocated
// (ResourceObj), so it is only valid while the enclosing ResourceMark is live.
class vframe: public ResourceObj {
 protected:
  frame        _fr;      // Raw frame behind the virtual frame.
  RegisterMap  _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
  JavaThread*  _thread;  // The thread owning the raw frame.

  vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
  vframe(const frame* fr, JavaThread* thread);
 public:
  // Factory method for creating vframes
  static vframe* new_vframe(const frame* f, const RegisterMap *reg_map, JavaThread* thread);

  // Accessors
  frame     fr()   const { return _fr; }
  CodeBlob* cb()   const { return _fr.cb(); }
  // Only valid when the underlying CodeBlob is an nmethod (asserted).
  nmethod*  nm()   const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // ???? Does this need to be a copy?
  frame* frame_pointer() { return &_fr; }
  const RegisterMap* register_map() const { return &_reg_map; }
  JavaThread* thread() const { return _thread; }

  // Returns the sender vframe
  virtual vframe* sender() const;

  // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
  javaVFrame *java_sender() const;

  // Answers whether this is the top vframe in the frame, i.e., whether the
  // sender vframe is in the caller frame
  virtual bool is_top() const { return true; }

  // Returns top vframe within same frame (see is_top())
  virtual vframe* top() const;

  // Type testing operations (each subclass overrides the one that applies to it)
  virtual bool is_entry_frame()       const { return false; }
  virtual bool is_java_frame()        const { return false; }
  virtual bool is_interpreted_frame() const { return false; }
  virtual bool is_compiled_frame()    const { return false; }

#ifndef PRODUCT
  // printing operations
  virtual void print_value() const;
  virtual void print();
#endif
};
duke@435 | 104 | |
duke@435 | 105 | |
// Abstract base for vframes that represent an activation of a Java method
// (interpreted or compiled); exposes the JVM state of the activation.
class javaVFrame: public vframe {
 public:
  // JVM state
  virtual Method*                      method()      const = 0;
  virtual int                          bci()         const = 0;
  virtual StackValueCollection*        locals()      const = 0;
  virtual StackValueCollection*        expressions() const = 0;
  // the order returned by monitors() is from oldest -> youngest (see bug 4418568)
  virtual GrowableArray<MonitorInfo*>* monitors()    const = 0;

  // Debugging support via JVMTI.
  // NOTE that this is not guaranteed to give correct results for compiled vframes.
  // Deoptimize first if necessary.
  virtual void set_locals(StackValueCollection* values) const = 0;

  // Test operation
  bool is_java_frame() const { return true; }

 protected:
  javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
  javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}

 public:
  // casting (checked downcast; NULL passes through)
  static javaVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_java_frame(), "must be java frame");
    return (javaVFrame*) vf;
  }

  // Return an array of monitors locked by this frame in the youngest to oldest order
  GrowableArray<MonitorInfo*>* locked_monitors();

  // printing used during stack dumps
  void print_lock_info_on(outputStream* st, int frame_count);
  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }

#ifndef PRODUCT
 public:
  // printing operations
  void print();
  void print_value() const;
  void print_activation(int index) const;

  // verify operations
  virtual void verify() const;

  // Structural compare
  bool structural_compare(javaVFrame* other);
#endif
  friend class vframe;
};
duke@435 | 157 | |
// A javaVFrame for a method executing in the interpreter; JVM state is read
// directly out of the interpreter frame.
class interpretedVFrame: public javaVFrame {
 public:
  // JVM state
  Method*                      method()      const;
  int                          bci()         const;
  StackValueCollection*        locals()      const;
  StackValueCollection*        expressions() const;
  GrowableArray<MonitorInfo*>* monitors()    const;

  void set_locals(StackValueCollection* values) const;

  // Test operation
  bool is_interpreted_frame() const { return true; }

 protected:
  interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {};

 public:
  // Accessors for Byte Code Pointer
  u_char* bcp() const;
  void set_bcp(u_char* bcp);

  // casting (checked downcast; NULL passes through)
  static interpretedVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
    return (interpretedVFrame*) vf;
  }

 private:
  static const int bcp_offset;
  intptr_t* locals_addr_at(int offset) const;

  // returns where the parameters starts relative to the frame pointer
  int start_of_parameters() const;

#ifndef PRODUCT
 public:
  // verify operations
  void verify() const;
#endif
  friend class vframe;
};
duke@435 | 200 | |
duke@435 | 201 | |
// A vframe for an activation of a non-Java method (see the hierarchy comment
// at the top of this file); constructed only via new_vframe/subclasses.
class externalVFrame: public vframe {
 protected:
  externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}

#ifndef PRODUCT
 public:
  // printing operations
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
duke@435 | 214 | |
// Special vframe created when calling Java from C (see hierarchy comment above).
class entryVFrame: public externalVFrame {
 public:
  bool is_entry_frame() const { return true; }

 protected:
  entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);

 public:
  // casting (checked downcast; NULL passes through)
  static entryVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
    return (entryVFrame*) vf;
  }

#ifndef PRODUCT
 public:
  // printing
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
duke@435 | 237 | |
duke@435 | 238 | |
duke@435 | 239 | // A MonitorInfo is a ResourceObject that describes the pair: |
duke@435 | 240 | // 1) the owner of the monitor |
duke@435 | 241 | // 2) the monitor lock |
duke@435 | 242 | class MonitorInfo : public ResourceObj { |
duke@435 | 243 | private: |
duke@435 | 244 | oop _owner; // the object owning the monitor |
duke@435 | 245 | BasicLock* _lock; |
coleenp@4037 | 246 | oop _owner_klass; // klass (mirror) if owner was scalar replaced |
kvn@518 | 247 | bool _eliminated; |
kvn@1253 | 248 | bool _owner_is_scalar_replaced; |
duke@435 | 249 | public: |
duke@435 | 250 | // Constructor |
kvn@1253 | 251 | MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) { |
kvn@1253 | 252 | if (!owner_is_scalar_replaced) { |
kvn@1253 | 253 | _owner = owner; |
kvn@1253 | 254 | _owner_klass = NULL; |
kvn@1253 | 255 | } else { |
kvn@1253 | 256 | assert(eliminated, "monitor should be eliminated for scalar replaced object"); |
kvn@1253 | 257 | _owner = NULL; |
kvn@1253 | 258 | _owner_klass = owner; |
kvn@1253 | 259 | } |
duke@435 | 260 | _lock = lock; |
kvn@518 | 261 | _eliminated = eliminated; |
kvn@1253 | 262 | _owner_is_scalar_replaced = owner_is_scalar_replaced; |
duke@435 | 263 | } |
duke@435 | 264 | // Accessors |
kvn@1253 | 265 | oop owner() const { |
kvn@1253 | 266 | assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object"); |
kvn@1253 | 267 | return _owner; |
kvn@1253 | 268 | } |
coleenp@4037 | 269 | oop owner_klass() const { |
kvn@1253 | 270 | assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object"); |
coleenp@4037 | 271 | return _owner_klass; |
kvn@1253 | 272 | } |
duke@435 | 273 | BasicLock* lock() const { return _lock; } |
kvn@518 | 274 | bool eliminated() const { return _eliminated; } |
kvn@1253 | 275 | bool owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; } |
duke@435 | 276 | }; |
duke@435 | 277 | |
// Stack-walking iterator over the Java-level frames of a thread, unfolding
// inlined scopes of compiled frames. Stack-allocated (StackObj); advance with
// next(), stop when at_end() is true.
class vframeStreamCommon : StackObj {
 protected:
  // common
  frame        _frame;   // current raw frame
  JavaThread*  _thread;  // thread whose stack is being walked
  RegisterMap  _reg_map; // register map used when stepping to sender frames
  enum { interpreted_mode, compiled_mode, at_end_mode } _mode;

  int _sender_decode_offset; // scope-decode offset of next inlined scope (compiled_mode only)

  // Cached information for the current position
  Method* _method;
  int     _bci;

  // Should VM activations be ignored or not
  bool _stop_at_java_call_stub;

  bool fill_in_compiled_inlined_sender();
  void fill_from_compiled_frame(int decode_offset);
  void fill_from_compiled_native_frame();

  void found_bad_method_frame();

  void fill_from_interpreter_frame();
  bool fill_from_frame();

  // Helper routine for security_get_caller_frame
  void skip_prefixed_method_and_wrappers();

 public:
  // Constructor. NOTE: does not position the stream; subclasses (e.g.
  // vframeStream) set _frame/_mode.
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }

  // Accessors for the current position
  Method* method() const { return _method; }
  int bci() const { return _bci; }
  intptr_t* frame_id() const { return _frame.id(); }
  address frame_pc() const { return _frame.pc(); }

  CodeBlob* cb() const { return _frame.cb(); }
  // Only valid when the current CodeBlob is an nmethod (asserted).
  nmethod* nm() const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame() const { return _frame.is_entry_frame(); }

  // Iteration: advance to the next Java-level activation.
  void next() {
    // handle frames with inlining: stay in the same physical frame and
    // move to the next inlined scope if there is one
    if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

    // handle general case: step to sender frames until one yields state
    do {
      _frame = _frame.sender(&_reg_map);
    } while (!fill_from_frame());
  }

  bool at_end() const { return _mode == at_end_mode; }

  // Implements security traversal. Skips depth no. of frame including
  // special security frames and prefixed native methods
  void security_get_caller_frame(int depth);

  // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
  // reflection implementation
  void skip_reflection_related_frames();
};
duke@435 | 350 | |
duke@435 | 351 | class vframeStream : public vframeStreamCommon { |
duke@435 | 352 | public: |
duke@435 | 353 | // Constructors |
duke@435 | 354 | vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false) |
duke@435 | 355 | : vframeStreamCommon(thread) { |
duke@435 | 356 | _stop_at_java_call_stub = stop_at_java_call_stub; |
duke@435 | 357 | |
duke@435 | 358 | if (!thread->has_last_Java_frame()) { |
duke@435 | 359 | _mode = at_end_mode; |
duke@435 | 360 | return; |
duke@435 | 361 | } |
duke@435 | 362 | |
duke@435 | 363 | _frame = _thread->last_frame(); |
duke@435 | 364 | while (!fill_from_frame()) { |
duke@435 | 365 | _frame = _frame.sender(&_reg_map); |
duke@435 | 366 | } |
duke@435 | 367 | } |
duke@435 | 368 | |
duke@435 | 369 | // top_frame may not be at safepoint, start with sender |
duke@435 | 370 | vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false); |
duke@435 | 371 | }; |
duke@435 | 372 | |
duke@435 | 373 | |
duke@435 | 374 | inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() { |
duke@435 | 375 | if (_sender_decode_offset == DebugInformationRecorder::serialized_null) { |
duke@435 | 376 | return false; |
duke@435 | 377 | } |
duke@435 | 378 | fill_from_compiled_frame(_sender_decode_offset); |
duke@435 | 379 | return true; |
duke@435 | 380 | } |
duke@435 | 381 | |
duke@435 | 382 | |
// Positions the stream on the compiled scope at 'decode_offset' in the current
// nmethod's debug info, caching method/bci and the offset of the next inlined
// sender scope. Invalid offsets fall back to treating the frame like a native
// frame (method with bci 0, no inlining).
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
    // or if we read some at other crazy offset,
    // we will decode garbage and make wild references into the heap,
    // leading to crashes in product mode.
    // (This isn't airtight, of course, since there are internal
    // offsets which are also crazy.)
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    _frame.pc(), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
duke@435 | 422 | |
duke@435 | 423 | // The native frames are handled specially. We do not rely on ScopeDesc info |
duke@435 | 424 | // since the pc might not be exact due to the _last_native_pc trick. |
duke@435 | 425 | inline void vframeStreamCommon::fill_from_compiled_native_frame() { |
duke@435 | 426 | _mode = compiled_mode; |
duke@435 | 427 | _sender_decode_offset = DebugInformationRecorder::serialized_null; |
duke@435 | 428 | _method = nm()->method(); |
duke@435 | 429 | _bci = 0; |
duke@435 | 430 | } |
duke@435 | 431 | |
// Attempts to derive stream state (mode/method/bci) from the current _frame.
// Returns true when the frame yields a Java activation or marks the end of
// the walk; returns false for frames that should be skipped (the caller then
// steps to the sender frame and retries).
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_nmethod()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting. If however the thread is safepoint safe this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get a method a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  // Neither interpreted, compiled, nor end-of-stack: skip this frame.
  return false;
}
duke@435 | 509 | |
duke@435 | 510 | |
duke@435 | 511 | inline void vframeStreamCommon::fill_from_interpreter_frame() { |
coleenp@4037 | 512 | Method* method = _frame.interpreter_frame_method(); |
duke@435 | 513 | intptr_t bcx = _frame.interpreter_frame_bcx(); |
duke@435 | 514 | int bci = method->validate_bci_from_bcx(bcx); |
duke@435 | 515 | // 6379830 AsyncGetCallTrace sometimes feeds us wild frames. |
duke@435 | 516 | if (bci < 0) { |
duke@435 | 517 | found_bad_method_frame(); |
duke@435 | 518 | bci = 0; // pretend it's on the point of entering |
duke@435 | 519 | } |
duke@435 | 520 | _mode = interpreted_mode; |
duke@435 | 521 | _method = method; |
duke@435 | 522 | _bci = bci; |
duke@435 | 523 | } |
stefank@2314 | 524 | |
stefank@2314 | 525 | #endif // SHARE_VM_RUNTIME_VFRAME_HPP |