src/share/vm/runtime/vframe.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
equal deleted inserted replaced
-1:000000000000 0:f90c822e73f8
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_RUNTIME_VFRAME_HPP
26 #define SHARE_VM_RUNTIME_VFRAME_HPP
27
28 #include "code/debugInfo.hpp"
29 #include "code/debugInfoRec.hpp"
30 #include "code/location.hpp"
31 #include "oops/oop.hpp"
32 #include "runtime/frame.hpp"
33 #include "runtime/frame.inline.hpp"
34 #include "runtime/stackValue.hpp"
35 #include "runtime/stackValueCollection.hpp"
36 #include "utilities/growableArray.hpp"
37
// vframes are virtual stack frames representing source level activations.
// A single frame may hold several source level activations in the case of
// optimized code. The debugging information stored with the optimized code
// enables us to unfold a frame as a stack of vframes.
42 // A cVFrame represents an activation of a non-java method.
43
44 // The vframe inheritance hierarchy:
45 // - vframe
46 // - javaVFrame
47 // - interpretedVFrame
48 // - compiledVFrame ; (used for both compiled Java methods and native stubs)
49 // - externalVFrame
50 // - entryVFrame ; special frame created when calling Java from C
51
52 // - BasicLock
53
// Base class of all virtual stack frames. A vframe is a resource-allocated
// view over a physical frame; it holds a copy of the raw frame plus the
// register map needed to find callee-saved registers and sender frames.
class vframe: public ResourceObj {
 protected:
  frame        _fr;      // Raw frame behind the virtual frame.
  RegisterMap  _reg_map; // Register map for the raw frame (used to handle callee-saved registers).
  JavaThread*  _thread;  // The thread owning the raw frame.

  vframe(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);
  vframe(const frame* fr, JavaThread* thread);
 public:
  // Factory method for creating vframes; picks the concrete subclass
  // that matches the kind of the physical frame.
  static vframe* new_vframe(const frame* f, const RegisterMap* reg_map, JavaThread* thread);

  // Accessors
  frame     fr() const { return _fr; }
  CodeBlob* cb() const { return _fr.cb(); }
  // Only valid while the underlying code blob is an nmethod (asserted).
  nmethod*  nm() const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // ???? Does this need to be a copy?
  frame* frame_pointer() { return &_fr; }
  const RegisterMap* register_map() const { return &_reg_map; }
  JavaThread* thread() const { return _thread; }

  // Returns the sender vframe
  virtual vframe* sender() const;

  // Returns the next javaVFrame on the stack (skipping all other kinds of frame)
  javaVFrame* java_sender() const;

  // Answers if this is the top vframe in the frame, i.e., if the sender vframe
  // is in the caller frame
  virtual bool is_top() const { return true; }

  // Returns top vframe within same frame (see is_top())
  virtual vframe* top() const;

  // Type testing operations (overridden by the matching subclass)
  virtual bool is_entry_frame()       const { return false; }
  virtual bool is_java_frame()        const { return false; }
  virtual bool is_interpreted_frame() const { return false; }
  virtual bool is_compiled_frame()    const { return false; }

#ifndef PRODUCT
  // printing operations
  virtual void print_value() const;
  virtual void print();
#endif
};
104
105
// Virtual frame for a source-level Java activation (interpreted or compiled).
class javaVFrame: public vframe {
 public:
  // JVM state
  virtual Method*                      method()      const = 0;
  virtual int                          bci()         const = 0;
  virtual StackValueCollection*        locals()      const = 0;
  virtual StackValueCollection*        expressions() const = 0;
  // the order returned by monitors() is from oldest -> youngest (see bug 4418568)
  virtual GrowableArray<MonitorInfo*>* monitors()    const = 0;

  // Debugging support via JVMTI.
  // NOTE that this is not guaranteed to give correct results for compiled vframes.
  // Deoptimize first if necessary.
  virtual void set_locals(StackValueCollection* values) const = 0;

  // Test operation
  bool is_java_frame() const { return true; }

 protected:
  javaVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}
  javaVFrame(const frame* fr, JavaThread* thread) : vframe(fr, thread) {}

 public:
  // casting (checked in debug builds only)
  static javaVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_java_frame(), "must be java frame");
    return (javaVFrame*) vf;
  }

  // Return an array of monitors locked by this frame in the youngest to oldest order
  // (note: opposite of the order documented for monitors() above)
  GrowableArray<MonitorInfo*>* locked_monitors();

  // printing used during stack dumps
  void print_lock_info_on(outputStream* st, int frame_count);
  void print_lock_info(int frame_count) { print_lock_info_on(tty, frame_count); }

#ifndef PRODUCT
 public:
  // printing operations
  void print();
  void print_value() const;
  void print_activation(int index) const;

  // verify operations
  virtual void verify() const;

  // Structural compare
  bool structural_compare(javaVFrame* other);
#endif
  friend class vframe;
};
157
// Virtual frame for an activation currently executing in the interpreter.
class interpretedVFrame: public javaVFrame {
 public:
  // JVM state
  Method*                      method()      const;
  int                          bci()         const;
  StackValueCollection*        locals()      const;
  StackValueCollection*        expressions() const;
  GrowableArray<MonitorInfo*>* monitors()    const;

  void set_locals(StackValueCollection* values) const;

  // Test operation
  bool is_interpreted_frame() const { return true; }

 protected:
  interpretedVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : javaVFrame(fr, reg_map, thread) {};

 public:
  // Accessors for Byte Code Pointer
  u_char* bcp() const;
  void set_bcp(u_char* bcp);

  // casting (checked in debug builds only)
  static interpretedVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_interpreted_frame(), "must be interpreted frame");
    return (interpretedVFrame*) vf;
  }

 private:
  static const int bcp_offset;  // frame-layout offset of the bcp slot
  intptr_t* locals_addr_at(int offset) const;

  // returns where the parameters start relative to the frame pointer
  int start_of_parameters() const;

#ifndef PRODUCT
 public:
  // verify operations
  void verify() const;
#endif
  friend class vframe;
};
200
201
// Virtual frame for an activation of a non-Java (VM internal / native) method.
class externalVFrame: public vframe {
 protected:
  externalVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread) : vframe(fr, reg_map, thread) {}

#ifndef PRODUCT
 public:
  // printing operations
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
214
// Virtual frame for the special stub frame created when calling Java from C.
class entryVFrame: public externalVFrame {
 public:
  bool is_entry_frame() const { return true; }

 protected:
  entryVFrame(const frame* fr, const RegisterMap* reg_map, JavaThread* thread);

 public:
  // casting (checked in debug builds only)
  static entryVFrame* cast(vframe* vf) {
    assert(vf == NULL || vf->is_entry_frame(), "must be entry frame");
    return (entryVFrame*) vf;
  }

#ifndef PRODUCT
 public:
  // printing
  void print_value() const;
  void print();
#endif
  friend class vframe;
};
237
238
239 // A MonitorInfo is a ResourceObject that describes a the pair:
240 // 1) the owner of the monitor
241 // 2) the monitor lock
242 class MonitorInfo : public ResourceObj {
243 private:
244 oop _owner; // the object owning the monitor
245 BasicLock* _lock;
246 oop _owner_klass; // klass (mirror) if owner was scalar replaced
247 bool _eliminated;
248 bool _owner_is_scalar_replaced;
249 public:
250 // Constructor
251 MonitorInfo(oop owner, BasicLock* lock, bool eliminated, bool owner_is_scalar_replaced) {
252 if (!owner_is_scalar_replaced) {
253 _owner = owner;
254 _owner_klass = NULL;
255 } else {
256 assert(eliminated, "monitor should be eliminated for scalar replaced object");
257 _owner = NULL;
258 _owner_klass = owner;
259 }
260 _lock = lock;
261 _eliminated = eliminated;
262 _owner_is_scalar_replaced = owner_is_scalar_replaced;
263 }
264 // Accessors
265 oop owner() const {
266 assert(!_owner_is_scalar_replaced, "should not be called for scalar replaced object");
267 return _owner;
268 }
269 oop owner_klass() const {
270 assert(_owner_is_scalar_replaced, "should not be called for not scalar replaced object");
271 return _owner_klass;
272 }
273 BasicLock* lock() const { return _lock; }
274 bool eliminated() const { return _eliminated; }
275 bool owner_is_scalar_replaced() const { return _owner_is_scalar_replaced; }
276 };
277
// Lightweight, stack-allocated iterator over the Java-level activations of a
// thread, walking from the most recent frame towards the stack base. It caches
// only the Method*/bci pair per position, which keeps iteration cheap.
class vframeStreamCommon : StackObj {
 protected:
  // common
  frame        _frame;   // current physical frame
  JavaThread*  _thread;  // thread whose stack is being walked
  RegisterMap  _reg_map; // register map used to locate sender frames
  enum { interpreted_mode, compiled_mode, at_end_mode } _mode;

  // Debug-info offset of the inlined sender scope for the current compiled
  // position, or DebugInformationRecorder::serialized_null if there is none.
  int _sender_decode_offset;

  // Cached information for the current position
  Method* _method;
  int     _bci;

  // Should VM activations be ignored or not
  bool _stop_at_java_call_stub;

  bool fill_in_compiled_inlined_sender();
  void fill_from_compiled_frame(int decode_offset);
  void fill_from_compiled_native_frame();

  void found_bad_method_frame();

  void fill_from_interpreter_frame();
  bool fill_from_frame();

  // Helper routine for security_get_caller_frame
  void skip_prefixed_method_and_wrappers();

 public:
  // Constructor.
  // NOTE: leaves _frame, _mode and _stop_at_java_call_stub unset; concrete
  // subclasses (e.g. vframeStream's constructor) are responsible for them.
  vframeStreamCommon(JavaThread* thread) : _reg_map(thread, false) {
    _thread = thread;
  }

  // Accessors
  Method* method() const { return _method; }
  int bci() const { return _bci; }
  intptr_t* frame_id() const { return _frame.id(); }
  address frame_pc() const { return _frame.pc(); }

  CodeBlob* cb() const { return _frame.cb(); }
  // Only valid while the underlying code blob is an nmethod (asserted).
  nmethod* nm() const {
    assert( cb() != NULL && cb()->is_nmethod(), "usage");
    return (nmethod*) cb();
  }

  // Frame type
  bool is_interpreted_frame() const { return _frame.is_interpreted_frame(); }
  bool is_entry_frame() const       { return _frame.is_entry_frame(); }

  // Iteration: advance to the next Java-level activation.
  void next() {
    // handle frames with inlining: stay within the same physical frame
    if (_mode == compiled_mode && fill_in_compiled_inlined_sender()) return;

    // handle general case: walk physical sender frames until one is Java-visible
    do {
      _frame = _frame.sender(&_reg_map);
    } while (!fill_from_frame());
  }
  void security_next();

  bool at_end() const { return _mode == at_end_mode; }

  // Implements security traversal. Skips depth no. of frames including
  // special security frames and prefixed native methods
  void security_get_caller_frame(int depth);

  // Helper routine for JVM_LatestUserDefinedLoader -- needed for 1.4
  // reflection implementation
  void skip_reflection_related_frames();
};
351
// Concrete stream over a thread's current Java stack.
class vframeStream : public vframeStreamCommon {
 public:
  // Constructors
  vframeStream(JavaThread* thread, bool stop_at_java_call_stub = false)
    : vframeStreamCommon(thread) {
    _stop_at_java_call_stub = stop_at_java_call_stub;

    // A thread with no Java frames yields an immediately exhausted stream.
    if (!thread->has_last_Java_frame()) {
      _mode = at_end_mode;
      return;
    }

    // Position the stream at the first Java-visible frame.
    _frame = _thread->last_frame();
    while (!fill_from_frame()) {
      _frame = _frame.sender(&_reg_map);
    }
  }

  // top_frame may not be at safepoint, start with sender
  vframeStream(JavaThread* thread, frame top_frame, bool stop_at_java_call_stub = false);
};
373
374
375 inline bool vframeStreamCommon::fill_in_compiled_inlined_sender() {
376 if (_sender_decode_offset == DebugInformationRecorder::serialized_null) {
377 return false;
378 }
379 fill_from_compiled_frame(_sender_decode_offset);
380 return true;
381 }
382
383
// Positions the stream at the scope described by 'decode_offset' within the
// current nmethod's debug info. The offset is range-checked first so that
// wild frames (e.g. from AsyncGetCallTrace) cannot cause us to decode garbage.
inline void vframeStreamCommon::fill_from_compiled_frame(int decode_offset) {
  _mode = compiled_mode;

  // Range check to detect ridiculous offsets.
  if (decode_offset == DebugInformationRecorder::serialized_null ||
      decode_offset < 0 ||
      decode_offset >= nm()->scopes_data_size()) {
    // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
    // If we attempt to read nmethod::scopes_data at serialized_null (== 0),
    // or if we read some at other crazy offset,
    // we will decode garbage and make wild references into the heap,
    // leading to crashes in product mode.
    // (This isn't airtight, of course, since there are internal
    // offsets which are also crazy.)
#ifdef ASSERT
    if (WizardMode) {
      tty->print_cr("Error in fill_from_frame: pc_desc for "
                    INTPTR_FORMAT " not found or invalid at %d",
                    p2i(_frame.pc()), decode_offset);
      nm()->print();
      nm()->method()->print_codes();
      nm()->print_code();
      nm()->print_pcs();
    }
#endif
    // Provide a cheap fallback in product mode.  (See comment above.)
    found_bad_method_frame();
    fill_from_compiled_native_frame();
    return;
  }

  // Decode first part of scopeDesc: sender scope offset, method, and bci.
  DebugInfoReadStream buffer(nm(), decode_offset);
  _sender_decode_offset = buffer.read_int();
  _method               = buffer.read_method();
  _bci                  = buffer.read_bci();

  assert(_method->is_method(), "checking type of decoded method");
}
423
424 // The native frames are handled specially. We do not rely on ScopeDesc info
425 // since the pc might not be exact due to the _last_native_pc trick.
426 inline void vframeStreamCommon::fill_from_compiled_native_frame() {
427 _mode = compiled_mode;
428 _sender_decode_offset = DebugInformationRecorder::serialized_null;
429 _method = nm()->method();
430 _bci = 0;
431 }
432
// Refreshes the stream's cached state (_mode/_method/_bci) from the current
// physical frame. Returns true when the frame is Java-visible or the walk is
// finished, and false when the caller should advance to the sender frame.
inline bool vframeStreamCommon::fill_from_frame() {
  // Interpreted frame
  if (_frame.is_interpreted_frame()) {
    fill_from_interpreter_frame();
    return true;
  }

  // Compiled frame

  if (cb() != NULL && cb()->is_nmethod()) {
    if (nm()->is_native_method()) {
      // Do not rely on scopeDesc since the pc might be unprecise due to the _last_native_pc trick.
      fill_from_compiled_native_frame();
    } else {
      PcDesc* pc_desc = nm()->pc_desc_at(_frame.pc());
      int decode_offset;
      if (pc_desc == NULL) {
        // Should not happen, but let fill_from_compiled_frame handle it.

        // If we are trying to walk the stack of a thread that is not
        // at a safepoint (like AsyncGetCallTrace would do) then this is an
        // acceptable result. [ This is assuming that safe_for_sender
        // is so bullet proof that we can trust the frames it produced. ]
        //
        // So if we see that the thread is not safepoint safe
        // then simply produce the method and a bci of zero
        // and skip the possibility of decoding any inlining that
        // may be present. That is far better than simply stopping (or
        // asserting. If however the thread is safepoint safe this
        // is the sign of a compiler bug and we'll let
        // fill_from_compiled_frame handle it.


        JavaThreadState state = _thread->thread_state();

        // in_Java should be good enough to test safepoint safety
        // if state were say in_Java_trans then we'd expect that
        // the pc would have already been slightly adjusted to
        // one that would produce a pcDesc since the trans state
        // would be one that might in fact anticipate a safepoint

        if (state == _thread_in_Java ) {
          // This will get a method a zero bci and no inlining.
          // Might be nice to have a unique bci to signify this
          // particular case but for now zero will do.

          fill_from_compiled_native_frame();

          // There is something to be said for setting the mode to
          // at_end_mode to prevent trying to walk further up the
          // stack. There is evidence that if we walk any further
          // that we could produce a bad stack chain. However until
          // we see evidence that allowing this causes us to find
          // frames bad enough to cause segv's or assertion failures
          // we don't do it as while we may get a bad call chain the
          // probability is much higher (several magnitudes) that we
          // get good data.

          return true;
        }
        decode_offset = DebugInformationRecorder::serialized_null;
      } else {
        decode_offset = pc_desc->scope_decode_offset();
      }
      fill_from_compiled_frame(decode_offset);
    }
    return true;
  }

  // End of stack?
  if (_frame.is_first_frame() || (_stop_at_java_call_stub && _frame.is_entry_frame())) {
    _mode = at_end_mode;
    return true;
  }

  // Neither interpreted, compiled, nor terminal: skip this frame.
  return false;
}
510
511
512 inline void vframeStreamCommon::fill_from_interpreter_frame() {
513 Method* method = _frame.interpreter_frame_method();
514 intptr_t bcx = _frame.interpreter_frame_bcx();
515 int bci = method->validate_bci_from_bcx(bcx);
516 // 6379830 AsyncGetCallTrace sometimes feeds us wild frames.
517 if (bci < 0) {
518 found_bad_method_frame();
519 bci = 0; // pretend it's on the point of entering
520 }
521 _mode = interpreted_mode;
522 _method = method;
523 _bci = bci;
524 }
525
526 #endif // SHARE_VM_RUNTIME_VFRAME_HPP

mercurial