Fri, 21 Jun 2013 10:57:00 -0400
8017177: more explicit code location information in hs_err crash log
Summary: Add code pc location for compiled code
Reviewed-by: kvn, coleenp
Contributed-by: doug.simon@oracle.com
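
As an illustrative note (an assumption about the resulting output, not part of the changeset text): with this change, the hs_err stack section records for a compiled Java frame not just the method signature but the faulting pc plus the nmethod's code begin and the offset into it, along the lines of

    J  com.example.Foo.bar()V @ 0x00007f2b450214d7 [0x00007f2b45021300+0x1d7]

(method name and addresses hypothetical), matching the "J  %s @ PTR [PTR+SIZE]" format emitted by frame::print_on_error() below.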
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/methodData.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/decoder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
  _thread     = thread;
  _update_map = update_map;
  clear();
  debug_only(_update_for_id = NULL;)
#ifndef PRODUCT
  for (int i = 0; i < reg_count; i++) _location[i] = NULL;
#endif /* PRODUCT */
}
RegisterMap::RegisterMap(const RegisterMap* map) {
  assert(map != this, "bad initialization parameter");
  assert(map != NULL, "RegisterMap must be present");
  _thread                = map->thread();
  _update_map            = map->update_map();
  _include_argument_oops = map->include_argument_oops();
  debug_only(_update_for_id = map->_update_for_id;)
  pd_initialize_from(map);
  if (update_map()) {
    for (int i = 0; i < location_valid_size; i++) {
      // update_map() is known to be true here, so copy the valid bits directly.
      LocationValidType bits = map->_location_valid[i];
      _location_valid[i] = bits;
      // for whichever bits are set, pull in the corresponding map->_location
      int j = i*location_valid_type_size;
      while (bits != 0) {
        if ((bits & 1) != 0) {
          assert(0 <= j && j < reg_count, "range check");
          _location[j] = map->_location[j];
        }
        bits >>= 1;
        j += 1;
      }
    }
  }
}
void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (_update_map) {
    for (int i = 0; i < location_valid_size; i++) {
      _location_valid[i] = 0;
    }
    pd_clear();
  } else {
    pd_initialize();
  }
}
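
// Illustrative sketch (not part of this change): a RegisterMap is the cursor
// state for a stack walk. A typical non-updating walk over Java frames, as
// done by several of the testers in this file, looks like:
//
//   JavaThread* thread = JavaThread::current();
//   RegisterMap map(thread, false);  // false: don't record register locations
//   for (frame fr = thread->last_frame(); !fr.is_first_frame(); fr = fr.sender(&map)) {
//     if (fr.is_java_frame()) { /* inspect fr */ }
//   }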
#ifndef PRODUCT

void RegisterMap::print_on(outputStream* st) const {
  st->print_cr("Register map");
  for (int i = 0; i < reg_count; i++) {
    VMReg r = VMRegImpl::as_VMReg(i);
    intptr_t* src = (intptr_t*) location(r);
    if (src != NULL) {
      r->print_on(st);
      st->print(" [" INTPTR_FORMAT "] = ", src);
      if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
        st->print_cr("<misaligned>");
      } else {
        st->print_cr(INTPTR_FORMAT, *src);
      }
    }
  }
}

void RegisterMap::print() const {
  print_on(tty);
}

#endif
// This returns the pc that if you were in the debugger you'd see. Not
// the idealized value in the frame object. This undoes the magic conversion
// that happens for deoptimized frames. In addition it makes the value the
// hardware would want to see in the native frame. The only user (at this
// point) is deoptimization; likely no one else should ever use it.

address frame::raw_pc() const {
  if (is_deoptimized_frame()) {
    nmethod* nm = cb()->as_nmethod_or_null();
    assert(nm != NULL, "a deoptimized frame must have an nmethod");
    if (nm->is_method_handle_return(pc()))
      return nm->deopt_mh_handler_begin() - pc_return_offset;
    else
      return nm->deopt_handler_begin() - pc_return_offset;
  } else {
    return (pc() - pc_return_offset);
  }
}
// Change the pc in a frame object. This does not change the actual pc in
// the actual frame. To do that use patch_pc.
//
void frame::set_pc(address newpc) {
#ifdef ASSERT
  if (_cb != NULL && _cb->is_nmethod()) {
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
  }
#endif // ASSERT

  // Unsafe to use the is_deoptimized tester after changing pc
  _deopt_state = unknown;
  _pc = newpc;
  _cb = CodeCache::find_blob_unsafe(_pc);
}
// type testers
bool frame::is_ignored_frame() const {
  return false;  // FIXME: some LambdaForm frames should be ignored
}

bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  return _deopt_state == is_deoptimized;
}

bool frame::is_native_frame() const {
  return (_cb != NULL &&
          _cb->is_nmethod() &&
          ((nmethod*)_cb)->is_native_method());
}

bool frame::is_java_frame() const {
  if (is_interpreted_frame()) return true;
  if (is_compiled_frame())    return true;
  return false;
}

bool frame::is_compiled_frame() const {
  if (_cb != NULL &&
      _cb->is_nmethod() &&
      ((nmethod*)_cb)->is_java_method()) {
    return true;
  }
  return false;
}

bool frame::is_runtime_frame() const {
  return (_cb != NULL && _cb->is_runtime_stub());
}

bool frame::is_safepoint_blob_frame() const {
  return (_cb != NULL && _cb->is_safepoint_stub());
}
// testers

bool frame::is_first_java_frame() const {
  RegisterMap map(JavaThread::current(), false); // No update
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
  return s.is_first_frame();
}

bool frame::entry_frame_is_first() const {
  return entry_frame_call_wrapper()->anchor()->last_Java_sp() == NULL;
}
bool frame::should_be_deoptimized() const {
  if (_deopt_state == is_deoptimized ||
      !is_compiled_frame()) return false;
  assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
  nmethod* nm = (nmethod *)_cb;
  if (TraceDependencies) {
    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
    nm->print_value_on(tty);
    tty->cr();
  }

  if (!nm->is_marked_for_deoptimization())
    return false;

  // If at the return point, then the frame has already been popped, and
  // only the return needs to be executed. Don't deoptimize here.
  return !nm->is_at_poll_return(pc());
}

bool frame::can_be_deoptimized() const {
  if (!is_compiled_frame()) return false;
  nmethod* nm = (nmethod*)_cb;

  if (!nm->can_be_deoptimized())
    return false;

  return !nm->is_at_poll_return(pc());
}
void frame::deoptimize(JavaThread* thread) {
  // Schedule deoptimization of an nmethod activation with this frame.
  assert(_cb != NULL && _cb->is_nmethod(), "must be");
  nmethod* nm = (nmethod*)_cb;

  // This is a fix for the register window patching race
  if (NeedsDeoptSuspend && Thread::current() != thread) {
    assert(SafepointSynchronize::is_at_safepoint(),
           "patching other threads for deopt may only occur at a safepoint");

    // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
    // we could see the frame again and ask for it to be deoptimized since
    // it might move for a long time. That is harmless and we just ignore it.
    if (id() == thread->must_deopt_id()) {
      assert(thread->is_deopt_suspend(), "lost suspension");
      return;
    }

    // We are at a safepoint so the target thread can only be
    // in 4 states:
    //  blocked       - no problem
    //  blocked_trans - no problem (i.e. could have woken up from blocked
    //                  during a safepoint)
    //  native        - register window pc patching race
    //  native_trans  - momentary state
    //
    // We could just wait out a thread in native_trans to block.
    // Then we'd have all the issues that the safepoint code has as to
    // whether to spin or block. It isn't worth it. Just treat it like
    // native and be done with it.
    //
    // Examine the state of the thread at the start of safepoint since
    // threads that were in native at the start of the safepoint could
    // come to a halt during the safepoint, changing the current value
    // of the safepoint_state.
    JavaThreadState state = thread->safepoint_state()->orig_thread_state();
    if (state == _thread_in_native || state == _thread_in_native_trans) {
      // Since we are at a safepoint the target thread will stop itself
      // before it can return to java as long as we remain at the safepoint.
      // Therefore we can put an additional request for the thread to stop
      // no matter what (like a suspend). This will cause the thread
      // to notice it needs to do the deopt on its own once it leaves native.
      //
      // The only reason we must do this is because on machines with register
      // windows we have a race with patching the return address and the
      // window coming live as the thread returns to the Java code (but still
      // in native mode) and then blocks. It is only this topmost frame
      // that is at risk. So in truth we could add an additional check to
      // see if this frame is one that is at risk.
      RegisterMap map(thread, false);
      frame at_risk = thread->last_frame().sender(&map);
      if (id() == at_risk.id()) {
        thread->set_must_deopt_id(id());
        thread->set_deopt_suspend();
        return;
      }
    }
  } // NeedsDeoptSuspend

  // If the call site is a MethodHandle call site use the MH deopt
  // handler.
  address deopt = nm->is_method_handle_return(pc()) ?
                  nm->deopt_mh_handler_begin() :
                  nm->deopt_handler_begin();

  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());
  patch_pc(thread, deopt);

#ifdef ASSERT
  {
    RegisterMap map(thread, false);
    frame check = thread->last_frame();
    while (id() != check.id()) {
      check = check.sender(&map);
    }
    assert(check.is_deoptimized_frame(), "missed deopt");
  }
#endif // ASSERT
}
frame frame::java_sender() const {
  RegisterMap map(JavaThread::current(), false);
  frame s;
  for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
  guarantee(s.is_java_frame(), "tried to get caller of first java frame");
  return s;
}

frame frame::real_sender(RegisterMap* map) const {
  frame result = sender(map);
  while (result.is_runtime_frame() ||
         result.is_ignored_frame()) {
    result = result.sender(map);
  }
  return result;
}
// Note: called by profiler - NOT for current thread
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
  // If we don't recognize this frame, walk back up the stack until we do
  RegisterMap map(thread, false);
  frame first_java_frame = frame();

  // Find the first Java frame on the stack starting with input frame
  if (is_java_frame()) {
    // top frame is a compiled or deoptimized frame
    first_java_frame = *this;
  } else if (safe_for_sender(thread)) {
    for (frame sender_frame = sender(&map);
         sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
         sender_frame = sender_frame.sender(&map)) {
      if (sender_frame.is_java_frame()) {
        first_java_frame = sender_frame;
        break;
      }
    }
  }
  return first_java_frame;
}
// Interpreter frames

void frame::interpreter_frame_set_locals(intptr_t* locs) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  *interpreter_frame_locals_addr() = locs;
}

Method* frame::interpreter_frame_method() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* m = *interpreter_frame_method_addr();
  assert(m->is_metadata(), "bad Method* in interpreter frame");
  assert(m->is_method(), "not a Method*");
  return m;
}

void frame::interpreter_frame_set_method(Method* method) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  *interpreter_frame_method_addr() = method;
}
void frame::interpreter_frame_set_bcx(intptr_t bcx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  if (ProfileInterpreter) {
    bool formerly_bci = is_bci(interpreter_frame_bcx());
    bool is_now_bci   = is_bci(bcx);
    *interpreter_frame_bcx_addr() = bcx;

    intptr_t mdx = interpreter_frame_mdx();

    if (mdx != 0) {
      if (formerly_bci) {
        if (!is_now_bci) {
          // The bcx was just converted from bci to bcp.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
          address mdp = mdo->di_to_dp(mdi);
          interpreter_frame_set_mdx((intptr_t)mdp);
        }
      } else {
        if (is_now_bci) {
          // The bcx was just converted from bcp to bci.
          // Convert the mdx in parallel.
          MethodData* mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdo->dp_to_di((address)mdx);
          interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
        }
      }
    }
  } else {
    *interpreter_frame_bcx_addr() = bcx;
  }
}
jint frame::interpreter_frame_bci() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
}

void frame::interpreter_frame_set_bci(jint bci) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
  interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
}

address frame::interpreter_frame_bcp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
}

void frame::interpreter_frame_set_bcp(address bcp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
  interpreter_frame_set_bcx((intptr_t)bcp);
}
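
// Illustrative sketch (not part of this change): the bcx slot holds either a
// small bci (position independent, usable while a GC may move the Method*) or
// a raw bcp pointer into the method's bytecodes. gc_prologue()/gc_epilogue()
// further down round-trip between the two encodings, conceptually:
//
//   fr.interpreter_frame_set_bcx(fr.interpreter_frame_bci());            // bcp -> bci
//   /* ... GC may move the Method* here ... */
//   fr.interpreter_frame_set_bcx((intptr_t)fr.interpreter_frame_bcp());  // bci -> bcp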
void frame::interpreter_frame_set_mdx(intptr_t mdx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(ProfileInterpreter, "must be profiling interpreter");
  *interpreter_frame_mdx_addr() = mdx;
}

address frame::interpreter_frame_mdp() const {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  intptr_t mdx = interpreter_frame_mdx();

  assert(!is_bci(bcx), "should not access mdp during GC");
  return (address)mdx;
}

void frame::interpreter_frame_set_mdp(address mdp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  if (mdp == NULL) {
    // Always allow the mdp to be cleared; return early so the GC assert
    // below cannot fire while bcx still holds a bci.
    interpreter_frame_set_mdx((intptr_t)mdp);
    return;
  }
  intptr_t bcx = interpreter_frame_bcx();
  assert(!is_bci(bcx), "should not set mdp during GC");
  interpreter_frame_set_mdx((intptr_t)mdp);
}
BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
  interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
  return next;
}

BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
  // // This verification needs to be checked before being enabled
  // interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
  return previous;
}
// Interpreter locals and expression stack locations.

intptr_t* frame::interpreter_frame_local_at(int index) const {
  const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
  return &((*interpreter_frame_locals_addr())[n]);
}

intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
  const int i = offset * interpreter_frame_expression_stack_direction();
  const int n = i * Interpreter::stackElementWords;
  return &(interpreter_frame_expression_stack()[n]);
}

jint frame::interpreter_frame_expression_stack_size() const {
  // Number of elements on the interpreter expression stack
  // Callers should span by stackElementWords
  int element_size = Interpreter::stackElementWords;
  if (frame::interpreter_frame_expression_stack_direction() < 0) {
    return (interpreter_frame_expression_stack() -
            interpreter_frame_tos_address() + 1)/element_size;
  } else {
    return (interpreter_frame_tos_address() -
            interpreter_frame_expression_stack() + 1)/element_size;
  }
}
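
// Illustrative sketch (not part of this change): on ports where the expression
// stack grows toward lower addresses (direction < 0), element i sits below the
// stack base. With a hypothetical base address of 0x1000, 8-byte words and
// one-word stack elements:
//
//   interpreter_frame_expression_stack_at(0)  -> 0x1000   // bottom element
//   interpreter_frame_expression_stack_at(1)  -> 0x0ff8   // next pushed word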
// (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)

const char* frame::print_name() const {
  if (is_native_frame())      return "Native";
  if (is_interpreted_frame()) return "Interpreted";
  if (is_compiled_frame()) {
    if (is_deoptimized_frame()) return "Deoptimized";
    return "Compiled";
  }
  if (sp() == NULL)           return "Empty";
  return "C";
}
void frame::print_value_on(outputStream* st, JavaThread *thread) const {
  NOT_PRODUCT(address begin = pc()-40;)
  NOT_PRODUCT(address end   = NULL;)

  st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
  if (sp() != NULL)
    st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());

  if (StubRoutines::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
    st->print("~Stub::%s", desc->name());
    NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
  } else if (Interpreter::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
    if (desc != NULL) {
      st->print("~");
      desc->print_on(st);
      NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
    } else {
      st->print("~interpreter");
    }
  }
  st->print_cr(")");

  if (_cb != NULL) {
    st->print("     ");
    _cb->print_value_on(st);
    st->cr();
#ifndef PRODUCT
    if (end == NULL) {
      begin = _cb->code_begin();
      end   = _cb->code_end();
    }
#endif
  }
  NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
}
void frame::print_on(outputStream* st) const {
  print_value_on(st, NULL);
  if (is_interpreted_frame()) {
    interpreter_frame_print_on(st);
  }
}
void frame::interpreter_frame_print_on(outputStream* st) const {
#ifndef PRODUCT
  assert(is_interpreted_frame(), "Not an interpreted frame");
  jint i;
  for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
    intptr_t x = *interpreter_frame_local_at(i);
    st->print(" - local  [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
    intptr_t x = *interpreter_frame_expression_stack_at(i);
    st->print(" - stack  [" INTPTR_FORMAT "]", x);
    st->fill_to(23);
    st->print_cr("; #%d", i);
  }
  // locks for synchronization
  for (BasicObjectLock* current = interpreter_frame_monitor_end();
       current < interpreter_frame_monitor_begin();
       current = next_monitor_in_interpreter_frame(current)) {
    st->print(" - obj    [");
    current->obj()->print_value_on(st);
    st->print_cr("]");
    st->print(" - lock   [");
    current->lock()->print_on(st);
    st->print_cr("]");
  }
  // monitor
  st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
  // bcp
  st->print(" - bcp    [" INTPTR_FORMAT "]", interpreter_frame_bcp());
  st->fill_to(23);
  st->print_cr("; @%d", interpreter_frame_bci());
  // locals
  st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
  // method
  st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
  st->fill_to(23);
  st->print("; ");
  interpreter_frame_method()->print_name(st);
  st->cr();
#endif
}
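
// Illustrative sketch (not part of this change): for a small interpreted
// activation the output above looks roughly like (all values hypothetical):
//
//    - local  [0x00000000deadbeef]  ; #0
//    - stack  [0x0000000000000001]  ; #0
//    - monitor[0x00007f8a6cff38c0]
//    - bcp    [0x00007f8a6409a3d4]  ; @12
//    - locals [0x00007f8a6cff3918]
//    - method [0x00007f8a6409a2b8]  ; java.lang.String.hashCode()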
// Print "V" if the frame is in the VM or the OS, indicating a HotSpot problem.
// Otherwise print "C" and point at the native library the Java code calls,
// hopefully indicating where to submit bugs.
static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
  // C/C++ frame
  bool in_vm = os::address_is_in_vm(pc);
  st->print(in_vm ? "V" : "C");

  int offset;
  bool found;

  // libname
  found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    int len = (int)strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    st->print("  [%s+0x%x]", p1, offset);
  } else {
    st->print("  " PTR_FORMAT, pc);
  }

  // function name - os::dll_address_to_function_name() may return confusing
  // names if pc is within jvm.dll or libjvm.so, because JVM only has
  // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
  // only for native libraries.
  if (!in_vm || Decoder::can_decode_C_frame_in_vm()) {
    found = os::dll_address_to_function_name(pc, buf, buflen, &offset);

    if (found) {
      st->print("  %s+0x%x", buf, offset);
    }
  }
}
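
// Illustrative sketch (not part of this change): a typical line this produces
// for a crash inside a (hypothetical) user library is
//
//   C  [libfoo.so+0x1a2b]  foo_native_impl+0x4c
//
// while "V  [libjvm.so+0x...]" would indicate a fault inside HotSpot itself.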
// frame::print_on_error() is called by the fatal error handler. Note that we
// may crash inside this function if the stack frame is corrupted. The fatal
// error handler can catch and handle the crash. Here we assume the frame is
// valid.
//
// First letter indicates type of the frame:
//    J: Java frame (compiled)
//    j: Java frame (interpreted)
//    V: VM frame (C/C++)
//    v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
//    C: C/C++ frame
//
// We don't need a frame type as detailed as that in frame::print_name(). "C"
// suggests the problem is in user lib; everything else is likely a VM bug.

void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
  if (_cb != NULL) {
    if (Interpreter::contains(pc())) {
      Method* m = this->interpreter_frame_method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("j  %s", buf);
        st->print("+%d", this->interpreter_frame_bci());
      } else {
        st->print("j  " PTR_FORMAT, pc());
      }
    } else if (StubRoutines::contains(pc())) {
      StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
      if (desc != NULL) {
        st->print("v  ~StubRoutines::%s", desc->name());
      } else {
        st->print("v  ~StubRoutines::" PTR_FORMAT, pc());
      }
    } else if (_cb->is_buffer_blob()) {
      st->print("v  ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
    } else if (_cb->is_nmethod()) {
      Method* m = ((nmethod *)_cb)->method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("J  %s @ " PTR_FORMAT " [" PTR_FORMAT "+" SIZE_FORMAT "]",
                  buf, _pc, _cb->code_begin(), _pc - _cb->code_begin());
      } else {
        st->print("J  " PTR_FORMAT, pc());
      }
    } else if (_cb->is_runtime_stub()) {
      st->print("v  ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
    } else if (_cb->is_deoptimization_stub()) {
      st->print("v  ~DeoptimizationBlob");
    } else if (_cb->is_exception_stub()) {
      st->print("v  ~ExceptionBlob");
    } else if (_cb->is_safepoint_stub()) {
      st->print("v  ~SafepointBlob");
    } else {
      st->print("v  blob " PTR_FORMAT, pc());
    }
  } else {
    print_C_frame(st, buf, buflen, pc());
  }
}
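
// Illustrative sketch (not part of this change): a hypothetical hs_err stack
// excerpt mixing the frame types above, with the compiled ("J") line carrying
// the pc and code-begin+offset added by this change:
//
//   j  java.util.HashMap.hash(Ljava/lang/Object;)I+9
//   J  com.example.Foo.bar()V @ 0x00007f2b450214d7 [0x00007f2b45021300+0x1d7]
//   v  ~StubRoutines::call_stub
//   V  [libjvm.so+0x5c1234]
//   C  [libfoo.so+0x1a2b]  foo_native_impl+0x4c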
/*
  The interpreter_frame_expression_stack_at method in the case of SPARC needs the
  max_stack value of the method in order to compute the expression stack address.
  It uses the Method* in order to get the max_stack value but during GC this
  Method* value saved on the frame is changed by reverse_and_push and hence cannot
  be used. So we save the max_stack value in the FrameClosure object and pass it
  down to the interpreter_frame_expression_stack_at method
*/
class InterpreterFrameClosure : public OffsetClosure {
 private:
  frame* _fr;
  OopClosure* _f;
  int    _max_locals;
  int    _max_stack;

 public:
  InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
                          OopClosure* f) {
    _fr         = fr;
    _max_locals = max_locals;
    _max_stack  = max_stack;
    _f          = f;
  }

  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      _f->do_oop(addr);
    } else {
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        _f->do_oop(addr);
      }
    }
  }

  int max_locals() { return _max_locals; }
  frame* fr()      { return _fr; }
};
class InterpretedArgumentOopFinder: public SignatureInfo {
 private:
  OopClosure* _f;        // Closure to invoke
  int    _offset;        // TOS-relative offset, decremented with each argument
  bool   _has_receiver;  // true if the callee has a receiver
  frame* _fr;

  void set(int size, BasicType type) {
    _offset -= size;
    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
  }

  void oop_offset_do() {
    oop* addr;
    addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
    _f->do_oop(addr);
  }

 public:
  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
    // compute size of arguments
    int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
    assert(!fr->is_interpreted_frame() ||
           args_size <= fr->interpreter_frame_expression_stack_size(),
           "args cannot be on stack anymore");
    // initialize InterpretedArgumentOopFinder
    _f      = f;
    _fr     = fr;
    _offset = args_size;
  }

  void oops_do() {
    if (_has_receiver) {
      --_offset;
      oop_offset_do();
    }
    iterate_parameters();
  }
};
// Entry frame has following form (n arguments)
//         +-----------+
//   sp -> |  last arg |
//         +-----------+
//         :    :::    :
//         +-----------+
// (sp+n)->|  first arg|
//         +-----------+

// visits and GC's all the arguments in entry frame
class EntryFrameOopFinder: public SignatureInfo {
 private:
  bool   _is_static;
  int    _offset;
  frame* _fr;
  OopClosure* _f;

  void set(int size, BasicType type) {
    assert (_offset >= 0, "illegal offset");
    if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
    _offset -= size;
  }

  void oop_at_offset_do(int offset) {
    assert (offset >= 0, "illegal offset");
    oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
    _f->do_oop(addr);
  }

 public:
  EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
    _f = NULL; // will be set later
    _fr = frame;
    _is_static = is_static;
    _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
  }

  void arguments_do(OopClosure* f) {
    _f = f;
    if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
    iterate_parameters();
  }

};
oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
  ArgumentSizeComputer asc(signature);
  int size = asc.size();
  return (oop *)interpreter_frame_tos_at(size);
}
void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
                                const RegisterMap* map, bool query_oop_map_cache) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(map != NULL, "map must be set");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(!Universe::heap()->is_in(m()),
         "must be valid oop");
  assert(m->is_method(), "checking frame value");
  assert((m->is_native() && bci == 0) ||
         (!m->is_native() && bci >= 0 && bci < m->code_size()),
         "invalid bci value");

  // Handle the monitor elements in the activation
  for (
    BasicObjectLock* current = interpreter_frame_monitor_end();
    current < interpreter_frame_monitor_begin();
    current = next_monitor_in_interpreter_frame(current)
  ) {
#ifdef ASSERT
    interpreter_frame_verify_monitor(current);
#endif
    current->oops_do(f);
  }

  // process fixed part
  if (cld_f != NULL) {
    // The method pointer in the frame might be the only path to the method's
    // klass, and the klass needs to be kept alive while executing. The GCs
    // don't trace through method pointers, so typically in similar situations
    // the mirror or the class loader of the klass are installed as a GC root.
    // To minimize the overhead of doing that here, we ask the GC to pass down a
    // closure that knows how to keep klasses alive given a ClassLoaderData.
    cld_f->do_cld(m->method_holder()->class_loader_data());
  }

#if !defined(PPC) || defined(ZERO)
  if (m->is_native()) {
#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    f->do_oop((oop*)&istate->_oop_temp);
#else
    f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
#endif /* CC_INTERP */
  }
#else // PPC
  if (m->is_native() && m->is_static()) {
    f->do_oop(interpreter_frame_mirror_addr());
  }
#endif // PPC

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  Symbol* signature = NULL;
  bool has_receiver = false;

  // Process a callee's arguments if we are at a call site
  // (i.e., if we are at an invoke bytecode)
  // This is used sometimes for calling into the VM, not for another
  // interpreted or compiled frame.
  if (!m->is_native()) {
    Bytecode_invoke call = Bytecode_invoke_check(m, bci);
    if (call.is_valid()) {
      signature = call.signature();
      has_receiver = call.has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread);  // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        //       occurred during method resolution/execution. In all
        //       cases we empty the expression stack completely be-
        //       fore handling the exception (the exception handling
        //       code in the interpreter calls a blocking runtime
        //       routine which can cause this code to be executed).
        //       (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

  // process locals & expression stack
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(bci, &mask);
  } else {
    OopMapCache::compute_one_oop_map(m, bci, &mask);
  }
  mask.iterate_oop(&blk);
}
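
// Illustrative sketch (not part of this change): a GC visits the oops of an
// interpreted activation by handing this method a closure, conceptually:
//
//   class CountOops : public OopClosure {
//    public:
//     int _n;
//     CountOops() : _n(0) {}
//     void do_oop(oop* p)       { _n++; }   // called once per live oop slot
//     void do_oop(narrowOop* p) { _n++; }
//   };
//
//   CountOops counter;
//   fr.oops_interpreted_do(&counter, NULL /* no CLD closure */, &reg_map);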
void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
  finder.oops_do();
}
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::oops_do(this, reg_map, f);

    // Preserve potential arguments for a callee by dispatching on the
    // codeblob.
    if (reg_map->include_argument_oops()) {
      _cb->preserve_callee_argument_oops(*this, reg_map, f);
    }
  }
  // In cases where perm gen is collected, GC will want to mark
  // oops referenced from nmethods active on thread stacks so as to
  // prevent them from being collected. However, this visit should be
  // restricted to certain phases of the collection only. The
  // closure decides how it wants nmethods to be traced.
  if (cf != NULL)
    cf->do_code_blob(_cb);
}
class CompiledArgumentOopFinder: public SignatureInfo {
 protected:
  OopClosure*  _f;
  int          _offset;        // the current offset, incremented with each argument
  bool         _has_receiver;  // true if the callee has a receiver
  bool         _has_appendix;  // true if the call has an appendix
  frame        _fr;
  RegisterMap* _reg_map;
  int          _arg_size;
  VMRegPair*   _regs;          // VMReg list of arguments

  void set(int size, BasicType type) {
    if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
    _offset += size;
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureInfo(signature) {

    // initialize CompiledArgumentOopFinder
    _f            = f;
    _offset       = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr           = fr;
    _reg_map      = (RegisterMap*)reg_map;
    _arg_size     = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);

    int arg_size;
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
    assert(arg_size == _arg_size, "wrong arg size");
  }

  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();
      _offset++;
    }
    iterate_parameters();
    if (_has_appendix) {
      handle_oop_offset();
      _offset++;
    }
  }
};
void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
  ResourceMark rm;
  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
  finder.oops_do();
}
// Get receiver out of caller's frame, i.e. find parameter 0 in the caller's
// frame. Consult ADLC for where parameter 0 is to be found. Then
// check the local reg_map for it being a callee-save register or argument
// register, both of which are saved in the local frame. If not found
// there, it must be an in-stack argument of the caller.
// Note: caller.sp() points to callee-arguments
oop frame::retrieve_receiver(RegisterMap* reg_map) {
  frame caller = *this;

  // First consult the ADLC on where it puts parameter 0 for this signature.
  VMReg reg = SharedRuntime::name_for_receiver();
  oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
  if (oop_adr == NULL) {
    guarantee(oop_adr != NULL, "bad register save location");
    return NULL;
  }
  oop r = *oop_adr;
  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r));
  return r;
}
oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
  if (reg->is_reg()) {
    // If it is passed in a register, it got spilled in the stub frame.
    return (oop *)reg_map->location(reg);
  } else {
    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
  }
}
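
// Illustrative sketch (not part of this change): resolving a VMReg therefore
// has two shapes. For a machine register the RegisterMap supplies the spill
// slot it recorded during the stack walk; for a stack slot the address is
// computed directly off the unextended sp:
//
//   reg->is_reg()   -> reg_map->location(reg)       // e.g. a register spilled by a stub
//   reg->is_stack() -> unextended_sp() + slot * 4   // VMRegImpl::stack_slot_size bytes per slot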
BasicLock* frame::get_native_monitor() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  return (BasicLock*) &sp()[byte_offset / wordSize];
}

oop frame::get_native_receiver() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_receiver_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  oop owner = ((oop*) sp())[byte_offset / wordSize];
  assert(Universe::heap()->is_in(owner), "bad receiver");
  return owner;
}
void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
  assert(map != NULL, "map must be set");
  if (map->include_argument_oops()) {
    // must collect argument oops, as nobody else is doing it
    Thread *thread = Thread::current();
    methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
    EntryFrameOopFinder finder(this, m->signature(), m->is_static());
    finder.arguments_do(f);
  }
  // Traverse the Handle Block saved in the entry frame
  entry_frame_call_wrapper()->oops_do(f);
}
void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
  // simulate GC crash here to dump java thread in error report
  if (CrashGCForDumpingJavaThread) {
    char *t = NULL;
    *t = 'c';
  }
#endif
  if (is_interpreted_frame()) {
    oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
  } else if (is_entry_frame()) {
    oops_entry_do(f, map);
  } else if (CodeCache::contains(pc())) {
    oops_code_blob_do(f, cf, map);
#ifdef SHARK
  } else if (is_fake_stub_frame()) {
    // nothing to do
#endif // SHARK
  } else {
    ShouldNotReachHere();
  }
}
void frame::nmethods_do(CodeBlobClosure* cf) {
  if (_cb != NULL && _cb->is_nmethod()) {
    cf->do_code_blob(_cb);
  }
}
// call f() on the interpreted Method*s in the stack.
// Have to walk the entire code cache for the compiled frames. Yuck.
void frame::metadata_do(void f(Metadata*)) {
  if (_cb != NULL && Interpreter::contains(pc())) {
    Method* m = this->interpreter_frame_method();
    assert(m != NULL, "huh?");
    f(m);
  }
}
void frame::gc_prologue() {
  if (is_interpreted_frame()) {
    // set bcx to bci to make the frame position independent of the Method* during GC
    interpreter_frame_set_bcx(interpreter_frame_bci());
  }
}

void frame::gc_epilogue() {
  if (is_interpreted_frame()) {
    // set bcx back to bcp for the interpreter
    interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
  }
  // call processor specific epilog function
  pd_gc_epilog();
}
# ifdef ENABLE_ZAP_DEAD_LOCALS

void frame::CheckValueClosure::do_oop(oop* p) {
  if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
    warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckValueClosure frame::_check_value;

void frame::CheckOopClosure::do_oop(oop* p) {
  if (*p != NULL && !(*p)->is_oop()) {
    warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckOopClosure frame::_check_oop;

void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}

void frame::ZapDeadClosure::do_oop(oop* p) {
  if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
  // Need cast because on _LP64 the conversion to oop is ambiguous. Constant
  // can be either long or int.
  *p = (oop)(int)0xbabebabe;
}
frame::ZapDeadClosure frame::_zap_dead;

void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
       if (is_entry_frame      ()) zap_dead_entry_locals      (thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame   ()) zap_dead_compiled_locals   (thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}

void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Method* m = interpreter_frame_method();
  int bci   = interpreter_frame_bci();

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  // process dynamic part
  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                    &_check_value);
  InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
                                    &_check_oop  );
  InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
                                    &_zap_dead   );

  // get frame map
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_all(&oop_blk, &value_blk, &dead_blk);
}

void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {

  ResourceMark rm(thread);
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
  }
}

void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}

void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}

# endif // ENABLE_ZAP_DEAD_LOCALS
void frame::verify(const RegisterMap* map) {
  // for now make sure receiver type is correct
  if (is_interpreted_frame()) {
    Method* method = interpreter_frame_method();
    guarantee(method->is_method(), "method is wrong in frame::verify");
    if (!method->is_static()) {
      // fetch the receiver
      oop* p = (oop*) interpreter_frame_local_at(0);
      // make sure we have the right receiver type
    }
  }
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}
#ifdef ASSERT
bool frame::verify_return_pc(address x) {
  if (StubRoutines::returns_to_call_stub(x)) {
    return true;
  }
  if (CodeCache::contains(x)) {
    return true;
  }
  if (Interpreter::contains(x)) {
    return true;
  }
  return false;
}
#endif
#ifdef ASSERT
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // verify that the value is in the right part of the frame
  address low_mark  = (address) interpreter_frame_monitor_end();
  address high_mark = (address) interpreter_frame_monitor_begin();
  address current   = (address) value;

  const int monitor_size = frame::interpreter_frame_monitor_size();
  guarantee((high_mark - current) % monitor_size == 0, "Misaligned top of BasicObjectLock*");
  guarantee(high_mark > current, "Current BasicObjectLock* higher than high_mark");

  guarantee((current - low_mark) % monitor_size == 0, "Misaligned bottom of BasicObjectLock*");
  guarantee(current >= low_mark, "Current BasicObjectLock* below low_mark");
}
#endif
#ifndef PRODUCT

void frame::describe(FrameValues& values, int frame_no) {
  // boundaries: sp and the 'real' frame pointer
  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()

  // print frame info at the highest boundary
  intptr_t* info_address = MAX2(sp(), frame_pointer);

  if (info_address != frame_pointer) {
    // print frame_pointer explicitly if not marked by the frame info
    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
  }

  if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
    // Label values common to most frames
    values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
  }

  if (is_interpreted_frame()) {
    Method* m = interpreter_frame_method();
    int bci = interpreter_frame_bci();

    // Label the method and current bci
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
    values.describe(-1, info_address,
                    err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
    if (m->max_locals() > 0) {
      intptr_t* l0 = interpreter_frame_local_at(0);
      intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
      values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
      // Report each local and mark as owned by this frame
      for (int l = 0; l < m->max_locals(); l++) {
        intptr_t* l0 = interpreter_frame_local_at(l);
        values.describe(frame_no, l0, err_msg("local %d", l));
      }
    }

    // Compute the actual expression stack size
    InterpreterOopMap mask;
    OopMapCache::compute_one_oop_map(m, bci, &mask);
    intptr_t* tos = NULL;
    // Report each stack element and mark as owned by this frame
    for (int e = 0; e < mask.expression_stack_size(); e++) {
      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
                      err_msg("stack %d", e));
    }
    if (tos != NULL) {
      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
    }
    if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string(),
                                       (_deopt_state == is_deoptimized) ?
                                       " (deoptimized)" :
                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
                    2);
  } else if (is_native_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
  } else {
    // provide default info if not handled before
    char *info = (char *) "special frame";
    if ((_cb != NULL) &&
        (_cb->name() != NULL)) {
      info = (char *)_cb->name();
    }
    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
  }

  // platform dependent additional data
  describe_pd(values, frame_no);
}

#endif
//-----------------------------------------------------------------------------------
// StackFrameStream implementation

StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
  assert(thread->has_last_Java_frame(), "sanity check");
  _fr = thread->last_frame();
  _is_done = false;
}
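
// Illustrative sketch (not part of this change): StackFrameStream is the
// usual convenience wrapper over the frame/RegisterMap walk above, e.g.
//
//   for (StackFrameStream fst(thread, true); !fst.is_done(); fst.next()) {
//     fst.current()->print_value_on(tty, thread);
//   }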
#ifndef PRODUCT

void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
  FrameValue fv;
  fv.location = location;
  fv.owner = owner;
  fv.priority = priority;
  fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
  strcpy(fv.description, description);
  _values.append(fv);
}
#ifdef ASSERT
void FrameValues::validate() {
  _values.sort(compare);
  bool error = false;
  FrameValue prev;
  prev.owner = -1;
  for (int i = _values.length() - 1; i >= 0; i--) {
    FrameValue fv = _values.at(i);
    if (fv.owner == -1) continue;
    if (prev.owner == -1) {
      prev = fv;
      continue;
    }
    if (prev.location == fv.location) {
      if (fv.owner != prev.owner) {
        tty->print_cr("overlapping storage");
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
        error = true;
      }
    } else {
      prev = fv;
    }
  }
  assert(!error, "invalid layout");
}
#endif // ASSERT
void FrameValues::print(JavaThread* thread) {
  _values.sort(compare);

  // Sometimes values like the fp can be invalid values if the
  // register map wasn't updated during the walk. Trim out values
  // that aren't actually in the stack of the thread.
  int min_index = 0;
  int max_index = _values.length() - 1;
  intptr_t* v0 = _values.at(min_index).location;
  intptr_t* v1 = _values.at(max_index).location;

  if (thread == Thread::current()) {
    while (!thread->is_in_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->is_in_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  } else {
    while (!thread->on_local_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->on_local_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  }
  intptr_t* min = MIN2(v0, v1);
  intptr_t* max = MAX2(v0, v1);
  intptr_t* cur = max;
  intptr_t* last = NULL;
  for (int i = max_index; i >= min_index; i--) {
    FrameValue fv = _values.at(i);
    while (cur > fv.location) {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
      cur--;
    }
    if (last == fv.location) {
      const char* spacer = " " LP64_ONLY("        ");
      tty->print_cr(" %s  %s %s", spacer, spacer, fv.description);
    } else {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
      last = fv.location;
      cur--;
    }
  }
}

#endif // ndef PRODUCT