1 /*
2 * Copyright 1997-2010 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 # include "incls/_precompiled.incl"
26 # include "incls/_frame.cpp.incl"
28 RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
29 _thread = thread;
30 _update_map = update_map;
31 clear();
32 debug_only(_update_for_id = NULL;)
33 #ifndef PRODUCT
34 for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
35 #endif /* PRODUCT */
36 }
38 RegisterMap::RegisterMap(const RegisterMap* map) {
39 assert(map != this, "bad initialization parameter");
40 assert(map != NULL, "RegisterMap must be present");
41 _thread = map->thread();
42 _update_map = map->update_map();
43 _include_argument_oops = map->include_argument_oops();
44 debug_only(_update_for_id = map->_update_for_id;)
45 pd_initialize_from(map);
46 if (update_map()) {
47 for(int i = 0; i < location_valid_size; i++) {
48 LocationValidType bits = !update_map() ? 0 : map->_location_valid[i];
49 _location_valid[i] = bits;
50 // for whichever bits are set, pull in the corresponding map->_location
51 int j = i*location_valid_type_size;
52 while (bits != 0) {
53 if ((bits & 1) != 0) {
54 assert(0 <= j && j < reg_count, "range check");
55 _location[j] = map->_location[j];
56 }
57 bits >>= 1;
58 j += 1;
59 }
60 }
61 }
62 }
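// Illustrative note (not part of the original file): _location_valid is a bitmask over
// register indices, location_valid_type_size bits per array element, and the copy loop
// above pulls in _location[j] only for the set bits. A minimal sketch of the same scan,
// assuming a 32-bit LocationValidType:
//
//   uint32_t bits = map->_location_valid[i];            // word i of the valid mask
//   for (int j = i * 32; bits != 0; bits >>= 1, j++) {
//     if (bits & 1) _location[j] = map->_location[j];   // bit k of word i => register i*32+k
//   }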
64 void RegisterMap::clear() {
65 set_include_argument_oops(true);
66 if (_update_map) {
67 for(int i = 0; i < location_valid_size; i++) {
68 _location_valid[i] = 0;
69 }
70 pd_clear();
71 } else {
72 pd_initialize();
73 }
74 }
76 #ifndef PRODUCT
78 void RegisterMap::print_on(outputStream* st) const {
79 st->print_cr("Register map");
80 for(int i = 0; i < reg_count; i++) {
82 VMReg r = VMRegImpl::as_VMReg(i);
83 intptr_t* src = (intptr_t*) location(r);
84 if (src != NULL) {
86 r->print_on(st);
87 st->print(" [" INTPTR_FORMAT "] = ", src);
88 if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
89 st->print_cr("<misaligned>");
90 } else {
91 st->print_cr(INTPTR_FORMAT, *src);
92 }
93 }
94 }
95 }
97 void RegisterMap::print() const {
98 print_on(tty);
99 }
101 #endif
102 // This returns the pc that you would see in the debugger, not the idealized
103 // value in the frame object. It undoes the magic conversion that happens for
104 // deoptimized frames. In addition it makes the value what the hardware
105 // would want to see in the native frame. The only user (at this point)
106 // is deoptimization, and likely no one else should ever use it.
108 address frame::raw_pc() const {
109 if (is_deoptimized_frame()) {
110 nmethod* nm = cb()->as_nmethod_or_null();
111 if (nm->is_method_handle_return(pc()))
112 return nm->deopt_mh_handler_begin() - pc_return_offset;
113 else
114 return nm->deopt_handler_begin() - pc_return_offset;
115 } else {
116 return (pc() - pc_return_offset);
117 }
118 }
120 // Change the pc in a frame object. This does not change the actual pc in
121 // the actual frame. To do that use patch_pc.
122 //
123 void frame::set_pc(address newpc ) {
124 #ifdef ASSERT
125 if (_cb != NULL && _cb->is_nmethod()) {
126 assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
127 }
128 #endif // ASSERT
130 // Unsafe to use the is_deoptimized tester after changing pc
131 _deopt_state = unknown;
132 _pc = newpc;
133 _cb = CodeCache::find_blob_unsafe(_pc);
135 }
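// Illustrative note (not part of the original file): set_pc() only updates this frame
// object (and re-derives _cb); to rewrite the pc word stored in the activation on the
// stack, callers use the platform-specific patch_pc(), as deoptimize() does further below:
//
//   nm->set_original_pc(this, pc());   // remember the pc being replaced
//   patch_pc(thread, deopt);           // rewrite the saved pc in the real frame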
137 // type testers
138 bool frame::is_deoptimized_frame() const {
139 assert(_deopt_state != unknown, "not answerable");
140 return _deopt_state == is_deoptimized;
141 }
143 bool frame::is_native_frame() const {
144 return (_cb != NULL &&
145 _cb->is_nmethod() &&
146 ((nmethod*)_cb)->is_native_method());
147 }
149 bool frame::is_java_frame() const {
150 if (is_interpreted_frame()) return true;
151 if (is_compiled_frame()) return true;
152 return false;
153 }
156 bool frame::is_compiled_frame() const {
157 if (_cb != NULL &&
158 _cb->is_nmethod() &&
159 ((nmethod*)_cb)->is_java_method()) {
160 return true;
161 }
162 return false;
163 }
166 bool frame::is_runtime_frame() const {
167 return (_cb != NULL && _cb->is_runtime_stub());
168 }
170 bool frame::is_safepoint_blob_frame() const {
171 return (_cb != NULL && _cb->is_safepoint_stub());
172 }
174 // testers
176 bool frame::is_first_java_frame() const {
177 RegisterMap map(JavaThread::current(), false); // No update
178 frame s;
179 for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map));
180 return s.is_first_frame();
181 }
184 bool frame::entry_frame_is_first() const {
185 return entry_frame_call_wrapper()->anchor()->last_Java_sp() == NULL;
186 }
189 bool frame::should_be_deoptimized() const {
190 if (_deopt_state == is_deoptimized ||
191 !is_compiled_frame() ) return false;
192 assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
193 nmethod* nm = (nmethod *)_cb;
194 if (TraceDependencies) {
195 tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
196 nm->print_value_on(tty);
197 tty->cr();
198 }
200 if( !nm->is_marked_for_deoptimization() )
201 return false;
203 // If at the return point, then the frame has already been popped, and
204 // only the return needs to be executed. Don't deoptimize here.
205 return !nm->is_at_poll_return(pc());
206 }
208 bool frame::can_be_deoptimized() const {
209 if (!is_compiled_frame()) return false;
210 nmethod* nm = (nmethod*)_cb;
212 if( !nm->can_be_deoptimized() )
213 return false;
215 return !nm->is_at_poll_return(pc());
216 }
218 void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
219 // Schedule deoptimization of an nmethod activation with this frame.
221 // Store the original pc before a patch (or request to self-deopt)
222 // in the published location of the frame.
224 assert(_cb != NULL && _cb->is_nmethod(), "must be");
225 nmethod* nm = (nmethod*)_cb;
227 // This is a fix for register window patching race
228 if (NeedsDeoptSuspend && !thread_is_known_safe) {
230 // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
231 // we could see the frame again and ask for it to be deoptimized since
232 // it might not move for a long time. That is harmless and we just ignore it.
233 if (id() == thread->must_deopt_id()) {
234 assert(thread->is_deopt_suspend(), "lost suspension");
235 return;
236 }
238 // We are at a safepoint so the target thread can only be
239 // in 4 states:
240 // blocked - no problem
241 // blocked_trans - no problem (i.e. could have woken up from blocked
242 // during a safepoint).
243 // native - register window pc patching race
244 // native_trans - momentary state
245 //
246 // We could just wait out a thread in native_trans to block.
247 // Then we'd have all the issues that the safepoint code has as to
248 // whether to spin or block. It isn't worth it. Just treat it like
249 // native and be done with it.
250 //
251 JavaThreadState state = thread->thread_state();
252 if (state == _thread_in_native || state == _thread_in_native_trans) {
253 // Since we are at a safepoint the target thread will stop itself
254 // before it can return to java as long as we remain at the safepoint.
255 // Therefore we can put an additional request for the thread to stop
256 // no matter what (like a suspend). This will cause the thread
257 // to notice it needs to do the deopt on its own once it leaves native.
258 //
259 // The only reason we must do this is because on machines with register
260 // windows we have a race with patching the return address and the
261 // window coming live as the thread returns to the Java code (but still
262 // in native mode) and then blocks. It is only this topmost frame
263 // that is at risk. So in truth we could add an additional check to
264 // see if this frame is one that is at risk.
265 RegisterMap map(thread, false);
266 frame at_risk = thread->last_frame().sender(&map);
267 if (id() == at_risk.id()) {
268 thread->set_must_deopt_id(id());
269 thread->set_deopt_suspend();
270 return;
271 }
272 }
273 } // NeedsDeoptSuspend
276 // If the call site is a MethodHandle call site use the MH deopt
277 // handler.
278 address deopt = nm->is_method_handle_return(pc()) ?
279 nm->deopt_mh_handler_begin() :
280 nm->deopt_handler_begin();
282 // Save the original pc before we patch in the new one
283 nm->set_original_pc(this, pc());
284 patch_pc(thread, deopt);
286 #ifdef ASSERT
287 {
288 RegisterMap map(thread, false);
289 frame check = thread->last_frame();
290 while (id() != check.id()) {
291 check = check.sender(&map);
292 }
293 assert(check.is_deoptimized_frame(), "missed deopt");
294 }
295 #endif // ASSERT
296 }
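// Usage sketch (assumption, not from this file): deoptimization code typically walks
// the stack to the activation it wants, checks that it is an nmethod frame that
// can_be_deoptimized(), and only then calls deoptimize(). target_id below is a
// hypothetical frame id used only for illustration:
//
//   RegisterMap map(thread, false);
//   frame fr = thread->last_frame();
//   while (fr.id() != target_id) fr = fr.sender(&map);              // target_id: hypothetical
//   if (fr.can_be_deoptimized()) fr.deoptimize(thread, false /* thread_is_known_safe */);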
298 frame frame::java_sender() const {
299 RegisterMap map(JavaThread::current(), false);
300 frame s;
301 for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ;
302 guarantee(s.is_java_frame(), "tried to get caller of first java frame");
303 return s;
304 }
306 frame frame::real_sender(RegisterMap* map) const {
307 frame result = sender(map);
308 while (result.is_runtime_frame()) {
309 result = result.sender(map);
310 }
311 return result;
312 }
314 // Note: called by profiler - NOT for current thread
315 frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
316 // If we don't recognize this frame, walk back up the stack until we do
317 RegisterMap map(thread, false);
318 frame first_java_frame = frame();
320 // Find the first Java frame on the stack starting with input frame
321 if (is_java_frame()) {
322 // top frame is compiled frame or deoptimized frame
323 first_java_frame = *this;
324 } else if (safe_for_sender(thread)) {
325 for (frame sender_frame = sender(&map);
326 sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
327 sender_frame = sender_frame.sender(&map)) {
328 if (sender_frame.is_java_frame()) {
329 first_java_frame = sender_frame;
330 break;
331 }
332 }
333 }
334 return first_java_frame;
335 }
337 // Interpreter frames
340 void frame::interpreter_frame_set_locals(intptr_t* locs) {
341 assert(is_interpreted_frame(), "Not an interpreted frame");
342 *interpreter_frame_locals_addr() = locs;
343 }
345 methodOop frame::interpreter_frame_method() const {
346 assert(is_interpreted_frame(), "interpreted frame expected");
347 methodOop m = *interpreter_frame_method_addr();
348 assert(m->is_perm(), "bad methodOop in interpreter frame");
349 assert(m->is_method(), "not a methodOop");
350 return m;
351 }
353 void frame::interpreter_frame_set_method(methodOop method) {
354 assert(is_interpreted_frame(), "interpreted frame expected");
355 *interpreter_frame_method_addr() = method;
356 }
358 void frame::interpreter_frame_set_bcx(intptr_t bcx) {
359 assert(is_interpreted_frame(), "Not an interpreted frame");
360 if (ProfileInterpreter) {
361 bool formerly_bci = is_bci(interpreter_frame_bcx());
362 bool is_now_bci = is_bci(bcx);
363 *interpreter_frame_bcx_addr() = bcx;
365 intptr_t mdx = interpreter_frame_mdx();
367 if (mdx != 0) {
368 if (formerly_bci) {
369 if (!is_now_bci) {
370 // The bcx was just converted from bci to bcp.
371 // Convert the mdx in parallel.
372 methodDataOop mdo = interpreter_frame_method()->method_data();
373 assert(mdo != NULL, "");
374 int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
375 address mdp = mdo->di_to_dp(mdi);
376 interpreter_frame_set_mdx((intptr_t)mdp);
377 }
378 } else {
379 if (is_now_bci) {
380 // The bcx was just converted from bcp to bci.
381 // Convert the mdx in parallel.
382 methodDataOop mdo = interpreter_frame_method()->method_data();
383 assert(mdo != NULL, "");
384 int mdi = mdo->dp_to_di((address)mdx);
385 interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
386 }
387 }
388 }
389 } else {
390 *interpreter_frame_bcx_addr() = bcx;
391 }
392 }
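// Illustrative note (not part of the original file): the bcx slot holds a small bytecode
// index (bci) while a GC is in progress (so it stays valid if the methodOop moves) and a
// raw bytecode pointer (bcp) otherwise; is_bci() tells the two apart. The mdx slot mirrors
// this: it holds either mdi + 1 (so a valid index is never confused with the 0 meaning
// "no mdp") or the raw mdp address. E.g. the conversion gc_epilogue() performs amounts to
//
//   interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));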
394 jint frame::interpreter_frame_bci() const {
395 assert(is_interpreted_frame(), "interpreted frame expected");
396 intptr_t bcx = interpreter_frame_bcx();
397 return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
398 }
400 void frame::interpreter_frame_set_bci(jint bci) {
401 assert(is_interpreted_frame(), "interpreted frame expected");
402 assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
403 interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
404 }
406 address frame::interpreter_frame_bcp() const {
407 assert(is_interpreted_frame(), "interpreted frame expected");
408 intptr_t bcx = interpreter_frame_bcx();
409 return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
410 }
412 void frame::interpreter_frame_set_bcp(address bcp) {
413 assert(is_interpreted_frame(), "interpreted frame expected");
414 assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
415 interpreter_frame_set_bcx((intptr_t)bcp);
416 }
418 void frame::interpreter_frame_set_mdx(intptr_t mdx) {
419 assert(is_interpreted_frame(), "Not an interpreted frame");
420 assert(ProfileInterpreter, "must be profiling interpreter");
421 *interpreter_frame_mdx_addr() = mdx;
422 }
424 address frame::interpreter_frame_mdp() const {
425 assert(ProfileInterpreter, "must be profiling interpreter");
426 assert(is_interpreted_frame(), "interpreted frame expected");
427 intptr_t bcx = interpreter_frame_bcx();
428 intptr_t mdx = interpreter_frame_mdx();
430 assert(!is_bci(bcx), "should not access mdp during GC");
431 return (address)mdx;
432 }
434 void frame::interpreter_frame_set_mdp(address mdp) {
435 assert(is_interpreted_frame(), "interpreted frame expected");
436 if (mdp == NULL) {
437 // Always allow the mdp to be cleared (and return early).
438 interpreter_frame_set_mdx((intptr_t)mdp); return;
439 }
440 intptr_t bcx = interpreter_frame_bcx();
441 assert(!is_bci(bcx), "should not set mdp during GC");
442 interpreter_frame_set_mdx((intptr_t)mdp);
443 }
445 BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
446 assert(is_interpreted_frame(), "Not an interpreted frame");
447 #ifdef ASSERT
448 interpreter_frame_verify_monitor(current);
449 #endif
450 BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
451 return next;
452 }
454 BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
455 assert(is_interpreted_frame(), "Not an interpreted frame");
456 #ifdef ASSERT
457 // // This verification needs to be checked before being enabled
458 // interpreter_frame_verify_monitor(current);
459 #endif
460 BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
461 return previous;
462 }
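// Illustrative note (not part of the original file): the monitor block lives between
// interpreter_frame_monitor_end() (lowest address, most recent monitor) and
// interpreter_frame_monitor_begin(), so callers in this file iterate it as
//
//   for (BasicObjectLock* current = interpreter_frame_monitor_end();
//        current < interpreter_frame_monitor_begin();
//        current = next_monitor_in_interpreter_frame(current)) {
//     /* visit current->obj() and current->lock() */
//   }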
464 // Interpreter locals and expression stack locations.
466 intptr_t* frame::interpreter_frame_local_at(int index) const {
467 const int n = Interpreter::local_offset_in_bytes(index)/wordSize;
468 return &((*interpreter_frame_locals_addr())[n]);
469 }
471 frame::Tag frame::interpreter_frame_local_tag(int index) const {
472 const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
473 return (Tag)(*interpreter_frame_locals_addr()) [n];
474 }
476 void frame::interpreter_frame_set_local_tag(int index, Tag tag) const {
477 const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize;
478 (*interpreter_frame_locals_addr())[n] = (intptr_t)tag;
479 }
481 intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const {
482 const int i = offset * interpreter_frame_expression_stack_direction();
483 const int n = ((i * Interpreter::stackElementSize()) +
484 Interpreter::value_offset_in_bytes())/wordSize;
485 return &(interpreter_frame_expression_stack()[n]);
486 }
488 frame::Tag frame::interpreter_frame_expression_stack_tag(jint offset) const {
489 const int i = offset * interpreter_frame_expression_stack_direction();
490 const int n = ((i * Interpreter::stackElementSize()) +
491 Interpreter::tag_offset_in_bytes())/wordSize;
492 return (Tag)(interpreter_frame_expression_stack()[n]);
493 }
495 void frame::interpreter_frame_set_expression_stack_tag(jint offset,
496 Tag tag) const {
497 const int i = offset * interpreter_frame_expression_stack_direction();
498 const int n = ((i * Interpreter::stackElementSize()) +
499 Interpreter::tag_offset_in_bytes())/wordSize;
500 interpreter_frame_expression_stack()[n] = (intptr_t)tag;
501 }
503 jint frame::interpreter_frame_expression_stack_size() const {
504 // Number of elements on the interpreter expression stack
505 // Callers should span by stackElementWords
506 int element_size = Interpreter::stackElementWords();
507 if (frame::interpreter_frame_expression_stack_direction() < 0) {
508 return (interpreter_frame_expression_stack() -
509 interpreter_frame_tos_address() + 1)/element_size;
510 } else {
511 return (interpreter_frame_tos_address() -
512 interpreter_frame_expression_stack() + 1)/element_size;
513 }
514 }
517 // (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp)
519 const char* frame::print_name() const {
520 if (is_native_frame()) return "Native";
521 if (is_interpreted_frame()) return "Interpreted";
522 if (is_compiled_frame()) {
523 if (is_deoptimized_frame()) return "Deoptimized";
524 return "Compiled";
525 }
526 if (sp() == NULL) return "Empty";
527 return "C";
528 }
530 void frame::print_value_on(outputStream* st, JavaThread *thread) const {
531 NOT_PRODUCT(address begin = pc()-40;)
532 NOT_PRODUCT(address end = NULL;)
534 st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
535 if (sp() != NULL)
536 st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());
538 if (StubRoutines::contains(pc())) {
539 st->print_cr(")");
540 st->print("(");
541 StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
542 st->print("~Stub::%s", desc->name());
543 NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
544 } else if (Interpreter::contains(pc())) {
545 st->print_cr(")");
546 st->print("(");
547 InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
548 if (desc != NULL) {
549 st->print("~");
550 desc->print();
551 NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
552 } else {
553 st->print("~interpreter");
554 }
555 }
556 st->print_cr(")");
558 if (_cb != NULL) {
559 st->print(" ");
560 _cb->print_value_on(st);
561 st->cr();
562 #ifndef PRODUCT
563 if (end == NULL) {
564 begin = _cb->instructions_begin();
565 end = _cb->instructions_end();
566 }
567 #endif
568 }
569 NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
570 }
573 void frame::print_on(outputStream* st) const {
574 print_value_on(st,NULL);
575 if (is_interpreted_frame()) {
576 interpreter_frame_print_on(st);
577 }
578 }
581 void frame::interpreter_frame_print_on(outputStream* st) const {
582 #ifndef PRODUCT
583 assert(is_interpreted_frame(), "Not an interpreted frame");
584 jint i;
585 for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) {
586 intptr_t x = *interpreter_frame_local_at(i);
587 st->print(" - local [" INTPTR_FORMAT "]", x);
588 if (TaggedStackInterpreter) {
589 Tag x = interpreter_frame_local_tag(i);
590 st->print(" - local tag [" INTPTR_FORMAT "]", x);
591 }
592 st->fill_to(23);
593 st->print_cr("; #%d", i);
594 }
595 for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) {
596 intptr_t x = *interpreter_frame_expression_stack_at(i);
597 st->print(" - stack [" INTPTR_FORMAT "]", x);
598 if (TaggedStackInterpreter) {
599 Tag x = interpreter_frame_expression_stack_tag(i);
600 st->print(" - stack tag [" INTPTR_FORMAT "]", x);
601 }
602 st->fill_to(23);
603 st->print_cr("; #%d", i);
604 }
605 // locks for synchronization
606 for (BasicObjectLock* current = interpreter_frame_monitor_end();
607 current < interpreter_frame_monitor_begin();
608 current = next_monitor_in_interpreter_frame(current)) {
609 st->print(" - obj [");
610 current->obj()->print_value_on(st);
611 st->print_cr("]");
612 st->print(" - lock [");
613 current->lock()->print_on(st);
614 st->print_cr("]");
615 }
616 // monitor
617 st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin());
618 // bcp
619 st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp());
620 st->fill_to(23);
621 st->print_cr("; @%d", interpreter_frame_bci());
622 // locals
623 st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0));
624 // method
625 st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method());
626 st->fill_to(23);
627 st->print("; ");
628 interpreter_frame_method()->print_name(st);
629 st->cr();
630 #endif
631 }
633 // Print whether the frame is in the VM or the OS, indicating a HotSpot problem.
634 // Otherwise, it's likely a bug in the native library that the Java code calls,
635 // hopefully indicating where to submit bugs.
636 static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
637 // C/C++ frame
638 bool in_vm = os::address_is_in_vm(pc);
639 st->print(in_vm ? "V" : "C");
641 int offset;
642 bool found;
644 // libname
645 found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
646 if (found) {
647 // skip directory names
648 const char *p1, *p2;
649 p1 = buf;
650 int len = (int)strlen(os::file_separator());
651 while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
652 st->print(" [%s+0x%x]", p1, offset);
653 } else {
654 st->print(" " PTR_FORMAT, pc);
655 }
657 // function name - os::dll_address_to_function_name() may return confusing
658 // names if pc is within jvm.dll or libjvm.so, because the JVM only has
659 // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
660 // only for native libraries.
661 if (!in_vm) {
662 found = os::dll_address_to_function_name(pc, buf, buflen, &offset);
664 if (found) {
665 st->print(" %s+0x%x", buf, offset);
666 }
667 }
668 }
670 // frame::print_on_error() is called by fatal error handler. Notice that we may
671 // crash inside this function if stack frame is corrupted. The fatal error
672 // handler can catch and handle the crash. Here we assume the frame is valid.
673 //
674 // First letter indicates type of the frame:
675 // J: Java frame (compiled)
676 // j: Java frame (interpreted)
677 // V: VM frame (C/C++)
678 // v: Other frames running VM generated code (e.g. stubs, adapters, etc.)
679 // C: C/C++ frame
680 //
681 // We don't need the detailed frame type as in frame::print_name(). "C"
682 // suggests the problem is in a user lib; everything else is likely a VM bug.
684 void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
685 if (_cb != NULL) {
686 if (Interpreter::contains(pc())) {
687 methodOop m = this->interpreter_frame_method();
688 if (m != NULL) {
689 m->name_and_sig_as_C_string(buf, buflen);
690 st->print("j %s", buf);
691 st->print("+%d", this->interpreter_frame_bci());
692 } else {
693 st->print("j " PTR_FORMAT, pc());
694 }
695 } else if (StubRoutines::contains(pc())) {
696 StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
697 if (desc != NULL) {
698 st->print("v ~StubRoutines::%s", desc->name());
699 } else {
700 st->print("v ~StubRoutines::" PTR_FORMAT, pc());
701 }
702 } else if (_cb->is_buffer_blob()) {
703 st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
704 } else if (_cb->is_nmethod()) {
705 methodOop m = ((nmethod *)_cb)->method();
706 if (m != NULL) {
707 m->name_and_sig_as_C_string(buf, buflen);
708 st->print("J %s", buf);
709 } else {
710 st->print("J " PTR_FORMAT, pc());
711 }
712 } else if (_cb->is_runtime_stub()) {
713 st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
714 } else if (_cb->is_deoptimization_stub()) {
715 st->print("v ~DeoptimizationBlob");
716 } else if (_cb->is_exception_stub()) {
717 st->print("v ~ExceptionBlob");
718 } else if (_cb->is_safepoint_stub()) {
719 st->print("v ~SafepointBlob");
720 } else {
721 st->print("v blob " PTR_FORMAT, pc());
722 }
723 } else {
724 print_C_frame(st, buf, buflen, pc());
725 }
726 }
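// Illustrative example (hypothetical output, not from this file): with the legend above,
// a fatal-error stack printed by print_on_error() might look like
//
//   J java.lang.String.hashCode()I
//   j java.util.HashMap.put(Ljava/lang/Object;Ljava/lang/Object;)Ljava/lang/Object;+20
//   v ~StubRoutines::call_stub
//   V [libjvm.so+0x123abc]
//   C [libc.so.6+0x3d2f0]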
729 /*
730 The interpreter_frame_expression_stack_at method in the case of SPARC needs the
731 max_stack value of the method in order to compute the expression stack address.
732 It uses the methodOop in order to get the max_stack value but during GC this
733 methodOop value saved on the frame is changed by reverse_and_push and hence cannot
734 be used. So we save the max_stack value in the FrameClosure object and pass it
735 down to the interpreter_frame_expression_stack_at method.
736 */
737 class InterpreterFrameClosure : public OffsetClosure {
738 private:
739 frame* _fr;
740 OopClosure* _f;
741 int _max_locals;
742 int _max_stack;
744 public:
745 InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
746 OopClosure* f) {
747 _fr = fr;
748 _max_locals = max_locals;
749 _max_stack = max_stack;
750 _f = f;
751 }
753 void offset_do(int offset) {
754 oop* addr;
755 if (offset < _max_locals) {
756 addr = (oop*) _fr->interpreter_frame_local_at(offset);
757 assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
758 _f->do_oop(addr);
759 } else {
760 addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
761 // In case of exceptions, the expression stack is invalid and the esp will be reset to express
762 // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
763 bool in_stack;
764 if (frame::interpreter_frame_expression_stack_direction() > 0) {
765 in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
766 } else {
767 in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
768 }
769 if (in_stack) {
770 _f->do_oop(addr);
771 }
772 }
773 }
775 int max_locals() { return _max_locals; }
776 frame* fr() { return _fr; }
777 };
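// Illustrative note (not part of the original file): the offsets passed to offset_do()
// cover the locals first (0 .. max_locals-1) and then the expression stack
// (max_locals .. max_locals+max_stack-1); e.g. with max_locals == 3, offset 5 denotes
// expression-stack slot 5 - 3 == 2, which is range-checked against the current tos
// before the closure is applied.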
780 class InterpretedArgumentOopFinder: public SignatureInfo {
781 private:
782 OopClosure* _f; // Closure to invoke
783 int _offset; // TOS-relative offset, decremented with each argument
784 bool _has_receiver; // true if the callee has a receiver
785 frame* _fr;
787 void set(int size, BasicType type) {
788 _offset -= size;
789 if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
790 }
792 void oop_offset_do() {
793 oop* addr;
794 addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
795 _f->do_oop(addr);
796 }
798 public:
799 InterpretedArgumentOopFinder(symbolHandle signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
800 // compute size of arguments
801 int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
802 assert(!fr->is_interpreted_frame() ||
803 args_size <= fr->interpreter_frame_expression_stack_size(),
804 "args cannot be on stack anymore");
805 // initialize InterpretedArgumentOopFinder
806 _f = f;
807 _fr = fr;
808 _offset = args_size;
809 }
811 void oops_do() {
812 if (_has_receiver) {
813 --_offset;
814 oop_offset_do();
815 }
816 iterate_parameters();
817 }
818 };
821 // Entry frame has following form (n arguments)
822 // +-----------+
823 // sp -> | last arg |
824 // +-----------+
825 // : ::: :
826 // +-----------+
827 // (sp+n)->| first arg|
828 // +-----------+
832 // visits and GC's all the arguments in entry frame
833 class EntryFrameOopFinder: public SignatureInfo {
834 private:
835 bool _is_static;
836 int _offset;
837 frame* _fr;
838 OopClosure* _f;
840 void set(int size, BasicType type) {
841 assert (_offset >= 0, "illegal offset");
842 if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
843 _offset -= size;
844 }
846 void oop_at_offset_do(int offset) {
847 assert (offset >= 0, "illegal offset");
848 oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
849 _f->do_oop(addr);
850 }
852 public:
853 EntryFrameOopFinder(frame* frame, symbolHandle signature, bool is_static) : SignatureInfo(signature) {
854 _f = NULL; // will be set later
855 _fr = frame;
856 _is_static = is_static;
857 _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
858 }
860 void arguments_do(OopClosure* f) {
861 _f = f;
862 if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
863 iterate_parameters();
864 }
866 };
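// Worked example (illustrative, not part of the original file): for an entry-frame call
// to a method with signature (Ljava/lang/Object;)Z on a receiver, ArgumentSizeComputer
// gives 1, so _offset starts at 0; arguments_do() first visits the receiver at offset 1
// and then the single parameter at offset 0, matching the diagram above (last argument
// at sp, first argument at sp+n).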
868 oop* frame::interpreter_callee_receiver_addr(symbolHandle signature) {
869 ArgumentSizeComputer asc(signature);
870 int size = asc.size();
871 return (oop *)interpreter_frame_tos_at(size);
872 }
875 void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
876 assert(is_interpreted_frame(), "Not an interpreted frame");
877 assert(map != NULL, "map must be set");
878 Thread *thread = Thread::current();
879 methodHandle m (thread, interpreter_frame_method());
880 jint bci = interpreter_frame_bci();
882 assert(Universe::heap()->is_in(m()), "must be valid oop");
883 assert(m->is_method(), "checking frame value");
884 assert((m->is_native() && bci == 0) || (!m->is_native() && bci >= 0 && bci < m->code_size()), "invalid bci value");
886 // Handle the monitor elements in the activation
887 for (
888 BasicObjectLock* current = interpreter_frame_monitor_end();
889 current < interpreter_frame_monitor_begin();
890 current = next_monitor_in_interpreter_frame(current)
891 ) {
892 #ifdef ASSERT
893 interpreter_frame_verify_monitor(current);
894 #endif
895 current->oops_do(f);
896 }
898 // process fixed part
899 f->do_oop((oop*)interpreter_frame_method_addr());
900 f->do_oop((oop*)interpreter_frame_cache_addr());
902 // Hmm what about the mdp?
903 #ifdef CC_INTERP
904 // An interpreter frame in the midst of a call has a methodOop within the
905 // object.
906 interpreterState istate = get_interpreterState();
907 if (istate->msg() == BytecodeInterpreter::call_method) {
908 f->do_oop((oop*)&istate->_result._to_call._callee);
909 }
911 #endif /* CC_INTERP */
913 if (m->is_native()) {
914 #ifdef CC_INTERP
915 f->do_oop((oop*)&istate->_oop_temp);
916 #else
917 f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
918 #endif /* CC_INTERP */
919 }
921 int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
923 symbolHandle signature;
924 bool has_receiver = false;
926 // Process a callee's arguments if we are at a call site
927 // (i.e., if we are at an invoke bytecode)
928 // This is used sometimes for calling into the VM, not for another
929 // interpreted or compiled frame.
930 if (!m->is_native()) {
931 Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
932 if (call != NULL) {
933 signature = symbolHandle(thread, call->signature());
934 has_receiver = call->has_receiver();
935 if (map->include_argument_oops() &&
936 interpreter_frame_expression_stack_size() > 0) {
937 ResourceMark rm(thread); // is this right ???
938 // we are at a call site & the expression stack is not empty
939 // => process callee's arguments
940 //
941 // Note: The expression stack can be empty if an exception
942 // occurred during method resolution/execution. In all
943 // cases we empty the expression stack completely be-
944 // fore handling the exception (the exception handling
945 // code in the interpreter calls a blocking runtime
946 // routine which can cause this code to be executed).
947 // (was bug gri 7/27/98)
948 oops_interpreted_arguments_do(signature, has_receiver, f);
949 }
950 }
951 }
953 if (TaggedStackInterpreter) {
954 // process locals & expression stack
955 InterpreterOopMap *mask = NULL;
956 #ifdef ASSERT
957 InterpreterOopMap oopmap_mask;
958 OopMapCache::compute_one_oop_map(m, bci, &oopmap_mask);
959 mask = &oopmap_mask;
960 #endif // ASSERT
961 oops_interpreted_locals_do(f, max_locals, mask);
962 oops_interpreted_expressions_do(f, signature, has_receiver,
963 m->max_stack(),
964 max_locals, mask);
965 } else {
966 InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);
968 // process locals & expression stack
969 InterpreterOopMap mask;
970 if (query_oop_map_cache) {
971 m->mask_for(bci, &mask);
972 } else {
973 OopMapCache::compute_one_oop_map(m, bci, &mask);
974 }
975 mask.iterate_oop(&blk);
976 }
977 }
980 void frame::oops_interpreted_locals_do(OopClosure *f,
981 int max_locals,
982 InterpreterOopMap *mask) {
983 // Process locals then interpreter expression stack
984 for (int i = 0; i < max_locals; i++ ) {
985 Tag tag = interpreter_frame_local_tag(i);
986 if (tag == TagReference) {
987 oop* addr = (oop*) interpreter_frame_local_at(i);
988 assert((intptr_t*)addr >= sp(), "must be inside the frame");
989 f->do_oop(addr);
990 #ifdef ASSERT
991 } else {
992 assert(tag == TagValue, "bad tag value for locals");
993 oop* p = (oop*) interpreter_frame_local_at(i);
994 // Not always true - too bad. May have dead oops without tags in locals.
995 // assert(*p == NULL || !(*p)->is_oop(), "oop not tagged on interpreter locals");
996 assert(*p == NULL || !mask->is_oop(i), "local oop map mismatch");
997 #endif // ASSERT
998 }
999 }
1000 }
1002 void frame::oops_interpreted_expressions_do(OopClosure *f,
1003 symbolHandle signature,
1004 bool has_receiver,
1005 int max_stack,
1006 int max_locals,
1007 InterpreterOopMap *mask) {
1008 // There is no stack no matter what the esp is pointing to (native methods
1009 // might look like the expression stack is nonempty).
1010 if (max_stack == 0) return;
1012 // Point the top of the expression stack above arguments to a call so
1013 // arguments aren't gc'ed as both stack values for callee and callee
1014 // arguments in callee's locals.
1015 int args_size = 0;
1016 if (!signature.is_null()) {
1017 args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
1018 }
1020 intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
1021 assert(args_size != 0 || tos_addr == interpreter_frame_tos_address(), "these are same");
1022 intptr_t *frst_expr = interpreter_frame_expression_stack_at(0);
1023 // In case of exceptions, the expression stack is invalid and the esp
1024 // will be reset to express this condition. Therefore, we call f only
1025 // if addr is 'inside' the stack (i.e., addr >= esp for Intel).
1026 bool in_stack;
1027 if (interpreter_frame_expression_stack_direction() > 0) {
1028 in_stack = (intptr_t*)frst_expr <= tos_addr;
1029 } else {
1030 in_stack = (intptr_t*)frst_expr >= tos_addr;
1031 }
1032 if (!in_stack) return;
1034 jint stack_size = interpreter_frame_expression_stack_size() - args_size;
1035 for (int j = 0; j < stack_size; j++) {
1036 Tag tag = interpreter_frame_expression_stack_tag(j);
1037 if (tag == TagReference) {
1038 oop *addr = (oop*) interpreter_frame_expression_stack_at(j);
1039 f->do_oop(addr);
1040 #ifdef ASSERT
1041 } else {
1042 assert(tag == TagValue, "bad tag value for stack element");
1043 oop *p = (oop*) interpreter_frame_expression_stack_at((j));
1044 assert(*p == NULL || !mask->is_oop(j+max_locals), "stack oop map mismatch");
1045 #endif // ASSERT
1046 }
1047 }
1048 }
1050 void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) {
1051 InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
1052 finder.oops_do();
1053 }
1055 void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
1056 assert(_cb != NULL, "sanity check");
1057 if (_cb->oop_maps() != NULL) {
1058 OopMapSet::oops_do(this, reg_map, f);
1060 // Preserve potential arguments for a callee. We handle this by dispatching
1061 // on the codeblob. For c2i, we do
1062 if (reg_map->include_argument_oops()) {
1063 _cb->preserve_callee_argument_oops(*this, reg_map, f);
1064 }
1065 }
1066 // In cases where perm gen is collected, GC will want to mark
1067 // oops referenced from nmethods active on thread stacks so as to
1068 // prevent them from being collected. However, this visit should be
1069 // restricted to certain phases of the collection only. The
1070 // closure decides how it wants nmethods to be traced.
1071 if (cf != NULL)
1072 cf->do_code_blob(_cb);
1073 }
1075 class CompiledArgumentOopFinder: public SignatureInfo {
1076 protected:
1077 OopClosure* _f;
1078 int _offset; // the current offset, incremented with each argument
1079 bool _has_receiver; // true if the callee has a receiver
1080 frame _fr;
1081 RegisterMap* _reg_map;
1082 int _arg_size;
1083 VMRegPair* _regs; // VMReg list of arguments
1085 void set(int size, BasicType type) {
1086 if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
1087 _offset += size;
1088 }
1090 virtual void handle_oop_offset() {
1091 // Extract low order register number from register array.
1092 // In LP64-land, the high-order bits are valid but unhelpful.
1093 VMReg reg = _regs[_offset].first();
1094 oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
1095 _f->do_oop(loc);
1096 }
1098 public:
1099 CompiledArgumentOopFinder(symbolHandle signature, bool has_receiver, OopClosure* f, frame fr, const RegisterMap* reg_map)
1100 : SignatureInfo(signature) {
1102 // initialize CompiledArgumentOopFinder
1103 _f = f;
1104 _offset = 0;
1105 _has_receiver = has_receiver;
1106 _fr = fr;
1107 _reg_map = (RegisterMap*)reg_map;
1108 _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
1110 int arg_size;
1111 _regs = SharedRuntime::find_callee_arguments(signature(), has_receiver, &arg_size);
1112 assert(arg_size == _arg_size, "wrong arg size");
1113 }
1115 void oops_do() {
1116 if (_has_receiver) {
1117 handle_oop_offset();
1118 _offset++;
1119 }
1120 iterate_parameters();
1121 }
1122 };
1124 void frame::oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) {
1125 ResourceMark rm;
1126 CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map);
1127 finder.oops_do();
1128 }
1131 // Get receiver out of callers frame, i.e. find parameter 0 in callers
1132 // frame. Consult ADLC for where parameter 0 is to be found. Then
1133 // check local reg_map for it being a callee-save register or argument
1134 // register, both of which are saved in the local frame. If not found
1135 // there, it must be an in-stack argument of the caller.
1136 // Note: caller.sp() points to callee-arguments
1137 oop frame::retrieve_receiver(RegisterMap* reg_map) {
1138 frame caller = *this;
1140 // First consult the ADLC on where it puts parameter 0 for this signature.
1141 VMReg reg = SharedRuntime::name_for_receiver();
1142 oop r = *caller.oopmapreg_to_location(reg, reg_map);
1143 assert( Universe::heap()->is_in_or_null(r), "bad receiver" );
1144 return r;
1145 }
1148 oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
1149 if(reg->is_reg()) {
1150 // If it is passed in a register, it got spilled in the stub frame.
1151 return (oop *)reg_map->location(reg);
1152 } else {
1153 int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
1154 return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
1155 }
1156 }
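// Illustrative note (not part of the original file): a VMReg that names a stack slot is
// resolved against this frame directly; e.g. with the usual 4-byte
// VMRegImpl::stack_slot_size, stack slot 6 maps to unextended_sp() + 24 bytes. A
// register-resident VMReg is instead looked up in the RegisterMap, where the stub
// frame spilled it.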
1158 BasicLock* frame::compiled_synchronized_native_monitor(nmethod* nm) {
1159 if (nm == NULL) {
1160 assert(_cb != NULL && _cb->is_nmethod(), "should not call this otherwise");
1161 nm = (nmethod*) _cb;
1162 assert(nm->method()->is_native() &&
1163 nm->method()->is_synchronized(),
1164 "should not call this otherwise");
1165 }
1166 int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_sp_offset());
1167 assert(byte_offset >= 0, "should not see invalid offset");
1168 return (BasicLock*) &sp()[byte_offset / wordSize];
1169 }
1171 oop frame::compiled_synchronized_native_monitor_owner(nmethod* nm) {
1172 if (nm == NULL) {
1173 assert(_cb != NULL && _cb->is_nmethod(), "should not call this otherwise");
1174 nm = (nmethod*) _cb;
1175 assert(nm->method()->is_native() &&
1176 nm->method()->is_synchronized(),
1177 "should not call this otherwise");
1178 }
1179 int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_owner_sp_offset());
1180 assert(byte_offset >= 0, "should not see invalid offset");
1181 oop owner = ((oop*) sp())[byte_offset / wordSize];
1182 assert( Universe::heap()->is_in(owner), "bad receiver" );
1183 return owner;
1184 }
1186 void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
1187 assert(map != NULL, "map must be set");
1188 if (map->include_argument_oops()) {
1189 // must collect argument oops, as nobody else is doing it
1190 Thread *thread = Thread::current();
1191 methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
1192 symbolHandle signature (thread, m->signature());
1193 EntryFrameOopFinder finder(this, signature, m->is_static());
1194 finder.arguments_do(f);
1195 }
1196 // Traverse the Handle Block saved in the entry frame
1197 entry_frame_call_wrapper()->oops_do(f);
1198 }
1201 void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
1202 #ifndef PRODUCT
1203 // simulate GC crash here to dump java thread in error report
1204 if (CrashGCForDumpingJavaThread) {
1205 char *t = NULL;
1206 *t = 'c';
1207 }
1208 #endif
1209 if (is_interpreted_frame()) {
1210 oops_interpreted_do(f, map, use_interpreter_oop_map_cache);
1211 } else if (is_entry_frame()) {
1212 oops_entry_do(f, map);
1213 } else if (CodeCache::contains(pc())) {
1214 oops_code_blob_do(f, cf, map);
1215 } else {
1216 ShouldNotReachHere();
1217 }
1218 }
1220 void frame::nmethods_do(CodeBlobClosure* cf) {
1221 if (_cb != NULL && _cb->is_nmethod()) {
1222 cf->do_code_blob(_cb);
1223 }
1224 }
1227 void frame::gc_prologue() {
1228 if (is_interpreted_frame()) {
1229 // set bcx to bci to become methodOop position independent during GC
1230 interpreter_frame_set_bcx(interpreter_frame_bci());
1231 }
1232 }
1235 void frame::gc_epilogue() {
1236 if (is_interpreted_frame()) {
1237 // set bcx back to bcp for interpreter
1238 interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
1239 }
1240 // call processor specific epilog function
1241 pd_gc_epilog();
1242 }
1245 # ifdef ENABLE_ZAP_DEAD_LOCALS
1247 void frame::CheckValueClosure::do_oop(oop* p) {
1248 if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
1249 warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
1250 }
1251 }
1252 frame::CheckValueClosure frame::_check_value;
1255 void frame::CheckOopClosure::do_oop(oop* p) {
1256 if (*p != NULL && !(*p)->is_oop()) {
1257 warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
1258 }
1259 }
1260 frame::CheckOopClosure frame::_check_oop;
1262 void frame::check_derived_oop(oop* base, oop* derived) {
1263 _check_oop.do_oop(base);
1264 }
1267 void frame::ZapDeadClosure::do_oop(oop* p) {
1268 if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
1269 // Need cast because on _LP64 the conversion to oop is ambiguous. Constant
1270 // can be either long or int.
1271 *p = (oop)(int)0xbabebabe;
1272 }
1273 frame::ZapDeadClosure frame::_zap_dead;
1275 void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
1276 assert(thread == Thread::current(), "need to synchronize to do this to another thread");
1277 // Tracing - part 1
1278 if (TraceZapDeadLocals) {
1279 ResourceMark rm(thread);
1280 tty->print_cr("--------------------------------------------------------------------------------");
1281 tty->print("Zapping dead locals in ");
1282 print_on(tty);
1283 tty->cr();
1284 }
1285 // Zapping
1286 if (is_entry_frame ()) zap_dead_entry_locals (thread, map);
1287 else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
1288 else if (is_compiled_frame()) zap_dead_compiled_locals (thread, map);
1290 else
1291 // could be is_runtime_frame
1292 // so remove error: ShouldNotReachHere();
1293 ;
1294 // Tracing - part 2
1295 if (TraceZapDeadLocals) {
1296 tty->cr();
1297 }
1298 }
1301 void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
1302 // get current interpreter 'pc'
1303 assert(is_interpreted_frame(), "Not an interpreted frame");
1304 methodOop m = interpreter_frame_method();
1305 int bci = interpreter_frame_bci();
1307 int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();
1309 if (TaggedStackInterpreter) {
1310 InterpreterOopMap *mask = NULL;
1311 #ifdef ASSERT
1312 InterpreterOopMap oopmap_mask;
1313 methodHandle method(thread, m);
1314 OopMapCache::compute_one_oop_map(method, bci, &oopmap_mask);
1315 mask = &oopmap_mask;
1316 #endif // ASSERT
1317 oops_interpreted_locals_do(&_check_oop, max_locals, mask);
1318 } else {
1319 // process dynamic part
1320 InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
1321 &_check_value);
1322 InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
1323 &_check_oop );
1324 InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
1325 &_zap_dead );
1327 // get frame map
1328 InterpreterOopMap mask;
1329 m->mask_for(bci, &mask);
1330 mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
1331 }
1332 }
1335 void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {
1337 ResourceMark rm(thread);
1338 assert(_cb != NULL, "sanity check");
1339 if (_cb->oop_maps() != NULL) {
1340 OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
1341 }
1342 }
1345 void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
1346 if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
1347 }
1350 void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
1351 if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
1352 }
1354 # endif // ENABLE_ZAP_DEAD_LOCALS
1356 void frame::verify(const RegisterMap* map) {
1357 // for now make sure receiver type is correct
1358 if (is_interpreted_frame()) {
1359 methodOop method = interpreter_frame_method();
1360 guarantee(method->is_method(), "method is wrong in frame::verify");
1361 if (!method->is_static()) {
1362 // fetch the receiver
1363 oop* p = (oop*) interpreter_frame_local_at(0);
1364 // make sure we have the right receiver type
1365 }
1366 }
1367 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
1368 oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false);
1369 }
1372 #ifdef ASSERT
1373 bool frame::verify_return_pc(address x) {
1374 if (StubRoutines::returns_to_call_stub(x)) {
1375 return true;
1376 }
1377 if (CodeCache::contains(x)) {
1378 return true;
1379 }
1380 if (Interpreter::contains(x)) {
1381 return true;
1382 }
1383 return false;
1384 }
1385 #endif
1388 #ifdef ASSERT
1389 void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
1390 assert(is_interpreted_frame(), "Not an interpreted frame");
1391 // verify that the value is in the right part of the frame
1392 address low_mark = (address) interpreter_frame_monitor_end();
1393 address high_mark = (address) interpreter_frame_monitor_begin();
1394 address current = (address) value;
1396 const int monitor_size = frame::interpreter_frame_monitor_size();
1397 guarantee((high_mark - current) % monitor_size == 0 , "Misaligned top of BasicObjectLock*");
1398 guarantee( high_mark > current , "Current BasicObjectLock* higher than high_mark");
1400 guarantee((current - low_mark) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
1401 guarantee( current >= low_mark , "Current BasicObjectLock* below low_mark");
1402 }
1403 #endif
1406 //-----------------------------------------------------------------------------------
1407 // StackFrameStream implementation
1409 StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
1410 assert(thread->has_last_Java_frame(), "sanity check");
1411 _fr = thread->last_frame();
1412 _is_done = false;
1413 }
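// Usage sketch (assumption, not from this file): the typical consumer of StackFrameStream
// walks every frame of a thread, e.g. when processing oops:
//
//   for (StackFrameStream fst(thread, true); !fst.is_done(); fst.next()) {
//     fst.current()->oops_do(f, cf, fst.register_map());
//   }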