1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/runtime/frame.cpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,1537 @@ 1.4 +/* 1.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +#include "precompiled.hpp" 1.29 +#include "compiler/abstractCompiler.hpp" 1.30 +#include "compiler/disassembler.hpp" 1.31 +#include "gc_interface/collectedHeap.inline.hpp" 1.32 +#include "interpreter/interpreter.hpp" 1.33 +#include "interpreter/oopMapCache.hpp" 1.34 +#include "memory/resourceArea.hpp" 1.35 +#include "memory/universe.inline.hpp" 1.36 +#include "oops/markOop.hpp" 1.37 +#include "oops/methodData.hpp" 1.38 +#include "oops/method.hpp" 1.39 +#include "oops/oop.inline.hpp" 1.40 +#include "oops/oop.inline2.hpp" 1.41 +#include "prims/methodHandles.hpp" 1.42 +#include "runtime/frame.inline.hpp" 1.43 +#include "runtime/handles.inline.hpp" 1.44 +#include "runtime/javaCalls.hpp" 1.45 +#include "runtime/monitorChunk.hpp" 1.46 +#include "runtime/sharedRuntime.hpp" 1.47 +#include "runtime/signature.hpp" 1.48 +#include "runtime/stubCodeGenerator.hpp" 1.49 +#include "runtime/stubRoutines.hpp" 1.50 +#include "utilities/decoder.hpp" 1.51 + 1.52 +#ifdef TARGET_ARCH_x86 1.53 +# include "nativeInst_x86.hpp" 1.54 +#endif 1.55 +#ifdef TARGET_ARCH_sparc 1.56 +# include "nativeInst_sparc.hpp" 1.57 +#endif 1.58 +#ifdef TARGET_ARCH_zero 1.59 +# include "nativeInst_zero.hpp" 1.60 +#endif 1.61 +#ifdef TARGET_ARCH_arm 1.62 +# include "nativeInst_arm.hpp" 1.63 +#endif 1.64 +#ifdef TARGET_ARCH_ppc 1.65 +# include "nativeInst_ppc.hpp" 1.66 +#endif 1.67 + 1.68 +PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC 1.69 + 1.70 +RegisterMap::RegisterMap(JavaThread *thread, bool update_map) { 1.71 + _thread = thread; 1.72 + _update_map = update_map; 1.73 + clear(); 1.74 + debug_only(_update_for_id = NULL;) 1.75 +#ifndef PRODUCT 1.76 + for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL; 1.77 +#endif /* PRODUCT */ 1.78 +} 1.79 + 1.80 +RegisterMap::RegisterMap(const RegisterMap* map) { 1.81 + assert(map != this, "bad initialization parameter"); 1.82 + assert(map != NULL, "RegisterMap must be present"); 1.83 + _thread = map->thread(); 1.84 + _update_map = 
map->update_map(); 1.85 + _include_argument_oops = map->include_argument_oops(); 1.86 + debug_only(_update_for_id = map->_update_for_id;) 1.87 + pd_initialize_from(map); 1.88 + if (update_map()) { 1.89 + for(int i = 0; i < location_valid_size; i++) { 1.90 + LocationValidType bits = !update_map() ? 0 : map->_location_valid[i]; 1.91 + _location_valid[i] = bits; 1.92 + // for whichever bits are set, pull in the corresponding map->_location 1.93 + int j = i*location_valid_type_size; 1.94 + while (bits != 0) { 1.95 + if ((bits & 1) != 0) { 1.96 + assert(0 <= j && j < reg_count, "range check"); 1.97 + _location[j] = map->_location[j]; 1.98 + } 1.99 + bits >>= 1; 1.100 + j += 1; 1.101 + } 1.102 + } 1.103 + } 1.104 +} 1.105 + 1.106 +void RegisterMap::clear() { 1.107 + set_include_argument_oops(true); 1.108 + if (_update_map) { 1.109 + for(int i = 0; i < location_valid_size; i++) { 1.110 + _location_valid[i] = 0; 1.111 + } 1.112 + pd_clear(); 1.113 + } else { 1.114 + pd_initialize(); 1.115 + } 1.116 +} 1.117 + 1.118 +#ifndef PRODUCT 1.119 + 1.120 +void RegisterMap::print_on(outputStream* st) const { 1.121 + st->print_cr("Register map"); 1.122 + for(int i = 0; i < reg_count; i++) { 1.123 + 1.124 + VMReg r = VMRegImpl::as_VMReg(i); 1.125 + intptr_t* src = (intptr_t*) location(r); 1.126 + if (src != NULL) { 1.127 + 1.128 + r->print_on(st); 1.129 + st->print(" [" INTPTR_FORMAT "] = ", src); 1.130 + if (((uintptr_t)src & (sizeof(*src)-1)) != 0) { 1.131 + st->print_cr("<misaligned>"); 1.132 + } else { 1.133 + st->print_cr(INTPTR_FORMAT, *src); 1.134 + } 1.135 + } 1.136 + } 1.137 +} 1.138 + 1.139 +void RegisterMap::print() const { 1.140 + print_on(tty); 1.141 +} 1.142 + 1.143 +#endif 1.144 +// This returns the pc that if you were in the debugger you'd see. Not 1.145 +// the idealized value in the frame object. This undoes the magic conversion 1.146 +// that happens for deoptimized frames. In addition it makes the value the 1.147 +// hardware would want to see in the native frame. 
The only user (at this point) 1.148 +// is deoptimization. It is likely that no one else should ever use it. 1.149 + 1.150 +address frame::raw_pc() const { 1.151 + if (is_deoptimized_frame()) { 1.152 + nmethod* nm = cb()->as_nmethod_or_null(); 1.153 + if (nm->is_method_handle_return(pc())) 1.154 + return nm->deopt_mh_handler_begin() - pc_return_offset; 1.155 + else 1.156 + return nm->deopt_handler_begin() - pc_return_offset; 1.157 + } else { 1.158 + return (pc() - pc_return_offset); 1.159 + } 1.160 +} 1.161 + 1.162 +// Change the pc in a frame object. This does not change the actual pc in 1.163 +// actual frame. To do that use patch_pc. 1.164 +// 1.165 +void frame::set_pc(address newpc ) { 1.166 +#ifdef ASSERT 1.167 + if (_cb != NULL && _cb->is_nmethod()) { 1.168 + assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation"); 1.169 + } 1.170 +#endif // ASSERT 1.171 + 1.172 + // Unsafe to use the is_deoptimized tester after changing pc 1.173 + _deopt_state = unknown; 1.174 + _pc = newpc; 1.175 + _cb = CodeCache::find_blob_unsafe(_pc); 1.176 + 1.177 +} 1.178 + 1.179 +// type testers 1.180 +bool frame::is_ignored_frame() const { 1.181 + return false; // FIXME: some LambdaForm frames should be ignored 1.182 +} 1.183 +bool frame::is_deoptimized_frame() const { 1.184 + assert(_deopt_state != unknown, "not answerable"); 1.185 + return _deopt_state == is_deoptimized; 1.186 +} 1.187 + 1.188 +bool frame::is_native_frame() const { 1.189 + return (_cb != NULL && 1.190 + _cb->is_nmethod() && 1.191 + ((nmethod*)_cb)->is_native_method()); 1.192 +} 1.193 + 1.194 +bool frame::is_java_frame() const { 1.195 + if (is_interpreted_frame()) return true; 1.196 + if (is_compiled_frame()) return true; 1.197 + return false; 1.198 +} 1.199 + 1.200 + 1.201 +bool frame::is_compiled_frame() const { 1.202 + if (_cb != NULL && 1.203 + _cb->is_nmethod() && 1.204 + ((nmethod*)_cb)->is_java_method()) { 1.205 + return true; 1.206 + } 1.207 + return false; 1.208 +} 1.209 + 1.210 + 1.211 +bool
frame::is_runtime_frame() const { 1.212 + return (_cb != NULL && _cb->is_runtime_stub()); 1.213 +} 1.214 + 1.215 +bool frame::is_safepoint_blob_frame() const { 1.216 + return (_cb != NULL && _cb->is_safepoint_stub()); 1.217 +} 1.218 + 1.219 +// testers 1.220 + 1.221 +bool frame::is_first_java_frame() const { 1.222 + RegisterMap map(JavaThread::current(), false); // No update 1.223 + frame s; 1.224 + for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)); 1.225 + return s.is_first_frame(); 1.226 +} 1.227 + 1.228 + 1.229 +bool frame::entry_frame_is_first() const { 1.230 + return entry_frame_call_wrapper()->is_first_frame(); 1.231 +} 1.232 + 1.233 +JavaCallWrapper* frame::entry_frame_call_wrapper_if_safe(JavaThread* thread) const { 1.234 + JavaCallWrapper** jcw = entry_frame_call_wrapper_addr(); 1.235 + address addr = (address) jcw; 1.236 + 1.237 + // addr must be within the usable part of the stack 1.238 + if (thread->is_in_usable_stack(addr)) { 1.239 + return *jcw; 1.240 + } 1.241 + 1.242 + return NULL; 1.243 +} 1.244 + 1.245 +bool frame::should_be_deoptimized() const { 1.246 + if (_deopt_state == is_deoptimized || 1.247 + !is_compiled_frame() ) return false; 1.248 + assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod"); 1.249 + nmethod* nm = (nmethod *)_cb; 1.250 + if (TraceDependencies) { 1.251 + tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false"); 1.252 + nm->print_value_on(tty); 1.253 + tty->cr(); 1.254 + } 1.255 + 1.256 + if( !nm->is_marked_for_deoptimization() ) 1.257 + return false; 1.258 + 1.259 + // If at the return point, then the frame has already been popped, and 1.260 + // only the return needs to be executed. Don't deoptimize here. 
1.261 + return !nm->is_at_poll_return(pc()); 1.262 +} 1.263 + 1.264 +bool frame::can_be_deoptimized() const { 1.265 + if (!is_compiled_frame()) return false; 1.266 + nmethod* nm = (nmethod*)_cb; 1.267 + 1.268 + if( !nm->can_be_deoptimized() ) 1.269 + return false; 1.270 + 1.271 + return !nm->is_at_poll_return(pc()); 1.272 +} 1.273 + 1.274 +void frame::deoptimize(JavaThread* thread) { 1.275 + // Schedule deoptimization of an nmethod activation with this frame. 1.276 + assert(_cb != NULL && _cb->is_nmethod(), "must be"); 1.277 + nmethod* nm = (nmethod*)_cb; 1.278 + 1.279 + // This is a fix for register window patching race 1.280 + if (NeedsDeoptSuspend && Thread::current() != thread) { 1.281 + assert(SafepointSynchronize::is_at_safepoint(), 1.282 + "patching other threads for deopt may only occur at a safepoint"); 1.283 + 1.284 + // It is possible especially with DeoptimizeALot/DeoptimizeRandom that 1.285 + // we could see the frame again and ask for it to be deoptimized since 1.286 + // it might move for a long time. That is harmless and we just ignore it. 1.287 + if (id() == thread->must_deopt_id()) { 1.288 + assert(thread->is_deopt_suspend(), "lost suspension"); 1.289 + return; 1.290 + } 1.291 + 1.292 + // We are at a safepoint so the target thread can only be 1.293 + // in 4 states: 1.294 + // blocked - no problem 1.295 + // blocked_trans - no problem (i.e. could have woken up from blocked 1.296 + // during a safepoint). 1.297 + // native - register window pc patching race 1.298 + // native_trans - momentary state 1.299 + // 1.300 + // We could just wait out a thread in native_trans to block. 1.301 + // Then we'd have all the issues that the safepoint code has as to 1.302 + // whether to spin or block. It isn't worth it. Just treat it like 1.303 + // native and be done with it. 
1.304 + // 1.305 + // Examine the state of the thread at the start of safepoint since 1.306 + // threads that were in native at the start of the safepoint could 1.307 + // come to a halt during the safepoint, changing the current value 1.308 + // of the safepoint_state. 1.309 + JavaThreadState state = thread->safepoint_state()->orig_thread_state(); 1.310 + if (state == _thread_in_native || state == _thread_in_native_trans) { 1.311 + // Since we are at a safepoint the target thread will stop itself 1.312 + // before it can return to java as long as we remain at the safepoint. 1.313 + // Therefore we can put an additional request for the thread to stop 1.314 + // no matter what (like a suspend). This will cause the thread 1.315 + // to notice it needs to do the deopt on its own once it leaves native. 1.316 + // 1.317 + // The only reason we must do this is because on machines with register 1.318 + // windows we have a race with patching the return address and the 1.319 + // window coming live as the thread returns to the Java code (but still 1.320 + // in native mode) and then blocks. It is only this top most frame 1.321 + // that is at risk. So in truth we could add an additional check to 1.322 + // see if this frame is one that is at risk. 1.323 + RegisterMap map(thread, false); 1.324 + frame at_risk = thread->last_frame().sender(&map); 1.325 + if (id() == at_risk.id()) { 1.326 + thread->set_must_deopt_id(id()); 1.327 + thread->set_deopt_suspend(); 1.328 + return; 1.329 + } 1.330 + } 1.331 + } // NeedsDeoptSuspend 1.332 + 1.333 + 1.334 + // If the call site is a MethodHandle call site use the MH deopt 1.335 + // handler. 1.336 + address deopt = nm->is_method_handle_return(pc()) ? 
1.337 + nm->deopt_mh_handler_begin() : 1.338 + nm->deopt_handler_begin(); 1.339 + 1.340 + // Save the original pc before we patch in the new one 1.341 + nm->set_original_pc(this, pc()); 1.342 + patch_pc(thread, deopt); 1.343 + 1.344 +#ifdef ASSERT 1.345 + { 1.346 + RegisterMap map(thread, false); 1.347 + frame check = thread->last_frame(); 1.348 + while (id() != check.id()) { 1.349 + check = check.sender(&map); 1.350 + } 1.351 + assert(check.is_deoptimized_frame(), "missed deopt"); 1.352 + } 1.353 +#endif // ASSERT 1.354 +} 1.355 + 1.356 +frame frame::java_sender() const { 1.357 + RegisterMap map(JavaThread::current(), false); 1.358 + frame s; 1.359 + for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ; 1.360 + guarantee(s.is_java_frame(), "tried to get caller of first java frame"); 1.361 + return s; 1.362 +} 1.363 + 1.364 +frame frame::real_sender(RegisterMap* map) const { 1.365 + frame result = sender(map); 1.366 + while (result.is_runtime_frame() || 1.367 + result.is_ignored_frame()) { 1.368 + result = result.sender(map); 1.369 + } 1.370 + return result; 1.371 +} 1.372 + 1.373 +// Note: called by profiler - NOT for current thread 1.374 +frame frame::profile_find_Java_sender_frame(JavaThread *thread) { 1.375 +// If we don't recognize this frame, walk back up the stack until we do 1.376 + RegisterMap map(thread, false); 1.377 + frame first_java_frame = frame(); 1.378 + 1.379 + // Find the first Java frame on the stack starting with input frame 1.380 + if (is_java_frame()) { 1.381 + // top frame is compiled frame or deoptimized frame 1.382 + first_java_frame = *this; 1.383 + } else if (safe_for_sender(thread)) { 1.384 + for (frame sender_frame = sender(&map); 1.385 + sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame(); 1.386 + sender_frame = sender_frame.sender(&map)) { 1.387 + if (sender_frame.is_java_frame()) { 1.388 + first_java_frame = sender_frame; 1.389 + break; 1.390 + } 1.391 + } 1.392 + } 1.393 + 
return first_java_frame; 1.394 +} 1.395 + 1.396 +// Interpreter frames 1.397 + 1.398 + 1.399 +void frame::interpreter_frame_set_locals(intptr_t* locs) { 1.400 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.401 + *interpreter_frame_locals_addr() = locs; 1.402 +} 1.403 + 1.404 +Method* frame::interpreter_frame_method() const { 1.405 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.406 + Method* m = *interpreter_frame_method_addr(); 1.407 + assert(m->is_method(), "not a Method*"); 1.408 + return m; 1.409 +} 1.410 + 1.411 +void frame::interpreter_frame_set_method(Method* method) { 1.412 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.413 + *interpreter_frame_method_addr() = method; 1.414 +} 1.415 + 1.416 +void frame::interpreter_frame_set_bcx(intptr_t bcx) { 1.417 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.418 + if (ProfileInterpreter) { 1.419 + bool formerly_bci = is_bci(interpreter_frame_bcx()); 1.420 + bool is_now_bci = is_bci(bcx); 1.421 + *interpreter_frame_bcx_addr() = bcx; 1.422 + 1.423 + intptr_t mdx = interpreter_frame_mdx(); 1.424 + 1.425 + if (mdx != 0) { 1.426 + if (formerly_bci) { 1.427 + if (!is_now_bci) { 1.428 + // The bcx was just converted from bci to bcp. 1.429 + // Convert the mdx in parallel. 1.430 + MethodData* mdo = interpreter_frame_method()->method_data(); 1.431 + assert(mdo != NULL, ""); 1.432 + int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one. 1.433 + address mdp = mdo->di_to_dp(mdi); 1.434 + interpreter_frame_set_mdx((intptr_t)mdp); 1.435 + } 1.436 + } else { 1.437 + if (is_now_bci) { 1.438 + // The bcx was just converted from bcp to bci. 1.439 + // Convert the mdx in parallel. 1.440 + MethodData* mdo = interpreter_frame_method()->method_data(); 1.441 + assert(mdo != NULL, ""); 1.442 + int mdi = mdo->dp_to_di((address)mdx); 1.443 + interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0. 
1.444 + } 1.445 + } 1.446 + } 1.447 + } else { 1.448 + *interpreter_frame_bcx_addr() = bcx; 1.449 + } 1.450 +} 1.451 + 1.452 +jint frame::interpreter_frame_bci() const { 1.453 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.454 + intptr_t bcx = interpreter_frame_bcx(); 1.455 + return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx); 1.456 +} 1.457 + 1.458 +void frame::interpreter_frame_set_bci(jint bci) { 1.459 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.460 + assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC"); 1.461 + interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci)); 1.462 +} 1.463 + 1.464 +address frame::interpreter_frame_bcp() const { 1.465 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.466 + intptr_t bcx = interpreter_frame_bcx(); 1.467 + return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx; 1.468 +} 1.469 + 1.470 +void frame::interpreter_frame_set_bcp(address bcp) { 1.471 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.472 + assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC"); 1.473 + interpreter_frame_set_bcx((intptr_t)bcp); 1.474 +} 1.475 + 1.476 +void frame::interpreter_frame_set_mdx(intptr_t mdx) { 1.477 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.478 + assert(ProfileInterpreter, "must be profiling interpreter"); 1.479 + *interpreter_frame_mdx_addr() = mdx; 1.480 +} 1.481 + 1.482 +address frame::interpreter_frame_mdp() const { 1.483 + assert(ProfileInterpreter, "must be profiling interpreter"); 1.484 + assert(is_interpreted_frame(), "interpreted frame expected"); 1.485 + intptr_t bcx = interpreter_frame_bcx(); 1.486 + intptr_t mdx = interpreter_frame_mdx(); 1.487 + 1.488 + assert(!is_bci(bcx), "should not access mdp during GC"); 1.489 + return (address)mdx; 1.490 +} 1.491 + 1.492 +void frame::interpreter_frame_set_mdp(address mdp) { 1.493 + 
assert(is_interpreted_frame(), "interpreted frame expected"); 1.494 + if (mdp == NULL) { 1.495 + // Always allow the mdp to be cleared. 1.496 + interpreter_frame_set_mdx((intptr_t)mdp); 1.497 + } 1.498 + intptr_t bcx = interpreter_frame_bcx(); 1.499 + assert(!is_bci(bcx), "should not set mdp during GC"); 1.500 + interpreter_frame_set_mdx((intptr_t)mdp); 1.501 +} 1.502 + 1.503 +BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const { 1.504 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.505 +#ifdef ASSERT 1.506 + interpreter_frame_verify_monitor(current); 1.507 +#endif 1.508 + BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size()); 1.509 + return next; 1.510 +} 1.511 + 1.512 +BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const { 1.513 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.514 +#ifdef ASSERT 1.515 +// // This verification needs to be checked before being enabled 1.516 +// interpreter_frame_verify_monitor(current); 1.517 +#endif 1.518 + BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size()); 1.519 + return previous; 1.520 +} 1.521 + 1.522 +// Interpreter locals and expression stack locations. 
1.523 + 1.524 +intptr_t* frame::interpreter_frame_local_at(int index) const { 1.525 + const int n = Interpreter::local_offset_in_bytes(index)/wordSize; 1.526 + return &((*interpreter_frame_locals_addr())[n]); 1.527 +} 1.528 + 1.529 +intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const { 1.530 + const int i = offset * interpreter_frame_expression_stack_direction(); 1.531 + const int n = i * Interpreter::stackElementWords; 1.532 + return &(interpreter_frame_expression_stack()[n]); 1.533 +} 1.534 + 1.535 +jint frame::interpreter_frame_expression_stack_size() const { 1.536 + // Number of elements on the interpreter expression stack 1.537 + // Callers should span by stackElementWords 1.538 + int element_size = Interpreter::stackElementWords; 1.539 + size_t stack_size = 0; 1.540 + if (frame::interpreter_frame_expression_stack_direction() < 0) { 1.541 + stack_size = (interpreter_frame_expression_stack() - 1.542 + interpreter_frame_tos_address() + 1)/element_size; 1.543 + } else { 1.544 + stack_size = (interpreter_frame_tos_address() - 1.545 + interpreter_frame_expression_stack() + 1)/element_size; 1.546 + } 1.547 + assert( stack_size <= (size_t)max_jint, "stack size too big"); 1.548 + return ((jint)stack_size); 1.549 +} 1.550 + 1.551 + 1.552 +// (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp) 1.553 + 1.554 +const char* frame::print_name() const { 1.555 + if (is_native_frame()) return "Native"; 1.556 + if (is_interpreted_frame()) return "Interpreted"; 1.557 + if (is_compiled_frame()) { 1.558 + if (is_deoptimized_frame()) return "Deoptimized"; 1.559 + return "Compiled"; 1.560 + } 1.561 + if (sp() == NULL) return "Empty"; 1.562 + return "C"; 1.563 +} 1.564 + 1.565 +void frame::print_value_on(outputStream* st, JavaThread *thread) const { 1.566 + NOT_PRODUCT(address begin = pc()-40;) 1.567 + NOT_PRODUCT(address end = NULL;) 1.568 + 1.569 + st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), 
unextended_sp()); 1.570 + if (sp() != NULL) 1.571 + st->print(", fp=" INTPTR_FORMAT ", real_fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), real_fp(), pc()); 1.572 + 1.573 + if (StubRoutines::contains(pc())) { 1.574 + st->print_cr(")"); 1.575 + st->print("("); 1.576 + StubCodeDesc* desc = StubCodeDesc::desc_for(pc()); 1.577 + st->print("~Stub::%s", desc->name()); 1.578 + NOT_PRODUCT(begin = desc->begin(); end = desc->end();) 1.579 + } else if (Interpreter::contains(pc())) { 1.580 + st->print_cr(")"); 1.581 + st->print("("); 1.582 + InterpreterCodelet* desc = Interpreter::codelet_containing(pc()); 1.583 + if (desc != NULL) { 1.584 + st->print("~"); 1.585 + desc->print_on(st); 1.586 + NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();) 1.587 + } else { 1.588 + st->print("~interpreter"); 1.589 + } 1.590 + } 1.591 + st->print_cr(")"); 1.592 + 1.593 + if (_cb != NULL) { 1.594 + st->print(" "); 1.595 + _cb->print_value_on(st); 1.596 + st->cr(); 1.597 +#ifndef PRODUCT 1.598 + if (end == NULL) { 1.599 + begin = _cb->code_begin(); 1.600 + end = _cb->code_end(); 1.601 + } 1.602 +#endif 1.603 + } 1.604 + NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);) 1.605 +} 1.606 + 1.607 + 1.608 +void frame::print_on(outputStream* st) const { 1.609 + print_value_on(st,NULL); 1.610 + if (is_interpreted_frame()) { 1.611 + interpreter_frame_print_on(st); 1.612 + } 1.613 +} 1.614 + 1.615 + 1.616 +void frame::interpreter_frame_print_on(outputStream* st) const { 1.617 +#ifndef PRODUCT 1.618 + assert(is_interpreted_frame(), "Not an interpreted frame"); 1.619 + jint i; 1.620 + for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) { 1.621 + intptr_t x = *interpreter_frame_local_at(i); 1.622 + st->print(" - local [" INTPTR_FORMAT "]", x); 1.623 + st->fill_to(23); 1.624 + st->print_cr("; #%d", i); 1.625 + } 1.626 + for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) { 1.627 + intptr_t x = *interpreter_frame_expression_stack_at(i); 
1.628 + st->print(" - stack [" INTPTR_FORMAT "]", x); 1.629 + st->fill_to(23); 1.630 + st->print_cr("; #%d", i); 1.631 + } 1.632 + // locks for synchronization 1.633 + for (BasicObjectLock* current = interpreter_frame_monitor_end(); 1.634 + current < interpreter_frame_monitor_begin(); 1.635 + current = next_monitor_in_interpreter_frame(current)) { 1.636 + st->print(" - obj ["); 1.637 + current->obj()->print_value_on(st); 1.638 + st->print_cr("]"); 1.639 + st->print(" - lock ["); 1.640 + current->lock()->print_on(st); 1.641 + st->print_cr("]"); 1.642 + } 1.643 + // monitor 1.644 + st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin()); 1.645 + // bcp 1.646 + st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp()); 1.647 + st->fill_to(23); 1.648 + st->print_cr("; @%d", interpreter_frame_bci()); 1.649 + // locals 1.650 + st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0)); 1.651 + // method 1.652 + st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method()); 1.653 + st->fill_to(23); 1.654 + st->print("; "); 1.655 + interpreter_frame_method()->print_name(st); 1.656 + st->cr(); 1.657 +#endif 1.658 +} 1.659 + 1.660 +// Return whether the frame is in the VM or os indicating a Hotspot problem. 1.661 +// Otherwise, it's likely a bug in the native library that the Java code calls, 1.662 +// hopefully indicating where to submit bugs. 1.663 +void frame::print_C_frame(outputStream* st, char* buf, int buflen, address pc) { 1.664 + // C/C++ frame 1.665 + bool in_vm = os::address_is_in_vm(pc); 1.666 + st->print(in_vm ? 
"V" : "C"); 1.667 + 1.668 + int offset; 1.669 + bool found; 1.670 + 1.671 + // libname 1.672 + found = os::dll_address_to_library_name(pc, buf, buflen, &offset); 1.673 + if (found) { 1.674 + // skip directory names 1.675 + const char *p1, *p2; 1.676 + p1 = buf; 1.677 + int len = (int)strlen(os::file_separator()); 1.678 + while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len; 1.679 + st->print(" [%s+0x%x]", p1, offset); 1.680 + } else { 1.681 + st->print(" " PTR_FORMAT, pc); 1.682 + } 1.683 + 1.684 + // function name - os::dll_address_to_function_name() may return confusing 1.685 + // names if pc is within jvm.dll or libjvm.so, because JVM only has 1.686 + // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this 1.687 + // only for native libraries. 1.688 + if (!in_vm || Decoder::can_decode_C_frame_in_vm()) { 1.689 + found = os::dll_address_to_function_name(pc, buf, buflen, &offset); 1.690 + 1.691 + if (found) { 1.692 + st->print(" %s+0x%x", buf, offset); 1.693 + } 1.694 + } 1.695 +} 1.696 + 1.697 +// frame::print_on_error() is called by fatal error handler. Notice that we may 1.698 +// crash inside this function if stack frame is corrupted. The fatal error 1.699 +// handler can catch and handle the crash. Here we assume the frame is valid. 1.700 +// 1.701 +// First letter indicates type of the frame: 1.702 +// J: Java frame (compiled) 1.703 +// j: Java frame (interpreted) 1.704 +// V: VM frame (C/C++) 1.705 +// v: Other frames running VM generated code (e.g. stubs, adapters, etc.) 1.706 +// C: C/C++ frame 1.707 +// 1.708 +// We don't need detailed frame type as that in frame::print_name(). "C" 1.709 +// suggests the problem is in user lib; everything else is likely a VM bug. 
1.710 + 1.711 +void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const { 1.712 + if (_cb != NULL) { 1.713 + if (Interpreter::contains(pc())) { 1.714 + Method* m = this->interpreter_frame_method(); 1.715 + if (m != NULL) { 1.716 + m->name_and_sig_as_C_string(buf, buflen); 1.717 + st->print("j %s", buf); 1.718 + st->print("+%d", this->interpreter_frame_bci()); 1.719 + } else { 1.720 + st->print("j " PTR_FORMAT, pc()); 1.721 + } 1.722 + } else if (StubRoutines::contains(pc())) { 1.723 + StubCodeDesc* desc = StubCodeDesc::desc_for(pc()); 1.724 + if (desc != NULL) { 1.725 + st->print("v ~StubRoutines::%s", desc->name()); 1.726 + } else { 1.727 + st->print("v ~StubRoutines::" PTR_FORMAT, pc()); 1.728 + } 1.729 + } else if (_cb->is_buffer_blob()) { 1.730 + st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name()); 1.731 + } else if (_cb->is_nmethod()) { 1.732 + nmethod* nm = (nmethod*)_cb; 1.733 + Method* m = nm->method(); 1.734 + if (m != NULL) { 1.735 + m->name_and_sig_as_C_string(buf, buflen); 1.736 + st->print("J %d%s %s %s (%d bytes) @ " PTR_FORMAT " [" PTR_FORMAT "+0x%x]", 1.737 + nm->compile_id(), (nm->is_osr_method() ? "%" : ""), 1.738 + ((nm->compiler() != NULL) ? 
nm->compiler()->name() : ""), 1.739 + buf, m->code_size(), _pc, _cb->code_begin(), _pc - _cb->code_begin()); 1.740 + } else { 1.741 + st->print("J " PTR_FORMAT, pc()); 1.742 + } 1.743 + } else if (_cb->is_runtime_stub()) { 1.744 + st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name()); 1.745 + } else if (_cb->is_deoptimization_stub()) { 1.746 + st->print("v ~DeoptimizationBlob"); 1.747 + } else if (_cb->is_exception_stub()) { 1.748 + st->print("v ~ExceptionBlob"); 1.749 + } else if (_cb->is_safepoint_stub()) { 1.750 + st->print("v ~SafepointBlob"); 1.751 + } else { 1.752 + st->print("v blob " PTR_FORMAT, pc()); 1.753 + } 1.754 + } else { 1.755 + print_C_frame(st, buf, buflen, pc()); 1.756 + } 1.757 +} 1.758 + 1.759 + 1.760 +/* 1.761 + The interpreter_frame_expression_stack_at method in the case of SPARC needs the 1.762 + max_stack value of the method in order to compute the expression stack address. 1.763 + It uses the Method* in order to get the max_stack value but during GC this 1.764 + Method* value saved on the frame is changed by reverse_and_push and hence cannot 1.765 + be used. 
   So we save the max_stack value in the FrameClosure object and pass it
   down to the interpreter_frame_expression_stack_at method
*/
// Closure applied to every (local or expression-stack) slot offset reported
// by the interpreter oop map; visits the oop stored in that slot.
class InterpreterFrameClosure : public OffsetClosure {
 private:
  frame*      _fr;          // the interpreted frame being walked
  OopClosure* _f;           // closure invoked on each oop slot
  int         _max_locals;  // offsets below this are locals, at/above are stack
  int         _max_stack;

 public:
  InterpreterFrameClosure(frame* fr, int max_locals, int max_stack,
                          OopClosure* f) {
    _fr         = fr;
    _max_locals = max_locals;
    _max_stack  = max_stack;
    _f          = f;
  }

  // Translate an oop-map offset into a frame address and visit it.
  // Offsets < _max_locals index the locals area; the remainder index the
  // expression stack (rebased by _max_locals).
  void offset_do(int offset) {
    oop* addr;
    if (offset < _max_locals) {
      addr = (oop*) _fr->interpreter_frame_local_at(offset);
      assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame");
      _f->do_oop(addr);
    } else {
      addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals));
      // In case of exceptions, the expression stack is invalid and the esp will be reset to express
      // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel).
      bool in_stack;
      if (frame::interpreter_frame_expression_stack_direction() > 0) {
        in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address();
      } else {
        in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address();
      }
      if (in_stack) {
        _f->do_oop(addr);
      }
    }
  }

  int max_locals()  { return _max_locals; }
  frame* fr()       { return _fr; }
};


// Walks the outgoing arguments of a call site in an interpreted frame
// (arguments live on the caller's expression stack) and visits the oop ones.
class InterpretedArgumentOopFinder: public SignatureInfo {
 private:
  OopClosure* _f;             // Closure to invoke
  int         _offset;        // TOS-relative offset, decremented with each argument
  bool        _has_receiver;  // true if the callee has a receiver
  frame*      _fr;

  // Called once per signature element (via SignatureInfo); arguments are
  // iterated first-to-last, so the offset counts down from args_size.
  void set(int size, BasicType type) {
    _offset -= size;
    if (type == T_OBJECT || type == T_ARRAY) oop_offset_do();
  }

  void oop_offset_do() {
    oop* addr;
    addr = (oop*)_fr->interpreter_frame_tos_at(_offset);
    _f->do_oop(addr);
  }

 public:
  InterpretedArgumentOopFinder(Symbol* signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) {
    // compute size of arguments
    int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
    assert(!fr->is_interpreted_frame() ||
           args_size <= fr->interpreter_frame_expression_stack_size(),
           "args cannot be on stack anymore");
    // initialize InterpretedArgumentOopFinder
    _f      = f;
    _fr     = fr;
    _offset = args_size;
  }

  // Visit the receiver (if any) followed by all declared parameters.
  void oops_do() {
    if (_has_receiver) {
      --_offset;
      oop_offset_do();
    }
    iterate_parameters();
  }
};


// Entry frame has following form (n arguments)
//         +-----------+
//   sp -> |  last arg |
//         +-----------+
//         :    :::    :
//         +-----------+
// (sp+n)->| first arg |
//         +-----------+


// visits and GC's all the arguments in entry frame
class EntryFrameOopFinder: public SignatureInfo {
 private:
  bool        _is_static;  // static callee => no receiver slot
  int         _offset;     // argument index, counts down as parameters are visited
  frame*      _fr;
  OopClosure* _f;

  void set(int size, BasicType type) {
    assert (_offset >= 0, "illegal offset");
    if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset);
    _offset -= size;
  }

  void oop_at_offset_do(int offset) {
    assert (offset >= 0, "illegal offset");
    oop* addr = (oop*) _fr->entry_frame_argument_at(offset);
    _f->do_oop(addr);
  }

 public:
  EntryFrameOopFinder(frame* frame, Symbol* signature, bool is_static) : SignatureInfo(signature) {
    _f = NULL; // will be set later
    _fr = frame;
    _is_static = is_static;
    _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0
  }

  // Visit the receiver (for non-static callees) and then each parameter.
  void arguments_do(OopClosure* f) {
    _f = f;
    if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver
    iterate_parameters();
  }

};

// Address of the receiver the current (interpreted) frame is about to pass
// to a callee: it sits just below the callee's declared arguments on the
// expression stack.
oop* frame::interpreter_callee_receiver_addr(Symbol* signature) {
  ArgumentSizeComputer asc(signature);
  int size = asc.size();
  return (oop *)interpreter_frame_tos_at(size);
}


// GC root scanning for an interpreted frame: visits monitors, the method
// holder's class loader (via cld_f), the temp oop slot for native methods,
// callee arguments at a call site, and finally locals + expression stack
// as described by the interpreter oop map for the current bci.
void frame::oops_interpreted_do(OopClosure* f, CLDToOopClosure* cld_f,
    const RegisterMap* map, bool query_oop_map_cache) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(map != NULL, "map must be set");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(!Universe::heap()->is_in(m()),
         "must be valid oop");
  assert(m->is_method(), "checking frame value");
  assert((m->is_native() && bci == 0)  ||
         (!m->is_native() && bci >= 0 && bci < m->code_size()),
         "invalid bci value");

  // Handle the monitor elements in the activation
  for (
    BasicObjectLock* current = interpreter_frame_monitor_end();
    current < interpreter_frame_monitor_begin();
    current = next_monitor_in_interpreter_frame(current)
  ) {
#ifdef ASSERT
    interpreter_frame_verify_monitor(current);
#endif
    current->oops_do(f);
  }

  // process fixed part
  if (cld_f != NULL) {
    // The method pointer in the frame might be the only path to the method's
    // klass, and the klass needs to be kept alive while executing. The GCs
    // don't trace through method pointers, so typically in similar situations
    // the mirror or the class loader of the klass are installed as a GC root.
    // To minimze the overhead of doing that here, we ask the GC to pass down a
    // closure that knows how to keep klasses alive given a ClassLoaderData.
    cld_f->do_cld(m->method_holder()->class_loader_data());
  }

  if (m->is_native() PPC32_ONLY(&& m->is_static())) {
    f->do_oop(interpreter_frame_temp_oop_addr());
  }

  // Native methods have no bytecode locals; their oop-mapped slots are the
  // parameter words.
  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  Symbol* signature = NULL;
  bool has_receiver = false;

  // Process a callee's arguments if we are at a call site
  // (i.e., if we are at an invoke bytecode)
  // This is used sometimes for calling into the VM, not for another
  // interpreted or compiled frame.
  if (!m->is_native()) {
    Bytecode_invoke call = Bytecode_invoke_check(m, bci);
    if (call.is_valid()) {
      signature = call.signature();
      has_receiver = call.has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread);  // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        //       occurred during method resolution/execution. In all
        //       cases we empty the expression stack completely be-
        //       fore handling the exception (the exception handling
        //       code in the interpreter calls a blocking runtime
        //       routine which can cause this code to be executed).
        //       (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

  // process locals & expression stack
  InterpreterOopMap mask;
  if (query_oop_map_cache) {
    m->mask_for(bci, &mask);
  } else {
    OopMapCache::compute_one_oop_map(m, bci, &mask);
  }
  mask.iterate_oop(&blk);
}


// Visit the oop arguments currently on this interpreted frame's expression
// stack for the call site described by (signature, has_receiver).
void frame::oops_interpreted_arguments_do(Symbol* signature, bool has_receiver, OopClosure* f) {
  InterpretedArgumentOopFinder finder(signature, has_receiver, this, f);
  finder.oops_do();
}

// GC root scanning for a frame owned by a code blob (compiled/native/stub):
// uses the blob's oop maps, optionally preserves callee argument oops, and
// lets cf decide whether/how to trace the blob itself.
void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) {
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::oops_do(this, reg_map, f);

    // Preserve potential arguments for a callee. We handle this by dispatching
    // on the codeblob. For c2i, we do
    if (reg_map->include_argument_oops()) {
      _cb->preserve_callee_argument_oops(*this, reg_map, f);
    }
  }
  // In cases where perm gen is collected, GC will want to mark
  // oops referenced from nmethods active on thread stacks so as to
  // prevent them from being collected. However, this visit should be
  // restricted to certain phases of the collection only. The
  // closure decides how it wants nmethods to be traced.
  if (cf != NULL)
    cf->do_code_blob(_cb);
}

// Finds the oop arguments of a call in a compiled frame by mapping each
// signature element to its VMReg location (register spill slot or stack slot).
class CompiledArgumentOopFinder: public SignatureInfo {
 protected:
  OopClosure*  _f;
  int          _offset;        // the current offset, incremented with each argument
  bool         _has_receiver;  // true if the callee has a receiver
  bool         _has_appendix;  // true if the call has an appendix
  frame        _fr;
  RegisterMap* _reg_map;
  int          _arg_size;
  VMRegPair*   _regs;          // VMReg list of arguments

  void set(int size, BasicType type) {
    if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset();
    _offset += size;
  }

  virtual void handle_oop_offset() {
    // Extract low order register number from register array.
    // In LP64-land, the high-order bits are valid but unhelpful.
    VMReg reg = _regs[_offset].first();
    oop *loc = _fr.oopmapreg_to_location(reg, _reg_map);
    _f->do_oop(loc);
  }

 public:
  CompiledArgumentOopFinder(Symbol* signature, bool has_receiver, bool has_appendix, OopClosure* f, frame fr, const RegisterMap* reg_map)
    : SignatureInfo(signature) {

    // initialize CompiledArgumentOopFinder
    _f            = f;
    _offset       = 0;
    _has_receiver = has_receiver;
    _has_appendix = has_appendix;
    _fr           = fr;
    // NOTE(review): casts away const on reg_map; presumably the map is not
    // mutated through _reg_map, but the cast hides that contract -- verify.
    _reg_map      = (RegisterMap*)reg_map;
    _arg_size     = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0) + (has_appendix ? 1 : 0);

    int arg_size;
    _regs = SharedRuntime::find_callee_arguments(signature, has_receiver, has_appendix, &arg_size);
    assert(arg_size == _arg_size, "wrong arg size");
  }

  // Visit receiver, then declared parameters, then the appendix (if any);
  // the receiver precedes and the appendix follows the signature arguments.
  void oops_do() {
    if (_has_receiver) {
      handle_oop_offset();
      _offset++;
    }
    iterate_parameters();
    if (_has_appendix) {
      handle_oop_offset();
      _offset++;
    }
  }
};

// Visit the oop arguments of the call in this compiled frame.
void frame::oops_compiled_arguments_do(Symbol* signature, bool has_receiver, bool has_appendix, const RegisterMap* reg_map, OopClosure* f) {
  ResourceMark rm;
  CompiledArgumentOopFinder finder(signature, has_receiver, has_appendix, f, *this, reg_map);
  finder.oops_do();
}


// Get receiver out of callers frame, i.e. find parameter 0 in callers
// frame.  Consult ADLC for where parameter 0 is to be found.  Then
// check local reg_map for it being a callee-save register or argument
// register, both of which are saved in the local frame.  If not found
// there, it must be an in-stack argument of the caller.
// Note: caller.sp() points to callee-arguments
oop frame::retrieve_receiver(RegisterMap* reg_map) {
  frame caller = *this;

  // First consult the ADLC on where it puts parameter 0 for this signature.
  VMReg reg = SharedRuntime::name_for_receiver();
  oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
  if (oop_adr == NULL) {
    // NOTE(review): the guarantee below always fires when this branch is
    // taken (oop_adr is known NULL here), so the following return is
    // effectively unreachable -- the guard exists to produce a fatal error
    // with a diagnostic message rather than a NULL dereference.
    guarantee(oop_adr != NULL, "bad register save location");
    return NULL;
  }
  oop r = *oop_adr;
  // NOTE(review): INTX_FORMAT is paired with a pointer argument here;
  // INTPTR_FORMAT would match the (void*) cast -- confirm against the
  // err_msg format macro definitions.
  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
  return r;
}


// Map an oop-map VMReg to the stack address where its value is stored in
// this frame: the register map for spilled registers, or an sp-relative
// slot for stack-allocated values.
oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const {
  if(reg->is_reg()) {
    // If it is passed in a register, it got spilled in the stub frame.
    return (oop *)reg_map->location(reg);
  } else {
    int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size;
    return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes);
  }
}

// The BasicLock slot of a synchronized native method's frame, located via
// the nmethod's recorded sp offset.
BasicLock* frame::get_native_monitor() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_basic_lock_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  return (BasicLock*) &sp()[byte_offset / wordSize];
}

// The receiver oop saved in a native method's frame, located via the
// nmethod's recorded sp offset.
oop frame::get_native_receiver() {
  nmethod* nm = (nmethod*)_cb;
  assert(_cb != NULL && _cb->is_nmethod() && nm->method()->is_native(),
         "Should not call this unless it's a native nmethod");
  int byte_offset = in_bytes(nm->native_receiver_sp_offset());
  assert(byte_offset >= 0, "should not see invalid offset");
  oop owner = ((oop*) sp())[byte_offset / wordSize];
  assert( Universe::heap()->is_in(owner), "bad receiver" );
  return owner;
}

// GC root scanning for a call-stub (entry) frame: optionally the callee's
// arguments (nobody else scans them), then the JNI handle block held by
// the call wrapper.
void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) {
  assert(map != NULL, "map must be set");
  if (map->include_argument_oops()) {
    // must collect argument oops, as nobody else is doing it
    Thread *thread = Thread::current();
    methodHandle m (thread, entry_frame_call_wrapper()->callee_method());
    EntryFrameOopFinder finder(this, m->signature(), m->is_static());
    finder.arguments_do(f);
  }
  // Traverse the Handle Block saved in the entry frame
  entry_frame_call_wrapper()->oops_do(f);
}


// Dispatch oop scanning to the handler matching this frame's kind
// (interpreted / entry / code-cache); anything else is a walker bug.
void frame::oops_do_internal(OopClosure* f, CLDToOopClosure* cld_f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) {
#ifndef PRODUCT
  // simulate GC crash here to dump java thread in error report
  if (CrashGCForDumpingJavaThread) {
    char *t = NULL;
    *t = 'c';    // deliberate NULL write to provoke the crash
  }
#endif
  if (is_interpreted_frame()) {
    oops_interpreted_do(f, cld_f, map, use_interpreter_oop_map_cache);
  } else if (is_entry_frame()) {
    oops_entry_do(f, map);
  } else if (CodeCache::contains(pc())) {
    oops_code_blob_do(f, cf, map);
#ifdef SHARK
  } else if (is_fake_stub_frame()) {
    // nothing to do
#endif // SHARK
  } else {
    ShouldNotReachHere();
  }
}

// Apply cf to this frame's code blob if it is an nmethod.
void frame::nmethods_do(CodeBlobClosure* cf) {
  if (_cb != NULL && _cb->is_nmethod()) {
    cf->do_code_blob(_cb);
  }
}


// call f() on the interpreted Method*s in the stack.
// Have to walk the entire code cache for the compiled frames Yuck.
// Apply f to the Method* of this frame when it is executing in the
// interpreter (compiled frames are handled by a code-cache walk elsewhere).
void frame::metadata_do(void f(Metadata*)) {
  if (_cb != NULL && Interpreter::contains(pc())) {
    Method* m = this->interpreter_frame_method();
    assert(m != NULL, "huh?");
    f(m);
  }
}

// Before GC: make interpreted frames position-independent, since the
// Method* (and thus the bcp inside it) may be moved.
void frame::gc_prologue() {
  if (is_interpreted_frame()) {
    // set bcx to bci to become Method* position independent during GC
    interpreter_frame_set_bcx(interpreter_frame_bci());
  }
}


// After GC: restore the interpreted frame's bytecode pointer from the bci
// saved in gc_prologue, then run the platform-specific epilogue.
void frame::gc_epilogue() {
  if (is_interpreted_frame()) {
    // set bcx back to bcp for interpreter
    interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp());
  }
  // call processor specific epilog function
  pd_gc_epilog();
}


# ifdef ENABLE_ZAP_DEAD_LOCALS

// Debug closure: warn if a slot that should hold a plain value contains
// something that points into the heap (looks like a missed oop).
void frame::CheckValueClosure::do_oop(oop* p) {
  if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) {
    warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckValueClosure frame::_check_value;


// Debug closure: warn if a slot that should hold an oop contains a non-oop.
void frame::CheckOopClosure::do_oop(oop* p) {
  if (*p != NULL && !(*p)->is_oop()) {
    warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current());
  }
}
frame::CheckOopClosure frame::_check_oop;

// Derived pointers are only checked through their base oop.
void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}


// Debug closure: overwrite a dead slot with a recognizable poison pattern
// so stale reads show up quickly.
void frame::ZapDeadClosure::do_oop(oop* p) {
  if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
  *p = cast_to_oop<intptr_t>(0xbabebabe);
}
frame::ZapDeadClosure frame::_zap_dead;

// Zap the dead locals of this frame, dispatching on frame kind. Must run
// on the frame's own thread.
void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
       if (is_entry_frame      ()) zap_dead_entry_locals      (thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame   ()) zap_dead_compiled_locals   (thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}


// For an interpreted frame: check live oop slots, check live value slots,
// and zap dead slots, as classified by the interpreter oop map at the
// current bci.
void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  Method* m = interpreter_frame_method();
  int bci = interpreter_frame_bci();

  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  // process dynamic part
  InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                    &_check_value);
  InterpreterFrameClosure   oop_blk(this, max_locals, m->max_stack(),
                                    &_check_oop  );
  InterpreterFrameClosure  dead_blk(this, max_locals, m->max_stack(),
                                    &_zap_dead   );

  // get frame map
  InterpreterOopMap mask;
  m->mask_for(bci, &mask);
  mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
}


// For a compiled frame: run the checking/zapping closures over the blob's
// oop maps.
void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) {

  ResourceMark rm(thread);
  assert(_cb != NULL, "sanity check");
  if (_cb->oop_maps() != NULL) {
    OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value);
  }
}


void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented");
}


void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) {
  if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented");
}

# endif // ENABLE_ZAP_DEAD_LOCALS

// Sanity-check this frame, then verify every oop it references.
void frame::verify(const RegisterMap* map) {
  // for now make sure receiver type is correct
  if (is_interpreted_frame()) {
    Method* method = interpreter_frame_method();
    guarantee(method->is_method(), "method is wrong in frame::verify");
    if (!method->is_static()) {
      // fetch the receiver
      // NOTE(review): p is fetched but never checked -- the receiver-type
      // verification hinted at below is not implemented.
      oop* p = (oop*) interpreter_frame_local_at(0);
      // make sure we have the right receiver type
    }
  }
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");)
  oops_do_internal(&VerifyOopClosure::verify_oop, NULL, NULL, (RegisterMap*)map, false);
}


#ifdef ASSERT
// A valid return pc lands in the call stub, the code cache, or the
// interpreter; anything else indicates a corrupt frame.
bool frame::verify_return_pc(address x) {
  if (StubRoutines::returns_to_call_stub(x)) {
    return true;
  }
  if (CodeCache::contains(x)) {
    return true;
  }
  if (Interpreter::contains(x)) {
    return true;
  }
  return false;
}
#endif

#ifdef ASSERT
// Check that a BasicObjectLock* lies inside this frame's monitor area and
// is aligned to a monitor boundary.
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // verify that the value is in the right part of the frame
  address low_mark  = (address) interpreter_frame_monitor_end();
  address high_mark = (address) interpreter_frame_monitor_begin();
  address current   = (address) value;

  const int monitor_size = frame::interpreter_frame_monitor_size();
  guarantee((high_mark - current) % monitor_size == 0, "Misaligned top of BasicObjectLock*");
  guarantee( high_mark > current                     , "Current BasicObjectLock* higher than high_mark");

  guarantee((current - low_mark) % monitor_size == 0, "Misaligned bottom of BasicObjectLock*");
  guarantee( current >= low_mark                    , "Current BasicObjectLock* below than low_mark");
}
#endif

#ifndef PRODUCT
// Record a human-readable annotation for every interesting address in this
// frame (boundaries, locals, expression stack, monitors) into 'values' for
// later printing by FrameValues::print.
void frame::describe(FrameValues& values, int frame_no) {
  // boundaries: sp and the 'real' frame pointer
  values.describe(-1, sp(), err_msg("sp for #%d", frame_no), 1);
  intptr_t* frame_pointer = real_fp(); // Note: may differ from fp()

  // print frame info at the highest boundary
  intptr_t* info_address = MAX2(sp(), frame_pointer);

  if (info_address != frame_pointer) {
    // print frame_pointer explicitly if not marked by the frame info
    values.describe(-1, frame_pointer, err_msg("frame pointer for #%d", frame_no), 1);
  }

  if (is_entry_frame() || is_compiled_frame() || is_interpreted_frame() || is_native_frame()) {
    // Label values common to most frames
    values.describe(-1, unextended_sp(), err_msg("unextended_sp for #%d", frame_no));
  }

  if (is_interpreted_frame()) {
    Method* m = interpreter_frame_method();
    int bci = interpreter_frame_bci();

    // Label the method and current bci
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d method %s @ %d", frame_no, m->name_and_sig_as_C_string(), bci), 2);
    values.describe(-1, info_address,
                    err_msg("- %d locals %d max stack", m->max_locals(), m->max_stack()), 1);
    if (m->max_locals() > 0) {
      intptr_t* l0 = interpreter_frame_local_at(0);
      intptr_t* ln = interpreter_frame_local_at(m->max_locals() - 1);
      values.describe(-1, MAX2(l0, ln), err_msg("locals for #%d", frame_no), 1);
      // Report each local and mark as owned by this frame
      for (int l = 0; l < m->max_locals(); l++) {
        intptr_t* l0 = interpreter_frame_local_at(l);
        values.describe(frame_no, l0, err_msg("local %d", l));
      }
    }

    // Compute the actual expression stack size
    InterpreterOopMap mask;
    OopMapCache::compute_one_oop_map(m, bci, &mask);
    intptr_t* tos = NULL;
    // Report each stack element and mark as owned by this frame
    for (int e = 0; e < mask.expression_stack_size(); e++) {
      tos = MAX2(tos, interpreter_frame_expression_stack_at(e));
      values.describe(frame_no, interpreter_frame_expression_stack_at(e),
                      err_msg("stack %d", e));
    }
    if (tos != NULL) {
      values.describe(-1, tos, err_msg("expression stack for #%d", frame_no), 1);
    }
    if (interpreter_frame_monitor_begin() != interpreter_frame_monitor_end()) {
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_begin(), "monitors begin");
      values.describe(frame_no, (intptr_t*)interpreter_frame_monitor_end(), "monitors end");
    }
  } else if (is_entry_frame()) {
    // For now just label the frame
    values.describe(-1, info_address, err_msg("#%d entry frame", frame_no), 2);
  } else if (is_compiled_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string(),
                                       (_deopt_state == is_deoptimized) ?
                                       " (deoptimized)" :
                                       ((_deopt_state == unknown) ? " (state unknown)" : "")),
                    2);
  } else if (is_native_frame()) {
    // For now just label the frame
    nmethod* nm = cb()->as_nmethod_or_null();
    values.describe(-1, info_address,
                    FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for native method %s", frame_no,
                                       nm, nm->method()->name_and_sig_as_C_string()), 2);
  } else {
    // provide default info if not handled before
    char *info = (char *) "special frame";
    if ((_cb != NULL) &&
        (_cb->name() != NULL)) {
      info = (char *)_cb->name();
    }
    values.describe(-1, info_address, err_msg("#%d <%s>", frame_no, info), 2);
  }

  // platform dependent additional data
  describe_pd(values, frame_no);
}

#endif


//-----------------------------------------------------------------------------------
// StackFrameStream implementation

// Start the stream at the thread's last Java frame; the register map is
// initialized (and optionally updated during walking) by the member init.
StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) {
  assert(thread->has_last_Java_frame(), "sanity check");
  _fr = thread->last_frame();
  _is_done = false;
}


#ifndef PRODUCT

// Append one annotated stack location. The description is copied into
// resource-area storage, so callers may pass transient strings.
void FrameValues::describe(int owner, intptr_t* location, const char* description, int priority) {
  FrameValue fv;
  fv.location = location;
  fv.owner = owner;
  fv.priority = priority;
  fv.description = NEW_RESOURCE_ARRAY(char, strlen(description) + 1);
  strcpy(fv.description, description);
  _values.append(fv);
}


#ifdef ASSERT
// Assert that no stack location is claimed (owner != -1) by two different
// frames; owners sharing a location with themselves is fine.
void FrameValues::validate() {
  _values.sort(compare);
  bool error = false;
  FrameValue prev;
  prev.owner = -1;
  for (int i = _values.length() - 1; i >= 0; i--) {
    FrameValue fv = _values.at(i);
    if (fv.owner == -1) continue;
    if (prev.owner == -1) {
      prev = fv;
      continue;
    }
    if (prev.location == fv.location) {
      if (fv.owner != prev.owner) {
        tty->print_cr("overlapping storage");
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", prev.location, *prev.location, prev.description);
        tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
        error = true;
      }
    } else {
      prev = fv;
    }
  }
  assert(!error, "invalid layout");
}
#endif // ASSERT

// Dump the annotated stack region to tty: every word between the lowest and
// highest in-stack recorded location, with descriptions next to known ones.
void FrameValues::print(JavaThread* thread) {
  _values.sort(compare);

  // Sometimes values like the fp can be invalid values if the
  // register map wasn't updated during the walk.  Trim out values
  // that aren't actually in the stack of the thread.
  // NOTE(review): the trim loops below assume at least one recorded value
  // lies inside the thread's stack; if none does, the indices run off the
  // ends of _values -- acceptable for debug-only code, but worth confirming.
  int min_index = 0;
  int max_index = _values.length() - 1;
  intptr_t* v0 = _values.at(min_index).location;
  intptr_t* v1 = _values.at(max_index).location;

  if (thread == Thread::current()) {
    // current thread: can use the precise sp-based bound
    while (!thread->is_in_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->is_in_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  } else {
    // other thread: only the registered stack bounds are reliable
    while (!thread->on_local_stack((address)v0)) {
      v0 = _values.at(++min_index).location;
    }
    while (!thread->on_local_stack((address)v1)) {
      v1 = _values.at(--max_index).location;
    }
  }
  intptr_t* min = MIN2(v0, v1);
  intptr_t* max = MAX2(v0, v1);
  intptr_t* cur = max;
  intptr_t* last = NULL;
  for (int i = max_index; i >= min_index; i--) {
    FrameValue fv = _values.at(i);
    // print the unannotated words between the previous location and this one
    while (cur > fv.location) {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT, cur, *cur);
      cur--;
    }
    if (last == fv.location) {
      // same address as the previous entry: print only the extra description
      const char* spacer = " " LP64_ONLY(" ");
      tty->print_cr(" %s %s %s", spacer, spacer, fv.description);
    } else {
      tty->print_cr(" " INTPTR_FORMAT ": " INTPTR_FORMAT " %s", fv.location, *fv.location, fv.description);
      last = fv.location;
      cur--;
    }
  }
}

#endif // ndef PRODUCT