Wed, 13 Jan 2010 23:05:52 -0800
6912065: final fields in objects need to support inlining optimizations for JSR 292
Reviewed-by: twisti, kvn
duke@435 | 1 | /* |
twisti@1573 | 2 | * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | # include "incls/_precompiled.incl" |
duke@435 | 26 | # include "incls/_frame.cpp.incl" |
duke@435 | 27 | |
// Build a fresh RegisterMap for walking `thread`'s stack.  `update_map`
// selects whether callee-saved register locations are recorded during the
// walk (required when oops held in registers must be located later).
RegisterMap::RegisterMap(JavaThread *thread, bool update_map) {
  _thread = thread;
  _update_map = update_map;
  clear();  // note: clear() reads _update_map, so it must already be set here
  debug_only(_update_for_id = NULL;)
#ifndef PRODUCT
  // Debug builds: start from a known state so stale locations are detectable.
  for (int i = 0; i < reg_count ; i++ ) _location[i] = NULL;
#endif /* PRODUCT */
}
duke@435 | 37 | |
duke@435 | 38 | RegisterMap::RegisterMap(const RegisterMap* map) { |
duke@435 | 39 | assert(map != this, "bad initialization parameter"); |
duke@435 | 40 | assert(map != NULL, "RegisterMap must be present"); |
duke@435 | 41 | _thread = map->thread(); |
duke@435 | 42 | _update_map = map->update_map(); |
duke@435 | 43 | _include_argument_oops = map->include_argument_oops(); |
duke@435 | 44 | debug_only(_update_for_id = map->_update_for_id;) |
duke@435 | 45 | pd_initialize_from(map); |
duke@435 | 46 | if (update_map()) { |
duke@435 | 47 | for(int i = 0; i < location_valid_size; i++) { |
duke@435 | 48 | LocationValidType bits = !update_map() ? 0 : map->_location_valid[i]; |
duke@435 | 49 | _location_valid[i] = bits; |
duke@435 | 50 | // for whichever bits are set, pull in the corresponding map->_location |
duke@435 | 51 | int j = i*location_valid_type_size; |
duke@435 | 52 | while (bits != 0) { |
duke@435 | 53 | if ((bits & 1) != 0) { |
duke@435 | 54 | assert(0 <= j && j < reg_count, "range check"); |
duke@435 | 55 | _location[j] = map->_location[j]; |
duke@435 | 56 | } |
duke@435 | 57 | bits >>= 1; |
duke@435 | 58 | j += 1; |
duke@435 | 59 | } |
duke@435 | 60 | } |
duke@435 | 61 | } |
duke@435 | 62 | } |
duke@435 | 63 | |
// Reset the map to its initial state.  Argument oops are included by
// default; recorded register locations are wiped only for updating maps.
void RegisterMap::clear() {
  set_include_argument_oops(true);
  if (_update_map) {
    // Updating map: invalidate every recorded location, then let the
    // platform-dependent code reset its own state.
    for(int i = 0; i < location_valid_size; i++) {
      _location_valid[i] = 0;
    }
    pd_clear();
  } else {
    // Non-updating map: only platform-dependent initialization is needed.
    pd_initialize();
  }
}
duke@435 | 75 | |
duke@435 | 76 | #ifndef PRODUCT |
duke@435 | 77 | |
// Debug dump: print each register that has a recorded location, together
// with the word stored there when the slot is properly aligned.
void RegisterMap::print_on(outputStream* st) const {
  st->print_cr("Register map");
  for(int i = 0; i < reg_count; i++) {

    VMReg r = VMRegImpl::as_VMReg(i);
    intptr_t* src = (intptr_t*) location(r);
    if (src != NULL) {

      r->print_on(st);
      st->print(" [" INTPTR_FORMAT "] = ", src);
      // A misaligned slot cannot be dereferenced safely; report it instead.
      if (((uintptr_t)src & (sizeof(*src)-1)) != 0) {
        st->print_cr("<misaligned>");
      } else {
        st->print_cr(INTPTR_FORMAT, *src);
      }
    }
  }
}
duke@435 | 96 | |
// Convenience wrapper: dump the map to the default tty stream.
void RegisterMap::print() const {
  print_on(tty);
}
duke@435 | 100 | |
duke@435 | 101 | #endif |
duke@435 | 102 | // This returns the pc that if you were in the debugger you'd see. Not |
duke@435 | 103 | // the idealized value in the frame object. This undoes the magic conversion |
duke@435 | 104 | // that happens for deoptimized frames. In addition it makes the value the |
duke@435 | 105 | // hardware would want to see in the native frame. The only user (at this point) |
duke@435 | 106 | // is deoptimization. It likely no one else should ever use it. |
duke@435 | 107 | |
duke@435 | 108 | address frame::raw_pc() const { |
duke@435 | 109 | if (is_deoptimized_frame()) { |
duke@435 | 110 | return ((nmethod*) cb())->deopt_handler_begin() - pc_return_offset; |
duke@435 | 111 | } else { |
duke@435 | 112 | return (pc() - pc_return_offset); |
duke@435 | 113 | } |
duke@435 | 114 | } |
duke@435 | 115 | |
// Change the pc in a frame object. This does not change the actual pc in
// actual frame. To do that use patch_pc.
//
void frame::set_pc(address newpc ) {
#ifdef ASSERT
  if (_cb != NULL && _cb->is_nmethod()) {
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant violation");
  }
#endif // ASSERT

  // Unsafe to use the is_deoptimized tester after changing pc
  _deopt_state = unknown;
  _pc = newpc;
  // Re-resolve the code blob: the new pc may lie in a different blob.
  _cb = CodeCache::find_blob_unsafe(_pc);

}
duke@435 | 132 | |
// type testers

// True once this frame's pc has been patched over to the deopt handler.
// Only answerable while _deopt_state is known (set_pc() resets it).
bool frame::is_deoptimized_frame() const {
  assert(_deopt_state != unknown, "not answerable");
  return _deopt_state == is_deoptimized;
}
duke@435 | 138 | |
duke@435 | 139 | bool frame::is_native_frame() const { |
duke@435 | 140 | return (_cb != NULL && |
duke@435 | 141 | _cb->is_nmethod() && |
duke@435 | 142 | ((nmethod*)_cb)->is_native_method()); |
duke@435 | 143 | } |
duke@435 | 144 | |
duke@435 | 145 | bool frame::is_java_frame() const { |
duke@435 | 146 | if (is_interpreted_frame()) return true; |
duke@435 | 147 | if (is_compiled_frame()) return true; |
duke@435 | 148 | return false; |
duke@435 | 149 | } |
duke@435 | 150 | |
duke@435 | 151 | |
duke@435 | 152 | bool frame::is_compiled_frame() const { |
duke@435 | 153 | if (_cb != NULL && |
duke@435 | 154 | _cb->is_nmethod() && |
duke@435 | 155 | ((nmethod*)_cb)->is_java_method()) { |
duke@435 | 156 | return true; |
duke@435 | 157 | } |
duke@435 | 158 | return false; |
duke@435 | 159 | } |
duke@435 | 160 | |
duke@435 | 161 | |
duke@435 | 162 | bool frame::is_runtime_frame() const { |
duke@435 | 163 | return (_cb != NULL && _cb->is_runtime_stub()); |
duke@435 | 164 | } |
duke@435 | 165 | |
duke@435 | 166 | bool frame::is_safepoint_blob_frame() const { |
duke@435 | 167 | return (_cb != NULL && _cb->is_safepoint_stub()); |
duke@435 | 168 | } |
duke@435 | 169 | |
duke@435 | 170 | // testers |
duke@435 | 171 | |
duke@435 | 172 | bool frame::is_first_java_frame() const { |
duke@435 | 173 | RegisterMap map(JavaThread::current(), false); // No update |
duke@435 | 174 | frame s; |
duke@435 | 175 | for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)); |
duke@435 | 176 | return s.is_first_frame(); |
duke@435 | 177 | } |
duke@435 | 178 | |
duke@435 | 179 | |
// An entry frame is the first frame when its call wrapper's anchor records
// no earlier last_Java_sp, i.e. no Java frames exist beneath it.
bool frame::entry_frame_is_first() const {
  return entry_frame_call_wrapper()->anchor()->last_Java_sp() == NULL;
}
duke@435 | 183 | |
duke@435 | 184 | |
// True if this compiled frame still needs deoptimizing: its nmethod has
// been marked for deoptimization, the frame has not been patched yet, and
// it is not sitting at a return poll (where deopt would be pointless).
bool frame::should_be_deoptimized() const {
  if (_deopt_state == is_deoptimized ||
      !is_compiled_frame() ) return false;
  assert(_cb != NULL && _cb->is_nmethod(), "must be an nmethod");
  nmethod* nm = (nmethod *)_cb;
  if (TraceDependencies) {
    tty->print("checking (%s) ", nm->is_marked_for_deoptimization() ? "true" : "false");
    nm->print_value_on(tty);
    tty->cr();
  }

  if( !nm->is_marked_for_deoptimization() )
    return false;

  // If at the return point, then the frame has already been popped, and
  // only the return needs to be executed. Don't deoptimize here.
  return !nm->is_at_poll_return(pc());
}
duke@435 | 203 | |
duke@435 | 204 | bool frame::can_be_deoptimized() const { |
duke@435 | 205 | if (!is_compiled_frame()) return false; |
duke@435 | 206 | nmethod* nm = (nmethod*)_cb; |
duke@435 | 207 | |
duke@435 | 208 | if( !nm->can_be_deoptimized() ) |
duke@435 | 209 | return false; |
duke@435 | 210 | |
duke@435 | 211 | return !nm->is_at_poll_return(pc()); |
duke@435 | 212 | } |
duke@435 | 213 | |
// Schedule deoptimization of the nmethod activation represented by this
// frame: save the original pc and patch the frame's published return pc to
// the nmethod's deopt handler -- or, when direct patching would race with
// the target thread, arrange for that thread to self-deopt instead.
void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
  // Store the original pc before any patch (or request to self-deopt)
  // in the published location of the frame.

  assert(_cb != NULL && _cb->is_nmethod(), "must be");
  nmethod* nm = (nmethod*)_cb;

  // This is a fix for the register window patching race
  if (NeedsDeoptSuspend && !thread_is_known_safe) {

    // It is possible especially with DeoptimizeALot/DeoptimizeRandom that
    // we could see the frame again and ask for it to be deoptimized since
    // it might move for a long time. That is harmless and we just ignore it.
    if (id() == thread->must_deopt_id()) {
      assert(thread->is_deopt_suspend(), "lost suspension");
      return;
    }

    // We are at a safepoint so the target thread can only be
    // in 4 states:
    //   blocked       - no problem
    //   blocked_trans - no problem (i.e. could have woken up from blocked
    //                   during a safepoint).
    //   native        - register window pc patching race
    //   native_trans  - momentary state
    //
    // We could just wait out a thread in native_trans to block.
    // Then we'd have all the issues that the safepoint code has as to
    // whether to spin or block. It isn't worth it. Just treat it like
    // native and be done with it.
    //
    JavaThreadState state = thread->thread_state();
    if (state == _thread_in_native || state == _thread_in_native_trans) {
      // Since we are at a safepoint the target thread will stop itself
      // before it can return to java as long as we remain at the safepoint.
      // Therefore we can put an additional request for the thread to stop
      // no matter what (like a suspend). This will cause the thread
      // to notice it needs to do the deopt on its own once it leaves native.
      //
      // The only reason we must do this is because on machines with register
      // windows we have a race with patching the return address and the
      // window coming live as the thread returns to the Java code (but still
      // in native mode) and then blocks. It is only this top most frame
      // that is at risk. So in truth we could add an additional check to
      // see if this frame is one that is at risk.
      RegisterMap map(thread, false);
      frame at_risk = thread->last_frame().sender(&map);
      if (id() == at_risk.id()) {
        thread->set_must_deopt_id(id());
        thread->set_deopt_suspend();
        return;
      }
    }
  } // NeedsDeoptSuspend


  address deopt = nm->deopt_handler_begin();
  // Save the original pc before we patch in the new one
  nm->set_original_pc(this, pc());
  patch_pc(thread, deopt);
#ifdef ASSERT
  {
    // Verify the patch took: walk back to this frame and check its state.
    RegisterMap map(thread, false);
    frame check = thread->last_frame();
    while (id() != check.id()) {
      check = check.sender(&map);
    }
    assert(check.is_deoptimized_frame(), "missed deopt");
  }
#endif // ASSERT
}
duke@435 | 287 | |
duke@435 | 288 | frame frame::java_sender() const { |
duke@435 | 289 | RegisterMap map(JavaThread::current(), false); |
duke@435 | 290 | frame s; |
duke@435 | 291 | for (s = sender(&map); !(s.is_java_frame() || s.is_first_frame()); s = s.sender(&map)) ; |
duke@435 | 292 | guarantee(s.is_java_frame(), "tried to get caller of first java frame"); |
duke@435 | 293 | return s; |
duke@435 | 294 | } |
duke@435 | 295 | |
duke@435 | 296 | frame frame::real_sender(RegisterMap* map) const { |
duke@435 | 297 | frame result = sender(map); |
duke@435 | 298 | while (result.is_runtime_frame()) { |
duke@435 | 299 | result = result.sender(map); |
duke@435 | 300 | } |
duke@435 | 301 | return result; |
duke@435 | 302 | } |
duke@435 | 303 | |
// Note: called by profiler - NOT for current thread
// Find the first Java frame at or above this frame on `thread`'s stack;
// returns a null frame() when none can be reached safely.
frame frame::profile_find_Java_sender_frame(JavaThread *thread) {
  // If we don't recognize this frame, walk back up the stack until we do
  RegisterMap map(thread, false);
  frame first_java_frame = frame();

  // Find the first Java frame on the stack starting with input frame
  if (is_java_frame()) {
    // top frame is compiled frame or deoptimized frame
    first_java_frame = *this;
  } else if (safe_for_sender(thread)) {
    // We are not walking our own thread's stack, so every step is
    // re-validated with safe_for_sender() before it is trusted.
    for (frame sender_frame = sender(&map);
      sender_frame.safe_for_sender(thread) && !sender_frame.is_first_frame();
      sender_frame = sender_frame.sender(&map)) {
      if (sender_frame.is_java_frame()) {
        first_java_frame = sender_frame;
        break;
      }
    }
  }
  return first_java_frame;
}
duke@435 | 326 | |
// Interpreter frames


// Install `locs` as the base address of this interpreter frame's locals.
void frame::interpreter_frame_set_locals(intptr_t* locs) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  *interpreter_frame_locals_addr() = locs;
}
duke@435 | 334 | |
// Fetch the methodOop executing in this interpreter frame, asserting that
// the slot really holds a permanent, valid methodOop.
methodOop frame::interpreter_frame_method() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  methodOop m = *interpreter_frame_method_addr();
  assert(m->is_perm(), "bad methodOop in interpreter frame");
  assert(m->is_method(), "not a methodOop");
  return m;
}
duke@435 | 342 | |
// Install `method` as the method executing in this interpreter frame.
void frame::interpreter_frame_set_method(methodOop method) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  *interpreter_frame_method_addr() = method;
}
duke@435 | 347 | |
// Set this interpreter frame's bcx slot.  A bcx is either a bcp (a pointer
// into the method's bytecodes) or, while GC is converting pointers, a bci
// (a small integer index; see is_bci()).  When the interpreter is profiling,
// the mdx slot mirrors the bcx representation: it holds either an mdp
// (pointer into the methodDataOop) or an mdi+1 (index; +1 distinguishes a
// valid index from the 0 meaning "no mdx").  The two slots must therefore
// be converted in lock step here.
void frame::interpreter_frame_set_bcx(intptr_t bcx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  if (ProfileInterpreter) {
    bool formerly_bci = is_bci(interpreter_frame_bcx());
    bool is_now_bci = is_bci(bcx);
    *interpreter_frame_bcx_addr() = bcx;

    intptr_t mdx = interpreter_frame_mdx();

    if (mdx != 0) {
      if (formerly_bci) {
        if (!is_now_bci) {
          // The bcx was just converted from bci to bcp.
          // Convert the mdx in parallel.
          methodDataOop mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdx - 1; // We distinguish valid mdi from zero by adding one.
          address mdp = mdo->di_to_dp(mdi);
          interpreter_frame_set_mdx((intptr_t)mdp);
        }
      } else {
        if (is_now_bci) {
          // The bcx was just converted from bcp to bci.
          // Convert the mdx in parallel.
          methodDataOop mdo = interpreter_frame_method()->method_data();
          assert(mdo != NULL, "");
          int mdi = mdo->dp_to_di((address)mdx);
          interpreter_frame_set_mdx((intptr_t)mdi + 1); // distinguish valid from 0.
        }
      }
    }
  } else {
    // Not profiling: there is no mdx to keep in sync.
    *interpreter_frame_bcx_addr() = bcx;
  }
}
duke@435 | 383 | |
// Current bytecode index.  The frame stores either a bci directly (during
// GC conversion) or a bcp, which is mapped back via the method.
jint frame::interpreter_frame_bci() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? bcx : interpreter_frame_method()->bci_from((address)bcx);
}
duke@435 | 389 | |
// Set the current bytecode index by storing the equivalent bcp.
// Must not be called while the bcx slot holds a bci (i.e. during GC).
void frame::interpreter_frame_set_bci(jint bci) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bci during GC");
  interpreter_frame_set_bcx((intptr_t)interpreter_frame_method()->bcp_from(bci));
}
duke@435 | 395 | |
// Current bytecode pointer; converts from a bci if one is stored (GC time).
address frame::interpreter_frame_bcp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  return is_bci(bcx) ? interpreter_frame_method()->bcp_from(bcx) : (address)bcx;
}
duke@435 | 401 | |
// Set the current bytecode pointer.
// Must not be called while the bcx slot holds a bci (i.e. during GC).
void frame::interpreter_frame_set_bcp(address bcp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  assert(!is_bci(interpreter_frame_bcx()), "should not set bcp during GC");
  interpreter_frame_set_bcx((intptr_t)bcp);
}
duke@435 | 407 | |
// Raw store of the mdx slot (holds either an mdp or an mdi+1; see the
// encoding notes on interpreter_frame_set_bcx).
void frame::interpreter_frame_set_mdx(intptr_t mdx) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(ProfileInterpreter, "must be profiling interpreter");
  *interpreter_frame_mdx_addr() = mdx;
}
duke@435 | 413 | |
// Current method-data pointer.  Only valid while bcx holds a bcp: during
// GC the mdx slot holds an mdi instead of a pointer and must not be read
// as an mdp.
address frame::interpreter_frame_mdp() const {
  assert(ProfileInterpreter, "must be profiling interpreter");
  assert(is_interpreted_frame(), "interpreted frame expected");
  intptr_t bcx = interpreter_frame_bcx();
  intptr_t mdx = interpreter_frame_mdx();

  assert(!is_bci(bcx), "should not access mdp during GC");
  return (address)mdx;
}
duke@435 | 423 | |
duke@435 | 424 | void frame::interpreter_frame_set_mdp(address mdp) { |
duke@435 | 425 | assert(is_interpreted_frame(), "interpreted frame expected"); |
duke@435 | 426 | if (mdp == NULL) { |
duke@435 | 427 | // Always allow the mdp to be cleared. |
duke@435 | 428 | interpreter_frame_set_mdx((intptr_t)mdp); |
duke@435 | 429 | } |
duke@435 | 430 | intptr_t bcx = interpreter_frame_bcx(); |
duke@435 | 431 | assert(!is_bci(bcx), "should not set mdp during GC"); |
duke@435 | 432 | interpreter_frame_set_mdx((intptr_t)mdp); |
duke@435 | 433 | } |
duke@435 | 434 | |
// Step from `current` to the adjacent monitor toward the monitor-area
// begin; monitors are laid out contiguously, interpreter_frame_monitor_size()
// words apart.
BasicObjectLock* frame::next_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
  interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* next = (BasicObjectLock*) (((intptr_t*) current) + interpreter_frame_monitor_size());
  return next;
}
duke@435 | 443 | |
// Step from `current` to the adjacent monitor in the opposite direction;
// the inverse of next_monitor_in_interpreter_frame.
BasicObjectLock* frame::previous_monitor_in_interpreter_frame(BasicObjectLock* current) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");
#ifdef ASSERT
//   // This verification needs to be checked before being enabled
//   interpreter_frame_verify_monitor(current);
#endif
  BasicObjectLock* previous = (BasicObjectLock*) (((intptr_t*) current) - interpreter_frame_monitor_size());
  return previous;
}
duke@435 | 453 | |
duke@435 | 454 | // Interpreter locals and expression stack locations. |
duke@435 | 455 | |
duke@435 | 456 | intptr_t* frame::interpreter_frame_local_at(int index) const { |
duke@435 | 457 | const int n = Interpreter::local_offset_in_bytes(index)/wordSize; |
duke@435 | 458 | return &((*interpreter_frame_locals_addr())[n]); |
duke@435 | 459 | } |
duke@435 | 460 | |
duke@435 | 461 | frame::Tag frame::interpreter_frame_local_tag(int index) const { |
duke@435 | 462 | const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize; |
duke@435 | 463 | return (Tag)(*interpreter_frame_locals_addr()) [n]; |
duke@435 | 464 | } |
duke@435 | 465 | |
duke@435 | 466 | void frame::interpreter_frame_set_local_tag(int index, Tag tag) const { |
duke@435 | 467 | const int n = Interpreter::local_tag_offset_in_bytes(index)/wordSize; |
duke@435 | 468 | (*interpreter_frame_locals_addr())[n] = (intptr_t)tag; |
duke@435 | 469 | } |
duke@435 | 470 | |
duke@435 | 471 | intptr_t* frame::interpreter_frame_expression_stack_at(jint offset) const { |
duke@435 | 472 | const int i = offset * interpreter_frame_expression_stack_direction(); |
duke@435 | 473 | const int n = ((i * Interpreter::stackElementSize()) + |
duke@435 | 474 | Interpreter::value_offset_in_bytes())/wordSize; |
duke@435 | 475 | return &(interpreter_frame_expression_stack()[n]); |
duke@435 | 476 | } |
duke@435 | 477 | |
duke@435 | 478 | frame::Tag frame::interpreter_frame_expression_stack_tag(jint offset) const { |
duke@435 | 479 | const int i = offset * interpreter_frame_expression_stack_direction(); |
duke@435 | 480 | const int n = ((i * Interpreter::stackElementSize()) + |
duke@435 | 481 | Interpreter::tag_offset_in_bytes())/wordSize; |
duke@435 | 482 | return (Tag)(interpreter_frame_expression_stack()[n]); |
duke@435 | 483 | } |
duke@435 | 484 | |
duke@435 | 485 | void frame::interpreter_frame_set_expression_stack_tag(jint offset, |
duke@435 | 486 | Tag tag) const { |
duke@435 | 487 | const int i = offset * interpreter_frame_expression_stack_direction(); |
duke@435 | 488 | const int n = ((i * Interpreter::stackElementSize()) + |
duke@435 | 489 | Interpreter::tag_offset_in_bytes())/wordSize; |
duke@435 | 490 | interpreter_frame_expression_stack()[n] = (intptr_t)tag; |
duke@435 | 491 | } |
duke@435 | 492 | |
jint frame::interpreter_frame_expression_stack_size() const {
  // Number of elements on the interpreter expression stack
  // Callers should span by stackElementWords
  int element_size = Interpreter::stackElementWords();
  // The expression stack grows toward lower or higher addresses depending
  // on the platform; measure the tos-to-base distance accordingly.
  if (frame::interpreter_frame_expression_stack_direction() < 0) {
    return (interpreter_frame_expression_stack() -
            interpreter_frame_tos_address() + 1)/element_size;
  } else {
    return (interpreter_frame_tos_address() -
            interpreter_frame_expression_stack() + 1)/element_size;
  }
}
duke@435 | 505 | |
duke@435 | 506 | |
duke@435 | 507 | // (frame::interpreter_frame_sender_sp accessor is in frame_<arch>.cpp) |
duke@435 | 508 | |
// Short human-readable classification of this frame for printing.
// Test order determines precedence when multiple testers could apply.
const char* frame::print_name() const {
  if (is_native_frame())      return "Native";
  if (is_interpreted_frame()) return "Interpreted";
  if (is_compiled_frame()) {
    if (is_deoptimized_frame()) return "Deoptimized";
    return "Compiled";
  }
  if (sp() == NULL)            return "Empty";
  return "C";
}
duke@435 | 519 | |
// One-line summary of the frame (type, sp/fp/pc) plus, when the pc can be
// attributed, the stub, interpreter codelet, or code blob it belongs to.
// Non-PRODUCT builds with WizardMode+Verbose also disassemble the code
// range tracked in [begin, end).
void frame::print_value_on(outputStream* st, JavaThread *thread) const {
  // Default disassembly window: a little before the pc; `end` stays NULL
  // until a more precise code range is discovered below.
  NOT_PRODUCT(address begin = pc()-40;)
  NOT_PRODUCT(address end = NULL;)

  st->print("%s frame (sp=" INTPTR_FORMAT " unextended sp=" INTPTR_FORMAT, print_name(), sp(), unextended_sp());
  if (sp() != NULL)
    st->print(", fp=" INTPTR_FORMAT ", pc=" INTPTR_FORMAT, fp(), pc());

  if (StubRoutines::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
    st->print("~Stub::%s", desc->name());
    NOT_PRODUCT(begin = desc->begin(); end = desc->end();)
  } else if (Interpreter::contains(pc())) {
    st->print_cr(")");
    st->print("(");
    InterpreterCodelet* desc = Interpreter::codelet_containing(pc());
    if (desc != NULL) {
      st->print("~");
      desc->print();
      NOT_PRODUCT(begin = desc->code_begin(); end = desc->code_end();)
    } else {
      st->print("~interpreter");
    }
  }
  st->print_cr(")");

  if (_cb != NULL) {
    st->print(" ");
    _cb->print_value_on(st);
    st->cr();
#ifndef PRODUCT
    // No stub/codelet range was found; fall back to the blob's full range.
    if (end == NULL) {
      begin = _cb->instructions_begin();
      end = _cb->instructions_end();
    }
#endif
  }
  NOT_PRODUCT(if (WizardMode && Verbose) Disassembler::decode(begin, end);)
}
duke@435 | 561 | |
duke@435 | 562 | |
// Print this frame's summary line; interpreter frames additionally get
// their locals, stack, monitors, bcp and method dumped (non-PRODUCT).
void frame::print_on(outputStream* st) const {
  print_value_on(st,NULL);
  if (is_interpreted_frame()) {
    interpreter_frame_print_on(st);
  }
}
duke@435 | 569 | |
duke@435 | 570 | |
duke@435 | 571 | void frame::interpreter_frame_print_on(outputStream* st) const { |
duke@435 | 572 | #ifndef PRODUCT |
duke@435 | 573 | assert(is_interpreted_frame(), "Not an interpreted frame"); |
duke@435 | 574 | jint i; |
duke@435 | 575 | for (i = 0; i < interpreter_frame_method()->max_locals(); i++ ) { |
duke@435 | 576 | intptr_t x = *interpreter_frame_local_at(i); |
duke@435 | 577 | st->print(" - local [" INTPTR_FORMAT "]", x); |
duke@435 | 578 | if (TaggedStackInterpreter) { |
duke@435 | 579 | Tag x = interpreter_frame_local_tag(i); |
duke@435 | 580 | st->print(" - local tag [" INTPTR_FORMAT "]", x); |
duke@435 | 581 | } |
duke@435 | 582 | st->fill_to(23); |
duke@435 | 583 | st->print_cr("; #%d", i); |
duke@435 | 584 | } |
duke@435 | 585 | for (i = interpreter_frame_expression_stack_size() - 1; i >= 0; --i ) { |
duke@435 | 586 | intptr_t x = *interpreter_frame_expression_stack_at(i); |
duke@435 | 587 | st->print(" - stack [" INTPTR_FORMAT "]", x); |
duke@435 | 588 | if (TaggedStackInterpreter) { |
duke@435 | 589 | Tag x = interpreter_frame_expression_stack_tag(i); |
duke@435 | 590 | st->print(" - stack tag [" INTPTR_FORMAT "]", x); |
duke@435 | 591 | } |
duke@435 | 592 | st->fill_to(23); |
duke@435 | 593 | st->print_cr("; #%d", i); |
duke@435 | 594 | } |
duke@435 | 595 | // locks for synchronization |
duke@435 | 596 | for (BasicObjectLock* current = interpreter_frame_monitor_end(); |
duke@435 | 597 | current < interpreter_frame_monitor_begin(); |
duke@435 | 598 | current = next_monitor_in_interpreter_frame(current)) { |
duke@435 | 599 | st->print_cr(" [ - obj "); |
duke@435 | 600 | current->obj()->print_value_on(st); |
duke@435 | 601 | st->cr(); |
duke@435 | 602 | st->print_cr(" - lock "); |
duke@435 | 603 | current->lock()->print_on(st); |
duke@435 | 604 | st->cr(); |
duke@435 | 605 | } |
duke@435 | 606 | // monitor |
duke@435 | 607 | st->print_cr(" - monitor[" INTPTR_FORMAT "]", interpreter_frame_monitor_begin()); |
duke@435 | 608 | // bcp |
duke@435 | 609 | st->print(" - bcp [" INTPTR_FORMAT "]", interpreter_frame_bcp()); |
duke@435 | 610 | st->fill_to(23); |
duke@435 | 611 | st->print_cr("; @%d", interpreter_frame_bci()); |
duke@435 | 612 | // locals |
duke@435 | 613 | st->print_cr(" - locals [" INTPTR_FORMAT "]", interpreter_frame_local_at(0)); |
duke@435 | 614 | // method |
duke@435 | 615 | st->print(" - method [" INTPTR_FORMAT "]", (address)interpreter_frame_method()); |
duke@435 | 616 | st->fill_to(23); |
duke@435 | 617 | st->print("; "); |
duke@435 | 618 | interpreter_frame_method()->print_name(st); |
duke@435 | 619 | st->cr(); |
duke@435 | 620 | #endif |
duke@435 | 621 | } |
duke@435 | 622 | |
// Return whether the frame is in the VM or os indicating a Hotspot problem.
// Otherwise, it's likely a bug in the native library that the Java code calls,
// hopefully indicating where to submit bugs.
static void print_C_frame(outputStream* st, char* buf, int buflen, address pc) {
  // C/C++ frame
  bool in_vm = os::address_is_in_vm(pc);
  st->print(in_vm ? "V" : "C");

  int offset;
  bool found;

  // libname: print "[library+offset]" when the pc can be attributed to a
  // loaded library; otherwise fall back to printing the raw pc value.
  found = os::dll_address_to_library_name(pc, buf, buflen, &offset);
  if (found) {
    // skip directory names
    const char *p1, *p2;
    p1 = buf;
    int len = (int)strlen(os::file_separator());
    while ((p2 = strstr(p1, os::file_separator())) != NULL) p1 = p2 + len;
    st->print(" [%s+0x%x]", p1, offset);
  } else {
    st->print(" " PTR_FORMAT, pc);
  }

  // function name - os::dll_address_to_function_name() may return confusing
  // names if pc is within jvm.dll or libjvm.so, because JVM only has
  // JVM_xxxx and a few other symbols in the dynamic symbol table. Do this
  // only for native libraries.
  if (!in_vm) {
    found = os::dll_address_to_function_name(pc, buf, buflen, &offset);

    if (found) {
      st->print(" %s+0x%x", buf, offset);
    }
  }
}
duke@435 | 659 | |
duke@435 | 660 | // frame::print_on_error() is called by fatal error handler. Notice that we may |
duke@435 | 661 | // crash inside this function if stack frame is corrupted. The fatal error |
duke@435 | 662 | // handler can catch and handle the crash. Here we assume the frame is valid. |
duke@435 | 663 | // |
duke@435 | 664 | // First letter indicates type of the frame: |
duke@435 | 665 | // J: Java frame (compiled) |
duke@435 | 666 | // j: Java frame (interpreted) |
duke@435 | 667 | // V: VM frame (C/C++) |
duke@435 | 668 | // v: Other frames running VM generated code (e.g. stubs, adapters, etc.) |
duke@435 | 669 | // C: C/C++ frame |
duke@435 | 670 | // |
// We don't need detailed frame types like those in frame::print_name().
// "C" suggests the problem is in user lib; everything else is likely a VM bug.
duke@435 | 673 | |
// Print a one-line description of this frame for the fatal error report.
// The first letter encodes the frame type (see the comment block above).
// NOTE(review): 'verbose' is accepted but not used in this body.
void frame::print_on_error(outputStream* st, char* buf, int buflen, bool verbose) const {
  if (_cb != NULL) {
    // Frame has a code blob: classify by pc location and blob kind.
    // NOTE(review): the interpreter check is only reached when _cb != NULL;
    // presumably interpreted frames carry an interpreter blob here -- confirm
    // against frame construction on each platform.
    if (Interpreter::contains(pc())) {
      // Interpreted Java frame: method name + bci when the method slot is valid.
      methodOop m = this->interpreter_frame_method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("j %s", buf);
        st->print("+%d", this->interpreter_frame_bci());
      } else {
        st->print("j " PTR_FORMAT, pc());
      }
    } else if (StubRoutines::contains(pc())) {
      // VM generated stub code.
      StubCodeDesc* desc = StubCodeDesc::desc_for(pc());
      if (desc != NULL) {
        st->print("v ~StubRoutines::%s", desc->name());
      } else {
        st->print("v ~StubRoutines::" PTR_FORMAT, pc());
      }
    } else if (_cb->is_buffer_blob()) {
      st->print("v ~BufferBlob::%s", ((BufferBlob *)_cb)->name());
    } else if (_cb->is_nmethod()) {
      // Compiled Java frame.
      methodOop m = ((nmethod *)_cb)->method();
      if (m != NULL) {
        m->name_and_sig_as_C_string(buf, buflen);
        st->print("J %s", buf);
      } else {
        st->print("J " PTR_FORMAT, pc());
      }
    } else if (_cb->is_runtime_stub()) {
      st->print("v ~RuntimeStub::%s", ((RuntimeStub *)_cb)->name());
    } else if (_cb->is_deoptimization_stub()) {
      st->print("v ~DeoptimizationBlob");
    } else if (_cb->is_exception_stub()) {
      st->print("v ~ExceptionBlob");
    } else if (_cb->is_safepoint_stub()) {
      st->print("v ~SafepointBlob");
    } else {
      // Unrecognized blob kind: fall back to printing the raw pc.
      st->print("v blob " PTR_FORMAT, pc());
    }
  } else {
    // No code blob at all: a plain C/C++ frame.
    print_C_frame(st, buf, buflen, pc());
  }
}
duke@435 | 717 | |
duke@435 | 718 | |
/*
  The interpreter_frame_expression_stack_at method, in the case of SPARC, needs the
  max_stack value of the method in order to compute the expression stack address.
  It uses the methodOop to obtain the max_stack value, but during GC this
  methodOop value saved on the frame is changed by reverse_and_push and hence cannot
  be used.  So we save the max_stack value in the FrameClosure object and pass it
  down to the interpreter_frame_expression_stack_at method.
*/
duke@435 | 727 | class InterpreterFrameClosure : public OffsetClosure { |
duke@435 | 728 | private: |
duke@435 | 729 | frame* _fr; |
duke@435 | 730 | OopClosure* _f; |
duke@435 | 731 | int _max_locals; |
duke@435 | 732 | int _max_stack; |
duke@435 | 733 | |
duke@435 | 734 | public: |
duke@435 | 735 | InterpreterFrameClosure(frame* fr, int max_locals, int max_stack, |
duke@435 | 736 | OopClosure* f) { |
duke@435 | 737 | _fr = fr; |
duke@435 | 738 | _max_locals = max_locals; |
duke@435 | 739 | _max_stack = max_stack; |
duke@435 | 740 | _f = f; |
duke@435 | 741 | } |
duke@435 | 742 | |
duke@435 | 743 | void offset_do(int offset) { |
duke@435 | 744 | oop* addr; |
duke@435 | 745 | if (offset < _max_locals) { |
duke@435 | 746 | addr = (oop*) _fr->interpreter_frame_local_at(offset); |
duke@435 | 747 | assert((intptr_t*)addr >= _fr->sp(), "must be inside the frame"); |
duke@435 | 748 | _f->do_oop(addr); |
duke@435 | 749 | } else { |
duke@435 | 750 | addr = (oop*) _fr->interpreter_frame_expression_stack_at((offset - _max_locals)); |
duke@435 | 751 | // In case of exceptions, the expression stack is invalid and the esp will be reset to express |
duke@435 | 752 | // this condition. Therefore, we call f only if addr is 'inside' the stack (i.e., addr >= esp for Intel). |
duke@435 | 753 | bool in_stack; |
duke@435 | 754 | if (frame::interpreter_frame_expression_stack_direction() > 0) { |
duke@435 | 755 | in_stack = (intptr_t*)addr <= _fr->interpreter_frame_tos_address(); |
duke@435 | 756 | } else { |
duke@435 | 757 | in_stack = (intptr_t*)addr >= _fr->interpreter_frame_tos_address(); |
duke@435 | 758 | } |
duke@435 | 759 | if (in_stack) { |
duke@435 | 760 | _f->do_oop(addr); |
duke@435 | 761 | } |
duke@435 | 762 | } |
duke@435 | 763 | } |
duke@435 | 764 | |
duke@435 | 765 | int max_locals() { return _max_locals; } |
duke@435 | 766 | frame* fr() { return _fr; } |
duke@435 | 767 | }; |
duke@435 | 768 | |
duke@435 | 769 | |
duke@435 | 770 | class InterpretedArgumentOopFinder: public SignatureInfo { |
duke@435 | 771 | private: |
twisti@1573 | 772 | OopClosure* _f; // Closure to invoke |
twisti@1573 | 773 | int _offset; // TOS-relative offset, decremented with each argument |
twisti@1573 | 774 | bool _has_receiver; // true if the callee has a receiver |
duke@435 | 775 | frame* _fr; |
duke@435 | 776 | |
duke@435 | 777 | void set(int size, BasicType type) { |
duke@435 | 778 | _offset -= size; |
duke@435 | 779 | if (type == T_OBJECT || type == T_ARRAY) oop_offset_do(); |
duke@435 | 780 | } |
duke@435 | 781 | |
duke@435 | 782 | void oop_offset_do() { |
duke@435 | 783 | oop* addr; |
duke@435 | 784 | addr = (oop*)_fr->interpreter_frame_tos_at(_offset); |
duke@435 | 785 | _f->do_oop(addr); |
duke@435 | 786 | } |
duke@435 | 787 | |
duke@435 | 788 | public: |
twisti@1573 | 789 | InterpretedArgumentOopFinder(symbolHandle signature, bool has_receiver, frame* fr, OopClosure* f) : SignatureInfo(signature), _has_receiver(has_receiver) { |
duke@435 | 790 | // compute size of arguments |
twisti@1573 | 791 | int args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0); |
duke@435 | 792 | assert(!fr->is_interpreted_frame() || |
duke@435 | 793 | args_size <= fr->interpreter_frame_expression_stack_size(), |
duke@435 | 794 | "args cannot be on stack anymore"); |
duke@435 | 795 | // initialize InterpretedArgumentOopFinder |
duke@435 | 796 | _f = f; |
duke@435 | 797 | _fr = fr; |
duke@435 | 798 | _offset = args_size; |
duke@435 | 799 | } |
duke@435 | 800 | |
duke@435 | 801 | void oops_do() { |
twisti@1573 | 802 | if (_has_receiver) { |
duke@435 | 803 | --_offset; |
duke@435 | 804 | oop_offset_do(); |
duke@435 | 805 | } |
duke@435 | 806 | iterate_parameters(); |
duke@435 | 807 | } |
duke@435 | 808 | }; |
duke@435 | 809 | |
duke@435 | 810 | |
duke@435 | 811 | // Entry frame has following form (n arguments) |
duke@435 | 812 | // +-----------+ |
duke@435 | 813 | // sp -> | last arg | |
duke@435 | 814 | // +-----------+ |
duke@435 | 815 | // : ::: : |
duke@435 | 816 | // +-----------+ |
duke@435 | 817 | // (sp+n)->| first arg| |
duke@435 | 818 | // +-----------+ |
duke@435 | 819 | |
duke@435 | 820 | |
duke@435 | 821 | |
duke@435 | 822 | // visits and GC's all the arguments in entry frame |
duke@435 | 823 | class EntryFrameOopFinder: public SignatureInfo { |
duke@435 | 824 | private: |
duke@435 | 825 | bool _is_static; |
duke@435 | 826 | int _offset; |
duke@435 | 827 | frame* _fr; |
duke@435 | 828 | OopClosure* _f; |
duke@435 | 829 | |
duke@435 | 830 | void set(int size, BasicType type) { |
duke@435 | 831 | assert (_offset >= 0, "illegal offset"); |
duke@435 | 832 | if (type == T_OBJECT || type == T_ARRAY) oop_at_offset_do(_offset); |
duke@435 | 833 | _offset -= size; |
duke@435 | 834 | } |
duke@435 | 835 | |
duke@435 | 836 | void oop_at_offset_do(int offset) { |
duke@435 | 837 | assert (offset >= 0, "illegal offset") |
duke@435 | 838 | oop* addr = (oop*) _fr->entry_frame_argument_at(offset); |
duke@435 | 839 | _f->do_oop(addr); |
duke@435 | 840 | } |
duke@435 | 841 | |
duke@435 | 842 | public: |
duke@435 | 843 | EntryFrameOopFinder(frame* frame, symbolHandle signature, bool is_static) : SignatureInfo(signature) { |
duke@435 | 844 | _f = NULL; // will be set later |
duke@435 | 845 | _fr = frame; |
duke@435 | 846 | _is_static = is_static; |
duke@435 | 847 | _offset = ArgumentSizeComputer(signature).size() - 1; // last parameter is at index 0 |
duke@435 | 848 | } |
duke@435 | 849 | |
duke@435 | 850 | void arguments_do(OopClosure* f) { |
duke@435 | 851 | _f = f; |
duke@435 | 852 | if (!_is_static) oop_at_offset_do(_offset+1); // do the receiver |
duke@435 | 853 | iterate_parameters(); |
duke@435 | 854 | } |
duke@435 | 855 | |
duke@435 | 856 | }; |
duke@435 | 857 | |
duke@435 | 858 | oop* frame::interpreter_callee_receiver_addr(symbolHandle signature) { |
duke@435 | 859 | ArgumentSizeComputer asc(signature); |
duke@435 | 860 | int size = asc.size(); |
duke@435 | 861 | return (oop *)interpreter_frame_tos_at(size); |
duke@435 | 862 | } |
duke@435 | 863 | |
duke@435 | 864 | |
// Visit all oops of this interpreted frame: the monitors, the fixed part
// (methodOop and constant pool cache), a callee's arguments when we sit
// at a call site, and the locals plus expression stack as described by
// the oop map for the current bci.
void frame::oops_interpreted_do(OopClosure* f, const RegisterMap* map, bool query_oop_map_cache) {
  assert(is_interpreted_frame(), "Not an interpreted frame");
  assert(map != NULL, "map must be set");
  Thread *thread = Thread::current();
  methodHandle m (thread, interpreter_frame_method());
  jint bci = interpreter_frame_bci();

  assert(Universe::heap()->is_in(m()), "must be valid oop");
  assert(m->is_method(), "checking frame value");
  assert((m->is_native() && bci == 0) || (!m->is_native() && bci >= 0 && bci < m->code_size()), "invalid bci value");

  // Handle the monitor elements in the activation
  for (
    BasicObjectLock* current = interpreter_frame_monitor_end();
    current < interpreter_frame_monitor_begin();
    current = next_monitor_in_interpreter_frame(current)
  ) {
#ifdef ASSERT
    interpreter_frame_verify_monitor(current);
#endif
    current->oops_do(f);
  }

  // process fixed part: the methodOop and cp-cache slots of the frame itself
  f->do_oop((oop*)interpreter_frame_method_addr());
  f->do_oop((oop*)interpreter_frame_cache_addr());

  // Hmm what about the mdp?
#ifdef CC_INTERP
  // Interpreter frame in the midst of a call have a methodOop within the
  // object.
  interpreterState istate = get_interpreterState();
  if (istate->msg() == BytecodeInterpreter::call_method) {
    f->do_oop((oop*)&istate->_result._to_call._callee);
  }

#endif /* CC_INTERP */

  // Native frames keep an oop temp slot in the frame; visit it too.
  if (m->is_native()) {
#ifdef CC_INTERP
    f->do_oop((oop*)&istate->_oop_temp);
#else
    f->do_oop((oop*)( fp() + interpreter_frame_oop_temp_offset ));
#endif /* CC_INTERP */
  }

  // For native methods the parameters play the role of locals.
  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  symbolHandle signature;
  bool has_receiver = false;

  // Process a callee's arguments if we are at a call site
  // (i.e., if we are at an invoke bytecode)
  // This is used sometimes for calling into the VM, not for another
  // interpreted or compiled frame.
  if (!m->is_native()) {
    Bytecode_invoke *call = Bytecode_invoke_at_check(m, bci);
    if (call != NULL) {
      signature = symbolHandle(thread, call->signature());
      has_receiver = call->has_receiver();
      if (map->include_argument_oops() &&
          interpreter_frame_expression_stack_size() > 0) {
        ResourceMark rm(thread); // is this right ???
        // we are at a call site & the expression stack is not empty
        // => process callee's arguments
        //
        // Note: The expression stack can be empty if an exception
        // occurred during method resolution/execution. In all
        // cases we empty the expression stack completely be-
        // fore handling the exception (the exception handling
        // code in the interpreter calls a blocking runtime
        // routine which can cause this code to be executed).
        // (was bug gri 7/27/98)
        oops_interpreted_arguments_do(signature, has_receiver, f);
      }
    }
  }

  if (TaggedStackInterpreter) {
    // process locals & expression stack using per-slot tags; the oop map
    // 'mask' is computed in debug builds only, for cross-checking.
    InterpreterOopMap *mask = NULL;
#ifdef ASSERT
    InterpreterOopMap oopmap_mask;
    OopMapCache::compute_one_oop_map(m, bci, &oopmap_mask);
    mask = &oopmap_mask;
#endif // ASSERT
    oops_interpreted_locals_do(f, max_locals, mask);
    oops_interpreted_expressions_do(f, signature, has_receiver,
                                    m->max_stack(),
                                    max_locals, mask);
  } else {
    InterpreterFrameClosure blk(this, max_locals, m->max_stack(), f);

    // process locals & expression stack via the interpreter oop map
    InterpreterOopMap mask;
    if (query_oop_map_cache) {
      m->mask_for(bci, &mask);
    } else {
      OopMapCache::compute_one_oop_map(m, bci, &mask);
    }
    mask.iterate_oop(&blk);
  }
}
duke@435 | 968 | |
duke@435 | 969 | |
// Tagged-stack interpreter only: visit interpreter locals whose per-slot
// tag marks them as references.  'mask' is used in debug builds to
// cross-check the tags against the computed interpreter oop map.
void frame::oops_interpreted_locals_do(OopClosure *f,
                                      int max_locals,
                                      InterpreterOopMap *mask) {
  // Process locals then interpreter expression stack
  for (int i = 0; i < max_locals; i++ ) {
    Tag tag = interpreter_frame_local_tag(i);
    if (tag == TagReference) {
      oop* addr = (oop*) interpreter_frame_local_at(i);
      assert((intptr_t*)addr >= sp(), "must be inside the frame");
      f->do_oop(addr);
#ifdef ASSERT
    } else {
      // Debug builds: a value-tagged slot must not be an oop per the mask.
      assert(tag == TagValue, "bad tag value for locals");
      oop* p = (oop*) interpreter_frame_local_at(i);
      // Not always true - too bad. May have dead oops without tags in locals.
      // assert(*p == NULL || !(*p)->is_oop(), "oop not tagged on interpreter locals");
      assert(*p == NULL || !mask->is_oop(i), "local oop map mismatch");
#endif // ASSERT
    }
  }
}
duke@435 | 991 | |
// Tagged-stack interpreter only: visit reference-tagged expression stack
// slots.  When 'signature' is set we sit at a call site and the top of
// stack is moved past the callee's arguments so those oops are not
// visited twice (here and as the callee's locals).
void frame::oops_interpreted_expressions_do(OopClosure *f,
                                      symbolHandle signature,
                                      bool has_receiver,
                                      int max_stack,
                                      int max_locals,
                                      InterpreterOopMap *mask) {
  // There is no stack no matter what the esp is pointing to (native methods
  // might look like expression stack is nonempty).
  if (max_stack == 0) return;

  // Point the top of the expression stack above arguments to a call so
  // arguments aren't gc'ed as both stack values for callee and callee
  // arguments in callee's locals.
  int args_size = 0;
  if (!signature.is_null()) {
    args_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0);
  }

  intptr_t *tos_addr = interpreter_frame_tos_at(args_size);
  assert(args_size != 0 || tos_addr == interpreter_frame_tos_address(), "these are same");
  intptr_t *frst_expr = interpreter_frame_expression_stack_at(0);
  // In case of exceptions, the expression stack is invalid and the esp
  // will be reset to express this condition. Therefore, we call f only
  // if addr is 'inside' the stack (i.e., addr >= esp for Intel).
  bool in_stack;
  if (interpreter_frame_expression_stack_direction() > 0) {
    in_stack = (intptr_t*)frst_expr <= tos_addr;
  } else {
    in_stack = (intptr_t*)frst_expr >= tos_addr;
  }
  if (!in_stack) return;

  // Walk the stack slots below the (possibly adjusted) top of stack.
  jint stack_size = interpreter_frame_expression_stack_size() - args_size;
  for (int j = 0; j < stack_size; j++) {
    Tag tag = interpreter_frame_expression_stack_tag(j);
    if (tag == TagReference) {
      oop *addr = (oop*) interpreter_frame_expression_stack_at(j);
      f->do_oop(addr);
#ifdef ASSERT
    } else {
      // Debug builds: a value-tagged slot must not be an oop per the mask.
      assert(tag == TagValue, "bad tag value for stack element");
      oop *p = (oop*) interpreter_frame_expression_stack_at((j));
      assert(*p == NULL || !mask->is_oop(j+max_locals), "stack oop map mismatch");
#endif // ASSERT
    }
  }
}
duke@435 | 1039 | |
twisti@1573 | 1040 | void frame::oops_interpreted_arguments_do(symbolHandle signature, bool has_receiver, OopClosure* f) { |
twisti@1573 | 1041 | InterpretedArgumentOopFinder finder(signature, has_receiver, this, f); |
duke@435 | 1042 | finder.oops_do(); |
duke@435 | 1043 | } |
duke@435 | 1044 | |
jrose@1424 | 1045 | void frame::oops_code_blob_do(OopClosure* f, CodeBlobClosure* cf, const RegisterMap* reg_map) { |
duke@435 | 1046 | assert(_cb != NULL, "sanity check"); |
duke@435 | 1047 | if (_cb->oop_maps() != NULL) { |
duke@435 | 1048 | OopMapSet::oops_do(this, reg_map, f); |
duke@435 | 1049 | |
duke@435 | 1050 | // Preserve potential arguments for a callee. We handle this by dispatching |
duke@435 | 1051 | // on the codeblob. For c2i, we do |
duke@435 | 1052 | if (reg_map->include_argument_oops()) { |
duke@435 | 1053 | _cb->preserve_callee_argument_oops(*this, reg_map, f); |
duke@435 | 1054 | } |
duke@435 | 1055 | } |
duke@435 | 1056 | // In cases where perm gen is collected, GC will want to mark |
duke@435 | 1057 | // oops referenced from nmethods active on thread stacks so as to |
duke@435 | 1058 | // prevent them from being collected. However, this visit should be |
duke@435 | 1059 | // restricted to certain phases of the collection only. The |
jrose@1424 | 1060 | // closure decides how it wants nmethods to be traced. |
jrose@1424 | 1061 | if (cf != NULL) |
jrose@1424 | 1062 | cf->do_code_blob(_cb); |
duke@435 | 1063 | } |
duke@435 | 1064 | |
duke@435 | 1065 | class CompiledArgumentOopFinder: public SignatureInfo { |
duke@435 | 1066 | protected: |
duke@435 | 1067 | OopClosure* _f; |
twisti@1573 | 1068 | int _offset; // the current offset, incremented with each argument |
twisti@1573 | 1069 | bool _has_receiver; // true if the callee has a receiver |
duke@435 | 1070 | frame _fr; |
duke@435 | 1071 | RegisterMap* _reg_map; |
duke@435 | 1072 | int _arg_size; |
duke@435 | 1073 | VMRegPair* _regs; // VMReg list of arguments |
duke@435 | 1074 | |
duke@435 | 1075 | void set(int size, BasicType type) { |
duke@435 | 1076 | if (type == T_OBJECT || type == T_ARRAY) handle_oop_offset(); |
duke@435 | 1077 | _offset += size; |
duke@435 | 1078 | } |
duke@435 | 1079 | |
duke@435 | 1080 | virtual void handle_oop_offset() { |
duke@435 | 1081 | // Extract low order register number from register array. |
duke@435 | 1082 | // In LP64-land, the high-order bits are valid but unhelpful. |
duke@435 | 1083 | VMReg reg = _regs[_offset].first(); |
duke@435 | 1084 | oop *loc = _fr.oopmapreg_to_location(reg, _reg_map); |
duke@435 | 1085 | _f->do_oop(loc); |
duke@435 | 1086 | } |
duke@435 | 1087 | |
duke@435 | 1088 | public: |
twisti@1573 | 1089 | CompiledArgumentOopFinder(symbolHandle signature, bool has_receiver, OopClosure* f, frame fr, const RegisterMap* reg_map) |
duke@435 | 1090 | : SignatureInfo(signature) { |
duke@435 | 1091 | |
duke@435 | 1092 | // initialize CompiledArgumentOopFinder |
duke@435 | 1093 | _f = f; |
duke@435 | 1094 | _offset = 0; |
twisti@1573 | 1095 | _has_receiver = has_receiver; |
duke@435 | 1096 | _fr = fr; |
duke@435 | 1097 | _reg_map = (RegisterMap*)reg_map; |
twisti@1573 | 1098 | _arg_size = ArgumentSizeComputer(signature).size() + (has_receiver ? 1 : 0); |
duke@435 | 1099 | |
duke@435 | 1100 | int arg_size; |
twisti@1573 | 1101 | _regs = SharedRuntime::find_callee_arguments(signature(), has_receiver, &arg_size); |
duke@435 | 1102 | assert(arg_size == _arg_size, "wrong arg size"); |
duke@435 | 1103 | } |
duke@435 | 1104 | |
duke@435 | 1105 | void oops_do() { |
twisti@1573 | 1106 | if (_has_receiver) { |
duke@435 | 1107 | handle_oop_offset(); |
duke@435 | 1108 | _offset++; |
duke@435 | 1109 | } |
duke@435 | 1110 | iterate_parameters(); |
duke@435 | 1111 | } |
duke@435 | 1112 | }; |
duke@435 | 1113 | |
twisti@1573 | 1114 | void frame::oops_compiled_arguments_do(symbolHandle signature, bool has_receiver, const RegisterMap* reg_map, OopClosure* f) { |
duke@435 | 1115 | ResourceMark rm; |
twisti@1573 | 1116 | CompiledArgumentOopFinder finder(signature, has_receiver, f, *this, reg_map); |
duke@435 | 1117 | finder.oops_do(); |
duke@435 | 1118 | } |
duke@435 | 1119 | |
duke@435 | 1120 | |
duke@435 | 1121 | // Get receiver out of callers frame, i.e. find parameter 0 in callers |
duke@435 | 1122 | // frame. Consult ADLC for where parameter 0 is to be found. Then |
duke@435 | 1123 | // check local reg_map for it being a callee-save register or argument |
duke@435 | 1124 | // register, both of which are saved in the local frame. If not found |
duke@435 | 1125 | // there, it must be an in-stack argument of the caller. |
duke@435 | 1126 | // Note: caller.sp() points to callee-arguments |
duke@435 | 1127 | oop frame::retrieve_receiver(RegisterMap* reg_map) { |
duke@435 | 1128 | frame caller = *this; |
duke@435 | 1129 | |
duke@435 | 1130 | // First consult the ADLC on where it puts parameter 0 for this signature. |
duke@435 | 1131 | VMReg reg = SharedRuntime::name_for_receiver(); |
duke@435 | 1132 | oop r = *caller.oopmapreg_to_location(reg, reg_map); |
duke@435 | 1133 | assert( Universe::heap()->is_in_or_null(r), "bad receiver" ); |
duke@435 | 1134 | return r; |
duke@435 | 1135 | } |
duke@435 | 1136 | |
duke@435 | 1137 | |
duke@435 | 1138 | oop* frame::oopmapreg_to_location(VMReg reg, const RegisterMap* reg_map) const { |
duke@435 | 1139 | if(reg->is_reg()) { |
duke@435 | 1140 | // If it is passed in a register, it got spilled in the stub frame. |
duke@435 | 1141 | return (oop *)reg_map->location(reg); |
duke@435 | 1142 | } else { |
coleenp@548 | 1143 | int sp_offset_in_bytes = reg->reg2stack() * VMRegImpl::stack_slot_size; |
coleenp@548 | 1144 | return (oop*)(((address)unextended_sp()) + sp_offset_in_bytes); |
duke@435 | 1145 | } |
duke@435 | 1146 | } |
duke@435 | 1147 | |
duke@435 | 1148 | BasicLock* frame::compiled_synchronized_native_monitor(nmethod* nm) { |
duke@435 | 1149 | if (nm == NULL) { |
duke@435 | 1150 | assert(_cb != NULL && _cb->is_nmethod() && |
duke@435 | 1151 | nm->method()->is_native() && |
duke@435 | 1152 | nm->method()->is_synchronized(), |
duke@435 | 1153 | "should not call this otherwise"); |
duke@435 | 1154 | nm = (nmethod*) _cb; |
duke@435 | 1155 | } |
duke@435 | 1156 | int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_sp_offset()); |
duke@435 | 1157 | assert(byte_offset >= 0, "should not see invalid offset"); |
duke@435 | 1158 | return (BasicLock*) &sp()[byte_offset / wordSize]; |
duke@435 | 1159 | } |
duke@435 | 1160 | |
duke@435 | 1161 | oop frame::compiled_synchronized_native_monitor_owner(nmethod* nm) { |
duke@435 | 1162 | if (nm == NULL) { |
duke@435 | 1163 | assert(_cb != NULL && _cb->is_nmethod() && |
duke@435 | 1164 | nm->method()->is_native() && |
duke@435 | 1165 | nm->method()->is_synchronized(), |
duke@435 | 1166 | "should not call this otherwise"); |
duke@435 | 1167 | nm = (nmethod*) _cb; |
duke@435 | 1168 | } |
duke@435 | 1169 | int byte_offset = in_bytes(nm->compiled_synchronized_native_basic_lock_owner_sp_offset()); |
duke@435 | 1170 | assert(byte_offset >= 0, "should not see invalid offset"); |
duke@435 | 1171 | oop owner = ((oop*) sp())[byte_offset / wordSize]; |
duke@435 | 1172 | assert( Universe::heap()->is_in(owner), "bad receiver" ); |
duke@435 | 1173 | return owner; |
duke@435 | 1174 | } |
duke@435 | 1175 | |
duke@435 | 1176 | void frame::oops_entry_do(OopClosure* f, const RegisterMap* map) { |
duke@435 | 1177 | assert(map != NULL, "map must be set"); |
duke@435 | 1178 | if (map->include_argument_oops()) { |
duke@435 | 1179 | // must collect argument oops, as nobody else is doing it |
duke@435 | 1180 | Thread *thread = Thread::current(); |
duke@435 | 1181 | methodHandle m (thread, entry_frame_call_wrapper()->callee_method()); |
duke@435 | 1182 | symbolHandle signature (thread, m->signature()); |
duke@435 | 1183 | EntryFrameOopFinder finder(this, signature, m->is_static()); |
duke@435 | 1184 | finder.arguments_do(f); |
duke@435 | 1185 | } |
duke@435 | 1186 | // Traverse the Handle Block saved in the entry frame |
duke@435 | 1187 | entry_frame_call_wrapper()->oops_do(f); |
duke@435 | 1188 | } |
duke@435 | 1189 | |
duke@435 | 1190 | |
jrose@1424 | 1191 | void frame::oops_do_internal(OopClosure* f, CodeBlobClosure* cf, RegisterMap* map, bool use_interpreter_oop_map_cache) { |
minqi@1554 | 1192 | #ifndef PRODUCT |
minqi@1554 | 1193 | // simulate GC crash here to dump java thread in error report |
minqi@1554 | 1194 | if (CrashGCForDumpingJavaThread) { |
minqi@1554 | 1195 | char *t = NULL; |
minqi@1554 | 1196 | *t = 'c'; |
minqi@1554 | 1197 | } |
minqi@1554 | 1198 | #endif |
minqi@1554 | 1199 | if (is_interpreted_frame()) { |
minqi@1554 | 1200 | oops_interpreted_do(f, map, use_interpreter_oop_map_cache); |
minqi@1554 | 1201 | } else if (is_entry_frame()) { |
minqi@1554 | 1202 | oops_entry_do(f, map); |
minqi@1554 | 1203 | } else if (CodeCache::contains(pc())) { |
minqi@1554 | 1204 | oops_code_blob_do(f, cf, map); |
duke@435 | 1205 | } else { |
duke@435 | 1206 | ShouldNotReachHere(); |
duke@435 | 1207 | } |
duke@435 | 1208 | } |
duke@435 | 1209 | |
jrose@1424 | 1210 | void frame::nmethods_do(CodeBlobClosure* cf) { |
duke@435 | 1211 | if (_cb != NULL && _cb->is_nmethod()) { |
jrose@1424 | 1212 | cf->do_code_blob(_cb); |
duke@435 | 1213 | } |
duke@435 | 1214 | } |
duke@435 | 1215 | |
duke@435 | 1216 | |
duke@435 | 1217 | void frame::gc_prologue() { |
duke@435 | 1218 | if (is_interpreted_frame()) { |
duke@435 | 1219 | // set bcx to bci to become methodOop position independent during GC |
duke@435 | 1220 | interpreter_frame_set_bcx(interpreter_frame_bci()); |
duke@435 | 1221 | } |
duke@435 | 1222 | } |
duke@435 | 1223 | |
duke@435 | 1224 | |
duke@435 | 1225 | void frame::gc_epilogue() { |
duke@435 | 1226 | if (is_interpreted_frame()) { |
duke@435 | 1227 | // set bcx back to bcp for interpreter |
duke@435 | 1228 | interpreter_frame_set_bcx((intptr_t)interpreter_frame_bcp()); |
duke@435 | 1229 | } |
duke@435 | 1230 | // call processor specific epilog function |
duke@435 | 1231 | pd_gc_epilog(); |
duke@435 | 1232 | } |
duke@435 | 1233 | |
duke@435 | 1234 | |
duke@435 | 1235 | # ifdef ENABLE_ZAP_DEAD_LOCALS |
duke@435 | 1236 | |
duke@435 | 1237 | void frame::CheckValueClosure::do_oop(oop* p) { |
duke@435 | 1238 | if (CheckOopishValues && Universe::heap()->is_in_reserved(*p)) { |
duke@435 | 1239 | warning("value @ " INTPTR_FORMAT " looks oopish (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current()); |
duke@435 | 1240 | } |
duke@435 | 1241 | } |
duke@435 | 1242 | frame::CheckValueClosure frame::_check_value; |
duke@435 | 1243 | |
duke@435 | 1244 | |
duke@435 | 1245 | void frame::CheckOopClosure::do_oop(oop* p) { |
duke@435 | 1246 | if (*p != NULL && !(*p)->is_oop()) { |
duke@435 | 1247 | warning("value @ " INTPTR_FORMAT " should be an oop (" INTPTR_FORMAT ") (thread = " INTPTR_FORMAT ")", p, (address)*p, Thread::current()); |
duke@435 | 1248 | } |
duke@435 | 1249 | } |
duke@435 | 1250 | frame::CheckOopClosure frame::_check_oop; |
duke@435 | 1251 | |
// Derived-oop check used by the zap-dead-locals machinery: only the base
// oop is validated here; 'derived' is not checked by this function.
void frame::check_derived_oop(oop* base, oop* derived) {
  _check_oop.do_oop(base);
}
duke@435 | 1255 | |
duke@435 | 1256 | |
duke@435 | 1257 | void frame::ZapDeadClosure::do_oop(oop* p) { |
duke@435 | 1258 | if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p); |
duke@435 | 1259 | // Need cast because on _LP64 the conversion to oop is ambiguous. Constant |
duke@435 | 1260 | // can be either long or int. |
duke@435 | 1261 | *p = (oop)(int)0xbabebabe; |
duke@435 | 1262 | } |
duke@435 | 1263 | frame::ZapDeadClosure frame::_zap_dead; |
duke@435 | 1264 | |
// Zap (poison) the dead locals of this frame so that stale oops are
// caught early, dispatching on the frame kind.  Runtime frames are
// deliberately left untouched (see the comment at the empty else below).
void frame::zap_dead_locals(JavaThread* thread, const RegisterMap* map) {
  assert(thread == Thread::current(), "need to synchronize to do this to another thread");
  // Tracing - part 1
  if (TraceZapDeadLocals) {
    ResourceMark rm(thread);
    tty->print_cr("--------------------------------------------------------------------------------");
    tty->print("Zapping dead locals in ");
    print_on(tty);
    tty->cr();
  }
  // Zapping
  if (is_entry_frame ()) zap_dead_entry_locals (thread, map);
  else if (is_interpreted_frame()) zap_dead_interpreted_locals(thread, map);
  else if (is_compiled_frame()) zap_dead_compiled_locals (thread, map);

  else
    // could be is_runtime_frame
    // so remove error: ShouldNotReachHere();
    ;
  // Tracing - part 2
  if (TraceZapDeadLocals) {
    tty->cr();
  }
}
duke@435 | 1289 | |
duke@435 | 1290 | |
// Poison the dead slots of an interpreted frame. The bytecode-level oop map
// for the current bci classifies each slot so live oops are checked
// (_check_oop), live values are checked for oop-lookalikes (_check_value),
// and dead slots are overwritten (_zap_dead).
void frame::zap_dead_interpreted_locals(JavaThread *thread, const RegisterMap* map) {
  // get current interpreter 'pc'
  assert(is_interpreted_frame(), "Not an interpreted frame");
  methodOop m = interpreter_frame_method();
  int bci = interpreter_frame_bci();

  // Native methods have no bytecode locals, only the incoming parameters.
  int max_locals = m->is_native() ? m->size_of_parameters() : m->max_locals();

  if (TaggedStackInterpreter) {
    // With a tagged stack, product builds walk the locals with no oop map
    // (mask == NULL); in ASSERT builds a mask is computed so the walk can
    // presumably cross-check the tags against it — confirm in
    // oops_interpreted_locals_do.
    InterpreterOopMap *mask = NULL;
#ifdef ASSERT
    InterpreterOopMap oopmap_mask;
    methodHandle method(thread, m);
    OopMapCache::compute_one_oop_map(method, bci, &oopmap_mask);
    mask = &oopmap_mask;
#endif // ASSERT
    // Only the oop-checking closure runs in this mode; no zapping here.
    oops_interpreted_locals_do(&_check_oop, max_locals, mask);
  } else {
    // process dynamic part: one closure per slot classification.
    InterpreterFrameClosure value_blk(this, max_locals, m->max_stack(),
                                      &_check_value);
    InterpreterFrameClosure oop_blk(this, max_locals, m->max_stack(),
                                    &_check_oop );
    InterpreterFrameClosure dead_blk(this, max_locals, m->max_stack(),
                                     &_zap_dead );

    // get frame map for the current bci
    InterpreterOopMap mask;
    m->mask_for(bci, &mask);
    // Route every local/stack slot to the closure matching its class.
    mask.iterate_all( &oop_blk, &value_blk, &dead_blk);
  }
}
duke@435 | 1323 | |
duke@435 | 1324 | |
duke@435 | 1325 | void frame::zap_dead_compiled_locals(JavaThread* thread, const RegisterMap* reg_map) { |
duke@435 | 1326 | |
duke@435 | 1327 | ResourceMark rm(thread); |
duke@435 | 1328 | assert(_cb != NULL, "sanity check"); |
duke@435 | 1329 | if (_cb->oop_maps() != NULL) { |
coleenp@548 | 1330 | OopMapSet::all_do(this, reg_map, &_check_oop, check_derived_oop, &_check_value); |
duke@435 | 1331 | } |
duke@435 | 1332 | } |
duke@435 | 1333 | |
duke@435 | 1334 | |
duke@435 | 1335 | void frame::zap_dead_entry_locals(JavaThread*, const RegisterMap*) { |
duke@435 | 1336 | if (TraceZapDeadLocals) warning("frame::zap_dead_entry_locals unimplemented"); |
duke@435 | 1337 | } |
duke@435 | 1338 | |
duke@435 | 1339 | |
duke@435 | 1340 | void frame::zap_dead_deoptimized_locals(JavaThread*, const RegisterMap*) { |
duke@435 | 1341 | if (TraceZapDeadLocals) warning("frame::zap_dead_deoptimized_locals unimplemented"); |
duke@435 | 1342 | } |
duke@435 | 1343 | |
duke@435 | 1344 | # endif // ENABLE_ZAP_DEAD_LOCALS |
duke@435 | 1345 | |
duke@435 | 1346 | void frame::verify(const RegisterMap* map) { |
duke@435 | 1347 | // for now make sure receiver type is correct |
duke@435 | 1348 | if (is_interpreted_frame()) { |
duke@435 | 1349 | methodOop method = interpreter_frame_method(); |
duke@435 | 1350 | guarantee(method->is_method(), "method is wrong in frame::verify"); |
duke@435 | 1351 | if (!method->is_static()) { |
duke@435 | 1352 | // fetch the receiver |
duke@435 | 1353 | oop* p = (oop*) interpreter_frame_local_at(0); |
duke@435 | 1354 | // make sure we have the right receiver type |
duke@435 | 1355 | } |
duke@435 | 1356 | } |
duke@435 | 1357 | COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(), "must be empty before verify");) |
jrose@1424 | 1358 | oops_do_internal(&VerifyOopClosure::verify_oop, NULL, (RegisterMap*)map, false); |
duke@435 | 1359 | } |
duke@435 | 1360 | |
duke@435 | 1361 | |
#ifdef ASSERT
// A return pc is acceptable if it targets the call-stub return site,
// lies in compiled code, or lies inside the interpreter.
bool frame::verify_return_pc(address x) {
  return StubRoutines::returns_to_call_stub(x)
      || CodeCache::contains(x)
      || Interpreter::contains(x);
}
#endif
duke@435 | 1376 | |
duke@435 | 1377 | |
#ifdef ASSERT
// Verify that `value` points at a properly aligned BasicObjectLock within
// this interpreted frame's monitor region.
void frame::interpreter_frame_verify_monitor(BasicObjectLock* value) const {
  assert(is_interpreted_frame(), "Not an interpreted frame");

  // The monitor region spans [monitor_end, monitor_begin) in address terms.
  const address region_low  = (address) interpreter_frame_monitor_end();
  const address region_high = (address) interpreter_frame_monitor_begin();
  const address candidate   = (address) value;

  const int monitor_size = frame::interpreter_frame_monitor_size();

  // Aligned with respect to, and strictly below, the high boundary...
  guarantee((region_high - candidate) % monitor_size == 0 , "Misaligned top of BasicObjectLock*");
  guarantee( region_high > candidate , "Current BasicObjectLock* higher than high_mark");

  // ...and aligned with respect to, and at or above, the low boundary.
  guarantee((candidate - region_low) % monitor_size == 0 , "Misaligned bottom of BasicObjectLock*");
  guarantee( candidate >= region_low , "Current BasicObjectLock* below than low_mark");
}
#endif
duke@435 | 1394 | |
duke@435 | 1395 | |
duke@435 | 1396 | //----------------------------------------------------------------------------------- |
duke@435 | 1397 | // StackFrameStream implementation |
duke@435 | 1398 | |
duke@435 | 1399 | StackFrameStream::StackFrameStream(JavaThread *thread, bool update) : _reg_map(thread, update) { |
duke@435 | 1400 | assert(thread->has_last_Java_frame(), "sanity check"); |
duke@435 | 1401 | _fr = thread->last_frame(); |
duke@435 | 1402 | _is_done = false; |
duke@435 | 1403 | } |