Mon, 23 Jun 2008 14:11:12 -0700
6708714: Optimize long LShift on 32-bits x86
Summary: For small (1-3 bit) long left shifts in the 32-bit VM, use sequences of add+adc instructions instead of shld+shl on newer AMD CPUs.
Reviewed-by: never
Contributed-by: shrinivas.joshi@amd.com
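
The summary above describes lowering a small constant long left shift into 32-bit add-with-carry arithmetic instead of an shld+shl pair. A minimal C++ sketch of that decomposition (illustration only, not code from this changeset; the real change lives in the x86 back end, and the helper name below is made up):

    #include <cstdint>
    #include <cstdio>

    // Shift a 64-bit value held as two 32-bit halves left by one bit using only
    // 32-bit additions and an explicit carry -- the pattern ADD lo,lo / ADC hi,hi
    // implements in hardware. Repeating it n times gives a shift by n (1-3) bits.
    static void lshift1(uint32_t& hi, uint32_t& lo) {
      uint32_t carry = lo >> 31;   // bit 31 of the low word moves into the high word
      lo += lo;                    // ADD lo, lo
      hi = hi + hi + carry;        // ADC hi, hi
    }

    int main() {
      uint64_t v = 0x0123456789ABCDEFULL;
      uint32_t hi = uint32_t(v >> 32), lo = uint32_t(v);
      for (int i = 0; i < 3; ++i) lshift1(hi, lo);   // shift left by 3 bits
      uint64_t r = (uint64_t(hi) << 32) | lo;
      std::printf("%d\n", r == (v << 3));            // prints 1
      return 0;
    }
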
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_sharedRuntime.cpp.incl" |
duke@435 | 27 | #include <math.h> |
duke@435 | 28 | |
duke@435 | 29 | HS_DTRACE_PROBE_DECL4(hotspot, object__alloc, Thread*, char*, int, size_t); |
duke@435 | 30 | HS_DTRACE_PROBE_DECL7(hotspot, method__entry, int, |
duke@435 | 31 | char*, int, char*, int, char*, int); |
duke@435 | 32 | HS_DTRACE_PROBE_DECL7(hotspot, method__return, int, |
duke@435 | 33 | char*, int, char*, int, char*, int); |
duke@435 | 34 | |
duke@435 | 35 | // Implementation of SharedRuntime |
duke@435 | 36 | |
duke@435 | 37 | #ifndef PRODUCT |
duke@435 | 38 | // For statistics |
duke@435 | 39 | int SharedRuntime::_ic_miss_ctr = 0; |
duke@435 | 40 | int SharedRuntime::_wrong_method_ctr = 0; |
duke@435 | 41 | int SharedRuntime::_resolve_static_ctr = 0; |
duke@435 | 42 | int SharedRuntime::_resolve_virtual_ctr = 0; |
duke@435 | 43 | int SharedRuntime::_resolve_opt_virtual_ctr = 0; |
duke@435 | 44 | int SharedRuntime::_implicit_null_throws = 0; |
duke@435 | 45 | int SharedRuntime::_implicit_div0_throws = 0; |
duke@435 | 46 | int SharedRuntime::_throw_null_ctr = 0; |
duke@435 | 47 | |
duke@435 | 48 | int SharedRuntime::_nof_normal_calls = 0; |
duke@435 | 49 | int SharedRuntime::_nof_optimized_calls = 0; |
duke@435 | 50 | int SharedRuntime::_nof_inlined_calls = 0; |
duke@435 | 51 | int SharedRuntime::_nof_megamorphic_calls = 0; |
duke@435 | 52 | int SharedRuntime::_nof_static_calls = 0; |
duke@435 | 53 | int SharedRuntime::_nof_inlined_static_calls = 0; |
duke@435 | 54 | int SharedRuntime::_nof_interface_calls = 0; |
duke@435 | 55 | int SharedRuntime::_nof_optimized_interface_calls = 0; |
duke@435 | 56 | int SharedRuntime::_nof_inlined_interface_calls = 0; |
duke@435 | 57 | int SharedRuntime::_nof_megamorphic_interface_calls = 0; |
duke@435 | 58 | int SharedRuntime::_nof_removable_exceptions = 0; |
duke@435 | 59 | |
duke@435 | 60 | int SharedRuntime::_new_instance_ctr=0; |
duke@435 | 61 | int SharedRuntime::_new_array_ctr=0; |
duke@435 | 62 | int SharedRuntime::_multi1_ctr=0; |
duke@435 | 63 | int SharedRuntime::_multi2_ctr=0; |
duke@435 | 64 | int SharedRuntime::_multi3_ctr=0; |
duke@435 | 65 | int SharedRuntime::_multi4_ctr=0; |
duke@435 | 66 | int SharedRuntime::_multi5_ctr=0; |
duke@435 | 67 | int SharedRuntime::_mon_enter_stub_ctr=0; |
duke@435 | 68 | int SharedRuntime::_mon_exit_stub_ctr=0; |
duke@435 | 69 | int SharedRuntime::_mon_enter_ctr=0; |
duke@435 | 70 | int SharedRuntime::_mon_exit_ctr=0; |
duke@435 | 71 | int SharedRuntime::_partial_subtype_ctr=0; |
duke@435 | 72 | int SharedRuntime::_jbyte_array_copy_ctr=0; |
duke@435 | 73 | int SharedRuntime::_jshort_array_copy_ctr=0; |
duke@435 | 74 | int SharedRuntime::_jint_array_copy_ctr=0; |
duke@435 | 75 | int SharedRuntime::_jlong_array_copy_ctr=0; |
duke@435 | 76 | int SharedRuntime::_oop_array_copy_ctr=0; |
duke@435 | 77 | int SharedRuntime::_checkcast_array_copy_ctr=0; |
duke@435 | 78 | int SharedRuntime::_unsafe_array_copy_ctr=0; |
duke@435 | 79 | int SharedRuntime::_generic_array_copy_ctr=0; |
duke@435 | 80 | int SharedRuntime::_slow_array_copy_ctr=0; |
duke@435 | 81 | int SharedRuntime::_find_handler_ctr=0; |
duke@435 | 82 | int SharedRuntime::_rethrow_ctr=0; |
duke@435 | 83 | |
duke@435 | 84 | int SharedRuntime::_ICmiss_index = 0; |
duke@435 | 85 | int SharedRuntime::_ICmiss_count[SharedRuntime::maxICmiss_count]; |
duke@435 | 86 | address SharedRuntime::_ICmiss_at[SharedRuntime::maxICmiss_count]; |
duke@435 | 87 | |
duke@435 | 88 | void SharedRuntime::trace_ic_miss(address at) { |
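// Linear search of the sites recorded so far; once the table is full, any new site reuses (overwrites) the last slot.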
duke@435 | 89 | for (int i = 0; i < _ICmiss_index; i++) { |
duke@435 | 90 | if (_ICmiss_at[i] == at) { |
duke@435 | 91 | _ICmiss_count[i]++; |
duke@435 | 92 | return; |
duke@435 | 93 | } |
duke@435 | 94 | } |
duke@435 | 95 | int index = _ICmiss_index++; |
duke@435 | 96 | if (_ICmiss_index >= maxICmiss_count) _ICmiss_index = maxICmiss_count - 1; |
duke@435 | 97 | _ICmiss_at[index] = at; |
duke@435 | 98 | _ICmiss_count[index] = 1; |
duke@435 | 99 | } |
duke@435 | 100 | |
duke@435 | 101 | void SharedRuntime::print_ic_miss_histogram() { |
duke@435 | 102 | if (ICMissHistogram) { |
duke@435 | 103 | tty->print_cr ("IC Miss Histogram:"); |
duke@435 | 104 | int tot_misses = 0; |
duke@435 | 105 | for (int i = 0; i < _ICmiss_index; i++) { |
duke@435 | 106 | tty->print_cr(" at: " INTPTR_FORMAT " nof: %d", _ICmiss_at[i], _ICmiss_count[i]); |
duke@435 | 107 | tot_misses += _ICmiss_count[i]; |
duke@435 | 108 | } |
duke@435 | 109 | tty->print_cr ("Total IC misses: %7d", tot_misses); |
duke@435 | 110 | } |
duke@435 | 111 | } |
duke@435 | 112 | #endif // PRODUCT |
duke@435 | 113 | |
duke@435 | 114 | |
duke@435 | 115 | JRT_LEAF(jlong, SharedRuntime::lmul(jlong y, jlong x)) |
duke@435 | 116 | return x * y; |
duke@435 | 117 | JRT_END |
duke@435 | 118 | |
duke@435 | 119 | |
duke@435 | 120 | JRT_LEAF(jlong, SharedRuntime::ldiv(jlong y, jlong x)) |
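// Special case: Java defines min_jlong / -1 to be min_jlong; dividing directly would overflow.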
duke@435 | 121 | if (x == min_jlong && y == CONST64(-1)) { |
duke@435 | 122 | return x; |
duke@435 | 123 | } else { |
duke@435 | 124 | return x / y; |
duke@435 | 125 | } |
duke@435 | 126 | JRT_END |
duke@435 | 127 | |
duke@435 | 128 | |
duke@435 | 129 | JRT_LEAF(jlong, SharedRuntime::lrem(jlong y, jlong x)) |
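// Special case: the Java remainder min_jlong % -1 is defined to be 0; dividing directly would overflow.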
duke@435 | 130 | if (x == min_jlong && y == CONST64(-1)) { |
duke@435 | 131 | return 0; |
duke@435 | 132 | } else { |
duke@435 | 133 | return x % y; |
duke@435 | 134 | } |
duke@435 | 135 | JRT_END |
duke@435 | 136 | |
duke@435 | 137 | |
duke@435 | 138 | const juint float_sign_mask = 0x7FFFFFFF; |
duke@435 | 139 | const juint float_infinity = 0x7F800000; |
duke@435 | 140 | const julong double_sign_mask = CONST64(0x7FFFFFFFFFFFFFFF); |
duke@435 | 141 | const julong double_infinity = CONST64(0x7FF0000000000000); |
duke@435 | 142 | |
duke@435 | 143 | JRT_LEAF(jfloat, SharedRuntime::frem(jfloat x, jfloat y)) |
duke@435 | 144 | #ifdef _WIN64 |
duke@435 | 145 | // 64-bit Windows on amd64 returns the wrong values for |
duke@435 | 146 | // infinity operands. |
duke@435 | 147 | union { jfloat f; juint i; } xbits, ybits; |
duke@435 | 148 | xbits.f = x; |
duke@435 | 149 | ybits.f = y; |
duke@435 | 150 | // x Mod Infinity == x unless x is infinity |
duke@435 | 151 | if ( ((xbits.i & float_sign_mask) != float_infinity) && |
duke@435 | 152 | ((ybits.i & float_sign_mask) == float_infinity) ) { |
duke@435 | 153 | return x; |
duke@435 | 154 | } |
duke@435 | 155 | #endif |
duke@435 | 156 | return ((jfloat)fmod((double)x,(double)y)); |
duke@435 | 157 | JRT_END |
duke@435 | 158 | |
duke@435 | 159 | |
duke@435 | 160 | JRT_LEAF(jdouble, SharedRuntime::drem(jdouble x, jdouble y)) |
duke@435 | 161 | #ifdef _WIN64 |
duke@435 | 162 | union { jdouble d; julong l; } xbits, ybits; |
duke@435 | 163 | xbits.d = x; |
duke@435 | 164 | ybits.d = y; |
duke@435 | 165 | // x Mod Infinity == x unless x is infinity |
duke@435 | 166 | if ( ((xbits.l & double_sign_mask) != double_infinity) && |
duke@435 | 167 | ((ybits.l & double_sign_mask) == double_infinity) ) { |
duke@435 | 168 | return x; |
duke@435 | 169 | } |
duke@435 | 170 | #endif |
duke@435 | 171 | return ((jdouble)fmod((double)x,(double)y)); |
duke@435 | 172 | JRT_END |
duke@435 | 173 | |
duke@435 | 174 | |
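// Java's narrowing f2i conversion: NaN maps to 0 and out-of-range values saturate to min_jint / max_jint, which the checks below implement.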
duke@435 | 175 | JRT_LEAF(jint, SharedRuntime::f2i(jfloat x)) |
duke@435 | 176 | if (g_isnan(x)) {return 0;} |
duke@435 | 177 | jlong lltmp = (jlong)x; |
duke@435 | 178 | jint ltmp = (jint)lltmp; |
duke@435 | 179 | if (ltmp == lltmp) { |
duke@435 | 180 | return ltmp; |
duke@435 | 181 | } else { |
duke@435 | 182 | if (x < 0) { |
duke@435 | 183 | return min_jint; |
duke@435 | 184 | } else { |
duke@435 | 185 | return max_jint; |
duke@435 | 186 | } |
duke@435 | 187 | } |
duke@435 | 188 | JRT_END |
duke@435 | 189 | |
duke@435 | 190 | |
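// f2l saturates to the jlong range the same way: a converted result of min_jlong may indicate overflow, so it is resolved from the sign of x.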
duke@435 | 191 | JRT_LEAF(jlong, SharedRuntime::f2l(jfloat x)) |
duke@435 | 192 | if (g_isnan(x)) {return 0;} |
duke@435 | 193 | jlong lltmp = (jlong)x; |
duke@435 | 194 | if (lltmp != min_jlong) { |
duke@435 | 195 | return lltmp; |
duke@435 | 196 | } else { |
duke@435 | 197 | if (x < 0) { |
duke@435 | 198 | return min_jlong; |
duke@435 | 199 | } else { |
duke@435 | 200 | return max_jlong; |
duke@435 | 201 | } |
duke@435 | 202 | } |
duke@435 | 203 | JRT_END |
duke@435 | 204 | |
duke@435 | 205 | |
duke@435 | 206 | JRT_LEAF(jint, SharedRuntime::d2i(jdouble x)) |
duke@435 | 207 | if (g_isnan(x)) {return 0;} |
duke@435 | 208 | jlong lltmp = (jlong)x; |
duke@435 | 209 | jint ltmp = (jint)lltmp; |
duke@435 | 210 | if (ltmp == lltmp) { |
duke@435 | 211 | return ltmp; |
duke@435 | 212 | } else { |
duke@435 | 213 | if (x < 0) { |
duke@435 | 214 | return min_jint; |
duke@435 | 215 | } else { |
duke@435 | 216 | return max_jint; |
duke@435 | 217 | } |
duke@435 | 218 | } |
duke@435 | 219 | JRT_END |
duke@435 | 220 | |
duke@435 | 221 | |
duke@435 | 222 | JRT_LEAF(jlong, SharedRuntime::d2l(jdouble x)) |
duke@435 | 223 | if (g_isnan(x)) {return 0;} |
duke@435 | 224 | jlong lltmp = (jlong)x; |
duke@435 | 225 | if (lltmp != min_jlong) { |
duke@435 | 226 | return lltmp; |
duke@435 | 227 | } else { |
duke@435 | 228 | if (x < 0) { |
duke@435 | 229 | return min_jlong; |
duke@435 | 230 | } else { |
duke@435 | 231 | return max_jlong; |
duke@435 | 232 | } |
duke@435 | 233 | } |
duke@435 | 234 | JRT_END |
duke@435 | 235 | |
duke@435 | 236 | |
duke@435 | 237 | JRT_LEAF(jfloat, SharedRuntime::d2f(jdouble x)) |
duke@435 | 238 | return (jfloat)x; |
duke@435 | 239 | JRT_END |
duke@435 | 240 | |
duke@435 | 241 | |
duke@435 | 242 | JRT_LEAF(jfloat, SharedRuntime::l2f(jlong x)) |
duke@435 | 243 | return (jfloat)x; |
duke@435 | 244 | JRT_END |
duke@435 | 245 | |
duke@435 | 246 | |
duke@435 | 247 | JRT_LEAF(jdouble, SharedRuntime::l2d(jlong x)) |
duke@435 | 248 | return (jdouble)x; |
duke@435 | 249 | JRT_END |
duke@435 | 250 | |
duke@435 | 251 | // Exception handling across interpreter/compiler boundaries |
duke@435 | 252 | // |
duke@435 | 253 | // exception_handler_for_return_address(...) returns the continuation address. |
duke@435 | 254 | // The continuation address is the entry point of the exception handler of the |
duke@435 | 255 | // previous frame depending on the return address. |
duke@435 | 256 | |
duke@435 | 257 | address SharedRuntime::raw_exception_handler_for_return_address(address return_address) { |
duke@435 | 258 | assert(frame::verify_return_pc(return_address), "must be a return pc"); |
duke@435 | 259 | |
duke@435 | 260 | // the fastest case first |
duke@435 | 261 | CodeBlob* blob = CodeCache::find_blob(return_address); |
duke@435 | 262 | if (blob != NULL && blob->is_nmethod()) { |
duke@435 | 263 | nmethod* code = (nmethod*)blob; |
duke@435 | 264 | assert(code != NULL, "nmethod must be present"); |
duke@435 | 265 | // native nmethods don't have exception handlers |
duke@435 | 266 | assert(!code->is_native_method(), "no exception handler"); |
duke@435 | 267 | assert(code->header_begin() != code->exception_begin(), "no exception handler"); |
duke@435 | 268 | if (code->is_deopt_pc(return_address)) { |
duke@435 | 269 | return SharedRuntime::deopt_blob()->unpack_with_exception(); |
duke@435 | 270 | } else { |
duke@435 | 271 | return code->exception_begin(); |
duke@435 | 272 | } |
duke@435 | 273 | } |
duke@435 | 274 | |
duke@435 | 275 | // Entry code |
duke@435 | 276 | if (StubRoutines::returns_to_call_stub(return_address)) { |
duke@435 | 277 | return StubRoutines::catch_exception_entry(); |
duke@435 | 278 | } |
duke@435 | 279 | // Interpreted code |
duke@435 | 280 | if (Interpreter::contains(return_address)) { |
duke@435 | 281 | return Interpreter::rethrow_exception_entry(); |
duke@435 | 282 | } |
duke@435 | 283 | |
duke@435 | 284 | // Compiled code |
duke@435 | 285 | if (CodeCache::contains(return_address)) { |
duke@435 | 286 | CodeBlob* blob = CodeCache::find_blob(return_address); |
duke@435 | 287 | if (blob->is_nmethod()) { |
duke@435 | 288 | nmethod* code = (nmethod*)blob; |
duke@435 | 289 | assert(code != NULL, "nmethod must be present"); |
duke@435 | 290 | assert(code->header_begin() != code->exception_begin(), "no exception handler"); |
duke@435 | 291 | return code->exception_begin(); |
duke@435 | 292 | } |
duke@435 | 293 | if (blob->is_runtime_stub()) { |
duke@435 | 294 | ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames |
duke@435 | 295 | } |
duke@435 | 296 | } |
duke@435 | 297 | guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); |
duke@435 | 298 | #ifndef PRODUCT |
duke@435 | 299 | { ResourceMark rm; |
duke@435 | 300 | tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address); |
duke@435 | 301 | tty->print_cr("a) exception happened in (new?) code stubs/buffers that is not handled here"); |
duke@435 | 302 | tty->print_cr("b) other problem"); |
duke@435 | 303 | } |
duke@435 | 304 | #endif // PRODUCT |
duke@435 | 305 | ShouldNotReachHere(); |
duke@435 | 306 | return NULL; |
duke@435 | 307 | } |
duke@435 | 308 | |
duke@435 | 309 | |
duke@435 | 310 | JRT_LEAF(address, SharedRuntime::exception_handler_for_return_address(address return_address)) |
duke@435 | 311 | return raw_exception_handler_for_return_address(return_address); |
duke@435 | 312 | JRT_END |
duke@435 | 313 | |
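// Look up the stub that continues execution after a safepoint poll at pc faults:
// either the poll-return handler or the loop-poll handler.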
duke@435 | 314 | address SharedRuntime::get_poll_stub(address pc) { |
duke@435 | 315 | address stub; |
duke@435 | 316 | // Look up the code blob |
duke@435 | 317 | CodeBlob *cb = CodeCache::find_blob(pc); |
duke@435 | 318 | |
duke@435 | 319 | // Should be an nmethod |
duke@435 | 320 | assert( cb && cb->is_nmethod(), "safepoint polling: pc must refer to an nmethod" ); |
duke@435 | 321 | |
duke@435 | 322 | // Look up the relocation information |
duke@435 | 323 | assert( ((nmethod*)cb)->is_at_poll_or_poll_return(pc), |
duke@435 | 324 | "safepoint polling: type must be poll" ); |
duke@435 | 325 | |
duke@435 | 326 | assert( ((NativeInstruction*)pc)->is_safepoint_poll(), |
duke@435 | 327 | "Only polling locations are used for safepoint"); |
duke@435 | 328 | |
duke@435 | 329 | bool at_poll_return = ((nmethod*)cb)->is_at_poll_return(pc); |
duke@435 | 330 | if (at_poll_return) { |
duke@435 | 331 | assert(SharedRuntime::polling_page_return_handler_blob() != NULL, |
duke@435 | 332 | "polling page return stub not created yet"); |
duke@435 | 333 | stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin(); |
duke@435 | 334 | } else { |
duke@435 | 335 | assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL, |
duke@435 | 336 | "polling page safepoint stub not created yet"); |
duke@435 | 337 | stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin(); |
duke@435 | 338 | } |
duke@435 | 339 | #ifndef PRODUCT |
duke@435 | 340 | if( TraceSafepoint ) { |
duke@435 | 341 | char buf[256]; |
duke@435 | 342 | jio_snprintf(buf, sizeof(buf), |
duke@435 | 343 | "... found polling page %s exception at pc = " |
duke@435 | 344 | INTPTR_FORMAT ", stub =" INTPTR_FORMAT, |
duke@435 | 345 | at_poll_return ? "return" : "loop", |
duke@435 | 346 | (intptr_t)pc, (intptr_t)stub); |
duke@435 | 347 | tty->print_raw_cr(buf); |
duke@435 | 348 | } |
duke@435 | 349 | #endif // PRODUCT |
duke@435 | 350 | return stub; |
duke@435 | 351 | } |
duke@435 | 352 | |
duke@435 | 353 | |
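// Fetch the receiver of the call being set up from the caller's interpreter
// expression stack; the receiver sits below the pushed arguments, at a depth
// computed from the callee's signature.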
duke@435 | 354 | oop SharedRuntime::retrieve_receiver( symbolHandle sig, frame caller ) { |
duke@435 | 355 | assert(caller.is_interpreted_frame(), ""); |
duke@435 | 356 | int args_size = ArgumentSizeComputer(sig).size() + 1; |
duke@435 | 357 | assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack"); |
duke@435 | 358 | oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1); |
duke@435 | 359 | assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop"); |
duke@435 | 360 | return result; |
duke@435 | 361 | } |
duke@435 | 362 | |
duke@435 | 363 | |
duke@435 | 364 | void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, Handle h_exception) { |
duke@435 | 365 | if (JvmtiExport::can_post_exceptions()) { |
duke@435 | 366 | vframeStream vfst(thread, true); |
duke@435 | 367 | methodHandle method = methodHandle(thread, vfst.method()); |
duke@435 | 368 | address bcp = method()->bcp_from(vfst.bci()); |
duke@435 | 369 | JvmtiExport::post_exception_throw(thread, method(), bcp, h_exception()); |
duke@435 | 370 | } |
duke@435 | 371 | Exceptions::_throw(thread, __FILE__, __LINE__, h_exception); |
duke@435 | 372 | } |
duke@435 | 373 | |
duke@435 | 374 | void SharedRuntime::throw_and_post_jvmti_exception(JavaThread *thread, symbolOop name, const char *message) { |
duke@435 | 375 | Handle h_exception = Exceptions::new_exception(thread, name, message); |
duke@435 | 376 | throw_and_post_jvmti_exception(thread, h_exception); |
duke@435 | 377 | } |
duke@435 | 378 | |
duke@435 | 379 | // ret_pc points into caller; we are returning caller's exception handler |
duke@435 | 380 | // for given exception |
duke@435 | 381 | address SharedRuntime::compute_compiled_exc_handler(nmethod* nm, address ret_pc, Handle& exception, |
duke@435 | 382 | bool force_unwind, bool top_frame_only) { |
duke@435 | 383 | assert(nm != NULL, "must exist"); |
duke@435 | 384 | ResourceMark rm; |
duke@435 | 385 | |
duke@435 | 386 | ScopeDesc* sd = nm->scope_desc_at(ret_pc); |
duke@435 | 387 | // determine handler bci, if any |
duke@435 | 388 | EXCEPTION_MARK; |
duke@435 | 389 | |
duke@435 | 390 | int handler_bci = -1; |
duke@435 | 391 | int scope_depth = 0; |
duke@435 | 392 | if (!force_unwind) { |
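// Walk the (possibly inlined) scopes at ret_pc from the innermost outward,
// looking for a bytecode-level handler for this exception type.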
duke@435 | 393 | int bci = sd->bci(); |
duke@435 | 394 | do { |
duke@435 | 395 | bool skip_scope_increment = false; |
duke@435 | 396 | // exception handler lookup |
duke@435 | 397 | KlassHandle ek (THREAD, exception->klass()); |
duke@435 | 398 | handler_bci = sd->method()->fast_exception_handler_bci_for(ek, bci, THREAD); |
duke@435 | 399 | if (HAS_PENDING_EXCEPTION) { |
duke@435 | 400 | // We threw an exception while trying to find the exception handler. |
duke@435 | 401 | // Transfer the new exception to the exception handle which will |
duke@435 | 402 | // be set into thread local storage, and do another lookup for an |
duke@435 | 403 | // exception handler for this exception, this time starting at the |
duke@435 | 404 | // BCI of the exception handler which caused the exception to be |
duke@435 | 405 | // thrown (bugs 4307310 and 4546590). Set "exception" reference |
duke@435 | 406 | // argument to ensure that the correct exception is thrown (4870175). |
duke@435 | 407 | exception = Handle(THREAD, PENDING_EXCEPTION); |
duke@435 | 408 | CLEAR_PENDING_EXCEPTION; |
duke@435 | 409 | if (handler_bci >= 0) { |
duke@435 | 410 | bci = handler_bci; |
duke@435 | 411 | handler_bci = -1; |
duke@435 | 412 | skip_scope_increment = true; |
duke@435 | 413 | } |
duke@435 | 414 | } |
duke@435 | 415 | if (!top_frame_only && handler_bci < 0 && !skip_scope_increment) { |
duke@435 | 416 | sd = sd->sender(); |
duke@435 | 417 | if (sd != NULL) { |
duke@435 | 418 | bci = sd->bci(); |
duke@435 | 419 | } |
duke@435 | 420 | ++scope_depth; |
duke@435 | 421 | } |
duke@435 | 422 | } while (!top_frame_only && handler_bci < 0 && sd != NULL); |
duke@435 | 423 | } |
duke@435 | 424 | |
duke@435 | 425 | // found handling method => lookup exception handler |
duke@435 | 426 | int catch_pco = ret_pc - nm->instructions_begin(); |
duke@435 | 427 | |
duke@435 | 428 | ExceptionHandlerTable table(nm); |
duke@435 | 429 | HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth); |
duke@435 | 430 | if (t == NULL && (nm->is_compiled_by_c1() || handler_bci != -1)) { |
duke@435 | 431 | // Allow abbreviated catch tables. The idea is to allow a method |
duke@435 | 432 | // to materialize its exceptions without committing to the exact |
duke@435 | 433 | // routing of exceptions. In particular this is needed for adding |
duke@435 | 434 | // a synthetic handler to unlock monitors when inlining |
duke@435 | 435 | // synchronized methods since the unlock path isn't represented in |
duke@435 | 436 | // the bytecodes. |
duke@435 | 437 | t = table.entry_for(catch_pco, -1, 0); |
duke@435 | 438 | } |
duke@435 | 439 | |
duke@435 | 440 | #ifdef COMPILER1 |
duke@435 | 441 | if (nm->is_compiled_by_c1() && t == NULL && handler_bci == -1) { |
duke@435 | 442 | // Exception is not handled by this frame so unwind. Note that |
duke@435 | 443 | // this is not the same as how C2 does this. C2 emits a table |
duke@435 | 444 | // entry that dispatches to the unwind code in the nmethod. |
duke@435 | 445 | return NULL; |
duke@435 | 446 | } |
duke@435 | 447 | #endif /* COMPILER1 */ |
duke@435 | 448 | |
duke@435 | 449 | |
duke@435 | 450 | if (t == NULL) { |
duke@435 | 451 | tty->print_cr("MISSING EXCEPTION HANDLER for pc " INTPTR_FORMAT " and handler bci %d", ret_pc, handler_bci); |
duke@435 | 452 | tty->print_cr(" Exception:"); |
duke@435 | 453 | exception->print(); |
duke@435 | 454 | tty->cr(); |
duke@435 | 455 | tty->print_cr(" Compiled exception table :"); |
duke@435 | 456 | table.print(); |
duke@435 | 457 | nm->print_code(); |
duke@435 | 458 | guarantee(false, "missing exception handler"); |
duke@435 | 459 | return NULL; |
duke@435 | 460 | } |
duke@435 | 461 | |
duke@435 | 462 | return nm->instructions_begin() + t->pco(); |
duke@435 | 463 | } |
duke@435 | 464 | |
duke@435 | 465 | JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread)) |
duke@435 | 466 | // These errors occur only at call sites |
duke@435 | 467 | throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError()); |
duke@435 | 468 | JRT_END |
duke@435 | 469 | |
dcubed@451 | 470 | JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread)) |
dcubed@451 | 471 | // These errors occur only at call sites |
dcubed@451 | 472 | throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub"); |
dcubed@451 | 473 | JRT_END |
dcubed@451 | 474 | |
duke@435 | 475 | JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread)) |
duke@435 | 476 | throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero"); |
duke@435 | 477 | JRT_END |
duke@435 | 478 | |
duke@435 | 479 | JRT_ENTRY(void, SharedRuntime::throw_NullPointerException(JavaThread* thread)) |
duke@435 | 480 | throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); |
duke@435 | 481 | JRT_END |
duke@435 | 482 | |
duke@435 | 483 | JRT_ENTRY(void, SharedRuntime::throw_NullPointerException_at_call(JavaThread* thread)) |
duke@435 | 484 | // This entry point is effectively only used for NullPointerExceptions which occur at inline |
duke@435 | 485 | // cache sites (when the callee activation is not yet set up) so we are at a call site |
duke@435 | 486 | throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_NullPointerException()); |
duke@435 | 487 | JRT_END |
duke@435 | 488 | |
duke@435 | 489 | JRT_ENTRY(void, SharedRuntime::throw_StackOverflowError(JavaThread* thread)) |
duke@435 | 490 | // We avoid using the normal exception construction in this case because |
duke@435 | 491 | // it performs an upcall to Java, and we're already out of stack space. |
duke@435 | 492 | klassOop k = SystemDictionary::StackOverflowError_klass(); |
duke@435 | 493 | oop exception_oop = instanceKlass::cast(k)->allocate_instance(CHECK); |
duke@435 | 494 | Handle exception (thread, exception_oop); |
duke@435 | 495 | if (StackTraceInThrowable) { |
duke@435 | 496 | java_lang_Throwable::fill_in_stack_trace(exception); |
duke@435 | 497 | } |
duke@435 | 498 | throw_and_post_jvmti_exception(thread, exception); |
duke@435 | 499 | JRT_END |
duke@435 | 500 | |
duke@435 | 501 | address SharedRuntime::continuation_for_implicit_exception(JavaThread* thread, |
duke@435 | 502 | address pc, |
duke@435 | 503 | SharedRuntime::ImplicitExceptionKind exception_kind) |
duke@435 | 504 | { |
duke@435 | 505 | address target_pc = NULL; |
duke@435 | 506 | |
duke@435 | 507 | if (Interpreter::contains(pc)) { |
duke@435 | 508 | #ifdef CC_INTERP |
duke@435 | 509 | // C++ interpreter doesn't throw implicit exceptions |
duke@435 | 510 | ShouldNotReachHere(); |
duke@435 | 511 | #else |
duke@435 | 512 | switch (exception_kind) { |
duke@435 | 513 | case IMPLICIT_NULL: return Interpreter::throw_NullPointerException_entry(); |
duke@435 | 514 | case IMPLICIT_DIVIDE_BY_ZERO: return Interpreter::throw_ArithmeticException_entry(); |
duke@435 | 515 | case STACK_OVERFLOW: return Interpreter::throw_StackOverflowError_entry(); |
duke@435 | 516 | default: ShouldNotReachHere(); |
duke@435 | 517 | } |
duke@435 | 518 | #endif // !CC_INTERP |
duke@435 | 519 | } else { |
duke@435 | 520 | switch (exception_kind) { |
duke@435 | 521 | case STACK_OVERFLOW: { |
duke@435 | 522 | // Stack overflow only occurs upon frame setup; the callee is |
duke@435 | 523 | // going to be unwound. Dispatch to a shared runtime stub |
duke@435 | 524 | // which will cause the StackOverflowError to be fabricated |
duke@435 | 525 | // and processed. |
duke@435 | 526 | // For stack overflow in deoptimization blob, cleanup thread. |
duke@435 | 527 | if (thread->deopt_mark() != NULL) { |
duke@435 | 528 | Deoptimization::cleanup_deopt_info(thread, NULL); |
duke@435 | 529 | } |
duke@435 | 530 | return StubRoutines::throw_StackOverflowError_entry(); |
duke@435 | 531 | } |
duke@435 | 532 | |
duke@435 | 533 | case IMPLICIT_NULL: { |
duke@435 | 534 | if (VtableStubs::contains(pc)) { |
duke@435 | 535 | // We haven't yet entered the callee frame. Fabricate an |
duke@435 | 536 | // exception and begin dispatching it in the caller. Since |
duke@435 | 537 | // the caller was at a call site, it's safe to destroy all |
duke@435 | 538 | // caller-saved registers, as these entry points do. |
duke@435 | 539 | VtableStub* vt_stub = VtableStubs::stub_containing(pc); |
duke@435 | 540 | guarantee(vt_stub != NULL, "unable to find SEGVing vtable stub"); |
duke@435 | 541 | if (vt_stub->is_abstract_method_error(pc)) { |
duke@435 | 542 | assert(!vt_stub->is_vtable_stub(), "should never see AbstractMethodErrors from vtable-type VtableStubs"); |
duke@435 | 543 | return StubRoutines::throw_AbstractMethodError_entry(); |
duke@435 | 544 | } else { |
duke@435 | 545 | return StubRoutines::throw_NullPointerException_at_call_entry(); |
duke@435 | 546 | } |
duke@435 | 547 | } else { |
duke@435 | 548 | CodeBlob* cb = CodeCache::find_blob(pc); |
duke@435 | 549 | guarantee(cb != NULL, "exception happened outside interpreter, nmethods and vtable stubs (1)"); |
duke@435 | 550 | |
duke@435 | 551 | // Exception happened in CodeCache. Must be either: |
duke@435 | 552 | // 1. Inline-cache check in C2I handler blob, |
duke@435 | 553 | // 2. Inline-cache check in nmethod, or |
duke@435 | 554 | // 3. Implicit null exception in nmethod |
duke@435 | 555 | |
duke@435 | 556 | if (!cb->is_nmethod()) { |
duke@435 | 557 | guarantee(cb->is_adapter_blob(), |
duke@435 | 558 | "exception happened outside interpreter, nmethods and vtable stubs (2)"); |
duke@435 | 559 | // There is no handler here, so we will simply unwind. |
duke@435 | 560 | return StubRoutines::throw_NullPointerException_at_call_entry(); |
duke@435 | 561 | } |
duke@435 | 562 | |
duke@435 | 563 | // Otherwise, it's an nmethod. Consult its exception handlers. |
duke@435 | 564 | nmethod* nm = (nmethod*)cb; |
duke@435 | 565 | if (nm->inlinecache_check_contains(pc)) { |
duke@435 | 566 | // exception happened inside inline-cache check code |
duke@435 | 567 | // => the nmethod is not yet active (i.e., the frame |
duke@435 | 568 | // is not set up yet) => use return address pushed by |
duke@435 | 569 | // caller => don't push another return address |
duke@435 | 570 | return StubRoutines::throw_NullPointerException_at_call_entry(); |
duke@435 | 571 | } |
duke@435 | 572 | |
duke@435 | 573 | #ifndef PRODUCT |
duke@435 | 574 | _implicit_null_throws++; |
duke@435 | 575 | #endif |
duke@435 | 576 | target_pc = nm->continuation_for_implicit_exception(pc); |
duke@435 | 577 | guarantee(target_pc != 0, "must have a continuation point"); |
duke@435 | 578 | } |
duke@435 | 579 | |
duke@435 | 580 | break; // fall through |
duke@435 | 581 | } |
duke@435 | 582 | |
duke@435 | 583 | |
duke@435 | 584 | case IMPLICIT_DIVIDE_BY_ZERO: { |
duke@435 | 585 | nmethod* nm = CodeCache::find_nmethod(pc); |
duke@435 | 586 | guarantee(nm != NULL, "must have containing nmethod for implicit division-by-zero exceptions"); |
duke@435 | 587 | #ifndef PRODUCT |
duke@435 | 588 | _implicit_div0_throws++; |
duke@435 | 589 | #endif |
duke@435 | 590 | target_pc = nm->continuation_for_implicit_exception(pc); |
duke@435 | 591 | guarantee(target_pc != 0, "must have a continuation point"); |
duke@435 | 592 | break; // fall through |
duke@435 | 593 | } |
duke@435 | 594 | |
duke@435 | 595 | default: ShouldNotReachHere(); |
duke@435 | 596 | } |
duke@435 | 597 | |
duke@435 | 598 | guarantee(target_pc != NULL, "must have computed destination PC for implicit exception"); |
duke@435 | 599 | assert(exception_kind == IMPLICIT_NULL || exception_kind == IMPLICIT_DIVIDE_BY_ZERO, "wrong implicit exception kind"); |
duke@435 | 600 | |
duke@435 | 601 | // for AbortVMOnException flag |
duke@435 | 602 | NOT_PRODUCT(Exceptions::debug_check_abort("java.lang.NullPointerException")); |
duke@435 | 603 | if (exception_kind == IMPLICIT_NULL) { |
duke@435 | 604 | Events::log("Implicit null exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc); |
duke@435 | 605 | } else { |
duke@435 | 606 | Events::log("Implicit division by zero exception at " INTPTR_FORMAT " to " INTPTR_FORMAT, pc, target_pc); |
duke@435 | 607 | } |
duke@435 | 608 | return target_pc; |
duke@435 | 609 | } |
duke@435 | 610 | |
duke@435 | 611 | ShouldNotReachHere(); |
duke@435 | 612 | return NULL; |
duke@435 | 613 | } |
duke@435 | 614 | |
duke@435 | 615 | |
duke@435 | 616 | JNI_ENTRY(void, throw_unsatisfied_link_error(JNIEnv* env, ...)) |
duke@435 | 617 | { |
duke@435 | 618 | THROW(vmSymbols::java_lang_UnsatisfiedLinkError()); |
duke@435 | 619 | } |
duke@435 | 620 | JNI_END |
duke@435 | 621 | |
duke@435 | 622 | |
duke@435 | 623 | address SharedRuntime::native_method_throw_unsatisfied_link_error_entry() { |
duke@435 | 624 | return CAST_FROM_FN_PTR(address, &throw_unsatisfied_link_error); |
duke@435 | 625 | } |
duke@435 | 626 | |
duke@435 | 627 | |
duke@435 | 628 | #ifndef PRODUCT |
duke@435 | 629 | JRT_ENTRY(intptr_t, SharedRuntime::trace_bytecode(JavaThread* thread, intptr_t preserve_this_value, intptr_t tos, intptr_t tos2)) |
duke@435 | 630 | const frame f = thread->last_frame(); |
duke@435 | 631 | assert(f.is_interpreted_frame(), "must be an interpreted frame"); |
duke@435 | 632 | #ifndef PRODUCT |
duke@435 | 633 | methodHandle mh(THREAD, f.interpreter_frame_method()); |
duke@435 | 634 | BytecodeTracer::trace(mh, f.interpreter_frame_bcp(), tos, tos2); |
duke@435 | 635 | #endif // !PRODUCT |
duke@435 | 636 | return preserve_this_value; |
duke@435 | 637 | JRT_END |
duke@435 | 638 | #endif // !PRODUCT |
duke@435 | 639 | |
duke@435 | 640 | |
duke@435 | 641 | JRT_ENTRY(void, SharedRuntime::yield_all(JavaThread* thread, int attempts)) |
duke@435 | 642 | os::yield_all(attempts); |
duke@435 | 643 | JRT_END |
duke@435 | 644 | |
duke@435 | 645 | |
duke@435 | 646 | // --------------------------------------------------------------------------------------------------------- |
duke@435 | 647 | // Non-product code |
duke@435 | 648 | #ifndef PRODUCT |
duke@435 | 649 | |
duke@435 | 650 | void SharedRuntime::verify_caller_frame(frame caller_frame, methodHandle callee_method) { |
duke@435 | 651 | ResourceMark rm; |
duke@435 | 652 | assert (caller_frame.is_interpreted_frame(), "sanity check"); |
duke@435 | 653 | assert (callee_method->has_compiled_code(), "callee must be compiled"); |
duke@435 | 654 | methodHandle caller_method (Thread::current(), caller_frame.interpreter_frame_method()); |
duke@435 | 655 | jint bci = caller_frame.interpreter_frame_bci(); |
duke@435 | 656 | methodHandle method = find_callee_method_inside_interpreter(caller_frame, caller_method, bci); |
duke@435 | 657 | assert (callee_method == method, "incorrect method"); |
duke@435 | 658 | } |
duke@435 | 659 | |
duke@435 | 660 | methodHandle SharedRuntime::find_callee_method_inside_interpreter(frame caller_frame, methodHandle caller_method, int bci) { |
duke@435 | 661 | EXCEPTION_MARK; |
duke@435 | 662 | Bytecode_invoke* bytecode = Bytecode_invoke_at(caller_method, bci); |
duke@435 | 663 | methodHandle staticCallee = bytecode->static_target(CATCH); // Non-product code |
duke@435 | 664 | |
duke@435 | 665 | bytecode = Bytecode_invoke_at(caller_method, bci); |
duke@435 | 666 | int bytecode_index = bytecode->index(); |
duke@435 | 667 | Bytecodes::Code bc = bytecode->adjusted_invoke_code(); |
duke@435 | 668 | |
duke@435 | 669 | Handle receiver; |
duke@435 | 670 | if (bc == Bytecodes::_invokeinterface || |
duke@435 | 671 | bc == Bytecodes::_invokevirtual || |
duke@435 | 672 | bc == Bytecodes::_invokespecial) { |
duke@435 | 673 | symbolHandle signature (THREAD, staticCallee->signature()); |
duke@435 | 674 | receiver = Handle(THREAD, retrieve_receiver(signature, caller_frame)); |
duke@435 | 675 | } else { |
duke@435 | 676 | receiver = Handle(); |
duke@435 | 677 | } |
duke@435 | 678 | CallInfo result; |
duke@435 | 679 | constantPoolHandle constants (THREAD, caller_method->constants()); |
duke@435 | 680 | LinkResolver::resolve_invoke(result, receiver, constants, bytecode_index, bc, CATCH); // Non-product code |
duke@435 | 681 | methodHandle calleeMethod = result.selected_method(); |
duke@435 | 682 | return calleeMethod; |
duke@435 | 683 | } |
duke@435 | 684 | |
duke@435 | 685 | #endif // PRODUCT |
duke@435 | 686 | |
duke@435 | 687 | |
duke@435 | 688 | JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj)) |
duke@435 | 689 | assert(obj->is_oop(), "must be a valid oop"); |
duke@435 | 690 | assert(obj->klass()->klass_part()->has_finalizer(), "shouldn't be here otherwise"); |
duke@435 | 691 | instanceKlass::register_finalizer(instanceOop(obj), CHECK); |
duke@435 | 692 | JRT_END |
duke@435 | 693 | |
duke@435 | 694 | |
duke@435 | 695 | jlong SharedRuntime::get_java_tid(Thread* thread) { |
duke@435 | 696 | if (thread != NULL) { |
duke@435 | 697 | if (thread->is_Java_thread()) { |
duke@435 | 698 | oop obj = ((JavaThread*)thread)->threadObj(); |
duke@435 | 699 | return (obj == NULL) ? 0 : java_lang_Thread::thread_id(obj); |
duke@435 | 700 | } |
duke@435 | 701 | } |
duke@435 | 702 | return 0; |
duke@435 | 703 | } |
duke@435 | 704 | |
duke@435 | 705 | /** |
duke@435 | 706 | * This function ought to be a void function, but cannot be because |
duke@435 | 707 | * it gets turned into a tail-call on sparc, which runs into dtrace bug |
duke@435 | 708 | * 6254741. Once that is fixed we can remove the dummy return value. |
duke@435 | 709 | */ |
duke@435 | 710 | int SharedRuntime::dtrace_object_alloc(oopDesc* o) { |
duke@435 | 711 | return dtrace_object_alloc_base(Thread::current(), o); |
duke@435 | 712 | } |
duke@435 | 713 | |
duke@435 | 714 | int SharedRuntime::dtrace_object_alloc_base(Thread* thread, oopDesc* o) { |
duke@435 | 715 | assert(DTraceAllocProbes, "wrong call"); |
duke@435 | 716 | Klass* klass = o->blueprint(); |
duke@435 | 717 | int size = o->size(); |
duke@435 | 718 | symbolOop name = klass->name(); |
duke@435 | 719 | HS_DTRACE_PROBE4(hotspot, object__alloc, get_java_tid(thread), |
duke@435 | 720 | name->bytes(), name->utf8_length(), size * HeapWordSize); |
duke@435 | 721 | return 0; |
duke@435 | 722 | } |
duke@435 | 723 | |
duke@435 | 724 | JRT_LEAF(int, SharedRuntime::dtrace_method_entry( |
duke@435 | 725 | JavaThread* thread, methodOopDesc* method)) |
duke@435 | 726 | assert(DTraceMethodProbes, "wrong call"); |
duke@435 | 727 | symbolOop kname = method->klass_name(); |
duke@435 | 728 | symbolOop name = method->name(); |
duke@435 | 729 | symbolOop sig = method->signature(); |
duke@435 | 730 | HS_DTRACE_PROBE7(hotspot, method__entry, get_java_tid(thread), |
duke@435 | 731 | kname->bytes(), kname->utf8_length(), |
duke@435 | 732 | name->bytes(), name->utf8_length(), |
duke@435 | 733 | sig->bytes(), sig->utf8_length()); |
duke@435 | 734 | return 0; |
duke@435 | 735 | JRT_END |
duke@435 | 736 | |
duke@435 | 737 | JRT_LEAF(int, SharedRuntime::dtrace_method_exit( |
duke@435 | 738 | JavaThread* thread, methodOopDesc* method)) |
duke@435 | 739 | assert(DTraceMethodProbes, "wrong call"); |
duke@435 | 740 | symbolOop kname = method->klass_name(); |
duke@435 | 741 | symbolOop name = method->name(); |
duke@435 | 742 | symbolOop sig = method->signature(); |
duke@435 | 743 | HS_DTRACE_PROBE7(hotspot, method__return, get_java_tid(thread), |
duke@435 | 744 | kname->bytes(), kname->utf8_length(), |
duke@435 | 745 | name->bytes(), name->utf8_length(), |
duke@435 | 746 | sig->bytes(), sig->utf8_length()); |
duke@435 | 747 | return 0; |
duke@435 | 748 | JRT_END |
duke@435 | 749 | |
duke@435 | 750 | |
duke@435 | 751 | // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode |
duke@435 | 752 | // for a call currently in progress, i.e., arguments have been pushed on the stack |
duke@435 | 753 | // but the callee has not been invoked yet. Used by: resolve virtual/static, |
duke@435 | 754 | // vtable updates, etc. Caller frame must be compiled. |
duke@435 | 755 | Handle SharedRuntime::find_callee_info(JavaThread* thread, Bytecodes::Code& bc, CallInfo& callinfo, TRAPS) { |
duke@435 | 756 | ResourceMark rm(THREAD); |
duke@435 | 757 | |
duke@435 | 758 | // last java frame on stack (which includes native call frames) |
duke@435 | 759 | vframeStream vfst(thread, true); // Do not skip any javaCalls |
duke@435 | 760 | |
duke@435 | 761 | return find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(Handle())); |
duke@435 | 762 | } |
duke@435 | 763 | |
duke@435 | 764 | |
duke@435 | 765 | // Finds receiver, CallInfo (i.e. receiver method), and calling bytecode |
duke@435 | 766 | // for a call currently in progress, i.e., arguments have been pushed on the stack |
duke@435 | 767 | // but callee has not been invoked yet. Caller frame must be compiled. |
duke@435 | 768 | Handle SharedRuntime::find_callee_info_helper(JavaThread* thread, |
duke@435 | 769 | vframeStream& vfst, |
duke@435 | 770 | Bytecodes::Code& bc, |
duke@435 | 771 | CallInfo& callinfo, TRAPS) { |
duke@435 | 772 | Handle receiver; |
duke@435 | 773 | Handle nullHandle; //create a handy null handle for exception returns |
duke@435 | 774 | |
duke@435 | 775 | assert(!vfst.at_end(), "Java frame must exist"); |
duke@435 | 776 | |
duke@435 | 777 | // Find caller and bci from vframe |
duke@435 | 778 | methodHandle caller (THREAD, vfst.method()); |
duke@435 | 779 | int bci = vfst.bci(); |
duke@435 | 780 | |
duke@435 | 781 | // Find bytecode |
duke@435 | 782 | Bytecode_invoke* bytecode = Bytecode_invoke_at(caller, bci); |
duke@435 | 783 | bc = bytecode->adjusted_invoke_code(); |
duke@435 | 784 | int bytecode_index = bytecode->index(); |
duke@435 | 785 | |
duke@435 | 786 | // Find receiver for non-static call |
duke@435 | 787 | if (bc != Bytecodes::_invokestatic) { |
duke@435 | 788 | // This register map must be updated since we need to find the receiver for |
duke@435 | 789 | // compiled frames. The receiver might be in a register. |
duke@435 | 790 | RegisterMap reg_map2(thread); |
duke@435 | 791 | frame stubFrame = thread->last_frame(); |
duke@435 | 792 | // Caller-frame is a compiled frame |
duke@435 | 793 | frame callerFrame = stubFrame.sender(®_map2); |
duke@435 | 794 | |
duke@435 | 795 | methodHandle callee = bytecode->static_target(CHECK_(nullHandle)); |
duke@435 | 796 | if (callee.is_null()) { |
duke@435 | 797 | THROW_(vmSymbols::java_lang_NoSuchMethodException(), nullHandle); |
duke@435 | 798 | } |
duke@435 | 799 | // Retrieve from a compiled argument list |
duke@435 | 800 | receiver = Handle(THREAD, callerFrame.retrieve_receiver(®_map2)); |
duke@435 | 801 | |
duke@435 | 802 | if (receiver.is_null()) { |
duke@435 | 803 | THROW_(vmSymbols::java_lang_NullPointerException(), nullHandle); |
duke@435 | 804 | } |
duke@435 | 805 | } |
duke@435 | 806 | |
duke@435 | 807 | // Resolve method. This is parameterized by bytecode. |
duke@435 | 808 | constantPoolHandle constants (THREAD, caller->constants()); |
duke@435 | 809 | assert (receiver.is_null() || receiver->is_oop(), "wrong receiver"); |
duke@435 | 810 | LinkResolver::resolve_invoke(callinfo, receiver, constants, bytecode_index, bc, CHECK_(nullHandle)); |
duke@435 | 811 | |
duke@435 | 812 | #ifdef ASSERT |
duke@435 | 813 | // Check that the receiver klass is of the right subtype and that it is initialized for virtual calls |
duke@435 | 814 | if (bc != Bytecodes::_invokestatic) { |
duke@435 | 815 | assert(receiver.not_null(), "should have thrown exception"); |
duke@435 | 816 | KlassHandle receiver_klass (THREAD, receiver->klass()); |
duke@435 | 817 | klassOop rk = constants->klass_ref_at(bytecode_index, CHECK_(nullHandle)); |
duke@435 | 818 | // klass is already loaded |
duke@435 | 819 | KlassHandle static_receiver_klass (THREAD, rk); |
duke@435 | 820 | assert(receiver_klass->is_subtype_of(static_receiver_klass()), "actual receiver must be subclass of static receiver klass"); |
duke@435 | 821 | if (receiver_klass->oop_is_instance()) { |
duke@435 | 822 | if (instanceKlass::cast(receiver_klass())->is_not_initialized()) { |
duke@435 | 823 | tty->print_cr("ERROR: Klass not yet initialized!!"); |
duke@435 | 824 | receiver_klass.print(); |
duke@435 | 825 | } |
duke@435 | 826 | assert (!instanceKlass::cast(receiver_klass())->is_not_initialized(), "receiver_klass must be initialized"); |
duke@435 | 827 | } |
duke@435 | 828 | } |
duke@435 | 829 | #endif |
duke@435 | 830 | |
duke@435 | 831 | return receiver; |
duke@435 | 832 | } |
duke@435 | 833 | |
duke@435 | 834 | methodHandle SharedRuntime::find_callee_method(JavaThread* thread, TRAPS) { |
duke@435 | 835 | ResourceMark rm(THREAD); |
duke@435 | 836 | // We need first to check if any Java activations (compiled, interpreted) |
duke@435 | 837 | // exist on the stack since last JavaCall. If not, we need |
duke@435 | 838 | // to get the target method from the JavaCall wrapper. |
duke@435 | 839 | vframeStream vfst(thread, true); // Do not skip any javaCalls |
duke@435 | 840 | methodHandle callee_method; |
duke@435 | 841 | if (vfst.at_end()) { |
duke@435 | 842 | // No Java frames were found on stack since we did the JavaCall. |
duke@435 | 843 | // Hence the stack can only contain an entry_frame. We need to |
duke@435 | 844 | // find the target method from the stub frame. |
duke@435 | 845 | RegisterMap reg_map(thread, false); |
duke@435 | 846 | frame fr = thread->last_frame(); |
duke@435 | 847 | assert(fr.is_runtime_frame(), "must be a runtimeStub"); |
duke@435 | 848 | fr = fr.sender(®_map); |
duke@435 | 849 | assert(fr.is_entry_frame(), "must be"); |
duke@435 | 850 | // fr is now pointing to the entry frame. |
duke@435 | 851 | callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method()); |
duke@435 | 852 | assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??"); |
duke@435 | 853 | } else { |
duke@435 | 854 | Bytecodes::Code bc; |
duke@435 | 855 | CallInfo callinfo; |
duke@435 | 856 | find_callee_info_helper(thread, vfst, bc, callinfo, CHECK_(methodHandle())); |
duke@435 | 857 | callee_method = callinfo.selected_method(); |
duke@435 | 858 | } |
duke@435 | 859 | assert(callee_method()->is_method(), "must be"); |
duke@435 | 860 | return callee_method; |
duke@435 | 861 | } |
duke@435 | 862 | |
duke@435 | 863 | // Resolves a call. |
duke@435 | 864 | methodHandle SharedRuntime::resolve_helper(JavaThread *thread, |
duke@435 | 865 | bool is_virtual, |
duke@435 | 866 | bool is_optimized, TRAPS) { |
duke@435 | 867 | methodHandle callee_method; |
duke@435 | 868 | callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); |
duke@435 | 869 | if (JvmtiExport::can_hotswap_or_post_breakpoint()) { |
duke@435 | 870 | int retry_count = 0; |
duke@435 | 871 | while (!HAS_PENDING_EXCEPTION && callee_method->is_old() && |
duke@435 | 872 | callee_method->method_holder() != SystemDictionary::object_klass()) { |
duke@435 | 873 | // If there is a pending exception then there is no need to re-try to |
duke@435 | 874 | // resolve this method. |
duke@435 | 875 | // If the method has been redefined, we need to try again. |
duke@435 | 876 | // Hack: we have no way to update the vtables of arrays, so don't |
duke@435 | 877 | // require that java.lang.Object has been updated. |
duke@435 | 878 | |
duke@435 | 879 | // It is very unlikely that a method is redefined more than 100 times |
duke@435 | 880 | // in the middle of resolve. If this loops more than 100 times, there |
duke@435 | 881 | // is probably a bug here. |
duke@435 | 882 | guarantee((retry_count++ < 100), |
duke@435 | 883 | "Could not resolve to latest version of redefined method"); |
duke@435 | 884 | // method is redefined in the middle of resolve so re-try. |
duke@435 | 885 | callee_method = resolve_sub_helper(thread, is_virtual, is_optimized, THREAD); |
duke@435 | 886 | } |
duke@435 | 887 | } |
duke@435 | 888 | return callee_method; |
duke@435 | 889 | } |
duke@435 | 890 | |
duke@435 | 891 | // Resolves a call. The compilers generate code for calls that go here |
duke@435 | 892 | // and are patched with the real destination of the call. |
duke@435 | 893 | methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread, |
duke@435 | 894 | bool is_virtual, |
duke@435 | 895 | bool is_optimized, TRAPS) { |
duke@435 | 896 | |
duke@435 | 897 | ResourceMark rm(thread); |
duke@435 | 898 | RegisterMap cbl_map(thread, false); |
duke@435 | 899 | frame caller_frame = thread->last_frame().sender(&cbl_map); |
duke@435 | 900 | |
duke@435 | 901 | CodeBlob* cb = caller_frame.cb(); |
duke@435 | 902 | guarantee(cb != NULL && cb->is_nmethod(), "must be called from nmethod"); |
duke@435 | 903 | // make sure caller is not getting deoptimized |
duke@435 | 904 | // and removed before we are done with it. |
duke@435 | 905 | // CLEANUP - with lazy deopt shouldn't need this lock |
duke@435 | 906 | nmethodLocker caller_lock((nmethod*)cb); |
duke@435 | 907 | |
duke@435 | 908 | |
duke@435 | 909 | // determine call info & receiver |
duke@435 | 910 | // note: a) receiver is NULL for static calls |
duke@435 | 911 | // b) an exception is thrown if receiver is NULL for non-static calls |
duke@435 | 912 | CallInfo call_info; |
duke@435 | 913 | Bytecodes::Code invoke_code = Bytecodes::_illegal; |
duke@435 | 914 | Handle receiver = find_callee_info(thread, invoke_code, |
duke@435 | 915 | call_info, CHECK_(methodHandle())); |
duke@435 | 916 | methodHandle callee_method = call_info.selected_method(); |
duke@435 | 917 | |
duke@435 | 918 | assert((!is_virtual && invoke_code == Bytecodes::_invokestatic) || |
duke@435 | 919 | ( is_virtual && invoke_code != Bytecodes::_invokestatic), "inconsistent bytecode"); |
duke@435 | 920 | |
duke@435 | 921 | #ifndef PRODUCT |
duke@435 | 922 | // tracing/debugging/statistics |
duke@435 | 923 | int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) : |
duke@435 | 924 | (is_virtual) ? (&_resolve_virtual_ctr) : |
duke@435 | 925 | (&_resolve_static_ctr); |
duke@435 | 926 | Atomic::inc(addr); |
duke@435 | 927 | |
duke@435 | 928 | if (TraceCallFixup) { |
duke@435 | 929 | ResourceMark rm(thread); |
duke@435 | 930 | tty->print("resolving %s%s (%s) call to", |
duke@435 | 931 | (is_optimized) ? "optimized " : "", (is_virtual) ? "virtual" : "static", |
duke@435 | 932 | Bytecodes::name(invoke_code)); |
duke@435 | 933 | callee_method->print_short_name(tty); |
duke@435 | 934 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 935 | } |
duke@435 | 936 | #endif |
duke@435 | 937 | |
duke@435 | 938 | // Compute entry points. This might require generation of C2I converter |
duke@435 | 939 | // frames, so we cannot be holding any locks here. Furthermore, the |
duke@435 | 940 | // computation of the entry points is independent of patching the call. We |
duke@435 | 941 | // always return the entry-point, but we only patch the stub if the call has |
duke@435 | 942 | // not been deoptimized. Return values: For a virtual call this is a |
duke@435 | 943 | // (cached_oop, destination address) pair. For a static call/optimized |
duke@435 | 944 | // virtual this is just a destination address. |
duke@435 | 945 | |
duke@435 | 946 | StaticCallInfo static_call_info; |
duke@435 | 947 | CompiledICInfo virtual_call_info; |
duke@435 | 948 | |
duke@435 | 949 | |
duke@435 | 950 | // Make sure the callee nmethod does not get deoptimized and removed before |
duke@435 | 951 | // we are done patching the code. |
duke@435 | 952 | nmethod* nm = callee_method->code(); |
duke@435 | 953 | nmethodLocker nl_callee(nm); |
duke@435 | 954 | #ifdef ASSERT |
duke@435 | 955 | address dest_entry_point = nm == NULL ? 0 : nm->entry_point(); // used below |
duke@435 | 956 | #endif |
duke@435 | 957 | |
duke@435 | 958 | if (is_virtual) { |
duke@435 | 959 | assert(receiver.not_null(), "sanity check"); |
duke@435 | 960 | bool static_bound = call_info.resolved_method()->can_be_statically_bound(); |
duke@435 | 961 | KlassHandle h_klass(THREAD, receiver->klass()); |
duke@435 | 962 | CompiledIC::compute_monomorphic_entry(callee_method, h_klass, |
duke@435 | 963 | is_optimized, static_bound, virtual_call_info, |
duke@435 | 964 | CHECK_(methodHandle())); |
duke@435 | 965 | } else { |
duke@435 | 966 | // static call |
duke@435 | 967 | CompiledStaticCall::compute_entry(callee_method, static_call_info); |
duke@435 | 968 | } |
duke@435 | 969 | |
duke@435 | 970 | // grab lock, check for deoptimization and potentially patch caller |
duke@435 | 971 | { |
duke@435 | 972 | MutexLocker ml_patch(CompiledIC_lock); |
duke@435 | 973 | |
duke@435 | 974 | // Now that we are ready to patch if the methodOop was redefined then |
duke@435 | 975 | // don't update call site and let the caller retry. |
duke@435 | 976 | |
duke@435 | 977 | if (!callee_method->is_old()) { |
duke@435 | 978 | #ifdef ASSERT |
duke@435 | 979 | // We must not try to patch to jump to an already unloaded method. |
duke@435 | 980 | if (dest_entry_point != 0) { |
duke@435 | 981 | assert(CodeCache::find_blob(dest_entry_point) != NULL, |
duke@435 | 982 | "should not unload nmethod while locked"); |
duke@435 | 983 | } |
duke@435 | 984 | #endif |
duke@435 | 985 | if (is_virtual) { |
duke@435 | 986 | CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc()); |
duke@435 | 987 | if (inline_cache->is_clean()) { |
duke@435 | 988 | inline_cache->set_to_monomorphic(virtual_call_info); |
duke@435 | 989 | } |
duke@435 | 990 | } else { |
duke@435 | 991 | CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc()); |
duke@435 | 992 | if (ssc->is_clean()) ssc->set(static_call_info); |
duke@435 | 993 | } |
duke@435 | 994 | } |
duke@435 | 995 | |
duke@435 | 996 | } // unlock CompiledIC_lock |
duke@435 | 997 | |
duke@435 | 998 | return callee_method; |
duke@435 | 999 | } |
duke@435 | 1000 | |
duke@435 | 1001 | |
duke@435 | 1002 | // Inline caches exist only in compiled code |
duke@435 | 1003 | JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method_ic_miss(JavaThread* thread)) |
duke@435 | 1004 | #ifdef ASSERT |
duke@435 | 1005 | RegisterMap reg_map(thread, false); |
duke@435 | 1006 | frame stub_frame = thread->last_frame(); |
duke@435 | 1007 | assert(stub_frame.is_runtime_frame(), "sanity check"); |
duke@435 | 1008 | frame caller_frame = stub_frame.sender(®_map); |
duke@435 | 1009 | assert(!caller_frame.is_interpreted_frame() && !caller_frame.is_entry_frame(), "unexpected frame"); |
duke@435 | 1010 | #endif /* ASSERT */ |
duke@435 | 1011 | |
duke@435 | 1012 | methodHandle callee_method; |
duke@435 | 1013 | JRT_BLOCK |
duke@435 | 1014 | callee_method = SharedRuntime::handle_ic_miss_helper(thread, CHECK_NULL); |
duke@435 | 1015 | // Return methodOop through TLS |
duke@435 | 1016 | thread->set_vm_result(callee_method()); |
duke@435 | 1017 | JRT_BLOCK_END |
duke@435 | 1018 | // return compiled code entry point after potential safepoints |
duke@435 | 1019 | assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); |
duke@435 | 1020 | return callee_method->verified_code_entry(); |
duke@435 | 1021 | JRT_END |
duke@435 | 1022 | |
duke@435 | 1023 | |
duke@435 | 1024 | // Handle call site that has been made non-entrant |
duke@435 | 1025 | JRT_BLOCK_ENTRY(address, SharedRuntime::handle_wrong_method(JavaThread* thread)) |
duke@435 | 1026 | // 6243940 We might end up in here if the callee is deoptimized |
duke@435 | 1027 | // as we race to call it. We don't want to take a safepoint if |
duke@435 | 1028 | // the caller was interpreted because the caller frame will look |
duke@435 | 1029 | // interpreted to the stack walkers and arguments are now |
duke@435 | 1030 | // "compiled" so it is much better to make this transition |
duke@435 | 1031 | // invisible to the stack walking code. The i2c path will |
duke@435 | 1032 | // place the callee method in the callee_target. It is stashed |
duke@435 | 1033 | // there because if we try to find the callee by normal means a |
duke@435 | 1034 | // safepoint is possible and we could have trouble gc'ing the compiled args. |
duke@435 | 1035 | RegisterMap reg_map(thread, false); |
duke@435 | 1036 | frame stub_frame = thread->last_frame(); |
duke@435 | 1037 | assert(stub_frame.is_runtime_frame(), "sanity check"); |
duke@435 | 1038 | frame caller_frame = stub_frame.sender(®_map); |
duke@435 | 1039 | if (caller_frame.is_interpreted_frame() || caller_frame.is_entry_frame() ) { |
duke@435 | 1040 | methodOop callee = thread->callee_target(); |
duke@435 | 1041 | guarantee(callee != NULL && callee->is_method(), "bad handshake"); |
duke@435 | 1042 | thread->set_vm_result(callee); |
duke@435 | 1043 | thread->set_callee_target(NULL); |
duke@435 | 1044 | return callee->get_c2i_entry(); |
duke@435 | 1045 | } |
duke@435 | 1046 | |
duke@435 | 1047 | // Must be compiled to compiled path which is safe to stackwalk |
duke@435 | 1048 | methodHandle callee_method; |
duke@435 | 1049 | JRT_BLOCK |
duke@435 | 1050 | // Force resolving of caller (if we called from compiled frame) |
duke@435 | 1051 | callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_NULL); |
duke@435 | 1052 | thread->set_vm_result(callee_method()); |
duke@435 | 1053 | JRT_BLOCK_END |
duke@435 | 1054 | // return compiled code entry point after potential safepoints |
duke@435 | 1055 | assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); |
duke@435 | 1056 | return callee_method->verified_code_entry(); |
duke@435 | 1057 | JRT_END |
duke@435 | 1058 | |
duke@435 | 1059 | |
duke@435 | 1060 | // resolve a static call and patch code |
duke@435 | 1061 | JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_static_call_C(JavaThread *thread )) |
duke@435 | 1062 | methodHandle callee_method; |
duke@435 | 1063 | JRT_BLOCK |
duke@435 | 1064 | callee_method = SharedRuntime::resolve_helper(thread, false, false, CHECK_NULL); |
duke@435 | 1065 | thread->set_vm_result(callee_method()); |
duke@435 | 1066 | JRT_BLOCK_END |
duke@435 | 1067 | // return compiled code entry point after potential safepoints |
duke@435 | 1068 | assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); |
duke@435 | 1069 | return callee_method->verified_code_entry(); |
duke@435 | 1070 | JRT_END |
duke@435 | 1071 | |
duke@435 | 1072 | |
duke@435 | 1073 | // resolve virtual call and update inline cache to monomorphic |
duke@435 | 1074 | JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_virtual_call_C(JavaThread *thread )) |
duke@435 | 1075 | methodHandle callee_method; |
duke@435 | 1076 | JRT_BLOCK |
duke@435 | 1077 | callee_method = SharedRuntime::resolve_helper(thread, true, false, CHECK_NULL); |
duke@435 | 1078 | thread->set_vm_result(callee_method()); |
duke@435 | 1079 | JRT_BLOCK_END |
duke@435 | 1080 | // return compiled code entry point after potential safepoints |
duke@435 | 1081 | assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); |
duke@435 | 1082 | return callee_method->verified_code_entry(); |
duke@435 | 1083 | JRT_END |
duke@435 | 1084 | |
duke@435 | 1085 | |
duke@435 | 1086 | // Resolve a virtual call that can be statically bound (e.g., always |
duke@435 | 1087 | // monomorphic, so it has no inline cache). Patch code to resolved target. |
duke@435 | 1088 | JRT_BLOCK_ENTRY(address, SharedRuntime::resolve_opt_virtual_call_C(JavaThread *thread)) |
duke@435 | 1089 | methodHandle callee_method; |
duke@435 | 1090 | JRT_BLOCK |
duke@435 | 1091 | callee_method = SharedRuntime::resolve_helper(thread, true, true, CHECK_NULL); |
duke@435 | 1092 | thread->set_vm_result(callee_method()); |
duke@435 | 1093 | JRT_BLOCK_END |
duke@435 | 1094 | // return compiled code entry point after potential safepoints |
duke@435 | 1095 | assert(callee_method->verified_code_entry() != NULL, " Jump to zero!"); |
duke@435 | 1096 | return callee_method->verified_code_entry(); |
duke@435 | 1097 | JRT_END |
duke@435 | 1098 | |
duke@435 | 1099 | |
duke@435 | 1100 | |
duke@435 | 1101 | |
duke@435 | 1102 | |
duke@435 | 1103 | methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) { |
duke@435 | 1104 | ResourceMark rm(thread); |
duke@435 | 1105 | CallInfo call_info; |
duke@435 | 1106 | Bytecodes::Code bc; |
duke@435 | 1107 | |
duke@435 | 1108 | // receiver is NULL for static calls. An exception is thrown for NULL |
duke@435 | 1109 | // receivers for non-static calls |
duke@435 | 1110 | Handle receiver = find_callee_info(thread, bc, call_info, |
duke@435 | 1111 | CHECK_(methodHandle())); |
duke@435 | 1112 | // Compiler1 can produce virtual call sites that can actually be statically bound |
duke@435 | 1113 | // If we fell thru to below we would think that the site was going megamorphic
duke@435 | 1114 | // when in fact the site can never miss. Worse, because we'd think it was megamorphic
duke@435 | 1115 | // we'd try to do a vtable dispatch; however, methods that can be statically bound
duke@435 | 1116 | // don't have vtable entries (vtable_index < 0) and we'd blow up. So we force a
duke@435 | 1117 | // reresolution of the call site (as if we did a handle_wrong_method and not a
duke@435 | 1118 | // plain ic_miss) and the site will be converted to an optimized virtual call site,
duke@435 | 1119 | // never to miss again. I don't believe C2 will produce code like this but if it |
duke@435 | 1120 | // did this would still be the correct thing to do for it too, hence no ifdef. |
duke@435 | 1121 | // |
duke@435 | 1122 | if (call_info.resolved_method()->can_be_statically_bound()) { |
duke@435 | 1123 | methodHandle callee_method = SharedRuntime::reresolve_call_site(thread, CHECK_(methodHandle())); |
duke@435 | 1124 | if (TraceCallFixup) { |
duke@435 | 1125 | RegisterMap reg_map(thread, false); |
duke@435 | 1126 | frame caller_frame = thread->last_frame().sender(®_map); |
duke@435 | 1127 | ResourceMark rm(thread); |
duke@435 | 1128 | tty->print("converting IC miss to reresolve (%s) call to", Bytecodes::name(bc)); |
duke@435 | 1129 | callee_method->print_short_name(tty); |
duke@435 | 1130 | tty->print_cr(" from pc: " INTPTR_FORMAT, caller_frame.pc()); |
duke@435 | 1131 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 1132 | } |
duke@435 | 1133 | return callee_method; |
duke@435 | 1134 | } |
duke@435 | 1135 | |
duke@435 | 1136 | methodHandle callee_method = call_info.selected_method(); |
duke@435 | 1137 | |
duke@435 | 1138 | bool should_be_mono = false; |
duke@435 | 1139 | |
duke@435 | 1140 | #ifndef PRODUCT |
duke@435 | 1141 | Atomic::inc(&_ic_miss_ctr); |
duke@435 | 1142 | |
duke@435 | 1143 | // Statistics & Tracing |
duke@435 | 1144 | if (TraceCallFixup) { |
duke@435 | 1145 | ResourceMark rm(thread); |
duke@435 | 1146 | tty->print("IC miss (%s) call to", Bytecodes::name(bc)); |
duke@435 | 1147 | callee_method->print_short_name(tty); |
duke@435 | 1148 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 1149 | } |
duke@435 | 1150 | |
duke@435 | 1151 | if (ICMissHistogram) { |
duke@435 | 1152 | MutexLocker m(VMStatistic_lock); |
duke@435 | 1153 | RegisterMap reg_map(thread, false); |
duke@435 | 1154 | frame f = thread->last_frame().real_sender(®_map);// skip runtime stub |
duke@435 | 1155 | // produce statistics under the lock |
duke@435 | 1156 | trace_ic_miss(f.pc()); |
duke@435 | 1157 | } |
duke@435 | 1158 | #endif |
duke@435 | 1159 | |
duke@435 | 1160 | // install an event collector so that when a vtable stub is created the |
duke@435 | 1161 | // profiler can be notified via a DYNAMIC_CODE_GENERATED event. The |
duke@435 | 1162 | // event can't be posted when the stub is created as locks are held |
duke@435 | 1163 | // - instead the event will be deferred until the event collector goes |
duke@435 | 1164 | // out of scope. |
duke@435 | 1165 | JvmtiDynamicCodeEventCollector event_collector; |
duke@435 | 1166 | |
duke@435 | 1167 | // Update inline cache to megamorphic. Skip update if caller has been |
duke@435 | 1168 | // made non-entrant or we are called from interpreted. |
duke@435 | 1169 | { MutexLocker ml_patch (CompiledIC_lock); |
duke@435 | 1170 | RegisterMap reg_map(thread, false); |
duke@435 | 1171 | frame caller_frame = thread->last_frame().sender(®_map); |
duke@435 | 1172 | CodeBlob* cb = caller_frame.cb(); |
duke@435 | 1173 | if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) { |
duke@435 | 1174 | // Not a non-entrant nmethod, so find inline_cache |
duke@435 | 1175 | CompiledIC* inline_cache = CompiledIC_before(caller_frame.pc()); |
duke@435 | 1177 | if (inline_cache->is_optimized()) { |
duke@435 | 1178 | if (TraceCallFixup) { |
duke@435 | 1179 | ResourceMark rm(thread); |
duke@435 | 1180 | tty->print("OPTIMIZED IC miss (%s) call to", Bytecodes::name(bc)); |
duke@435 | 1181 | callee_method->print_short_name(tty); |
duke@435 | 1182 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 1183 | } |
duke@435 | 1184 | should_be_mono = true; |
duke@435 | 1185 | } else { |
duke@435 | 1186 | compiledICHolderOop ic_oop = (compiledICHolderOop) inline_cache->cached_oop(); |
duke@435 | 1187 | if ( ic_oop != NULL && ic_oop->is_compiledICHolder()) { |
duke@435 | 1188 | |
duke@435 | 1189 | if (receiver()->klass() == ic_oop->holder_klass()) { |
duke@435 | 1190 | // This isn't a real miss. We must have seen that compiled code |
duke@435 | 1191 | // is now available and we want the call site converted to a |
duke@435 | 1192 | // monomorphic compiled call site. |
duke@435 | 1193 | // We can't assert for callee_method->code() != NULL because it |
duke@435 | 1194 | // could have been deoptimized in the meantime |
duke@435 | 1195 | if (TraceCallFixup) { |
duke@435 | 1196 | ResourceMark rm(thread); |
duke@435 | 1197 | tty->print("FALSE IC miss (%s) converting to compiled call to", Bytecodes::name(bc)); |
duke@435 | 1198 | callee_method->print_short_name(tty); |
duke@435 | 1199 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 1200 | } |
duke@435 | 1201 | should_be_mono = true; |
duke@435 | 1202 | } |
duke@435 | 1203 | } |
duke@435 | 1204 | } |
duke@435 | 1205 | |
duke@435 | 1206 | if (should_be_mono) { |
duke@435 | 1207 | |
duke@435 | 1208 | // We have a path that was monomorphic but was going interpreted |
duke@435 | 1209 | // and now we have (or had) a compiled entry. We correct the IC |
duke@435 | 1210 | // by using a new icBuffer. |
duke@435 | 1211 | CompiledICInfo info; |
duke@435 | 1212 | KlassHandle receiver_klass(THREAD, receiver()->klass()); |
duke@435 | 1213 | inline_cache->compute_monomorphic_entry(callee_method, |
duke@435 | 1214 | receiver_klass, |
duke@435 | 1215 | inline_cache->is_optimized(), |
duke@435 | 1216 | false, |
duke@435 | 1217 | info, CHECK_(methodHandle())); |
duke@435 | 1218 | inline_cache->set_to_monomorphic(info); |
duke@435 | 1219 | } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { |
duke@435 | 1220 | // Change to megamorphic |
duke@435 | 1221 | inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle())); |
duke@435 | 1222 | } else { |
duke@435 | 1223 | // Either clean or megamorphic |
duke@435 | 1224 | } |
duke@435 | 1225 | } |
duke@435 | 1226 | } // Release CompiledIC_lock |
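  // In short, a genuine miss either turns the inline cache monomorphic (when the
  // receiver klass really matches the cached holder, or the site is optimized)
  // or promotes it to megamorphic; an IC that is already clean or megamorphic is
  // left untouched here.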
duke@435 | 1227 | |
duke@435 | 1228 | return callee_method; |
duke@435 | 1229 | } |
duke@435 | 1230 | |
duke@435 | 1231 | // |
duke@435 | 1232 | // Resets a call-site in compiled code so it will get resolved again. |
duke@435 | 1233 | // This routine handles virtual call sites, optimized virtual call
duke@435 | 1234 | // sites, and static call sites. It is typically used to change a call site's
duke@435 | 1235 | // destination from compiled to interpreted.
duke@435 | 1236 | // |
duke@435 | 1237 | methodHandle SharedRuntime::reresolve_call_site(JavaThread *thread, TRAPS) { |
duke@435 | 1238 | ResourceMark rm(thread); |
duke@435 | 1239 | RegisterMap reg_map(thread, false); |
duke@435 | 1240 | frame stub_frame = thread->last_frame(); |
duke@435 | 1241 | assert(stub_frame.is_runtime_frame(), "must be a runtimeStub"); |
duke@435 | 1242 | frame caller = stub_frame.sender(®_map); |
duke@435 | 1243 | |
duke@435 | 1244 | // Do nothing if the frame isn't a live compiled frame. |
duke@435 | 1245 | // nmethod could be deoptimized by the time we get here |
duke@435 | 1246 | // so no update to the caller is needed. |
duke@435 | 1247 | |
duke@435 | 1248 | if (caller.is_compiled_frame() && !caller.is_deoptimized_frame()) { |
duke@435 | 1249 | |
duke@435 | 1250 | address pc = caller.pc(); |
duke@435 | 1251 | Events::log("update call-site at pc " INTPTR_FORMAT, pc); |
duke@435 | 1252 | |
duke@435 | 1253 | // Default call_addr is the location of the "basic" call. |
duke@435 | 1254 | // Determine the address of the call we are reresolving. With
duke@435 | 1255 | // Inline Caches we will always find a recognizable call. |
duke@435 | 1256 | // With Inline Caches disabled we may or may not find a |
duke@435 | 1257 | // recognizable call. We will always find a call for static |
duke@435 | 1258 | // calls and for optimized virtual calls. For vanilla virtual |
duke@435 | 1259 | // calls it depends on the state of the UseInlineCaches switch. |
duke@435 | 1260 | // |
duke@435 | 1261 | // With Inline Caches disabled we can get here for a virtual call |
duke@435 | 1262 | // for two reasons: |
duke@435 | 1263 | // 1 - calling an abstract method. The vtable for abstract methods |
duke@435 | 1264 | // will run us thru handle_wrong_method and we will eventually |
duke@435 | 1265 | // end up in the interpreter to throw the AbstractMethodError.
duke@435 | 1266 | // 2 - a racing deoptimization. We could be doing a vanilla vtable |
duke@435 | 1267 | // call and between the time we fetch the entry address and |
duke@435 | 1268 | // we jump to it the target gets deoptimized. Similar to 1 |
duke@435 | 1269 | // we will wind up in the interpreter (thru a c2i with c2).
duke@435 | 1270 | // |
duke@435 | 1271 | address call_addr = NULL; |
duke@435 | 1272 | { |
duke@435 | 1273 | // Get call instruction under lock because another thread may be |
duke@435 | 1274 | // busy patching it. |
duke@435 | 1275 | MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 1276 | // Location of call instruction |
duke@435 | 1277 | if (NativeCall::is_call_before(pc)) { |
duke@435 | 1278 | NativeCall *ncall = nativeCall_before(pc); |
duke@435 | 1279 | call_addr = ncall->instruction_address(); |
duke@435 | 1280 | } |
duke@435 | 1281 | } |
duke@435 | 1282 | |
duke@435 | 1283 | // Check for static or virtual call |
duke@435 | 1284 | bool is_static_call = false; |
duke@435 | 1285 | nmethod* caller_nm = CodeCache::find_nmethod(pc); |
duke@435 | 1286 | // Make sure nmethod doesn't get deoptimized and removed until |
duke@435 | 1287 | // this is done with it. |
duke@435 | 1288 | // CLEANUP - with lazy deopt shouldn't need this lock |
duke@435 | 1289 | nmethodLocker nmlock(caller_nm); |
duke@435 | 1290 | |
duke@435 | 1291 | if (call_addr != NULL) { |
duke@435 | 1292 | RelocIterator iter(caller_nm, call_addr, call_addr+1); |
duke@435 | 1293 | int ret = iter.next(); // Get item |
duke@435 | 1294 | if (ret) { |
duke@435 | 1295 | assert(iter.addr() == call_addr, "must find call"); |
duke@435 | 1296 | if (iter.type() == relocInfo::static_call_type) { |
duke@435 | 1297 | is_static_call = true; |
duke@435 | 1298 | } else { |
duke@435 | 1299 | assert(iter.type() == relocInfo::virtual_call_type || |
duke@435 | 1300 | iter.type() == relocInfo::opt_virtual_call_type |
duke@435 | 1301 | , "unexpected relocInfo. type"); |
duke@435 | 1302 | } |
duke@435 | 1303 | } else { |
duke@435 | 1304 | assert(!UseInlineCaches, "relocation info. must exist for this address"); |
duke@435 | 1305 | } |
duke@435 | 1306 | |
duke@435 | 1307 | // Cleaning the inline cache will force a new resolve. This is more robust |
duke@435 | 1308 | // than directly setting it to the new destination, since resolving of calls |
duke@435 | 1309 | // is always done through the same code path. (experience shows that it |
duke@435 | 1310 | // leads to very hard to track down bugs, if an inline cache gets updated |
duke@435 | 1311 | // to a wrong method). It should not be performance critical, since the |
duke@435 | 1312 | // resolve is only done once. |
duke@435 | 1313 | |
duke@435 | 1314 | MutexLocker ml(CompiledIC_lock); |
duke@435 | 1315 | // |
duke@435 | 1316 | // We do not patch the call site if the nmethod has been made non-entrant |
duke@435 | 1317 | // as it is a waste of time |
duke@435 | 1318 | // |
duke@435 | 1319 | if (caller_nm->is_in_use()) { |
duke@435 | 1320 | if (is_static_call) { |
duke@435 | 1321 | CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); |
duke@435 | 1322 | ssc->set_to_clean(); |
duke@435 | 1323 | } else { |
duke@435 | 1324 | // compiled, dispatched call (which used to call an interpreted method) |
duke@435 | 1325 | CompiledIC* inline_cache = CompiledIC_at(call_addr); |
duke@435 | 1326 | inline_cache->set_to_clean(); |
duke@435 | 1327 | } |
duke@435 | 1328 | } |
duke@435 | 1329 | } |
duke@435 | 1330 | |
duke@435 | 1331 | } |
duke@435 | 1332 | |
duke@435 | 1333 | methodHandle callee_method = find_callee_method(thread, CHECK_(methodHandle())); |
duke@435 | 1334 | |
duke@435 | 1335 | |
duke@435 | 1336 | #ifndef PRODUCT |
duke@435 | 1337 | Atomic::inc(&_wrong_method_ctr); |
duke@435 | 1338 | |
duke@435 | 1339 | if (TraceCallFixup) { |
duke@435 | 1340 | ResourceMark rm(thread); |
duke@435 | 1341 | tty->print("handle_wrong_method reresolving call to"); |
duke@435 | 1342 | callee_method->print_short_name(tty); |
duke@435 | 1343 | tty->print_cr(" code: " INTPTR_FORMAT, callee_method->code()); |
duke@435 | 1344 | } |
duke@435 | 1345 | #endif |
duke@435 | 1346 | |
duke@435 | 1347 | return callee_method; |
duke@435 | 1348 | } |
duke@435 | 1349 | |
duke@435 | 1350 | // --------------------------------------------------------------------------- |
duke@435 | 1351 | // We are calling the interpreter via a c2i. Normally this would mean that |
duke@435 | 1352 | // we were called by a compiled method. However, we could have lost a race
duke@435 | 1353 | // where we went int -> i2c -> c2i and so the caller could in fact be
duke@435 | 1354 | // interpreted. If the caller is compiled we attempt to patch the caller
duke@435 | 1355 | // so it no longer calls into the interpreter.
duke@435 | 1356 | IRT_LEAF(void, SharedRuntime::fixup_callers_callsite(methodOopDesc* method, address caller_pc)) |
duke@435 | 1357 | methodOop moop(method); |
duke@435 | 1358 | |
duke@435 | 1359 | address entry_point = moop->from_compiled_entry(); |
duke@435 | 1360 | |
duke@435 | 1361 | // It's possible that deoptimization can occur at a call site which hasn't |
duke@435 | 1362 | // been resolved yet, in which case this function will be called from |
duke@435 | 1363 | // an nmethod that has been patched for deopt and we can ignore the |
duke@435 | 1364 | // request for a fixup. |
duke@435 | 1365 | // It is also possible that we lost a race and from_compiled_entry
duke@435 | 1366 | // is now back to the i2c; in that case we don't need to patch, and if
duke@435 | 1367 | // we did we'd leap into space because the callsite needs to use the
duke@435 | 1368 | // "to interpreter" stub in order to load up the methodOop. Don't
duke@435 | 1369 | // ask me how I know this... |
duke@435 | 1370 | // |
duke@435 | 1371 | |
duke@435 | 1372 | CodeBlob* cb = CodeCache::find_blob(caller_pc); |
duke@435 | 1373 | if ( !cb->is_nmethod() || entry_point == moop->get_c2i_entry()) { |
duke@435 | 1374 | return; |
duke@435 | 1375 | } |
duke@435 | 1376 | |
duke@435 | 1377 | // There is a benign race here. We could be attempting to patch to a compiled |
duke@435 | 1378 | // entry point at the same time the callee is being deoptimized. If that is |
duke@435 | 1379 | // the case then entry_point may in fact point to a c2i and we'd patch the |
duke@435 | 1380 | // call site with the same old data. clear_code will set code() to NULL |
duke@435 | 1381 | // at the end of it. If we happen to see that NULL then we can skip trying |
duke@435 | 1382 | // to patch. If we hit the window where the callee has a c2i in the |
duke@435 | 1383 | // from_compiled_entry and the NULL isn't present yet then we lose the race |
duke@435 | 1384 | // and patch the code with the same old data. Such is life.
duke@435 | 1385 | |
duke@435 | 1386 | if (moop->code() == NULL) return; |
duke@435 | 1387 | |
duke@435 | 1388 | if (((nmethod*)cb)->is_in_use()) { |
duke@435 | 1389 | |
duke@435 | 1390 | // Expect to find a native call there (unless it was a no-inline-cache vtable dispatch)
duke@435 | 1391 | MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 1392 | if (NativeCall::is_call_before(caller_pc + frame::pc_return_offset)) { |
duke@435 | 1393 | NativeCall *call = nativeCall_before(caller_pc + frame::pc_return_offset); |
duke@435 | 1394 | // |
duke@435 | 1395 | // bug 6281185. We might get here after resolving a call site to a vanilla |
duke@435 | 1396 | // virtual call. Because the resolvee uses the verified entry it may then |
duke@435 | 1397 | // see compiled code and attempt to patch the site by calling us. This would |
duke@435 | 1398 | // then incorrectly convert the call site to optimized and it's downhill from
duke@435 | 1399 | // there. If you're lucky you'll get the assert in the bugid, if not you've |
duke@435 | 1400 | // just made a call site that could be megamorphic into a monomorphic site |
duke@435 | 1401 | // for the rest of its life! Just another racing bug in the life of |
duke@435 | 1402 | // fixup_callers_callsite ... |
duke@435 | 1403 | // |
duke@435 | 1404 | RelocIterator iter(cb, call->instruction_address(), call->next_instruction_address()); |
duke@435 | 1405 | iter.next(); |
duke@435 | 1406 | assert(iter.has_current(), "must have a reloc at java call site"); |
duke@435 | 1407 | relocInfo::relocType typ = iter.reloc()->type(); |
duke@435 | 1408 | if ( typ != relocInfo::static_call_type && |
duke@435 | 1409 | typ != relocInfo::opt_virtual_call_type && |
duke@435 | 1410 | typ != relocInfo::static_stub_type) { |
duke@435 | 1411 | return; |
duke@435 | 1412 | } |
duke@435 | 1413 | address destination = call->destination(); |
duke@435 | 1414 | if (destination != entry_point) { |
duke@435 | 1415 | CodeBlob* callee = CodeCache::find_blob(destination); |
duke@435 | 1416 | // callee == cb seems weird. It means calling interpreter thru stub. |
duke@435 | 1417 | if (callee == cb || callee->is_adapter_blob()) { |
duke@435 | 1418 | // static call or optimized virtual |
duke@435 | 1419 | if (TraceCallFixup) { |
duke@435 | 1420 | tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); |
duke@435 | 1421 | moop->print_short_name(tty); |
duke@435 | 1422 | tty->print_cr(" to " INTPTR_FORMAT, entry_point); |
duke@435 | 1423 | } |
duke@435 | 1424 | call->set_destination_mt_safe(entry_point); |
duke@435 | 1425 | } else { |
duke@435 | 1426 | if (TraceCallFixup) { |
duke@435 | 1427 | tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); |
duke@435 | 1428 | moop->print_short_name(tty); |
duke@435 | 1429 | tty->print_cr(" to " INTPTR_FORMAT, entry_point); |
duke@435 | 1430 | } |
duke@435 | 1431 | // assert is too strong; could also be resolve destinations.
duke@435 | 1432 | // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be"); |
duke@435 | 1433 | } |
duke@435 | 1434 | } else { |
duke@435 | 1435 | if (TraceCallFixup) { |
duke@435 | 1436 | tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", caller_pc); |
duke@435 | 1437 | moop->print_short_name(tty); |
duke@435 | 1438 | tty->print_cr(" to " INTPTR_FORMAT, entry_point); |
duke@435 | 1439 | } |
duke@435 | 1440 | } |
duke@435 | 1441 | } |
duke@435 | 1442 | } |
duke@435 | 1443 | |
duke@435 | 1444 | IRT_END |
duke@435 | 1445 | |
duke@435 | 1446 | |
duke@435 | 1447 | // same as JVM_Arraycopy, but called directly from compiled code |
duke@435 | 1448 | JRT_ENTRY(void, SharedRuntime::slow_arraycopy_C(oopDesc* src, jint src_pos, |
duke@435 | 1449 | oopDesc* dest, jint dest_pos, |
duke@435 | 1450 | jint length, |
duke@435 | 1451 | JavaThread* thread)) { |
duke@435 | 1452 | #ifndef PRODUCT |
duke@435 | 1453 | _slow_array_copy_ctr++; |
duke@435 | 1454 | #endif |
duke@435 | 1455 | // Check if we have null pointers |
duke@435 | 1456 | if (src == NULL || dest == NULL) { |
duke@435 | 1457 | THROW(vmSymbols::java_lang_NullPointerException()); |
duke@435 | 1458 | } |
duke@435 | 1459 | // Do the copy. The casts to arrayOop are necessary for the copy_array API,
duke@435 | 1460 | // even though the copy_array API also performs dynamic checks to ensure |
duke@435 | 1461 | // that src and dest are truly arrays (and are conformable). |
duke@435 | 1462 | // The copy_array mechanism is awkward and could be removed, but |
duke@435 | 1463 | // the compilers don't call this function except as a last resort, |
duke@435 | 1464 | // so it probably doesn't matter. |
duke@435 | 1465 | Klass::cast(src->klass())->copy_array((arrayOopDesc*)src, src_pos, |
duke@435 | 1466 | (arrayOopDesc*)dest, dest_pos, |
duke@435 | 1467 | length, thread); |
duke@435 | 1468 | } |
duke@435 | 1469 | JRT_END |
duke@435 | 1470 | |
duke@435 | 1471 | char* SharedRuntime::generate_class_cast_message( |
duke@435 | 1472 | JavaThread* thread, const char* objName) { |
duke@435 | 1473 | |
duke@435 | 1474 | // Get target class name from the checkcast instruction |
duke@435 | 1475 | vframeStream vfst(thread, true); |
duke@435 | 1476 | assert(!vfst.at_end(), "Java frame must exist"); |
duke@435 | 1477 | Bytecode_checkcast* cc = Bytecode_checkcast_at( |
duke@435 | 1478 | vfst.method()->bcp_from(vfst.bci())); |
duke@435 | 1479 | Klass* targetKlass = Klass::cast(vfst.method()->constants()->klass_at( |
duke@435 | 1480 | cc->index(), thread)); |
duke@435 | 1481 | return generate_class_cast_message(objName, targetKlass->external_name()); |
duke@435 | 1482 | } |
duke@435 | 1483 | |
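// Builds the ClassCastException detail message from the two class names; with
// illustrative names the result reads "java.lang.Object cannot be cast to
// java.lang.String".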
duke@435 | 1484 | char* SharedRuntime::generate_class_cast_message( |
duke@435 | 1485 | const char* objName, const char* targetKlassName) { |
duke@435 | 1486 | const char* desc = " cannot be cast to "; |
duke@435 | 1487 | size_t msglen = strlen(objName) + strlen(desc) + strlen(targetKlassName) + 1; |
duke@435 | 1488 | |
kamg@488 | 1489 | char* message = NEW_RESOURCE_ARRAY(char, msglen); |
duke@435 | 1490 | if (NULL == message) { |
kamg@488 | 1491 | // Shouldn't happen, but don't cause even more problems if it does |
duke@435 | 1492 | message = const_cast<char*>(objName); |
duke@435 | 1493 | } else { |
duke@435 | 1494 | jio_snprintf(message, msglen, "%s%s%s", objName, desc, targetKlassName); |
duke@435 | 1495 | } |
duke@435 | 1496 | return message; |
duke@435 | 1497 | } |
duke@435 | 1498 | |
duke@435 | 1499 | JRT_LEAF(void, SharedRuntime::reguard_yellow_pages()) |
duke@435 | 1500 | (void) JavaThread::current()->reguard_stack(); |
duke@435 | 1501 | JRT_END |
duke@435 | 1502 | |
duke@435 | 1503 | |
duke@435 | 1504 | // Handles the uncommon case in locking, i.e., contention or an inflated lock. |
duke@435 | 1505 | #ifndef PRODUCT |
duke@435 | 1506 | int SharedRuntime::_monitor_enter_ctr=0; |
duke@435 | 1507 | #endif |
duke@435 | 1508 | JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread)) |
duke@435 | 1509 | oop obj(_obj); |
duke@435 | 1510 | #ifndef PRODUCT |
duke@435 | 1511 | _monitor_enter_ctr++; // monitor enter slow |
duke@435 | 1512 | #endif |
duke@435 | 1513 | if (PrintBiasedLockingStatistics) { |
duke@435 | 1514 | Atomic::inc(BiasedLocking::slow_path_entry_count_addr()); |
duke@435 | 1515 | } |
duke@435 | 1516 | Handle h_obj(THREAD, obj); |
duke@435 | 1517 | if (UseBiasedLocking) { |
duke@435 | 1518 | // Retry fast entry if bias is revoked to avoid unnecessary inflation |
duke@435 | 1519 | ObjectSynchronizer::fast_enter(h_obj, lock, true, CHECK); |
duke@435 | 1520 | } else { |
duke@435 | 1521 | ObjectSynchronizer::slow_enter(h_obj, lock, CHECK); |
duke@435 | 1522 | } |
duke@435 | 1523 | assert(!HAS_PENDING_EXCEPTION, "Should have no exception here"); |
duke@435 | 1524 | JRT_END |
duke@435 | 1525 | |
duke@435 | 1526 | #ifndef PRODUCT |
duke@435 | 1527 | int SharedRuntime::_monitor_exit_ctr=0; |
duke@435 | 1528 | #endif |
duke@435 | 1529 | // Handles the uncommon cases of monitor unlocking in compiled code |
duke@435 | 1530 | JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock)) |
duke@435 | 1531 | oop obj(_obj); |
duke@435 | 1532 | #ifndef PRODUCT |
duke@435 | 1533 | _monitor_exit_ctr++; // monitor exit slow |
duke@435 | 1534 | #endif |
duke@435 | 1535 | Thread* THREAD = JavaThread::current(); |
duke@435 | 1536 | // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore;
duke@435 | 1537 | // testing was never able to fire the assert that guarded it, so I have removed it.
duke@435 | 1538 | assert(!HAS_PENDING_EXCEPTION, "Do we need code below anymore?"); |
duke@435 | 1539 | #undef MIGHT_HAVE_PENDING |
duke@435 | 1540 | #ifdef MIGHT_HAVE_PENDING |
duke@435 | 1541 | // Save and restore any pending_exception around the exception mark. |
duke@435 | 1542 | // While the slow_exit must not throw an exception, we could come into |
duke@435 | 1543 | // this routine with one set. |
duke@435 | 1544 | oop pending_excep = NULL; |
duke@435 | 1545 | const char* pending_file; |
duke@435 | 1546 | int pending_line; |
duke@435 | 1547 | if (HAS_PENDING_EXCEPTION) { |
duke@435 | 1548 | pending_excep = PENDING_EXCEPTION; |
duke@435 | 1549 | pending_file = THREAD->exception_file(); |
duke@435 | 1550 | pending_line = THREAD->exception_line(); |
duke@435 | 1551 | CLEAR_PENDING_EXCEPTION; |
duke@435 | 1552 | } |
duke@435 | 1553 | #endif /* MIGHT_HAVE_PENDING */ |
duke@435 | 1554 | |
duke@435 | 1555 | { |
duke@435 | 1556 | // Exit must be non-blocking, and therefore no exceptions can be thrown. |
duke@435 | 1557 | EXCEPTION_MARK; |
duke@435 | 1558 | ObjectSynchronizer::slow_exit(obj, lock, THREAD); |
duke@435 | 1559 | } |
duke@435 | 1560 | |
duke@435 | 1561 | #ifdef MIGHT_HAVE_PENDING |
duke@435 | 1562 | if (pending_excep != NULL) { |
duke@435 | 1563 | THREAD->set_pending_exception(pending_excep, pending_file, pending_line); |
duke@435 | 1564 | } |
duke@435 | 1565 | #endif /* MIGHT_HAVE_PENDING */ |
duke@435 | 1566 | JRT_END |
duke@435 | 1567 | |
duke@435 | 1568 | #ifndef PRODUCT |
duke@435 | 1569 | |
duke@435 | 1570 | void SharedRuntime::print_statistics() { |
duke@435 | 1571 | ttyLocker ttyl; |
duke@435 | 1572 | if (xtty != NULL) xtty->head("statistics type='SharedRuntime'"); |
duke@435 | 1573 | |
duke@435 | 1574 | if (_monitor_enter_ctr ) tty->print_cr("%5d monitor enter slow", _monitor_enter_ctr); |
duke@435 | 1575 | if (_monitor_exit_ctr ) tty->print_cr("%5d monitor exit slow", _monitor_exit_ctr); |
duke@435 | 1576 | if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr); |
duke@435 | 1577 | |
duke@435 | 1578 | SharedRuntime::print_ic_miss_histogram(); |
duke@435 | 1579 | |
duke@435 | 1580 | if (CountRemovableExceptions) { |
duke@435 | 1581 | if (_nof_removable_exceptions > 0) { |
duke@435 | 1582 | Unimplemented(); // this counter is not yet incremented |
duke@435 | 1583 | tty->print_cr("Removable exceptions: %d", _nof_removable_exceptions); |
duke@435 | 1584 | } |
duke@435 | 1585 | } |
duke@435 | 1586 | |
duke@435 | 1587 | // Dump the JRT_ENTRY counters |
duke@435 | 1588 | if( _new_instance_ctr ) tty->print_cr("%5d new instance requires GC", _new_instance_ctr); |
duke@435 | 1589 | if( _new_array_ctr ) tty->print_cr("%5d new array requires GC", _new_array_ctr); |
duke@435 | 1590 | if( _multi1_ctr ) tty->print_cr("%5d multianewarray 1 dim", _multi1_ctr); |
duke@435 | 1591 | if( _multi2_ctr ) tty->print_cr("%5d multianewarray 2 dim", _multi2_ctr); |
duke@435 | 1592 | if( _multi3_ctr ) tty->print_cr("%5d multianewarray 3 dim", _multi3_ctr); |
duke@435 | 1593 | if( _multi4_ctr ) tty->print_cr("%5d multianewarray 4 dim", _multi4_ctr); |
duke@435 | 1594 | if( _multi5_ctr ) tty->print_cr("%5d multianewarray 5 dim", _multi5_ctr); |
duke@435 | 1595 | |
duke@435 | 1596 | tty->print_cr("%5d inline cache miss in compiled", _ic_miss_ctr ); |
duke@435 | 1597 | tty->print_cr("%5d wrong method", _wrong_method_ctr ); |
duke@435 | 1598 | tty->print_cr("%5d unresolved static call site", _resolve_static_ctr ); |
duke@435 | 1599 | tty->print_cr("%5d unresolved virtual call site", _resolve_virtual_ctr ); |
duke@435 | 1600 | tty->print_cr("%5d unresolved opt virtual call site", _resolve_opt_virtual_ctr ); |
duke@435 | 1601 | |
duke@435 | 1602 | if( _mon_enter_stub_ctr ) tty->print_cr("%5d monitor enter stub", _mon_enter_stub_ctr ); |
duke@435 | 1603 | if( _mon_exit_stub_ctr ) tty->print_cr("%5d monitor exit stub", _mon_exit_stub_ctr ); |
duke@435 | 1604 | if( _mon_enter_ctr ) tty->print_cr("%5d monitor enter slow", _mon_enter_ctr ); |
duke@435 | 1605 | if( _mon_exit_ctr ) tty->print_cr("%5d monitor exit slow", _mon_exit_ctr ); |
duke@435 | 1606 | if( _partial_subtype_ctr) tty->print_cr("%5d slow partial subtype", _partial_subtype_ctr ); |
duke@435 | 1607 | if( _jbyte_array_copy_ctr ) tty->print_cr("%5d byte array copies", _jbyte_array_copy_ctr ); |
duke@435 | 1608 | if( _jshort_array_copy_ctr ) tty->print_cr("%5d short array copies", _jshort_array_copy_ctr ); |
duke@435 | 1609 | if( _jint_array_copy_ctr ) tty->print_cr("%5d int array copies", _jint_array_copy_ctr ); |
duke@435 | 1610 | if( _jlong_array_copy_ctr ) tty->print_cr("%5d long array copies", _jlong_array_copy_ctr ); |
duke@435 | 1611 | if( _oop_array_copy_ctr ) tty->print_cr("%5d oop array copies", _oop_array_copy_ctr ); |
duke@435 | 1612 | if( _checkcast_array_copy_ctr ) tty->print_cr("%5d checkcast array copies", _checkcast_array_copy_ctr ); |
duke@435 | 1613 | if( _unsafe_array_copy_ctr ) tty->print_cr("%5d unsafe array copies", _unsafe_array_copy_ctr ); |
duke@435 | 1614 | if( _generic_array_copy_ctr ) tty->print_cr("%5d generic array copies", _generic_array_copy_ctr ); |
duke@435 | 1615 | if( _slow_array_copy_ctr ) tty->print_cr("%5d slow array copies", _slow_array_copy_ctr ); |
duke@435 | 1616 | if( _find_handler_ctr ) tty->print_cr("%5d find exception handler", _find_handler_ctr ); |
duke@435 | 1617 | if( _rethrow_ctr ) tty->print_cr("%5d rethrow handler", _rethrow_ctr ); |
duke@435 | 1618 | |
duke@435 | 1619 | if (xtty != NULL) xtty->tail("statistics"); |
duke@435 | 1620 | } |
duke@435 | 1621 | |
duke@435 | 1622 | inline double percent(int x, int y) { |
duke@435 | 1623 | return 100.0 * x / MAX2(y, 1); |
duke@435 | 1624 | } |
duke@435 | 1625 | |
duke@435 | 1626 | class MethodArityHistogram { |
duke@435 | 1627 | public: |
duke@435 | 1628 | enum { MAX_ARITY = 256 }; |
duke@435 | 1629 | private: |
duke@435 | 1630 | static int _arity_histogram[MAX_ARITY]; // histogram of #args |
duke@435 | 1631 | static int _size_histogram[MAX_ARITY]; // histogram of arg size in words |
duke@435 | 1632 | static int _max_arity; // max. arity seen |
duke@435 | 1633 | static int _max_size; // max. arg size seen |
duke@435 | 1634 | |
duke@435 | 1635 | static void add_method_to_histogram(nmethod* nm) { |
duke@435 | 1636 | methodOop m = nm->method(); |
duke@435 | 1637 | ArgumentCount args(m->signature()); |
duke@435 | 1638 | int arity = args.size() + (m->is_static() ? 0 : 1); |
duke@435 | 1639 | int argsize = m->size_of_parameters(); |
duke@435 | 1640 | arity = MIN2(arity, MAX_ARITY-1); |
duke@435 | 1641 | argsize = MIN2(argsize, MAX_ARITY-1); |
duke@435 | 1642 | int count = nm->method()->compiled_invocation_count(); |
duke@435 | 1643 | _arity_histogram[arity] += count; |
duke@435 | 1644 | _size_histogram[argsize] += count; |
duke@435 | 1645 | _max_arity = MAX2(_max_arity, arity); |
duke@435 | 1646 | _max_size = MAX2(_max_size, argsize); |
duke@435 | 1647 | } |
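  // For example, a non-static int m(long, Object) is recorded with arity 3
  // (receiver plus two arguments) and parameter size 4 words (1 + 2 + 1),
  // each weighted by the method's compiled invocation count.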
duke@435 | 1648 | |
duke@435 | 1649 | void print_histogram_helper(int n, int* histo, const char* name) { |
duke@435 | 1650 | const int N = MIN2(5, n); |
duke@435 | 1651 | tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); |
duke@435 | 1652 | double sum = 0; |
duke@435 | 1653 | double weighted_sum = 0; |
duke@435 | 1654 | int i; |
duke@435 | 1655 | for (i = 0; i <= n; i++) { sum += histo[i]; weighted_sum += i*histo[i]; } |
duke@435 | 1656 | double rest = sum; |
duke@435 | 1657 | double percent = sum / 100; |
duke@435 | 1658 | for (i = 0; i <= N; i++) { |
duke@435 | 1659 | rest -= histo[i]; |
duke@435 | 1660 | tty->print_cr("%4d: %7d (%5.1f%%)", i, histo[i], histo[i] / percent); |
duke@435 | 1661 | } |
duke@435 | 1662 | tty->print_cr("rest: %7d (%5.1f%%)", (int)rest, rest / percent);
duke@435 | 1663 | tty->print_cr("(avg. %s = %3.1f, max = %d)", name, weighted_sum / sum, n); |
duke@435 | 1664 | } |
duke@435 | 1665 | |
duke@435 | 1666 | void print_histogram() { |
duke@435 | 1667 | tty->print_cr("\nHistogram of call arity (incl. rcvr, calls to compiled methods only):"); |
duke@435 | 1668 | print_histogram_helper(_max_arity, _arity_histogram, "arity"); |
duke@435 | 1669 | tty->print_cr("\nSame for parameter size (in words):"); |
duke@435 | 1670 | print_histogram_helper(_max_size, _size_histogram, "size"); |
duke@435 | 1671 | tty->cr(); |
duke@435 | 1672 | } |
duke@435 | 1673 | |
duke@435 | 1674 | public: |
duke@435 | 1675 | MethodArityHistogram() { |
duke@435 | 1676 | MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); |
duke@435 | 1677 | _max_arity = _max_size = 0; |
duke@435 | 1678 | for (int i = 0; i < MAX_ARITY; i++) _arity_histogram[i] = _size_histogram[i] = 0;
duke@435 | 1679 | CodeCache::nmethods_do(add_method_to_histogram); |
duke@435 | 1680 | print_histogram(); |
duke@435 | 1681 | } |
duke@435 | 1682 | }; |
duke@435 | 1683 | |
duke@435 | 1684 | int MethodArityHistogram::_arity_histogram[MethodArityHistogram::MAX_ARITY]; |
duke@435 | 1685 | int MethodArityHistogram::_size_histogram[MethodArityHistogram::MAX_ARITY]; |
duke@435 | 1686 | int MethodArityHistogram::_max_arity; |
duke@435 | 1687 | int MethodArityHistogram::_max_size; |
duke@435 | 1688 | |
duke@435 | 1689 | void SharedRuntime::print_call_statistics(int comp_total) { |
duke@435 | 1690 | tty->print_cr("Calls from compiled code:"); |
duke@435 | 1691 | int total = _nof_normal_calls + _nof_interface_calls + _nof_static_calls; |
duke@435 | 1692 | int mono_c = _nof_normal_calls - _nof_optimized_calls - _nof_megamorphic_calls; |
duke@435 | 1693 | int mono_i = _nof_interface_calls - _nof_optimized_interface_calls - _nof_megamorphic_interface_calls; |
duke@435 | 1694 | tty->print_cr("\t%9d (%4.1f%%) total non-inlined ", total, percent(total, total)); |
duke@435 | 1695 | tty->print_cr("\t%9d (%4.1f%%) virtual calls ", _nof_normal_calls, percent(_nof_normal_calls, total)); |
duke@435 | 1696 | tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_calls, percent(_nof_inlined_calls, _nof_normal_calls)); |
duke@435 | 1697 | tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_calls, percent(_nof_optimized_calls, _nof_normal_calls)); |
duke@435 | 1698 | tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_c, percent(mono_c, _nof_normal_calls)); |
duke@435 | 1699 | tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_calls, percent(_nof_megamorphic_calls, _nof_normal_calls)); |
duke@435 | 1700 | tty->print_cr("\t%9d (%4.1f%%) interface calls ", _nof_interface_calls, percent(_nof_interface_calls, total)); |
duke@435 | 1701 | tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_interface_calls, percent(_nof_inlined_interface_calls, _nof_interface_calls)); |
duke@435 | 1702 | tty->print_cr("\t %9d (%3.0f%%) optimized ", _nof_optimized_interface_calls, percent(_nof_optimized_interface_calls, _nof_interface_calls)); |
duke@435 | 1703 | tty->print_cr("\t %9d (%3.0f%%) monomorphic ", mono_i, percent(mono_i, _nof_interface_calls)); |
duke@435 | 1704 | tty->print_cr("\t %9d (%3.0f%%) megamorphic ", _nof_megamorphic_interface_calls, percent(_nof_megamorphic_interface_calls, _nof_interface_calls)); |
duke@435 | 1705 | tty->print_cr("\t%9d (%4.1f%%) static/special calls", _nof_static_calls, percent(_nof_static_calls, total)); |
duke@435 | 1706 | tty->print_cr("\t %9d (%3.0f%%) inlined ", _nof_inlined_static_calls, percent(_nof_inlined_static_calls, _nof_static_calls)); |
duke@435 | 1707 | tty->cr(); |
duke@435 | 1708 | tty->print_cr("Note 1: counter updates are not MT-safe."); |
duke@435 | 1709 | tty->print_cr("Note 2: %% in major categories are relative to total non-inlined calls;"); |
duke@435 | 1710 | tty->print_cr(" %% in nested categories are relative to their category"); |
duke@435 | 1711 | tty->print_cr(" (and thus add up to more than 100%% with inlining)"); |
duke@435 | 1712 | tty->cr(); |
duke@435 | 1713 | |
duke@435 | 1714 | MethodArityHistogram h; |
duke@435 | 1715 | } |
duke@435 | 1716 | #endif |
duke@435 | 1717 | |
duke@435 | 1718 | |
duke@435 | 1719 | // --------------------------------------------------------------------------- |
duke@435 | 1720 | // Implementation of AdapterHandlerLibrary |
duke@435 | 1721 | const char* AdapterHandlerEntry::name = "I2C/C2I adapters"; |
duke@435 | 1722 | GrowableArray<uint64_t>* AdapterHandlerLibrary::_fingerprints = NULL; |
duke@435 | 1723 | GrowableArray<AdapterHandlerEntry* >* AdapterHandlerLibrary::_handlers = NULL; |
duke@435 | 1724 | const int AdapterHandlerLibrary_size = 16*K; |
duke@435 | 1725 | u_char AdapterHandlerLibrary::_buffer[AdapterHandlerLibrary_size + 32]; |
duke@435 | 1726 | |
duke@435 | 1727 | void AdapterHandlerLibrary::initialize() { |
duke@435 | 1728 | if (_fingerprints != NULL) return; |
duke@435 | 1729 | _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true); |
duke@435 | 1730 | _handlers = new(ResourceObj::C_HEAP)GrowableArray<AdapterHandlerEntry*>(32, true); |
duke@435 | 1731 | // Index 0 reserved for the slow path handler |
duke@435 | 1732 | _fingerprints->append(0/*the never-allowed 0 fingerprint*/); |
duke@435 | 1733 | _handlers->append(NULL); |
duke@435 | 1734 | |
duke@435 | 1735 | // Create a special handler for abstract methods. Abstract methods |
duke@435 | 1736 | // are never compiled so an i2c entry is somewhat meaningless, but |
duke@435 | 1737 | // fill it in with something appropriate just in case. Pass the handle-
duke@435 | 1738 | // wrong-method stub for the c2i transitions.
duke@435 | 1739 | address wrong_method = SharedRuntime::get_handle_wrong_method_stub(); |
duke@435 | 1740 | _fingerprints->append(0/*the never-allowed 0 fingerprint*/); |
duke@435 | 1741 | assert(_handlers->length() == AbstractMethodHandler, "in wrong slot"); |
duke@435 | 1742 | _handlers->append(new AdapterHandlerEntry(StubRoutines::throw_AbstractMethodError_entry(), |
duke@435 | 1743 | wrong_method, wrong_method)); |
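  // After initialization the parallel arrays hold two fixed entries: index 0 is
  // the reserved slow-path slot (fingerprint 0, NULL handler) and the next slot,
  // index AbstractMethodHandler, is the abstract-method entry created above.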
duke@435 | 1744 | } |
duke@435 | 1745 | |
duke@435 | 1746 | int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) { |
duke@435 | 1747 | // Use customized signature handler. Need to lock around updates to the |
duke@435 | 1748 | // _fingerprints array (it is not safe for concurrent readers and a single |
duke@435 | 1749 | // writer: this can be fixed if it becomes a problem). |
duke@435 | 1750 | |
duke@435 | 1751 | // Get the address of the ic_miss handlers before we grab the |
duke@435 | 1752 | // AdapterHandlerLibrary_lock. This fixes bug 6236259 which |
duke@435 | 1753 | // was caused by the initialization of the stubs happening |
duke@435 | 1754 | // while we held the lock and then notifying jvmti while |
duke@435 | 1755 | // holding it. This just forces the initialization to be a little |
duke@435 | 1756 | // earlier. |
duke@435 | 1757 | address ic_miss = SharedRuntime::get_ic_miss_stub(); |
duke@435 | 1758 | assert(ic_miss != NULL, "must have handler"); |
duke@435 | 1759 | |
duke@435 | 1760 | int result; |
duke@435 | 1761 | BufferBlob *B = NULL; |
duke@435 | 1762 | uint64_t fingerprint; |
duke@435 | 1763 | { |
duke@435 | 1764 | MutexLocker mu(AdapterHandlerLibrary_lock); |
duke@435 | 1765 | // make sure data structure is initialized |
duke@435 | 1766 | initialize(); |
duke@435 | 1767 | |
duke@435 | 1768 | if (method->is_abstract()) { |
duke@435 | 1769 | return AbstractMethodHandler; |
duke@435 | 1770 | } |
duke@435 | 1771 | |
duke@435 | 1772 | // Lookup method signature's fingerprint |
duke@435 | 1773 | fingerprint = Fingerprinter(method).fingerprint(); |
duke@435 | 1774 | assert( fingerprint != CONST64( 0), "no zero fingerprints allowed" ); |
duke@435 | 1775 | // Fingerprints are small fixed-size condensed representations of |
duke@435 | 1776 | // signatures. If the signature is too large, it won't fit in a |
duke@435 | 1777 | // fingerprint. Signatures which cannot support a fingerprint get a new i2c |
duke@435 | 1778 | // adapter gen'd each time, instead of searching the cache for one. This -1 |
duke@435 | 1779 | // game can be avoided if I compared signatures instead of using |
duke@435 | 1780 | // fingerprints. However, -1 fingerprints are very rare. |
duke@435 | 1781 | if( fingerprint != UCONST64(-1) ) { // If this is a cache-able fingerprint |
duke@435 | 1782 | // Turns out i2c adapters do not care what the return value is. Mask it |
duke@435 | 1783 | // out so signatures that only differ in return type will share the same |
duke@435 | 1784 | // adapter. |
duke@435 | 1785 | fingerprint &= ~(SignatureIterator::result_feature_mask << SignatureIterator::static_feature_size); |
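      // For example, methods with signatures (I)I and (I)V (and the same
      // staticness) now carry identical masked fingerprints and will share a
      // single i2c/c2i adapter pair.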
duke@435 | 1786 | // Search for a prior existing i2c/c2i adapter |
duke@435 | 1787 | int index = _fingerprints->find(fingerprint); |
duke@435 | 1788 | if( index >= 0 ) return index; // Found existing handlers? |
duke@435 | 1789 | } else { |
duke@435 | 1790 | // Annoyingly, I end up adding -1 fingerprints to the array of handlers, |
duke@435 | 1791 | // because I need a unique handler index. It cannot be scanned for |
duke@435 | 1792 | // because all -1's look alike. Instead, the matching index is passed out |
duke@435 | 1793 | // and immediately used to collect the 2 return values (the c2i and i2c |
duke@435 | 1794 | // adapters). |
duke@435 | 1795 | } |
duke@435 | 1796 | |
duke@435 | 1797 | // Create I2C & C2I handlers |
duke@435 | 1798 | ResourceMark rm; |
duke@435 | 1799 | // Improve alignment slightly |
duke@435 | 1800 | u_char *buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); |
duke@435 | 1801 | CodeBuffer buffer(buf, AdapterHandlerLibrary_size); |
duke@435 | 1802 | short buffer_locs[20]; |
duke@435 | 1803 | buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, |
duke@435 | 1804 | sizeof(buffer_locs)/sizeof(relocInfo)); |
duke@435 | 1805 | MacroAssembler _masm(&buffer); |
duke@435 | 1806 | |
duke@435 | 1807 | // Fill in the signature array, for the calling-convention call. |
duke@435 | 1808 | int total_args_passed = method->size_of_parameters(); // All args on stack |
duke@435 | 1809 | |
duke@435 | 1810 | BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed); |
duke@435 | 1811 | VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed); |
duke@435 | 1812 | int i=0; |
duke@435 | 1813 | if( !method->is_static() ) // Pass in receiver first |
duke@435 | 1814 | sig_bt[i++] = T_OBJECT; |
duke@435 | 1815 | for( SignatureStream ss(method->signature()); !ss.at_return_type(); ss.next()) { |
duke@435 | 1816 | sig_bt[i++] = ss.type(); // Collect remaining bits of signature |
duke@435 | 1817 | if( ss.type() == T_LONG || ss.type() == T_DOUBLE ) |
duke@435 | 1818 | sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots |
duke@435 | 1819 | } |
duke@435 | 1820 | assert( i==total_args_passed, "" ); |
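    // For instance, an instance method with signature (JLjava/lang/String;)V
    // yields sig_bt = { T_OBJECT /*receiver*/, T_LONG, T_VOID, T_OBJECT } and
    // total_args_passed == 4, since longs and doubles occupy two Java slots.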
duke@435 | 1821 | |
duke@435 | 1822 | // Now get the re-packed compiled-Java layout. |
duke@435 | 1823 | int comp_args_on_stack; |
duke@435 | 1824 | |
duke@435 | 1825 | // Get a description of the compiled java calling convention and the largest used (VMReg) stack slot usage |
duke@435 | 1826 | comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); |
duke@435 | 1827 | |
duke@435 | 1828 | AdapterHandlerEntry* entry = SharedRuntime::generate_i2c2i_adapters(&_masm, |
duke@435 | 1829 | total_args_passed, |
duke@435 | 1830 | comp_args_on_stack, |
duke@435 | 1831 | sig_bt, |
duke@435 | 1832 | regs); |
duke@435 | 1833 | |
duke@435 | 1834 | B = BufferBlob::create(AdapterHandlerEntry::name, &buffer); |
kvn@463 | 1835 | if (B == NULL) { |
kvn@463 | 1836 | // CodeCache is full, disable compilation |
kvn@463 | 1837 | // Ought to log this but compile log is only per compile thread |
kvn@463 | 1838 | // and we're some nondescript Java thread.
kvn@463 | 1839 | UseInterpreter = true; |
kvn@463 | 1840 | if (UseCompiler || AlwaysCompileLoopMethods ) { |
kvn@463 | 1841 | #ifndef PRODUCT |
kvn@463 | 1842 | warning("CodeCache is full. Compiler has been disabled"); |
kvn@463 | 1843 | if (CompileTheWorld || ExitOnFullCodeCache) { |
kvn@463 | 1844 | before_exit(JavaThread::current()); |
kvn@463 | 1845 | exit_globals(); // will delete tty |
kvn@463 | 1846 | vm_direct_exit(CompileTheWorld ? 0 : 1); |
kvn@463 | 1847 | } |
kvn@463 | 1848 | #endif |
kvn@463 | 1849 | UseCompiler = false; |
kvn@463 | 1850 | AlwaysCompileLoopMethods = false; |
kvn@463 | 1851 | } |
kvn@463 | 1852 | return 0; // Out of CodeCache space (_handlers[0] == NULL) |
kvn@463 | 1853 | } |
duke@435 | 1854 | entry->relocate(B->instructions_begin()); |
duke@435 | 1855 | #ifndef PRODUCT |
duke@435 | 1856 | // debugging support
duke@435 | 1857 | if (PrintAdapterHandlers) { |
duke@435 | 1858 | tty->cr(); |
duke@435 | 1859 | tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = 0x%llx, %d bytes generated)", |
duke@435 | 1860 | _handlers->length(), (method->is_static() ? "static" : "receiver"), |
duke@435 | 1861 | method->signature()->as_C_string(), fingerprint, buffer.code_size() ); |
duke@435 | 1862 | tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); |
duke@435 | 1863 | Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + buffer.code_size()); |
duke@435 | 1864 | } |
duke@435 | 1865 | #endif |
duke@435 | 1866 | |
duke@435 | 1867 | // add handlers to library |
duke@435 | 1868 | _fingerprints->append(fingerprint); |
duke@435 | 1869 | _handlers->append(entry); |
duke@435 | 1870 | // set handler index |
duke@435 | 1871 | assert(_fingerprints->length() == _handlers->length(), "sanity check"); |
duke@435 | 1872 | result = _fingerprints->length() - 1; |
duke@435 | 1873 | } |
duke@435 | 1874 | // Outside of the lock |
duke@435 | 1875 | if (B != NULL) { |
duke@435 | 1876 | char blob_id[256]; |
duke@435 | 1877 | jio_snprintf(blob_id, |
duke@435 | 1878 | sizeof(blob_id), |
duke@435 | 1879 | "%s(" PTR64_FORMAT ")@" PTR_FORMAT, |
duke@435 | 1880 | AdapterHandlerEntry::name, |
duke@435 | 1881 | fingerprint, |
duke@435 | 1882 | B->instructions_begin()); |
duke@435 | 1883 | VTune::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); |
duke@435 | 1884 | Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); |
duke@435 | 1885 | |
duke@435 | 1886 | if (JvmtiExport::should_post_dynamic_code_generated()) { |
duke@435 | 1887 | JvmtiExport::post_dynamic_code_generated(blob_id, |
duke@435 | 1888 | B->instructions_begin(), |
duke@435 | 1889 | B->instructions_end()); |
duke@435 | 1890 | } |
duke@435 | 1891 | } |
duke@435 | 1892 | return result; |
duke@435 | 1893 | } |
duke@435 | 1894 | |
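// Called after the freshly generated adapter has been copied out of the shared
// _buffer into its own BufferBlob (see the entry->relocate(B->instructions_begin())
// call above). Shifts the three recorded entry points by the distance the code
// moved, assuming _i2c_entry marks the start of the generated code.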
duke@435 | 1895 | void AdapterHandlerEntry::relocate(address new_base) { |
duke@435 | 1896 | ptrdiff_t delta = new_base - _i2c_entry; |
duke@435 | 1897 | _i2c_entry += delta; |
duke@435 | 1898 | _c2i_entry += delta; |
duke@435 | 1899 | _c2i_unverified_entry += delta; |
duke@435 | 1900 | } |
duke@435 | 1901 | |
duke@435 | 1902 | // Create a native wrapper for this native method. The wrapper converts the |
duke@435 | 1903 | // java compiled calling convention to the native convention, handlizes |
duke@435 | 1904 | // arguments, and transitions to native. On return from the native code we transition
duke@435 | 1905 | // back to Java, blocking if a safepoint is in progress.
duke@435 | 1906 | nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) { |
duke@435 | 1907 | ResourceMark rm; |
duke@435 | 1908 | nmethod* nm = NULL; |
duke@435 | 1909 | |
duke@435 | 1910 | if (PrintCompilation) { |
duke@435 | 1911 | ttyLocker ttyl; |
duke@435 | 1912 | tty->print("--- n%s ", (method->is_synchronized() ? "s" : " ")); |
duke@435 | 1913 | method->print_short_name(tty); |
duke@435 | 1914 | if (method->is_static()) { |
duke@435 | 1915 | tty->print(" (static)"); |
duke@435 | 1916 | } |
duke@435 | 1917 | tty->cr(); |
duke@435 | 1918 | } |
duke@435 | 1919 | |
duke@435 | 1920 | assert(method->has_native_function(), "must have something valid to call!"); |
duke@435 | 1921 | |
duke@435 | 1922 | { |
duke@435 | 1923 | // perform the work while holding the lock, but perform any printing outside the lock |
duke@435 | 1924 | MutexLocker mu(AdapterHandlerLibrary_lock); |
duke@435 | 1925 | // See if somebody beat us to it |
duke@435 | 1926 | nm = method->code(); |
duke@435 | 1927 | if (nm) { |
duke@435 | 1928 | return nm; |
duke@435 | 1929 | } |
duke@435 | 1930 | |
duke@435 | 1931 | // Improve alignment slightly |
duke@435 | 1932 | u_char* buf = (u_char*)(((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); |
duke@435 | 1933 | CodeBuffer buffer(buf, AdapterHandlerLibrary_size); |
duke@435 | 1934 | // Need a few relocation entries |
duke@435 | 1935 | double locs_buf[20]; |
duke@435 | 1936 | buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); |
duke@435 | 1937 | MacroAssembler _masm(&buffer); |
duke@435 | 1938 | |
duke@435 | 1939 | // Fill in the signature array, for the calling-convention call. |
duke@435 | 1940 | int total_args_passed = method->size_of_parameters(); |
duke@435 | 1941 | |
duke@435 | 1942 | BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType,total_args_passed); |
duke@435 | 1943 | VMRegPair * regs = NEW_RESOURCE_ARRAY(VMRegPair ,total_args_passed); |
duke@435 | 1944 | int i=0; |
duke@435 | 1945 | if( !method->is_static() ) // Pass in receiver first |
duke@435 | 1946 | sig_bt[i++] = T_OBJECT; |
duke@435 | 1947 | SignatureStream ss(method->signature()); |
duke@435 | 1948 | for( ; !ss.at_return_type(); ss.next()) { |
duke@435 | 1949 | sig_bt[i++] = ss.type(); // Collect remaining bits of signature |
duke@435 | 1950 | if( ss.type() == T_LONG || ss.type() == T_DOUBLE ) |
duke@435 | 1951 | sig_bt[i++] = T_VOID; // Longs & doubles take 2 Java slots |
duke@435 | 1952 | } |
duke@435 | 1953 | assert( i==total_args_passed, "" ); |
duke@435 | 1954 | BasicType ret_type = ss.type(); |
duke@435 | 1955 | |
duke@435 | 1956 | // Now get the compiled-Java layout as input arguments |
duke@435 | 1957 | int comp_args_on_stack; |
duke@435 | 1958 | comp_args_on_stack = SharedRuntime::java_calling_convention(sig_bt, regs, total_args_passed, false); |
duke@435 | 1959 | |
duke@435 | 1960 | // Generate the compiled-to-native wrapper code |
duke@435 | 1961 | nm = SharedRuntime::generate_native_wrapper(&_masm, |
duke@435 | 1962 | method, |
duke@435 | 1963 | total_args_passed, |
duke@435 | 1964 | comp_args_on_stack, |
duke@435 | 1965 | sig_bt,regs, |
duke@435 | 1966 | ret_type); |
duke@435 | 1967 | } |
duke@435 | 1968 | |
duke@435 | 1969 | // Must unlock before calling set_code |
duke@435 | 1970 | // Install the generated code. |
duke@435 | 1971 | if (nm != NULL) { |
duke@435 | 1972 | method->set_code(method, nm); |
duke@435 | 1973 | nm->post_compiled_method_load_event(); |
duke@435 | 1974 | } else { |
duke@435 | 1975 | // CodeCache is full, disable compilation |
duke@435 | 1976 | // Ought to log this but compile log is only per compile thread |
duke@435 | 1977 | // and we're some nondescript Java thread.
duke@435 | 1978 | UseInterpreter = true; |
duke@435 | 1979 | if (UseCompiler || AlwaysCompileLoopMethods ) { |
duke@435 | 1980 | #ifndef PRODUCT |
duke@435 | 1981 | warning("CodeCache is full. Compiler has been disabled"); |
duke@435 | 1982 | if (CompileTheWorld || ExitOnFullCodeCache) { |
duke@435 | 1983 | before_exit(JavaThread::current()); |
duke@435 | 1984 | exit_globals(); // will delete tty |
duke@435 | 1985 | vm_direct_exit(CompileTheWorld ? 0 : 1); |
duke@435 | 1986 | } |
duke@435 | 1987 | #endif |
duke@435 | 1988 | UseCompiler = false; |
duke@435 | 1989 | AlwaysCompileLoopMethods = false; |
duke@435 | 1990 | } |
duke@435 | 1991 | } |
duke@435 | 1992 | return nm; |
duke@435 | 1993 | } |
duke@435 | 1994 | |
kamg@551 | 1995 | #ifdef HAVE_DTRACE_H |
kamg@551 | 1996 | // Create a dtrace nmethod for this method. The wrapper converts the |
kamg@551 | 1997 | // java compiled calling convention to the native convention, makes a dummy call |
kamg@551 | 1998 | // (actually nops for the size of the call instruction, which become a trap if
kamg@551 | 1999 | // the probe is enabled), then returns to the caller. Since this all looks like a
kamg@551 | 2000 | // leaf call, no thread transition is needed.
kamg@551 | 2001 | |
kamg@551 | 2002 | nmethod *AdapterHandlerLibrary::create_dtrace_nmethod(methodHandle method) { |
kamg@551 | 2003 | ResourceMark rm; |
kamg@551 | 2004 | nmethod* nm = NULL; |
kamg@551 | 2005 | |
kamg@551 | 2006 | if (PrintCompilation) { |
kamg@551 | 2007 | ttyLocker ttyl; |
kamg@551 | 2008 | tty->print("--- n%s ", (method->is_synchronized() ? "s" : " "));
kamg@551 | 2009 | method->print_short_name(tty); |
kamg@551 | 2010 | if (method->is_static()) { |
kamg@551 | 2011 | tty->print(" (static)"); |
kamg@551 | 2012 | } |
kamg@551 | 2013 | tty->cr(); |
kamg@551 | 2014 | } |
kamg@551 | 2015 | |
kamg@551 | 2016 | { |
kamg@551 | 2017 | // perform the work while holding the lock, but perform any printing |
kamg@551 | 2018 | // outside the lock |
kamg@551 | 2019 | MutexLocker mu(AdapterHandlerLibrary_lock); |
kamg@551 | 2020 | // See if somebody beat us to it |
kamg@551 | 2021 | nm = method->code(); |
kamg@551 | 2022 | if (nm) { |
kamg@551 | 2023 | return nm; |
kamg@551 | 2024 | } |
kamg@551 | 2025 | |
kamg@551 | 2026 | // Improve alignment slightly |
kamg@551 | 2027 | u_char* buf = (u_char*) |
kamg@551 | 2028 | (((intptr_t)_buffer + CodeEntryAlignment-1) & ~(CodeEntryAlignment-1)); |
kamg@551 | 2029 | CodeBuffer buffer(buf, AdapterHandlerLibrary_size); |
kamg@551 | 2030 | // Need a few relocation entries |
kamg@551 | 2031 | double locs_buf[20]; |
kamg@551 | 2032 | buffer.insts()->initialize_shared_locs( |
kamg@551 | 2033 | (relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); |
kamg@551 | 2034 | MacroAssembler _masm(&buffer); |
kamg@551 | 2035 | |
kamg@551 | 2036 | // Generate the compiled-to-native wrapper code |
kamg@551 | 2037 | nm = SharedRuntime::generate_dtrace_nmethod(&_masm, method); |
kamg@551 | 2038 | } |
kamg@551 | 2039 | return nm; |
kamg@551 | 2040 | } |
kamg@551 | 2041 | |
kamg@551 | 2042 | // The dtrace nmethod needs to convert a java.lang.String into a UTF-8 string.
kamg@551 | 2043 | void SharedRuntime::get_utf(oopDesc* src, address dst) { |
kamg@551 | 2044 | typeArrayOop jlsValue = java_lang_String::value(src); |
kamg@551 | 2045 | int jlsOffset = java_lang_String::offset(src); |
kamg@551 | 2046 | int jlsLen = java_lang_String::length(src); |
kamg@551 | 2047 | jchar* jlsPos = (jlsLen == 0) ? NULL : |
kamg@551 | 2048 | jlsValue->char_at_addr(jlsOffset); |
kamg@551 | 2049 | (void) UNICODE::as_utf8(jlsPos, jlsLen, (char *)dst, max_dtrace_string_size); |
kamg@551 | 2050 | } |
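// Illustrative use only (the real call sites live in the platform-specific wrapper
// emitted by generate_dtrace_nmethod): the wrapper reserves a fixed-size buffer and
// does something like
//   SharedRuntime::get_utf(string_oop, (address)utf8_buf);
// so that at most max_dtrace_string_size bytes of UTF-8 reach the probe.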
kamg@551 | 2051 | #endif // HAVE_DTRACE_H
kamg@551 | 2052 | |
duke@435 | 2053 | // ------------------------------------------------------------------------- |
duke@435 | 2054 | // Java-Java calling convention |
duke@435 | 2055 | // (what you use when Java calls Java) |
duke@435 | 2056 | |
duke@435 | 2057 | //------------------------------name_for_receiver---------------------------------- |
duke@435 | 2058 | // For a given signature, return the VMReg for parameter 0. |
duke@435 | 2059 | VMReg SharedRuntime::name_for_receiver() { |
duke@435 | 2060 | VMRegPair regs; |
duke@435 | 2061 | BasicType sig_bt = T_OBJECT; |
duke@435 | 2062 | (void) java_calling_convention(&sig_bt, ®s, 1, true); |
duke@435 | 2063 | // Return argument 0 register. In the LP64 build pointers |
duke@435 | 2064 | // take 2 registers, but the VM wants only the 'main' name. |
duke@435 | 2065 | return regs.first(); |
duke@435 | 2066 | } |
duke@435 | 2067 | |
duke@435 | 2068 | VMRegPair *SharedRuntime::find_callee_arguments(symbolOop sig, bool is_static, int* arg_size) { |
duke@435 | 2069 | // This method returns a data structure allocated as a
duke@435 | 2070 | // ResourceObject, so do not put any ResourceMarks in here.
duke@435 | 2071 | char *s = sig->as_C_string(); |
duke@435 | 2072 | int len = (int)strlen(s); |
duke@435 | 2073 | s++; len--; // Skip opening paren
duke@435 | 2074 | char *t = s+len; |
duke@435 | 2075 | while( *(--t) != ')' ) ; // Find close paren |
duke@435 | 2076 | |
duke@435 | 2077 | BasicType *sig_bt = NEW_RESOURCE_ARRAY( BasicType, 256 ); |
duke@435 | 2078 | VMRegPair *regs = NEW_RESOURCE_ARRAY( VMRegPair, 256 ); |
duke@435 | 2079 | int cnt = 0; |
duke@435 | 2080 | if (!is_static) { |
duke@435 | 2081 | sig_bt[cnt++] = T_OBJECT; // Receiver is argument 0; not in signature |
duke@435 | 2082 | } |
duke@435 | 2083 | |
duke@435 | 2084 | while( s < t ) { |
duke@435 | 2085 | switch( *s++ ) { // Switch on signature character |
duke@435 | 2086 | case 'B': sig_bt[cnt++] = T_BYTE; break; |
duke@435 | 2087 | case 'C': sig_bt[cnt++] = T_CHAR; break; |
duke@435 | 2088 | case 'D': sig_bt[cnt++] = T_DOUBLE; sig_bt[cnt++] = T_VOID; break; |
duke@435 | 2089 | case 'F': sig_bt[cnt++] = T_FLOAT; break; |
duke@435 | 2090 | case 'I': sig_bt[cnt++] = T_INT; break; |
duke@435 | 2091 | case 'J': sig_bt[cnt++] = T_LONG; sig_bt[cnt++] = T_VOID; break; |
duke@435 | 2092 | case 'S': sig_bt[cnt++] = T_SHORT; break; |
duke@435 | 2093 | case 'Z': sig_bt[cnt++] = T_BOOLEAN; break; |
duke@435 | 2094 | case 'V': sig_bt[cnt++] = T_VOID; break; |
duke@435 | 2095 | case 'L': // Oop |
duke@435 | 2096 | while( *s++ != ';' ) ; // Skip signature |
duke@435 | 2097 | sig_bt[cnt++] = T_OBJECT; |
duke@435 | 2098 | break; |
duke@435 | 2099 | case '[': { // Array |
duke@435 | 2100 | do { // Skip optional size |
duke@435 | 2101 | while( *s >= '0' && *s <= '9' ) s++; |
duke@435 | 2102 | } while( *s++ == '[' ); // Nested arrays? |
duke@435 | 2103 | // Skip element type |
duke@435 | 2104 | if( s[-1] == 'L' ) |
duke@435 | 2105 | while( *s++ != ';' ) ; // Skip signature |
duke@435 | 2106 | sig_bt[cnt++] = T_ARRAY; |
duke@435 | 2107 | break; |
duke@435 | 2108 | } |
duke@435 | 2109 | default : ShouldNotReachHere(); |
duke@435 | 2110 | } |
duke@435 | 2111 | } |
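  // Worked example (illustrative): for a non-static method with signature
  //   (ILjava/lang/String;[JD)V
  // the loop above produces
  //   sig_bt = { T_OBJECT /*receiver*/, T_INT, T_OBJECT, T_ARRAY, T_DOUBLE, T_VOID }
  // with cnt == 6; the trailing T_VOID is the second half of the double.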
duke@435 | 2112 | assert( cnt < 256, "grow table size" ); |
duke@435 | 2113 | |
duke@435 | 2114 | int comp_args_on_stack; |
duke@435 | 2115 | comp_args_on_stack = java_calling_convention(sig_bt, regs, cnt, true); |
duke@435 | 2116 | |
duke@435 | 2117 | // the calling convention doesn't count out_preserve_stack_slots so |
duke@435 | 2118 | // we must add that in to get "true" stack offsets. |
duke@435 | 2119 | |
duke@435 | 2120 | if (comp_args_on_stack) { |
duke@435 | 2121 | for (int i = 0; i < cnt; i++) { |
duke@435 | 2122 | VMReg reg1 = regs[i].first(); |
duke@435 | 2123 | if( reg1->is_stack()) { |
duke@435 | 2124 | // Yuck |
duke@435 | 2125 | reg1 = reg1->bias(out_preserve_stack_slots()); |
duke@435 | 2126 | } |
duke@435 | 2127 | VMReg reg2 = regs[i].second(); |
duke@435 | 2128 | if( reg2->is_stack()) { |
duke@435 | 2129 | // Yuck |
duke@435 | 2130 | reg2 = reg2->bias(out_preserve_stack_slots()); |
duke@435 | 2131 | } |
duke@435 | 2132 | regs[i].set_pair(reg2, reg1); |
duke@435 | 2133 | } |
duke@435 | 2134 | } |
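  // Illustrative example with hypothetical numbers: if out_preserve_stack_slots()
  // returned 2, an argument the convention placed at stack slot 3 would be
  // reported here as slot 5 after the bias above.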
duke@435 | 2135 | |
duke@435 | 2136 | // results |
duke@435 | 2137 | *arg_size = cnt; |
duke@435 | 2138 | return regs; |
duke@435 | 2139 | } |
duke@435 | 2140 | |
duke@435 | 2141 | // OSR Migration Code |
duke@435 | 2142 | // |
duke@435 | 2143 | // This code is used to convert interpreter frames into compiled frames. It is
duke@435 | 2144 | // called from the very start of a compiled OSR nmethod. A temp array is
duke@435 | 2145 | // allocated to hold the interesting bits of the interpreter frame. All
duke@435 | 2146 | // active locks are inflated to allow them to move. The displaced headers and
duke@435 | 2147 | // active interpreter locals are copied into the temp buffer. Then we return
duke@435 | 2148 | // back to the compiled code. The compiled code then pops the current |
duke@435 | 2149 | // interpreter frame off the stack and pushes a new compiled frame. Then it |
duke@435 | 2150 | // copies the interpreter locals and displaced headers where it wants. |
duke@435 | 2151 | // Finally it calls back to free the temp buffer. |
duke@435 | 2152 | // |
duke@435 | 2153 | // All of this is done NOT at any Safepoint, nor is any safepoint or GC allowed. |
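//
// Layout of the temp buffer built below (derived from the copy loops that follow,
// shown here only for illustration):
//   buf[0 .. max_locals-1] : interpreter locals, buf[i] holding local (max_locals-1-i)
//   buf[max_locals .. end] : one (displaced header, object) pair per active monitor,
//                            holes in the monitor array skipped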
duke@435 | 2154 | |
duke@435 | 2155 | JRT_LEAF(intptr_t*, SharedRuntime::OSR_migration_begin( JavaThread *thread) ) |
duke@435 | 2156 | |
duke@435 | 2157 | #ifdef IA64 |
duke@435 | 2158 | ShouldNotReachHere(); // NYI |
duke@435 | 2159 | #endif /* IA64 */ |
duke@435 | 2160 | |
duke@435 | 2161 | // |
duke@435 | 2162 | // This code is dependent on the memory layout of the interpreter local |
duke@435 | 2163 | // array and the monitors. On all of our platforms the layout is identical |
duke@435 | 2164 | // so this code is shared. If some platform lays its arrays out
duke@435 | 2165 | // differently, then this code could move to platform-specific code, or
duke@435 | 2166 | // the code here could be modified to copy items one at a time using |
duke@435 | 2167 | // frame accessor methods and be platform independent. |
duke@435 | 2168 | |
duke@435 | 2169 | frame fr = thread->last_frame(); |
duke@435 | 2170 | assert( fr.is_interpreted_frame(), "" ); |
duke@435 | 2171 | assert( fr.interpreter_frame_expression_stack_size()==0, "only handle empty stacks" ); |
duke@435 | 2172 | |
duke@435 | 2173 | // Figure out how many monitors are active. |
duke@435 | 2174 | int active_monitor_count = 0; |
duke@435 | 2175 | for( BasicObjectLock *kptr = fr.interpreter_frame_monitor_end(); |
duke@435 | 2176 | kptr < fr.interpreter_frame_monitor_begin(); |
duke@435 | 2177 | kptr = fr.next_monitor_in_interpreter_frame(kptr) ) { |
duke@435 | 2178 | if( kptr->obj() != NULL ) active_monitor_count++; |
duke@435 | 2179 | } |
duke@435 | 2180 | |
duke@435 | 2181 | // QQQ we could place the number of active monitors in the array so that compiled code
duke@435 | 2182 | // could double check it. |
duke@435 | 2183 | |
duke@435 | 2184 | methodOop moop = fr.interpreter_frame_method(); |
duke@435 | 2185 | int max_locals = moop->max_locals(); |
duke@435 | 2186 | // Allocate temp buffer, 1 word per local & 2 per active monitor |
duke@435 | 2187 | int buf_size_words = max_locals + active_monitor_count*2; |
duke@435 | 2188 | intptr_t *buf = NEW_C_HEAP_ARRAY(intptr_t,buf_size_words); |
duke@435 | 2189 | |
duke@435 | 2190 | // Copy the locals. Order is preserved so that loading of longs works. |
duke@435 | 2191 | // Since there's no GC I can copy the oops blindly. |
duke@435 | 2192 | assert( sizeof(HeapWord)==sizeof(intptr_t), "fix this code"); |
duke@435 | 2193 | if (TaggedStackInterpreter) { |
duke@435 | 2194 | for (int i = 0; i < max_locals; i++) { |
duke@435 | 2195 | // copy each local separately into the buffer, omitting its tag
duke@435 | 2196 | buf[i] = *fr.interpreter_frame_local_at(max_locals-i-1); |
duke@435 | 2197 | } |
duke@435 | 2198 | } else { |
duke@435 | 2199 | Copy::disjoint_words( |
duke@435 | 2200 | (HeapWord*)fr.interpreter_frame_local_at(max_locals-1), |
duke@435 | 2201 | (HeapWord*)&buf[0], |
duke@435 | 2202 | max_locals); |
duke@435 | 2203 | } |
duke@435 | 2204 | |
duke@435 | 2205 | // Inflate locks. Copy the displaced headers. Be careful, there can be holes. |
duke@435 | 2206 | int i = max_locals; |
duke@435 | 2207 | for( BasicObjectLock *kptr2 = fr.interpreter_frame_monitor_end(); |
duke@435 | 2208 | kptr2 < fr.interpreter_frame_monitor_begin(); |
duke@435 | 2209 | kptr2 = fr.next_monitor_in_interpreter_frame(kptr2) ) { |
duke@435 | 2210 | if( kptr2->obj() != NULL) { // Avoid 'holes' in the monitor array |
duke@435 | 2211 | BasicLock *lock = kptr2->lock(); |
duke@435 | 2212 | // Inflate so the displaced header becomes position-independent |
duke@435 | 2213 | if (lock->displaced_header()->is_unlocked()) |
duke@435 | 2214 | ObjectSynchronizer::inflate_helper(kptr2->obj()); |
duke@435 | 2215 | // Now the displaced header is free to move |
duke@435 | 2216 | buf[i++] = (intptr_t)lock->displaced_header(); |
duke@435 | 2217 | buf[i++] = (intptr_t)kptr2->obj(); |
duke@435 | 2218 | } |
duke@435 | 2219 | } |
duke@435 | 2220 | assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" ); |
duke@435 | 2221 | |
duke@435 | 2222 | return buf; |
duke@435 | 2223 | JRT_END |
duke@435 | 2224 | |
duke@435 | 2225 | JRT_LEAF(void, SharedRuntime::OSR_migration_end( intptr_t* buf) ) |
duke@435 | 2226 | FREE_C_HEAP_ARRAY(intptr_t,buf); |
duke@435 | 2227 | JRT_END |
duke@435 | 2228 | |
duke@435 | 2229 | #ifndef PRODUCT |
duke@435 | 2230 | bool AdapterHandlerLibrary::contains(CodeBlob* b) { |
duke@435 | 2231 | |
kvn@559 | 2232 | if (_handlers == NULL) return false; |
kvn@559 | 2233 | |
duke@435 | 2234 | for (int i = 0 ; i < _handlers->length() ; i++) { |
duke@435 | 2235 | AdapterHandlerEntry* a = get_entry(i); |
duke@435 | 2236 | if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) return true; |
duke@435 | 2237 | } |
duke@435 | 2238 | return false; |
duke@435 | 2239 | } |
duke@435 | 2240 | |
duke@435 | 2241 | void AdapterHandlerLibrary::print_handler(CodeBlob* b) { |
duke@435 | 2242 | |
duke@435 | 2243 | for (int i = 0 ; i < _handlers->length() ; i++) { |
duke@435 | 2244 | AdapterHandlerEntry* a = get_entry(i); |
duke@435 | 2245 | if ( a != NULL && b == CodeCache::find_blob(a->get_i2c_entry()) ) { |
duke@435 | 2246 | tty->print("Adapter for signature: "); |
duke@435 | 2247 | // Fingerprinter::print(_fingerprints->at(i)); |
duke@435 | 2248 | tty->print("0x%" FORMAT64_MODIFIER "x", _fingerprints->at(i)); |
duke@435 | 2249 | tty->print_cr(" i2c: " INTPTR_FORMAT " c2i: " INTPTR_FORMAT " c2iUV: " INTPTR_FORMAT, |
duke@435 | 2250 | a->get_i2c_entry(), a->get_c2i_entry(), a->get_c2i_unverified_entry()); |
duke@435 | 2251 | |
duke@435 | 2252 | return; |
duke@435 | 2253 | } |
duke@435 | 2254 | } |
duke@435 | 2255 | assert(false, "Should have found handler"); |
duke@435 | 2256 | } |
duke@435 | 2257 | #endif /* PRODUCT */ |