@@ -1207,14 +1207,11 @@
   assert((!is_virtual && invoke_code == Bytecodes::_invokestatic ) ||
          (!is_virtual && invoke_code == Bytecodes::_invokehandle ) ||
          (!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
          ( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
 
-  // We do not patch the call site if the caller nmethod has been made non-entrant.
-  if (!caller_nm->is_in_use()) {
-    return callee_method;
-  }
+  assert(caller_nm->is_alive(), "It should be alive");
 
 #ifndef PRODUCT
   // tracing/debugging/statistics
   int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
               (is_virtual) ? (&_resolve_virtual_ctr) :
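Note on this hunk: the early return on a non-entrant caller becomes an assert that the caller is alive. Below is a minimal self-contained C++ sketch of the state model this relies on; the enum, nmethod_model, and main are illustrative stand-ins, not HotSpot's nmethod API. The point: a not_entrant nmethod is still alive, so a caller that reaches the patching code satisfies the new assert even where the old is_in_use() test would have bailed out.

#include <cassert>

// Illustrative model of the nmethod life cycle; the real states live in
// nmethod.hpp. "alive" covers both in_use and not_entrant: a not_entrant
// caller can still be executing this resolve, so asserting is_alive()
// holds on every path that reaches the patching code.
enum nmethod_state { in_use, not_entrant, zombie, unloaded };

struct nmethod_model {                 // hypothetical stand-in for nmethod
  nmethod_state _state;
  bool is_in_use() const { return _state == in_use; }
  bool is_alive()  const { return _state == in_use || _state == not_entrant; }
};

int main() {
  nmethod_model caller{not_entrant};
  // The old code returned early here; the new code only asserts liveness,
  // because a non-entrant caller may legitimately still be on stack.
  assert(caller.is_alive());
  return 0;
}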
@@ -1280,17 +1277,15 @@
 
   // Lock blocks for safepoint during which both nmethods can change state.
 
   // Now that we are ready to patch if the Method* was redefined then
   // don't update call site and let the caller retry.
-  // Don't update call site if caller nmethod has been made non-entrant
-  // as it is a waste of time.
   // Don't update call site if callee nmethod was unloaded or deoptimized.
   // Don't update call site if callee nmethod was replaced by an other nmethod
   // which may happen when multiply alive nmethod (tiered compilation)
   // will be supported.
-  if (!callee_method->is_old() && caller_nm->is_in_use() &&
+  if (!callee_method->is_old() &&
       (callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
 #ifdef ASSERT
     // We must not try to patch to jump to an already unloaded method.
     if (dest_entry_point != 0) {
       CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
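For reference, the surviving guard reduces to the predicate sketched below; Nmethod and should_patch are hypothetical stand-ins, not HotSpot's types. It also makes the precedence in the second clause explicit: && binds tighter than ||, so the callee_nm == NULL alternative short-circuits the whole in_use/code check.

#include <cstddef>

// Hypothetical stand-in for the bits of nmethod this guard touches.
struct Nmethod {
  bool in_use;
  bool is_in_use() const { return in_use; }
};

// The post-change guard: caller_nm->is_in_use() is gone, and the second
// clause parses as callee_nm == NULL || (is_in_use() && code == callee_nm).
bool should_patch(bool callee_is_old, const Nmethod* callee_nm,
                  const Nmethod* callee_code) {
  return !callee_is_old &&
         (callee_nm == NULL || callee_nm->is_in_use() && (callee_code == callee_nm));
}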
@@ -1487,18 +1482,16 @@
   // event can't be posted when the stub is created as locks are held
   // - instead the event will be deferred until the event collector goes
   // out of scope.
   JvmtiDynamicCodeEventCollector event_collector;
 
-  // Update inline cache to megamorphic. Skip update if caller has been
-  // made non-entrant or we are called from interpreted.
+  // Update inline cache to megamorphic. Skip update if we are called from interpreted.
   { MutexLocker ml_patch (CompiledIC_lock);
     RegisterMap reg_map(thread, false);
     frame caller_frame = thread->last_frame().sender(&reg_map);
     CodeBlob* cb = caller_frame.cb();
-    if (cb->is_nmethod() && ((nmethod*)cb)->is_in_use()) {
-      // Not a non-entrant nmethod, so find inline_cache
+    if (cb->is_nmethod()) {
       CompiledIC* inline_cache = CompiledIC_before(((nmethod*)cb), caller_frame.pc());
       bool should_be_mono = false;
       if (inline_cache->is_optimized()) {
         if (TraceCallFixup) {
           ResourceMark rm(thread);
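This hunk flips a call site to megamorphic while holding CompiledIC_lock. A toy model of that transition follows; ICState, InlineCacheModel, and std::mutex stand in for the real CompiledIC states and MutexLocker, and are all hypothetical.

#include <mutex>

// Toy model of the inline-cache transition performed under CompiledIC_lock.
enum class ICState { clean, monomorphic, megamorphic };

class InlineCacheModel {
  std::mutex patch_lock_;             // stands in for CompiledIC_lock
  ICState state_ = ICState::clean;
public:
  // On a miss at a monomorphic site, fall back to the megamorphic stub.
  void on_miss() {
    std::lock_guard<std::mutex> ml(patch_lock_);   // like MutexLocker ml_patch
    if (state_ == ICState::monomorphic)
      state_ = ICState::megamorphic;
  }
  ICState state() const { return state_; }
};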
@@ -1637,23 +1630,17 @@
   // leads to very hard to track down bugs, if an inline cache gets updated
   // to a wrong method). It should not be performance critical, since the
   // resolve is only done once.
 
   MutexLocker ml(CompiledIC_lock);
-  //
-  // We do not patch the call site if the nmethod has been made non-entrant
-  // as it is a waste of time
-  //
-  if (caller_nm->is_in_use()) {
-    if (is_static_call) {
-      CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
-      ssc->set_to_clean();
-    } else {
-      // compiled, dispatched call (which used to call an interpreted method)
-      CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
-      inline_cache->set_to_clean();
-    }
-  }
+  if (is_static_call) {
+    CompiledStaticCall* ssc= compiledStaticCall_at(call_addr);
+    ssc->set_to_clean();
+  } else {
+    // compiled, dispatched call (which used to call an interpreted method)
+    CompiledIC* inline_cache = CompiledIC_at(caller_nm, call_addr);
+    inline_cache->set_to_clean();
+  }
 }
 
 }
 
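With the caller_nm->is_in_use() wrapper gone, the site is now cleaned unconditionally: both flavors of call site are reset so the next invocation re-enters the resolver. A minimal sketch under that assumption; StaticCallModel and InlineCacheModel are illustrative, not CompiledStaticCall/CompiledIC.

#include <cassert>

// Illustrative stand-ins: each kind of compiled call site can be reset to
// the "clean" state, after which the next call re-enters the resolver.
struct StaticCallModel  { bool clean = false; void set_to_clean() { clean = true; } };
struct InlineCacheModel { bool clean = false; void set_to_clean() { clean = true; } };

// Mirrors the post-change control flow: no is_in_use() guard, just a
// dispatch on the call-site kind.
void reresolve_site(StaticCallModel* ssc, InlineCacheModel* ic, bool is_static_call) {
  if (is_static_call) {
    ssc->set_to_clean();
  } else {
    // compiled, dispatched call (which used to call an interpreted method)
    ic->set_to_clean();
  }
}

int main() {
  StaticCallModel ssc; InlineCacheModel ic;
  reresolve_site(&ssc, &ic, /*is_static_call=*/false);
  assert(ic.clean && !ssc.clean);
  return 0;
}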