/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
duke@435: * duke@435: */ duke@435: stefank@2314: #ifndef SHARE_VM_CODE_COMPILEDIC_HPP stefank@2314: #define SHARE_VM_CODE_COMPILEDIC_HPP stefank@2314: stefank@2314: #include "interpreter/linkResolver.hpp" coleenp@4037: #include "oops/compiledICHolder.hpp" stefank@2314: #ifdef TARGET_ARCH_x86 stefank@2314: # include "nativeInst_x86.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_ARCH_sparc stefank@2314: # include "nativeInst_sparc.hpp" stefank@2314: #endif stefank@2314: #ifdef TARGET_ARCH_zero stefank@2314: # include "nativeInst_zero.hpp" stefank@2314: #endif bobv@2508: #ifdef TARGET_ARCH_arm bobv@2508: # include "nativeInst_arm.hpp" bobv@2508: #endif bobv@2508: #ifdef TARGET_ARCH_ppc bobv@2508: # include "nativeInst_ppc.hpp" bobv@2508: #endif stefank@2314: duke@435: //----------------------------------------------------------------------------- duke@435: // The CompiledIC represents a compiled inline cache. duke@435: // duke@435: // In order to make patching of the inline cache MT-safe, we only allow the following duke@435: // transitions (when not at a safepoint): duke@435: // duke@435: // duke@435: // [1] --<-- Clean -->--- [1] duke@435: // / (null) \ duke@435: // / \ /-<-\ duke@435: // / [2] \ / \ duke@435: // Interpreted ---------> Monomorphic | [3] coleenp@4037: // (CompiledICHolder*) (Klass*) | duke@435: // \ / \ / duke@435: // [4] \ / [4] \->-/ duke@435: // \->- Megamorphic -<-/ dbuck@8997: // (CompiledICHolder*) duke@435: // dbuck@8997: // The text in parentheses () refers to the value of the inline cache receiver (mov instruction) duke@435: // dbuck@8997: // The numbers in square brackets refer to the kind of transition: duke@435: // [1]: Initial fixup. Receiver it found from debug information duke@435: // [2]: Compilation of a method coleenp@4037: // [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same) duke@435: // [4]: Inline cache miss. We go directly to megamorphic call. 
duke@435: // duke@435: // The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe duke@435: // transition is made to a stub. duke@435: // duke@435: class CompiledIC; coleenp@4037: class ICStub; duke@435: coleenp@4037: class CompiledICInfo : public StackObj { duke@435: private: duke@435: address _entry; // entry point for call coleenp@4037: void* _cached_value; // Value of cached_value (either in stub or inline cache) coleenp@4037: bool _is_icholder; // Is the cached value a CompiledICHolder* duke@435: bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound) duke@435: bool _to_interpreter; // Call it to interpreter coleenp@4037: bool _release_icholder; duke@435: public: duke@435: address entry() const { return _entry; } coleenp@4037: Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; } coleenp@4037: CompiledICHolder* claim_cached_icholder() { coleenp@4037: assert(_is_icholder, ""); coleenp@4037: assert(_cached_value != NULL, "must be non-NULL"); coleenp@4037: _release_icholder = false; coleenp@4037: CompiledICHolder* icholder = (CompiledICHolder*)_cached_value; coleenp@4037: icholder->claim(); coleenp@4037: return icholder; coleenp@4037: } duke@435: bool is_optimized() const { return _is_optimized; } coleenp@4037: bool to_interpreter() const { return _to_interpreter; } coleenp@4037: coleenp@4037: void set_compiled_entry(address entry, Klass* klass, bool is_optimized) { coleenp@4037: _entry = entry; coleenp@4037: _cached_value = (void*)klass; coleenp@4037: _to_interpreter = false; coleenp@4037: _is_icholder = false; coleenp@4037: _is_optimized = is_optimized; coleenp@4037: _release_icholder = false; coleenp@4037: } coleenp@4037: coleenp@4037: void set_interpreter_entry(address entry, Method* method) { coleenp@4037: _entry = entry; coleenp@4037: _cached_value = (void*)method; coleenp@4037: _to_interpreter = true; coleenp@4037: _is_icholder = false; 
coleenp@4037: _is_optimized = true; coleenp@4037: _release_icholder = false; coleenp@4037: } coleenp@4037: coleenp@4037: void set_icholder_entry(address entry, CompiledICHolder* icholder) { coleenp@4037: _entry = entry; coleenp@4037: _cached_value = (void*)icholder; coleenp@4037: _to_interpreter = true; coleenp@4037: _is_icholder = true; coleenp@4037: _is_optimized = false; coleenp@4037: _release_icholder = true; coleenp@4037: } coleenp@4037: coleenp@4037: CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false), coleenp@4037: _to_interpreter(false), _is_optimized(false), _release_icholder(false) { coleenp@4037: } coleenp@4037: ~CompiledICInfo() { coleenp@4037: // In rare cases the info is computed but not used, so release any coleenp@4037: // CompiledICHolder* that was created coleenp@4037: if (_release_icholder) { coleenp@4037: assert(_is_icholder, "must be"); coleenp@4037: CompiledICHolder* icholder = (CompiledICHolder*)_cached_value; coleenp@4037: icholder->claim(); coleenp@4037: delete icholder; coleenp@4037: } coleenp@4037: } duke@435: }; duke@435: duke@435: class CompiledIC: public ResourceObj { duke@435: friend class InlineCacheBuffer; duke@435: friend class ICStub; duke@435: duke@435: duke@435: private: duke@435: NativeCall* _ic_call; // the call instruction coleenp@4037: NativeMovConstReg* _value; // patchable value cell for this IC duke@435: bool _is_optimized; // an optimized virtual call (i.e., no compiled IC) duke@435: coleenp@4037: CompiledIC(nmethod* nm, NativeCall* ic_call); stefank@6991: CompiledIC(RelocIterator* iter); stefank@6991: stefank@6991: void initialize_from_iter(RelocIterator* iter); coleenp@4037: coleenp@4037: static bool is_icholder_entry(address entry); duke@435: duke@435: // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe duke@435: // to change an inline-cache. These changes the underlying inline-cache directly. 
They *newer* make duke@435: // changes to a transition stub. coleenp@4037: void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder); coleenp@4037: void set_ic_destination(ICStub* stub); coleenp@4037: void set_ic_destination(address entry_point) { coleenp@4037: assert(_is_optimized, "use set_ic_destination_and_value instead"); coleenp@4037: internal_set_ic_destination(entry_point, false, NULL, false); coleenp@4037: } coleenp@4037: // This only for use by ICStubs where the type of the value isn't known coleenp@4037: void set_ic_destination_and_value(address entry_point, void* value) { coleenp@4037: internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point)); coleenp@4037: } coleenp@4037: void set_ic_destination_and_value(address entry_point, Metadata* value) { coleenp@4037: internal_set_ic_destination(entry_point, false, value, false); coleenp@4037: } coleenp@4037: void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) { coleenp@4037: internal_set_ic_destination(entry_point, false, value, true); coleenp@4037: } duke@435: duke@435: // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is duke@435: // associated with the inline cache. duke@435: address stub_address() const; duke@435: bool is_in_transition_state() const; // Use InlineCacheBuffer duke@435: duke@435: public: duke@435: // conversion (machine PC to CompiledIC*) coleenp@4037: friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr); coleenp@4037: friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site); duke@435: friend CompiledIC* CompiledIC_at(Relocation* call_site); stefank@6991: friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter); duke@435: coleenp@4037: // This is used to release CompiledICHolder*s from nmethods that coleenp@4037: // are about to be freed. 
The callsite might contain other stale coleenp@4037: // values of other kinds so it must be careful. coleenp@4037: static void cleanup_call_site(virtual_call_Relocation* call_site); coleenp@4037: static bool is_icholder_call_site(virtual_call_Relocation* call_site); coleenp@4037: coleenp@4037: // Return the cached_metadata/destination associated with this inline cache. If the cache currently points duke@435: // to a transition stub, it will read the values from the transition stub. coleenp@4037: void* cached_value() const; coleenp@4037: CompiledICHolder* cached_icholder() const { coleenp@4037: assert(is_icholder_call(), "must be"); coleenp@4037: return (CompiledICHolder*) cached_value(); coleenp@4037: } coleenp@4037: Metadata* cached_metadata() const { coleenp@4037: assert(!is_icholder_call(), "must be"); coleenp@4037: return (Metadata*) cached_value(); coleenp@4037: } coleenp@4037: duke@435: address ic_destination() const; duke@435: duke@435: bool is_optimized() const { return _is_optimized; } duke@435: duke@435: // State duke@435: bool is_clean() const; duke@435: bool is_megamorphic() const; duke@435: bool is_call_to_compiled() const; duke@435: bool is_call_to_interpreted() const; duke@435: coleenp@4037: bool is_icholder_call() const; coleenp@4037: duke@435: address end_of_call() { return _ic_call->return_address(); } duke@435: duke@435: // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_ock duke@435: // so you are guaranteed that no patching takes place. The same goes for verify. duke@435: // duke@435: // Note: We do not provide any direct access to the stub code, to prevent parts of the code duke@435: // to manipulate the inline cache in MT-unsafe ways. duke@435: // duke@435: // They all takes a TRAP argument, since they can cause a GC if the inline-cache buffer is full. 
duke@435: // thartmann@8075: void set_to_clean(bool in_use = true); coleenp@4037: void set_to_monomorphic(CompiledICInfo& info); thartmann@8073: void clear_ic_stub(); anoll@5762: anoll@5762: // Returns true if successful and false otherwise. The call can fail if memory anoll@5762: // allocation in the code cache fails. anoll@5762: bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS); duke@435: duke@435: static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass, duke@435: bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS); duke@435: duke@435: // Location duke@435: address instruction_address() const { return _ic_call->instruction_address(); } duke@435: duke@435: // Misc duke@435: void print() PRODUCT_RETURN; duke@435: void print_compiled_ic() PRODUCT_RETURN; duke@435: void verify() PRODUCT_RETURN; duke@435: }; duke@435: coleenp@4037: inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) { coleenp@4037: CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr)); duke@435: c_ic->verify(); duke@435: return c_ic; duke@435: } duke@435: coleenp@4037: inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) { coleenp@4037: CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site)); duke@435: c_ic->verify(); duke@435: return c_ic; duke@435: } duke@435: duke@435: inline CompiledIC* CompiledIC_at(Relocation* call_site) { coleenp@4037: assert(call_site->type() == relocInfo::virtual_call_type || coleenp@4037: call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. 
info"); coleenp@4037: CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr())); duke@435: c_ic->verify(); duke@435: return c_ic; duke@435: } duke@435: stefank@6991: inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) { stefank@6991: assert(reloc_iter->type() == relocInfo::virtual_call_type || stefank@6991: reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info"); stefank@6991: CompiledIC* c_ic = new CompiledIC(reloc_iter); stefank@6991: c_ic->verify(); stefank@6991: return c_ic; stefank@6991: } duke@435: duke@435: //----------------------------------------------------------------------------- duke@435: // The CompiledStaticCall represents a call to a static method in the compiled duke@435: // duke@435: // Transition diagram of a static call site is somewhat simpler than for an inlined cache: duke@435: // duke@435: // duke@435: // -----<----- Clean ----->----- duke@435: // / \ duke@435: // / \ duke@435: // compilled code <------------> interpreted code duke@435: // duke@435: // Clean: Calls directly to runtime method for fixup duke@435: // Compiled code: Calls directly to compiled code coleenp@4037: // Interpreted code: Calls to stub that set Method* reference duke@435: // duke@435: // duke@435: class CompiledStaticCall; duke@435: duke@435: class StaticCallInfo { duke@435: private: duke@435: address _entry; // Entrypoint duke@435: methodHandle _callee; // Callee (used when calling interpreter) duke@435: bool _to_interpreter; // call to interpreted method (otherwise compiled) duke@435: duke@435: friend class CompiledStaticCall; duke@435: public: duke@435: address entry() const { return _entry; } duke@435: methodHandle callee() const { return _callee; } duke@435: }; duke@435: duke@435: duke@435: class CompiledStaticCall: public NativeCall { duke@435: friend class CompiledIC; duke@435: duke@435: // Also used by CompiledIC duke@435: void set_to_interpreted(methodHandle callee, address entry); duke@435: bool 
is_optimized_virtual(); duke@435: duke@435: public: duke@435: friend CompiledStaticCall* compiledStaticCall_before(address return_addr); duke@435: friend CompiledStaticCall* compiledStaticCall_at(address native_call); duke@435: friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); duke@435: dlong@5000: // Code vkempik@8427: static address emit_to_interp_stub(CodeBuffer &cbuf); dlong@5000: static int to_interp_stub_size(); dlong@5000: static int reloc_to_interp_stub(); dlong@5000: duke@435: // State duke@435: bool is_clean() const; duke@435: bool is_call_to_compiled() const; duke@435: bool is_call_to_interpreted() const; duke@435: duke@435: // Clean static call (will force resolving on next use) duke@435: void set_to_clean(); duke@435: duke@435: // Set state. The entry must be the same, as computed by compute_entry. duke@435: // Computation and setting is split up, since the actions are separate during duke@435: // a OptoRuntime::resolve_xxx. duke@435: void set(const StaticCallInfo& info); duke@435: duke@435: // Compute entry point given a method duke@435: static void compute_entry(methodHandle m, StaticCallInfo& info); duke@435: duke@435: // Stub support duke@435: address find_stub(); duke@435: static void set_stub_to_clean(static_stub_Relocation* static_stub); duke@435: duke@435: // Misc. 
duke@435: void print() PRODUCT_RETURN; duke@435: void verify() PRODUCT_RETURN; duke@435: }; duke@435: duke@435: duke@435: inline CompiledStaticCall* compiledStaticCall_before(address return_addr) { duke@435: CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr); duke@435: st->verify(); duke@435: return st; duke@435: } duke@435: duke@435: inline CompiledStaticCall* compiledStaticCall_at(address native_call) { duke@435: CompiledStaticCall* st = (CompiledStaticCall*)native_call; duke@435: st->verify(); duke@435: return st; duke@435: } duke@435: duke@435: inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) { duke@435: return compiledStaticCall_at(call_site->addr()); duke@435: } stefank@2314: stefank@2314: #endif // SHARE_VM_CODE_COMPILEDIC_HPP