/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "nativeInst_mips.hpp"
#endif

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean -->---  [1]
//        /          (null)          \
//       /                            \       /-<-\
//      /            [2]               \     /     \
//  Interpreted  --------->  Monomorphic    |     [3]
//  (CompiledICHolder*)        (Klass*)     |
//       \                     /      \     /
//   [4]  \                   /  [4]   \->-/
//         \->-  Megamorphic -<-/
//               (Method*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from the debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
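//
// For illustration only (not part of this header's API): a sketch of how the
// states in the diagram above map onto the query methods of CompiledIC below,
// assuming the caller holds CompiledIC_lock (see the MT-safety notes on
// CompiledIC) and that nm/call_site_addr are a hypothetical nmethod* and
// call-site address:
//
//   CompiledIC* ic = CompiledIC_at(nm, call_site_addr);
//   if (ic->is_clean()) {
//     // Clean: the cached value is null; the next invocation triggers resolution
//   } else if (ic->is_megamorphic()) {
//     // Megamorphic: the cached value is a Method*
//   } else if (ic->is_call_to_compiled() || ic->is_call_to_interpreted()) {
//     // Monomorphic: the cached value is a Klass* (or a CompiledICHolder*
//     // when the callee is still interpreted)
//   }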
//
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;            // entry point for call
  void*   _cached_value;     // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;      // Is the cached value a CompiledICHolder*
  bool    _is_optimized;     // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;   // Call to the interpreter
  bool    _release_icholder;
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const   { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry            = entry;
    _cached_value     = (void*)klass;
    _to_interpreter   = false;
    _is_icholder      = false;
    _is_optimized     = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry            = entry;
    _cached_value     = (void*)method;
    _to_interpreter   = true;
    _is_icholder      = false;
    _is_optimized     = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry            = entry;
    _cached_value     = (void*)icholder;
    _to_interpreter   = true;
    _is_icholder      = true;
    _is_optimized     = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
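
// For illustration only: CompiledICInfo decouples computing a new inline-cache
// target from patching it in. A hypothetical caller (ic, callee_method,
// receiver_klass and static_bound are made-up names for this sketch; the real
// driver lives in the runtime's call-resolution code) might do:
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         ic->is_optimized(), static_bound,
//                                         info, CHECK);
//   ic->set_to_monomorphic(info);  // transition [1] or [2] in the diagram above
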
class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCall*        _ic_call;      // the call instruction
  NativeMovConstReg* _value;        // patchable value cell for this IC
  bool               _is_optimized; // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be
  // MT-safe to change an inline cache. These change the underlying inline cache directly.
  // They *never* make changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no
  // transition stub is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // Conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The call site might contain other stale
  // values of other kinds, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Returns the cached_metadata/destination associated with this inline cache. If the cache
  // currently points to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call the is_xxx queries while
  // holding the CompiledIC_lock, so that you are guaranteed that no patching takes place.
  // The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // These all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};
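
// For illustration only: the patching entry points above are intended to be
// used while holding CompiledIC_lock (or at a safepoint). A hypothetical
// inline-cache-miss path, loosely patterned on the runtime's call-resolution
// code (ic, call_info and bc are made-up names; CHECK_false assumes the
// enclosing function returns bool), might look like:
//
//   {
//     MutexLocker ml(CompiledIC_lock);
//     if (!ic->is_megamorphic()) {
//       // transition [4] in the diagram above; returns false if allocating
//       // the stub in the code cache fails
//       bool ok = ic->set_to_megamorphic(&call_info, bc, CHECK_false);
//     }
//   }
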
inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
         reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}
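
// For illustration only: the RelocIterator overload above is what makes it
// convenient to visit every inline cache of an nmethod, e.g. when cleaning
// caches. A sketch, assuming a safepoint or CompiledIC_lock is held and nm is
// a hypothetical nmethod*:
//
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type ||
//         iter.type() == relocInfo::opt_virtual_call_type) {
//       CompiledIC* ic = CompiledIC_at(&iter);
//       if (!ic->is_clean()) {
//         ic->set_to_clean();
//       }
//     }
//   }
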
info"); stefank@6991: CompiledIC* c_ic = new CompiledIC(reloc_iter); stefank@6991: c_ic->verify(); stefank@6991: return c_ic; stefank@6991: } aoqi@0: aoqi@0: //----------------------------------------------------------------------------- aoqi@0: // The CompiledStaticCall represents a call to a static method in the compiled aoqi@0: // aoqi@0: // Transition diagram of a static call site is somewhat simpler than for an inlined cache: aoqi@0: // aoqi@0: // aoqi@0: // -----<----- Clean ----->----- aoqi@0: // / \ aoqi@0: // / \ aoqi@0: // compilled code <------------> interpreted code aoqi@0: // aoqi@0: // Clean: Calls directly to runtime method for fixup aoqi@0: // Compiled code: Calls directly to compiled code aoqi@0: // Interpreted code: Calls to stub that set Method* reference aoqi@0: // aoqi@0: // aoqi@0: class CompiledStaticCall; aoqi@0: aoqi@0: class StaticCallInfo { aoqi@0: private: aoqi@0: address _entry; // Entrypoint aoqi@0: methodHandle _callee; // Callee (used when calling interpreter) aoqi@0: bool _to_interpreter; // call to interpreted method (otherwise compiled) aoqi@0: aoqi@0: friend class CompiledStaticCall; aoqi@0: public: aoqi@0: address entry() const { return _entry; } aoqi@0: methodHandle callee() const { return _callee; } aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: class CompiledStaticCall: public NativeCall { aoqi@0: friend class CompiledIC; aoqi@0: aoqi@0: // Also used by CompiledIC aoqi@0: void set_to_interpreted(methodHandle callee, address entry); aoqi@0: bool is_optimized_virtual(); aoqi@0: aoqi@0: public: aoqi@0: friend CompiledStaticCall* compiledStaticCall_before(address return_addr); aoqi@0: friend CompiledStaticCall* compiledStaticCall_at(address native_call); aoqi@0: friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); aoqi@0: aoqi@0: // Code vkempik@8427: static address emit_to_interp_stub(CodeBuffer &cbuf); aoqi@0: static int to_interp_stub_size(); aoqi@0: static int reloc_to_interp_stub(); aoqi@0: aoqi@0: // State aoqi@0: bool is_clean() const; aoqi@0: bool is_call_to_compiled() const; aoqi@0: bool is_call_to_interpreted() const; aoqi@0: aoqi@0: // Clean static call (will force resolving on next use) aoqi@0: void set_to_clean(); aoqi@0: aoqi@0: // Set state. The entry must be the same, as computed by compute_entry. aoqi@0: // Computation and setting is split up, since the actions are separate during aoqi@0: // a OptoRuntime::resolve_xxx. aoqi@0: void set(const StaticCallInfo& info); aoqi@0: aoqi@0: // Compute entry point given a method aoqi@0: static void compute_entry(methodHandle m, StaticCallInfo& info); aoqi@0: aoqi@0: // Stub support aoqi@0: address find_stub(); aoqi@0: static void set_stub_to_clean(static_stub_Relocation* static_stub); aoqi@0: aoqi@0: // Misc. aoqi@0: void print() PRODUCT_RETURN; aoqi@0: void verify() PRODUCT_RETURN; aoqi@0: }; aoqi@0: aoqi@0: aoqi@0: inline CompiledStaticCall* compiledStaticCall_before(address return_addr) { aoqi@0: CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr); aoqi@0: st->verify(); aoqi@0: return st; aoqi@0: } aoqi@0: aoqi@0: inline CompiledStaticCall* compiledStaticCall_at(address native_call) { aoqi@0: CompiledStaticCall* st = (CompiledStaticCall*)native_call; aoqi@0: st->verify(); aoqi@0: return st; aoqi@0: } aoqi@0: aoqi@0: inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) { aoqi@0: return compiledStaticCall_at(call_site->addr()); aoqi@0: } aoqi@0: aoqi@0: #endif // SHARE_VM_CODE_COMPILEDIC_HPP