src/share/vm/code/compiledIC.hpp

author:      neugens
date:        Mon, 19 Aug 2019 10:11:31 +0200
changeset:   9861:a248d0be1309
parent:      8997:f8a45a60bc6b
child:       9041:95a08233f46c
permissions: -rw-r--r--

8229401: Fix JFR code cache test failures
8223689: Add JFR Thread Sampling Support
8223690: Add JFR BiasedLock Event Support
8223691: Add JFR G1 Region Type Change Event Support
8223692: Add JFR G1 Heap Summary Event Support
Summary: Backport JFR from JDK11, additional fixes
Reviewed-by: neugens, apetushkov
Contributed-by: denghui.ddh@alibaba-inc.com

/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif

//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean -->---  [1]
//            /       (null)      \
//           /                     \      /-<-\
//          /          [2]          \    /     \
//      Interpreted  ---------> Monomorphic     | [3]
//  (CompiledICHolder*)            (Klass*)     |
//          \                        /   \     /
//       [4] \                      / [4] \->-/
//            \->-  Megamorphic -<-/
//              (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
//
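// A minimal sketch (assumed names, not an actual call site) of a typical
// clean -> monomorphic transition performed while holding CompiledIC_lock;
// `nm`, `call_addr`, `method` and `receiver_klass` are placeholders:
//
//   CompiledIC* ic = CompiledIC_at(nm, call_addr);
//   if (ic->is_clean()) {
//     CompiledICInfo info;
//     CompiledIC::compute_monomorphic_entry(method, receiver_klass,
//                                           false /*is_optimized*/, false /*static_bound*/,
//                                           info, CHECK);
//     ic->set_to_monomorphic(info);
//   }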

class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;              // entry point for call
  void*   _cached_value;       // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;        // Is the cached value a CompiledICHolder*
  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;     // Call goes to interpreter
  bool    _release_icholder;
 public:
  address entry() const        { return _entry; }
  Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder*    claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool    is_optimized() const   { return _is_optimized; }
  bool    to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry      = entry;
    _cached_value = (void*)klass;
    _to_interpreter = false;
    _is_icholder = false;
    _is_optimized = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry      = entry;
    _cached_value = (void*)method;
    _to_interpreter = true;
    _is_icholder = false;
    _is_optimized = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry      = entry;
    _cached_value = (void*)icholder;
    _to_interpreter = true;
    _is_icholder = true;
    _is_optimized = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
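
// A minimal sketch (assumed names) of the ownership discipline enforced by
// ~CompiledICInfo(): a consumer that actually installs the info calls
// claim_cached_icholder(), which clears _release_icholder so the destructor
// will not delete the holder; an unused info releases it automatically:
//
//   {
//     CompiledICInfo info;
//     info.set_icholder_entry(entry, icholder);      // info owns the holder
//     if (installed_in_inline_cache) {
//       CompiledICHolder* h = info.claim_cached_icholder();  // ownership moves out
//     }
//   }  // not claimed => destructor deletes the holder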

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCall*   _ic_call;       // the call instruction
  NativeMovConstReg* _value;    // patchable value cell for this IC
  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);
  CompiledIC(RelocIterator* iter);

  void initialize_from_iter(RelocIterator* iter);

  static bool is_icholder_entry(address entry);

  // Low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no transition stub is
  // associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);
  friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed.  The call site might contain stale values
  // of other kinds, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const   { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return  _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the CompiledIC_lock,
  // so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean(bool in_use = true);
  void set_to_monomorphic(CompiledICInfo& info);
  void clear_ic_stub();

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};
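
// A minimal sketch (assumed names) of handling a set_to_megamorphic failure,
// which per the comment on set_to_megamorphic above can occur when code cache
// allocation fails:
//
//   bool ok = ic->set_to_megamorphic(&call_info, bytecode, CHECK);
//   if (!ok) {
//     // Stub allocation failed; leave the IC unchanged and let the next
//     // invocation take the slow path again.
//   }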

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
  assert(reloc_iter->type() == relocInfo::virtual_call_type ||
      reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(reloc_iter);
  c_ic->verify();
  return c_ic;
}

//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than that of an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
//  Clean:            Calls directly to runtime method for fixup
//  Compiled code:    Calls directly to compiled code
//  Interpreted code: Calls to a stub that sets the Method* reference
//
//
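// A minimal sketch (assumed names, not an actual call site) of inspecting a
// static call site's state via the accessors declared below:
//
//   CompiledStaticCall* csc = compiledStaticCall_at(call_site_addr);
//   if (csc->is_clean()) {
//     // the next execution will call into the runtime for fixup
//   } else if (csc->is_call_to_interpreted()) {
//     // destination is a stub that loads the Method* and enters the interpreter
//   }
//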
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const    { return _entry;  }
  methodHandle callee() const   { return _callee; }
};

class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
  static address emit_to_interp_stub(CodeBuffer &cbuf);
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as that computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};
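
// A minimal sketch (assumed names) of the two-step resolve protocol described
// in the comment on set() above: the entry is computed first, and only
// installed later, e.g. from an OptoRuntime::resolve_xxx path:
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee_method, info);
//   // ... later, once it is safe to patch the call site:
//   csc->set(info);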

inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP
