src/share/vm/code/compiledIC.hpp

changeset 0
f90c822e73f8
child 1
2d8a650513c2
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/code/compiledIC.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,357 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
    1.29 +#define SHARE_VM_CODE_COMPILEDIC_HPP
    1.30 +
    1.31 +#include "interpreter/linkResolver.hpp"
    1.32 +#include "oops/compiledICHolder.hpp"
    1.33 +#ifdef TARGET_ARCH_x86
    1.34 +# include "nativeInst_x86.hpp"
    1.35 +#endif
    1.36 +#ifdef TARGET_ARCH_sparc
    1.37 +# include "nativeInst_sparc.hpp"
    1.38 +#endif
    1.39 +#ifdef TARGET_ARCH_zero
    1.40 +# include "nativeInst_zero.hpp"
    1.41 +#endif
    1.42 +#ifdef TARGET_ARCH_arm
    1.43 +# include "nativeInst_arm.hpp"
    1.44 +#endif
    1.45 +#ifdef TARGET_ARCH_ppc
    1.46 +# include "nativeInst_ppc.hpp"
    1.47 +#endif
    1.48 +
    1.49 +//-----------------------------------------------------------------------------
    1.50 +// The CompiledIC represents a compiled inline cache.
    1.51 +//
    1.52 +// In order to make patching of the inline cache MT-safe, we only allow the following
    1.53 +// transitions (when not at a safepoint):
    1.54 +//
    1.55 +//
    1.56 +//         [1] --<--  Clean -->---  [1]
    1.57 +//            /       (null)      \
    1.58 +//           /                     \      /-<-\
    1.59 +//          /          [2]          \    /     \
    1.60 +//      Interpreted  ---------> Monomorphic     | [3]
    1.61 +//  (CompiledICHolder*)            (Klass*)     |
    1.62 +//          \                        /   \     /
    1.63 +//       [4] \                      / [4] \->-/
    1.64 +//            \->-  Megamorphic -<-/
    1.65 +//                  (Method*)
    1.66 +//
    1.67 +// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
    1.68 +//
    1.69 +// The numbers in square brackets refer to the kind of transition:
    1.70 +// [1]: Initial fixup. Receiver is found from debug information
    1.71 +// [2]: Compilation of a method
    1.72 +// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
    1.73 +// [4]: Inline cache miss. We go directly to megamorphic call.
    1.74 +//
    1.75 +// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
    1.76 +// transition is made to a stub.
    1.77 +//
    1.78 +class CompiledIC;
    1.79 +class ICStub;
    1.80 +
    1.81 +class CompiledICInfo : public StackObj {
    1.82 + private:
    1.83 +  address _entry;              // entry point for call
    1.84 +  void*   _cached_value;         // Value of cached_value (either in stub or inline cache)
    1.85 +  bool    _is_icholder;          // Is the cached value a CompiledICHolder*
    1.86 +  bool    _is_optimized;       // it is an optimized virtual call (i.e., can be statically bound)
    1.87 +  bool    _to_interpreter;     // Call it to interpreter
    1.88 +  bool    _release_icholder;
    1.89 + public:
    1.90 +  address entry() const        { return _entry; }
    1.91 +  Metadata*    cached_metadata() const         { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
    1.92 +  CompiledICHolder*    claim_cached_icholder() {
    1.93 +    assert(_is_icholder, "");
    1.94 +    assert(_cached_value != NULL, "must be non-NULL");
    1.95 +    _release_icholder = false;
    1.96 +    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    1.97 +    icholder->claim();
    1.98 +    return icholder;
    1.99 +  }
   1.100 +  bool    is_optimized() const { return _is_optimized; }
   1.101 +  bool         to_interpreter() const  { return _to_interpreter; }
   1.102 +
   1.103 +  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
   1.104 +    _entry      = entry;
   1.105 +    _cached_value = (void*)klass;
   1.106 +    _to_interpreter = false;
   1.107 +    _is_icholder = false;
   1.108 +    _is_optimized = is_optimized;
   1.109 +    _release_icholder = false;
   1.110 +  }
   1.111 +
   1.112 +  void set_interpreter_entry(address entry, Method* method) {
   1.113 +    _entry      = entry;
   1.114 +    _cached_value = (void*)method;
   1.115 +    _to_interpreter = true;
   1.116 +    _is_icholder = false;
   1.117 +    _is_optimized = true;
   1.118 +    _release_icholder = false;
   1.119 +  }
   1.120 +
   1.121 +  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
   1.122 +    _entry      = entry;
   1.123 +    _cached_value = (void*)icholder;
   1.124 +    _to_interpreter = true;
   1.125 +    _is_icholder = true;
   1.126 +    _is_optimized = false;
   1.127 +    _release_icholder = true;
   1.128 +  }
   1.129 +
   1.130 +  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
   1.131 +                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
   1.132 +  }
   1.133 +  ~CompiledICInfo() {
   1.134 +    // In rare cases the info is computed but not used, so release any
   1.135 +    // CompiledICHolder* that was created
   1.136 +    if (_release_icholder) {
   1.137 +      assert(_is_icholder, "must be");
   1.138 +      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
   1.139 +      icholder->claim();
   1.140 +      delete icholder;
   1.141 +    }
   1.142 +  }
   1.143 +};
   1.144 +
   1.145 +class CompiledIC: public ResourceObj {
   1.146 +  friend class InlineCacheBuffer;
   1.147 +  friend class ICStub;
   1.148 +
   1.149 +
   1.150 + private:
   1.151 +  NativeCall*   _ic_call;       // the call instruction
   1.152 +  NativeMovConstReg* _value;    // patchable value cell for this IC
   1.153 +  bool          _is_optimized;  // an optimized virtual call (i.e., no compiled IC)
   1.154 +
   1.155 +  CompiledIC(nmethod* nm, NativeCall* ic_call);
   1.156 +
   1.157 +  static bool is_icholder_entry(address entry);
   1.158 +
   1.159 +  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
   1.160 +  // to change an inline-cache. These change the underlying inline-cache directly. They *never* make
   1.161 +  // changes to a transition stub.
   1.162 +  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
   1.163 +  void set_ic_destination(ICStub* stub);
   1.164 +  void set_ic_destination(address entry_point) {
   1.165 +    assert(_is_optimized, "use set_ic_destination_and_value instead");
   1.166 +    internal_set_ic_destination(entry_point, false, NULL, false);
   1.167 +  }
   1.168 +  // This is only for use by ICStubs where the type of the value isn't known
   1.169 +  void set_ic_destination_and_value(address entry_point, void* value) {
   1.170 +    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
   1.171 +  }
   1.172 +  void set_ic_destination_and_value(address entry_point, Metadata* value) {
   1.173 +    internal_set_ic_destination(entry_point, false, value, false);
   1.174 +  }
   1.175 +  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
   1.176 +    internal_set_ic_destination(entry_point, false, value, true);
   1.177 +  }
   1.178 +
   1.179 +  // Reads the location of the transition stub. This will fail with an assertion, if no transition stub is
   1.180 +  // associated with the inline cache.
   1.181 +  address stub_address() const;
   1.182 +  bool is_in_transition_state() const;  // Use InlineCacheBuffer
   1.183 +
   1.184 + public:
   1.185 +  // conversion (machine PC to CompiledIC*)
   1.186 +  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
   1.187 +  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
   1.188 +  friend CompiledIC* CompiledIC_at(Relocation* call_site);
   1.189 +
   1.190 +  // This is used to release CompiledICHolder*s from nmethods that
   1.191 +  // are about to be freed.  The callsite might contain other stale
   1.192 +  // values of other kinds so it must be careful.
   1.193 +  static void cleanup_call_site(virtual_call_Relocation* call_site);
   1.194 +  static bool is_icholder_call_site(virtual_call_Relocation* call_site);
   1.195 +
   1.196 +  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
   1.197 +  // to a transition stub, it will read the values from the transition stub.
   1.198 +  void* cached_value() const;
   1.199 +  CompiledICHolder* cached_icholder() const {
   1.200 +    assert(is_icholder_call(), "must be");
   1.201 +    return (CompiledICHolder*) cached_value();
   1.202 +  }
   1.203 +  Metadata* cached_metadata() const {
   1.204 +    assert(!is_icholder_call(), "must be");
   1.205 +    return (Metadata*) cached_value();
   1.206 +  }
   1.207 +
   1.208 +  address ic_destination() const;
   1.209 +
   1.210 +  bool is_optimized() const   { return _is_optimized; }
   1.211 +
   1.212 +  // State
   1.213 +  bool is_clean() const;
   1.214 +  bool is_megamorphic() const;
   1.215 +  bool is_call_to_compiled() const;
   1.216 +  bool is_call_to_interpreted() const;
   1.217 +
   1.218 +  bool is_icholder_call() const;
   1.219 +
   1.220 +  address end_of_call() { return  _ic_call->return_address(); }
   1.221 +
   1.222 +  // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_lock
   1.223 +  // so you are guaranteed that no patching takes place. The same goes for verify.
   1.224 +  //
   1.225 +  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
   1.226 +  // to manipulate the inline cache in MT-unsafe ways.
   1.227 +  //
   1.228 +  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
   1.229 +  //
   1.230 +  void set_to_clean();  // Can only be called during a safepoint operation
   1.231 +  void set_to_monomorphic(CompiledICInfo& info);
   1.232 +
   1.233 +  // Returns true if successful and false otherwise. The call can fail if memory
   1.234 +  // allocation in the code cache fails.
   1.235 +  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
   1.236 +
   1.237 +  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
   1.238 +                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
   1.239 +
   1.240 +  // Location
   1.241 +  address instruction_address() const { return _ic_call->instruction_address(); }
   1.242 +
   1.243 +  // Misc
   1.244 +  void print()             PRODUCT_RETURN;
   1.245 +  void print_compiled_ic() PRODUCT_RETURN;
   1.246 +  void verify()            PRODUCT_RETURN;
   1.247 +};
   1.248 +
   1.249 +inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
   1.250 +  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
   1.251 +  c_ic->verify();
   1.252 +  return c_ic;
   1.253 +}
   1.254 +
   1.255 +inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
   1.256 +  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
   1.257 +  c_ic->verify();
   1.258 +  return c_ic;
   1.259 +}
   1.260 +
   1.261 +inline CompiledIC* CompiledIC_at(Relocation* call_site) {
   1.262 +  assert(call_site->type() == relocInfo::virtual_call_type ||
   1.263 +         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
   1.264 +  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
   1.265 +  c_ic->verify();
   1.266 +  return c_ic;
   1.267 +}
   1.268 +
   1.269 +
   1.270 +//-----------------------------------------------------------------------------
   1.271 +// The CompiledStaticCall represents a call to a static method in the compiled code.
   1.272 +//
   1.273 +// Transition diagram of a static call site is somewhat simpler than for an inline cache:
   1.274 +//
   1.275 +//
   1.276 +//           -----<----- Clean ----->-----
   1.277 +//          /                             \
   1.278 +//         /                               \
   1.279 +//    compiled code  <------------> interpreted code
   1.280 +//
   1.281 +//  Clean:            Calls directly to runtime method for fixup
   1.282 +//  Compiled code:    Calls directly to compiled code
   1.283 +//  Interpreted code: Calls to a stub that sets the Method* reference
   1.284 +//
   1.285 +//
   1.286 +class CompiledStaticCall;
   1.287 +
   1.288 +class StaticCallInfo {
   1.289 + private:
   1.290 +  address      _entry;          // Entrypoint
   1.291 +  methodHandle _callee;         // Callee (used when calling interpreter)
   1.292 +  bool         _to_interpreter; // call to interpreted method (otherwise compiled)
   1.293 +
   1.294 +  friend class CompiledStaticCall;
   1.295 + public:
   1.296 +  address      entry() const    { return _entry;  }
   1.297 +  methodHandle callee() const   { return _callee; }
   1.298 +};
   1.299 +
   1.300 +
   1.301 +class CompiledStaticCall: public NativeCall {
   1.302 +  friend class CompiledIC;
   1.303 +
   1.304 +  // Also used by CompiledIC
   1.305 +  void set_to_interpreted(methodHandle callee, address entry);
   1.306 +  bool is_optimized_virtual();
   1.307 +
   1.308 + public:
   1.309 +  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
   1.310 +  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
   1.311 +  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);
   1.312 +
   1.313 +  // Code
   1.314 +  static void emit_to_interp_stub(CodeBuffer &cbuf);
   1.315 +  static int to_interp_stub_size();
   1.316 +  static int reloc_to_interp_stub();
   1.317 +
   1.318 +  // State
   1.319 +  bool is_clean() const;
   1.320 +  bool is_call_to_compiled() const;
   1.321 +  bool is_call_to_interpreted() const;
   1.322 +
   1.323 +  // Clean static call (will force resolving on next use)
   1.324 +  void set_to_clean();
   1.325 +
   1.326 +  // Set state. The entry must be the same, as computed by compute_entry.
   1.327 +  // Computation and setting is split up, since the actions are separate during
   1.328 +  // an OptoRuntime::resolve_xxx.
   1.329 +  void set(const StaticCallInfo& info);
   1.330 +
   1.331 +  // Compute entry point given a method
   1.332 +  static void compute_entry(methodHandle m, StaticCallInfo& info);
   1.333 +
   1.334 +  // Stub support
   1.335 +  address find_stub();
   1.336 +  static void set_stub_to_clean(static_stub_Relocation* static_stub);
   1.337 +
   1.338 +  // Misc.
   1.339 +  void print()  PRODUCT_RETURN;
   1.340 +  void verify() PRODUCT_RETURN;
   1.341 +};
   1.342 +
   1.343 +
   1.344 +inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
   1.345 +  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
   1.346 +  st->verify();
   1.347 +  return st;
   1.348 +}
   1.349 +
   1.350 +inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
   1.351 +  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
   1.352 +  st->verify();
   1.353 +  return st;
   1.354 +}
   1.355 +
   1.356 +inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
   1.357 +  return compiledStaticCall_at(call_site->addr());
   1.358 +}
   1.359 +
   1.360 +#endif // SHARE_VM_CODE_COMPILEDIC_HPP

mercurial