src/share/vm/opto/callGenerator.hpp

author:      shade
date:        Fri, 27 Sep 2013 11:52:24 +0400
changeset:   5798:29bdcf12457c
parent:      5763:1b64d46620a3
child:       5981:3213ba4d3dff
permissions: -rw-r--r--

8014447: Object.hashCode intrinsic breaks inline caches
Summary: Try to inline as normal method first, then fall back to intrinsic.
Reviewed-by: kvn, twisti

/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
#define SHARE_VM_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod*             _method;                // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod*         method() const              { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool      is_inline() const           { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool      is_intrinsic() const        { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool      is_parse() const            { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool      is_virtual() const          { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool      is_deferred() const         { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool      is_predicted() const        { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool      is_trap() const             { return false; }
  // does_virtual_dispatch: The call site selects the target method at runtime;
  // try inlining it as a normal method first, then fall back to the intrinsic.
  virtual bool      does_virtual_dispatch() const { return false; }

  // is_late_inline: supports conversion of the call into an inline
  virtual bool      is_late_inline() const      { return false; }
  // same but for method handle calls
  virtual bool      is_mh_late_inline() const   { return false; }

  // for method handle calls: have we tried inlining the call already?
  virtual bool already_attempted() const { ShouldNotReachHere(); return false; }

  // Replace the call with an inline version of the code
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  // Note:  It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc*   tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;

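  // Example (an illustrative sketch, not part of this interface; the class
  // name is hypothetical): a generator that never returns to the caller
  // could honor the generate() contract roughly as follows, mirroring the
  // uncommon-trap generators in callGenerator.cpp:
  //
  //   class MyTrapGenerator : public CallGenerator {
  //   public:
  //     MyTrapGenerator(ciMethod* m) : CallGenerator(m) {}
  //     virtual bool is_trap() const { return true; }
  //     virtual JVMState* generate(JVMState* jvms) {
  //       GraphKit kit(jvms);
  //       // Deoptimize unconditionally; control becomes top, so no
  //       // return value needs to be pushed on the map's stack.
  //       kit.uncommon_trap(Deoptimization::make_trap_request(
  //                           Deoptimization::Reason_unhandled,
  //                           Deoptimization::Action_none));
  //       return kit.transfer_exceptions_into_jvms();
  //     }
  //   };
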
  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);   // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);  // virtual, interface
  static CallGenerator* for_dynamic_call(ciMethod* m);   // invokedynamic

  static CallGenerator* for_method_handle_call(  JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);

  // How to replace a direct call with an inline version:
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
  static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

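  // Example (illustrative; the callee, vtable index, predicted klass, and
  // probability are hypothetical): a type-profile-guided inline with a
  // virtual call as the fallback might be composed as:
  //
  //   CallGenerator* if_hit    = CallGenerator::for_inline(callee);
  //   CallGenerator* if_missed = CallGenerator::for_virtual_call(callee, vtable_index);
  //   CallGenerator* cg = CallGenerator::for_predicted_call(predicted_klass,
  //                                                         if_missed, if_hit, hit_prob);
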
  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
                                                CallGenerator* cg);
  virtual Node* generate_predicate(JVMState* jvms) { return NULL; }
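
  // Example (illustrative; intrinsic_cg and callee are hypothetical): an
  // intrinsic guarded by a runtime predicate, with a normal inline as the
  // slow path, could be composed as:
  //
  //   CallGenerator* slow_cg = CallGenerator::for_inline(callee);
  //   CallGenerator* cg = CallGenerator::for_predicted_intrinsic(intrinsic_cg, slow_cg);
  //
  // At parse time the wrapper consults intrinsic_cg->generate_predicate(jvms);
  // when the predicate test fails at runtime, control flows to slow_cg instead.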

  virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (C->print_inlining()) {
      C->print_inlining(callee, inline_level, bci, msg);
    }
  }
};


//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool      is_inline() const           { return true; }
};


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*     _call;   // The CallNode which may be inlined.
  CallGenerator* _hot_cg;// CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float         _count;  // How often do we expect to reach this site?
  float         _profit; // How much time do we expect to save by inlining?
  float         _work;   // How long do we expect the average call to take?
  float         _size;   // How big do we expect the inlined code to be?

  float         _heat;   // Combined score inducing total order on call sites.
  WarmCallInfo* _next;   // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times of
  // the inlined call.

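  // Worked example (the numbers are illustrative, not from the source): a
  // site with count = 1000 and profit = 2.0 starts from a base score of
  // count * profit = 2000, which compute_heat() then adjusts for the
  // compilation cost implied by size and the execution time implied by
  // work, before the result is compared against the hot/cold thresholds.
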
  // Note:  Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework does not take well into account the problem of N**2 code
  // size in a clique of mutually inlinable methods.

  WarmCallInfo* next() const          { return _next; }
  void          set_next(WarmCallInfo* n)  { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold
  WarmCallInfo(float c, float p, float w, float s) {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = c;
    _profit = p;
    _work = w;
    _size = s;
    _heat = 0;
  }

 public:
  // Because WarmInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { }                  // fast deallocation
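  // Example usage (illustrative): with a Compile* C in scope, an instance
  // is placement-allocated into the compile arena as
  //   WarmCallInfo* wci = new (C) WarmCallInfo();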

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call() const { return _call; }
  float count()    const { return _count; }
  float size()     const { return _size; }
  float work()     const { return _work; }
  float profit()   const { return _profit; }
  float heat()     const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);
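
  // Example (illustrative; the head and wci variables are hypothetical):
  // maintaining the pending queue in heat order from the caller's side:
  //
  //   WarmCallInfo* head = NULL;
  //   head = wci->insert_into(head);   // enqueue, keeping warmer entries first
  //   ...
  //   head = wci->remove_from(head);   // dequeue once the decision is made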

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};

#endif // SHARE_VM_OPTO_CALLGENERATOR_HPP
