src/share/vm/opto/callGenerator.hpp


/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
duke@435 24
#ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
#define SHARE_VM_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class Parse;

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod* _method;                    // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod* method() const { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool is_inline() const { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool is_intrinsic() const { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool is_parse() const { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool is_virtual() const { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool is_deferred() const { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool is_predicted() const { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool is_trap() const { return false; }
  // does_virtual_dispatch: Should try inlining as normal method first.
  virtual bool does_virtual_dispatch() const { return false; }

  // is_late_inline: supports conversion of call into an inline
  virtual bool is_late_inline() const { return false; }
  // same but for method handle calls
  virtual bool is_mh_late_inline() const { return false; }
  virtual bool is_string_late_inline() const { return false; }

  // for method handle calls: have we tried inlining the call already?
  virtual bool already_attempted() const { ShouldNotReachHere(); return false; }

  // Replace the call with an inline version of the code
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  // Note:  It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc* tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) = 0;
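
  // A minimal sketch of how a subclass might satisfy this contract
  // (hypothetical code, not part of HotSpot; compare the concrete
  // generators in callGenerator.cpp):
  //
  //   class TrivialCallGenerator : public CallGenerator {
  //   public:
  //     TrivialCallGenerator(ciMethod* m) : CallGenerator(m) {}
  //     virtual JVMState* generate(JVMState* jvms, Parse* parent_parser) {
  //       GraphKit kit(jvms);
  //       // ... build the call node, push 0-2 return values on the stack ...
  //       // Hand pending exception states back to the caller so that the
  //       // returned map reports has_exceptions() when the call can throw.
  //       return kit.transfer_exceptions_into_jvms();
  //     }
  //   };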

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);   // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);                // virtual, interface
  static CallGenerator* for_dynamic_call(ciMethod* m);                                  // invokedynamic

  static CallGenerator* for_method_handle_call(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);

  // How to replace a direct call with an inline version of the code
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
  static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);
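
  // Illustrative composition (hypothetical names, not code from this file):
  // a profiled monomorphic site might inline the expected receiver's method
  // and fall back to a true virtual dispatch when the type check misses.
  //
  //   CallGenerator* miss_cg = CallGenerator::for_virtual_call(callee, vtable_index);
  //   CallGenerator* hit_cg  = CallGenerator::for_inline(callee);
  //   CallGenerator* cg      = CallGenerator::for_predicted_call(predicted_klass,
  //                                                              miss_cg, hit_cg, 0.9f);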

  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void           register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
                                                CallGenerator* cg);
  virtual Node* generate_predicate(JVMState* jvms) { return NULL; }

  virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (C->print_inlining()) {
      C->print_inlining(callee, inline_level, bci, msg);
    }
  }
};


//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool is_inline() const { return true; }
};


//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float          _count;   // How often do we expect to reach this site?
  float          _profit;  // How much time do we expect to save by inlining?
  float          _work;    // How long do we expect the average call to take?
  float          _size;    // How big do we expect the inlined code to be?

  float          _heat;    // Combined score inducing total order on call sites.
  WarmCallInfo*  _next;    // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times
  // of the inlined call.
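  //
  // As a sketch only (the real computation is WarmCallInfo::compute_heat()
  // in callGenerator.cpp; this illustrative helper is not HotSpot code):
  //
  //   float heat_estimate(float count, float profit, float work, float size) {
  //     if (profit <= 0.0f)  return 0.0f;          // negative profit disables inlining
  //     float benefit = count * profit;            // time saved across all executions
  //     return benefit * (work / (work + size));   // damp by relative code-size cost
  //   }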

  // Note:  Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework does not take well into account the problem of N**2 code
  // size in a clique of mutually inlinable methods.

  WarmCallInfo* next() const           { return _next; }
  void set_next(WarmCallInfo* n)       { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold
  WarmCallInfo(float c, float p, float w, float s) {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = c;
    _profit = p;
    _work = w;
    _size = s;
    _heat = 0;
  }

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process.
  void* operator new(size_t x, Compile* C) throw() { return C->comp_arena()->Amalloc(x); }
  void  operator delete(void*) { }  // fast deallocation
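
  // Illustrative use of the placement new above (hypothetical call site):
  // the object lives in the compile-lifetime arena, and operator delete is
  // deliberately a no-op because the arena reclaims all storage at once.
  //
  //   WarmCallInfo* ci = new (C) WarmCallInfo();   // C is the current Compile*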

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call = NULL;
    _hot_cg = NULL;
    _next = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call()   const { return _call; }
  float     count()  const { return _count; }
  float     size()   const { return _size; }
  float     work()   const { return _work; }
  float     profit() const { return _profit; }
  float     heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);
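
  // Illustrative use (hypothetical caller): always reassign the head, since
  // the receiver may become the new head, or the old head may be removed.
  //
  //   head = ci->insert_into(head);   // queue a warm call, ordered by heat
  //   head = ci->remove_from(head);   // take it out again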

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int count_all() const;
#endif
};

#endif // SHARE_VM_OPTO_CALLGENERATOR_HPP
