Fri, 18 Oct 2013 10:50:17 +0200
8022783: Nashorn test fails with: assert(!def_outside->member(r))
Summary: Enables private copies of inputs for recent spill copies as well
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLGENERATOR_HPP
#define SHARE_VM_OPTO_CALLGENERATOR_HPP

#include "compiler/compileBroker.hpp"
#include "opto/callnode.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

//---------------------------CallGenerator-------------------------------------
// The subclasses of this class handle generation of ideal nodes for
// call sites and method entry points.

class CallGenerator : public ResourceObj {
 public:
  enum {
    xxxunusedxxx
  };

 private:
  ciMethod* _method;  // The method being called.

 protected:
  CallGenerator(ciMethod* method) : _method(method) {}

 public:
  // Accessors
  ciMethod* method() const { return _method; }

  // is_inline: At least some code implementing the method is copied here.
  virtual bool is_inline() const { return false; }
  // is_intrinsic: There's a method-specific way of generating the inline code.
  virtual bool is_intrinsic() const { return false; }
  // is_parse: Bytecodes implementing the specific method are copied here.
  virtual bool is_parse() const { return false; }
  // is_virtual: The call uses the receiver type to select or check the method.
  virtual bool is_virtual() const { return false; }
  // is_deferred: The decision whether to inline or not is deferred.
  virtual bool is_deferred() const { return false; }
  // is_predicted: Uses an explicit check against a predicted type.
  virtual bool is_predicted() const { return false; }
  // is_trap: Does not return to the caller.  (E.g., uncommon trap.)
  virtual bool is_trap() const { return false; }
  // does_virtual_dispatch: Should try inlining as normal method first.
  virtual bool does_virtual_dispatch() const { return false; }

  // is_late_inline: supports late conversion of the call into an inline version.
  virtual bool is_late_inline() const { return false; }
  // is_mh_late_inline: the same, but for method handle calls.
  virtual bool is_mh_late_inline() const { return false; }

  // For method handle calls: have we already tried inlining the call?
  virtual bool already_attempted() const { ShouldNotReachHere(); return false; }

  // Replace the call with an inline version of the code.
  virtual void do_late_inline() { ShouldNotReachHere(); }

  virtual CallStaticJavaNode* call_node() const { ShouldNotReachHere(); return NULL; }

  // Note: It is possible for a CG to be both inline and virtual.
  // (The hashCode intrinsic does a vtable check and an inlined fast path.)

  // Utilities:
  const TypeFunc* tf() const;

  // The given jvms has state and arguments for a call to my method.
  // Edges after jvms->argoff() carry all (pre-popped) argument values.
  //
  // Update the map with state and return values (if any) and return it.
  // The return values (0, 1, or 2) must be pushed on the map's stack,
  // and the sp of the jvms incremented accordingly.
  //
  // The jvms is returned on success.  Alternatively, a copy of the
  // given jvms, suitably updated, may be returned, in which case the
  // caller should discard the original jvms.
  //
  // The non-Parm edges of the returned map will contain updated global state,
  // and one or two edges before jvms->sp() will carry any return values.
  // Other map edges may contain locals or monitors, and should not
  // be changed in meaning.
  //
  // If the call traps, the returned map must have a control edge of top.
  // If the call can throw, the returned map must report has_exceptions().
  //
  // If the result is NULL, it means that this CallGenerator was unable
  // to handle the given call, and another CallGenerator should be consulted.
  virtual JVMState* generate(JVMState* jvms) = 0;
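
  // Illustrative sketch, not part of this header: a minimal subclass honoring
  // the generate() contract, modeled on the uncommon-trap generator in the
  // matching .cpp file.  The class name TrapGenerator is hypothetical.
  //
  //   class TrapGenerator : public CallGenerator {
  //    public:
  //     TrapGenerator(ciMethod* m) : CallGenerator(m) {}
  //     virtual bool is_trap() const { return true; }
  //     virtual JVMState* generate(JVMState* jvms) {
  //       GraphKit kit(jvms);
  //       // Deoptimize instead of calling.  uncommon_trap() kills the control
  //       // path (control becomes top), satisfying the trap rule above.
  //       kit.uncommon_trap(Deoptimization::make_trap_request(
  //           Deoptimization::Reason_unhandled, Deoptimization::Action_none));
  //       return kit.transfer_exceptions_into_jvms();
  //     }
  //   };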

  // How to generate a call site that is inlined:
  static CallGenerator* for_inline(ciMethod* m, float expected_uses = -1);
  // How to generate code for an on-stack replacement handler.
  static CallGenerator* for_osr(ciMethod* m, int osr_bci);

  // How to generate vanilla out-of-line call sites:
  static CallGenerator* for_direct_call(ciMethod* m, bool separate_io_projs = false);  // static, special
  static CallGenerator* for_virtual_call(ciMethod* m, int vtable_index);               // virtual, interface
  static CallGenerator* for_dynamic_call(ciMethod* m);                                 // invokedynamic

  static CallGenerator* for_method_handle_call(  JVMState* jvms, ciMethod* caller, ciMethod* callee, bool delayed_forbidden);
  static CallGenerator* for_method_handle_inline(JVMState* jvms, ciMethod* caller, ciMethod* callee, bool& input_not_const);

  // How to replace a direct call with a late inline version:
  static CallGenerator* for_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_mh_late_inline(ciMethod* caller, ciMethod* callee, bool input_not_const);
  static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
  static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);

  // How to make a call but defer the decision whether to inline or not.
  static CallGenerator* for_warm_call(WarmCallInfo* ci,
                                      CallGenerator* if_cold,
                                      CallGenerator* if_hot);

  // How to make a call that optimistically assumes a receiver type:
  static CallGenerator* for_predicted_call(ciKlass* predicted_receiver,
                                           CallGenerator* if_missed,
                                           CallGenerator* if_hit,
                                           float hit_prob);

  // How to make a call that optimistically assumes a MethodHandle target:
  static CallGenerator* for_predicted_dynamic_call(ciMethodHandle* predicted_method_handle,
                                                   CallGenerator* if_missed,
                                                   CallGenerator* if_hit,
                                                   float hit_prob);

  // How to make a call that gives up and goes back to the interpreter:
  static CallGenerator* for_uncommon_trap(ciMethod* m,
                                          Deoptimization::DeoptReason reason,
                                          Deoptimization::DeoptAction action);

  // Registry for intrinsics:
  static CallGenerator* for_intrinsic(ciMethod* m);
  static void register_intrinsic(ciMethod* m, CallGenerator* cg);
  static CallGenerator* for_predicted_intrinsic(CallGenerator* intrinsic,
                                                CallGenerator* cg);
  virtual Node* generate_predicate(JVMState* jvms) { return NULL; }

  virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }

  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    if (C->print_inlining()) {
      C->print_inlining(callee, inline_level, bci, msg);
    }
  }
};
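
// Illustrative sketch, not part of this header: a typical selection sequence
// tries the intrinsic registry first and falls back to an out-of-line call;
// if the chosen generator's generate() returns NULL, another generator must
// be consulted.  The variables callee, is_virtual, and vtable_index are
// hypothetical here.
//
//   CallGenerator* cg = CallGenerator::for_intrinsic(callee);
//   if (cg == NULL) {
//     cg = is_virtual ? CallGenerator::for_virtual_call(callee, vtable_index)
//                     : CallGenerator::for_direct_call(callee);
//   }
//   JVMState* new_jvms = cg->generate(jvms);
//   if (new_jvms == NULL) {
//     // This generator could not handle the site; try another CallGenerator.
//   }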

//------------------------InlineCallGenerator----------------------------------
class InlineCallGenerator : public CallGenerator {
 protected:
  InlineCallGenerator(ciMethod* method) : CallGenerator(method) {}

 public:
  virtual bool is_inline() const { return true; }
};

//---------------------------WarmCallInfo--------------------------------------
// A struct to collect information about a given call site.
// Helps sort call sites into "hot", "medium", and "cold".
// Participates in the queueing of "medium" call sites for possible inlining.
class WarmCallInfo : public ResourceObj {
 private:

  CallNode*      _call;    // The CallNode which may be inlined.
  CallGenerator* _hot_cg;  // CG for expanding the call node

  // These are the metrics we use to evaluate call sites:

  float _count;   // How often do we expect to reach this site?
  float _profit;  // How much time do we expect to save by inlining?
  float _work;    // How long do we expect the average call to take?
  float _size;    // How big do we expect the inlined code to be?

  float _heat;    // Combined score inducing total order on call sites.
  WarmCallInfo* _next;  // Next cooler call info in pending queue.

  // Count is the number of times this call site is expected to be executed.
  // Large count is favorable for inlining, because the extra compilation
  // work will be amortized more completely.

  // Profit is a rough measure of the amount of time we expect to save
  // per execution of this site if we inline it.  (1.0 == call overhead)
  // Large profit favors inlining.  Negative profit disables inlining.

  // Work is a rough measure of the amount of time a typical out-of-line
  // call from this site is expected to take.  (1.0 == call, no-op, return)
  // Small work is somewhat favorable for inlining, since methods with
  // short "hot" traces are more likely to inline smoothly.

  // Size is the number of graph nodes we expect this method to produce,
  // not counting the inlining of any further warm calls it may include.
  // Small size favors inlining, since small methods are more likely to
  // inline smoothly.  The size is estimated by examining the native code
  // if available.  The method bytecodes are also examined, assuming
  // empirically observed node counts for each kind of bytecode.

  // Heat is the combined "goodness" of a site's inlining.  If we were
  // omniscient, it would be the difference of two sums of future execution
  // times of code emitted for this site (amortized across multiple sites if
  // sharing applies).  The two sums are for versions of this call site with
  // and without inlining.

  // We approximate this mythical quantity by playing with averages,
  // rough estimates, and assumptions that history repeats itself.
  // The basic formula count * profit is heuristically adjusted
  // by looking at the expected compilation and execution times of
  // the inlined call.

  // Note: Some of these metrics may not be present in the final product,
  // but exist in development builds to experiment with inline policy tuning.

  // This heuristic framework does not model well the very significant
  // effects of multiple-level inlining.  It is possible to see no immediate
  // profit from inlining X->Y, but to get great profit from a subsequent
  // inlining X->Y->Z.

  // This framework also does not account well for the problem of N**2 code
  // size in a clique of mutually inlinable methods.
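
  // Illustrative sketch, not part of this header: compute_heat() in the
  // matching .cpp file combines the metrics around the count * profit core
  // described above.  The scaling below is a simplified assumption for
  // illustration, not the shipped formula:
  //
  //   float WarmCallInfo::compute_heat() const {
  //     // Discount big or slow callees so count * profit does not dominate.
  //     float size_factor = (_size <= 20.0f) ? 1.0f : 20.0f / _size;
  //     float work_factor = (_work <= 2.0f)  ? 1.0f : 2.0f  / _work;
  //     return _count * _profit * size_factor * work_factor;
  //   }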

  WarmCallInfo* next() const { return _next; }
  void set_next(WarmCallInfo* n) { _next = n; }

  static WarmCallInfo _always_hot;
  static WarmCallInfo _always_cold;

  // Constructor initialization of always_hot and always_cold:
  WarmCallInfo(float c, float p, float w, float s) {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count  = c;
    _profit = p;
    _work   = w;
    _size   = s;
    _heat   = 0;
  }

 public:
  // Because WarmCallInfo objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource-marked or reset during the compile process.
  void* operator new(size_t x, Compile* C) throw() { return C->comp_arena()->Amalloc(x); }
  void  operator delete(void*) { }  // fast deallocation
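
  // Illustrative sketch, not part of this header: with the placement
  // operator above, instances live in the compile-lifetime arena.  The
  // variable C is assumed to be the current Compile object:
  //
  //   WarmCallInfo* wci = new (C) WarmCallInfo();  // allocated in C->comp_arena()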

  static WarmCallInfo* always_hot();
  static WarmCallInfo* always_cold();

  WarmCallInfo() {
    _call   = NULL;
    _hot_cg = NULL;
    _next   = NULL;
    _count = _profit = _work = _size = _heat = 0;
  }

  CallNode* call()   const { return _call; }
  float     count()  const { return _count; }
  float     size()   const { return _size; }
  float     work()   const { return _work; }
  float     profit() const { return _profit; }
  float     heat()   const { return _heat; }

  void set_count(float x)  { _count = x; }
  void set_size(float x)   { _size = x; }
  void set_work(float x)   { _work = x; }
  void set_profit(float x) { _profit = x; }
  void set_heat(float x)   { _heat = x; }

  // Load initial heuristics from profiles, etc.
  // The heuristics can be tweaked further by the caller.
  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);

  static float MAX_VALUE() { return +1.0e10; }
  static float MIN_VALUE() { return -1.0e10; }

  float compute_heat() const;

  void set_call(CallNode* call)      { _call = call; }
  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }

  // Do not queue very hot or very cold calls.
  // Make very cold ones out of line immediately.
  // Inline very hot ones immediately.
  // These queries apply various tunable limits
  // to the above metrics in a systematic way.
  // Test for coldness before testing for hotness.
  bool is_cold() const;
  bool is_hot() const;

  // Force a warm call to be hot.  This worklists the call node for inlining.
  void make_hot();

  // Force a warm call to be cold.  This worklists the call node for out-of-lining.
  void make_cold();

  // A reproducible total ordering, in which heat is the major key.
  bool warmer_than(WarmCallInfo* that);

  // List management.  These methods are called with the list head,
  // and return the new list head, inserting or removing the receiver.
  WarmCallInfo* insert_into(WarmCallInfo* head);
  WarmCallInfo* remove_from(WarmCallInfo* head);
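
  // Illustrative sketch, not part of this header: a heat-ordered pending
  // queue can be maintained with the queries and list methods above, testing
  // coldness before hotness as directed.  The variables queue_head and wci
  // are hypothetical:
  //
  //   wci->set_heat(wci->compute_heat());
  //   if (wci->is_cold())     wci->make_cold();   // out-of-line immediately
  //   else if (wci->is_hot()) wci->make_hot();    // inline immediately
  //   else                    queue_head = wci->insert_into(queue_head);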

#ifndef PRODUCT
  void print() const;
  void print_all() const;
  int  count_all() const;
#endif
};

#endif // SHARE_VM_OPTO_CALLGENERATOR_HPP