src/share/vm/code/compiledIC.hpp

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_CODE_COMPILEDIC_HPP
#define SHARE_VM_CODE_COMPILEDIC_HPP

#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#ifdef TARGET_ARCH_x86
# include "nativeInst_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "nativeInst_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "nativeInst_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "nativeInst_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "nativeInst_ppc.hpp"
#endif
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1] --<--  Clean  -->---  [1]
//            /       (null)      \
//           /                     \      /-<-\
//          /          [2]          \    /     \
//      Interpreted  ---------> Monomorphic     | [3]
//  (CompiledICHolder*)            (Klass*)     |
//          \                        /   \     /
//       [4] \                      / [4] \->-/
//            \->-  Megamorphic  -<-/
//                  (Method*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. The receiver is found from the debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only the entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to a megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) whenever a
// transition would otherwise be MT-unsafe; such changes are routed through a transition stub.
//
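// Example (sketch, not a prescribed API flow): a caller typically maps a
// machine call site to a CompiledIC and queries its state while holding
// CompiledIC_lock (MutexLocker and CompiledIC_lock come from
// runtime/mutexLocker.hpp), so no transition can race with the inspection.
// nm and call_site_addr are illustrative names:
//
//   {
//     MutexLocker ml(CompiledIC_lock);
//     CompiledIC* ic = CompiledIC_at(nm, call_site_addr);
//     if (ic->is_clean()) {
//       // the next execution triggers resolution and the initial fixup [1]
//     } else if (ic->is_megamorphic()) {
//       // the receiver cell now holds a Method* (see the diagram above)
//     }
//   }
//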
class CompiledIC;
class ICStub;

class CompiledICInfo : public StackObj {
 private:
  address _entry;            // entry point for call
  void*   _cached_value;     // Value of cached_value (either in stub or inline cache)
  bool    _is_icholder;      // Is the cached value a CompiledICHolder*
  bool    _is_optimized;     // it is an optimized virtual call (i.e., can be statically bound)
  bool    _to_interpreter;   // Call is to the interpreter
  bool    _release_icholder;
 public:
  address entry() const { return _entry; }
  Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
  CompiledICHolder* claim_cached_icholder() {
    assert(_is_icholder, "");
    assert(_cached_value != NULL, "must be non-NULL");
    _release_icholder = false;
    CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
    icholder->claim();
    return icholder;
  }
  bool is_optimized() const   { return _is_optimized; }
  bool to_interpreter() const { return _to_interpreter; }

  void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
    _entry            = entry;
    _cached_value     = (void*)klass;
    _to_interpreter   = false;
    _is_icholder      = false;
    _is_optimized     = is_optimized;
    _release_icholder = false;
  }

  void set_interpreter_entry(address entry, Method* method) {
    _entry            = entry;
    _cached_value     = (void*)method;
    _to_interpreter   = true;
    _is_icholder      = false;
    _is_optimized     = true;
    _release_icholder = false;
  }

  void set_icholder_entry(address entry, CompiledICHolder* icholder) {
    _entry            = entry;
    _cached_value     = (void*)icholder;
    _to_interpreter   = true;
    _is_icholder      = true;
    _is_optimized     = false;
    _release_icholder = true;
  }

  CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false),
                    _to_interpreter(false), _is_optimized(false), _release_icholder(false) {
  }
  ~CompiledICInfo() {
    // In rare cases the info is computed but not used, so release any
    // CompiledICHolder* that was created
    if (_release_icholder) {
      assert(_is_icholder, "must be");
      CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
      icholder->claim();
      delete icholder;
    }
  }
};
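
// Example (sketch): CompiledICInfo is the hand-off between computing a new
// target and installing it at the call site. A resolver-style caller (the
// variable names callee_method, receiver_klass, caller_nm and call_site_addr
// are illustrative) might do:
//
//   CompiledICInfo info;
//   CompiledIC::compute_monomorphic_entry(callee_method, receiver_klass,
//                                         is_optimized, static_bound, info, CHECK);
//   CompiledIC* ic = CompiledIC_at(caller_nm, call_site_addr);
//   ic->set_to_monomorphic(info);
//
// If the info is computed but never installed, ~CompiledICInfo releases any
// CompiledICHolder* it created, so splitting compute from set cannot leak.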

class CompiledIC: public ResourceObj {
  friend class InlineCacheBuffer;
  friend class ICStub;

 private:
  NativeCall*        _ic_call;      // the call instruction
  NativeMovConstReg* _value;        // patchable value cell for this IC
  bool               _is_optimized; // an optimized virtual call (i.e., no compiled IC)

  CompiledIC(nmethod* nm, NativeCall* ic_call);

  static bool is_icholder_entry(address entry);

  // low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
  // to change an inline-cache. These change the underlying inline cache directly. They *never* make
  // changes to a transition stub.
  void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
  void set_ic_destination(ICStub* stub);
  void set_ic_destination(address entry_point) {
    assert(_is_optimized, "use set_ic_destination_and_value instead");
    internal_set_ic_destination(entry_point, false, NULL, false);
  }
  // This is only for use by ICStubs, where the type of the value isn't known
  void set_ic_destination_and_value(address entry_point, void* value) {
    internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
  }
  void set_ic_destination_and_value(address entry_point, Metadata* value) {
    internal_set_ic_destination(entry_point, false, value, false);
  }
  void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
    internal_set_ic_destination(entry_point, false, value, true);
  }

  // Reads the location of the transition stub. This will fail with an assertion if no
  // transition stub is associated with the inline cache.
  address stub_address() const;
  bool is_in_transition_state() const;  // Use InlineCacheBuffer

 public:
  // conversion (machine PC to CompiledIC*)
  friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
  friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
  friend CompiledIC* CompiledIC_at(Relocation* call_site);

  // This is used to release CompiledICHolder*s from nmethods that
  // are about to be freed. The call site might contain stale values
  // of other kinds, so it must be careful.
  static void cleanup_call_site(virtual_call_Relocation* call_site);
  static bool is_icholder_call_site(virtual_call_Relocation* call_site);

  // Return the cached_metadata/destination associated with this inline cache. If the cache currently points
  // to a transition stub, it will read the values from the transition stub.
  void* cached_value() const;
  CompiledICHolder* cached_icholder() const {
    assert(is_icholder_call(), "must be");
    return (CompiledICHolder*) cached_value();
  }
  Metadata* cached_metadata() const {
    assert(!is_icholder_call(), "must be");
    return (Metadata*) cached_value();
  }

  address ic_destination() const;

  bool is_optimized() const { return _is_optimized; }

  // State
  bool is_clean() const;
  bool is_megamorphic() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  bool is_icholder_call() const;

  address end_of_call() { return _ic_call->return_address(); }

  // MT-safe patching of inline caches. Note: it is only safe to call is_xxx when holding the
  // CompiledIC_lock, so you are guaranteed that no patching takes place. The same goes for verify.
  //
  // Note: We do not provide any direct access to the stub code, to prevent parts of the code
  // from manipulating the inline cache in MT-unsafe ways.
  //
  // They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
  //
  void set_to_clean();  // Can only be called during a safepoint operation
  void set_to_monomorphic(CompiledICInfo& info);

  // Returns true if successful and false otherwise. The call can fail if memory
  // allocation in the code cache fails.
  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);

  static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
                                        bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);

  // Location
  address instruction_address() const { return _ic_call->instruction_address(); }

  // Misc
  void print()             PRODUCT_RETURN;
  void print_compiled_ic() PRODUCT_RETURN;
  void verify()            PRODUCT_RETURN;
};

inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
  CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
  c_ic->verify();
  return c_ic;
}

inline CompiledIC* CompiledIC_at(Relocation* call_site) {
  assert(call_site->type() == relocInfo::virtual_call_type ||
         call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
  CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
  c_ic->verify();
  return c_ic;
}
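
// Example (sketch): call sites are typically discovered by walking an
// nmethod's relocation entries; virtual_call and opt_virtual_call entries
// map directly to CompiledICs via CompiledIC_at(Relocation*) above.
// RelocIterator comes from code/relocInfo.hpp; nm is an illustrative
// nmethod*:
//
//   RelocIterator iter(nm);
//   while (iter.next()) {
//     if (iter.type() == relocInfo::virtual_call_type ||
//         iter.type() == relocInfo::opt_virtual_call_type) {
//       CompiledIC* ic = CompiledIC_at(iter.reloc());
//       ic->verify();
//     }
//   }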


//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in compiled code.
//
// The transition diagram of a static call site is somewhat simpler than that of an inline cache:
//
//
//           -----<----- Clean ----->-----
//          /                             \
//         /                               \
//    compiled code <------------> interpreted code
//
// Clean:            Calls directly to runtime method for fixup
// Compiled code:    Calls directly to compiled code
// Interpreted code: Calls to a stub that sets the Method* reference
//
//
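// Example (sketch): patching a static call site mirrors the inline-cache
// flow, with StaticCallInfo carrying the result of compute_entry to set().
// callee_method and call_site_addr are illustrative names:
//
//   StaticCallInfo info;
//   CompiledStaticCall::compute_entry(callee_method, info);
//   CompiledStaticCall* csc = compiledStaticCall_at(call_site_addr);
//   csc->set(info);         // entry must match what compute_entry produced
//
//   csc->set_to_clean();    // later: force re-resolution on the next call
//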
class CompiledStaticCall;

class StaticCallInfo {
 private:
  address      _entry;          // Entrypoint
  methodHandle _callee;         // Callee (used when calling interpreter)
  bool         _to_interpreter; // call to interpreted method (otherwise compiled)

  friend class CompiledStaticCall;
 public:
  address      entry() const  { return _entry;  }
  methodHandle callee() const { return _callee; }
};


class CompiledStaticCall: public NativeCall {
  friend class CompiledIC;

  // Also used by CompiledIC
  void set_to_interpreted(methodHandle callee, address entry);
  bool is_optimized_virtual();

 public:
  friend CompiledStaticCall* compiledStaticCall_before(address return_addr);
  friend CompiledStaticCall* compiledStaticCall_at(address native_call);
  friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site);

  // Code
  static void emit_to_interp_stub(CodeBuffer &cbuf);
  static int to_interp_stub_size();
  static int reloc_to_interp_stub();

  // State
  bool is_clean() const;
  bool is_call_to_compiled() const;
  bool is_call_to_interpreted() const;

  // Clean static call (will force resolving on next use)
  void set_to_clean();

  // Set state. The entry must be the same as computed by compute_entry.
  // Computation and setting are split up, since the actions are separate during
  // an OptoRuntime::resolve_xxx.
  void set(const StaticCallInfo& info);

  // Compute entry point given a method
  static void compute_entry(methodHandle m, StaticCallInfo& info);

  // Stub support
  address find_stub();
  static void set_stub_to_clean(static_stub_Relocation* static_stub);

  // Misc.
  void print()  PRODUCT_RETURN;
  void verify() PRODUCT_RETURN;
};


inline CompiledStaticCall* compiledStaticCall_before(address return_addr) {
  CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr);
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(address native_call) {
  CompiledStaticCall* st = (CompiledStaticCall*)native_call;
  st->verify();
  return st;
}

inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) {
  return compiledStaticCall_at(call_site->addr());
}

#endif // SHARE_VM_CODE_COMPILEDIC_HPP
