/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*      _caller;    // List pointer for forming scope chains
  uint           _depth;     // One more than caller depth, or one.
  uint           _locoff;    // Offset to locals in input edge mapping
  uint           _stkoff;    // Offset to stack in input edge mapping
  uint           _monoff;    // Offset to monitors in input edge mapping
  uint           _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint           _endoff;    // Offset to end of input edge mapping
  uint           _sp;        // Java Expression Stack Pointer for this state
  int            _bci;       // Byte Code Index of this JVM point
  ReexecuteState _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*      _method;    // Method Pointer
  SafePointNode* _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource-marked or reset during the compile process.
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method
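
  // Illustrative usage sketch (not part of the original header; assumes a
  // Compile* C and an enclosing caller state are in scope):
  //   JVMState* jvms = new (C) JVMState(method, caller_jvms);
  // The placement new above allocates from C->comp_arena(), and the empty
  // operator delete makes individual deallocation a no-op; the whole arena
  // is reclaimed at once when the Compile object goes away.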

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint scloff() const { return _scloff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return stkoff() - locoff(); }
  int stk_size() const { return monoff() - stkoff(); }
  int mon_size() const { return scloff() - monoff(); }
  int scl_size() const { return endoff() - scloff(); }

  bool is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool is_scl(uint i) const { return scloff() <= i && i < endoff(); }

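  // Worked example (illustrative only, not in the original header): for a
  // state whose edge mapping starts at offset 5, with 3 locals, 2 live
  // stack slots (so sp() == 2), and no monitors or scalarized fields:
  //   locoff() == 5, stkoff() == 8, argoff() == stkoff() + sp() == 10,
  //   monoff() == scloff() == endoff() == 10,
  // giving loc_size() == 3, stk_size() == 2, mon_size() == scl_size() == 0.
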
  uint sp() const { return _sp; }
  int bci() const { return _bci; }
  bool should_reexecute() const { return _reexecute == Reexecute_True; }
  bool is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint depth() const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end() const;   // returns endoff of self
  uint debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors are stored as (boxNode, objNode) pairs.
  enum { logMonitorEdges = 1 };
  int nof_monitors() const { return mon_size() >> logMonitorEdges; }
  int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off) const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off) const { return (is_mon(off)
                                                && is_monitor_box(off))
                                             || (caller() && caller()->is_monitor_use(off)); }
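
  // Example (illustrative only): with logMonitorEdges == 1, each monitor
  // occupies two adjacent input edges, so for monitor index idx:
  //   monitor_box_offset(idx) == monoff() + 2*idx      (the BoxLockNode edge)
  //   monitor_obj_offset(idx) == monoff() + 2*idx + 1  (the locked object edge)
  // and nof_monitors() == mon_size() / 2.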

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_scloff(uint off) { _scloff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void set_map_deep(SafePointNode *map);     // reset map for all callers
  void adapt_position(int delta);            // Adapt offsets in the input array after adding an edge.
  int interpreter_frame_size() const;

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
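
// Illustrative usage sketch (not part of the original header), assuming a
// CallNode* call is in hand:
//   CallProjections projs;
//   call->extract_projections(&projs, true /* separate_io_proj */);
//   Node* ctl = projs.fallthrough_catchproj; // normal control successor
//   Node* mem = projs.fallthrough_memproj;   // normal memory successor
// extract_projections() is declared on CallNode below.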

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float cnt()           const { return _cnt; }
  CallGenerator* generator()  const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion. If calls
  // use MachConstantBase, it gets modified during matching. So when cloning
  // the node the JVMState must be cloned. Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method; // Method being direct called
public:
  const int _bci;    // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const { return _method; }
  void set_method(ciMethod *m) { _method = m; }
  void set_optimized_virtual(bool f) { _optimized_virtual = f; }
  bool is_optimized_virtual() const { return _optimized_virtual; }
  void set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool is_method_handle_invoke() const { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name; // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method,
                       int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints.
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// A CallLeafNode that either does not use floating point or uses it in the
// same manner as the generated code.
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished
// by the _is_io_use flag in the projection). This is needed when expanding the
// node in order to differentiate the uses of the projection on the normal
// control path from those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
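
  // For example (illustrative only, following the ALength comment above):
  // an instance allocation has no length, so its alloc_type() would be
  // built with t == Type::TOP, while an array allocation would pass an
  // integer length type such as TypeInt::INT.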

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary.
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP