Sat, 07 Jan 2012 13:26:43 -0800
7125896: Eliminate nested locks
Summary: Nested lock elimination is performed before lock node expansion by looking for an outer lock of the same object.
Reviewed-by: never, twisti
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
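//
// Usage sketch (illustrative only): the youngest state hangs off a call or
// safepoint; walking caller() re-traces the inlining chain out to the root.
//
//   for (JVMState* jvms = call->jvms(); jvms != NULL; jvms = jvms->caller()) {
//     if (jvms->has_method())   // the root state has a NULL method
//       tty->print_cr("depth %d, bci %d", jvms->depth(), jvms->bci());
//   }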
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method
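
  // Allocation sketch (illustrative only): states are arena-allocated via the
  // placement operator new above, so they are never individually freed.
  //
  //   JVMState* jvms = new (C) JVMState(method, caller_jvms); // lives in C->comp_arena()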

  // Access functions for the JVM
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return _stkoff - _locoff; }
  int            stk_size() const { return _monoff - _stkoff; }
  int            mon_size() const { return _scloff - _monoff; }
  int            scl_size() const { return _endoff - _scloff; }

  bool        is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool        is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool        is_mon(uint i) const { return i >= _monoff && i < _scloff; }
  bool        is_scl(uint i) const { return i >= _scloff && i < _endoff; }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }
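
  // Layout sketch (illustrative only): with logMonitorEdges == 1 every monitor
  // occupies two consecutive input edges of the map, a (box, obj) pair:
  //
  //   monitor_box_offset(idx) == monoff() + 2*idx      // BoxLockNode edge
  //   monitor_obj_offset(idx) == monoff() + 2*idx + 1  // locked-object edge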

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }
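
  // Access sketch (illustrative only): debug info is read through the JVMState
  // that describes this map, and the accessors assert that the two match.
  //
  //   JVMState* jvms = sfpt->jvms();
  //   Node* receiver = sfpt->local(jvms, 0);       // local slot 0 of youngest scope
  //   Node* mon_obj  = sfpt->monitor_obj(jvms, 0); // first locked object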

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }
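
  // Iteration sketch (illustrative only): walk the chain of recorded
  // exception states hanging off a map.
  //
  //   for (SafePointNode* ex = map->next_exception(); ex != NULL;
  //        ex = ex->next_exception()) {
  //     // each 'ex' is an exception state bubbling out of a subgraph
  //   }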

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;        // Function type
  address      _entry_point;  // Address of method being called
  float        _cnt;          // Estimate of number of times called

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()        const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float   cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void  clone_jvms() { } // default is not to clone

  // Returns true if the call may modify n
  virtual bool  may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool          has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
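
  // Usage sketch (illustrative only):
  //   Node* res = call->result_cast();  // NULL when there is no unique cast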

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);
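
  // Usage sketch (illustrative only): gather the projections before doing
  // surgery on the call.
  //
  //   CallProjections projs;
  //   call->extract_projections(&projs, true /*separate_io_proj*/);
  //   if (projs.fallthrough_catchproj != NULL) {
  //     // rewire normal-path control users here
  //   }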

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void  dump_req() const;
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const               { return _method; }
  void  set_method(ciMethod *m)          { _method = m; }
  void  set_optimized_virtual(bool f)    { _optimized_virtual = f; }
  bool  is_optimized_virtual() const     { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const  { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name;    // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);
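
  // Usage sketch (illustrative only): an uncommon trap is a CallStaticJava
  // into the uncommon-trap blob; a nonzero request code identifies it.
  //
  //   int req = call->uncommon_trap_request();
  //   if (req != 0) {
  //     Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(req);
  //   }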

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;    // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,  // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  bool _is_scalar_replaceable;  // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int  Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);
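
  // Usage sketch (illustrative only): recover the allocation behind a pointer
  // and read its inputs via the enum above.
  //
  //   intptr_t offset = 0;
  //   AllocateNode* alloc = AllocateNode::Ideal_allocation(ptr, phase, offset);
  //   Node* klass = (alloc != NULL) ? alloc->in(AllocateNode::KlassNode) : NULL;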

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Return the corresponding storestore barrier (or null if none).
  // Walks out edges to find it...
  MemBarStoreStoreNode* storestore();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter *_counter;
#endif
854 // helper functions for lock elimination
855 //
857 bool find_matching_unlock(const Node* ctrl, LockNode* lock,
858 GrowableArray<AbstractLockNode*> &lock_ops);
859 bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
860 GrowableArray<AbstractLockNode*> &lock_ops);
861 bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
862 GrowableArray<AbstractLockNode*> &lock_ops);
863 LockNode *find_matching_lock(UnlockNode* unlock);
865 // Update the counter to indicate that this lock was eliminated.
866 void set_eliminated_lock_counter() PRODUCT_RETURN;
868 public:
869 AbstractLockNode(const TypeFunc *tf)
870 : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
871 _kind(Regular)
872 {
873 #ifndef PRODUCT
874 _counter = NULL;
875 #endif
876 }

  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }
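
  // Elimination sketch (illustrative only): eliminated locks are marked, not
  // removed; macro expansion later skips them.  E.g. for Java source
  //
  //   synchronized (obj) { synchronized (obj) { ... } }
  //
  // the inner LockNode can be set_nested() once an outer lock of the same
  // object is found (see LockNode::is_nested_lock_region() below).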

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP