Mon, 26 Nov 2012 17:25:11 -0800
7172640: C2: intrinsic implementations in LibraryCallKit should use argument() instead of pop()
Reviewed-by: kvn, jrose
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0, this);
    init_req(1, root);
  }
  virtual int   Opcode() const;
  virtual bool  pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int   Opcode() const;
  static  const TypeTuple *osr_domain();
};

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src, con) {
    init_class_id(Class_Parm);
  }
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint  hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool  depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  virtual uint  match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void  dump_req() const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int   Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint  hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool  depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint  match_edge(uint idx) const;
  virtual uint  ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void  dump_req() const;
#endif
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int   Opcode() const;
  virtual uint  match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int   Opcode() const;
  virtual uint  match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
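  //
  // Worked example (illustrative; not from the original source): with
  // locoff() == 10, three locals, a max stack of 6, and sp() == 2:
  //   stkoff() == 10 + 3 == 13    (loc_size() == 3)
  //   argoff() == 13 + 2 == 15    (== stkoff() + sp())
  //   monoff() == 13 + 6 == 19    (stk_size() == 6, arg_size() == 4)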
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            arg_size() const { return monoff() - argoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
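  // Illustrative note (not from the original source): since each monitor
  // occupies two debug edges (logMonitorEdges == 1), monitor 0 lives at
  // inputs monoff()+0 (box) and monoff()+1 (object), monitor 1 at
  // monoff()+2 and monoff()+3, and so on.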
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
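  // Illustrative sketch (not from the original source): reading incoming
  // call arguments positionally, e.g.
  //   Node* receiver = map->argument(jvms, 0);
  //   Node* value    = map->argument(jvms, 1);
  // is the style this changeset's title refers to: intrinsics fetch
  // arguments by index through argument() rather than popping them off
  // the expression stack.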
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O,     c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory,  c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void           set_next_exception(SafePointNode* n);
  bool           has_exceptions() const { return next_exception() != NULL; }
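
  // Illustrative sketch (not from the original source): a client would
  // typically drain the chain with a loop such as
  //   for (SafePointNode* ex = map->next_exception(); ex != NULL;
  //        ex = ex->next_exception()) { ... }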

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const ; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};
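
// Illustrative sketch (not from the original source): a pass doing such
// surgery typically fills this container via CallNode::extract_projections
// (declared below) and then rewires the recorded nodes, e.g.
//   CallProjections projs;
//   call->extract_projections(&projs, false /*separate_io_proj*/);
//   // ... replace uses of projs.resproj, projs.fallthrough_memproj, ...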

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void  clone_jvms() { } // default is not to clone

  // Returns true if the call may modify n
  virtual bool  may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool          has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void  dump_req() const;
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const               { return _method; }
  void  set_method(ciMethod *m)          { _method = m; }
  void  set_optimized_virtual(bool f)    { _optimized_virtual = f; }
  bool  is_optimized_virtual() const     { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const  { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name; // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call.  Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.)  This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms, // size (in bytes) of the new object
    KlassNode,                     // type (maybe dynamic) of the obj.
    InitialTest,                   // slow-path test (may be constant)
    ALength,                       // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
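
  // Illustrative note (not from the original source): the domain built
  // above lines up with the enum indices, so expansion code can read the
  // operands back positionally, e.g.
  //   Node* size  = alloc->in(AllocateNode::AllocSize);
  //   Node* klass = alloc->in(AllocateNode::KlassNode);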

  bool _is_scalar_replaceable; // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int  Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;}

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Return the corresponding storestore barrier (or null if none).
  // Walks out edges to find it...
  MemBarStoreStoreNode* storestore();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0, // Normal lock
    NonEscObj,   // Lock is used for a non-escaping object
    Coarsened,   // Lock was coarsened
    Nested       // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const          { return in(TypeFunc::Parms + 0); }
  Node *box_node() const          { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const     { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box)   { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested;    set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false;}

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP