Fri, 20 Dec 2013 13:51:14 +0100
8030863: PPC64: (part 220): ConstantTableBase for calls between args and jvms
Summary: Add ConstantTableBase node edge after parameters and before jvms. Adapt jvms offsets.
Reviewed-by: kvn
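
The key new hook in this file is JVMState::adapt_position(int delta), declared
below: once the extra ConstantTableBase edge is inserted between the parameters
and the debug info, every JVMState offset into the node's input array must
shift by the same amount. A minimal sketch of such a shift, assuming it simply
adds delta to all five offsets (the implementation itself is not part of this
header; only the declaration is):

    // Sketch: shift every offset of this JVMState by delta. Caller states
    // index the same input array, so the whole chain is shifted (assumption).
    void JVMState::adapt_position(int delta) {
      for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
        jvms->set_locoff(jvms->locoff() + delta);
        jvms->set_stkoff(jvms->stkoff() + delta);
        jvms->set_monoff(jvms->monoff() + delta);
        jvms->set_scloff(jvms->scloff() + delta);
        jvms->set_endoff(jvms->endoff() + delta);
      }
    }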
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class BoxLockNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};

//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};

//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site.  Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode.  Restores registers and
// unwinds stack.  Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool  is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req(outputStream *st = tty) const;
#endif
};

//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit.  (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute state
private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;
  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint              locoff() const { return _locoff; }
  uint              stkoff() const { return _stkoff; }
  uint              argoff() const { return _stkoff + _sp; }
  uint              monoff() const { return _monoff; }
  uint              scloff() const { return _scloff; }
  uint              endoff() const { return _endoff; }
  uint              oopoff() const { return debug_end(); }

  int            loc_size() const { return stkoff() - locoff(); }
  int            stk_size() const { return monoff() - stkoff(); }
  int            mon_size() const { return scloff() - monoff(); }
  int            scl_size() const { return endoff() - scloff(); }

  bool        is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool        is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool        is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool        is_scl(uint i) const { return scloff() <= i && i < endoff(); }
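
  // Worked example (hypothetical values, for illustration only): with
  // locoff() == 10, three locals, a max stack of 4 slots, sp() == 2 and
  // one monitor pair, the layout above gives
  //   stkoff() == 10 + 3    == 13   (loc_size() == 3)
  //   argoff() == 13 + sp() == 15   (outgoing args live at the stack top)
  //   monoff() == 13 + 4    == 17   (stk_size() == 4)
  //   scloff() == 17 + 2    == 19   (mon_size() == 2, one (box,obj) pair)
  //   endoff() == scloff()  == 19   (scl_size() == 0)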

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                  || (caller() && caller()->is_monitor_use(off)); }
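
  // Example (hypothetical offsets): with monoff() == 17, the second monitor
  // (idx == 1) occupies input edges 19 and 20:
  //   monitor_box_offset(1) == 17 + (1 << 1) + 0 == 19
  //   monitor_obj_offset(1) == 17 + (1 << 1) + 1 == 20
  // and is_monitor_box(19) holds because bit 0 of (19 - 17) is clear.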

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
                    // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller
  void      set_map_deep(SafePointNode *map);// reset map for all callers
  void      adapt_position(int delta);       // Adapt offsets in the input array after adding an edge.

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint           cmp( const Node &n ) const;
  virtual uint           size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

private:
  void verify_input(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    Node* n = in(idx);
    assert((!n->bottom_type()->isa_long() && !n->bottom_type()->isa_double()) ||
           in(idx + 1)->is_top(), "2nd half of long/double");
  }

public:
  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->locoff() + idx);
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->stkoff() + idx);
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    verify_input(jvms, jvms->argoff() + idx);
    return in(jvms->argoff() + idx);
  }
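
  // Example (hypothetical indices): with the offsets from the JVMState
  // example above (locoff() == 10, stkoff() == 13, argoff() == 15),
  //   local(jvms, 1)    reads input edge 11,
  //   stack(jvms, 0)    reads input edge 13,
  //   argument(jvms, 0) reads input edge 15.
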
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void  set_local(JVMState* jvms, uint idx, Node *c);

  void  set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void  set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0)  grow_stack(jvms, grow_by);
  }
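  // For instance (illustrative numbers), if jvms->stk_size() == 4, then
  // ensure_stack(jvms, 6) grows the map by 6 - 4 == 2 edges, while a
  // request for 4 or fewer slots is a no-op.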
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor ();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here.  (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static  bool           needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge relative index of a SafePoint node where
                     // states of the scalarized object fields are collected.
                     // It is relative to the last (youngest) jvms->_scloff.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

  uint first_index() const { return _first_index; }

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  uint first_index(JVMState* jvms) const {
    assert(jvms != NULL, "missed JVMS");
    return jvms->scloff() + _first_index;
  }
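  // E.g. (hypothetical values): with jvms->scloff() == 19 and _first_index
  // == 0, the first field state of this object is input edge 19 of the
  // safepoint. Keeping _first_index relative to scloff lets it survive a
  // shift of the absolute offsets, such as the one the new ConstantTableBase
  // edge introduces.
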
  uint n_fields() const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s".  But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj".  Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call".  Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void              dump_spec(outputStream *st) const;
#endif
};

// Simple container for the outgoing projections of a call.  Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};

class CallGenerator;

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called
  CallGenerator*  _generator;   // corresponding CallGenerator for some late inline calls

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN),
      _generator(NULL)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }
  CallGenerator*  generator()   const { return _generator; }

  void set_tf(const TypeFunc* tf)       { _tf = tf; }
  void set_entry_point(address p)       { _entry_point = p; }
  void set_cnt(float c)                 { _cnt = c; }
  void set_generator(CallGenerator* cg) { _generator = cg; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint        cmp( const Node &n ) const;
  virtual uint        size_of() const = 0;
  virtual void        calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node       *match( const ProjNode *proj, const Matcher *m );
  virtual uint        ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint?  Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool        guaranteed_safepoint()  { return true; }
  // For macro nodes, the JVMState gets modified during expansion.  If calls
  // use MachConstantBase, it gets modified during matching.  So when cloning
  // the node the JVMState must be cloned.  Default is not to clone.
  virtual void clone_jvms(Compile* C) {
    if (C->needs_clone_jvms() && jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  // Returns true if the call may modify n
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool                has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call, or the result projection if
  // there are several CheckCastPPs, or NULL if there is none.
  Node               *result_cast();
  // Does this node return a pointer?
  bool                returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else.  Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);
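  // Typical use (a sketch; field names as declared in CallProjections above):
  //   CallProjections projs;
  //   call->extract_projections(&projs, true /*separate_io_proj*/);
  //   // ... then rewire e.g. projs.fallthrough_memproj or projs.resproj ...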

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void        dump_req(outputStream *st = tty) const;
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention.  (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci),
      _optimized_virtual(false),
      _method_handle_invoke(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int   Opcode() const;
  ciMethod* method() const                 { return _method; }
  void  set_method(ciMethod *m)            { _method = m; }
  void  set_optimized_virtual(bool f)      { _optimized_virtual = f; }
  bool  is_optimized_virtual() const       { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f)   { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const    { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(Compile* C, const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
    if (C->eliminate_boxing() && (method != NULL) && method->is_boxing_method()) {
      init_flags(Flag_is_macro);
      C->add_macro_node(this);
    }
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
    _is_scalar_replaceable = false;
    _is_non_escaping = false;
  }
  const char *_name;            // Runtime wrapper name

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  bool is_boxing_method() const {
    return is_macro() && (method() != NULL) && method()->is_boxing_method();
  }
  // Later inlining modifies the JVMState, so we need to clone it
  // when the call node is cloned (because it is a macro node).
  virtual void  clone_jvms(Compile* C) {
    if ((jvms() != NULL) && is_boxing_method()) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  virtual int         Opcode() const;
#ifndef PRODUCT
  virtual void        dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci ) : CallJavaNode(tf,addr,method,bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int   Opcode() const;
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name;            // Printable name, if _method is NULL
  virtual int   Opcode() const;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int   Opcode() const;
  virtual bool  guaranteed_safepoint()  { return false; }
#ifndef PRODUCT
  virtual void  dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int   Opcode() const;
};

//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
//  AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
//  get expanded into a code sequence containing a call.  Unlike other CallNodes,
//  they have 2 memory projections and 2 i_o projections (which are distinguished by
//  the _is_io_use flag in the projection.)  This is needed when expanding the node in
//  order to differentiate the uses of the projection on the normal control path from
//  those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,    // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,    // size (in bytes) of the new object
    KlassNode,                        // type (maybe dynamic) of the obj.
    InitialTest,                      // slow-path test (may be constant)
    ALength,                          // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type(const Type* t) {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = t;  // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  // Result of Escape Analysis
  bool _is_scalar_replaceable;
  bool _is_non_escaping;

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool        guaranteed_safepoint()  { return false; }

  // allocations do not modify their arguments
  virtual bool        may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note:  This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note:  AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note:  Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for a non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *  obj_node() const       { return in(TypeFunc::Parms + 0); }
  Node *  box_node() const       { return in(TypeFunc::Parms + 1); }
  Node *  fastlock_node() const  { return in(TypeFunc::Parms + 2); }
  void    set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call.  This node takes 3 "parameters":
//    0  -  object to lock
//    1  -  a BoxLockNode
//    2  -  a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL;  // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;    // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;         // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3,fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0,fields);

    return TypeFunc::make(domain,range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool        guaranteed_safepoint()  { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void  clone_jvms(Compile* C) {
    if (jvms() != NULL) {
      set_jvms(jvms()->clone_deep(C));
      jvms()->set_map_deep(this);
    }
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool        guaranteed_safepoint()  { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP