/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_NODE_HPP
#define SHARE_VM_OPTO_NODE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class StoreVectorNode;
class VectorSet;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.
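//
// For example (an illustrative sketch, not literal compiler output): the
// expression "a-b" is represented by a SubNode whose in(1) is the Node
// producing 'a' and whose in(2) is the Node producing 'b'; swapping those
// two inputs would instead denote "b-a".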

class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // A new operator that takes a Compile pointer; this will eventually
  // be the "new" new operator.
  inline void* operator new( size_t x, Compile* C) throw() {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n;  // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // These versions require use of the Compile-pointer "new" above,
  // e.g. new (C) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
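  // For example (a sketch): a node with _cnt == 3 and _max == 5 has required
  // inputs at in(0), in(1) and in(2), any of which may be NULL, and room for
  // two precedence edges at in(3) and in(4).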

  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
  // Reference to the i'th input Node.  NULL if out of bounds.
  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt,"oob");
    assert(_outcnt > 0,"oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req( Node *n0, Node *n1 ) {
    add_req(n0); add_req(n1); }
  void add_req( Node *n0, Node *n1, Node *n2 ) {
    add_req(n0); add_req(n1); add_req(n2); }
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt));
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)      n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n, Compile *c);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n) const {
    return (this->uncast() == n->uncast());
  }

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(NULL, c);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  //   for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // The class id is the set of bits corresponding to the node class and all
  // its super classes, so that queries for super classes are also valid.
  // Subclasses of the same super class are assigned different bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //   Class_MachCall=30, ClassMask_MachCall=31
  //  12               8               4               0
  //   0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                   |   |   |   |
  //                                   |   |   |   Bit_Mach=2
  //                                   |   |   Bit_MachReturn=4
  //                                   |   Bit_MachSafePoint=8
  //                                   Bit_MachCall=16
  //
  //   Class_CountedLoop=56, ClassMask_CountedLoop=63
  //  12               8               4               0
  //   0   0   0   0   0   0   0   1   1   1   0   0   0
  //                               |   |   |
  //                               |   |   Bit_Region=8
  //                               |   Bit_Loop=16
  //                               Bit_CountedLoop=32
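  //
  // For example (a sketch of how a generated query works): is_MachCall() tests
  //   (_class_id & ClassMask_MachCall) == Class_MachCall
  // A MachCallJava node has Class_MachCallJava == 62 (binary 111110); masking
  // with ClassMask_MachCall == 31 strips the subclass bit, leaving 30 ==
  // Class_MachCall, so the superclass query correctly answers true.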

#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,         MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,       MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck,  MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
      DEFINE_CLASS_ID(MachMerge,        Mach, 6)

    DEFINE_CLASS_ID(Type,  Node, 2)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
        DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
        DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)

    DEFINE_CLASS_ID(Proj,  Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem,   Node, 4)
      DEFINE_CLASS_ID(Load,  Mem, 0)
        DEFINE_CLASS_ID(LoadVector,  Load, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 6)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(MergeMem,   Node, 7)
    DEFINE_CLASS_ID(Bool,       Node, 8)
    DEFINE_CLASS_ID(AddP,       Node, 9)
    DEFINE_CLASS_ID(BoxLock,    Node, 10)
    DEFINE_CLASS_ID(Add,        Node, 11)
    DEFINE_CLASS_ID(Mul,        Node, 12)
    DEFINE_CLASS_ID(Vector,     Node, 13)
    DEFINE_CLASS_ID(ClearArray, Node, 14)

    _max_classes = ClassMask_ClearArray
  };
#undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
    Flag_rematerialize               = Flag_is_Copy << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con                      = Flag_is_macro << 1,
    Flag_is_cisc_alternate           = Flag_is_Con << 1,
    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
    Flag_avoid_back_to_back_before   = Flag_may_be_short_branch << 1,
    Flag_avoid_back_to_back_after    = Flag_avoid_back_to_back_before << 1,
    Flag_has_call                    = Flag_avoid_back_to_back_after << 1,
    Flag_is_expensive                = Flag_has_call << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
#define DEFINE_CLASS_QUERY(type)                             \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(MachMerge)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

#undef DEFINE_CLASS_QUERY
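
  // Typical use of the generated queries (a sketch; 'n' is any Node*):
  //   if (n->is_Load())  { LoadNode* ld = n->as_Load(); ... }  // checked cast
  //   if (LoadNode* ld = n->isa_Load()) { ... }  // test-and-cast, NULL if not a Load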

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // True for a data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes.  Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains.  Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable).
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control output, if there is exactly one.
  // NULL if there are none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;
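
  // Example use (a sketch): fold a shift only when its count is a compile-time
  // constant.  The -1 marker is a caller-chosen "unknown" value, not part of
  // the API (and would be ambiguous if -1 were a legal constant here).
  //   jint con = n->in(2)->find_int_con(-1);
  //   if (con != -1) { /* shift amount is the constant 'con' */ }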

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jint value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill.  In other words,
  // return the operand position that can convert from a register to a memory access.
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }  // Print this node.
  void dump(const char* suffix, outputStream *st = tty) const; // Print this node.
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req(outputStream *st = tty) const;   // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const;  // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;   // Print the output edge info
  virtual void dump_spec(outputStream *st) const {};     // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                   // Unique value assigned to every node.
  int   debug_idx() const            { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                 // Original version of this, if any.
  Node*  debug_orig() const          { return _debug_orig; }
  void   set_debug_orig(Node* orig); // _debug_orig = orig

  int  _hash_lock;                   // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;             // The last deleted node.
  uint        _del_tick;             // Bumped when a deletion happens.
#endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     ...
//   }
// Compiles in product mode to an unsigned integer index, which indexes
// onto a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
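//
// Deletion idiom (a sketch; assumes y->in(j) == x for some input index j):
//   for (DUIterator i = x->outs(); x->has_out(i); i++) {
//     Node* y = x->out(i);
//     if (/* y should stop using x */) {
//       y->set_req(j, NULL);  // deletes the just-produced x->y out-edge
//       --i;                  // back up over the deletion, per the rules above
//     }
//   }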
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }

// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//   for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//     Node* y = x->fast_out(i);
//     ...
//   }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times:  "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;           VDUI_ONLY(verify_relimit(n));  }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//   for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//     Node* y = x->last_out(i);
//     ...
//   }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  // initialize to garbage
  DUIterator_Last() { }

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1));  }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n));  }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern.  Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};

//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
  friend class VMStructs;
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};
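
// Example use (a sketch): a side map from node index to a replacement node.
//   Node_Array map(Thread::current()->resource_area());
//   map.map(old_node->_idx, new_node);   // grows automatically as needed
//   Node* n = map[old_node->_idx];       // NULL for indices never mapped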

class Node_List : public Node_Array {
  friend class VMStructs;
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(const Node* n) const {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
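
// Typical transitive-closure pattern (a sketch): push() ignores nodes that
// are already on the list, so each node is appended (and visited) only once.
//   Unique_Node_List work;
//   work.push(root);
//   for (uint i = 0; i < work.size(); i++) {  // work.size() grows as we go
//     Node* n = work.at(i);
//     for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++)
//       work.push(n->fast_out(j));
//   }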

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
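
// Typical iterative-walk pattern (a sketch; a real walk would also track
// visited nodes): the per-entry index records how far into a node's inputs
// the traversal has advanced.
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       if (n->in(i) != NULL)  stack.push(n->in(i), 0);
//     } else {
//       stack.pop();           // all inputs of n have been visited
//     }
//   }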

//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }
  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}

//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

#endif // SHARE_VM_OPTO_NODE_HPP