Mon, 25 May 2020 14:24:27 +0800
8244407: JVM crashes after transformation in C2 IdealLoopTree::split_fall_in
Reviewed-by: thartmann, kvn, andrew
Contributed-by: zhouyong44@huawei.com
/*
 * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_OPTO_NODE_HPP
#define SHARE_VM_OPTO_NODE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style
class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CastIINode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class MachMergeNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class StoreVectorNode;
class VectorSet;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}
// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program. They create values, which have types.
// They are both vertices in a directed graph and program primitives. Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node. Node inputs are ordered (so
// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
// the Node's function. These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph. Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.
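//
// For illustration, a minimal sketch (assuming a Compile* C and Node* n in
// scope; the array name is hypothetical) of why the dense index matters: a
// pass can keep per-node data in a plain side array instead of a hash map:
//   const Type** side = NEW_RESOURCE_ARRAY(const Type*, C->unique());
//   memset(side, 0, C->unique() * sizeof(const Type*));
//   side[n->_idx] = n->bottom_type();   // O(1) per-node annotation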
class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from. This should allow fast access to node creation & deletion. This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.
  // New Operator that takes a Compile pointer, this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) throw() {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node. Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }
private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories. Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes. They are unordered and not
  // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.
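  // For illustration, a minimal sketch of walking both edge categories of a
  // node 'n' (the locals here are hypothetical, not part of this header):
  //   for (uint i = 0; i < n->req(); i++) {
  //     Node* def = n->in(i);            // required edge; may be NULL
  //   }
  //   for (uint i = n->req(); i < n->len(); i++) {
  //     Node* prec = n->in(i);           // precedence edge
  //     if (prec == NULL) break;         // prec edges have no embedded NULLs
  //   }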
  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one. Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number. This number is used
  // to index into auxiliary arrays of data and bit vectors.
  // The field _idx is declared constant to defend against inadvertent assignments,
  // since it is used by clients as a naked field. However, the field's value can be
  // changed using the set_idx() method.
  //
  // The PhaseRenumberLive phase renumbers nodes based on liveness information.
  // Therefore, it updates the value of the _idx field. The parse-time _idx is
  // preserved in _parse_idx.
  const node_idx_t _idx;
  DEBUG_ONLY(const node_idx_t _parse_idx;)

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node. Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position? (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node* out(DUIterator& i) const;
  // Iterate over the out-edges of this node. All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node* fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node* last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node. Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node. Error if out of bounds.
  Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position? (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node. All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node* fast_out(DUIterator_Fast i) const { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node* last_out(DUIterator_Last i) const { return *i; }
#endif

  // Reference to the i'th input Node. Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
  // Reference to the i'th input Node. NULL if out of bounds.
  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
  // Reference to the i'th output Node. Error if out of bounds.
  // Use this accessor sparingly. We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void raw_del_out(uint i) {
    assert(i < _outcnt,"oob");
    assert(_outcnt > 0,"oob");
#if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
#endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req( Node *n0, Node *n1 ) {
    add_req(n0); add_req(n1); }
  void add_req( Node *n0, Node *n1, Node *n2 ) {
    add_req(n0); add_req(n1); add_req(n2); }
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt));
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
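  // For illustration: rewiring input 1 of 'n' to a different def keeps the
  // def-use info consistent in both directions (a sketch with hypothetical
  // locals):
  //   n->set_req(1, new_def);  // drops n from old def's out list, adds it to new_def's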
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int find_prec_edge(Node* n) {
    for (uint i = req(); i < len(); i++) {
      if (_in[i] == n) return i;
      if (_in[i] == NULL) {
        DEBUG_ONLY( while ((++i) < len()) assert(_in[i] == NULL, "Gap in prec edges!"); )
        break;
      }
    }
    return -1;
  }
  int replace_edge(Node* old, Node* neww);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int disconnect_inputs(Node *n, Compile *c);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting. (It is depth-limited.)
  Node* uncast() const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n) const {
    return (this->uncast() == n->uncast());
  }

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
#if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
#endif
  }
  // Close gap after removing edge.
  void close_prec_gap_at(uint gap) {
    assert(_cnt <= gap && gap < _max, "no valid prec edge");
    uint i = gap;
    Node *last = NULL;
    for (; i < _max-1; ++i) {
      Node *next = _in[i+1];
      if (next == NULL) break;
      last = next;
    }
    _in[gap] = last;  // Move last slot to empty one.
    _in[i] = NULL;    // NULL out last slot.
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(NULL, c);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input. RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );

  // Note: prec(i) will not necessarily point to n if edge already exists.
  void set_prec( uint i, Node *n ) {
    assert(i < _max, err_msg("oob: i=%d, _max=%d", i, _max));
    assert(is_not_dead(n), "can not use dead node");
    assert(i >= _cnt, "not a precedence edge");
    // Avoid spec violation: duplicated prec edge.
    if (_in[i] == n) return;
    if (n == NULL || find_prec_edge(n) != -1) {
      rm_prec(i);
      return;
    }
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }

  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes, so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32
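  //
  // For illustration, how a superclass query succeeds on a subclass id:
  // a node with _class_id == Class_MachCall (30) answers is_MachSafePoint()
  // because (30 & ClassMask_MachSafePoint) == (30 & 15) == 14, which equals
  // Class_MachSafePoint.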
#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,  Multi, 2)
      DEFINE_CLASS_ID(MemBar, Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,        MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,      MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
      DEFINE_CLASS_ID(MachMerge,        Mach, 6)

    DEFINE_CLASS_ID(Type,  Node, 2)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
        DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
        DEFINE_CLASS_ID(DecodeN,      DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
        DEFINE_CLASS_ID(EncodeP,      EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)

    DEFINE_CLASS_ID(Proj,  Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem,   Node, 4)
      DEFINE_CLASS_ID(Load,  Mem, 0)
        DEFINE_CLASS_ID(LoadVector,  Load, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 6)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(MergeMem,   Node, 7)
    DEFINE_CLASS_ID(Bool,       Node, 8)
    DEFINE_CLASS_ID(AddP,       Node, 9)
    DEFINE_CLASS_ID(BoxLock,    Node, 10)
    DEFINE_CLASS_ID(Add,        Node, 11)
    DEFINE_CLASS_ID(Mul,        Node, 12)
    DEFINE_CLASS_ID(Vector,     Node, 13)
    DEFINE_CLASS_ID(ClearArray, Node, 14)

    _max_classes = ClassMask_ClearArray
  };
#undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
    Flag_rematerialize               = Flag_is_Copy << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con                      = Flag_is_macro << 1,
    Flag_is_cisc_alternate           = Flag_is_Con << 1,
    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
    Flag_avoid_back_to_back_before   = Flag_may_be_short_branch << 1,
    Flag_avoid_back_to_back_after    = Flag_avoid_back_to_back_before << 1,
    Flag_has_call                    = Flag_avoid_back_to_back_after << 1,
    Flag_is_expensive                = Flag_has_call << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
#define DEFINE_CLASS_QUERY(type)                             \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
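  // For illustration, the three queries the macro generates, using Phi as an
  // example (a sketch; 'n' is a hypothetical Node*):
  //   if (n->is_Phi()) { ... }       // cheap class test, no virtual call
  //   PhiNode* p1 = n->as_Phi();     // checked cast (asserts in debug builds)
  //   PhiNode* p2 = n->isa_Phi();    // NULL if n is not a Phi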
  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(CastII)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(MachMerge)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

#undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test? This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections. These calls need to work on their machine equivalents. The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return. This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none. The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle. If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on the IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip chains of Proj and CatchProj nodes. Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info. Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable).
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching. Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not? Generally false for Control
  // and true for everything else. Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done. Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jint value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;
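  // For illustration, typical constant extraction that tolerates unknown
  // inputs (a sketch; the default value 1 is arbitrary):
  //   jint scale = n->in(2)->find_int_con(1);  // 1 if in(2) is not a constant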
  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences.
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return the operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }  // Print this node.
  void dump(const char* suffix, outputStream *st = tty) const; // Print this node.
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req(outputStream *st = tty) const;   // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const;  // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;   // Print the output edge info
  virtual void dump_spec(outputStream *st) const {};     // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                   // Unique value assigned to every node.
  int  debug_idx() const             { return _debug_idx; }
  void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                 // Original version of this, if any.
  Node* debug_orig() const           { return _debug_orig; }
  void  set_debug_orig(Node* orig);  // _debug_orig = orig

  int _hash_lock;                    // Barrier to modifications of nodes in the hash table
  void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void exit_hash_lock()  { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;             // The last deleted node.
  uint        _del_tick;             // Bumped when a deletion happens.
#endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator. Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// onto a repeatedly reloaded base pointer of x->_out. The loop predicate
// also reloads x->_outcnt. If you delete, you must perform "--i" just
// before continuing the loop. You must delete only the last-produced
// edge. You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
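// For illustration, a deletion inside the loop following the rule above
// (a sketch; 'keep' is a hypothetical predicate):
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    if (!keep(y)) {
//      int j = y->find_edge(x);   // position of x among y's inputs
//      if (j >= 0) {
//        y->del_req((uint)j);     // also removes the x->y out-edge
//        --i;                     // resync after deleting the last-produced edge
//      }
//    }
//  }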
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index. All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator. Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x. If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer. All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}


// Faster DU iterator. Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern. Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
  friend class VMStructs;
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};

class Node_List : public Node_Array {
  friend class VMStructs;
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(const Node* n) const {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
  void dump_simple() const;
};
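// For illustration, Node_List works as a simple LIFO worklist (duplicates
// allowed; see Unique_Node_List below for the de-duplicating variant):
//   Node_List worklist;
//   worklist.push(seed);                // 'seed' is a hypothetical Node*
//   while (worklist.size() > 0) {
//     Node* n = worklist.pop();
//     // process n, possibly pushing more nodes
//   }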
//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();       // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
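// For illustration, Node_Stack supports iterative (non-recursive) graph
// walks by pairing each node with the index of its next unvisited input
// (a sketch; a real walk also needs a visited set to handle cycles):
//   Node_Stack stack(16);
//   stack.push(seed, 0);               // 'seed' is a hypothetical Node*
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       Node* in = n->in(i);
//       if (in != NULL) stack.push(in, 0);  // descend into input
//     } else {
//       stack.pop();                        // all inputs visited
//     }
//   }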
//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes. See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) {        _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}
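// For illustration, the block arithmetic above: with a block size of, say,
// 256 (_log2_node_notes_block_size == 8), the notes for idx 300 live in
// block 300 >> 8 == 1, at offset 300 & 255 == 44.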
//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

#endif // SHARE_VM_OPTO_NODE_HPP