Thu, 12 Nov 2009 09:24:21 -0800
6892658: C2 should optimize some stringbuilder patterns
Reviewed-by: kvn, twisti
/*
 * Copyright 1997-2008 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style
class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class Block_Array;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNNode;
class EncodePNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorSet;
class IfTrueNode;
class IfFalseNode;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}
// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;
#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT
#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif
// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)
//------------------------------Node-------------------------------------------
// Nodes define actions in the program. They create values, which have types.
// They are both vertices in a directed graph and program primitives. Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node. Node inputs are ordered (so
// that "a-b" is different from "b-a"). The inputs to a Node are the inputs to
// the Node's function. These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph. Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.
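//
// A sketch of that side-array idiom (illustrative only; the 'depth' array
// and its meaning are hypothetical, not part of this file):
//
//   int* depth = NEW_RESOURCE_ARRAY(int, C->unique()); // one slot per _idx
//   ...
//   depth[n->_idx] = depth[n->in(0)->_idx] + 1;        // index by n->_idx
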
class Node {
  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
  #if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
  #endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from. This should allow fast access to node creation & deletion. This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // New operator that takes a Compile pointer; this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n;  // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // New operator that takes a Compile pointer and an edge count; this will
  // eventually be the "new" New operator.
  inline void* operator new( size_t x, Compile* C, int y) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
    n->_in = (Node**)(((char*)n) + x);
#ifdef ASSERT
    n->_in[y-1] = n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node. Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories. Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes. They are unordered and not
  // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
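  //
  // The resulting _in array layout, as a sketch:
  //
  //   index:    0 ... _cnt-1        _cnt ... _max-1
  //            [ required edges ] [ precedence edges ]
  //
  // Required slots may be NULL; precedence slots are non-NULL and unordered.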
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one. Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number. This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node. Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position? (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node. All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node. Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node. Error if out of bounds.
  Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position? (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node. All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node* fast_out(DUIterator_Fast i) const { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node* last_out(DUIterator_Last i) const { return *i; }
#endif

  // Reference to the i'th input Node. Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, "oob"); return _in[i]; }
  // Reference to the i'th output Node. Error if out of bounds.
  // Use this accessor sparingly. We are going to try to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt == 1, "not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void raw_del_out(uint i) {
    assert(i < _outcnt, "oob");
    assert(_outcnt > 0, "oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n );      // Append a NEW required input
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx );     // Delete required edge & compact
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];  // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int disconnect_inputs(Node *n);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting. (It is depth-limited.)
  Node* uncast() const;

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node) {
    replace_by(new_node);
    disconnect_inputs(NULL);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input. RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  //   for( i = 0; i < X.req(); i++ ) ... X.in(i) ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class ids for some ideal nodes so that the is_<Node>() query
  // methods can avoid virtual function calls.
  // A class id is the set of bits corresponding to the node class and all its
  // super classes, so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32
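  //
  // As a worked example, DEFINE_CLASS_ID(Call, SafePoint, 0) below expands to:
  //
  //   Bit_Call       = Bit_SafePoint << (1 + 0)   // next free bit above SafePoint
  //   Class_Call     = Class_SafePoint + Bit_Call // superclass bits plus own bit
  //   ClassMask_Call = (Bit_Call << 1) - 1        // covers Call and all its subclasses
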
#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,  Multi, 2)
      DEFINE_CLASS_ID(MemBar, Multi, 3)
        DEFINE_CLASS_ID(Initialize, MemBar, 0)

    DEFINE_CLASS_ID(Mach, Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava, MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,  MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
      DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
      DEFINE_CLASS_ID(MachIf,        Mach, 3)
      DEFINE_CLASS_ID(MachTemp,      Mach, 4)

    DEFINE_CLASS_ID(Proj, Node, 2)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)

    DEFINE_CLASS_ID(Region, Node, 3)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub, Node, 4)
      DEFINE_CLASS_ID(Cmp, Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(Type, Node, 5)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeN, Type, 5)
      DEFINE_CLASS_ID(EncodeP, Type, 6)

    DEFINE_CLASS_ID(Mem, Node, 6)
      DEFINE_CLASS_ID(Load,  Mem, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(MergeMem, Node, 7)
    DEFINE_CLASS_ID(Bool,     Node, 8)
    DEFINE_CLASS_ID(AddP,     Node, 9)
    DEFINE_CLASS_ID(BoxLock,  Node, 10)
    DEFINE_CLASS_ID(Add,      Node, 11)
    DEFINE_CLASS_ID(Mul,      Node, 12)

    _max_classes = ClassMask_Mul
  };
#undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy             = 0x01, // should be first bit to avoid shift
    Flag_is_Call             = Flag_is_Copy << 1,
    Flag_rematerialize       = Flag_is_Call << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con              = Flag_is_macro << 1,
    Flag_is_cisc_alternate   = Flag_is_Con << 1,
    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
    Flag_is_block_start      = Flag_is_Branch << 1,
    Flag_is_Goto             = Flag_is_block_start << 1,
    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
    Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
    Flag_is_Vector           = Flag_is_pc_relative << 1,
    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties

  // Special case: is_Call() returns true for both CallNode and MachCallNode.
  bool is_Call() const {
    return (_flags & Flag_is_Call) != 0;
  }

  CallNode* isa_Call() const {
    return is_Call() ? as_Call() : NULL;
  }

  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
    return (CallNode*)this;
  }

#define DEFINE_CLASS_QUERY(type)                             \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
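
  // Typical use of the generated queries, as an illustrative sketch:
  //
  //   if (n->is_Load()) {             // cheap bit test, no virtual call
  //     LoadNode* ld = n->as_Load();  // checked cast (asserts in debug builds)
  //     ...
  //   }
  //   CallNode* call = n->isa_Call(); // NULL when n is not a Call
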
  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Unlock)

#undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
  // A data node which is safe to leave in a dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test? This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }

  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections. These calls need to work on their machine equivalents. The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return (_flags & Flag_is_block_start) != 0;
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return. This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }

  // Value is a vector of primitive values
  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none. The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle. If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip chains of Proj and CatchProj nodes. Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info. Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable).
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a Counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching. Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg; // must be > max. machine register

  // Do we Match on this edge index or not? Generally false for Control
  // and true for everything else. Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done. Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Note: the fallback parameter must be jlong, or a long default would be
  // silently truncated.
  jlong find_long_con(jlong value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;
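
  // Typical use of the constant probes above, as an illustrative sketch
  // (the surrounding context is hypothetical):
  //
  //   jint scale = n->in(2)->find_int_con(-1);
  //   if (scale >= 0) {
  //     // fast path for a known, constant scale factor
  //   }
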
  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return the operand position that can convert from reg to memory access.
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const;                 // Print this node.
  void dump(int depth) const;        // Print this node, recursively to the given depth
  void dump_ctrl(int depth) const;   // Print control nodes, to the given depth
  virtual void dump_req() const;     // Print required-edge info
  virtual void dump_prec() const;    // Print precedence-edge info
  virtual void dump_out() const;     // Print the output edge info
  virtual void dump_spec(outputStream *st) const {} // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static int _in_dump_cnt;
  static bool in_dump() { return _in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                   // Unique value assigned to every node.
  int   debug_idx() const            { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                 // Original version of this, if any.
  Node*  debug_orig() const          { return _debug_orig; }
  void   set_debug_orig(Node* orig); // _debug_orig = orig

  int  _hash_lock;                   // Barrier to modifications of nodes in the hash table
  void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void exit_hash_lock()  { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;             // The last deleted node.
  uint        _del_tick;             // Bumped when a deletion happens.
#endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;     // cached value of VerifyDUIterators
  const Node*  _node;     // the node containing the _out array
  uint         _outcnt;   // cached node->_outcnt
  uint         _del_tick; // cached node->_del_tick
  Node*        _last;     // last value produced by the iterator

  void sample(const Node* node); // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator. Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out. The loop predicate
// also reloads x->_outcnt. If you delete, you must perform "--i" just
// before continuing the loop. You must delete only the last-produced
// edge. You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
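//
// A sketch of that deletion protocol (the out-edge is removed indirectly,
// by redirecting the use's input; the predicate and 'replacement' node are
// hypothetical):
//
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    if (y_should_stop_using_x(y)) {
//      y->set_req(y->find_edge(x), replacement); // deletes one x->y out-edge
//      --i;                                      // back up over the deletion
//    }
//  }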
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index. All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that); // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node* Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }

// Faster DU iterator. Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x. If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
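//
// A sketch of deleting during fast iteration (the predicate and the 'keep'
// node are hypothetical):
//
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    if (y_should_stop_using_x(y)) {
//      y->set_req(y->find_edge(x), keep); // removes a single x->y out-edge
//      --i; --imax;                       // adjust cursor and limit together
//    }
//  }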
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer. All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

// Faster DU iterator. Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { } // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern. Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
//
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};
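
// Example use, as an illustrative sketch: visit each user of n. No
// insertions or deletions are allowed during the walk.
//
//   for (SimpleDUIterator it(n); it.has_next(); it.next()) {
//     Node* use = it.get();
//     // ... inspect 'use' ...
//   }
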
//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes. Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
 protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
 public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i < _max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};

class Node_List : public Node_Array {
  uint _cnt;
 public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(Node* n) {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0] = _nodes[--_cnt]; return b; }
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
 public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();       // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
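
// A typical worklist-driven walk, as an illustrative sketch ('root' is
// hypothetical). push() filters through the VectorSet, so a node sits on
// the list at most once at a time:
//
//   Unique_Node_List worklist;
//   worklist.push(root);
//   while (worklist.size() > 0) {
//     Node* n = worklist.pop();
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
//       worklist.push(n->fast_out(i)); // duplicates are ignored
//     }
//   }
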
// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
 protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
 public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage
};
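
// A sketch of the usual iterative DFS over inputs ('root' is hypothetical).
// The stored index remembers which child to visit next when the node is
// revisited after a push:
//
//   Node_Stack stack(8);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n = stack.node();
//     uint  i = stack.index();
//     if (i < n->req()) {
//       stack.set_index(i + 1);
//       Node* m = n->in(i);
//       if (m != NULL)  stack.push(m, 0); // descend into input m
//     } else {
//       stack.pop();                      // all inputs of n visited
//     }
//   }
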
//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes. See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  JVMState* _jvms;

 public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) { _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL ? 0 : arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}

//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
 protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
 public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};