Wed, 18 Sep 2013 14:34:56 -0700
8024342: PPC64 (part 111): Support for C calling conventions that require 64-bit ints.
Summary: Some platforms, such as PPC and s390x/zArch, require that 32-bit ints be passed as 64-bit values to C functions. This change adds support for adapting the signature and for issuing the proper casts in C2-compiled stubs. These functions are used in generate_native_wrapper(). The signature used by the compiler is adapted as in PhaseIdealLoop::intrinsify_fill().
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_NODE_HPP
#define SHARE_VM_OPTO_NODE_HPP

#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "opto/compile.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class AbstractLockNode;
class AddNode;
class AddPNode;
class AliasInfo;
class AllocateArrayNode;
class AllocateNode;
class Block;
class BoolNode;
class BoxLockNode;
class CMoveNode;
class CallDynamicJavaNode;
class CallJavaNode;
class CallLeafNode;
class CallNode;
class CallRuntimeNode;
class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class CountedLoopNode;
class CountedLoopEndNode;
class DecodeNarrowPtrNode;
class DecodeNNode;
class DecodeNKlassNode;
class EncodeNarrowPtrNode;
class EncodePNode;
class EncodePKlassNode;
class FastLockNode;
class FastUnlockNode;
class IfNode;
class IfFalseNode;
class IfTrueNode;
class InitializeNode;
class JVMState;
class JumpNode;
class JumpProjNode;
class LoadNode;
class LoadStoreNode;
class LockNode;
class LoopNode;
class MachBranchNode;
class MachCallDynamicJavaNode;
class MachCallJavaNode;
class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachGotoNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;
class MachProjNode;
class MachReturnNode;
class MachSafePointNode;
class MachSpillCopyNode;
class MachTempNode;
class Matcher;
class MemBarNode;
class MemBarStoreStoreNode;
class MemNode;
class MergeMemNode;
class MulNode;
class MultiNode;
class MultiBranchNode;
class NeverBranchNode;
class Node;
class Node_Array;
class Node_List;
class Node_Stack;
class NullCheckNode;
class OopMap;
class ParmNode;
class PCTableNode;
class PhaseCCP;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseTransform;
class PhaseValues;
class PhiNode;
class Pipeline;
class ProjNode;
class RegMask;
class RegionNode;
class RootNode;
class SafePointNode;
class SafePointScalarObjectNode;
class StartNode;
class State;
class StoreNode;
class SubNode;
class Type;
class TypeNode;
class UnlockNode;
class VectorNode;
class LoadVectorNode;
class StoreVectorNode;
class VectorSet;
typedef void (*NFunc)(Node&,void*);
extern "C" {
  typedef int (*C_sort_func_t)(const void *, const void *);
}

// The type of all node counts and indexes.
// It must hold at least 16 bits, but must also be fast to load and store.
// This type, if less than 32 bits, could limit the number of possible nodes.
// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
typedef unsigned int node_idx_t;

#ifndef OPTO_DU_ITERATOR_ASSERT
#ifdef ASSERT
#define OPTO_DU_ITERATOR_ASSERT 1
#else
#define OPTO_DU_ITERATOR_ASSERT 0
#endif
#endif //OPTO_DU_ITERATOR_ASSERT

#if OPTO_DU_ITERATOR_ASSERT
class DUIterator;
class DUIterator_Fast;
class DUIterator_Last;
#else
typedef uint   DUIterator;
typedef Node** DUIterator_Fast;
typedef Node** DUIterator_Last;
#endif

// Node Sentinel
#define NodeSentinel (Node*)-1

// Unknown count frequency
#define COUNT_UNKNOWN (-1.0f)

//------------------------------Node-------------------------------------------
// Nodes define actions in the program.  They create values, which have types.
// They are both vertices in a directed graph and program primitives.  Nodes
// are labeled; the label is the "opcode", the primitive function in the lambda
// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
// the Node's function.  These inputs also define a Type equation for the Node.
// Solving these Type equations amounts to doing dataflow analysis.
// Control and data are uniformly represented in the graph.  Finally, Nodes
// have a unique dense integer index which is used to index into side arrays
// whenever I have phase-specific information.

class Node {
  friend class VMStructs;

  // Lots of restrictions on cloning Nodes
  Node(const Node&);            // not defined; linker error to use these
  Node &operator=(const Node &rhs);

public:
  friend class Compile;
#if OPTO_DU_ITERATOR_ASSERT
  friend class DUIterator_Common;
  friend class DUIterator;
  friend class DUIterator_Fast;
  friend class DUIterator_Last;
#endif

  // Because Nodes come and go, I define an Arena of Node structures to pull
  // from.  This should allow fast access to node creation & deletion.  This
  // field is a local cache of a value defined in some "program fragment" of
  // which these Nodes are just a part.

  // New Operator that takes a Compile pointer; this will eventually
  // be the "new" New operator.
  inline void* operator new( size_t x, Compile* C) {
    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
#ifdef ASSERT
    n->_in = (Node**)n; // magic cookie for assertion check
#endif
    n->_out = (Node**)C;
    return (void*)n;
  }

  // Delete is a NOP
  void operator delete( void *ptr ) {}
  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
  void destruct();

  // Create a new Node.  Required is the number of inputs required for
  // semantic correctness.
  Node( uint required );

  // Create a new Node with given input edges.
  // This version requires use of the "edge-count" new.
  // E.g.  new (C,3) FooNode( C, NULL, left, right );
  Node( Node *n0 );
  Node( Node *n0, Node *n1 );
  Node( Node *n0, Node *n1, Node *n2 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
  Node( Node *n0, Node *n1, Node *n2, Node *n3,
        Node *n4, Node *n5, Node *n6 );

  // Clone an inherited Node given only the base Node type.
  Node* clone() const;

  // Clone a Node, immediately supplying one or two new edges.
  // The first and second arguments, if non-null, replace in(1) and in(2),
  // respectively.
  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
    Node* nn = clone();
    if (in1 != NULL)  nn->set_req(1, in1);
    if (in2 != NULL)  nn->set_req(2, in2);
    return nn;
  }
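
  // Purely illustrative sketch (the names 'add' and 'new_left' are
  // hypothetical): cloning a binary node while substituting only its
  // left operand, leaving in(2) untouched:
  //   Node* add2 = add->clone_with_data_edge(new_left);  // in(1) := new_left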

private:
  // Shared setup for the above constructors.
  // Handles all interactions with Compile::current.
  // Puts initial values in all Node fields except _idx.
  // Returns the initial value for _idx, which cannot
  // be initialized by assignment.
  inline int Init(int req, Compile* C);

//----------------- input edge handling
protected:
  friend class PhaseCFG;        // Access to address of _in array elements
  Node **_in;                   // Array of use-def references to Nodes
  Node **_out;                  // Array of def-use references to Nodes

  // Input edges are split into two categories.  Required edges are required
  // for semantic correctness; order is important and NULLs are allowed.
  // Precedence edges are used to help determine execution order and are
  // added, e.g., for scheduling purposes.  They are unordered and not
  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
  // are required, from _cnt to _max-1 are precedence edges.
  node_idx_t _cnt;              // Total number of required Node inputs.

  node_idx_t _max;              // Actual length of input array.

  // Output edges are an unordered list of def-use edges which exactly
  // correspond to required input edges which point from other nodes
  // to this one.  Thus the count of the output edges is the number of
  // users of this node.
  node_idx_t _outcnt;           // Total number of Node outputs.

  node_idx_t _outmax;           // Actual length of output array.

  // Grow the actual input array to the next larger power-of-2 bigger than len.
  void grow( uint len );
  // Grow the output array to the next larger power-of-2 bigger than len.
  void out_grow( uint len );

public:
  // Each Node is assigned a unique small/dense number.  This number is used
  // to index into auxiliary arrays of data and bitvectors.
  // It is declared const to defend against inadvertent assignment,
  // since it is used by clients as a naked field.
  const node_idx_t _idx;

  // Get the (read-only) number of input edges
  uint req() const { return _cnt; }
  uint len() const { return _max; }
  // Get the (read-only) number of output edges
  uint outcnt() const { return _outcnt; }

#if OPTO_DU_ITERATOR_ASSERT
  // Iterate over the out-edges of this node.  Deletions are illegal.
  inline DUIterator outs() const;
  // Use this when the out array might have changed to suppress asserts.
  inline DUIterator& refresh_out_pos(DUIterator& i) const;
  // Does the node have an out at this position?  (Used for iteration.)
  inline bool has_out(DUIterator& i) const;
  inline Node*    out(DUIterator& i) const;
  // Iterate over the out-edges of this node.  All changes are illegal.
  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
  inline Node*    fast_out(DUIterator_Fast& i) const;
  // Iterate over the out-edges of this node, deleting one at a time.
  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
  inline Node*    last_out(DUIterator_Last& i) const;
  // The inline bodies of all these methods are after the iterator definitions.
#else
  // Iterate over the out-edges of this node.  Deletions are illegal.
  // This iteration uses integral indexes, to decouple from array reallocations.
  DUIterator outs() const  { return 0; }
  // Use this when the out array might have changed to suppress asserts.
  DUIterator refresh_out_pos(DUIterator i) const { return i; }

  // Reference to the i'th output Node.  Error if out of bounds.
  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
  // Does the node have an out at this position?  (Used for iteration.)
  bool has_out(DUIterator i) const { return i < _outcnt; }

  // Iterate over the out-edges of this node.  All changes are illegal.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    max = out + (ptrdiff_t)_outcnt;
    // Return the base pointer:
    return out;
  }
  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
  // Iterate over the out-edges of this node, deleting one at a time.
  // This iteration uses a pointer internal to the out array.
  DUIterator_Last last_outs(DUIterator_Last& min) const {
    Node** out = _out;
    // Assign a limit pointer to the reference argument:
    min = out;
    // Return the pointer to the start of the iteration:
    return out + (ptrdiff_t)_outcnt - 1;
  }
  Node*    last_out(DUIterator_Last i) const  { return *i; }
#endif

  // Reference to the i'th input Node.  Error if out of bounds.
  Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
  // Reference to the i'th output Node.  Error if out of bounds.
  // Use this accessor sparingly.  We are trying to use iterators instead.
  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
  // Return the unique out edge.
  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
  // Delete out edge at position 'i' by moving last out edge to position 'i'
  void  raw_del_out(uint i) {
    assert(i < _outcnt,"oob");
    assert(_outcnt > 0,"oob");
    #if OPTO_DU_ITERATOR_ASSERT
    // Record that a change happened here.
    debug_only(_last_del = _out[i]; ++_del_tick);
    #endif
    _out[i] = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
  }

#ifdef ASSERT
  bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
#endif
  // Check whether node has become unreachable
  bool is_unreachable(PhaseIterGVN &igvn) const;

  // Set a required input edge, also updates corresponding output edge
  void add_req( Node *n ); // Append a NEW required input
  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
  void del_req( uint idx ); // Delete required edge & compact
  void ins_req( uint i, Node *n ); // Insert a NEW required input
  void set_req( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt));
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    Node** p = &_in[i];    // cache this._in, across the del_out call
    if (*p != NULL)  (*p)->del_out((Node *)this);
    (*p) = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Light version of set_req() to init inputs after node creation.
  void init_req( uint i, Node *n ) {
    assert( i == 0 && this == n ||
            is_not_dead(n), "can not use dead node");
    assert( i < _cnt, "oob");
    assert( !VerifyHashTableKeys || _hash_lock == 0,
            "remove node from hash table before modifying it");
    assert( _in[i] == NULL, "sanity");
    _in[i] = n;
    if (n != NULL)  n->add_out((Node *)this);
  }
  // Find first occurrence of n among my edges:
  int find_edge(Node* n);
  int replace_edge(Node* old, Node* neww);
  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
  // NULL out all inputs to eliminate incoming Def-Use edges.
  // Return the number of edges between 'n' and 'this'
  int  disconnect_inputs(Node *n, Compile *c);

  // Quickly, return true if and only if I am Compile::current()->top().
  bool is_top() const {
    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
    return (_out == NULL);
  }
  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
  void setup_is_top();

  // Strip away casting.  (It is depth-limited.)
  Node* uncast() const;
  // Return whether two Nodes are equivalent, after stripping casting.
  bool eqv_uncast(const Node* n) const {
    return (this->uncast() == n->uncast());
  }

private:
  static Node* uncast_helper(const Node* n);

  // Add an output edge to the end of the list
  void add_out( Node *n ) {
    if (is_top())  return;
    if( _outcnt == _outmax ) out_grow(_outcnt);
    _out[_outcnt++] = n;
  }
  // Delete an output edge
  void del_out( Node *n ) {
    if (is_top())  return;
    Node** outp = &_out[_outcnt];
    // Find and remove n
    do {
      assert(outp > _out, "Missing Def-Use edge");
    } while (*--outp != n);
    *outp = _out[--_outcnt];
    // Smash the old edge so it can't be used accidentally.
    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
    // Record that a change happened here.
    #if OPTO_DU_ITERATOR_ASSERT
    debug_only(_last_del = n; ++_del_tick);
    #endif
  }

public:
  // Globally replace this node by a given new node, updating all uses.
  void replace_by(Node* new_node);
  // Globally replace this node by a given new node, updating all uses
  // and cutting input edges of old node.
  void subsume_by(Node* new_node, Compile* c) {
    replace_by(new_node);
    disconnect_inputs(NULL, c);
  }
  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
  // Find the one non-null required input.  RegionNode only
  Node *nonnull_req() const;
  // Add or remove precedence edges
  void add_prec( Node *n );
  void rm_prec( uint i );
  void set_prec( uint i, Node *n ) {
    assert( is_not_dead(n), "can not use dead node");
    assert( i >= _cnt, "not a precedence edge");
    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
    _in[i] = n;
    if (n != NULL) n->add_out((Node *)this);
  }
  // Set this node's index, used by cisc_version to replace current node
  void set_idx(uint new_idx) {
    const node_idx_t* ref = &_idx;
    *(node_idx_t*)ref = new_idx;
  }
  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
  void swap_edges(uint i1, uint i2) {
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    // Def-Use info is unchanged
    Node* n1 = in(i1);
    Node* n2 = in(i2);
    _in[i1] = n2;
    _in[i2] = n1;
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
  }

  // Iterators over input Nodes for a Node X are written as:
  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
  // NOTE: Required edges can contain embedded NULL pointers.

//----------------- Other Node Properties

  // Generate class id for some ideal nodes to avoid virtual query
  // methods is_<Node>().
  // Class id is the set of bits corresponding to the node class and all its
  // super classes so that queries for super classes are also valid.
  // Subclasses of the same super class have different assigned bits
  // (the third parameter in the macro DEFINE_CLASS_ID).
  // Classes with deeper hierarchy are declared first.
  // Classes with the same hierarchy depth are sorted by usage frequency.
  //
  // The query method masks the bits to cut off bits of subclasses
  // and then compares the result with the class id
  // (see the macro DEFINE_CLASS_QUERY below).
  //
  //  Class_MachCall=30, ClassMask_MachCall=31
  // 12               8               4               0
  //  0   0   0   0   0   0   0   0   1   1   1   1   0
  //                                  |   |   |   |
  //                                  |   |   |   Bit_Mach=2
  //                                  |   |   Bit_MachReturn=4
  //                                  |   Bit_MachSafePoint=8
  //                                  Bit_MachCall=16
  //
  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
  // 12               8               4               0
  //  0   0   0   0   0   0   0   1   1   1   0   0   0
  //                              |   |   |
  //                              |   |   Bit_Region=8
  //                              |   Bit_Loop=16
  //                              Bit_CountedLoop=32

#define DEFINE_CLASS_ID(cl, supcl, subn) \
  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
  Class_##cl = Class_##supcl + Bit_##cl , \
  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
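
  // To make the encoding concrete, here is the (schematic) expansion of one
  // entry from the table below; nothing new is introduced, this is just the
  // macro above applied to DEFINE_CLASS_ID(CallJava, Call, 0):
  //   Bit_CallJava       = Bit_Call << 1,              // next free bit above Call's
  //   Class_CallJava     = Class_Call + Bit_CallJava,  // superclass bits plus own bit
  //   ClassMask_CallJava = (Bit_CallJava << 1) - 1,    // masks off subclass bits
  // so is_CallJava() tests (_class_id & ClassMask_CallJava) == Class_CallJava.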

  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
  // so that its values fit into 16 bits.
  enum NodeClasses {
    Bit_Node   = 0x0000,
    Class_Node = 0x0000,
    ClassMask_Node = 0xFFFF,

    DEFINE_CLASS_ID(Multi, Node, 0)
      DEFINE_CLASS_ID(SafePoint, Multi, 0)
        DEFINE_CLASS_ID(Call,      SafePoint, 0)
          DEFINE_CLASS_ID(CallJava,         Call, 0)
            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
          DEFINE_CLASS_ID(Allocate,         Call, 2)
            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
          DEFINE_CLASS_ID(Catch,       PCTable, 0)
          DEFINE_CLASS_ID(Jump,        PCTable, 1)
        DEFINE_CLASS_ID(If,          MultiBranch, 1)
          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
      DEFINE_CLASS_ID(Start,       Multi, 2)
      DEFINE_CLASS_ID(MemBar,      Multi, 3)
        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)

    DEFINE_CLASS_ID(Mach,  Node, 1)
      DEFINE_CLASS_ID(MachReturn, Mach, 0)
        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
      DEFINE_CLASS_ID(MachBranch, Mach, 1)
        DEFINE_CLASS_ID(MachIf,        MachBranch, 0)
        DEFINE_CLASS_ID(MachGoto,      MachBranch, 1)
        DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2)
      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
      DEFINE_CLASS_ID(MachConstant,     Mach, 5)

    DEFINE_CLASS_ID(Type,  Node, 2)
      DEFINE_CLASS_ID(Phi,   Type, 0)
      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
      DEFINE_CLASS_ID(CMove, Type, 3)
      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
        DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
        DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)

    DEFINE_CLASS_ID(Proj,  Node, 3)
      DEFINE_CLASS_ID(CatchProj, Proj, 0)
      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
      DEFINE_CLASS_ID(Parm,      Proj, 4)
      DEFINE_CLASS_ID(MachProj,  Proj, 5)

    DEFINE_CLASS_ID(Mem,   Node, 4)
      DEFINE_CLASS_ID(Load,  Mem, 0)
        DEFINE_CLASS_ID(LoadVector,  Load, 0)
      DEFINE_CLASS_ID(Store, Mem, 1)
        DEFINE_CLASS_ID(StoreVector, Store, 0)
      DEFINE_CLASS_ID(LoadStore, Mem, 2)

    DEFINE_CLASS_ID(Region, Node, 5)
      DEFINE_CLASS_ID(Loop, Region, 0)
        DEFINE_CLASS_ID(Root,        Loop, 0)
        DEFINE_CLASS_ID(CountedLoop, Loop, 1)

    DEFINE_CLASS_ID(Sub,   Node, 6)
      DEFINE_CLASS_ID(Cmp,   Sub, 0)
        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)

    DEFINE_CLASS_ID(MergeMem,   Node, 7)
    DEFINE_CLASS_ID(Bool,       Node, 8)
    DEFINE_CLASS_ID(AddP,       Node, 9)
    DEFINE_CLASS_ID(BoxLock,    Node, 10)
    DEFINE_CLASS_ID(Add,        Node, 11)
    DEFINE_CLASS_ID(Mul,        Node, 12)
    DEFINE_CLASS_ID(Vector,     Node, 13)
    DEFINE_CLASS_ID(ClearArray, Node, 14)

    _max_classes = ClassMask_ClearArray
  };
  #undef DEFINE_CLASS_ID

  // Flags are sorted by usage frequency.
  enum NodeFlags {
    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
    Flag_rematerialize               = Flag_is_Copy << 1,
    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
    Flag_is_Con                      = Flag_is_macro << 1,
    Flag_is_cisc_alternate           = Flag_is_Con << 1,
    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
    Flag_avoid_back_to_back          = Flag_may_be_short_branch << 1,
    Flag_has_call                    = Flag_avoid_back_to_back << 1,
    Flag_is_expensive                = Flag_has_call << 1,
    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
  };

private:
  jushort _class_id;
  jushort _flags;

protected:
  // These methods should be called from constructors only.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }

public:
  const jushort class_id() const { return _class_id; }

  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
  #define DEFINE_CLASS_QUERY(type)                           \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
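
  // Again purely illustrative: for the Load entry below, this macro generates
  //   bool      is_Load()  const;  // cheap bit test against ClassMask_Load
  //   LoadNode* as_Load()  const;  // checked downcast (asserts is_Load())
  //   LoadNode* isa_Load() const;  // downcast, or NULL if this is not a Load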

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // The data node which is safe to leave in dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }

//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes.  Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains.  Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or is equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable).
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is a counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; otherwise return NULL.
  Node* find_similar(int opc);

  // Return the unique control out if only one.  Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
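
  // A typical (illustrative) use, where 'index_node' is some hypothetical
  // input node and -1 serves as the "unknown" sentinel:
  //   jint idx = index_node->find_int_con(-1);
  //   if (idx >= 0) { /* constant-fold using idx */ }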
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  jlong find_long_con(jint value_if_unknown) const {
    const TypeLong* t = find_long_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  const TypeLong* find_long_type() const;

  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill.  In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }  // Print this node.
  void dump(const char* suffix, outputStream *st = tty) const; // Print this node.
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req(outputStream *st = tty) const;  // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;  // Print the output edge info
  virtual void dump_spec(outputStream *st) const {};    // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static int _in_dump_cnt;
  static bool in_dump() { return _in_dump_cnt > 0; }
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

#if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens.
#endif
#endif
};

//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x) I_VDUI_ONLY(*this, x)

// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// into a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
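//
// A sketch of that deletion pattern (the condition and the edge being cut
// are hypothetical; any mutation that removes the last-produced x->y
// out-edge fits the same shape):
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    if (y->in(0) == x) {     // hypothetical reason to cut the edge
//      y->set_req(0, NULL);   // removes one x->y out-edge
//      --i;                   // back up over the deletion, per the rule above
//    }
//  }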
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }


// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times: "--i, imax -= num_edges".
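//
// Again a sketch, with a hypothetical condition, of the deletion pattern
// just described; note that both the cursor and the cached limit move:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    if (y->in(0) == x) {     // hypothetical reason to cut the edge
//      y->set_req(0, NULL);   // removes one x->y out-edge
//      --i; --imax;           // resync both the cursor and the limit
//    }
//  }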
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}


// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
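//
// As a sketch of the pattern above: severing every use of x, where each
// user y may reference x more than once (replace_edge returns how many
// edges it cut, which is exactly the step the iterator needs):
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; ) {
//    Node* y = x->last_out(i);
//    uint num_edges = y->replace_edge(x, NULL);
//    i -= num_edges;              // step over all edges just removed
//  }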
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1)); }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n)); }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}

#endif //OPTO_DU_ITERATOR_ASSERT

#undef I_VDUI_ONLY
#undef VDUI_ONLY

// An Iterator that truly follows the iterator pattern.  Doesn't
// support deletion but could be made to.
//
//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
//     Node* m = i.get();
//     ...
//   }
class SimpleDUIterator : public StackObj {
 private:
  Node* node;
  DUIterator_Fast i;
  DUIterator_Fast imax;
 public:
  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  bool has_next() { return i < imax; }
  void next() { i++; }
  Node* get() { return node->fast_out(i); }
};


//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
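//
// A minimal sketch of typical use (assuming an Arena* 'arena' is at hand):
//   Node_Array map(arena);
//   map.map(n->_idx, n);       // grows the backing array on demand
//   Node* m = map[n->_idx];    // NULL for indices never mapped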
class Node_Array : public ResourceObj {
  friend class VMStructs;
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;
  Node **_nodes;
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};

class Node_List : public Node_Array {
  friend class VMStructs;
  uint _cnt;
public:
  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  bool contains(Node* n) {
    for (uint e = 0; e < size(); e++) {
      if (at(e) == n) return true;
    }
    return false;
  }
  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  void push( Node *b ) { map(_cnt++,b); }
  void yank( Node *n );         // Find and remove
  Node *pop() { return _nodes[--_cnt]; }
  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  uint size() const { return _cnt; }
  void dump() const;
};

//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;
    return b;
  }
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
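
// A sketch of the usual worklist idiom built on this class; push() filters
// duplicates via the VectorSet, so a node already on the list is not added
// twice:
//   Unique_Node_List work;
//   work.push(root);
//   while (work.size() > 0) {
//     Node* n = work.pop();
//     for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++)
//       work.push(n->fast_out(i));   // no-op if already on the list
//   }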

// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}

//------------------------------Node_Stack-------------------------------------
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  void grow();
public:
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
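
// A sketch of the iterative depth-first walk this stack is built for; the
// (node, index) pair records how far into a node's inputs the walk has gone
// (a real traversal would also track visited nodes, omitted here):
//   Node_Stack stack(16);
//   stack.push(root, 0);
//   while (stack.is_nonempty()) {
//     Node* n   = stack.node();
//     uint  idx = stack.index();
//     if (idx < n->req()) {
//       stack.set_index(idx + 1);          // resume after this child later
//       Node* in = n->in(idx);
//       if (in != NULL) stack.push(in, 0); // descend into the input
//     } else {
//       stack.pop();                       // all inputs of n processed
//     }
//   }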

//-----------------------------Node_Notes--------------------------------------
// Debugging or profiling annotations loosely and sparsely associated
// with some nodes.  See Compile::node_notes_at for the accessor.
class Node_Notes VALUE_OBJ_CLASS_SPEC {
  friend class VMStructs;
  JVMState* _jvms;

public:
  Node_Notes(JVMState* jvms = NULL) {
    _jvms = jvms;
  }

  JVMState* jvms()            { return _jvms; }
  void  set_jvms(JVMState* x) { _jvms = x; }

  // True if there is nothing here.
  bool is_clear() {
    return (_jvms == NULL);
  }

  // Make there be nothing here.
  void clear() {
    _jvms = NULL;
  }

  // Make a new, clean node notes.
  static Node_Notes* make(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    nn->clear();
    return nn;
  }

  Node_Notes* clone(Compile* C) {
    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
    (*nn) = (*this);
    return nn;
  }

  // Absorb any information from source.
  bool update_from(Node_Notes* source) {
    bool changed = false;
    if (source != NULL) {
      if (source->jvms() != NULL) {
        set_jvms(source->jvms());
        changed = true;
      }
    }
    return changed;
  }
};

// Inlined accessors for Compile::node_notes that require the preceding class:
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}

inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}


//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;
public:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

#endif // SHARE_VM_OPTO_NODE_HPP