Thu, 03 Jan 2013 15:09:55 -0800
8005522: use fast-string instructions on x86 for zeroing
Summary: use 'rep stosb' instead of 'rep stosq' when fast-string operations are available.
Reviewed-by: twisti, roland
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/cfgnode.hpp"
29 #include "opto/connode.hpp"
30 #include "opto/machnode.hpp"
31 #include "opto/matcher.hpp"
32 #include "opto/node.hpp"
33 #include "opto/opcodes.hpp"
34 #include "opto/regmask.hpp"
35 #include "opto/type.hpp"
36 #include "utilities/copy.hpp"
38 class RegMask;
39 // #include "phase.hpp"
40 class PhaseTransform;
41 class PhaseGVN;
43 // Arena we are currently building Nodes in
44 const uint Node::NotAMachineReg = 0xffff0000;
46 #ifndef PRODUCT
47 extern int nodes_created;
48 #endif
50 #ifdef ASSERT
//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Debug-build-only bookkeeping run from every Node constructor: assigns the
// node a monotonically increasing _debug_idx (aligned with _idx in its low
// digits) and honors the BreakAtNode command-line flag.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX");
  // Stop in the debugger when the node with the requested _idx or
  // _debug_idx is created.
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}
83 // #ifdef ASSERT ...
85 #if OPTO_DU_ITERATOR_ASSERT
// Snapshot the node's out-edge state (_outcnt, _del_tick) so that later
// verify() calls can detect unexpected insertions or deletions.
void DUIterator_Common::sample(const Node* node) {
  _vdui = VerifyDUIterators;   // verification is gated by this flag
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
  _last = NULL;                // no out-edge has been produced yet
}
// Check that the iterator is still in sync with its node: same node and no
// deletions since the last sample/resync.  at_end_ok is used by subclass
// overrides only; it is ignored here.
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}
// Re-synchronize after the loop body deleted the edge last produced by the
// iterator; accepts the node's new _outcnt/_del_tick as the baseline.
void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note:  It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge.  On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}
// Assignment support: re-point this iterator at the node 'that' iterates.
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that)  return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    // (_last and _vdui are not touched by the common path below.)
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note:  It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y.  Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}
// Initialize an indexed iterator over node's out-edges.
void DUIterator::sample(const Node* node) {
  DUIterator_Common::sample(node);      // Initialize the assertion data.
  _refresh_tick = 0;                    // No refreshes have happened, as yet.
}
// Common checks plus: _idx must be inside the out array, or exactly one
// past the end when at_end_ok is true.
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}
140 void DUIterator::verify_increment() {
141 if (_refresh_tick & 1) {
142 // We have refreshed the index during this loop.
143 // Fix up _idx to meet asserts.
144 if (_idx > _outcnt) _idx = _outcnt;
145 }
146 verify(_node, true);
147 }
// Accept a deletion performed by the loop body (insertions are also OK for
// this indexed iterator, hence no _outcnt check here).
void DUIterator::verify_resync() {
  // Note:  We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}
// Assignment support: only the freshly-created result of Node::outs() may
// be assigned into an iterator.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that)  return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx          == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;  // Clear the "was refreshed" flag.
    }
    // A loop that keeps refreshing without making progress will trip this.
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}
// Re-sample the node's out-edge state mid-loop and remember that a refresh
// happened (odd _refresh_tick), so verify_finish() can demand a clean rerun.
void DUIterator::refresh() {
  DUIterator_Common::sample(_node);     // Re-fetch assertion data.
  _refresh_tick |= 1;                   // Set the "was refreshed" flag.
}
// Called when the iteration terminates; enforces the refresh protocol.
void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0)  _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}
// Common checks plus: no insertions since sampling, and the cursor pointer
// _outp lies within the out array ([out, out+cnt), or the limit address
// itself when at_end_ok).
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}
// Verify an iterator used as a limit ("imax"): it must equal the
// one-past-the-end address of the node's out array.
void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}
// Re-synchronize after deletions; handles both roles a DUIterator_Fast can
// play — a limit pointer ("imax") or a normal cursor.
void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions.  (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}
// Verify an "imax -= n" adjustment after the loop body deleted n edges.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}
// Assignment support: the cursor must already have been copied bitwise.
void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}
// Last iterators walk backward from the last out-edge; _outp is bumped
// temporarily so the forward-oriented Fast range check applies.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}
// For a backward iterator the limit is the start of the out array.
void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}
// Verify an "i -= num_edges" step: the loop body must have deleted exactly
// num_edges out-edges, the last of which was the one just produced.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}
265 #endif //OPTO_DU_ITERATOR_ASSERT
268 #endif //ASSERT
271 // This constant used to initialize _out may be any non-null value.
272 // The value NULL is reserved for the top node only.
273 #define NO_OUT_ARRAY ((Node**)-1)
275 // This funny expression handshakes with Node::operator new
276 // to pull Compile::current out of the new node's _out field,
277 // and then calls a subroutine which manages most field
278 // initializations. The only one which is tricky is the
279 // _idx field, which is const, and so must be initialized
280 // by a return value, not an assignment.
281 //
282 // (Aren't you thankful that Java finals don't require so many tricks?)
283 #define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
284 #ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
285 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
286 #endif
// Out-of-line code from node constructors.
// Executed only when extra debug info. is being passed around.
// Records nn as the notes for node idx; kept out of line so the common
// (no-notes) path in Node::Init stays small.
static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
  C->set_node_notes_at(idx, nn);
}
// Shared initialization code.
// Reached from operator new via the IDX_INIT macro, before the Node
// constructor body runs.  Allocates the input-edge array from the node
// arena and initializes the non-const fields; returns the new unique
// index as a value because the const _idx field must be initialized by a
// return value, not an assignment (see the IDX_INIT comment above).
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this; // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL) init_node_notes(C, idx, nn);

  // Note:  At this point, C is dead,
  // and we begin to initialize the new Node.

  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;   // non-null marker; NULL is reserved for top
  return idx;
}
322 //------------------------------Node-------------------------------------------
323 // Create a Node, with a given number of required edges.
324 Node::Node(uint req)
325 : _idx(IDX_INIT(req))
326 {
327 assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
328 debug_only( verify_construction() );
329 NOT_PRODUCT(nodes_created++);
330 if (req == 0) {
331 assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
332 _in = NULL;
333 } else {
334 assert( _in[req-1] == this, "Must pass arg count to 'new'" );
335 Node** to = _in;
336 for(uint i = 0; i < req; i++) {
337 to[i] = NULL;
338 }
339 }
340 }
342 //------------------------------Node-------------------------------------------
343 Node::Node(Node *n0)
344 : _idx(IDX_INIT(1))
345 {
346 debug_only( verify_construction() );
347 NOT_PRODUCT(nodes_created++);
348 // Assert we allocated space for input array already
349 assert( _in[0] == this, "Must pass arg count to 'new'" );
350 assert( is_not_dead(n0), "can not use dead node");
351 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
352 }
354 //------------------------------Node-------------------------------------------
355 Node::Node(Node *n0, Node *n1)
356 : _idx(IDX_INIT(2))
357 {
358 debug_only( verify_construction() );
359 NOT_PRODUCT(nodes_created++);
360 // Assert we allocated space for input array already
361 assert( _in[1] == this, "Must pass arg count to 'new'" );
362 assert( is_not_dead(n0), "can not use dead node");
363 assert( is_not_dead(n1), "can not use dead node");
364 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
365 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
366 }
368 //------------------------------Node-------------------------------------------
369 Node::Node(Node *n0, Node *n1, Node *n2)
370 : _idx(IDX_INIT(3))
371 {
372 debug_only( verify_construction() );
373 NOT_PRODUCT(nodes_created++);
374 // Assert we allocated space for input array already
375 assert( _in[2] == this, "Must pass arg count to 'new'" );
376 assert( is_not_dead(n0), "can not use dead node");
377 assert( is_not_dead(n1), "can not use dead node");
378 assert( is_not_dead(n2), "can not use dead node");
379 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
380 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
381 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
382 }
384 //------------------------------Node-------------------------------------------
385 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
386 : _idx(IDX_INIT(4))
387 {
388 debug_only( verify_construction() );
389 NOT_PRODUCT(nodes_created++);
390 // Assert we allocated space for input array already
391 assert( _in[3] == this, "Must pass arg count to 'new'" );
392 assert( is_not_dead(n0), "can not use dead node");
393 assert( is_not_dead(n1), "can not use dead node");
394 assert( is_not_dead(n2), "can not use dead node");
395 assert( is_not_dead(n3), "can not use dead node");
396 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
397 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
398 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
399 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
400 }
402 //------------------------------Node-------------------------------------------
403 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
404 : _idx(IDX_INIT(5))
405 {
406 debug_only( verify_construction() );
407 NOT_PRODUCT(nodes_created++);
408 // Assert we allocated space for input array already
409 assert( _in[4] == this, "Must pass arg count to 'new'" );
410 assert( is_not_dead(n0), "can not use dead node");
411 assert( is_not_dead(n1), "can not use dead node");
412 assert( is_not_dead(n2), "can not use dead node");
413 assert( is_not_dead(n3), "can not use dead node");
414 assert( is_not_dead(n4), "can not use dead node");
415 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
416 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
417 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
418 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
419 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
420 }
422 //------------------------------Node-------------------------------------------
423 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
424 Node *n4, Node *n5)
425 : _idx(IDX_INIT(6))
426 {
427 debug_only( verify_construction() );
428 NOT_PRODUCT(nodes_created++);
429 // Assert we allocated space for input array already
430 assert( _in[5] == this, "Must pass arg count to 'new'" );
431 assert( is_not_dead(n0), "can not use dead node");
432 assert( is_not_dead(n1), "can not use dead node");
433 assert( is_not_dead(n2), "can not use dead node");
434 assert( is_not_dead(n3), "can not use dead node");
435 assert( is_not_dead(n4), "can not use dead node");
436 assert( is_not_dead(n5), "can not use dead node");
437 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
438 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
439 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
440 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
441 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
442 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
443 }
445 //------------------------------Node-------------------------------------------
446 Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
447 Node *n4, Node *n5, Node *n6)
448 : _idx(IDX_INIT(7))
449 {
450 debug_only( verify_construction() );
451 NOT_PRODUCT(nodes_created++);
452 // Assert we allocated space for input array already
453 assert( _in[6] == this, "Must pass arg count to 'new'" );
454 assert( is_not_dead(n0), "can not use dead node");
455 assert( is_not_dead(n1), "can not use dead node");
456 assert( is_not_dead(n2), "can not use dead node");
457 assert( is_not_dead(n3), "can not use dead node");
458 assert( is_not_dead(n4), "can not use dead node");
459 assert( is_not_dead(n5), "can not use dead node");
460 assert( is_not_dead(n6), "can not use dead node");
461 _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
462 _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
463 _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
464 _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
465 _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
466 _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
467 _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
468 }
//------------------------------clone------------------------------------------
// Clone a Node.
// Makes a bitwise copy of this node with the same inputs, a fresh empty
// out array, and a new unique _idx; duplicates the reciprocal def-use
// edges from each input to the clone.  Mach nodes also get their operand
// array cloned; Call nodes clone their JVMState.
Node *Node::clone() const {
  Compile *compile = Compile::current();
  uint s = size_of();           // Size of inherited Node
  // Allocate node object and input array in one arena chunk; the input
  // array lives immediately after the node object.
  Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  // Macro nodes are tracked by Compile for later expansion.
  if (is_macro())
    compile->add_macro_node(n);

  n->set_idx(compile->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  compile->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach  = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    // (The bitwise copy above left mach->_opnds pointing into 'this';
    // recompute the same relative offset inside the clone.)
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(compile);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    call->clone_jvms();
  }
  return n;                     // Return the clone
}
530 //---------------------------setup_is_top--------------------------------------
531 // Call this when changing the top node, to reassert the invariants
532 // required by Node::is_top. See Compile::set_cached_top_node.
533 void Node::setup_is_top() {
534 if (this == (Node*)Compile::current()->top()) {
535 // This node has just become top. Kill its out array.
536 _outcnt = _outmax = 0;
537 _out = NULL; // marker value for top
538 assert(is_top(), "must be top");
539 } else {
540 if (_out == NULL) _out = NO_OUT_ARRAY;
541 assert(!is_top(), "must not be top");
542 }
543 }
//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
// from the node arena.  Reclamation only succeeds when the freed storage
// sits at the arena high-water mark (LIFO discipline); the reclaim_*
// counters track how often that happens.
extern int reclaim_idx ;
extern int reclaim_in  ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL) nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size     = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in  += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  // (In ASSERT builds the storage is deliberately NOT freed here; it is
  // smashed below so stale uses fault, and only the stats are counted.)
  if( edge_end == (char*)this ) {
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in  += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {

    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in  += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}
//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges.  Capacity doubles to
// the next power of two strictly greater than 'len'; new slots are NULLed.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    // First allocation: start with four NULL slots.
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;               // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}
//-----------------------------out_grow----------------------------------------
// Grow the output array, making space for more edges.  Like grow(), but the
// new space is left uninitialized: _outcnt bounds the valid entries.
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;            // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}
#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// A node counts as dead when every input slot is NULL — except that top,
// Mach, and pinch-point (plain Op_Node with uses) nodes may legitimately
// have all-NULL inputs.  Dumps the node before reporting it dead.
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if (is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0)) {
    return false;
  }
  for (uint slot = 0; slot < _max; slot++) {
    if (_in[slot] != NULL) {
      return false;
    }
  }
  dump();
  return true;
}
#endif
//------------------------------add_req----------------------------------------
// Add a new required input at the end.  Required edges [0,_cnt) precede the
// precedence edges, so the first precedence edge (if the slot is busy) is
// relocated to the first NULL slot at the end of the prec list.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {       // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )        // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    _in[i] = in(_cnt);           // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;               // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);
}
//---------------------------add_req_batch-------------------------------------
// Add the same required input n, m times at the end (bulk form of add_req).
// Existing precedence edges are slid up by m slots in one block move.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0) add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {      // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )       // Find the NULL at end of prec edge list
        break;                   // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  // (Top keeps no out array, so skip the reciprocal edges for it.)
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}
751 //------------------------------del_req----------------------------------------
752 // Delete the required edge and compact the edge array
753 void Node::del_req( uint idx ) {
754 assert( idx < _cnt, "oob");
755 assert( !VerifyHashTableKeys || _hash_lock == 0,
756 "remove node from hash table before modifying it");
757 // First remove corresponding def-use edge
758 Node *n = in(idx);
759 if (n != NULL) n->del_out((Node *)this);
760 _in[idx] = in(--_cnt); // Compact the array
761 _in[_cnt] = NULL; // NULL out emptied slot
762 }
//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, shifting later required
// edges up by one (space is made by an add_req(NULL) first).
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                            // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this); // Add reciprocal def-use edge
}
778 //-----------------------------find_edge---------------------------------------
779 int Node::find_edge(Node* n) {
780 for (uint i = 0; i < len(); i++) {
781 if (_in[i] == n) return i;
782 }
783 return -1;
784 }
786 //----------------------------replace_edge-------------------------------------
787 int Node::replace_edge(Node* old, Node* neww) {
788 if (old == neww) return 0; // nothing to do
789 uint nrep = 0;
790 for (uint i = 0; i < len(); i++) {
791 if (in(i) == old) {
792 if (i < req())
793 set_req(i, neww);
794 else
795 set_prec(i, neww);
796 nrep++;
797 }
798 }
799 return nrep;
800 }
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  // First pass: required edges.
  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();) // no reuse benefit expected
  // A node with no remaining connection to 'n' is fully dead; record it.
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}
833 //-----------------------------uncast---------------------------------------
834 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
835 // Strip away casting. (It is depth-limited.)
836 Node* Node::uncast() const {
837 // Should be inline:
838 //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
839 if (is_ConstraintCast() || is_CheckCastPP())
840 return uncast_helper(this);
841 else
842 return (Node*) this;
843 }
//---------------------------uncast_helper-------------------------------------
// Walk up a chain of ConstraintCast/CheckCastPP nodes and return the first
// node that is not such a cast.  Debug builds assert the chain is shorter
// than K nodes (infinite-loop guard).
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    // Dump the offending chain just before the assert below fires.
    if (depth_count >= K) {
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);             // strip one cast level
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);             // strip one cast level
    } else {
      break;
    }
  }
  return (Node*) p;
}
//------------------------------add_prec---------------------------------------
// Add a new precedence input. Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end
  // If the input array is full (or its last slot is occupied) there is no
  // free tail slot, so extend the array first.
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move
  // Scan forward from the first precedence slot (_cnt) to the first NULL.
  uint i = _cnt;
  while( in(i) != NULL ) i++;
  _in[i] = n; // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this); // Add mirror edge
}
//------------------------------rm_prec----------------------------------------
// Remove a precedence input. Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {

  // Find end of precedence list to pack NULLs
  uint i;
  for( i=j; i<_max; i++ )
    if( !_in[i] ) // Find the NULL at end of prec edge list
      break;
  // Drop the reciprocal def-use edge before overwriting the slot.
  if (_in[j] != NULL) _in[j]->del_out((Node *)this);
  _in[j] = _in[--i]; // Move last element over removed guy
  _in[i] = NULL;       // NULL out last element
}
//------------------------------size_of----------------------------------------
// Size in bytes of this node object (subclasses with extra fields override).
uint Node::size_of() const { return sizeof(*this); }
//------------------------------ideal_reg--------------------------------------
// Ideal register class for this node's result; 0 in the base class.
uint Node::ideal_reg() const { return 0; }
//------------------------------jvms-------------------------------------------
// JVM state attached to this node; the base class carries none.
JVMState* Node::jvms() const { return NULL; }
#ifdef ASSERT
//------------------------------jvms-------------------------------------------
// Verify that 'using_jvms' appears somewhere on this node's JVMState
// caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms) return true;
  }
  return false;
}
//------------------------------init_NodeProperty------------------------------
// Sanity-check that the class-id and flag enumerations still fit in the
// jushort fields that store them.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif
//------------------------------format-----------------------------------------
// Print as assembly
// No-op in the base class.
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.
// No-op in the base class.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes
// Zero in the base class.
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.
// Base class answer: this node does not end a basic block.
const Node *Node::is_block_proj() const { return 0; }
// Minimum guaranteed type
// The most conservative possible answer for the base class.
const Type *Node::bottom_type() const { return Type::BOTTOM; }
//------------------------------raise_bottom_type------------------------------
// Get the worst-case Type output for this Node.
// Only TypeNodes and LoadNodes carry a mutable type; other node kinds
// silently ignore this call.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      // Raising must only narrow the type, never widen it.
      assert(new_type->higher_equal(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
}
//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
// Base class: no equivalent node is known.
Node *Node::Identity( PhaseTransform * ) {
  return this; // Default to no identities
}
//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
// Base class: no input-based refinement is attempted.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type(); // Default to worst-case Type
}
980 //------------------------------Ideal------------------------------------------
981 //
982 // 'Idealize' the graph rooted at this Node.
983 //
984 // In order to be efficient and flexible there are some subtle invariants
985 // these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks
986 // these invariants, although its too slow to have on by default. If you are
987 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
988 //
989 // The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
990 // pointer. If ANY change is made, it must return the root of the reshaped
991 // graph - even if the root is the same Node. Example: swapping the inputs
992 // to an AddINode gives the same answer and same root, but you still have to
993 // return the 'this' pointer instead of NULL.
994 //
995 // You cannot return an OLD Node, except for the 'this' pointer. Use the
996 // Identity call to return an old Node; basically if Identity can find
997 // another Node have the Ideal call make no change and return NULL.
998 // Example: AddINode::Ideal must check for add of zero; in this case it
999 // returns NULL instead of doing any graph reshaping.
1000 //
1001 // You cannot modify any old Nodes except for the 'this' pointer. Due to
1002 // sharing there may be other users of the old Nodes relying on their current
1003 // semantics. Modifying them will break the other users.
1004 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
1005 // "X+3" unchanged in case it is shared.
1006 //
1007 // If you modify the 'this' pointer's inputs, you should use
1008 // 'set_req'. If you are making a new Node (either as the new root or
1009 // some new internal piece) you may use 'init_req' to set the initial
1010 // value. You can make a new Node with either 'new' or 'clone'. In
1011 // either case, def-use info is correctly maintained.
1012 //
1013 // Example: reshape "(X+3)+4" into "X+7":
1014 // set_req(1, in(1)->in(1));
1015 // set_req(2, phase->intcon(7));
1016 // return this;
1017 // Example: reshape "X*4" into "X<<2"
1018 // return new (C) LShiftINode(in(1), phase->intcon(2));
1019 //
1020 // You must call 'phase->transform(X)' on any new Nodes X you make, except
1021 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X".
1022 // Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5)));
1023 // return new (C) AddINode(shift, in(1));
1024 //
1025 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1026 // These forms are faster than 'phase->transform(new (C) ConNode())' and Do
1027 // The Right Thing with def-use info.
1028 //
1029 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped
1030 // graph uses the 'this' Node it must be the root. If you want a Node with
1031 // the same Opcode as the 'this' pointer use 'clone'.
1032 //
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  // Base-class default: no reshaping is possible (see the contract above).
  return NULL; // Default to being Ideal already
}
1037 // Some nodes have specific Ideal subgraph transformations only if they are
1038 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1039 // for the transformations to happen.
1040 bool Node::has_special_unique_user() const {
1041 assert(outcnt() == 1, "match only for unique out");
1042 Node* n = unique_out();
1043 int op = Opcode();
1044 if( this->is_Store() ) {
1045 // Condition for back-to-back stores folding.
1046 return n->Opcode() == op && n->in(MemNode::Memory) == this;
1047 } else if( op == Op_AddL ) {
1048 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1049 return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1050 } else if( op == Op_SubI || op == Op_SubL ) {
1051 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1052 return n->Opcode() == op && n->in(2) == this;
1053 }
1054 return false;
1055 };
//--------------------------find_exact_control---------------------------------
// Skip Proj and CatchProj nodes chains. Check for Null and Top.
Node* Node::find_exact_control(Node* ctrl) {
  // A copy Region stands for its single control input.
  if (ctrl == NULL && this->is_Region())
    ctrl = this->as_Region()->is_copy();

  if (ctrl != NULL && ctrl->is_CatchProj()) {
    // On the fall-through path, step over the CatchProj and then over the
    // node below it (guarding against NULL/top).
    if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
      ctrl = ctrl->in(0);
    if (ctrl != NULL && !ctrl->is_top())
      ctrl = ctrl->in(0);
  }

  // Step over a plain projection as well.
  if (ctrl != NULL && ctrl->is_Proj())
    ctrl = ctrl->in(0);

  return ctrl;
}
//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths so, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
//
// 'nlist' records Region nodes already visited on the walk; the low tag bit
// of each stored pointer marks a Region that has been visited twice.
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom = this;
  bool met_dom = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs. If we reach the
  // same region again, go through a different input. Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top()) break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break; // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop. Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);  // strip the "visited twice" tag bit
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body. Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break; // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break; // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break; // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}
//------------------------------remove_dead_region-----------------------------
// This control node is dead. Follow the subgraph below it making everything
// using it dead as well. This will happen normally via the usual IterGVN
// worklist but this call is more efficient. Do not update use-def info
// inside the dead region, just at the borders.
//
// Uses an explicit work stack ('nstack') instead of recursion; a node stays
// on the stack until all of its uses have been processed.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead... ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use); // Yank from hash table prior to mod
        if (use->in(0) == dead) { // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top); // Cut dead edge to prevent processing
          nstack.push(use); // the dead node again.
        } else { // Else found a not-dead user
          // Sever only the edges pointing at the dead node; the user lives on.
          for (uint j = 1; j < use->req(); j++) {
            if (use->in(j) == dead) { // Turn all dead inputs into TOP
              use->set_req(j, top);
            }
          }
          igvn->_worklist.push(use);
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.
      // Remove the node from GVN bookkeeping before killing its inputs.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i); // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top); // Smash input away
          if (n->outcnt() == 0) { // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n); // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          }
        }
      }
    } // (dead->outcnt() == 0)
  } // while (nstack.size() > 0) for outputs
  return;
}
//------------------------------remove_dead_region-----------------------------
// If this node's control input is dead (top), kill the whole subgraph; if it
// is a copy Region, bypass the Region.  Returns true iff in(0) was changed.
bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
  Node *n = in(0);
  if( !n ) return false;
  // Lost control into this guy? I.e., it became unreachable?
  // Aggressively kill all unreachable code.
  if (can_reshape && n->is_top()) {
    kill_dead_code(this, phase->is_IterGVN());
    return false; // Node is dead.
  }

  if( n->is_Region() && n->as_Region()->is_copy() ) {
    // Skip the copy Region and attach directly to its single control input.
    Node *m = n->nonnull_req();
    set_req(0, m);
    return true;
  }
  return false;
}
//------------------------------Ideal_DU_postCCP-------------------------------
// Idealize graph, using DU info. Must clone result into new-space
// Base class: no post-CCP idealization.
Node *Node::Ideal_DU_postCCP( PhaseCCP * ) {
  return NULL; // Default to no change
}
//------------------------------hash-------------------------------------------
// Hash function over Nodes.
uint Node::hash() const {
  uint sum = 0;
  // Fold each required-input pointer into the sum; the shift makes the hash
  // sensitive to input order.
  for( uint i=0; i<_cnt; i++ ) // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}
//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes
// The base class has no extra state, so two nodes of the same class with the
// same inputs always compare equal.
uint Node::cmp( const Node &n ) const {
  return 1; // Must be same
}
1314 //------------------------------rematerialize-----------------------------------
1315 // Should we clone rather than spill this instruction?
1316 bool Node::rematerialize() const {
1317 if ( is_Mach() )
1318 return this->as_Mach()->rematerialize();
1319 else
1320 return (_flags & Flag_rematerialize) != 0;
1321 }
1323 //------------------------------needs_anti_dependence_check---------------------
1324 // Nodes which use memory without consuming it, hence need antidependences.
1325 bool Node::needs_anti_dependence_check() const {
1326 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
1327 return false;
1328 else
1329 return in(1)->bottom_type()->has_memory();
1330 }
1333 // Get an integer constant from a ConNode (or CastIINode).
1334 // Return a default value if there is no apparent constant here.
1335 const TypeInt* Node::find_int_type() const {
1336 if (this->is_Type()) {
1337 return this->as_Type()->type()->isa_int();
1338 } else if (this->is_Con()) {
1339 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1340 return this->bottom_type()->isa_int();
1341 }
1342 return NULL;
1343 }
// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
// Asserts (debug builds) if this node is not a ConP.
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}
// Get a narrow oop constant from a ConNNode.
// Asserts (debug builds) if this node is not a ConN.
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}
// Get a long constant from a ConNode.
// Return a default value if there is no apparent constant here.
// Mirrors find_int_type() above, but for long types.
const TypeLong* Node::find_long_type() const {
  if (this->is_Type()) {
    return this->as_Type()->type()->isa_long();
  } else if (this->is_Con()) {
    assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
    return this->bottom_type()->isa_long();
  }
  return NULL;
}
// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
// Asserts (debug builds) if this node is not a ConD.
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}
// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
// Asserts (debug builds) if this node is not a ConF.
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
1384 #ifndef PRODUCT
1386 //----------------------------NotANode----------------------------------------
1387 // Used in debugging code to avoid walking across dead or uninitialized edges.
1388 static inline bool NotANode(const Node* n) {
1389 if (n == NULL) return true;
1390 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
1391 if (*(address*)n == badAddress) return true; // kill by Node::destruct
1392 return false;
1393 }
//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    // Report duplicates: more than one reachable node may carry this idx.
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);  // mark visited before recursing
  for( uint i=0; i<n->len(); i++ ) {
    // For control-only walks, follow just the control edge, except through
    // Regions and the Root, whose inputs are all control.
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig)) break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}
// call this from debugger:
// Free-function wrapper so "call find_node(n, 42)" works in a debugger.
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}
//------------------------------find-------------------------------------------
// Search the graph reachable from this node for a node with the given _idx
// (or debug_idx); follows all edges.  Returns NULL if none is found.
Node* Node::find(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
  return result;
}
//------------------------------find_ctrl--------------------------------------
// Find an ancestor to this node in the control history with given _idx
// Same as find() but restricted to control edges (only_ctrl == true).
Node* Node::find_ctrl(int idx) const {
  ResourceArea *area = Thread::current()->resource_area();
  VectorSet old_space(area), new_space(area);
  Node* result = NULL;
  find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
  return result;
}
1460 #endif
1464 #ifndef PRODUCT
// Non-zero while inside Node::dump() (incremented/decremented there).
int Node::_in_dump_cnt = 0;

// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];
// Human-readable class name for this node, indexed by its opcode.
const char *Node::Name() const { return NodeClassNames[Opcode()]; }
1471 static bool is_disconnected(const Node* n) {
1472 for (uint i = 0; i < n->req(); i++) {
1473 if (n->in(i) != NULL) return false;
1474 }
1475 return true;
1476 }
#ifdef ASSERT
// Print the chain of debug_orig nodes for 'orig', e.g. " !orig=[123],456".
// Uses the tortoise & hare algorithm so cyclic chains terminate with "...".
static void dump_orig(Node* orig) {
  Compile* C = Compile::current();
  // Filter out garbage and nodes from other compilations.
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  tty->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig); // if discon, print [123] else 123
    if (discon) tty->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      tty->print("o");                  // 'o' marks old-arena nodes
    tty->print("%d", orig->_idx);
    if (discon) tty->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) tty->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        // The hare caught the tortoise: the chain is cyclic, stop printing.
        tty->print("...");
        break;
      }
    }
  }
}
// Record the node this one was cloned from.  If BreakAtNode is set and
// matches any node on the orig chain (up to 10 links deep), trigger a
// debugger breakpoint.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0) return;
  if (NotANode(orig)) orig = NULL;
  int trip = 10;   // bound the walk in case the orig chain is cyclic
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (trip-- <= 0) break;
  }
}
1530 #endif //ASSERT
1532 //------------------------------dump------------------------------------------
1533 // Dump a Node
1534 void Node::dump() const {
1535 Compile* C = Compile::current();
1536 bool is_new = C->node_arena()->contains(this);
1537 _in_dump_cnt++;
1538 tty->print("%c%d\t%s\t=== ",
1539 is_new ? ' ' : 'o', _idx, Name());
1541 // Dump the required and precedence inputs
1542 dump_req();
1543 dump_prec();
1544 // Dump the outputs
1545 dump_out();
1547 if (is_disconnected(this)) {
1548 #ifdef ASSERT
1549 tty->print(" [%d]",debug_idx());
1550 dump_orig(debug_orig());
1551 #endif
1552 tty->cr();
1553 _in_dump_cnt--;
1554 return; // don't process dead nodes
1555 }
1557 // Dump node-specific info
1558 dump_spec(tty);
1559 #ifdef ASSERT
1560 // Dump the non-reset _debug_idx
1561 if( Verbose && WizardMode ) {
1562 tty->print(" [%d]",debug_idx());
1563 }
1564 #endif
1566 const Type *t = bottom_type();
1568 if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
1569 const TypeInstPtr *toop = t->isa_instptr();
1570 const TypeKlassPtr *tkls = t->isa_klassptr();
1571 ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
1572 if( klass && klass->is_loaded() && klass->is_interface() ) {
1573 tty->print(" Interface:");
1574 } else if( toop ) {
1575 tty->print(" Oop:");
1576 } else if( tkls ) {
1577 tty->print(" Klass:");
1578 }
1579 t->dump();
1580 } else if( t == Type::MEMORY ) {
1581 tty->print(" Memory:");
1582 MemNode::dump_adr_type(this, adr_type(), tty);
1583 } else if( Verbose || WizardMode ) {
1584 tty->print(" Type:");
1585 if( t ) {
1586 t->dump();
1587 } else {
1588 tty->print("no type");
1589 }
1590 } else if (t->isa_vect() && this->is_MachSpillCopy()) {
1591 // Dump MachSpillcopy vector type.
1592 t->dump();
1593 }
1594 if (is_new) {
1595 debug_only(dump_orig(debug_orig()));
1596 Node_Notes* nn = C->node_notes_at(_idx);
1597 if (nn != NULL && !nn->is_clear()) {
1598 if (nn->jvms() != NULL) {
1599 tty->print(" !jvms:");
1600 nn->jvms()->dump_spec(tty);
1601 }
1602 }
1603 }
1604 tty->cr();
1605 _in_dump_cnt--;
1606 }
1608 //------------------------------dump_req--------------------------------------
1609 void Node::dump_req() const {
1610 // Dump the required input edges
1611 for (uint i = 0; i < req(); i++) { // For all required inputs
1612 Node* d = in(i);
1613 if (d == NULL) {
1614 tty->print("_ ");
1615 } else if (NotANode(d)) {
1616 tty->print("NotANode "); // uninitialized, sentinel, garbage, etc.
1617 } else {
1618 tty->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
1619 }
1620 }
1621 }
1624 //------------------------------dump_prec-------------------------------------
1625 void Node::dump_prec() const {
1626 // Dump the precedence edges
1627 int any_prec = 0;
1628 for (uint i = req(); i < len(); i++) { // For all precedence inputs
1629 Node* p = in(i);
1630 if (p != NULL) {
1631 if( !any_prec++ ) tty->print(" |");
1632 if (NotANode(p)) { tty->print("NotANode "); continue; }
1633 tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
1634 }
1635 }
1636 }
1638 //------------------------------dump_out--------------------------------------
1639 void Node::dump_out() const {
1640 // Delimit the output edges
1641 tty->print(" [[");
1642 // Dump the output edges
1643 for (uint i = 0; i < _outcnt; i++) { // For all outputs
1644 Node* u = _out[i];
1645 if (u == NULL) {
1646 tty->print("_ ");
1647 } else if (NotANode(u)) {
1648 tty->print("NotANode ");
1649 } else {
1650 tty->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
1651 }
1652 }
1653 tty->print("]] ");
1654 }
//------------------------------dump_nodes-------------------------------------
// Breadth-first dump of the neighborhood of 'start' to depth |d|:
// d > 0 walks input edges, d < 0 walks output edges.
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  GrowableArray <Node *> nstack(C->unique());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    // Expand only the frontier appended by the previous level.
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n)) continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top()) continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  // Inputs are dumped deepest-first (defs before uses); outputs are dumped
  // in discovery order.
  if (direction > 0) {
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}
//------------------------------dump-------------------------------------------
// Dump this node and its neighborhood to depth |d| (d > 0: inputs,
// d < 0: outputs), following all edge kinds.
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}
//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
// (same as dump(d), but restricted to control edges).
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}
1713 // VERIFICATION CODE
1714 // For each input edge to a node (ie - for each Use-Def edge), verify that
1715 // there is a corresponding Def-Use edge.
1716 //------------------------------verify_edges-----------------------------------
1717 void Node::verify_edges(Unique_Node_List &visited) {
1718 uint i, j, idx;
1719 int cnt;
1720 Node *n;
1722 // Recursive termination test
1723 if (visited.member(this)) return;
1724 visited.push(this);
1726 // Walk over all input edges, checking for correspondence
1727 for( i = 0; i < len(); i++ ) {
1728 n = in(i);
1729 if (n != NULL && !n->is_top()) {
1730 // Count instances of (Node *)this
1731 cnt = 0;
1732 for (idx = 0; idx < n->_outcnt; idx++ ) {
1733 if (n->_out[idx] == (Node *)this) cnt++;
1734 }
1735 assert( cnt > 0,"Failed to find Def-Use edge." );
1736 // Check for duplicate edges
1737 // walk the input array downcounting the input edges to n
1738 for( j = 0; j < len(); j++ ) {
1739 if( in(j) == n ) cnt--;
1740 }
1741 assert( cnt == 0,"Mismatched edge count.");
1742 } else if (n == NULL) {
1743 assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
1744 } else {
1745 assert(n->is_top(), "sanity");
1746 // Nothing to check.
1747 }
1748 }
1749 // Recursive walk over all input edges
1750 for( i = 0; i < len(); i++ ) {
1751 n = in(i);
1752 if( n != NULL )
1753 in(i)->verify_edges(visited);
1754 }
1755 }
//------------------------------verify_recur-----------------------------------
static const Node *unique_top = NULL;

// Recursively verify def-use invariants of 'n' and its inputs down to
// 'verify_depth' levels (a negative depth means unlimited).  'old_space' and
// 'new_space' record visited nodes per arena, because node indices are only
// unique within a single arena.
void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 )  return;
  if (verify_depth > 0)  --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space.  Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  // There must be at most one TOP constant node per compilation.
  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n; a consistent graph nets to zero.
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}
1803 //------------------------------verify-----------------------------------------
1804 // Check Def-Use info for my subgraph
1805 void Node::verify() const {
1806 Compile* C = Compile::current();
1807 Node* old_top = C->cached_top_node();
1808 ResourceMark rm;
1809 ResourceArea *area = Thread::current()->resource_area();
1810 VectorSet old_space(area), new_space(area);
1811 verify_recur(this, -1, old_space, new_space);
1812 C->set_cached_top_node(old_top);
1813 }
1814 #endif
1817 //------------------------------walk-------------------------------------------
1818 // Graph walk, with both pre-order and post-order functions
1819 void Node::walk(NFunc pre, NFunc post, void *env) {
1820 VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
1821 walk_(pre, post, env, visited);
1822 }
1824 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
1825 if( visited.test_set(_idx) ) return;
1826 pre(*this,env); // Call the pre-order walk function
1827 for( uint i=0; i<_max; i++ )
1828 if( in(i) ) // Input exists and is not walked?
1829 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
1830 post(*this,env); // Call the post-order walk function
1831 }
// A do-nothing walk function, usable as either the pre- or post-order callback.
void Node::nop(Node &, void*) {}
//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not?  Generally false for Control
// and true for everything else.  Weird for calls & returns.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}
static RegMask _not_used_at_all;
// Register classes are defined for specific machines.  Ideal (pre-Matcher)
// nodes have no register masks; only MachNodes override these.
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}
1854 //=============================================================================
1855 //-----------------------------------------------------------------------------
1856 void Node_Array::reset( Arena *new_arena ) {
1857 _a->Afree(_nodes,_max*sizeof(Node*));
1858 _max = 0;
1859 _nodes = NULL;
1860 _a = new_arena;
1861 }
//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
// (capacity _max is unchanged; all slots become NULL pointers).
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}
//-----------------------------------------------------------------------------
// Grow the mapping so index 'i' is in range; newly added slots are NULL-filled.
void Node_Array::grow( uint i ) {
  if( !_max ) {
    // First allocation: start with a single NULL slot, then double below.
    _max = 1;
    _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
    _nodes[0] = NULL;
  }
  uint old = _max;
  while( i >= _max ) _max <<= 1;        // Double to fit
  _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
  // Zero only the newly added tail [old, _max).
  Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
}
//-----------------------------------------------------------------------------
// Insert 'n' at index 'i', sliding all later entries up by one slot.
void Node_Array::insert( uint i, Node *n ) {
  if( _nodes[_max-1] ) grow(_max);      // Get more space if full
  // Shift [i, _max-2] up to [i+1, _max-1]; the last slot is known free.
  Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
  _nodes[i] = n;
}
//-----------------------------------------------------------------------------
// Remove the entry at index 'i', sliding all later entries down by one slot.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;   // vacated last slot
}
//-----------------------------------------------------------------------------
// Sort the mapping in place using the C-style comparison function 'func'.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}
1900 //-----------------------------------------------------------------------------
1901 void Node_Array::dump() const {
1902 #ifndef PRODUCT
1903 for( uint i = 0; i < _max; i++ ) {
1904 Node *nn = _nodes[i];
1905 if( nn != NULL ) {
1906 tty->print("%5d--> ",i); nn->dump();
1907 }
1908 }
1909 #endif
1910 }
1912 //--------------------------is_iteratively_computed------------------------------
1913 // Operation appears to be iteratively computed (such as an induction variable)
1914 // It is possible for this operation to return false for a loop-varying
1915 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1916 bool Node::is_iteratively_computed() {
1917 if (ideal_reg()) { // does operation have a result register?
1918 for (uint i = 1; i < req(); i++) {
1919 Node* n = in(i);
1920 if (n != NULL && n->is_Phi()) {
1921 for (uint j = 1; j < n->req(); j++) {
1922 if (n->in(j) == this) {
1923 return true;
1924 }
1925 }
1926 }
1927 }
1928 }
1929 return false;
1930 }
//--------------------------find_similar------------------------------
// Return a node with opcode "opc" and same inputs as "this" if one can
// be found; Otherwise return NULL;
Node* Node::find_similar(int opc) {
  if (req() >= 2) {
    Node* def = in(1);
    // Only scan if the first real input has another use to compare against.
    if (def && def->outcnt() >= 2) {
      for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
        Node* use = def->fast_out(i);
        if (use->Opcode() == opc &&
            use->req() == req()) {
          uint j;
          // Compare all inputs, including control at index 0.
          for (j = 0; j < use->req(); j++) {
            if (use->in(j) != in(j)) {
              break;
            }
          }
          if (j == use->req()) {
            return use;           // all inputs matched
          }
        }
      }
    }
  }
  return NULL;
}
1960 //--------------------------unique_ctrl_out------------------------------
1961 // Return the unique control out if only one. Null if none or more than one.
1962 Node* Node::unique_ctrl_out() {
1963 Node* found = NULL;
1964 for (uint i = 0; i < outcnt(); i++) {
1965 Node* use = raw_out(i);
1966 if (use->is_CFG() && use != this) {
1967 if (found != NULL) return NULL;
1968 found = use;
1969 }
1970 }
1971 return found;
1972 }
1974 //=============================================================================
1975 //------------------------------yank-------------------------------------------
1976 // Find and remove
1977 void Node_List::yank( Node *n ) {
1978 uint i;
1979 for( i = 0; i < _cnt; i++ )
1980 if( _nodes[i] == n )
1981 break;
1983 if( i < _cnt )
1984 _nodes[i] = _nodes[--_cnt];
1985 }
1987 //------------------------------dump-------------------------------------------
1988 void Node_List::dump() const {
1989 #ifndef PRODUCT
1990 for( uint i = 0; i < _cnt; i++ )
1991 if( _nodes[i] ) {
1992 tty->print("%5d--> ",i);
1993 _nodes[i]->dump();
1994 }
1995 #endif
1996 }
//=============================================================================
//------------------------------remove-----------------------------------------
// Remove 'n' if present.  The membership bitmap makes the common
// "not on the worklist" case O(1); actual removal swaps in the last element.
void Unique_Node_List::remove( Node *n ) {
  if( _in_worklist[n->_idx] ) {
    for( uint i = 0; i < size(); i++ )
      if( _nodes[i] == n ) {
        map(i,Node_List::pop());      // overwrite slot with popped last entry
        _in_worklist >>= n->_idx;     // clear membership bit
        return;
      }
    ShouldNotReachHere();             // bitmap said present, but not found
  }
}
2012 //-----------------------remove_useless_nodes----------------------------------
2013 // Remove useless nodes from worklist
2014 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
2016 for( uint i = 0; i < size(); ++i ) {
2017 Node *n = at(i);
2018 assert( n != NULL, "Did not expect null entries in worklist");
2019 if( ! useful.test(n->_idx) ) {
2020 _in_worklist >>= n->_idx;
2021 map(i,Node_List::pop());
2022 // Node *replacement = Node_List::pop();
2023 // if( i != size() ) { // Check if removing last entry
2024 // _nodes[i] = replacement;
2025 // }
2026 --i; // Visit popped node
2027 // If it was last entry, loop terminates since size() was also reduced
2028 }
2029 }
2030 }
//=============================================================================
// Double the capacity of the stack, preserving contents and the top position.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1;             // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  // Re-derive boundary pointers against the (possibly relocated) base.
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top;        // restore _top
}
2042 // Node_Stack is used to map nodes.
2043 Node* Node_Stack::find(uint idx) const {
2044 uint sz = size();
2045 for (uint i=0; i < sz; i++) {
2046 if (idx == index_at(i) )
2047 return node_at(i);
2048 }
2049 return NULL;
2050 }
//=============================================================================
// Size of this node, in bytes, for Arena cloning.
uint TypeNode::size_of() const { return sizeof(*this); }
#ifndef PRODUCT
//------------------------------dump_spec--------------------------------------
// Print the node-specific part of a dump: the attached type.
void TypeNode::dump_spec(outputStream *st) const {
  if( !Verbose && !WizardMode ) {
    // standard dump does this in Verbose and WizardMode
    st->print(" #"); _type->dump_on(st);
  }
}
#endif
// Hash includes the type so GVN distinguishes same-shape nodes of different type.
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
// GVN equality: types must compare equal (Type::cmp returns 0 on a match).
uint TypeNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
// A TypeNode's type is fixed: both bottom_type and Value yield the stored _type.
const Type *TypeNode::bottom_type() const { return _type; }
const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }
//------------------------------ideal_reg--------------------------------------
// Ideal register class, as determined by the attached type.
uint TypeNode::ideal_reg() const {
  return _type->ideal_reg();
}