Wed, 24 Apr 2013 20:55:28 -0400
8003853: specify offset of IC load in java_to_interp stub
Summary: refactored code to allow platform-specific differences
Reviewed-by: dlong, twisti
Contributed-by: Goetz Lindenmaier <goetz.lindenmaier@sap.com>
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/cfgnode.hpp"
29 #include "opto/connode.hpp"
30 #include "opto/machnode.hpp"
31 #include "opto/matcher.hpp"
32 #include "opto/node.hpp"
33 #include "opto/opcodes.hpp"
34 #include "opto/regmask.hpp"
35 #include "opto/type.hpp"
36 #include "utilities/copy.hpp"
class RegMask;
// #include "phase.hpp"
class PhaseTransform;
class PhaseGVN;

// Arena we are currently building Nodes in
// Sentinel: any register number at or above this value does not denote a
// real machine register.
const uint Node::NotAMachineReg = 0xffff0000;

#ifndef PRODUCT
extern int nodes_created;   // running count of Nodes created (non-product builds)
#endif
50 #ifdef ASSERT
//-------------------------- construct_node------------------------------------
// Set a breakpoint here to identify where a particular node index is built.
// Assigns this node a monotonically increasing _debug_idx whose low five
// decimal digits mirror _idx, and honors the BreakAtNode debug flag.
void Node::verify_construction() {
  _debug_orig = NULL;
  int old_debug_idx = Compile::debug_idx();
  int new_debug_idx = old_debug_idx+1;
  if (new_debug_idx > 0) {
    // Arrange that the lowest five decimal digits of _debug_idx
    // will repeat those of _idx. In case this is somehow pathological,
    // we continue to assign negative numbers (!) consecutively.
    const int mod = 100000;
    int bump = (int)(_idx - new_debug_idx) % mod;
    if (bump < 0) bump += mod;
    assert(bump >= 0 && bump < mod, "");
    new_debug_idx += bump;
  }
  Compile::set_debug_idx(new_debug_idx);
  set_debug_idx( new_debug_idx );
  assert(Compile::current()->unique() < (UINT_MAX - 1), "Node limit exceeded UINT_MAX");
  // BreakAtNode matches either the compile-local _idx or the global _debug_idx.
  if (BreakAtNode != 0 && (_debug_idx == BreakAtNode || (int)_idx == BreakAtNode)) {
    tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d", _idx, _debug_idx);
    BREAKPOINT;
  }
#if OPTO_DU_ITERATOR_ASSERT
  _last_del = NULL;
  _del_tick = 0;
#endif
  _hash_lock = 0;
}
83 // #ifdef ASSERT ...
85 #if OPTO_DU_ITERATOR_ASSERT
86 void DUIterator_Common::sample(const Node* node) {
87 _vdui = VerifyDUIterators;
88 _node = node;
89 _outcnt = node->_outcnt;
90 _del_tick = node->_del_tick;
91 _last = NULL;
92 }
// Check that this iterator still matches its node and that no deletions
// have occurred since the last sample/resync. (at_end_ok is used by
// subclasses for their bounds checks; it is not needed here.)
void DUIterator_Common::verify(const Node* node, bool at_end_ok) {
  assert(_node == node, "consistent iterator source");
  assert(_del_tick == node->_del_tick, "no unexpected deletions allowed");
}
// Accept a deletion performed by the loop body and re-sync the iterator's
// cached counters to the node's new state.
void DUIterator_Common::verify_resync() {
  // Ensure that the loop body has just deleted the last guy produced.
  const Node* node = _node;
  // Ensure that at least one copy of the last-seen edge was deleted.
  // Note: It is OK to delete multiple copies of the last-seen edge.
  // Unfortunately, we have no way to verify that all the deletions delete
  // that same edge. On this point we must use the Honor System.
  assert(node->_del_tick >= _del_tick+1, "must have deleted an edge");
  assert(node->_last_del == _last, "must have deleted the edge just produced");
  // We liked this deletion, so accept the resulting outcnt and tick.
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}
// Re-seed this iterator from another (typically the result of Node::outs()).
// Self-assignment is a no-op; everything except _last is re-initialized.
void DUIterator_Common::reset(const DUIterator_Common& that) {
  if (this == &that) return;  // ignore assignment to self
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    _last = that._last;
    _vdui = that._vdui;
  }
  // Note: It is legal (though odd) for an iterator over some node x
  // to be reassigned to iterate over another node y. Some doubly-nested
  // progress loops depend on being able to do this.
  const Node* node = that._node;
  // Re-initialize everything, except _last.
  _node = node;
  _outcnt = node->_outcnt;
  _del_tick = node->_del_tick;
}
130 void DUIterator::sample(const Node* node) {
131 DUIterator_Common::sample(node); // Initialize the assertion data.
132 _refresh_tick = 0; // No refreshes have happened, as yet.
133 }
// Common checks plus a bounds check on the index; at_end_ok permits the
// index to sit exactly one past the last out-edge.
void DUIterator::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  assert(_idx < node->_outcnt + (uint)at_end_ok, "idx in range");
}
// Called on each ++i; clamps _idx after a refresh so the bounds assert holds.
void DUIterator::verify_increment() {
  if (_refresh_tick & 1) {
    // We have refreshed the index during this loop.
    // Fix up _idx to meet asserts.
    if (_idx > _outcnt) _idx = _outcnt;
  }
  verify(_node, true);
}
// Accept a deletion and re-check the iterator's invariants.
void DUIterator::verify_resync() {
  // Note: We do not assert on _outcnt, because insertions are OK here.
  DUIterator_Common::verify_resync();
  // Make sure we are still in sync, possibly with no more out-edges:
  verify(_node, true);
}
// Re-seed from a freshly created iterator (the result of Node::outs()).
// Also clears the "was refreshed" flag and enforces quick convergence.
void DUIterator::reset(const DUIterator& that) {
  if (this == &that) return;  // self assignment is always a no-op
  assert(that._refresh_tick == 0, "assign only the result of Node::outs()");
  assert(that._idx == 0, "assign only the result of Node::outs()");
  assert(_idx == that._idx, "already assigned _idx");
  if (!_vdui) {
    // We need to initialize everything, overwriting garbage values.
    sample(that._node);
  } else {
    DUIterator_Common::reset(that);
    if (_refresh_tick & 1) {
      _refresh_tick++;  // Clear the "was refreshed" flag.
    }
    // _refresh_tick counts loop restarts (two per refresh cycle); a loop
    // that keeps refreshing without converging is a bug.
    assert(_refresh_tick < 2*100000, "DU iteration must converge quickly");
  }
}
173 void DUIterator::refresh() {
174 DUIterator_Common::sample(_node); // Re-fetch assertion data.
175 _refresh_tick |= 1; // Set the "was refreshed" flag.
176 }
// Called when the iteration loop exits; enforces the refresh protocol.
void DUIterator::verify_finish() {
  // If the loop has killed the node, do not require it to re-run.
  if (_node->_outcnt == 0) _refresh_tick &= ~1;
  // If this assert triggers, it means that a loop used refresh_out_pos
  // to re-synch an iteration index, but the loop did not correctly
  // re-run itself, using a "while (progress)" construct.
  // This iterator enforces the rule that you must keep trying the loop
  // until it "runs clean" without any need for refreshing.
  assert(!(_refresh_tick & 1), "the loop must run once with no refreshing");
}
// Fast iterators cache a raw pointer into the out array; verify it is in
// range and that no insertions changed the array underneath us.
void DUIterator_Fast::verify(const Node* node, bool at_end_ok) {
  DUIterator_Common::verify(node, at_end_ok);
  Node** out = node->_out;
  uint cnt = node->_outcnt;
  assert(cnt == _outcnt, "no insertions allowed");
  assert(_outp >= out && _outp <= out + cnt - !at_end_ok, "outp in range");
  // This last check is carefully designed to work for NO_OUT_ARRAY.
}
// Verify a limit pointer ("imax"): it must sit exactly one past the last
// out-edge of the node.
void DUIterator_Fast::verify_limit() {
  const Node* node = _node;
  verify(node, true);
  assert(_outp == node->_out + node->_outcnt, "limit still correct");
}
// Accept deletions performed by the loop body. A fast iterator may be
// either the moving pointer "i" or the limit pointer "imax"; the two are
// distinguished by whether _outp currently sits at the cached limit.
void DUIterator_Fast::verify_resync() {
  const Node* node = _node;
  if (_outp == node->_out + _outcnt) {
    // Note that the limit imax, not the pointer i, gets updated with the
    // exact count of deletions. (For the pointer it's always "--i".)
    assert(node->_outcnt+node->_del_tick == _outcnt+_del_tick, "no insertions allowed with deletion(s)");
    // This is a limit pointer, with a name like "imax".
    // Fudge the _last field so that the common assert will be happy.
    _last = (Node*) node->_last_del;
    DUIterator_Common::verify_resync();
  } else {
    assert(node->_outcnt < _outcnt, "no insertions allowed with deletion(s)");
    // A normal internal pointer.
    DUIterator_Common::verify_resync();
    // Make sure we are still in sync, possibly with no more out-edges:
    verify(node, true);
  }
}
// Verify "imax -= n": the limit pointer is being pulled back by exactly
// the number of edges the node reports as deleted.
void DUIterator_Fast::verify_relimit(uint n) {
  const Node* node = _node;
  assert((int)n > 0, "use imax -= n only with a positive count");
  // This must be a limit pointer, with a name like "imax".
  assert(_outp == node->_out + node->_outcnt, "apply -= only to a limit (imax)");
  // The reported number of deletions must match what the node saw.
  assert(node->_del_tick == _del_tick + n, "must have deleted n edges");
  // Fudge the _last field so that the common assert will be happy.
  _last = (Node*) node->_last_del;
  DUIterator_Common::verify_resync();
}
// Re-seed from another fast iterator; the raw pointer must already match.
void DUIterator_Fast::reset(const DUIterator_Fast& that) {
  assert(_outp == that._outp, "already assigned _outp");
  DUIterator_Common::reset(that);
}
// A "last" iterator walks the out array backwards; its _outp may underflow
// the array start by one when at_end_ok. Temporarily shift _outp so the
// forward-oriented Fast check can be reused, then restore it.
void DUIterator_Last::verify(const Node* node, bool at_end_ok) {
  // at_end_ok means the _outp is allowed to underflow by 1
  _outp += at_end_ok;
  DUIterator_Fast::verify(node, at_end_ok);  // check _del_tick, etc.
  _outp -= at_end_ok;
  assert(_outp == (node->_out + node->_outcnt) - 1, "pointer must point to end of nodes");
}
// For a backwards iterator the "limit" is the start of the out array.
void DUIterator_Last::verify_limit() {
  // Do not require the limit address to be resynched.
  //verify(node, true);
  assert(_outp == _node->_out, "limit still correct");
}
// Account for num_edges deletions made by the loop body, then re-verify.
void DUIterator_Last::verify_step(uint num_edges) {
  assert((int)num_edges > 0, "need non-zero edge count for loop progress");
  // Fold the deletions into the cached counters before re-checking.
  _outcnt   -= num_edges;
  _del_tick += num_edges;
  // Make sure we are still in sync, possibly with no more out-edges:
  const Node* node = _node;
  verify(node, true);
  assert(node->_last_del == _last, "must have deleted the edge just produced");
}
265 #endif //OPTO_DU_ITERATOR_ASSERT
268 #endif //ASSERT
271 // This constant used to initialize _out may be any non-null value.
272 // The value NULL is reserved for the top node only.
273 #define NO_OUT_ARRAY ((Node**)-1)
275 // This funny expression handshakes with Node::operator new
276 // to pull Compile::current out of the new node's _out field,
277 // and then calls a subroutine which manages most field
278 // initializations. The only one which is tricky is the
279 // _idx field, which is const, and so must be initialized
280 // by a return value, not an assignment.
281 //
282 // (Aren't you thankful that Java finals don't require so many tricks?)
283 #define IDX_INIT(req) this->Init((req), (Compile*) this->_out)
284 #ifdef _MSC_VER // the IDX_INIT hack falls foul of warning C4355
285 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
286 #endif
288 // Out-of-line code from node constructors.
289 // Executed only when extra debug info. is being passed around.
290 static void init_node_notes(Compile* C, int idx, Node_Notes* nn) {
291 C->set_node_notes_at(idx, nn);
292 }
// Shared initialization code.
// Invoked from the IDX_INIT macro in each constructor's initializer list:
// allocates the input-edge array, captures any default node notes, and
// initializes the bookkeeping fields. Returns the fresh unique index,
// which the caller uses to initialize the const _idx field.
inline int Node::Init(int req, Compile* C) {
  assert(Compile::current() == C, "must use operator new(Compile*)");
  int idx = C->next_unique();

  // Allocate memory for the necessary number of edges.
  if (req > 0) {
    // Allocate space for _in array to have double alignment.
    _in = (Node **) ((char *) (C->node_arena()->Amalloc_D(req * sizeof(void*))));
#ifdef ASSERT
    _in[req-1] = this;  // magic cookie for assertion check
#endif
  }
  // If there are default notes floating around, capture them:
  Node_Notes* nn = C->default_node_notes();
  if (nn != NULL) init_node_notes(C, idx, nn);

  // Note: At this point, C is dead,
  // and we begin to initialize the new Node.
  _cnt = _max = req;
  _outcnt = _outmax = 0;
  _class_id = Class_Node;
  _flags = 0;
  _out = NO_OUT_ARRAY;
  return idx;
}
//------------------------------Node-------------------------------------------
// Create a Node, with a given number of required edges.
Node::Node(uint req)
  : _idx(IDX_INIT(req))
{
  assert( req < (uint)(MaxNodeLimit - NodeLimitFudgeFactor), "Input limit exceeded" );
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  if (req == 0) {
    // Zero inputs: _in still holds the marker value (apparently left by
    // operator new — confirm); no input array was allocated.
    assert( _in == (Node**)this, "Must not pass arg count to 'new'" );
    _in = NULL;
  } else {
    // The magic cookie stored by Init confirms the array was sized for req.
    assert( _in[req-1] == this, "Must pass arg count to 'new'" );
    Node** to = _in;
    // NULL every input slot; edges are hooked up later via set_req.
    for(uint i = 0; i < req; i++) {
      to[i] = NULL;
    }
  }
}
//------------------------------Node-------------------------------------------
// One required input: store it and add the reciprocal def-use (out) edge.
Node::Node(Node *n0)
  : _idx(IDX_INIT(1))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[0] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Two required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1)
  : _idx(IDX_INIT(2))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[1] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Three required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1, Node *n2)
  : _idx(IDX_INIT(3))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[2] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Four required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3)
  : _idx(IDX_INIT(4))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[3] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Five required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3, Node *n4)
  : _idx(IDX_INIT(5))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[4] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Six required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5)
  : _idx(IDX_INIT(6))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[5] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
}
//------------------------------Node-------------------------------------------
// Seven required inputs; each non-NULL input gets a reciprocal out edge.
Node::Node(Node *n0, Node *n1, Node *n2, Node *n3,
           Node *n4, Node *n5, Node *n6)
  : _idx(IDX_INIT(7))
{
  debug_only( verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Assert we allocated space for input array already
  assert( _in[6] == this, "Must pass arg count to 'new'" );
  assert( is_not_dead(n0), "can not use dead node");
  assert( is_not_dead(n1), "can not use dead node");
  assert( is_not_dead(n2), "can not use dead node");
  assert( is_not_dead(n3), "can not use dead node");
  assert( is_not_dead(n4), "can not use dead node");
  assert( is_not_dead(n5), "can not use dead node");
  assert( is_not_dead(n6), "can not use dead node");
  _in[0] = n0; if (n0 != NULL) n0->add_out((Node *)this);
  _in[1] = n1; if (n1 != NULL) n1->add_out((Node *)this);
  _in[2] = n2; if (n2 != NULL) n2->add_out((Node *)this);
  _in[3] = n3; if (n3 != NULL) n3->add_out((Node *)this);
  _in[4] = n4; if (n4 != NULL) n4->add_out((Node *)this);
  _in[5] = n5; if (n5 != NULL) n5->add_out((Node *)this);
  _in[6] = n6; if (n6 != NULL) n6->add_out((Node *)this);
}
//------------------------------clone------------------------------------------
// Clone a Node.
// Makes a shallow copy of this node with the same inputs (adding the
// reciprocal def-use edges), an empty out array, and a fresh unique _idx.
// MachNode operand arrays and CallNode JVMState are deep-cloned.
Node *Node::clone() const {
  Compile *compile = Compile::current();
  uint s = size_of();  // Size of inherited Node
  // Allocate the object and its input array in one arena chunk; the _in
  // array is placed immediately after the object itself.
  Node *n = (Node*)compile->node_arena()->Amalloc_D(size_of() + _max*sizeof(Node*));
  Copy::conjoint_words_to_lower((HeapWord*)this, (HeapWord*)n, s);
  // Set the new input pointer array
  n->_in = (Node**)(((char*)n)+s);
  // Cannot share the old output pointer array, so kill it
  n->_out = NO_OUT_ARRAY;
  // And reset the counters to 0
  n->_outcnt = 0;
  n->_outmax = 0;
  // Unlock this guy, since he is not in any hash table.
  debug_only(n->_hash_lock = 0);
  // Walk the old node's input list to duplicate its edges
  uint i;
  for( i = 0; i < len(); i++ ) {
    Node *x = in(i);
    n->_in[i] = x;
    if (x != NULL) x->add_out(n);
  }
  // A clone of a macro/expensive node must be tracked just like the original.
  if (is_macro())
    compile->add_macro_node(n);
  if (is_expensive())
    compile->add_expensive_node(n);

  n->set_idx(compile->next_unique()); // Get new unique index as well
  debug_only( n->verify_construction() );
  NOT_PRODUCT(nodes_created++);
  // Do not patch over the debug_idx of a clone, because it makes it
  // impossible to break on the clone's moment of creation.
  //debug_only( n->set_debug_idx( debug_idx() ) );

  compile->copy_node_notes_to(n, (Node*) this);

  // MachNode clone
  uint nopnds;
  if (this->is_Mach() && (nopnds = this->as_Mach()->num_opnds()) > 0) {
    MachNode *mach = n->as_Mach();
    MachNode *mthis = this->as_Mach();
    // Get address of _opnd_array.
    // It should be the same offset since it is the clone of this node.
    // (from may point before or after &_opnds within the subclass object;
    // the byte delta is reproduced relative to the clone.)
    MachOper **from = mthis->_opnds;
    MachOper **to = (MachOper **)((size_t)(&mach->_opnds) +
                    pointer_delta((const void*)from,
                                  (const void*)(&mthis->_opnds), 1));
    mach->_opnds = to;
    for ( uint i = 0; i < nopnds; ++i ) {
      to[i] = from[i]->clone(compile);
    }
  }
  // cloning CallNode may need to clone JVMState
  if (n->is_Call()) {
    CallNode *call = n->as_Call();
    call->clone_jvms();
  }
  return n; // Return the clone
}
532 //---------------------------setup_is_top--------------------------------------
533 // Call this when changing the top node, to reassert the invariants
534 // required by Node::is_top. See Compile::set_cached_top_node.
535 void Node::setup_is_top() {
536 if (this == (Node*)Compile::current()->top()) {
537 // This node has just become top. Kill its out array.
538 _outcnt = _outmax = 0;
539 _out = NULL; // marker value for top
540 assert(is_top(), "must be top");
541 } else {
542 if (_out == NULL) _out = NO_OUT_ARRAY;
543 assert(!is_top(), "must not be top");
544 }
545 }
//------------------------------~Node------------------------------------------
// Fancy destructor; eagerly attempt to reclaim Node numberings and storage
// Debug-only counters tracking how much numbering/storage was reclaimed.
extern int reclaim_idx ;
extern int reclaim_in ;
extern int reclaim_node;
void Node::destruct() {
  // Eagerly reclaim unique Node numberings
  Compile* compile = Compile::current();
  if ((uint)_idx+1 == compile->unique()) {
    // This node holds the most recently assigned index; give it back.
    compile->set_unique(compile->unique()-1);
#ifdef ASSERT
    reclaim_idx++;
#endif
  }
  // Clear debug info:
  Node_Notes* nn = compile->node_notes_at(_idx);
  if (nn != NULL) nn->clear();
  // Walk the input array, freeing the corresponding output edges
  _cnt = _max;  // forget req/prec distinction
  uint i;
  for( i = 0; i < _max; i++ ) {
    set_req(i, NULL);
    //assert(def->out(def->outcnt()-1) == (Node *)this,"bad def-use hacking in reclaim");
  }
  assert(outcnt() == 0, "deleting a node must not leave a dangling use");
  // See if the input array was allocated just prior to the object
  int edge_size = _max*sizeof(void*);
  int out_edge_size = _outmax*sizeof(void*);
  char *edge_end = ((char*)_in) + edge_size;
  char *out_array = (char*)(_out == NO_OUT_ARRAY? NULL: _out);
  char *out_edge_end = out_array + out_edge_size;
  int node_size = size_of();

  // Free the output edge array
  if (out_edge_size > 0) {
#ifdef ASSERT
    if( out_edge_end == compile->node_arena()->hwm() )
      reclaim_in += out_edge_size;  // count reclaimed out edges with in edges
#endif
    compile->node_arena()->Afree(out_array, out_edge_size);
  }

  // Free the input edge array and the node itself
  if( edge_end == (char*)this ) {
    // Input array immediately precedes the object (the one-chunk layout).
    // NOTE(review): in ASSERT builds this branch only counts the storage
    // and does not Afree it — the node is smashed below instead; the
    // combined free happens only in the #else (non-ASSERT) path. Confirm
    // this asymmetry is intentional.
#ifdef ASSERT
    if( edge_end+node_size == compile->node_arena()->hwm() ) {
      reclaim_in += edge_size;
      reclaim_node+= node_size;
    }
#else
    // It was; free the input array and object all in one hit
    compile->node_arena()->Afree(_in,edge_size+node_size);
#endif
  } else {
    // Free just the input array
#ifdef ASSERT
    if( edge_end == compile->node_arena()->hwm() )
      reclaim_in += edge_size;
#endif
    compile->node_arena()->Afree(_in,edge_size);

    // Free just the object
#ifdef ASSERT
    if( ((char*)this) + node_size == compile->node_arena()->hwm() )
      reclaim_node+= node_size;
#else
    compile->node_arena()->Afree(this,node_size);
#endif
  }
  if (is_macro()) {
    compile->remove_macro_node(this);
  }
  if (is_expensive()) {
    compile->remove_expensive_node(this);
  }
#ifdef ASSERT
  // We will not actually delete the storage, but we'll make the node unusable.
  *(address*)this = badAddress;  // smash the C++ vtbl, probably
  _in = _out = (Node**) badAddress;
  _max = _cnt = _outmax = _outcnt = 0;
#endif
}
//------------------------------grow-------------------------------------------
// Grow the input array, making space for more edges
// After return, _max > len and all new slots are NULL.
void Node::grow( uint len ) {
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _max;
  if( new_max == 0 ) {
    // First allocation: start with four NULL slots.
    // NOTE(review): this fast path always sizes to 4 regardless of len;
    // callers appear to pass len <= _max+small — confirm no caller needs
    // more than 4 on the _max==0 path.
    _max = 4;
    _in = (Node**)arena->Amalloc(4*sizeof(Node*));
    Node** to = _in;
    to[0] = NULL;
    to[1] = NULL;
    to[2] = NULL;
    to[3] = NULL;
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  _in = (Node**)arena->Arealloc(_in, _max*sizeof(Node*), new_max*sizeof(Node*));
  Copy::zero_to_bytes(&_in[_max], (new_max-_max)*sizeof(Node*)); // NULL all new space
  _max = new_max;  // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_max == new_max && _max > len, "int width of _max is too small");
}
//-----------------------------out_grow----------------------------------------
// Grow the output (def-use) array, making space for more edges.
// Unlike grow(), the new slots are NOT zeroed; _outcnt bounds valid entries.
void Node::out_grow( uint len ) {
  assert(!is_top(), "cannot grow a top node's out array");
  Arena* arena = Compile::current()->node_arena();
  uint new_max = _outmax;
  if( new_max == 0 ) {
    // First allocation: start with space for four out-edges.
    _outmax = 4;
    _out = (Node **)arena->Amalloc(4*sizeof(Node*));
    return;
  }
  while( new_max <= len ) new_max <<= 1; // Find next power-of-2
  // Trimming to limit allows a uint8 to handle up to 255 edges.
  // Previously I was using only powers-of-2 which peaked at 128 edges.
  //if( new_max >= limit ) new_max = limit-1;
  assert(_out != NULL && _out != NO_OUT_ARRAY, "out must have sensible value");
  _out = (Node**)arena->Arealloc(_out,_outmax*sizeof(Node*),new_max*sizeof(Node*));
  //Copy::zero_to_bytes(&_out[_outmax], (new_max-_outmax)*sizeof(Node*)); // NULL all new space
  _outmax = new_max;  // Record new max length
  // This assertion makes sure that Node::_max is wide enough to
  // represent the numerical value of new_max.
  assert(_outmax == new_max && _outmax > len, "int width of _outmax is too small");
}
#ifdef ASSERT
//------------------------------is_dead----------------------------------------
// A node is considered dead when all of its inputs are NULL. Top, Mach
// nodes, and pinch points (plain Op_Node with uses) are exempted because
// they can legitimately look dead.
bool Node::is_dead() const {
  // Mach and pinch point nodes may look like dead.
  if( is_top() || is_Mach() || (Opcode() == Op_Node && _outcnt > 0) )
    return false;
  for( uint i = 0; i < _max; i++ )
    if( _in[i] != NULL )
      return false;
  dump();  // print the node as a debugging aid before reporting it dead
  return true;
}
#endif
698 //------------------------------is_unreachable---------------------------------
699 bool Node::is_unreachable(PhaseIterGVN &igvn) const {
700 assert(!is_Mach(), "doesn't work with MachNodes");
701 return outcnt() == 0 || igvn.type(this) == Type::TOP || in(0)->is_top();
702 }
//------------------------------add_req----------------------------------------
// Add a new required input at the end
// Precedence edges live after the required edges; the first prec edge is
// shuffled to the end of the prec list to open a slot for the new req edge.
void Node::add_req( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Look to see if I can move precedence down one without reallocating
  if( (_cnt >= _max) || (in(_max-1) != NULL) )
    grow( _max+1 );

  // Find a precedence edge to move
  if( in(_cnt) != NULL ) {     // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( in(i) == NULL )      // Find the NULL at end of prec edge list
        break;                 // There must be one, since we grew the array
    _in[i] = in(_cnt);         // Move prec over, making space for req edge
  }
  _in[_cnt++] = n;             // Stuff over old prec edge
  if (n != NULL) n->add_out((Node *)this);  // Add the reciprocal def-use edge
}
//---------------------------add_req_batch-------------------------------------
// Add the same required input n, m times, at the end.
// Precedence edges are slid up by m positions to make room.
void Node::add_req_batch( Node *n, uint m ) {
  assert( is_not_dead(n), "can not use dead node");
  // check various edge cases
  if ((int)m <= 1) {
    assert((int)m >= 0, "oob");
    if (m != 0) add_req(n);
    return;
  }

  // Look to see if I can move precedence down one without reallocating
  // NOTE(review): _in[_max-m] assumes _max >= m; confirm callers never
  // reach here with a node whose _max is smaller than the batch size.
  if( (_cnt+m) > _max || _in[_max-m] )
    grow( _max+m );

  // Find a precedence edge to move
  if( _in[_cnt] != NULL ) {    // Next precedence edge is busy?
    uint i;
    for( i=_cnt; i<_max; i++ )
      if( _in[i] == NULL )     // Find the NULL at end of prec edge list
        break;                 // There must be one, since we grew the array
    // Slide all the precs over by m positions (assume #prec << m).
    Copy::conjoint_words_to_higher((HeapWord*)&_in[_cnt], (HeapWord*)&_in[_cnt+m], ((i-_cnt)*sizeof(Node*)));
  }

  // Stuff over the old prec edges
  for(uint i=0; i<m; i++ ) {
    _in[_cnt++] = n;
  }

  // Insert multiple out edges on the node.
  if (n != NULL && !n->is_top()) {
    for(uint i=0; i<m; i++ ) {
      n->add_out((Node *)this);
    }
  }
}
//------------------------------del_req----------------------------------------
// Delete the required edge and compact the edge array
// The last required edge is moved into the vacated slot (order-changing).
void Node::del_req( uint idx ) {
  assert( idx < _cnt, "oob");
  assert( !VerifyHashTableKeys || _hash_lock == 0,
          "remove node from hash table before modifying it");
  // First remove corresponding def-use edge
  Node *n = in(idx);
  if (n != NULL) n->del_out((Node *)this);
  _in[idx] = in(--_cnt);  // Compact the array
  _in[_cnt] = NULL;       // NULL out emptied slot
}
//------------------------------ins_req----------------------------------------
// Insert a new required input at position idx, sliding later edges up.
void Node::ins_req( uint idx, Node *n ) {
  assert( is_not_dead(n), "can not use dead node");
  add_req(NULL);                // Make space (also bumps _cnt)
  assert( idx < _max, "Must have allocated enough space");
  // Slide over
  if(_cnt-idx-1 > 0) {
    Copy::conjoint_words_to_higher((HeapWord*)&_in[idx], (HeapWord*)&_in[idx+1], ((_cnt-idx-1)*sizeof(Node*)));
  }
  _in[idx] = n;                             // Stuff over old required edge
  if (n != NULL) n->add_out((Node *)this);  // Add reciprocal def-use edge
}
790 //-----------------------------find_edge---------------------------------------
791 int Node::find_edge(Node* n) {
792 for (uint i = 0; i < len(); i++) {
793 if (_in[i] == n) return i;
794 }
795 return -1;
796 }
//----------------------------replace_edge-------------------------------------
// Replace every input edge equal to 'old' with 'neww', using the proper
// setter for required vs. precedence position. Returns the number of
// edges replaced.
int Node::replace_edge(Node* old, Node* neww) {
  if (old == neww) return 0;  // nothing to do
  uint nrep = 0;
  for (uint i = 0; i < len(); i++) {
    if (in(i) == old) {
      if (i < req())
        set_req(i, neww);
      else
        set_prec(i, neww);    // precedence edges live past req()
      nrep++;
    }
  }
  return nrep;
}
//-------------------------disconnect_inputs-----------------------------------
// NULL out all inputs to eliminate incoming Def-Use edges.
// Return the number of edges between 'n' and 'this'
// If no edge to n was found, the node is recorded as dead with C.
int Node::disconnect_inputs(Node *n, Compile* C) {
  int edges_to_n = 0;

  uint cnt = req();
  for( uint i = 0; i < cnt; ++i ) {
    if( in(i) == 0 ) continue;
    if( in(i) == n ) ++edges_to_n;
    set_req(i, NULL);
  }
  // Remove precedence edges if any exist
  // Note: Safepoints may have precedence edges, even during parsing
  if( (req() != len()) && (in(req()) != NULL) ) {
    uint max = len();
    // Starts at 0 — required slots are already NULL, so only precs change.
    for( uint i = 0; i < max; ++i ) {
      if( in(i) == 0 ) continue;
      if( in(i) == n ) ++edges_to_n;
      set_prec(i, NULL);
    }
  }

  // Node::destruct requires all out edges be deleted first
  // debug_only(destruct();) // no reuse benefit expected
  if (edges_to_n == 0) {
    C->record_dead_node(_idx);
  }
  return edges_to_n;
}
845 //-----------------------------uncast---------------------------------------
846 // %%% Temporary, until we sort out CheckCastPP vs. CastPP.
847 // Strip away casting. (It is depth-limited.)
848 Node* Node::uncast() const {
849 // Should be inline:
850 //return is_ConstraintCast() ? uncast_helper(this) : (Node*) this;
851 if (is_ConstraintCast() || is_CheckCastPP())
852 return uncast_helper(this);
853 else
854 return (Node*) this;
855 }
//---------------------------uncast_helper-------------------------------------
// Walk up through a chain of ConstraintCast/CheckCastPP nodes and return the
// first node that is not such a cast.  Debug builds enforce a depth limit of
// K steps to catch cycles in the cast chain.
Node* Node::uncast_helper(const Node* p) {
#ifdef ASSERT
  uint depth_count = 0;
  const Node* orig_p = p;
#endif

  while (true) {
#ifdef ASSERT
    if (depth_count >= K) {
      // About to hit the assert below: dump the offending chain for diagnosis.
      orig_p->dump(4);
      if (p != orig_p)
        p->dump(1);
    }
    assert(depth_count++ < K, "infinite loop in Node::uncast_helper");
#endif
    if (p == NULL || p->req() != 2) {
      break;                    // not a single-input cast shape: stop
    } else if (p->is_ConstraintCast()) {
      p = p->in(1);             // strip the cast, keep walking
    } else if (p->is_CheckCastPP()) {
      p = p->in(1);             // strip the cast, keep walking
    } else {
      break;
    }
  }
  return (Node*) p;
}
//------------------------------add_prec---------------------------------------
// Add a new precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::add_prec( Node *n ) {
  assert( is_not_dead(n), "can not use dead node");

  // Check for NULL at end: grow the edge array if it is full, or if the
  // last slot is already occupied by a precedence edge.
  if( _cnt >= _max || in(_max-1) )
    grow( _max+1 );

  // Find a precedence edge to move: scan past the required inputs (_cnt)
  // to the first empty (NULL) precedence slot.
  uint i = _cnt;
  while( in(i) != NULL ) i++;
  _in[i] = n;                                // Stuff prec edge over NULL
  if ( n != NULL) n->add_out((Node *)this);  // Add mirror edge
}
//------------------------------rm_prec----------------------------------------
// Remove a precedence input.  Precedence inputs are unordered, with
// duplicates removed and NULLs packed down at the end.
void Node::rm_prec( uint j ) {

  // Find end of precedence list to pack NULLs: after the loop, 'i' is the
  // first NULL slot at or beyond j (or _max if the prec list is full).
  uint i;
  for( i=j; i<_max; i++ )
    if( !_in[i] )               // Find the NULL at end of prec edge list
      break;
  if (_in[j] != NULL) _in[j]->del_out((Node *)this);
  _in[j] = _in[--i];            // Move last element over removed guy
  _in[i] = NULL;                // NULL out last element
}
//------------------------------size_of----------------------------------------
// Size in bytes of this Node object.
uint Node::size_of() const { return sizeof(*this); }
//------------------------------ideal_reg--------------------------------------
// Default ideal register for a Node's result is 0 (none).
uint Node::ideal_reg() const { return 0; }
//------------------------------jvms-------------------------------------------
// Default: a plain Node carries no JVM state.
JVMState* Node::jvms() const { return NULL; }
#ifdef ASSERT
//------------------------------verify_jvms------------------------------------
// Debug check: return true iff 'using_jvms' appears somewhere on this node's
// JVMState caller chain.
bool Node::verify_jvms(const JVMState* using_jvms) const {
  for (JVMState* jvms = this->jvms(); jvms != NULL; jvms = jvms->caller()) {
    if (jvms == using_jvms) return true;
  }
  return false;
}

//------------------------------init_NodeProperty------------------------------
// Sanity-check that the class and flag encodings fit in a jushort.
void Node::init_NodeProperty() {
  assert(_max_classes <= max_jushort, "too many NodeProperty classes");
  assert(_max_flags <= max_jushort, "too many NodeProperty flags");
}
#endif
//------------------------------format-----------------------------------------
// Print as assembly.  Base Nodes print nothing; machine nodes override.
void Node::format( PhaseRegAlloc *, outputStream *st ) const {}
//------------------------------emit-------------------------------------------
// Emit bytes starting at parameter 'ptr'.  Base Nodes emit no code.
void Node::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {}
//------------------------------size-------------------------------------------
// Size of instruction in bytes.  Base Nodes occupy no code space.
uint Node::size(PhaseRegAlloc *ra_) const { return 0; }
//------------------------------CFG Construction-------------------------------
// Nodes that end basic blocks, e.g. IfTrue/IfFalse, JumpProjNode, Root,
// Goto and Return.  Default: this node does not end a block (returns NULL).
const Node *Node::is_block_proj() const { return 0; }
// Minimum guaranteed type: the base Node promises nothing better than BOTTOM.
const Type *Node::bottom_type() const { return Type::BOTTOM; }
//------------------------------raise_bottom_type------------------------------
// Refine the cached type of a TypeNode or LoadNode to 'new_type'.
// With +VerifyAliases, asserts the new type really does refine the old one.
void Node::raise_bottom_type(const Type* new_type) {
  if (is_Type()) {
    TypeNode *n = this->as_Type();
    if (VerifyAliases) {
      assert(new_type->higher_equal(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  } else if (is_Load()) {
    LoadNode *n = this->as_Load();
    if (VerifyAliases) {
      assert(new_type->higher_equal(n->type()), "new type must refine old type");
    }
    n->set_type(new_type);
  }
  // Other node kinds carry no updatable cached type; nothing to do.
}
//------------------------------Identity---------------------------------------
// Return a node that the given node is equivalent to.
Node *Node::Identity( PhaseTransform * ) {
  return this;                  // Default to no identities
}
//------------------------------Value------------------------------------------
// Compute a new Type for a node using the Type of the inputs.
const Type *Node::Value( PhaseTransform * ) const {
  return bottom_type();         // Default to worst-case Type
}
992 //------------------------------Ideal------------------------------------------
993 //
994 // 'Idealize' the graph rooted at this Node.
995 //
996 // In order to be efficient and flexible there are some subtle invariants
997 // these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks
998 // these invariants, although its too slow to have on by default. If you are
999 // hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
1000 //
1001 // The Ideal call almost arbitrarily reshape the graph rooted at the 'this'
1002 // pointer. If ANY change is made, it must return the root of the reshaped
1003 // graph - even if the root is the same Node. Example: swapping the inputs
1004 // to an AddINode gives the same answer and same root, but you still have to
1005 // return the 'this' pointer instead of NULL.
1006 //
1007 // You cannot return an OLD Node, except for the 'this' pointer. Use the
1008 // Identity call to return an old Node; basically if Identity can find
1009 // another Node have the Ideal call make no change and return NULL.
1010 // Example: AddINode::Ideal must check for add of zero; in this case it
1011 // returns NULL instead of doing any graph reshaping.
1012 //
1013 // You cannot modify any old Nodes except for the 'this' pointer. Due to
1014 // sharing there may be other users of the old Nodes relying on their current
1015 // semantics. Modifying them will break the other users.
1016 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for
1017 // "X+3" unchanged in case it is shared.
1018 //
1019 // If you modify the 'this' pointer's inputs, you should use
1020 // 'set_req'. If you are making a new Node (either as the new root or
1021 // some new internal piece) you may use 'init_req' to set the initial
1022 // value. You can make a new Node with either 'new' or 'clone'. In
1023 // either case, def-use info is correctly maintained.
1024 //
1025 // Example: reshape "(X+3)+4" into "X+7":
1026 // set_req(1, in(1)->in(1));
1027 // set_req(2, phase->intcon(7));
1028 // return this;
1029 // Example: reshape "X*4" into "X<<2"
1030 // return new (C) LShiftINode(in(1), phase->intcon(2));
1031 //
1032 // You must call 'phase->transform(X)' on any new Nodes X you make, except
1033 // for the returned root node. Example: reshape "X*31" with "(X<<5)-X".
1034 // Node *shift=phase->transform(new(C)LShiftINode(in(1),phase->intcon(5)));
1035 // return new (C) AddINode(shift, in(1));
1036 //
1037 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'.
1038 // These forms are faster than 'phase->transform(new (C) ConNode())' and Do
1039 // The Right Thing with def-use info.
1040 //
1041 // You cannot bury the 'this' Node inside of a graph reshape. If the reshaped
1042 // graph uses the 'this' Node it must be the root. If you want a Node with
1043 // the same Opcode as the 'this' pointer use 'clone'.
1044 //
// Default implementation: no reshaping is possible, per the contract
// described in the comment block above.
Node *Node::Ideal(PhaseGVN *phase, bool can_reshape) {
  return NULL;                  // Default to being Ideal already
}
1049 // Some nodes have specific Ideal subgraph transformations only if they are
1050 // unique users of specific nodes. Such nodes should be put on IGVN worklist
1051 // for the transformations to happen.
1052 bool Node::has_special_unique_user() const {
1053 assert(outcnt() == 1, "match only for unique out");
1054 Node* n = unique_out();
1055 int op = Opcode();
1056 if( this->is_Store() ) {
1057 // Condition for back-to-back stores folding.
1058 return n->Opcode() == op && n->in(MemNode::Memory) == this;
1059 } else if( op == Op_AddL ) {
1060 // Condition for convL2I(addL(x,y)) ==> addI(convL2I(x),convL2I(y))
1061 return n->Opcode() == Op_ConvL2I && n->in(1) == this;
1062 } else if( op == Op_SubI || op == Op_SubL ) {
1063 // Condition for subI(x,subI(y,z)) ==> subI(addI(x,z),y)
1064 return n->Opcode() == op && n->in(2) == this;
1065 }
1066 return false;
1067 };
1069 //--------------------------find_exact_control---------------------------------
1070 // Skip Proj and CatchProj nodes chains. Check for Null and Top.
1071 Node* Node::find_exact_control(Node* ctrl) {
1072 if (ctrl == NULL && this->is_Region())
1073 ctrl = this->as_Region()->is_copy();
1075 if (ctrl != NULL && ctrl->is_CatchProj()) {
1076 if (ctrl->as_CatchProj()->_con == CatchProjNode::fall_through_index)
1077 ctrl = ctrl->in(0);
1078 if (ctrl != NULL && !ctrl->is_top())
1079 ctrl = ctrl->in(0);
1080 }
1082 if (ctrl != NULL && ctrl->is_Proj())
1083 ctrl = ctrl->in(0);
1085 return ctrl;
1086 }
//--------------------------dominates------------------------------------------
// Helper function for MemNode::all_controls_dominate().
// Check if 'this' control node dominates or equal to 'sub' control node.
// We already know that if any path back to Root or Start reaches 'this',
// then all paths do so, so this is a simple search for one example,
// not an exhaustive search for a counterexample.
bool Node::dominates(Node* sub, Node_List &nlist) {
  assert(this->is_CFG(), "expecting control");
  assert(sub != NULL && sub->is_CFG(), "expecting control");

  // detect dead cycle without regions
  int iterations_without_region_limit = DominatorSearchLimit;

  Node* orig_sub = sub;
  Node* dom = this;
  bool met_dom = false;
  nlist.clear();

  // Walk 'sub' backward up the chain to 'dom', watching for regions.
  // After seeing 'dom', continue up to Root or Start.
  // If we hit a region (backward split point), it may be a loop head.
  // Keep going through one of the region's inputs. If we reach the
  // same region again, go through a different input. Eventually we
  // will either exit through the loop head, or give up.
  // (If we get confused, break out and return a conservative 'false'.)
  while (sub != NULL) {
    if (sub->is_top()) break; // Conservative answer for dead code.
    if (sub == dom) {
      if (nlist.size() == 0) {
        // No Region nodes except loops were visited before and the EntryControl
        // path was taken for loops: it did not walk in a cycle.
        return true;
      } else if (met_dom) {
        break; // already met before: walk in a cycle
      } else {
        // Region nodes were visited. Continue walk up to Start or Root
        // to make sure that it did not walk in a cycle.
        met_dom = true; // first time meet
        iterations_without_region_limit = DominatorSearchLimit; // Reset
      }
    }
    if (sub->is_Start() || sub->is_Root()) {
      // Success if we met 'dom' along a path to Start or Root.
      // We assume there are no alternative paths that avoid 'dom'.
      // (This assumption is up to the caller to ensure!)
      return met_dom;
    }
    Node* up = sub->in(0);
    // Normalize simple pass-through regions and projections:
    up = sub->find_exact_control(up);
    // If sub == up, we found a self-loop. Try to push past it.
    if (sub == up && sub->is_Loop()) {
      // Take loop entry path on the way up to 'dom'.
      up = sub->in(1); // in(LoopNode::EntryControl);
    } else if (sub == up && sub->is_Region() && sub->req() != 3) {
      // Always take in(1) path on the way up to 'dom' for clone regions
      // (with only one input) or regions which merge > 2 paths
      // (usually used to merge fast/slow paths).
      up = sub->in(1);
    } else if (sub == up && sub->is_Region()) {
      // Try both paths for Regions with 2 input paths (it may be a loop head).
      // It could give conservative 'false' answer without information
      // which region's input is the entry path.
      iterations_without_region_limit = DominatorSearchLimit; // Reset

      bool region_was_visited_before = false;
      // Was this Region node visited before?
      // If so, we have reached it because we accidentally took a
      // loop-back edge from 'sub' back into the body of the loop,
      // and worked our way up again to the loop header 'sub'.
      // So, take the first unexplored path on the way up to 'dom'.
      // nlist entries are tagged pointers: low bit set means the Region
      // has already been visited twice (both paths explored).
      for (int j = nlist.size() - 1; j >= 0; j--) {
        intptr_t ni = (intptr_t)nlist.at(j);
        Node* visited = (Node*)(ni & ~1);
        bool visited_twice_already = ((ni & 1) != 0);
        if (visited == sub) {
          if (visited_twice_already) {
            // Visited 2 paths, but still stuck in loop body. Give up.
            return false;
          }
          // The Region node was visited before only once.
          // (We will repush with the low bit set, below.)
          nlist.remove(j);
          // We will find a new edge and re-insert.
          region_was_visited_before = true;
          break;
        }
      }

      // Find an incoming edge which has not been seen yet; walk through it.
      assert(up == sub, "");
      uint skip = region_was_visited_before ? 1 : 0;
      for (uint i = 1; i < sub->req(); i++) {
        Node* in = sub->in(i);
        if (in != NULL && !in->is_top() && in != sub) {
          if (skip == 0) {
            up = in;
            break;
          }
          --skip; // skip this nontrivial input
        }
      }

      // Set 0 bit to indicate that both paths were taken.
      nlist.push((Node*)((intptr_t)sub + (region_was_visited_before ? 1 : 0)));
    }

    if (up == sub) {
      break; // some kind of tight cycle
    }
    if (up == orig_sub && met_dom) {
      // returned back after visiting 'dom'
      break; // some kind of cycle
    }
    if (--iterations_without_region_limit < 0) {
      break; // dead cycle
    }
    sub = up;
  }

  // Did not meet Root or Start node in pred. chain.
  // Conservative answer for dead code.
  return false;
}
//------------------------------kill_dead_code---------------------------------
// This control node is dead. Follow the subgraph below it making everything
// using it dead as well. This will happen normally via the usual IterGVN
// worklist but this call is more efficient. Do not update use-def info
// inside the dead region, just at the borders.
static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
  // Con's are a popular node to re-hit in the hash table again.
  if( dead->is_Con() ) return;

  // Can't put ResourceMark here since igvn->_worklist uses the same arena
  // for verify pass with +VerifyOpto and we add/remove elements in it here.
  Node_List nstack(Thread::current()->resource_area());

  Node *top = igvn->C->top();
  nstack.push(dead);

  while (nstack.size() > 0) {
    dead = nstack.pop();
    if (dead->outcnt() > 0) {
      // Keep dead node on stack until all uses are processed.
      nstack.push(dead);
      // For all Users of the Dead... ;-)
      for (DUIterator_Last kmin, k = dead->last_outs(kmin); k >= kmin; ) {
        Node* use = dead->last_out(k);
        igvn->hash_delete(use); // Yank from hash table prior to mod
        if (use->in(0) == dead) { // Found another dead node
          assert (!use->is_Con(), "Control for Con node should be Root node.");
          use->set_req(0, top); // Cut dead edge to prevent processing
          nstack.push(use); // the dead node again.
        } else { // Else found a not-dead user
          for (uint j = 1; j < use->req(); j++) {
            if (use->in(j) == dead) { // Turn all dead inputs into TOP
              use->set_req(j, top);
            }
          }
          igvn->_worklist.push(use);
        }
        // Refresh the iterator, since any number of kills might have happened.
        k = dead->last_outs(kmin);
      }
    } else { // (dead->outcnt() == 0)
      // Done with outputs.  Detach 'dead' from IGVN bookkeeping, then
      // smash its inputs, queueing any newly-dead inputs for processing.
      igvn->hash_delete(dead);
      igvn->_worklist.remove(dead);
      igvn->set_type(dead, Type::TOP);
      if (dead->is_macro()) {
        igvn->C->remove_macro_node(dead);
      }
      if (dead->is_expensive()) {
        igvn->C->remove_expensive_node(dead);
      }
      igvn->C->record_dead_node(dead->_idx);
      // Kill all inputs to the dead guy
      for (uint i=0; i < dead->req(); i++) {
        Node *n = dead->in(i); // Get input to dead guy
        if (n != NULL && !n->is_top()) { // Input is valid?
          dead->set_req(i, top); // Smash input away
          if (n->outcnt() == 0) { // Input also goes dead?
            if (!n->is_Con())
              nstack.push(n); // Clear it out as well
          } else if (n->outcnt() == 1 &&
                     n->has_special_unique_user()) {
            igvn->add_users_to_worklist( n );
          } else if (n->outcnt() <= 2 && n->is_Store()) {
            // Push store's uses on worklist to enable folding optimization for
            // store/store and store/load to the same address.
            // The restriction (outcnt() <= 2) is the same as in set_req_X()
            // and remove_globally_dead_node().
            igvn->add_users_to_worklist( n );
          }
        }
      }
    } // (dead->outcnt() == 0)
  } // while (nstack.size() > 0) for outputs
  return;
}
1290 //------------------------------remove_dead_region-----------------------------
1291 bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
1292 Node *n = in(0);
1293 if( !n ) return false;
1294 // Lost control into this guy? I.e., it became unreachable?
1295 // Aggressively kill all unreachable code.
1296 if (can_reshape && n->is_top()) {
1297 kill_dead_code(this, phase->is_IterGVN());
1298 return false; // Node is dead.
1299 }
1301 if( n->is_Region() && n->as_Region()->is_copy() ) {
1302 Node *m = n->nonnull_req();
1303 set_req(0, m);
1304 return true;
1305 }
1306 return false;
1307 }
//------------------------------Ideal_DU_postCCP-------------------------------
// Idealize graph, using DU info. Must clone result into new-space
Node *Node::Ideal_DU_postCCP( PhaseCCP * ) {
  return NULL;                  // Default to no change
}
//------------------------------hash-------------------------------------------
// Hash function over Nodes: folds the input pointers together with the
// required-input count and the opcode.
uint Node::hash() const {
  uint sum = 0;
  for( uint i=0; i<_cnt; i++ ) // Add in all inputs
    sum = (sum<<1)-(uintptr_t)in(i); // Ignore embedded NULLs
  return (sum>>2) + _cnt + Opcode();
}
//------------------------------cmp--------------------------------------------
// Compare special parts of simple Nodes.  Base Nodes have no extra state,
// so any two are considered equal.
uint Node::cmp( const Node &n ) const {
  return 1; // Must be same
}
1330 //------------------------------rematerialize-----------------------------------
1331 // Should we clone rather than spill this instruction?
1332 bool Node::rematerialize() const {
1333 if ( is_Mach() )
1334 return this->as_Mach()->rematerialize();
1335 else
1336 return (_flags & Flag_rematerialize) != 0;
1337 }
1339 //------------------------------needs_anti_dependence_check---------------------
1340 // Nodes which use memory without consuming it, hence need antidependences.
1341 bool Node::needs_anti_dependence_check() const {
1342 if( req() < 2 || (_flags & Flag_needs_anti_dependence_check) == 0 )
1343 return false;
1344 else
1345 return in(1)->bottom_type()->has_memory();
1346 }
1349 // Get an integer constant from a ConNode (or CastIINode).
1350 // Return a default value if there is no apparent constant here.
1351 const TypeInt* Node::find_int_type() const {
1352 if (this->is_Type()) {
1353 return this->as_Type()->type()->isa_int();
1354 } else if (this->is_Con()) {
1355 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1356 return this->bottom_type()->isa_int();
1357 }
1358 return NULL;
1359 }
// Get a pointer constant from a ConstNode.
// Returns the constant if it is a pointer ConstNode
// (only valid on Op_ConP nodes; the constant lives in the node's type).
intptr_t Node::get_ptr() const {
  assert( Opcode() == Op_ConP, "" );
  return ((ConPNode*)this)->type()->is_ptr()->get_con();
}
// Get a narrow oop constant from a ConNNode.
// (Only valid on Op_ConN nodes; the constant lives in the node's type.)
intptr_t Node::get_narrowcon() const {
  assert( Opcode() == Op_ConN, "" );
  return ((ConNNode*)this)->type()->is_narrowoop()->get_con();
}
1374 // Get a long constant from a ConNode.
1375 // Return a default value if there is no apparent constant here.
1376 const TypeLong* Node::find_long_type() const {
1377 if (this->is_Type()) {
1378 return this->as_Type()->type()->isa_long();
1379 } else if (this->is_Con()) {
1380 assert(is_Mach(), "should be ConNode(TypeNode) or else a MachNode");
1381 return this->bottom_type()->isa_long();
1382 }
1383 return NULL;
1384 }
// Get a double constant from a ConstNode.
// Returns the constant if it is a double ConstNode
// (only valid on Op_ConD nodes).
jdouble Node::getd() const {
  assert( Opcode() == Op_ConD, "" );
  return ((ConDNode*)this)->type()->is_double_constant()->getd();
}
// Get a float constant from a ConstNode.
// Returns the constant if it is a float ConstNode
// (only valid on Op_ConF nodes).
jfloat Node::getf() const {
  assert( Opcode() == Op_ConF, "" );
  return ((ConFNode*)this)->type()->is_float_constant()->getf();
}
1400 #ifndef PRODUCT
1402 //----------------------------NotANode----------------------------------------
1403 // Used in debugging code to avoid walking across dead or uninitialized edges.
1404 static inline bool NotANode(const Node* n) {
1405 if (n == NULL) return true;
1406 if (((intptr_t)n & 1) != 0) return true; // uninitialized, etc.
1407 if (*(address*)n == badAddress) return true; // kill by Node::destruct
1408 return false;
1409 }
//------------------------------find------------------------------------------
// Find a neighbor of this Node with the given _idx
// If idx is negative, find its absolute value, following both _in and _out.
// 'result' is set to the match; if several nodes match, the clash is printed
// and the last one found wins.
static void find_recur(Compile* C, Node* &result, Node *n, int idx, bool only_ctrl,
                       VectorSet* old_space, VectorSet* new_space ) {
  int node_idx = (idx >= 0) ? idx : -idx;
  if (NotANode(n)) return; // Gracefully handle NULL, -1, 0xabababab, etc.
  // Contained in new_space or old_space? Check old_arena first since it's mostly empty.
  VectorSet *v = C->old_arena()->contains(n) ? old_space : new_space;
  if( v->test(n->_idx) ) return;   // already visited in its arena's set
  if( (int)n->_idx == node_idx
      debug_only(|| n->debug_idx() == node_idx) ) {
    if (result != NULL)
      tty->print("find: " INTPTR_FORMAT " and " INTPTR_FORMAT " both have idx==%d\n",
                 (uintptr_t)result, (uintptr_t)n, node_idx);
    result = n;
  }
  v->set(n->_idx);
  // Recurse over all inputs (optionally restricted to control edges).
  for( uint i=0; i<n->len(); i++ ) {
    if( only_ctrl && !(n->is_Region()) && (n->Opcode() != Op_Root) && (i != TypeFunc::Control) ) continue;
    find_recur(C, result, n->in(i), idx, only_ctrl, old_space, new_space );
  }
  // Search along forward edges also:
  if (idx < 0 && !only_ctrl) {
    for( uint j=0; j<n->outcnt(); j++ ) {
      find_recur(C, result, n->raw_out(j), idx, only_ctrl, old_space, new_space );
    }
  }
#ifdef ASSERT
  // Search along debug_orig edges last, checking for cycles
  Node* orig = n->debug_orig();
  if (orig != NULL) {
    do {
      if (NotANode(orig)) break;
      find_recur(C, result, orig, idx, only_ctrl, old_space, new_space );
      orig = orig->debug_orig();
    } while (orig != NULL && orig != n->debug_orig());
  }
#endif //ASSERT
}
// call this from debugger: search the neighborhood of 'n' for the node
// with the given _idx (see Node::find for the search rules).
Node* find_node(Node* n, int idx) {
  return n->find(idx);
}
1458 //------------------------------find-------------------------------------------
1459 Node* Node::find(int idx) const {
1460 ResourceArea *area = Thread::current()->resource_area();
1461 VectorSet old_space(area), new_space(area);
1462 Node* result = NULL;
1463 find_recur(Compile::current(), result, (Node*) this, idx, false, &old_space, &new_space );
1464 return result;
1465 }
1467 //------------------------------find_ctrl--------------------------------------
1468 // Find an ancestor to this node in the control history with given _idx
1469 Node* Node::find_ctrl(int idx) const {
1470 ResourceArea *area = Thread::current()->resource_area();
1471 VectorSet old_space(area), new_space(area);
1472 Node* result = NULL;
1473 find_recur(Compile::current(), result, (Node*) this, idx, true, &old_space, &new_space );
1474 return result;
1475 }
1476 #endif
1480 #ifndef PRODUCT
1481 int Node::_in_dump_cnt = 0;
// -----------------------------Name-------------------------------------------
extern const char *NodeClassNames[];  // class-name table indexed by Opcode()
const char *Node::Name() const { return NodeClassNames[Opcode()]; }
1487 static bool is_disconnected(const Node* n) {
1488 for (uint i = 0; i < n->req(); i++) {
1489 if (n->in(i) != NULL) return false;
1490 }
1491 return true;
1492 }
#ifdef ASSERT
//-----------------------------dump_orig---------------------------------------
// Print the chain of nodes this node was cloned from (" !orig=..."), using a
// tortoise & hare walk to cut the print short if debug_orig links cycle.
static void dump_orig(Node* orig, outputStream *st) {
  Compile* C = Compile::current();
  // Ignore garbage pointers and nodes outside the current node arena.
  if (NotANode(orig)) orig = NULL;
  if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
  if (orig == NULL) return;
  st->print(" !orig=");
  Node* fast = orig->debug_orig(); // tortoise & hare algorithm to detect loops
  if (NotANode(fast)) fast = NULL;
  while (orig != NULL) {
    bool discon = is_disconnected(orig); // if discon, print [123] else 123
    if (discon) st->print("[");
    if (!Compile::current()->node_arena()->contains(orig))
      st->print("o");                   // 'o' marks an old-arena node
    st->print("%d", orig->_idx);
    if (discon) st->print("]");
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (orig != NULL && !C->node_arena()->contains(orig)) orig = NULL;
    if (orig != NULL) st->print(",");
    if (fast != NULL) {
      // Step fast twice for each single step of orig:
      fast = fast->debug_orig();
      if (NotANode(fast)) fast = NULL;
      if (fast != NULL && fast != orig) {
        fast = fast->debug_orig();
        if (NotANode(fast)) fast = NULL;
      }
      if (fast == orig) {
        // Hare caught the tortoise: the chain cycles; stop printing.
        st->print("...");
        break;
      }
    }
  }
}
//-----------------------------set_debug_orig----------------------------------
// Record the node this one was cloned from.  If BreakAtNode is set, walk the
// orig chain (bounded) and hit a breakpoint when a matching node is found.
void Node::set_debug_orig(Node* orig) {
  _debug_orig = orig;
  if (BreakAtNode == 0) return;
  if (NotANode(orig)) orig = NULL;
  int trip = 10;                // bound the walk; orig chains may cycle
  while (orig != NULL) {
    if (orig->debug_idx() == BreakAtNode || (int)orig->_idx == BreakAtNode) {
      tty->print_cr("BreakAtNode: _idx=%d _debug_idx=%d orig._idx=%d orig._debug_idx=%d",
                    this->_idx, this->debug_idx(), orig->_idx, orig->debug_idx());
      BREAKPOINT;
    }
    orig = orig->debug_orig();
    if (NotANode(orig)) orig = NULL;
    if (trip-- <= 0) break;
  }
}
#endif //ASSERT
1548 //------------------------------dump------------------------------------------
1549 // Dump a Node
1550 void Node::dump(const char* suffix, outputStream *st) const {
1551 Compile* C = Compile::current();
1552 bool is_new = C->node_arena()->contains(this);
1553 _in_dump_cnt++;
1554 st->print("%c%d\t%s\t=== ", is_new ? ' ' : 'o', _idx, Name());
1556 // Dump the required and precedence inputs
1557 dump_req(st);
1558 dump_prec(st);
1559 // Dump the outputs
1560 dump_out(st);
1562 if (is_disconnected(this)) {
1563 #ifdef ASSERT
1564 st->print(" [%d]",debug_idx());
1565 dump_orig(debug_orig(), st);
1566 #endif
1567 st->cr();
1568 _in_dump_cnt--;
1569 return; // don't process dead nodes
1570 }
1572 // Dump node-specific info
1573 dump_spec(st);
1574 #ifdef ASSERT
1575 // Dump the non-reset _debug_idx
1576 if (Verbose && WizardMode) {
1577 st->print(" [%d]",debug_idx());
1578 }
1579 #endif
1581 const Type *t = bottom_type();
1583 if (t != NULL && (t->isa_instptr() || t->isa_klassptr())) {
1584 const TypeInstPtr *toop = t->isa_instptr();
1585 const TypeKlassPtr *tkls = t->isa_klassptr();
1586 ciKlass* klass = toop ? toop->klass() : (tkls ? tkls->klass() : NULL );
1587 if (klass && klass->is_loaded() && klass->is_interface()) {
1588 st->print(" Interface:");
1589 } else if (toop) {
1590 st->print(" Oop:");
1591 } else if (tkls) {
1592 st->print(" Klass:");
1593 }
1594 t->dump_on(st);
1595 } else if (t == Type::MEMORY) {
1596 st->print(" Memory:");
1597 MemNode::dump_adr_type(this, adr_type(), st);
1598 } else if (Verbose || WizardMode) {
1599 st->print(" Type:");
1600 if (t) {
1601 t->dump_on(st);
1602 } else {
1603 st->print("no type");
1604 }
1605 } else if (t->isa_vect() && this->is_MachSpillCopy()) {
1606 // Dump MachSpillcopy vector type.
1607 t->dump_on(st);
1608 }
1609 if (is_new) {
1610 debug_only(dump_orig(debug_orig(), st));
1611 Node_Notes* nn = C->node_notes_at(_idx);
1612 if (nn != NULL && !nn->is_clear()) {
1613 if (nn->jvms() != NULL) {
1614 st->print(" !jvms:");
1615 nn->jvms()->dump_spec(st);
1616 }
1617 }
1618 }
1619 if (suffix) st->print(suffix);
1620 _in_dump_cnt--;
1621 }
1623 //------------------------------dump_req--------------------------------------
1624 void Node::dump_req(outputStream *st) const {
1625 // Dump the required input edges
1626 for (uint i = 0; i < req(); i++) { // For all required inputs
1627 Node* d = in(i);
1628 if (d == NULL) {
1629 st->print("_ ");
1630 } else if (NotANode(d)) {
1631 st->print("NotANode "); // uninitialized, sentinel, garbage, etc.
1632 } else {
1633 st->print("%c%d ", Compile::current()->node_arena()->contains(d) ? ' ' : 'o', d->_idx);
1634 }
1635 }
1636 }
1639 //------------------------------dump_prec-------------------------------------
1640 void Node::dump_prec(outputStream *st) const {
1641 // Dump the precedence edges
1642 int any_prec = 0;
1643 for (uint i = req(); i < len(); i++) { // For all precedence inputs
1644 Node* p = in(i);
1645 if (p != NULL) {
1646 if (!any_prec++) st->print(" |");
1647 if (NotANode(p)) { st->print("NotANode "); continue; }
1648 st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
1649 }
1650 }
1651 }
1653 //------------------------------dump_out--------------------------------------
1654 void Node::dump_out(outputStream *st) const {
1655 // Delimit the output edges
1656 st->print(" [[");
1657 // Dump the output edges
1658 for (uint i = 0; i < _outcnt; i++) { // For all outputs
1659 Node* u = _out[i];
1660 if (u == NULL) {
1661 st->print("_ ");
1662 } else if (NotANode(u)) {
1663 st->print("NotANode ");
1664 } else {
1665 st->print("%c%d ", Compile::current()->node_arena()->contains(u) ? ' ' : 'o', u->_idx);
1666 }
1667 }
1668 st->print("]] ");
1669 }
//------------------------------dump_nodes-------------------------------------
// Breadth-first dump of the graph around 'start': |d| levels along inputs
// (d > 0) or outputs (d < 0); 'only_ctrl' restricts the walk to CFG nodes.
static void dump_nodes(const Node* start, int d, bool only_ctrl) {
  Node* s = (Node*)start; // remove const
  if (NotANode(s)) return;

  uint depth = (uint)ABS(d);
  int direction = d;
  Compile* C = Compile::current();
  GrowableArray <Node *> nstack(C->unique());

  nstack.append(s);
  int begin = 0;
  int end = 0;
  // Expand one BFS level per iteration; [begin, end) is the current frontier.
  for(uint i = 0; i < depth; i++) {
    end = nstack.length();
    for(int j = begin; j < end; j++) {
      Node* tp = nstack.at(j);
      uint limit = direction > 0 ? tp->len() : tp->outcnt();
      for(uint k = 0; k < limit; k++) {
        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);

        if (NotANode(n)) continue;
        // do not recurse through top or the root (would reach unrelated stuff)
        if (n->is_Root() || n->is_top()) continue;
        if (only_ctrl && !n->is_CFG()) continue;

        bool on_stack = nstack.contains(n);
        if (!on_stack) {
          nstack.append(n);
        }
      }
    }
    begin = end;
  }
  end = nstack.length();
  // Input walks print deepest-first so the dump reads toward 'start'.
  if (direction > 0) {
    for(int j = end-1; j >= 0; j--) {
      nstack.at(j)->dump();
    }
  } else {
    for(int j = 0; j < end; j++) {
      nstack.at(j)->dump();
    }
  }
}
//------------------------------dump-------------------------------------------
// Dump this node plus its inputs (d > 0) or outputs (d < 0) to depth |d|.
void Node::dump(int d) const {
  dump_nodes(this, d, false);
}
//------------------------------dump_ctrl--------------------------------------
// Dump a Node's control history to depth
// Like dump(int), but follows only CFG (control) nodes.
void Node::dump_ctrl(int d) const {
  dump_nodes(this, d, true);
}
1728 // VERIFICATION CODE
1729 // For each input edge to a node (ie - for each Use-Def edge), verify that
1730 // there is a corresponding Def-Use edge.
1731 //------------------------------verify_edges-----------------------------------
1732 void Node::verify_edges(Unique_Node_List &visited) {
1733 uint i, j, idx;
1734 int cnt;
1735 Node *n;
1737 // Recursive termination test
1738 if (visited.member(this)) return;
1739 visited.push(this);
1741 // Walk over all input edges, checking for correspondence
1742 for( i = 0; i < len(); i++ ) {
1743 n = in(i);
1744 if (n != NULL && !n->is_top()) {
1745 // Count instances of (Node *)this
1746 cnt = 0;
1747 for (idx = 0; idx < n->_outcnt; idx++ ) {
1748 if (n->_out[idx] == (Node *)this) cnt++;
1749 }
1750 assert( cnt > 0,"Failed to find Def-Use edge." );
1751 // Check for duplicate edges
1752 // walk the input array downcounting the input edges to n
1753 for( j = 0; j < len(); j++ ) {
1754 if( in(j) == n ) cnt--;
1755 }
1756 assert( cnt == 0,"Mismatched edge count.");
1757 } else if (n == NULL) {
1758 assert(i >= req() || i == 0 || is_Region() || is_Phi(), "only regions or phis have null data edges");
1759 } else {
1760 assert(n->is_top(), "sanity");
1761 // Nothing to check.
1762 }
1763 }
1764 // Recursive walk over all input edges
1765 for( i = 0; i < len(); i++ ) {
1766 n = in(i);
1767 if( n != NULL )
1768 in(i)->verify_edges(visited);
1769 }
1770 }
//------------------------------verify_recur-----------------------------------
// NOTE(review): 'unique_top' appears unused in this file — confirm before removing.
static const Node *unique_top = NULL;

// Recursively verify use-def/def-use consistency for the subgraph rooted at
// 'n', down to 'verify_depth' levels (negative means unlimited).  Visited
// marks are kept in two VectorSets because node numberings are not unique
// across the old and new node arenas.
void Node::verify_recur(const Node *n, int verify_depth,
                        VectorSet &old_space, VectorSet &new_space) {
  if ( verify_depth == 0 )  return;
  if (verify_depth > 0)  --verify_depth;

  Compile* C = Compile::current();

  // Contained in new_space or old_space?
  VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space;
  // Check for visited in the proper space.  Numberings are not unique
  // across spaces so we need a separate VectorSet for each space.
  if( v->test_set(n->_idx) ) return;

  // The TOP constant must be unique per compilation: cache the first one
  // seen and assert all others are the same node.
  if (n->is_Con() && n->bottom_type() == Type::TOP) {
    if (C->cached_top_node() == NULL)
      C->set_cached_top_node((Node*)n);
    assert(C->cached_top_node() == n, "TOP node must be unique");
  }

  for( uint i = 0; i < n->len(); i++ ) {
    Node *x = n->in(i);
    if (!x || x->is_top()) continue;

    // Verify my input has a def-use edge to me
    if (true /*VerifyDefUse*/) {
      // Count use-def edges from n to x
      int cnt = 0;
      for( uint j = 0; j < n->len(); j++ )
        if( n->in(j) == x )
          cnt++;
      // Count def-use edges from x to n; the two counts must balance.
      uint max = x->_outcnt;
      for( uint k = 0; k < max; k++ )
        if (x->_out[k] == n)
          cnt--;
      assert( cnt == 0, "mismatched def-use edge counts" );
    }

    verify_recur(x, verify_depth, old_space, new_space);
  }

}
1818 //------------------------------verify-----------------------------------------
1819 // Check Def-Use info for my subgraph
1820 void Node::verify() const {
1821 Compile* C = Compile::current();
1822 Node* old_top = C->cached_top_node();
1823 ResourceMark rm;
1824 ResourceArea *area = Thread::current()->resource_area();
1825 VectorSet old_space(area), new_space(area);
1826 verify_recur(this, -1, old_space, new_space);
1827 C->set_cached_top_node(old_top);
1828 }
1829 #endif
//------------------------------walk-------------------------------------------
// Graph walk, with both pre-order and post-order functions
// Calls 'pre' before and 'post' after visiting each node's inputs, passing
// 'env' through to both; each node is visited at most once.
void Node::walk(NFunc pre, NFunc post, void *env) {
  VectorSet visited(Thread::current()->resource_area()); // Setup for local walk
  walk_(pre, post, env, visited);
}
1839 void Node::walk_(NFunc pre, NFunc post, void *env, VectorSet &visited) {
1840 if( visited.test_set(_idx) ) return;
1841 pre(*this,env); // Call the pre-order walk function
1842 for( uint i=0; i<_max; i++ )
1843 if( in(i) ) // Input exists and is not walked?
1844 in(i)->walk_(pre,post,env,visited); // Walk it with pre & post functions
1845 post(*this,env); // Call the post-order walk function
1846 }
// Do-nothing walk function, usable as 'pre' or 'post' in Node::walk().
void Node::nop(Node &, void*) {}
//------------------------------Registers--------------------------------------
// Do we Match on this edge index or not? Generally false for Control
// and true for everything else. Weird for calls & returns.
// Default: returns nonzero (match) for every input slot except slot 0.
uint Node::match_edge(uint idx) const {
  return idx; // True for other than index 0 (control)
}
// Dummy mask returned by the base-class register-mask stubs below.
static RegMask _not_used_at_all;
// Register classes are defined for specific machines
// Base-class stub: ShouldNotCallThis() flags any call as an error; the
// returned mask is a placeholder that is never meaningfully used.
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
  return _not_used_at_all;
}
// Base-class stub, see out_RegMask above: calling it is an error.
const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
  return _not_used_at_all;
}
//=============================================================================
//-----------------------------------------------------------------------------
// Return the backing storage to the old arena, then re-home the (now empty)
// array in 'new_arena'.  The free must happen before _a is overwritten.
void Node_Array::reset( Arena *new_arena ) {
  _a->Afree(_nodes,_max*sizeof(Node*));
  _max = 0;
  _nodes = NULL;
  _a = new_arena;
}
//------------------------------clear------------------------------------------
// Clear all entries in _nodes to NULL but keep storage
// (capacity _max is unchanged; only the contents are zeroed).
void Node_Array::clear() {
  Copy::zero_to_bytes( _nodes, _max*sizeof(Node*) );
}
1884 //-----------------------------------------------------------------------------
1885 void Node_Array::grow( uint i ) {
1886 if( !_max ) {
1887 _max = 1;
1888 _nodes = (Node**)_a->Amalloc( _max * sizeof(Node*) );
1889 _nodes[0] = NULL;
1890 }
1891 uint old = _max;
1892 while( i >= _max ) _max <<= 1; // Double to fit
1893 _nodes = (Node**)_a->Arealloc( _nodes, old*sizeof(Node*),_max*sizeof(Node*));
1894 Copy::zero_to_bytes( &_nodes[old], (_max-old)*sizeof(Node*) );
1895 }
1897 //-----------------------------------------------------------------------------
1898 void Node_Array::insert( uint i, Node *n ) {
1899 if( _nodes[_max-1] ) grow(_max); // Get more space if full
1900 Copy::conjoint_words_to_higher((HeapWord*)&_nodes[i], (HeapWord*)&_nodes[i+1], ((_max-i-1)*sizeof(Node*)));
1901 _nodes[i] = n;
1902 }
//-----------------------------------------------------------------------------
// Remove the entry at index i: shift the tail down one slot and NULL the
// vacated last slot.
void Node_Array::remove( uint i ) {
  Copy::conjoint_words_to_lower((HeapWord*)&_nodes[i+1], (HeapWord*)&_nodes[i], ((_max-i-1)*sizeof(Node*)));
  _nodes[_max-1] = NULL;
}
//-----------------------------------------------------------------------------
// Sort all _max slots (including NULL entries) with the given qsort comparator.
void Node_Array::sort( C_sort_func_t func) {
  qsort( _nodes, _max, sizeof( Node* ), func );
}
1915 //-----------------------------------------------------------------------------
1916 void Node_Array::dump() const {
1917 #ifndef PRODUCT
1918 for( uint i = 0; i < _max; i++ ) {
1919 Node *nn = _nodes[i];
1920 if( nn != NULL ) {
1921 tty->print("%5d--> ",i); nn->dump();
1922 }
1923 }
1924 #endif
1925 }
1927 //--------------------------is_iteratively_computed------------------------------
1928 // Operation appears to be iteratively computed (such as an induction variable)
1929 // It is possible for this operation to return false for a loop-varying
1930 // value, if it appears (by local graph inspection) to be computed by a simple conditional.
1931 bool Node::is_iteratively_computed() {
1932 if (ideal_reg()) { // does operation have a result register?
1933 for (uint i = 1; i < req(); i++) {
1934 Node* n = in(i);
1935 if (n != NULL && n->is_Phi()) {
1936 for (uint j = 1; j < n->req(); j++) {
1937 if (n->in(j) == this) {
1938 return true;
1939 }
1940 }
1941 }
1942 }
1943 }
1944 return false;
1945 }
1947 //--------------------------find_similar------------------------------
1948 // Return a node with opcode "opc" and same inputs as "this" if one can
1949 // be found; Otherwise return NULL;
1950 Node* Node::find_similar(int opc) {
1951 if (req() >= 2) {
1952 Node* def = in(1);
1953 if (def && def->outcnt() >= 2) {
1954 for (DUIterator_Fast dmax, i = def->fast_outs(dmax); i < dmax; i++) {
1955 Node* use = def->fast_out(i);
1956 if (use->Opcode() == opc &&
1957 use->req() == req()) {
1958 uint j;
1959 for (j = 0; j < use->req(); j++) {
1960 if (use->in(j) != in(j)) {
1961 break;
1962 }
1963 }
1964 if (j == use->req()) {
1965 return use;
1966 }
1967 }
1968 }
1969 }
1970 }
1971 return NULL;
1972 }
1975 //--------------------------unique_ctrl_out------------------------------
1976 // Return the unique control out if only one. Null if none or more than one.
1977 Node* Node::unique_ctrl_out() {
1978 Node* found = NULL;
1979 for (uint i = 0; i < outcnt(); i++) {
1980 Node* use = raw_out(i);
1981 if (use->is_CFG() && use != this) {
1982 if (found != NULL) return NULL;
1983 found = use;
1984 }
1985 }
1986 return found;
1987 }
1989 //=============================================================================
1990 //------------------------------yank-------------------------------------------
1991 // Find and remove
1992 void Node_List::yank( Node *n ) {
1993 uint i;
1994 for( i = 0; i < _cnt; i++ )
1995 if( _nodes[i] == n )
1996 break;
1998 if( i < _cnt )
1999 _nodes[i] = _nodes[--_cnt];
2000 }
2002 //------------------------------dump-------------------------------------------
2003 void Node_List::dump() const {
2004 #ifndef PRODUCT
2005 for( uint i = 0; i < _cnt; i++ )
2006 if( _nodes[i] ) {
2007 tty->print("%5d--> ",i);
2008 _nodes[i]->dump();
2009 }
2010 #endif
2011 }
2013 //=============================================================================
2014 //------------------------------remove-----------------------------------------
2015 void Unique_Node_List::remove( Node *n ) {
2016 if( _in_worklist[n->_idx] ) {
2017 for( uint i = 0; i < size(); i++ )
2018 if( _nodes[i] == n ) {
2019 map(i,Node_List::pop());
2020 _in_worklist >>= n->_idx;
2021 return;
2022 }
2023 ShouldNotReachHere();
2024 }
2025 }
2027 //-----------------------remove_useless_nodes----------------------------------
2028 // Remove useless nodes from worklist
2029 void Unique_Node_List::remove_useless_nodes(VectorSet &useful) {
2031 for( uint i = 0; i < size(); ++i ) {
2032 Node *n = at(i);
2033 assert( n != NULL, "Did not expect null entries in worklist");
2034 if( ! useful.test(n->_idx) ) {
2035 _in_worklist >>= n->_idx;
2036 map(i,Node_List::pop());
2037 // Node *replacement = Node_List::pop();
2038 // if( i != size() ) { // Check if removing last entry
2039 // _nodes[i] = replacement;
2040 // }
2041 --i; // Visit popped node
2042 // If it was last entry, loop terminates since size() was also reduced
2043 }
2044 }
2045 }
//=============================================================================
// Double the capacity of the stack, preserving its contents and the
// top-of-stack position across the reallocation.
void Node_Stack::grow() {
  size_t old_top = pointer_delta(_inode_top,_inodes,sizeof(INode)); // save _top
  size_t old_max = pointer_delta(_inode_max,_inodes,sizeof(INode));
  size_t max = old_max << 1; // max * 2
  _inodes = REALLOC_ARENA_ARRAY(_a, INode, _inodes, old_max, max);
  _inode_max = _inodes + max;
  _inode_top = _inodes + old_top; // restore _top
}
2057 // Node_Stack is used to map nodes.
2058 Node* Node_Stack::find(uint idx) const {
2059 uint sz = size();
2060 for (uint i=0; i < sz; i++) {
2061 if (idx == index_at(i) )
2062 return node_at(i);
2063 }
2064 return NULL;
2065 }
//=============================================================================
// Size in bytes of this node object.
uint TypeNode::size_of() const { return sizeof(*this); }
2069 #ifndef PRODUCT
2070 void TypeNode::dump_spec(outputStream *st) const {
2071 if( !Verbose && !WizardMode ) {
2072 // standard dump does this in Verbose and WizardMode
2073 st->print(" #"); _type->dump_on(st);
2074 }
2075 }
2076 #endif
// Combine the base node hash with the type's hash.
uint TypeNode::hash() const {
  return Node::hash() + _type->hash();
}
// TypeNodes compare equal exactly when their types compare equal
// (Type::cmp returns nonzero on difference, hence the negation).
uint TypeNode::cmp( const Node &n ) const
{ return !Type::cmp( _type, ((TypeNode&)n)._type ); }
const Type *TypeNode::bottom_type() const { return _type; }  // static type is the stored _type
const Type *TypeNode::Value( PhaseTransform * ) const { return _type; }  // computed type is likewise _type

//------------------------------ideal_reg--------------------------------------
// Ideal register class for this node, derived from its type.
uint TypeNode::ideal_reg() const {
  return _type->ideal_reg();
}