src/share/vm/opto/node.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/opto/node.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,1596 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef SHARE_VM_OPTO_NODE_HPP
    1.29 +#define SHARE_VM_OPTO_NODE_HPP
    1.30 +
    1.31 +#include "libadt/port.hpp"
    1.32 +#include "libadt/vectset.hpp"
    1.33 +#include "opto/compile.hpp"
    1.34 +#include "opto/type.hpp"
    1.35 +
    1.36 +// Portions of code courtesy of Clifford Click
    1.37 +
    1.38 +// Optimization - Graph Style
    1.39 +
    1.40 +
    1.41 +class AbstractLockNode;
    1.42 +class AddNode;
    1.43 +class AddPNode;
    1.44 +class AliasInfo;
    1.45 +class AllocateArrayNode;
    1.46 +class AllocateNode;
    1.47 +class Block;
    1.48 +class BoolNode;
    1.49 +class BoxLockNode;
    1.50 +class CMoveNode;
    1.51 +class CallDynamicJavaNode;
    1.52 +class CallJavaNode;
    1.53 +class CallLeafNode;
    1.54 +class CallNode;
    1.55 +class CallRuntimeNode;
    1.56 +class CallStaticJavaNode;
    1.57 +class CatchNode;
    1.58 +class CatchProjNode;
    1.59 +class CheckCastPPNode;
    1.60 +class ClearArrayNode;
    1.61 +class CmpNode;
    1.62 +class CodeBuffer;
    1.63 +class ConstraintCastNode;
    1.64 +class ConNode;
    1.65 +class CountedLoopNode;
    1.66 +class CountedLoopEndNode;
    1.67 +class DecodeNarrowPtrNode;
    1.68 +class DecodeNNode;
    1.69 +class DecodeNKlassNode;
    1.70 +class EncodeNarrowPtrNode;
    1.71 +class EncodePNode;
    1.72 +class EncodePKlassNode;
    1.73 +class FastLockNode;
    1.74 +class FastUnlockNode;
    1.75 +class IfNode;
    1.76 +class IfFalseNode;
    1.77 +class IfTrueNode;
    1.78 +class InitializeNode;
    1.79 +class JVMState;
    1.80 +class JumpNode;
    1.81 +class JumpProjNode;
    1.82 +class LoadNode;
    1.83 +class LoadStoreNode;
    1.84 +class LockNode;
    1.85 +class LoopNode;
    1.86 +class MachBranchNode;
    1.87 +class MachCallDynamicJavaNode;
    1.88 +class MachCallJavaNode;
    1.89 +class MachCallLeafNode;
    1.90 +class MachCallNode;
    1.91 +class MachCallRuntimeNode;
    1.92 +class MachCallStaticJavaNode;
    1.93 +class MachConstantBaseNode;
    1.94 +class MachConstantNode;
    1.95 +class MachGotoNode;
    1.96 +class MachIfNode;
    1.97 +class MachNode;
    1.98 +class MachNullCheckNode;
    1.99 +class MachProjNode;
   1.100 +class MachReturnNode;
   1.101 +class MachSafePointNode;
   1.102 +class MachSpillCopyNode;
   1.103 +class MachTempNode;
   1.104 +class Matcher;
   1.105 +class MemBarNode;
   1.106 +class MemBarStoreStoreNode;
   1.107 +class MemNode;
   1.108 +class MergeMemNode;
   1.109 +class MulNode;
   1.110 +class MultiNode;
   1.111 +class MultiBranchNode;
   1.112 +class NeverBranchNode;
   1.113 +class Node;
   1.114 +class Node_Array;
   1.115 +class Node_List;
   1.116 +class Node_Stack;
   1.117 +class NullCheckNode;
   1.118 +class OopMap;
   1.119 +class ParmNode;
   1.120 +class PCTableNode;
   1.121 +class PhaseCCP;
   1.122 +class PhaseGVN;
   1.123 +class PhaseIterGVN;
   1.124 +class PhaseRegAlloc;
   1.125 +class PhaseTransform;
   1.126 +class PhaseValues;
   1.127 +class PhiNode;
   1.128 +class Pipeline;
   1.129 +class ProjNode;
   1.130 +class RegMask;
   1.131 +class RegionNode;
   1.132 +class RootNode;
   1.133 +class SafePointNode;
   1.134 +class SafePointScalarObjectNode;
   1.135 +class StartNode;
   1.136 +class State;
   1.137 +class StoreNode;
   1.138 +class SubNode;
   1.139 +class Type;
   1.140 +class TypeNode;
   1.141 +class UnlockNode;
   1.142 +class VectorNode;
   1.143 +class LoadVectorNode;
   1.144 +class StoreVectorNode;
   1.145 +class VectorSet;
   1.146 +typedef void (*NFunc)(Node&,void*);
   1.147 +extern "C" {
   1.148 +  typedef int (*C_sort_func_t)(const void *, const void *);
   1.149 +}
   1.150 +
   1.151 +// The type of all node counts and indexes.
   1.152 +// It must hold at least 16 bits, but must also be fast to load and store.
   1.153 +// This type, if less than 32 bits, could limit the number of possible nodes.
   1.154 +// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
   1.155 +typedef unsigned int node_idx_t;
   1.156 +
   1.157 +
   1.158 +#ifndef OPTO_DU_ITERATOR_ASSERT
   1.159 +#ifdef ASSERT
   1.160 +#define OPTO_DU_ITERATOR_ASSERT 1
   1.161 +#else
   1.162 +#define OPTO_DU_ITERATOR_ASSERT 0
   1.163 +#endif
   1.164 +#endif //OPTO_DU_ITERATOR_ASSERT
   1.165 +
   1.166 +#if OPTO_DU_ITERATOR_ASSERT
   1.167 +class DUIterator;
   1.168 +class DUIterator_Fast;
   1.169 +class DUIterator_Last;
   1.170 +#else
   1.171 +typedef uint   DUIterator;
   1.172 +typedef Node** DUIterator_Fast;
   1.173 +typedef Node** DUIterator_Last;
   1.174 +#endif
   1.175 +
   1.176 +// Node Sentinel
   1.177 +#define NodeSentinel (Node*)-1
   1.178 +
   1.179 +// Unknown count frequency
   1.180 +#define COUNT_UNKNOWN (-1.0f)
   1.181 +
   1.182 +//------------------------------Node-------------------------------------------
   1.183 +// Nodes define actions in the program.  They create values, which have types.
   1.184 +// They are both vertices in a directed graph and program primitives.  Nodes
   1.185 +// are labeled; the label is the "opcode", the primitive function in the lambda
   1.186 +// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
   1.187 +// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
   1.188 +// the Node's function.  These inputs also define a Type equation for the Node.
   1.189 +// Solving these Type equations amounts to doing dataflow analysis.
   1.190 +// Control and data are uniformly represented in the graph.  Finally, Nodes
   1.191 +// have a unique dense integer index which is used to index into side arrays
   1.192 +// whenever I have phase-specific information.
   1.193 +
   1.194 +class Node {
   1.195 +  friend class VMStructs;
   1.196 +
   1.197 +  // Lots of restrictions on cloning Nodes
   1.198 +  Node(const Node&);            // not defined; linker error to use these
   1.199 +  Node &operator=(const Node &rhs);
   1.200 +
   1.201 +public:
   1.202 +  friend class Compile;
   1.203 +  #if OPTO_DU_ITERATOR_ASSERT
   1.204 +  friend class DUIterator_Common;
   1.205 +  friend class DUIterator;
   1.206 +  friend class DUIterator_Fast;
   1.207 +  friend class DUIterator_Last;
   1.208 +  #endif
   1.209 +
   1.210 +  // Because Nodes come and go, I define an Arena of Node structures to pull
   1.211 +  // from.  This should allow fast access to node creation & deletion.  This
   1.212 +  // field is a local cache of a value defined in some "program fragment" for
   1.213 +  // which these Nodes are just a part of.
   1.214 +
   1.215 +  // New Operator that takes a Compile pointer, this will eventually
   1.216 +  // be the "new" New operator.
   1.217 +  inline void* operator new( size_t x, Compile* C) throw() {
   1.218 +    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
   1.219 +#ifdef ASSERT
   1.220 +    n->_in = (Node**)n; // magic cookie for assertion check
   1.221 +#endif
   1.222 +    n->_out = (Node**)C;
   1.223 +    return (void*)n;
   1.224 +  }
   1.225 +
   1.226 +  // Delete is a NOP
   1.227 +  void operator delete( void *ptr ) {}
   1.228 +  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
   1.229 +  void destruct();
   1.230 +
   1.231 +  // Create a new Node.  Required is the number of inputs required for
   1.232 +  // semantic correctness.
   1.233 +  Node( uint required );
   1.234 +
   1.235 +  // Create a new Node with given input edges.
   1.236 +  // This version requires use of the "edge-count" new.
   1.237 +  // E.g.  new (C,3) FooNode( C, NULL, left, right );
   1.238 +  Node( Node *n0 );
   1.239 +  Node( Node *n0, Node *n1 );
   1.240 +  Node( Node *n0, Node *n1, Node *n2 );
   1.241 +  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
   1.242 +  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
   1.243 +  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
   1.244 +  Node( Node *n0, Node *n1, Node *n2, Node *n3,
   1.245 +            Node *n4, Node *n5, Node *n6 );
   1.246 +
   1.247 +  // Clone an inherited Node given only the base Node type.
   1.248 +  Node* clone() const;
   1.249 +
   1.250 +  // Clone a Node, immediately supplying one or two new edges.
   1.251 +  // The first and second arguments, if non-null, replace in(1) and in(2),
   1.252 +  // respectively.
   1.253 +  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
   1.254 +    Node* nn = clone();
   1.255 +    if (in1 != NULL)  nn->set_req(1, in1);
   1.256 +    if (in2 != NULL)  nn->set_req(2, in2);
   1.257 +    return nn;
   1.258 +  }
   1.259 +
   1.260 +private:
   1.261 +  // Shared setup for the above constructors.
   1.262 +  // Handles all interactions with Compile::current.
   1.263 +  // Puts initial values in all Node fields except _idx.
   1.264 +  // Returns the initial value for _idx, which cannot
   1.265 +  // be initialized by assignment.
   1.266 +  inline int Init(int req, Compile* C);
   1.267 +
   1.268 +//----------------- input edge handling
   1.269 +protected:
   1.270 +  friend class PhaseCFG;        // Access to address of _in array elements
   1.271 +  Node **_in;                   // Array of use-def references to Nodes
   1.272 +  Node **_out;                  // Array of def-use references to Nodes
   1.273 +
   1.274 +  // Input edges are split into two categories.  Required edges are required
   1.275 +  // for semantic correctness; order is important and NULLs are allowed.
   1.276 +  // Precedence edges are used to help determine execution order and are
   1.277 +  // added, e.g., for scheduling purposes.  They are unordered and not
   1.278 +  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
   1.279 +  // are required, from _cnt to _max-1 are precedence edges.
   1.280 +  node_idx_t _cnt;              // Total number of required Node inputs.
   1.281 +
   1.282 +  node_idx_t _max;              // Actual length of input array.
   1.283 +
   1.284 +  // Output edges are an unordered list of def-use edges which exactly
   1.285 +  // correspond to required input edges which point from other nodes
   1.286 +  // to this one.  Thus the count of the output edges is the number of
   1.287 +  // users of this node.
   1.288 +  node_idx_t _outcnt;           // Total number of Node outputs.
   1.289 +
   1.290 +  node_idx_t _outmax;           // Actual length of output array.
   1.291 +
   1.292 +  // Grow the actual input array to the next larger power-of-2 bigger than len.
   1.293 +  void grow( uint len );
   1.294 +  // Grow the output array to the next larger power-of-2 bigger than len.
   1.295 +  void out_grow( uint len );
   1.296 +
   1.297 + public:
   1.298 +  // Each Node is assigned a unique small/dense number.  This number is used
   1.299 +  // to index into auxiliary arrays of data and bitvectors.
   1.300 +  // It is declared const to defend against inadvertent assignment,
   1.301 +  // since it is used by clients as a naked field.
   1.302 +  const node_idx_t _idx;
   1.303 +
   1.304 +  // Get the (read-only) number of input edges
   1.305 +  uint req() const { return _cnt; }
   1.306 +  uint len() const { return _max; }
   1.307 +  // Get the (read-only) number of output edges
   1.308 +  uint outcnt() const { return _outcnt; }
   1.309 +
   1.310 +#if OPTO_DU_ITERATOR_ASSERT
   1.311 +  // Iterate over the out-edges of this node.  Deletions are illegal.
   1.312 +  inline DUIterator outs() const;
   1.313 +  // Use this when the out array might have changed to suppress asserts.
   1.314 +  inline DUIterator& refresh_out_pos(DUIterator& i) const;
   1.315 +  // Does the node have an out at this position?  (Used for iteration.)
   1.316 +  inline bool has_out(DUIterator& i) const;
   1.317 +  inline Node*    out(DUIterator& i) const;
   1.318 +  // Iterate over the out-edges of this node.  All changes are illegal.
   1.319 +  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
   1.320 +  inline Node*    fast_out(DUIterator_Fast& i) const;
   1.321 +  // Iterate over the out-edges of this node, deleting one at a time.
   1.322 +  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
   1.323 +  inline Node*    last_out(DUIterator_Last& i) const;
   1.324 +  // The inline bodies of all these methods are after the iterator definitions.
   1.325 +#else
   1.326 +  // Iterate over the out-edges of this node.  Deletions are illegal.
   1.327 +  // This iteration uses integral indexes, to decouple from array reallocations.
   1.328 +  DUIterator outs() const  { return 0; }
   1.329 +  // Use this when the out array might have changed to suppress asserts.
   1.330 +  DUIterator refresh_out_pos(DUIterator i) const { return i; }
   1.331 +
   1.332 +  // Reference to the i'th output Node.  Error if out of bounds.
   1.333 +  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
   1.334 +  // Does the node have an out at this position?  (Used for iteration.)
   1.335 +  bool has_out(DUIterator i) const { return i < _outcnt; }
   1.336 +
   1.337 +  // Iterate over the out-edges of this node.  All changes are illegal.
   1.338 +  // This iteration uses a pointer internal to the out array.
   1.339 +  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
   1.340 +    Node** out = _out;
   1.341 +    // Assign a limit pointer to the reference argument:
   1.342 +    max = out + (ptrdiff_t)_outcnt;
   1.343 +    // Return the base pointer:
   1.344 +    return out;
   1.345 +  }
   1.346 +  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
   1.347 +  // Iterate over the out-edges of this node, deleting one at a time.
   1.348 +  // This iteration uses a pointer internal to the out array.
   1.349 +  DUIterator_Last last_outs(DUIterator_Last& min) const {
   1.350 +    Node** out = _out;
   1.351 +    // Assign a limit pointer to the reference argument:
   1.352 +    min = out;
   1.353 +    // Return the pointer to the start of the iteration:
   1.354 +    return out + (ptrdiff_t)_outcnt - 1;
   1.355 +  }
   1.356 +  Node*    last_out(DUIterator_Last i) const  { return *i; }
   1.357 +#endif
   1.358 +
   1.359 +  // Reference to the i'th input Node.  Error if out of bounds.
   1.360 +  Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; }
   1.361 +  // Reference to the i'th input Node.  NULL if out of bounds.
   1.362 +  Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); }
   1.363 +  // Reference to the i'th output Node.  Error if out of bounds.
   1.364 +  // Use this accessor sparingly.  We are going to try to use iterators instead.
   1.365 +  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
   1.366 +  // Return the unique out edge.
   1.367 +  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
   1.368 +  // Delete out edge at position 'i' by moving last out edge to position 'i'
   1.369 +  void  raw_del_out(uint i) {
   1.370 +    assert(i < _outcnt,"oob");
   1.371 +    assert(_outcnt > 0,"oob");
   1.372 +    #if OPTO_DU_ITERATOR_ASSERT
   1.373 +    // Record that a change happened here.
   1.374 +    debug_only(_last_del = _out[i]; ++_del_tick);
   1.375 +    #endif
   1.376 +    _out[i] = _out[--_outcnt];
   1.377 +    // Smash the old edge so it can't be used accidentally.
   1.378 +    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
   1.379 +  }
   1.380 +
   1.381 +#ifdef ASSERT
   1.382 +  bool is_dead() const;
   1.383 +#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
   1.384 +#endif
   1.385 +  // Check whether node has become unreachable
   1.386 +  bool is_unreachable(PhaseIterGVN &igvn) const;
   1.387 +
   1.388 +  // Set a required input edge, also updates corresponding output edge
   1.389 +  void add_req( Node *n ); // Append a NEW required input
   1.390 +  void add_req( Node *n0, Node *n1 ) {
   1.391 +    add_req(n0); add_req(n1); }
   1.392 +  void add_req( Node *n0, Node *n1, Node *n2 ) {
   1.393 +    add_req(n0); add_req(n1); add_req(n2); }
   1.394 +  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   1.395 +  void del_req( uint idx ); // Delete required edge & compact
   1.396 +  void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order
   1.397 +  void ins_req( uint i, Node *n ); // Insert a NEW required input
   1.398 +  void set_req( uint i, Node *n ) {
   1.399 +    assert( is_not_dead(n), "can not use dead node");
   1.400 +    assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt));
   1.401 +    assert( !VerifyHashTableKeys || _hash_lock == 0,
   1.402 +            "remove node from hash table before modifying it");
   1.403 +    Node** p = &_in[i];    // cache this._in, across the del_out call
   1.404 +    if (*p != NULL)  (*p)->del_out((Node *)this);
   1.405 +    (*p) = n;
   1.406 +    if (n != NULL)      n->add_out((Node *)this);
   1.407 +  }
   1.408 +  // Light version of set_req() to init inputs after node creation.
   1.409 +  void init_req( uint i, Node *n ) {
   1.410 +    assert( i == 0 && this == n ||
   1.411 +            is_not_dead(n), "can not use dead node");
   1.412 +    assert( i < _cnt, "oob");
   1.413 +    assert( !VerifyHashTableKeys || _hash_lock == 0,
   1.414 +            "remove node from hash table before modifying it");
   1.415 +    assert( _in[i] == NULL, "sanity");
   1.416 +    _in[i] = n;
   1.417 +    if (n != NULL)      n->add_out((Node *)this);
   1.418 +  }
   1.419 +  // Find first occurrence of n among my edges:
   1.420 +  int find_edge(Node* n);
   1.421 +  int replace_edge(Node* old, Node* neww);
   1.422 +  int replace_edges_in_range(Node* old, Node* neww, int start, int end);
   1.423 +  // NULL out all inputs to eliminate incoming Def-Use edges.
   1.424 +  // Return the number of edges between 'n' and 'this'
   1.425 +  int  disconnect_inputs(Node *n, Compile *c);
   1.426 +
   1.427 +  // Quickly, return true if and only if I am Compile::current()->top().
   1.428 +  bool is_top() const {
   1.429 +    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
   1.430 +    return (_out == NULL);
   1.431 +  }
   1.432 +  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
   1.433 +  void setup_is_top();
   1.434 +
   1.435 +  // Strip away casting.  (It is depth-limited.)
   1.436 +  Node* uncast() const;
   1.437 +  // Return whether two Nodes are equivalent, after stripping casting.
   1.438 +  bool eqv_uncast(const Node* n) const {
   1.439 +    return (this->uncast() == n->uncast());
   1.440 +  }
   1.441 +
   1.442 +private:
   1.443 +  static Node* uncast_helper(const Node* n);
   1.444 +
   1.445 +  // Add an output edge to the end of the list
   1.446 +  void add_out( Node *n ) {
   1.447 +    if (is_top())  return;
   1.448 +    if( _outcnt == _outmax ) out_grow(_outcnt);
   1.449 +    _out[_outcnt++] = n;
   1.450 +  }
   1.451 +  // Delete an output edge
   1.452 +  void del_out( Node *n ) {
   1.453 +    if (is_top())  return;
   1.454 +    Node** outp = &_out[_outcnt];
   1.455 +    // Find and remove n
   1.456 +    do {
   1.457 +      assert(outp > _out, "Missing Def-Use edge");
   1.458 +    } while (*--outp != n);
   1.459 +    *outp = _out[--_outcnt];
   1.460 +    // Smash the old edge so it can't be used accidentally.
   1.461 +    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
   1.462 +    // Record that a change happened here.
   1.463 +    #if OPTO_DU_ITERATOR_ASSERT
   1.464 +    debug_only(_last_del = n; ++_del_tick);
   1.465 +    #endif
   1.466 +  }
   1.467 +
   1.468 +public:
   1.469 +  // Globally replace this node by a given new node, updating all uses.
   1.470 +  void replace_by(Node* new_node);
   1.471 +  // Globally replace this node by a given new node, updating all uses
   1.472 +  // and cutting input edges of old node.
   1.473 +  void subsume_by(Node* new_node, Compile* c) {
   1.474 +    replace_by(new_node);
   1.475 +    disconnect_inputs(NULL, c);
   1.476 +  }
   1.477 +  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
   1.478 +  // Find the one non-null required input.  RegionNode only
   1.479 +  Node *nonnull_req() const;
   1.480 +  // Add or remove precedence edges
   1.481 +  void add_prec( Node *n );
   1.482 +  void rm_prec( uint i );
   1.483 +  void set_prec( uint i, Node *n ) {
   1.484 +    assert( is_not_dead(n), "can not use dead node");
   1.485 +    assert( i >= _cnt, "not a precedence edge");
   1.486 +    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
   1.487 +    _in[i] = n;
   1.488 +    if (n != NULL) n->add_out((Node *)this);
   1.489 +  }
   1.490 +  // Set this node's index, used by cisc_version to replace current node
   1.491 +  void set_idx(uint new_idx) {
   1.492 +    const node_idx_t* ref = &_idx;
   1.493 +    *(node_idx_t*)ref = new_idx;
   1.494 +  }
   1.495 +  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
   1.496 +  void swap_edges(uint i1, uint i2) {
   1.497 +    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
   1.498 +    // Def-Use info is unchanged
   1.499 +    Node* n1 = in(i1);
   1.500 +    Node* n2 = in(i2);
   1.501 +    _in[i1] = n2;
   1.502 +    _in[i2] = n1;
   1.503 +    // If this node is in the hash table, make sure it doesn't need a rehash.
   1.504 +    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
   1.505 +  }
   1.506 +
   1.507 +  // Iterators over input Nodes for a Node X are written as:
   1.508 +  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
   1.509 +  // NOTE: Required edges can contain embedded NULL pointers.
   1.510 +
   1.511 +//----------------- Other Node Properties
   1.512 +
   1.513 +  // Generate class id for some ideal nodes to avoid virtual query
   1.514 +  // methods is_<Node>().
   1.515 +  // Class id is the set of bits corresponding to the node class and all its
   1.516 +  // super classes so that queries for super classes are also valid.
   1.517 +  // Subclasses of the same super class have different assigned bit
   1.518 +  // (the third parameter in the macro DEFINE_CLASS_ID).
   1.519 +  // Classes with deeper hierarchy are declared first.
   1.520 +  // Classes with the same hierarchy depth are sorted by usage frequency.
   1.521 +  //
   1.522 +  // The query method masks the bits to cut off bits of subclasses
   1.523 +  // and then compare the result with the class id
   1.524 +  // (see the macro DEFINE_CLASS_QUERY below).
   1.525 +  //
   1.526 +  //  Class_MachCall=30, ClassMask_MachCall=31
   1.527 +  // 12               8               4               0
   1.528 +  //  0   0   0   0   0   0   0   0   1   1   1   1   0
   1.529 +  //                                  |   |   |   |
   1.530 +  //                                  |   |   |   Bit_Mach=2
   1.531 +  //                                  |   |   Bit_MachReturn=4
   1.532 +  //                                  |   Bit_MachSafePoint=8
   1.533 +  //                                  Bit_MachCall=16
   1.534 +  //
   1.535 +  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
   1.536 +  // 12               8               4               0
   1.537 +  //  0   0   0   0   0   0   0   1   1   1   0   0   0
   1.538 +  //                              |   |   |
   1.539 +  //                              |   |   Bit_Region=8
   1.540 +  //                              |   Bit_Loop=16
   1.541 +  //                              Bit_CountedLoop=32
   1.542 +
   1.543 +  #define DEFINE_CLASS_ID(cl, supcl, subn) \
   1.544 +  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
   1.545 +  Class_##cl = Class_##supcl + Bit_##cl , \
   1.546 +  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
   1.547 +
   1.548 +  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
   1.549 +  // so that its values fit into 16 bits.
   1.550 +  enum NodeClasses {
   1.551 +    Bit_Node   = 0x0000,
   1.552 +    Class_Node = 0x0000,
   1.553 +    ClassMask_Node = 0xFFFF,
   1.554 +
   1.555 +    DEFINE_CLASS_ID(Multi, Node, 0)
   1.556 +      DEFINE_CLASS_ID(SafePoint, Multi, 0)
   1.557 +        DEFINE_CLASS_ID(Call,      SafePoint, 0)
   1.558 +          DEFINE_CLASS_ID(CallJava,         Call, 0)
   1.559 +            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
   1.560 +            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
   1.561 +          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
   1.562 +            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
   1.563 +          DEFINE_CLASS_ID(Allocate,         Call, 2)
   1.564 +            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
   1.565 +          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
   1.566 +            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
   1.567 +            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
   1.568 +      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
   1.569 +        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
   1.570 +          DEFINE_CLASS_ID(Catch,       PCTable, 0)
   1.571 +          DEFINE_CLASS_ID(Jump,        PCTable, 1)
   1.572 +        DEFINE_CLASS_ID(If,          MultiBranch, 1)
   1.573 +          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
   1.574 +        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
   1.575 +      DEFINE_CLASS_ID(Start,       Multi, 2)
   1.576 +      DEFINE_CLASS_ID(MemBar,      Multi, 3)
   1.577 +        DEFINE_CLASS_ID(Initialize,       MemBar, 0)
   1.578 +        DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1)
   1.579 +
   1.580 +    DEFINE_CLASS_ID(Mach,  Node, 1)
   1.581 +      DEFINE_CLASS_ID(MachReturn, Mach, 0)
   1.582 +        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
   1.583 +          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
   1.584 +            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
   1.585 +              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
   1.586 +              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
   1.587 +            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
   1.588 +              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
   1.589 +      DEFINE_CLASS_ID(MachBranch, Mach, 1)
   1.590 +        DEFINE_CLASS_ID(MachIf,         MachBranch, 0)
   1.591 +        DEFINE_CLASS_ID(MachGoto,       MachBranch, 1)
   1.592 +        DEFINE_CLASS_ID(MachNullCheck,  MachBranch, 2)
   1.593 +      DEFINE_CLASS_ID(MachSpillCopy,    Mach, 2)
   1.594 +      DEFINE_CLASS_ID(MachTemp,         Mach, 3)
   1.595 +      DEFINE_CLASS_ID(MachConstantBase, Mach, 4)
   1.596 +      DEFINE_CLASS_ID(MachConstant,     Mach, 5)
   1.597 +
   1.598 +    DEFINE_CLASS_ID(Type,  Node, 2)
   1.599 +      DEFINE_CLASS_ID(Phi,   Type, 0)
   1.600 +      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
   1.601 +      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
   1.602 +      DEFINE_CLASS_ID(CMove, Type, 3)
   1.603 +      DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
   1.604 +      DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5)
   1.605 +        DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0)
   1.606 +        DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1)
   1.607 +      DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6)
   1.608 +        DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0)
   1.609 +        DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1)
   1.610 +
   1.611 +    DEFINE_CLASS_ID(Proj,  Node, 3)
   1.612 +      DEFINE_CLASS_ID(CatchProj, Proj, 0)
   1.613 +      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
   1.614 +      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
   1.615 +      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
   1.616 +      DEFINE_CLASS_ID(Parm,      Proj, 4)
   1.617 +      DEFINE_CLASS_ID(MachProj,  Proj, 5)
   1.618 +
   1.619 +    DEFINE_CLASS_ID(Mem,   Node, 4)
   1.620 +      DEFINE_CLASS_ID(Load,  Mem, 0)
   1.621 +        DEFINE_CLASS_ID(LoadVector,  Load, 0)
   1.622 +      DEFINE_CLASS_ID(Store, Mem, 1)
   1.623 +        DEFINE_CLASS_ID(StoreVector, Store, 0)
   1.624 +      DEFINE_CLASS_ID(LoadStore, Mem, 2)
   1.625 +
   1.626 +    DEFINE_CLASS_ID(Region, Node, 5)
   1.627 +      DEFINE_CLASS_ID(Loop, Region, 0)
   1.628 +        DEFINE_CLASS_ID(Root,        Loop, 0)
   1.629 +        DEFINE_CLASS_ID(CountedLoop, Loop, 1)
   1.630 +
   1.631 +    DEFINE_CLASS_ID(Sub,   Node, 6)
   1.632 +      DEFINE_CLASS_ID(Cmp,   Sub, 0)
   1.633 +        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
   1.634 +        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
   1.635 +
   1.636 +    DEFINE_CLASS_ID(MergeMem, Node, 7)
   1.637 +    DEFINE_CLASS_ID(Bool,     Node, 8)
   1.638 +    DEFINE_CLASS_ID(AddP,     Node, 9)
   1.639 +    DEFINE_CLASS_ID(BoxLock,  Node, 10)
   1.640 +    DEFINE_CLASS_ID(Add,      Node, 11)
   1.641 +    DEFINE_CLASS_ID(Mul,      Node, 12)
   1.642 +    DEFINE_CLASS_ID(Vector,   Node, 13)
   1.643 +    DEFINE_CLASS_ID(ClearArray, Node, 14)
   1.644 +
   1.645 +    _max_classes  = ClassMask_ClearArray
   1.646 +  };
   1.647 +  #undef DEFINE_CLASS_ID
   1.648 +
   1.649 +  // Flags are sorted by usage frequency.
   1.650 +  enum NodeFlags {
   1.651 +    Flag_is_Copy                     = 0x01, // should be first bit to avoid shift
   1.652 +    Flag_rematerialize               = Flag_is_Copy << 1,
   1.653 +    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
   1.654 +    Flag_is_macro                    = Flag_needs_anti_dependence_check << 1,
   1.655 +    Flag_is_Con                      = Flag_is_macro << 1,
   1.656 +    Flag_is_cisc_alternate           = Flag_is_Con << 1,
   1.657 +    Flag_is_dead_loop_safe           = Flag_is_cisc_alternate << 1,
   1.658 +    Flag_may_be_short_branch         = Flag_is_dead_loop_safe << 1,
   1.659 +    Flag_avoid_back_to_back_before   = Flag_may_be_short_branch << 1,
   1.660 +    Flag_avoid_back_to_back_after    = Flag_avoid_back_to_back_before << 1,
   1.661 +    Flag_has_call                    = Flag_avoid_back_to_back_after << 1,
   1.662 +    Flag_is_expensive                = Flag_has_call << 1,
   1.663 +    _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination
   1.664 +  };
   1.665 +
   1.666 +private:
   1.667 +  jushort _class_id;
   1.668 +  jushort _flags;
   1.669 +
protected:
  // These methods should be called from constructors only.
  // Overwrite the class id; subclass constructors call this repeatedly,
  // so the most-derived constructor's value wins.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // last write (most-derived class) wins
  }
  // OR the given bits into _flags; accumulates across the constructor chain.
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  // Clear the given flag bits.
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }
   1.684 +
   1.685 +public:
   1.686 +  const jushort class_id() const { return _class_id; }
   1.687 +
   1.688 +  const jushort flags() const { return _flags; }
   1.689 +
  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties
  // For each node class X this macro generates three queries:
  //   is_X()  -- class-membership test via masked compare on _class_id
  //   as_X()  -- checked downcast (asserts is_X())
  //   isa_X() -- downcast returning NULL when the node is not an X
  #define DEFINE_CLASS_QUERY(type)                           \
  bool is_##type() const {                                   \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  }                                                          \
  type##Node *as_##type() const {                            \
    assert(is_##type(), "invalid node class");               \
    return (type##Node*)this;                                \
  }                                                          \
  type##Node* isa_##type() const {                           \
    return (is_##type()) ? as_##type() : NULL;               \
  }
   1.708 +
  // Instantiate is_X/as_X/isa_X for every node class with a ClassID.
  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(Call)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(ClearArray)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(DecodeNarrowPtr)
  DEFINE_CLASS_QUERY(DecodeN)
  DEFINE_CLASS_QUERY(DecodeNKlass)
  DEFINE_CLASS_QUERY(EncodeNarrowPtr)
  DEFINE_CLASS_QUERY(EncodeP)
  DEFINE_CLASS_QUERY(EncodePKlass)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachBranch)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachConstantBase)
  DEFINE_CLASS_QUERY(MachConstant)
  DEFINE_CLASS_QUERY(MachGoto)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachProj)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MemBarStoreStore)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(Parm)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(SafePointScalarObject)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Vector)
  DEFINE_CLASS_QUERY(LoadVector)
  DEFINE_CLASS_QUERY(StoreVector)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }
   1.797 +
  // True iff the node was flagged as a constant (Flag_is_Con).
  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  // The data node which is safe to leave in dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || (is_Proj() && in(0) == NULL) ||
           ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 &&
            (!is_Proj() || !in(0)->is_Allocate()));
  }

  // is_Copy() returns copied edge index (0 or 1)
  // Note: relies on Flag_is_Copy being the low bit, so the masked value
  // itself is the edge index.
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  // Default: not a control-flow node.  CFG subclasses override this.
  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);  // a Region that is its own in(0) starts a block
    else
      return is_Start();
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }
  // The node is expensive: the best control is set during loop opts
  // (only meaningful once a control input in(0) has been set).
  bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; }
   1.838 +
//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Return true if this node actually changed.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implementation AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

  // Skip Proj and CatchProj nodes chains. Check for Null and Top.
  Node* find_exact_control(Node* ctrl);

  // Check if 'this' node dominates or equal to 'sub'.
  bool dominates(Node* sub, Node_List &nlist);

protected:
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidentally generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is Counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; Otherwise return NULL;
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out();
   1.919 +
//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;
   1.950 +  // Convenience function to extract an integer constant from a node.
   1.951 +  // If it is not an integer constant (either Con, CastII, or Mach),
   1.952 +  // return value_if_unknown.
   1.953 +  jint find_int_con(jint value_if_unknown) const {
   1.954 +    const TypeInt* t = find_int_type();
   1.955 +    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
   1.956 +  }
  // Return the constant, knowing it is an integer constant already
  // (guarantees rather than returning a fallback).
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
   1.972 +  jlong find_long_con(jint value_if_unknown) const {
   1.973 +    const TypeLong* t = find_long_type();
   1.974 +    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
   1.975 +  }
  // Long analog of find_int_type(); can produce non-constant long types too.
  const TypeLong* find_long_type() const;

  const TypePtr* get_ptr_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  intptr_t get_narrowcon() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }
   1.999 +
//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.  'pre' runs before descending into
  // inputs, 'post' after; 'env' is threaded through unchanged.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  // Recursive worker for walk(); 'visited' prevents revisiting nodes.
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);
  1.1009 +
//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const { dump("\n"); }  // Print this node.
  void dump(const char* suffix, outputStream *st = tty) const;// Print this node.
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req(outputStream *st = tty) const;     // Print required-edge info
  virtual void dump_prec(outputStream *st = tty) const;    // Print precedence-edge info
  virtual void dump_out(outputStream *st = tty) const;     // Print the output edge info
  virtual void dump_spec(outputStream *st) const {}; // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; }
  // Compact one-line dump: node index, class name, and input node indices.
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

  #if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens..
  #endif
#endif
};
  1.1068 +
//-----------------------------------------------------------------------------
// Iterators over DU info, and associated Node functions.

#if OPTO_DU_ITERATOR_ASSERT

// Common code for assertion checking on DU iterators.
// Holds cached copies of the iterated node's state so the verify methods
// can detect concurrent modification of the out-edge array.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

#define VDUI_ONLY(x)     I_VDUI_ONLY(*this, x)
  1.1097 +
// Default DU iterator.  Allows appends onto the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
//    Node* y = x->out(i);
//    ...
//  }
// Compiles in product mode to an unsigned integer index, which indexes
// onto a repeatedly reloaded base pointer of x->_out.  The loop predicate
// also reloads x->_outcnt.  If you delete, you must perform "--i" just
// before continuing the loop.  You must delete only the last-produced
// edge.  You must delete only a single copy of the last-produced edge,
// or else you must delete all copies at once (the first time the edge
// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};
  1.1153 +
// Inline bodies of the Node DU accessors (assert-checked iterator flavor).
DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
  1.1162 +
  1.1163 +
// Faster DU iterator.  Disallows insertions into the out array.
// Allows deletion from the out array only at the current point.
// Usage:
//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
//    Node* y = x->fast_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.  If you delete,
// you must perform "--i; --imax" just before continuing the loop.
// If you delete multiple copies of the same edge, you must decrement
// imax, but not i, multiple times:  "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;           VDUI_ONLY(verify_relimit(n));  }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};
  1.1221 +
// Inline bodies of the Node fast-iterator accessors.
DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
  1.1232 +
  1.1233 +
// Faster DU iterator.  Requires each successive edge to be removed.
// Does not allow insertion of any edges.
// Usage:
//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
//    Node* y = x->last_out(i);
//    ...
//  }
// Compiles in product mode to raw Node** pointer arithmetic, with
// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  // Forward iteration is intentionally unavailable on this iterator:
  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1));  }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n));  }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};
  1.1278 +
// Inline bodies of the Node backward-iterator accessors.
DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
  1.1289 +
  1.1290 +#endif //OPTO_DU_ITERATOR_ASSERT
  1.1291 +
  1.1292 +#undef I_VDUI_ONLY
  1.1293 +#undef VDUI_ONLY
  1.1294 +
  1.1295 +// An Iterator that truly follows the iterator pattern.  Doesn't
  1.1296 +// support deletion but could be made to.
  1.1297 +//
  1.1298 +//   for (SimpleDUIterator i(n); i.has_next(); i.next()) {
  1.1299 +//     Node* m = i.get();
  1.1300 +//
  1.1301 +class SimpleDUIterator : public StackObj {
  1.1302 + private:
  1.1303 +  Node* node;
  1.1304 +  DUIterator_Fast i;
  1.1305 +  DUIterator_Fast imax;
  1.1306 + public:
  1.1307 +  SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}
  1.1308 +  bool has_next() { return i < imax; }
  1.1309 +  void next() { i++; }
  1.1310 +  Node* get() { return node->fast_out(i); }
  1.1311 +};
  1.1312 +
  1.1313 +
//-----------------------------------------------------------------------------
// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Node*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Node_Array : public ResourceObj {
  friend class VMStructs;
protected:
  Arena *_a;                    // Arena to allocate in
  uint   _max;                  // current allocated capacity of _nodes
  Node **_nodes;                // backing store; entries beyond _max read as NULL
  void   grow( uint i );        // Grow array node to fit
public:
  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
    for( int i = 0; i < OptoNodeListSize; i++ ) {
      _nodes[i] = NULL;
    }
  }

  // Shallow copy: shares the backing store of 'na'.
  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  Node **adr() { return _nodes; }
  // Extend the mapping: index i maps to Node *n.
  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  void insert( uint i, Node *n );
  void remove( uint i );        // Remove, preserving order
  void sort( C_sort_func_t func);
  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  void clear();                 // Set all entries to NULL, keep storage
  uint Size() const { return _max; }
  void dump() const;
};
  1.1349 +
  1.1350 +class Node_List : public Node_Array {
  1.1351 +  friend class VMStructs;
  1.1352 +  uint _cnt;
  1.1353 +public:
  1.1354 +  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  1.1355 +  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  1.1356 +  bool contains(const Node* n) const {
  1.1357 +    for (uint e = 0; e < size(); e++) {
  1.1358 +      if (at(e) == n) return true;
  1.1359 +    }
  1.1360 +    return false;
  1.1361 +  }
  1.1362 +  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  1.1363 +  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  1.1364 +  void push( Node *b ) { map(_cnt++,b); }
  1.1365 +  void yank( Node *n );         // Find and remove
  1.1366 +  Node *pop() { return _nodes[--_cnt]; }
  1.1367 +  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  1.1368 +  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  1.1369 +  uint size() const { return _cnt; }
  1.1370 +  void dump() const;
  1.1371 +};
  1.1372 +
  1.1373 +//------------------------------Unique_Node_List-------------------------------
// A worklist of nodes on which each node may appear at most once; membership
// is tracked by a bit in _in_worklist keyed on the node's _idx.
class Unique_Node_List : public Node_List {
  friend class VMStructs;
  VectorSet _in_worklist;       // One bit per node _idx: currently on the list?
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Push only if not already present (test_set sets the bit and reports
  // whether it was already set).
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Round-robin ("clock") pop: removes the element at _clock_index by
  // overwriting it with the last element, then advances the hand so
  // successive pops cycle through the list rather than always taking
  // from one end.
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index, Node_List::pop());
    if (size() != 0) _clock_index++; // Always start from 0
    _in_worklist >>= b->_idx;        // clear membership bit
    return b;
  }
  // Remove element i; the last element fills the hole (order not preserved).
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;   // clear membership bit
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void  clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
  1.1418 +
// Inline definition of Compile::record_for_igvn must be deferred to this point.
// Queues 'n' on the IGVN worklist; Unique_Node_List::push is a no-op if the
// node is already queued.
inline void Compile::record_for_igvn(Node* n) {
  _for_igvn->push(n);
}
  1.1423 +
  1.1424 +//------------------------------Node_Stack-------------------------------------
// A stack of (node, child-index) pairs, used for iterative graph walks.
class Node_Stack {
  friend class VMStructs;
protected:
  struct INode {
    Node *node; // Processed node
    uint  indx; // Index of next node's child
  };
  INode *_inode_top; // tos, stack grows up
  INode *_inode_max; // End of _inodes == _inodes + _max
  INode *_inodes;    // Array storage for the stack
  Arena *_a;         // Arena to allocate in
  // Enlarge the backing array (defined in node.cpp); presumably relocates
  // the existing entries and fixes up the _inode_* pointers — see node.cpp.
  void grow();
public:
  // Allocate at least OptoNodeListSize entries from the current thread's
  // resource area.
  Node_Stack(int size) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _a = Thread::current()->resource_area();
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  // Same as above, but allocate from a caller-supplied arena.
  Node_Stack(Arena *a, int size) : _a(a) {
    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
    _inode_max = _inodes + max;
    _inode_top = _inodes - 1; // stack is empty
  }

  void pop() {
    assert(_inode_top >= _inodes, "node stack underflow");
    --_inode_top;
  }
  // Push pair (n, i); grows storage on demand.  The increment-then-grow
  // ordering matters: grow() is called with _inode_top already advanced.
  void push(Node *n, uint i) {
    ++_inode_top;
    if (_inode_top >= _inode_max) grow();
    INode *top = _inode_top; // optimization
    top->node = n;
    top->indx = i;
  }
  // Accessors for the top-of-stack pair; callers must ensure non-empty.
  Node *node() const {
    return _inode_top->node;
  }
  Node* node_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].node;
  }
  uint index() const {
    return _inode_top->indx;
  }
  uint index_at(uint i) const {
    assert(_inodes + i <= _inode_top, "in range");
    return _inodes[i].indx;
  }
  void set_node(Node *n) {
    _inode_top->node = n;
  }
  void set_index(uint i) {
    _inode_top->indx = i;
  }
  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
  uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes,  sizeof(INode)); } // Current size
  bool is_nonempty() const { return (_inode_top >= _inodes); }
  bool is_empty() const { return (_inode_top < _inodes); }
  void clear() { _inode_top = _inodes - 1; } // retain storage

  // Node_Stack is used to map nodes.
  Node* find(uint idx) const;
};
  1.1493 +
  1.1494 +
  1.1495 +//-----------------------------Node_Notes--------------------------------------
  1.1496 +// Debugging or profiling annotations loosely and sparsely associated
  1.1497 +// with some nodes.  See Compile::node_notes_at for the accessor.
  1.1498 +class Node_Notes VALUE_OBJ_CLASS_SPEC {
  1.1499 +  friend class VMStructs;
  1.1500 +  JVMState* _jvms;
  1.1501 +
  1.1502 +public:
  1.1503 +  Node_Notes(JVMState* jvms = NULL) {
  1.1504 +    _jvms = jvms;
  1.1505 +  }
  1.1506 +
  1.1507 +  JVMState* jvms()            { return _jvms; }
  1.1508 +  void  set_jvms(JVMState* x) {        _jvms = x; }
  1.1509 +
  1.1510 +  // True if there is nothing here.
  1.1511 +  bool is_clear() {
  1.1512 +    return (_jvms == NULL);
  1.1513 +  }
  1.1514 +
  1.1515 +  // Make there be nothing here.
  1.1516 +  void clear() {
  1.1517 +    _jvms = NULL;
  1.1518 +  }
  1.1519 +
  1.1520 +  // Make a new, clean node notes.
  1.1521 +  static Node_Notes* make(Compile* C) {
  1.1522 +    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
  1.1523 +    nn->clear();
  1.1524 +    return nn;
  1.1525 +  }
  1.1526 +
  1.1527 +  Node_Notes* clone(Compile* C) {
  1.1528 +    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
  1.1529 +    (*nn) = (*this);
  1.1530 +    return nn;
  1.1531 +  }
  1.1532 +
  1.1533 +  // Absorb any information from source.
  1.1534 +  bool update_from(Node_Notes* source) {
  1.1535 +    bool changed = false;
  1.1536 +    if (source != NULL) {
  1.1537 +      if (source->jvms() != NULL) {
  1.1538 +        set_jvms(source->jvms());
  1.1539 +        changed = true;
  1.1540 +      }
  1.1541 +    }
  1.1542 +    return changed;
  1.1543 +  }
  1.1544 +};
  1.1545 +
// Inlined accessors for Compile::node_notes that require the preceding class:
// Find the Node_Notes slot for node index 'idx' inside the two-level array
// 'arr', optionally growing 'arr' to cover the index.  Returns NULL when the
// slot does not exist and can_grow is false.
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  // Notes are stored in fixed-size sub-blocks; find which block holds idx.
  int block_idx = (idx >> _log2_node_notes_block_size);
  // Number of additional blocks needed beyond the array's current length
  // (a NULL array is treated as length 0).
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    if (!can_grow)  return NULL;
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  // Offset within the block is the low bits of idx.
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
  1.1560 +
  1.1561 +inline bool
  1.1562 +Compile::set_node_notes_at(int idx, Node_Notes* value) {
  1.1563 +  if (value == NULL || value->is_clear())
  1.1564 +    return false;  // nothing to write => write nothing
  1.1565 +  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  1.1566 +  assert(loc != NULL, "");
  1.1567 +  return loc->update_from(value);
  1.1568 +}
  1.1569 +
  1.1570 +
  1.1571 +//------------------------------TypeNode---------------------------------------
  1.1572 +// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // Declared const to discourage mutation; see set_type
public:
  // Replace this node's type in place.  Because _type is const, the write
  // must cast away const-ness.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    // Capture the hash before the change when the node may live in a hash table.
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
  1.1598 +
  1.1599 +#endif // SHARE_VM_OPTO_NODE_HPP

mercurial