src/share/vm/opto/node.hpp

changeset 435
a61af66fc99e
child 468
3288958bf319
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/share/vm/opto/node.hpp	Sat Dec 01 00:00:00 2007 +0000
     1.3 @@ -0,0 +1,1492 @@
     1.4 +/*
     1.5 + * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
    1.23 + * CA 95054 USA or visit www.sun.com if you need additional information or
    1.24 + * have any questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +// Portions of code courtesy of Clifford Click
    1.29 +
    1.30 +// Optimization - Graph Style
    1.31 +
    1.32 +
    1.33 +class AbstractLockNode;
    1.34 +class AddNode;
    1.35 +class AddPNode;
    1.36 +class AliasInfo;
    1.37 +class AllocateArrayNode;
    1.38 +class AllocateNode;
    1.39 +class Block;
    1.40 +class Block_Array;
    1.41 +class BoolNode;
    1.42 +class BoxLockNode;
    1.43 +class CMoveNode;
    1.44 +class CallDynamicJavaNode;
    1.45 +class CallJavaNode;
    1.46 +class CallLeafNode;
    1.47 +class CallNode;
    1.48 +class CallRuntimeNode;
    1.49 +class CallStaticJavaNode;
    1.50 +class CatchNode;
    1.51 +class CatchProjNode;
    1.52 +class CheckCastPPNode;
    1.53 +class CmpNode;
    1.54 +class CodeBuffer;
    1.55 +class ConstraintCastNode;
    1.56 +class ConNode;
    1.57 +class CountedLoopNode;
    1.58 +class CountedLoopEndNode;
    1.59 +class FastLockNode;
    1.60 +class FastUnlockNode;
    1.61 +class IfNode;
    1.62 +class InitializeNode;
    1.63 +class JVMState;
    1.64 +class JumpNode;
    1.65 +class JumpProjNode;
    1.66 +class LoadNode;
    1.67 +class LoadStoreNode;
    1.68 +class LockNode;
    1.69 +class LoopNode;
    1.70 +class MachCallDynamicJavaNode;
    1.71 +class MachCallJavaNode;
    1.72 +class MachCallLeafNode;
    1.73 +class MachCallNode;
    1.74 +class MachCallRuntimeNode;
    1.75 +class MachCallStaticJavaNode;
    1.76 +class MachIfNode;
    1.77 +class MachNode;
    1.78 +class MachNullCheckNode;
    1.79 +class MachReturnNode;
    1.80 +class MachSafePointNode;
    1.81 +class MachSpillCopyNode;
    1.82 +class MachTempNode;
    1.83 +class Matcher;
    1.84 +class MemBarNode;
    1.85 +class MemNode;
    1.86 +class MergeMemNode;
    1.87 +class MulNode;
    1.88 +class MultiNode;
    1.89 +class MultiBranchNode;
    1.90 +class NeverBranchNode;
    1.91 +class Node;
    1.92 +class Node_Array;
    1.93 +class Node_List;
    1.94 +class Node_Stack;
    1.95 +class NullCheckNode;
    1.96 +class OopMap;
    1.97 +class PCTableNode;
    1.98 +class PhaseCCP;
    1.99 +class PhaseGVN;
   1.100 +class PhaseIterGVN;
   1.101 +class PhaseRegAlloc;
   1.102 +class PhaseTransform;
   1.103 +class PhaseValues;
   1.104 +class PhiNode;
   1.105 +class Pipeline;
   1.106 +class ProjNode;
   1.107 +class RegMask;
   1.108 +class RegionNode;
   1.109 +class RootNode;
   1.110 +class SafePointNode;
   1.111 +class StartNode;
   1.112 +class State;
   1.113 +class StoreNode;
   1.114 +class SubNode;
   1.115 +class Type;
   1.116 +class TypeNode;
   1.117 +class UnlockNode;
   1.118 +class VectorSet;
   1.119 +class IfTrueNode;
   1.120 +class IfFalseNode;
   1.121 +typedef void (*NFunc)(Node&,void*);
   1.122 +extern "C" {
   1.123 +  typedef int (*C_sort_func_t)(const void *, const void *);
   1.124 +}
   1.125 +
   1.126 +// The type of all node counts and indexes.
   1.127 +// It must hold at least 16 bits, but must also be fast to load and store.
   1.128 +// This type, if less than 32 bits, could limit the number of possible nodes.
   1.129 +// (To make this type platform-specific, move to globalDefinitions_xxx.hpp.)
   1.130 +typedef unsigned int node_idx_t;
   1.131 +
   1.132 +
   1.133 +#ifndef OPTO_DU_ITERATOR_ASSERT
   1.134 +#ifdef ASSERT
   1.135 +#define OPTO_DU_ITERATOR_ASSERT 1
   1.136 +#else
   1.137 +#define OPTO_DU_ITERATOR_ASSERT 0
   1.138 +#endif
   1.139 +#endif //OPTO_DU_ITERATOR_ASSERT
   1.140 +
   1.141 +#if OPTO_DU_ITERATOR_ASSERT
   1.142 +class DUIterator;
   1.143 +class DUIterator_Fast;
   1.144 +class DUIterator_Last;
   1.145 +#else
   1.146 +typedef uint   DUIterator;
   1.147 +typedef Node** DUIterator_Fast;
   1.148 +typedef Node** DUIterator_Last;
   1.149 +#endif
   1.150 +
   1.151 +// Node Sentinel
   1.152 +#define NodeSentinel (Node*)-1
   1.153 +
   1.154 +// Unknown count frequency
   1.155 +#define COUNT_UNKNOWN (-1.0f)
   1.156 +
   1.157 +//------------------------------Node-------------------------------------------
   1.158 +// Nodes define actions in the program.  They create values, which have types.
   1.159 +// They are both vertices in a directed graph and program primitives.  Nodes
   1.160 +// are labeled; the label is the "opcode", the primitive function in the lambda
   1.161 +// calculus sense that gives meaning to the Node.  Node inputs are ordered (so
   1.162 +// that "a-b" is different from "b-a").  The inputs to a Node are the inputs to
   1.163 +// the Node's function.  These inputs also define a Type equation for the Node.
   1.164 +// Solving these Type equations amounts to doing dataflow analysis.
   1.165 +// Control and data are uniformly represented in the graph.  Finally, Nodes
   1.166 +// have a unique dense integer index which is used to index into side arrays
   1.167 +// whenever I have phase-specific information.
   1.168 +
   1.169 +class Node {
   1.170 +  // Lots of restrictions on cloning Nodes
   1.171 +  Node(const Node&);            // not defined; linker error to use these
   1.172 +  Node &operator=(const Node &rhs);
   1.173 +
   1.174 +public:
   1.175 +  friend class Compile;
   1.176 +  #if OPTO_DU_ITERATOR_ASSERT
   1.177 +  friend class DUIterator_Common;
   1.178 +  friend class DUIterator;
   1.179 +  friend class DUIterator_Fast;
   1.180 +  friend class DUIterator_Last;
   1.181 +  #endif
   1.182 +
   1.183 +  // Because Nodes come and go, I define an Arena of Node structures to pull
   1.184 +  // from.  This should allow fast access to node creation & deletion.  This
   1.185 +  // field is a local cache of a value defined in some "program fragment" for
   1.186 +  // which these Nodes are just a part of.
   1.187 +
   1.188 +  // New Operator that takes a Compile pointer, this will eventually
   1.189 +  // be the "new" New operator.
   1.190 +  inline void* operator new( size_t x, Compile* C) {
   1.191 +    Node* n = (Node*)C->node_arena()->Amalloc_D(x);
   1.192 +#ifdef ASSERT
   1.193 +    n->_in = (Node**)n; // magic cookie for assertion check
   1.194 +#endif
   1.195 +    n->_out = (Node**)C;
   1.196 +    return (void*)n;
   1.197 +  }
   1.198 +
   1.199 +  // New Operator that takes a Compile pointer, this will eventually
   1.200 +  // be the "new" New operator.
   1.201 +  inline void* operator new( size_t x, Compile* C, int y) {
   1.202 +    Node* n = (Node*)C->node_arena()->Amalloc_D(x + y*sizeof(void*));
   1.203 +    n->_in = (Node**)(((char*)n) + x);
   1.204 +#ifdef ASSERT
   1.205 +    n->_in[y-1] = n; // magic cookie for assertion check
   1.206 +#endif
   1.207 +    n->_out = (Node**)C;
   1.208 +    return (void*)n;
   1.209 +  }
   1.210 +
   1.211 +  // Delete is a NOP
   1.212 +  void operator delete( void *ptr ) {}
   1.213 +  // Fancy destructor; eagerly attempt to reclaim Node numberings and storage
   1.214 +  void destruct();
   1.215 +
    1.216 +  // Create a new Node.  Required is the number of inputs required for
   1.217 +  // semantic correctness.
   1.218 +  Node( uint required );
   1.219 +
   1.220 +  // Create a new Node with given input edges.
   1.221 +  // This version requires use of the "edge-count" new.
   1.222 +  // E.g.  new (C,3) FooNode( C, NULL, left, right );
   1.223 +  Node( Node *n0 );
   1.224 +  Node( Node *n0, Node *n1 );
   1.225 +  Node( Node *n0, Node *n1, Node *n2 );
   1.226 +  Node( Node *n0, Node *n1, Node *n2, Node *n3 );
   1.227 +  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 );
   1.228 +  Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 );
   1.229 +  Node( Node *n0, Node *n1, Node *n2, Node *n3,
   1.230 +            Node *n4, Node *n5, Node *n6 );
   1.231 +
   1.232 +  // Clone an inherited Node given only the base Node type.
   1.233 +  Node* clone() const;
   1.234 +
   1.235 +  // Clone a Node, immediately supplying one or two new edges.
   1.236 +  // The first and second arguments, if non-null, replace in(1) and in(2),
   1.237 +  // respectively.
   1.238 +  Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const {
   1.239 +    Node* nn = clone();
   1.240 +    if (in1 != NULL)  nn->set_req(1, in1);
   1.241 +    if (in2 != NULL)  nn->set_req(2, in2);
   1.242 +    return nn;
   1.243 +  }
   1.244 +
   1.245 +private:
   1.246 +  // Shared setup for the above constructors.
   1.247 +  // Handles all interactions with Compile::current.
   1.248 +  // Puts initial values in all Node fields except _idx.
   1.249 +  // Returns the initial value for _idx, which cannot
   1.250 +  // be initialized by assignment.
   1.251 +  inline int Init(int req, Compile* C);
   1.252 +
   1.253 +//----------------- input edge handling
   1.254 +protected:
   1.255 +  friend class PhaseCFG;        // Access to address of _in array elements
   1.256 +  Node **_in;                   // Array of use-def references to Nodes
   1.257 +  Node **_out;                  // Array of def-use references to Nodes
   1.258 +
    1.259 +  // Input edges are split into two categories.  Required edges are required
   1.260 +  // for semantic correctness; order is important and NULLs are allowed.
   1.261 +  // Precedence edges are used to help determine execution order and are
   1.262 +  // added, e.g., for scheduling purposes.  They are unordered and not
   1.263 +  // duplicated; they have no embedded NULLs.  Edges from 0 to _cnt-1
   1.264 +  // are required, from _cnt to _max-1 are precedence edges.
   1.265 +  node_idx_t _cnt;              // Total number of required Node inputs.
   1.266 +
   1.267 +  node_idx_t _max;              // Actual length of input array.
   1.268 +
   1.269 +  // Output edges are an unordered list of def-use edges which exactly
   1.270 +  // correspond to required input edges which point from other nodes
   1.271 +  // to this one.  Thus the count of the output edges is the number of
   1.272 +  // users of this node.
   1.273 +  node_idx_t _outcnt;           // Total number of Node outputs.
   1.274 +
   1.275 +  node_idx_t _outmax;           // Actual length of output array.
   1.276 +
   1.277 +  // Grow the actual input array to the next larger power-of-2 bigger than len.
   1.278 +  void grow( uint len );
   1.279 +  // Grow the output array to the next larger power-of-2 bigger than len.
   1.280 +  void out_grow( uint len );
   1.281 +
   1.282 + public:
   1.283 +  // Each Node is assigned a unique small/dense number.  This number is used
   1.284 +  // to index into auxiliary arrays of data and bitvectors.
    1.285 +  // It is declared const to defend against inadvertent assignment,
   1.286 +  // since it is used by clients as a naked field.
   1.287 +  const node_idx_t _idx;
   1.288 +
   1.289 +  // Get the (read-only) number of input edges
   1.290 +  uint req() const { return _cnt; }
   1.291 +  uint len() const { return _max; }
   1.292 +  // Get the (read-only) number of output edges
   1.293 +  uint outcnt() const { return _outcnt; }
   1.294 +
   1.295 +#if OPTO_DU_ITERATOR_ASSERT
   1.296 +  // Iterate over the out-edges of this node.  Deletions are illegal.
   1.297 +  inline DUIterator outs() const;
   1.298 +  // Use this when the out array might have changed to suppress asserts.
   1.299 +  inline DUIterator& refresh_out_pos(DUIterator& i) const;
   1.300 +  // Does the node have an out at this position?  (Used for iteration.)
   1.301 +  inline bool has_out(DUIterator& i) const;
   1.302 +  inline Node*    out(DUIterator& i) const;
   1.303 +  // Iterate over the out-edges of this node.  All changes are illegal.
   1.304 +  inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const;
   1.305 +  inline Node*    fast_out(DUIterator_Fast& i) const;
   1.306 +  // Iterate over the out-edges of this node, deleting one at a time.
   1.307 +  inline DUIterator_Last last_outs(DUIterator_Last& min) const;
   1.308 +  inline Node*    last_out(DUIterator_Last& i) const;
   1.309 +  // The inline bodies of all these methods are after the iterator definitions.
   1.310 +#else
   1.311 +  // Iterate over the out-edges of this node.  Deletions are illegal.
   1.312 +  // This iteration uses integral indexes, to decouple from array reallocations.
   1.313 +  DUIterator outs() const  { return 0; }
   1.314 +  // Use this when the out array might have changed to suppress asserts.
   1.315 +  DUIterator refresh_out_pos(DUIterator i) const { return i; }
   1.316 +
   1.317 +  // Reference to the i'th output Node.  Error if out of bounds.
   1.318 +  Node*    out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; }
   1.319 +  // Does the node have an out at this position?  (Used for iteration.)
   1.320 +  bool has_out(DUIterator i) const { return i < _outcnt; }
   1.321 +
   1.322 +  // Iterate over the out-edges of this node.  All changes are illegal.
   1.323 +  // This iteration uses a pointer internal to the out array.
   1.324 +  DUIterator_Fast fast_outs(DUIterator_Fast& max) const {
   1.325 +    Node** out = _out;
   1.326 +    // Assign a limit pointer to the reference argument:
   1.327 +    max = out + (ptrdiff_t)_outcnt;
   1.328 +    // Return the base pointer:
   1.329 +    return out;
   1.330 +  }
   1.331 +  Node*    fast_out(DUIterator_Fast i) const  { return *i; }
   1.332 +  // Iterate over the out-edges of this node, deleting one at a time.
   1.333 +  // This iteration uses a pointer internal to the out array.
   1.334 +  DUIterator_Last last_outs(DUIterator_Last& min) const {
   1.335 +    Node** out = _out;
   1.336 +    // Assign a limit pointer to the reference argument:
   1.337 +    min = out;
   1.338 +    // Return the pointer to the start of the iteration:
   1.339 +    return out + (ptrdiff_t)_outcnt - 1;
   1.340 +  }
   1.341 +  Node*    last_out(DUIterator_Last i) const  { return *i; }
   1.342 +#endif
   1.343 +
   1.344 +  // Reference to the i'th input Node.  Error if out of bounds.
   1.345 +  Node* in(uint i) const { assert(i < _max,"oob"); return _in[i]; }
   1.346 +  // Reference to the i'th output Node.  Error if out of bounds.
    1.347 +  // Use this accessor sparingly.  We are trying to use iterators instead.
   1.348 +  Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; }
   1.349 +  // Return the unique out edge.
   1.350 +  Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; }
   1.351 +  // Delete out edge at position 'i' by moving last out edge to position 'i'
   1.352 +  void  raw_del_out(uint i) {
   1.353 +    assert(i < _outcnt,"oob");
   1.354 +    assert(_outcnt > 0,"oob");
   1.355 +    #if OPTO_DU_ITERATOR_ASSERT
   1.356 +    // Record that a change happened here.
   1.357 +    debug_only(_last_del = _out[i]; ++_del_tick);
   1.358 +    #endif
   1.359 +    _out[i] = _out[--_outcnt];
   1.360 +    // Smash the old edge so it can't be used accidentally.
   1.361 +    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
   1.362 +  }
   1.363 +
   1.364 +#ifdef ASSERT
   1.365 +  bool is_dead() const;
   1.366 +#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
   1.367 +#endif
   1.368 +
   1.369 +  // Set a required input edge, also updates corresponding output edge
   1.370 +  void add_req( Node *n ); // Append a NEW required input
   1.371 +  void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n).
   1.372 +  void del_req( uint idx ); // Delete required edge & compact
   1.373 +  void ins_req( uint i, Node *n ); // Insert a NEW required input
   1.374 +  void set_req( uint i, Node *n ) {
   1.375 +    assert( is_not_dead(n), "can not use dead node");
   1.376 +    assert( i < _cnt, "oob");
   1.377 +    assert( !VerifyHashTableKeys || _hash_lock == 0,
   1.378 +            "remove node from hash table before modifying it");
   1.379 +    Node** p = &_in[i];    // cache this._in, across the del_out call
   1.380 +    if (*p != NULL)  (*p)->del_out((Node *)this);
   1.381 +    (*p) = n;
   1.382 +    if (n != NULL)      n->add_out((Node *)this);
   1.383 +  }
   1.384 +  // Light version of set_req() to init inputs after node creation.
   1.385 +  void init_req( uint i, Node *n ) {
   1.386 +    assert( i == 0 && this == n ||
   1.387 +            is_not_dead(n), "can not use dead node");
   1.388 +    assert( i < _cnt, "oob");
   1.389 +    assert( !VerifyHashTableKeys || _hash_lock == 0,
   1.390 +            "remove node from hash table before modifying it");
   1.391 +    assert( _in[i] == NULL, "sanity");
   1.392 +    _in[i] = n;
   1.393 +    if (n != NULL)      n->add_out((Node *)this);
   1.394 +  }
   1.395 +  // Find first occurrence of n among my edges:
   1.396 +  int find_edge(Node* n);
   1.397 +  int replace_edge(Node* old, Node* neww);
   1.398 +  // NULL out all inputs to eliminate incoming Def-Use edges.
   1.399 +  // Return the number of edges between 'n' and 'this'
   1.400 +  int  disconnect_inputs(Node *n);
   1.401 +
   1.402 +  // Quickly, return true if and only if I am Compile::current()->top().
   1.403 +  bool is_top() const {
   1.404 +    assert((this == (Node*) Compile::current()->top()) == (_out == NULL), "");
   1.405 +    return (_out == NULL);
   1.406 +  }
   1.407 +  // Reaffirm invariants for is_top.  (Only from Compile::set_cached_top_node.)
   1.408 +  void setup_is_top();
   1.409 +
   1.410 +  // Strip away casting.  (It is depth-limited.)
   1.411 +  Node* uncast() const;
   1.412 +
   1.413 +private:
   1.414 +  static Node* uncast_helper(const Node* n);
   1.415 +
   1.416 +  // Add an output edge to the end of the list
   1.417 +  void add_out( Node *n ) {
   1.418 +    if (is_top())  return;
   1.419 +    if( _outcnt == _outmax ) out_grow(_outcnt);
   1.420 +    _out[_outcnt++] = n;
   1.421 +  }
   1.422 +  // Delete an output edge
   1.423 +  void del_out( Node *n ) {
   1.424 +    if (is_top())  return;
   1.425 +    Node** outp = &_out[_outcnt];
   1.426 +    // Find and remove n
   1.427 +    do {
   1.428 +      assert(outp > _out, "Missing Def-Use edge");
   1.429 +    } while (*--outp != n);
   1.430 +    *outp = _out[--_outcnt];
   1.431 +    // Smash the old edge so it can't be used accidentally.
   1.432 +    debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef);
   1.433 +    // Record that a change happened here.
   1.434 +    #if OPTO_DU_ITERATOR_ASSERT
   1.435 +    debug_only(_last_del = n; ++_del_tick);
   1.436 +    #endif
   1.437 +  }
   1.438 +
   1.439 +public:
   1.440 +  // Globally replace this node by a given new node, updating all uses.
   1.441 +  void replace_by(Node* new_node);
   1.442 +  void set_req_X( uint i, Node *n, PhaseIterGVN *igvn );
   1.443 +  // Find the one non-null required input.  RegionNode only
   1.444 +  Node *nonnull_req() const;
   1.445 +  // Add or remove precedence edges
   1.446 +  void add_prec( Node *n );
   1.447 +  void rm_prec( uint i );
   1.448 +  void set_prec( uint i, Node *n ) {
   1.449 +    assert( is_not_dead(n), "can not use dead node");
   1.450 +    assert( i >= _cnt, "not a precedence edge");
   1.451 +    if (_in[i] != NULL) _in[i]->del_out((Node *)this);
   1.452 +    _in[i] = n;
   1.453 +    if (n != NULL) n->add_out((Node *)this);
   1.454 +  }
   1.455 +  // Set this node's index, used by cisc_version to replace current node
   1.456 +  void set_idx(uint new_idx) {
   1.457 +    const node_idx_t* ref = &_idx;
   1.458 +    *(node_idx_t*)ref = new_idx;
   1.459 +  }
   1.460 +  // Swap input edge order.  (Edge indexes i1 and i2 are usually 1 and 2.)
   1.461 +  void swap_edges(uint i1, uint i2) {
   1.462 +    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
   1.463 +    // Def-Use info is unchanged
   1.464 +    Node* n1 = in(i1);
   1.465 +    Node* n2 = in(i2);
   1.466 +    _in[i1] = n2;
   1.467 +    _in[i2] = n1;
   1.468 +    // If this node is in the hash table, make sure it doesn't need a rehash.
   1.469 +    assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code");
   1.470 +  }
   1.471 +
   1.472 +  // Iterators over input Nodes for a Node X are written as:
   1.473 +  // for( i = 0; i < X.req(); i++ ) ... X[i] ...
   1.474 +  // NOTE: Required edges can contain embedded NULL pointers.
   1.475 +
   1.476 +//----------------- Other Node Properties
   1.477 +
   1.478 +  // Generate class id for some ideal nodes to avoid virtual query
   1.479 +  // methods is_<Node>().
   1.480 +  // Class id is the set of bits corresponded to the node class and all its
   1.481 +  // super classes so that queries for super classes are also valid.
   1.482 +  // Subclasses of the same super class have different assigned bit
   1.483 +  // (the third parameter in the macro DEFINE_CLASS_ID).
   1.484 +  // Classes with deeper hierarchy are declared first.
   1.485 +  // Classes with the same hierarchy depth are sorted by usage frequency.
   1.486 +  //
   1.487 +  // The query method masks the bits to cut off bits of subclasses
   1.488 +  // and then compare the result with the class id
   1.489 +  // (see the macro DEFINE_CLASS_QUERY below).
   1.490 +  //
   1.491 +  //  Class_MachCall=30, ClassMask_MachCall=31
   1.492 +  // 12               8               4               0
   1.493 +  //  0   0   0   0   0   0   0   0   1   1   1   1   0
   1.494 +  //                                  |   |   |   |
   1.495 +  //                                  |   |   |   Bit_Mach=2
   1.496 +  //                                  |   |   Bit_MachReturn=4
   1.497 +  //                                  |   Bit_MachSafePoint=8
   1.498 +  //                                  Bit_MachCall=16
   1.499 +  //
   1.500 +  //  Class_CountedLoop=56, ClassMask_CountedLoop=63
   1.501 +  // 12               8               4               0
   1.502 +  //  0   0   0   0   0   0   0   1   1   1   0   0   0
   1.503 +  //                              |   |   |
   1.504 +  //                              |   |   Bit_Region=8
   1.505 +  //                              |   Bit_Loop=16
   1.506 +  //                              Bit_CountedLoop=32
   1.507 +
   1.508 +  #define DEFINE_CLASS_ID(cl, supcl, subn) \
   1.509 +  Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \
   1.510 +  Class_##cl = Class_##supcl + Bit_##cl , \
   1.511 +  ClassMask_##cl = ((Bit_##cl << 1) - 1) ,
   1.512 +
   1.513 +  // This enum is used only for C2 ideal and mach nodes with is_<node>() methods
    1.514 +  // so that its values fit into 16 bits.
   1.515 +  enum NodeClasses {
   1.516 +    Bit_Node   = 0x0000,
   1.517 +    Class_Node = 0x0000,
   1.518 +    ClassMask_Node = 0xFFFF,
   1.519 +
   1.520 +    DEFINE_CLASS_ID(Multi, Node, 0)
   1.521 +      DEFINE_CLASS_ID(SafePoint, Multi, 0)
   1.522 +        DEFINE_CLASS_ID(Call,      SafePoint, 0)
   1.523 +          DEFINE_CLASS_ID(CallJava,         Call, 0)
   1.524 +            DEFINE_CLASS_ID(CallStaticJava,   CallJava, 0)
   1.525 +            DEFINE_CLASS_ID(CallDynamicJava,  CallJava, 1)
   1.526 +          DEFINE_CLASS_ID(CallRuntime,      Call, 1)
   1.527 +            DEFINE_CLASS_ID(CallLeaf,         CallRuntime, 0)
   1.528 +          DEFINE_CLASS_ID(Allocate,         Call, 2)
   1.529 +            DEFINE_CLASS_ID(AllocateArray,    Allocate, 0)
   1.530 +          DEFINE_CLASS_ID(AbstractLock,     Call, 3)
   1.531 +            DEFINE_CLASS_ID(Lock,             AbstractLock, 0)
   1.532 +            DEFINE_CLASS_ID(Unlock,           AbstractLock, 1)
   1.533 +      DEFINE_CLASS_ID(MultiBranch, Multi, 1)
   1.534 +        DEFINE_CLASS_ID(PCTable,     MultiBranch, 0)
   1.535 +          DEFINE_CLASS_ID(Catch,       PCTable, 0)
   1.536 +          DEFINE_CLASS_ID(Jump,        PCTable, 1)
   1.537 +        DEFINE_CLASS_ID(If,          MultiBranch, 1)
   1.538 +          DEFINE_CLASS_ID(CountedLoopEnd, If, 0)
   1.539 +        DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2)
   1.540 +      DEFINE_CLASS_ID(Start,       Multi, 2)
   1.541 +      DEFINE_CLASS_ID(MemBar,      Multi, 3)
   1.542 +        DEFINE_CLASS_ID(Initialize,    MemBar, 0)
   1.543 +
   1.544 +    DEFINE_CLASS_ID(Mach,  Node, 1)
   1.545 +      DEFINE_CLASS_ID(MachReturn, Mach, 0)
   1.546 +        DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0)
   1.547 +          DEFINE_CLASS_ID(MachCall, MachSafePoint, 0)
   1.548 +            DEFINE_CLASS_ID(MachCallJava,         MachCall, 0)
   1.549 +              DEFINE_CLASS_ID(MachCallStaticJava,   MachCallJava, 0)
   1.550 +              DEFINE_CLASS_ID(MachCallDynamicJava,  MachCallJava, 1)
   1.551 +            DEFINE_CLASS_ID(MachCallRuntime,      MachCall, 1)
   1.552 +              DEFINE_CLASS_ID(MachCallLeaf,         MachCallRuntime, 0)
   1.553 +      DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
   1.554 +      DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
   1.555 +      DEFINE_CLASS_ID(MachIf,        Mach, 3)
   1.556 +      DEFINE_CLASS_ID(MachTemp,      Mach, 4)
   1.557 +
   1.558 +    DEFINE_CLASS_ID(Proj,  Node, 2)
   1.559 +      DEFINE_CLASS_ID(CatchProj, Proj, 0)
   1.560 +      DEFINE_CLASS_ID(JumpProj,  Proj, 1)
   1.561 +      DEFINE_CLASS_ID(IfTrue,    Proj, 2)
   1.562 +      DEFINE_CLASS_ID(IfFalse,   Proj, 3)
   1.563 +
   1.564 +    DEFINE_CLASS_ID(Region, Node, 3)
   1.565 +      DEFINE_CLASS_ID(Loop, Region, 0)
   1.566 +        DEFINE_CLASS_ID(Root,        Loop, 0)
   1.567 +        DEFINE_CLASS_ID(CountedLoop, Loop, 1)
   1.568 +
   1.569 +    DEFINE_CLASS_ID(Sub,   Node, 4)
   1.570 +      DEFINE_CLASS_ID(Cmp,   Sub, 0)
   1.571 +        DEFINE_CLASS_ID(FastLock,   Cmp, 0)
   1.572 +        DEFINE_CLASS_ID(FastUnlock, Cmp, 1)
   1.573 +
   1.574 +    DEFINE_CLASS_ID(Type,  Node, 5)
   1.575 +      DEFINE_CLASS_ID(Phi,   Type, 0)
   1.576 +      DEFINE_CLASS_ID(ConstraintCast, Type, 1)
   1.577 +      DEFINE_CLASS_ID(CheckCastPP, Type, 2)
   1.578 +      DEFINE_CLASS_ID(CMove, Type, 3)
   1.579 +
   1.580 +    DEFINE_CLASS_ID(Mem,   Node, 6)
   1.581 +      DEFINE_CLASS_ID(Load,  Mem, 0)
   1.582 +      DEFINE_CLASS_ID(Store, Mem, 1)
   1.583 +      DEFINE_CLASS_ID(LoadStore, Mem, 2)
   1.584 +
   1.585 +    DEFINE_CLASS_ID(MergeMem, Node, 7)
   1.586 +    DEFINE_CLASS_ID(Bool,     Node, 8)
   1.587 +    DEFINE_CLASS_ID(AddP,     Node, 9)
   1.588 +    DEFINE_CLASS_ID(BoxLock,  Node, 10)
   1.589 +    DEFINE_CLASS_ID(Add,      Node, 11)
   1.590 +    DEFINE_CLASS_ID(Mul,      Node, 12)
   1.591 +
   1.592 +    _max_classes  = ClassMask_Mul
   1.593 +  };
   1.594 +  #undef DEFINE_CLASS_ID
   1.595 +
   1.596 +  // Flags are sorted by usage frequency.
   1.597 +  enum NodeFlags {
   1.598 +    Flag_is_Copy             = 0x01, // should be first bit to avoid shift
   1.599 +    Flag_is_Call             = Flag_is_Copy << 1,
   1.600 +    Flag_rematerialize       = Flag_is_Call << 1,
   1.601 +    Flag_needs_anti_dependence_check = Flag_rematerialize << 1,
   1.602 +    Flag_is_macro            = Flag_needs_anti_dependence_check << 1,
   1.603 +    Flag_is_Con              = Flag_is_macro << 1,
   1.604 +    Flag_is_cisc_alternate   = Flag_is_Con << 1,
   1.605 +    Flag_is_Branch           = Flag_is_cisc_alternate << 1,
   1.606 +    Flag_is_block_start      = Flag_is_Branch << 1,
   1.607 +    Flag_is_Goto             = Flag_is_block_start << 1,
   1.608 +    Flag_is_dead_loop_safe   = Flag_is_Goto << 1,
   1.609 +    Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1,
   1.610 +    Flag_is_safepoint_node   = Flag_may_be_short_branch << 1,
   1.611 +    Flag_is_pc_relative      = Flag_is_safepoint_node << 1,
   1.612 +    Flag_is_Vector           = Flag_is_pc_relative << 1,
   1.613 +    _max_flags = (Flag_is_Vector << 1) - 1 // allow flags combination
   1.614 +  };
   1.615 +
   1.616 +private:
   1.617 +  jushort _class_id;
   1.618 +  jushort _flags;
   1.619 +
   1.620 +protected:
   1.621 +  // These methods should be called from constructors only.
   1.622 +  void init_class_id(jushort c) {
   1.623 +    assert(c <= _max_classes, "invalid node class");
   1.624 +    _class_id = c; // cast out const
   1.625 +  }
   1.626 +  void init_flags(jushort fl) {
   1.627 +    assert(fl <= _max_flags, "invalid node flag");
   1.628 +    _flags |= fl;
   1.629 +  }
   1.630 +  void clear_flag(jushort fl) {
   1.631 +    assert(fl <= _max_flags, "invalid node flag");
   1.632 +    _flags &= ~fl;
   1.633 +  }
   1.634 +
   1.635 +public:
   1.636 +  const jushort class_id() const { return _class_id; }

  // Read-only accessor for the per-node boolean property flags (Flag_* bits).
  const jushort flags() const { return _flags; }

  // Return a dense integer opcode number
  virtual int Opcode() const;

  // Virtual inherited Node size
  virtual uint size_of() const;

  // Other interesting Node properties

  // Special case: is_Call() returns true for both CallNode and MachCallNode.
  bool is_Call() const {
    return (_flags & Flag_is_Call) != 0;
  }

  // as_Call() is not generated by DEFINE_CLASS_QUERY below because its
  // matching is_Call() above tests _flags (so it also covers MachCallNode),
  // while this downcast is checked against the ideal CallNode class id only.
  CallNode *as_Call() const { // Only for CallNode (not for MachCallNode)
    assert((_class_id & ClassMask_Call) == Class_Call, "invalid node class");
    return (CallNode*)this;
  }

  // Generates a pair of members per listed node class:
  //   is_<type>() -- constant-time class test against the _class_id bit field
  //   as_<type>() -- checked downcast to <type>Node (asserts on mismatch)
  #define DEFINE_CLASS_QUERY(type) \
  bool is_##type() const { \
    return ((_class_id & ClassMask_##type) == Class_##type); \
  } \
  type##Node *as_##type() const { \
    assert(is_##type(), "invalid node class"); \
    return (type##Node*)this; \
  }

  DEFINE_CLASS_QUERY(AbstractLock)
  DEFINE_CLASS_QUERY(Add)
  DEFINE_CLASS_QUERY(AddP)
  DEFINE_CLASS_QUERY(Allocate)
  DEFINE_CLASS_QUERY(AllocateArray)
  DEFINE_CLASS_QUERY(Bool)
  DEFINE_CLASS_QUERY(BoxLock)
  DEFINE_CLASS_QUERY(CallDynamicJava)
  DEFINE_CLASS_QUERY(CallJava)
  DEFINE_CLASS_QUERY(CallLeaf)
  DEFINE_CLASS_QUERY(CallRuntime)
  DEFINE_CLASS_QUERY(CallStaticJava)
  DEFINE_CLASS_QUERY(Catch)
  DEFINE_CLASS_QUERY(CatchProj)
  DEFINE_CLASS_QUERY(CheckCastPP)
  DEFINE_CLASS_QUERY(ConstraintCast)
  DEFINE_CLASS_QUERY(CMove)
  DEFINE_CLASS_QUERY(Cmp)
  DEFINE_CLASS_QUERY(CountedLoop)
  DEFINE_CLASS_QUERY(CountedLoopEnd)
  DEFINE_CLASS_QUERY(FastLock)
  DEFINE_CLASS_QUERY(FastUnlock)
  DEFINE_CLASS_QUERY(If)
  DEFINE_CLASS_QUERY(IfFalse)
  DEFINE_CLASS_QUERY(IfTrue)
  DEFINE_CLASS_QUERY(Initialize)
  DEFINE_CLASS_QUERY(Jump)
  DEFINE_CLASS_QUERY(JumpProj)
  DEFINE_CLASS_QUERY(Load)
  DEFINE_CLASS_QUERY(LoadStore)
  DEFINE_CLASS_QUERY(Lock)
  DEFINE_CLASS_QUERY(Loop)
  DEFINE_CLASS_QUERY(Mach)
  DEFINE_CLASS_QUERY(MachCall)
  DEFINE_CLASS_QUERY(MachCallDynamicJava)
  DEFINE_CLASS_QUERY(MachCallJava)
  DEFINE_CLASS_QUERY(MachCallLeaf)
  DEFINE_CLASS_QUERY(MachCallRuntime)
  DEFINE_CLASS_QUERY(MachCallStaticJava)
  DEFINE_CLASS_QUERY(MachIf)
  DEFINE_CLASS_QUERY(MachNullCheck)
  DEFINE_CLASS_QUERY(MachReturn)
  DEFINE_CLASS_QUERY(MachSafePoint)
  DEFINE_CLASS_QUERY(MachSpillCopy)
  DEFINE_CLASS_QUERY(MachTemp)
  DEFINE_CLASS_QUERY(Mem)
  DEFINE_CLASS_QUERY(MemBar)
  DEFINE_CLASS_QUERY(MergeMem)
  DEFINE_CLASS_QUERY(Mul)
  DEFINE_CLASS_QUERY(Multi)
  DEFINE_CLASS_QUERY(MultiBranch)
  DEFINE_CLASS_QUERY(PCTable)
  DEFINE_CLASS_QUERY(Phi)
  DEFINE_CLASS_QUERY(Proj)
  DEFINE_CLASS_QUERY(Region)
  DEFINE_CLASS_QUERY(Root)
  DEFINE_CLASS_QUERY(SafePoint)
  DEFINE_CLASS_QUERY(Start)
  DEFINE_CLASS_QUERY(Store)
  DEFINE_CLASS_QUERY(Sub)
  DEFINE_CLASS_QUERY(Type)
  DEFINE_CLASS_QUERY(Unlock)

  #undef DEFINE_CLASS_QUERY

  // duplicate of is_MachSpillCopy()
  bool is_SpillCopy () const {
    return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy);
  }

  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
  bool is_Goto() const { return (_flags & Flag_is_Goto) != 0; }
  // The data node which is safe to leave in dead loop during IGVN optimization.
  bool is_dead_loop_safe() const {
    return is_Phi() || is_Proj() ||
           (_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0;
  }

  // is_Copy() returns copied edge index (0 or 1)
  uint is_Copy() const { return (_flags & Flag_is_Copy); }

  virtual bool is_CFG() const { return false; }

  // If this node is control-dependent on a test, can it be
  // rerouted to a dominating equivalent test?  This is usually
  // true of non-CFG nodes, but can be false for operations which
  // depend for their correct sequencing on more than one test.
  // (In that case, hoisting to a dominating test may silently
  // skip some other important test.)
  virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; };

  // defined for MachNodes that match 'If' | 'Goto' | 'CountedLoopEnd'
  bool is_Branch() const { return (_flags & Flag_is_Branch) != 0; }

  // When building basic blocks, I need to have a notion of block beginning
  // Nodes, next block selector Nodes (block enders), and next block
  // projections.  These calls need to work on their machine equivalents.  The
  // Ideal beginning Nodes are RootNode, RegionNode and StartNode.
  bool is_block_start() const {
    if ( is_Region() )
      return this == (const Node*)in(0);  // a Region starts a block iff it is its own in(0)
    else
      return (_flags & Flag_is_block_start) != 0;
  }

  // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root,
  // Goto and Return.  This call also returns the block ending Node.
  virtual const Node *is_block_proj() const;

  // The node is a "macro" node which needs to be expanded before matching
  bool is_macro() const { return (_flags & Flag_is_macro) != 0; }

  // Value is a vector of primitive values
  bool is_Vector() const { return (_flags & Flag_is_Vector) != 0; }
   1.781 +
//----------------- Optimization

  // Get the worst-case Type output for this Node.
  virtual const class Type *bottom_type() const;

  // If we find a better type for a node, try to record it permanently.
  // Be sure to do the hash_delete game in the "rehash" variant.
  void raise_bottom_type(const Type* new_type);

  // Get the address type with which this node uses and/or defs memory,
  // or NULL if none.  The address type is conservatively wide.
  // Returns non-null for calls, membars, loads, stores, etc.
  // Returns TypePtr::BOTTOM if the node touches memory "broadly".
  virtual const class TypePtr *adr_type() const { return NULL; }

  // Return an existing node which computes the same function as this node.
  // The optimistic combined algorithm requires this to return a Node which
  // is a small number of steps away (e.g., one of my inputs).
  virtual Node *Identity( PhaseTransform *phase );

  // Return the set of values this Node can take on at runtime.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Return a node which is more "ideal" than the current node.
  // The invariants on this call are subtle.  If in doubt, read the
  // treatise in node.cpp above the default implemention AND TEST WITH
  // +VerifyIterativeGVN!
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Some nodes have specific Ideal subgraph transformations only if they are
  // unique users of specific nodes. Such nodes should be put on IGVN worklist
  // for the transformations to happen.
  bool has_special_unique_user() const;

protected:
  // Shared helper for Ideal() implementations; see node.cpp for its exact
  // contract (presumably reports whether the graph was changed -- confirm there).
  bool remove_dead_region(PhaseGVN *phase, bool can_reshape);
public:

  // Idealize graph, using DU info.  Done after constant propagation
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  // See if there is valid pipeline info
  static  const Pipeline *pipeline_class();
  virtual const Pipeline *pipeline() const;

  // Compute the latency from the def to this instruction of the ith input node
  uint latency(uint i);

  // Hash & compare functions, for pessimistic value numbering

  // If the hash function returns the special sentinel value NO_HASH,
  // the node is guaranteed never to compare equal to any other node.
  // If we accidently generate a hash with value NO_HASH the node
  // won't go into the table and we'll lose a little optimization.
  enum { NO_HASH = 0 };
  virtual uint hash() const;
  virtual uint cmp( const Node &n ) const;

  // Operation appears to be iteratively computed (such as an induction variable)
  // It is possible for this operation to return false for a loop-varying
  // value, if it appears (by local graph inspection) to be computed by a simple conditional.
  bool is_iteratively_computed();

  // Determine if a node is Counted loop induction variable.
  // The method is defined in loopnode.cpp.
  const Node* is_loop_iv() const;

  // Return a node with opcode "opc" and same inputs as "this" if one can
  // be found; Otherwise return NULL;
  Node* find_similar(int opc);

  // Return the unique control out if only one. Null if none or more than one.
  Node* unique_ctrl_out();

//----------------- Code Generation

  // Ideal register class for Matching.  Zero means unmatched instruction
  // (these are cloned instead of converted to machine nodes).
  virtual uint ideal_reg() const;

  static const uint NotAMachineReg;   // must be > max. machine register

  // Do we Match on this edge index or not?  Generally false for Control
  // and true for everything else.  Weird for calls & returns.
  virtual uint match_edge(uint idx) const;

  // Register class output is returned in
  virtual const RegMask &out_RegMask() const;
  // Register class input is expected in
  virtual const RegMask &in_RegMask(uint) const;
  // Should we clone rather than spill this instruction?
  bool rematerialize() const;

  // Return JVM State Object if this Node carries debug info, or NULL otherwise
  virtual JVMState* jvms() const;

  // Print as assembly
  virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const;
  // Emit bytes starting at parameter 'ptr'
  // Bump 'ptr' by the number of output bytes
  virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const;
  // Size of instruction in bytes
  virtual uint size(PhaseRegAlloc *ra_) const;

  // Convenience function to extract an integer constant from a node.
  // If it is not an integer constant (either Con, CastII, or Mach),
  // return value_if_unknown.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
  // Return the constant, knowing it is an integer constant already
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
  // Here's where the work is done.  Can produce non-constant int types too.
  const TypeInt* find_int_type() const;

  // Same thing for long (and intptr_t, via type.hpp):
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
   1.909 +  jlong find_long_con(jint value_if_unknown) const {
   1.910 +    const TypeLong* t = find_long_type();
   1.911 +    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
   1.912 +  }
  // Here's where the work is done; can produce non-constant long types too.
  const TypeLong* find_long_type() const;

  // These guys are called by code generated by ADLC:
  intptr_t get_ptr() const;
  jdouble getd() const;
  jfloat getf() const;

  // Nodes which are pinned into basic blocks
  virtual bool pinned() const { return false; }

  // Nodes which use memory without consuming it, hence need antidependences
  // More specifically, needs_anti_dependence_check returns true iff the node
  // (a) does a load, and (b) does not perform a store (except perhaps to a
  // stack slot or some other unaliased location).
  bool needs_anti_dependence_check() const;

  // Return which operand this instruction may cisc-spill. In other words,
  // return operand position that can convert from reg to memory access
  virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; }
  bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; }

//----------------- Graph walking
public:
  // Walk and apply member functions recursively.
  // Supplied (this) pointer is root.
  void walk(NFunc pre, NFunc post, void *env);
  static void nop(Node &, void*); // Dummy empty function
  static void packregion( Node &n, void* );
private:
  // Recursive worker for walk(); 'visited' prevents revisiting nodes.
  void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited);

//----------------- Printing, etc
public:
#ifndef PRODUCT
  Node* find(int idx) const;         // Search the graph for the given idx.
  Node* find_ctrl(int idx) const;    // Search control ancestors for the given idx.
  void dump() const;                 // Print this node,
  void dump(int depth) const;        // Print this node, recursively to depth d
  void dump_ctrl(int depth) const;   // Print control nodes, to depth d
  virtual void dump_req() const;     // Print required-edge info
  virtual void dump_prec() const;    // Print precedence-edge info
  virtual void dump_out() const;     // Print the output edge info
  virtual void dump_spec(outputStream *st) const {}; // Print per-node info
  void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges
  void verify() const;               // Check Def-Use info for my subgraph
  static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space);

  // This call defines a class-unique string used to identify class instances
  virtual const char *Name() const;

  void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...)
  // RegMask Print Functions
  void dump_in_regmask(int idx) { in_RegMask(idx).dump(); }
  void dump_out_regmask() { out_RegMask().dump(); }
  static int _in_dump_cnt;           // Nonzero while a dump is in progress (see in_dump)
  static bool in_dump() { return _in_dump_cnt > 0; }
  // One-line dump: node idx and Name, then the idx of each input (or NULL).
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
#endif
#ifdef ASSERT
  void verify_construction();
  bool verify_jvms(const JVMState* jvms) const;
  int  _debug_idx;                     // Unique value assigned to every node.
  int   debug_idx() const              { return _debug_idx; }
  void  set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; }

  Node* _debug_orig;                   // Original version of this, if any.
  Node*  debug_orig() const            { return _debug_orig; }
  void   set_debug_orig(Node* orig);   // _debug_orig = orig

  // Counts how many hash tables currently hold this node; while nonzero the
  // node must not be mutated (that would invalidate its stored hash).
  int        _hash_lock;               // Barrier to modifications of nodes in the hash table
  void  enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); }
  void   exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); }

  static void init_NodeProperty();

  #if OPTO_DU_ITERATOR_ASSERT
  const Node* _last_del;               // The last deleted node.
  uint        _del_tick;               // Bumped when a deletion happens..
  #endif
#endif
};
  1.1002 +
  1.1003 +//-----------------------------------------------------------------------------
  1.1004 +// Iterators over DU info, and associated Node functions.
  1.1005 +
  1.1006 +#if OPTO_DU_ITERATOR_ASSERT
  1.1007 +
  1.1008 +// Common code for assertion checking on DU iterators.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  // In product builds every verification hook expands to nothing.
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};

// Shorthand: apply the verification expression x to this iterator itself.
#define VDUI_ONLY(x)     I_VDUI_ONLY(*this, x)
  1.1031 +
  1.1032 +// Default DU iterator.  Allows appends onto the out array.
  1.1033 +// Allows deletion from the out array only at the current point.
  1.1034 +// Usage:
  1.1035 +//  for (DUIterator i = x->outs(); x->has_out(i); i++) {
  1.1036 +//    Node* y = x->out(i);
  1.1037 +//    ...
  1.1038 +//  }
  1.1039 +// Compiles in product mode to a unsigned integer index, which indexes
  1.1040 +// onto a repeatedly reloaded base pointer of x->_out.  The loop predicate
  1.1041 +// also reloads x->_outcnt.  If you delete, you must perform "--i" just
  1.1042 +// before continuing the loop.  You must delete only the last-produced
  1.1043 +// edge.  You must delete only a single copy of the last-produced edge,
  1.1044 +// or else you must delete all copies at once (the first time the edge
  1.1045 +// is produced by the iterator).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  // Private: only Node (a friend) builds positioned iterators, via outs().
  // The dummy int argument distinguishes this from other constructors.
  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  // Back up over a deletion; see the usage comment above the class.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};

// Inline bodies of the Node iteration entry points declared in class Node.
DUIterator Node::outs() const
  { return DUIterator(this, 0); }
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
  1.1096 +
  1.1097 +
  1.1098 +// Faster DU iterator.  Disallows insertions into the out array.
  1.1099 +// Allows deletion from the out array only at the current point.
  1.1100 +// Usage:
  1.1101 +//  for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) {
  1.1102 +//    Node* y = x->fast_out(i);
  1.1103 +//    ...
  1.1104 +//  }
  1.1105 +// Compiles in product mode to raw Node** pointer arithmetic, with
  1.1106 +// no reloading of pointers from the original node x.  If you delete,
  1.1107 +// you must perform "--i; --imax" just before continuing the loop.
  1.1108 +// If you delete multiple copies of the same edge, you must decrement
  1.1109 +// imax, but not i, multiple times:  "--i, imax -= num_edges".
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  // Back up over a deletion; see the usage comment above the class.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;           VDUI_ONLY(verify_relimit(n));  }

  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};

DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const {
  // Assign a limit pointer to the reference argument:
  imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt);
  // Return the base pointer:
  return DUIterator_Fast(this, 0);
}
Node* Node::fast_out(DUIterator_Fast& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
  1.1166 +
  1.1167 +
  1.1168 +// Faster DU iterator.  Requires each successive edge to be removed.
  1.1169 +// Does not allow insertion of any edges.
  1.1170 +// Usage:
  1.1171 +//  for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) {
  1.1172 +//    Node* y = x->last_out(i);
  1.1173 +//    ...
  1.1174 +//  }
  1.1175 +// Compiles in product mode to raw Node** pointer arithmetic, with
  1.1176 +// no reloading of pointers from the original node x.
class DUIterator_Last : private DUIterator_Fast {
  friend class Node;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_step(uint num_edges);
  #endif

  // Note:  offset must be signed, since -1 is sometimes passed
  DUIterator_Last(const Node* node, ptrdiff_t offset)
    : DUIterator_Fast(node, offset) { }

  // Forward iteration is deliberately inaccessible (private no-op stubs):
  // this iterator only walks backward, from last_outs() down to imin.
  void operator++(int dummy_to_specify_postfix_op) {} // do not use
  void operator<(int)                              {} // do not use

 public:
  DUIterator_Last() { }
  // initialize to garbage

  void operator--()
    { _outp--;              VDUI_ONLY(verify_step(1));  }

  void operator-=(uint n)
    { _outp -= n;           VDUI_ONLY(verify_step(n));  }

  bool operator>=(DUIterator_Last& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp >= limit._outp;
  }

  void operator=(const DUIterator_Last& that)
    { DUIterator_Fast::operator=(that); }
};

DUIterator_Last Node::last_outs(DUIterator_Last& imin) const {
  // Assign a limit pointer to the reference argument:
  imin = DUIterator_Last(this, 0);
  // Return the initial pointer:
  return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);
}
Node* Node::last_out(DUIterator_Last& i) const {
  I_VDUI_ONLY(i, i.verify(this));
  return debug_only(i._last=) *i._outp;
}
  1.1223 +
  1.1224 +#endif //OPTO_DU_ITERATOR_ASSERT
  1.1225 +
  1.1226 +#undef I_VDUI_ONLY
  1.1227 +#undef VDUI_ONLY
  1.1228 +
  1.1229 +
  1.1230 +//-----------------------------------------------------------------------------
  1.1231 +// Map dense integer indices to Nodes.  Uses classic doubling-array trick.
  1.1232 +// Abstractly provides an infinite array of Node*'s, initialized to NULL.
  1.1233 +// Note that the constructor just zeros things, and since I use Arena
  1.1234 +// allocation I do not need a destructor to reclaim storage.
  1.1235 +class Node_Array : public ResourceObj {
  1.1236 +protected:
  1.1237 +  Arena *_a;                    // Arena to allocate in
  1.1238 +  uint   _max;
  1.1239 +  Node **_nodes;
  1.1240 +  void   grow( uint i );        // Grow array node to fit
  1.1241 +public:
  1.1242 +  Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {
  1.1243 +    _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize );
  1.1244 +    for( int i = 0; i < OptoNodeListSize; i++ ) {
  1.1245 +      _nodes[i] = NULL;
  1.1246 +    }
  1.1247 +  }
  1.1248 +
  1.1249 +  Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}
  1.1250 +  Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped
  1.1251 +  { return (i<_max) ? _nodes[i] : (Node*)NULL; }
  1.1252 +  Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }
  1.1253 +  Node **adr() { return _nodes; }
  1.1254 +  // Extend the mapping: index i maps to Node *n.
  1.1255 +  void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; }
  1.1256 +  void insert( uint i, Node *n );
  1.1257 +  void remove( uint i );        // Remove, preserving order
  1.1258 +  void sort( C_sort_func_t func);
  1.1259 +  void reset( Arena *new_a );   // Zap mapping to empty; reclaim storage
  1.1260 +  void clear();                 // Set all entries to NULL, keep storage
  1.1261 +  uint Size() const { return _max; }
  1.1262 +  void dump() const;
  1.1263 +};
  1.1264 +
  1.1265 +class Node_List : public Node_Array {
  1.1266 +  uint _cnt;
  1.1267 +public:
  1.1268 +  Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {}
  1.1269 +  Node_List(Arena *a) : Node_Array(a), _cnt(0) {}
  1.1270 +  void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; }
  1.1271 +  void remove( uint i ) { Node_Array::remove(i); _cnt--; }
  1.1272 +  void push( Node *b ) { map(_cnt++,b); }
  1.1273 +  void yank( Node *n );         // Find and remove
  1.1274 +  Node *pop() { return _nodes[--_cnt]; }
  1.1275 +  Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}
  1.1276 +  void clear() { _cnt = 0; Node_Array::clear(); } // retain storage
  1.1277 +  uint size() const { return _cnt; }
  1.1278 +  void dump() const;
  1.1279 +};
  1.1280 +
  1.1281 +//------------------------------Unique_Node_List-------------------------------
class Unique_Node_List : public Node_List {
  VectorSet _in_worklist;       // One membership bit per node _idx on the list
  uint _clock_index;            // Index in list where to pop from next
public:
  Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {}
  Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {}

  void remove( Node *n );
  bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; }
  VectorSet &member_set(){ return _in_worklist; }

  // Push b only if it is not already a member.
  void push( Node *b ) {
    if( !_in_worklist.test_set(b->_idx) )
      Node_List::push(b);
  }
  // Round-robin ("clock") pop: take the element at _clock_index (wrapping
  // at the end) and backfill its slot with the last element of the list.
  Node *pop() {
    if( _clock_index >= size() ) _clock_index = 0;
    Node *b = at(_clock_index);
    map( _clock_index++, Node_List::pop());
    _in_worklist >>= b->_idx;
    return b;
  }
  // Remove the element at index i, backfilling with the last element,
  // and drop its membership bit.
  Node *remove( uint i ) {
    Node *b = Node_List::at(i);
    _in_worklist >>= b->_idx;
    map(i,Node_List::pop());
    return b;
  }
  void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); }
  void  clear() {
    _in_worklist.Clear();        // Discards storage but grows automatically
    Node_List::clear();
    _clock_index = 0;
  }

  // Used after parsing to remove useless nodes before Iterative GVN
  void remove_useless_nodes(VectorSet &useful);

#ifndef PRODUCT
  void print_set() const { _in_worklist.print(); }
#endif
};
  1.1324 +
  1.1325 +// Inline definition of Compile::record_for_igvn must be deferred to this point.
inline void Compile::record_for_igvn(Node* n) {
  // Queue n for the next round of iterative GVN, and also record it
  // for escape analysis.
  _for_igvn->push(n);
  record_for_escape_analysis(n);
}
  1.1330 +
  1.1331 +//------------------------------Node_Stack-------------------------------------
  1.1332 +class Node_Stack {
  1.1333 +protected:
  1.1334 +  struct INode {
  1.1335 +    Node *node; // Processed node
  1.1336 +    uint  indx; // Index of next node's child
  1.1337 +  };
  1.1338 +  INode *_inode_top; // tos, stack grows up
  1.1339 +  INode *_inode_max; // End of _inodes == _inodes + _max
  1.1340 +  INode *_inodes;    // Array storage for the stack
  1.1341 +  Arena *_a;         // Arena to allocate in
  1.1342 +  void grow();
  1.1343 +public:
  1.1344 +  Node_Stack(int size) {
  1.1345 +    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
  1.1346 +    _a = Thread::current()->resource_area();
  1.1347 +    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
  1.1348 +    _inode_max = _inodes + max;
  1.1349 +    _inode_top = _inodes - 1; // stack is empty
  1.1350 +  }
  1.1351 +
  1.1352 +  Node_Stack(Arena *a, int size) : _a(a) {
  1.1353 +    size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize;
  1.1354 +    _inodes = NEW_ARENA_ARRAY( _a, INode, max );
  1.1355 +    _inode_max = _inodes + max;
  1.1356 +    _inode_top = _inodes - 1; // stack is empty
  1.1357 +  }
  1.1358 +
  1.1359 +  void pop() {
  1.1360 +    assert(_inode_top >= _inodes, "node stack underflow");
  1.1361 +    --_inode_top;
  1.1362 +  }
  1.1363 +  void push(Node *n, uint i) {
  1.1364 +    ++_inode_top;
  1.1365 +    if (_inode_top >= _inode_max) grow();
  1.1366 +    INode *top = _inode_top; // optimization
  1.1367 +    top->node = n;
  1.1368 +    top->indx = i;
  1.1369 +  }
  1.1370 +  Node *node() const {
  1.1371 +    return _inode_top->node;
  1.1372 +  }
  1.1373 +  Node* node_at(uint i) const {
  1.1374 +    assert(_inodes + i <= _inode_top, "in range");
  1.1375 +    return _inodes[i].node;
  1.1376 +  }
  1.1377 +  uint index() const {
  1.1378 +    return _inode_top->indx;
  1.1379 +  }
  1.1380 +  void set_node(Node *n) {
  1.1381 +    _inode_top->node = n;
  1.1382 +  }
  1.1383 +  void set_index(uint i) {
  1.1384 +    _inode_top->indx = i;
  1.1385 +  }
  1.1386 +  uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes,  sizeof(INode)); } // Max size
  1.1387 +  uint size() const { return (uint)pointer_delta(_inode_top, _inodes,  sizeof(INode)) + 1; } // Current size
  1.1388 +  bool is_nonempty() const { return (_inode_top >= _inodes); }
  1.1389 +  bool is_empty() const { return (_inode_top < _inodes); }
  1.1390 +  void clear() { _inode_top = _inodes - 1; } // retain storage
  1.1391 +};
  1.1392 +
  1.1393 +
  1.1394 +//-----------------------------Node_Notes--------------------------------------
  1.1395 +// Debugging or profiling annotations loosely and sparsely associated
  1.1396 +// with some nodes.  See Compile::node_notes_at for the accessor.
  1.1397 +class Node_Notes VALUE_OBJ_CLASS_SPEC {
  1.1398 +  JVMState* _jvms;
  1.1399 +
  1.1400 +public:
  1.1401 +  Node_Notes(JVMState* jvms = NULL) {
  1.1402 +    _jvms = jvms;
  1.1403 +  }
  1.1404 +
  1.1405 +  JVMState* jvms()            { return _jvms; }
  1.1406 +  void  set_jvms(JVMState* x) {        _jvms = x; }
  1.1407 +
  1.1408 +  // True if there is nothing here.
  1.1409 +  bool is_clear() {
  1.1410 +    return (_jvms == NULL);
  1.1411 +  }
  1.1412 +
  1.1413 +  // Make there be nothing here.
  1.1414 +  void clear() {
  1.1415 +    _jvms = NULL;
  1.1416 +  }
  1.1417 +
  1.1418 +  // Make a new, clean node notes.
  1.1419 +  static Node_Notes* make(Compile* C) {
  1.1420 +    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
  1.1421 +    nn->clear();
  1.1422 +    return nn;
  1.1423 +  }
  1.1424 +
  1.1425 +  Node_Notes* clone(Compile* C) {
  1.1426 +    Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1);
  1.1427 +    (*nn) = (*this);
  1.1428 +    return nn;
  1.1429 +  }
  1.1430 +
  1.1431 +  // Absorb any information from source.
  1.1432 +  bool update_from(Node_Notes* source) {
  1.1433 +    bool changed = false;
  1.1434 +    if (source != NULL) {
  1.1435 +      if (source->jvms() != NULL) {
  1.1436 +        set_jvms(source->jvms());
  1.1437 +        changed = true;
  1.1438 +      }
  1.1439 +    }
  1.1440 +    return changed;
  1.1441 +  }
  1.1442 +};
  1.1443 +
// Inlined accessors for Compile::node_nodes that require the preceding class:
// Find the Node_Notes slot for node index idx in the two-level sparse array
// 'arr' (each element of arr is a block of _node_notes_block_size slots).
// Returns NULL if the block is absent and can_grow is false; otherwise grows
// the outer array as needed and returns a pointer into the proper block.
inline Node_Notes*
Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr,
                           int idx, bool can_grow) {
  assert(idx >= 0, "oob");
  // High bits of idx select the block; low bits select the slot within it.
  int block_idx = (idx >> _log2_node_notes_block_size);
  int grow_by = (block_idx - (arr == NULL? 0: arr->length()));
  if (grow_by >= 0) {
    // Block does not exist yet.
    if (!can_grow)  return NULL;
    // NOTE(review): grow_node_notes presumably handles/forbids arr == NULL
    // when growing — its body is not visible here; verify at the definition.
    grow_node_notes(arr, grow_by + 1);
  }
  // (Every element of arr is a sub-array of length _node_notes_block_size.)
  return arr->at(block_idx) + (idx & (_node_notes_block_size-1));
}
  1.1458 +
// Merge 'value' into the notes slot for node index idx, growing the note
// array on demand.  Returns true iff the stored notes actually changed.
inline bool
Compile::set_node_notes_at(int idx, Node_Notes* value) {
  if (value == NULL || value->is_clear())
    return false;  // nothing to write => write nothing
  // can_grow == true, so a slot is guaranteed (asserted below).
  Node_Notes* loc = locate_node_notes(_node_note_array, idx, true);
  assert(loc != NULL, "");
  return loc->update_from(value);
}
  1.1467 +
  1.1468 +
//------------------------------TypeNode---------------------------------------
// Node with a Type constant.
class TypeNode : public Node {
protected:
  virtual uint hash() const;    // Check the type
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // declared const; mutated only via set_type below
public:
  // Replace the node's type in place.  In debug builds (when
  // VerifyHashTableKeys is on and the node holds a hash lock) asserts that
  // the change does not alter the node's hash, so hash-table residence
  // stays valid without a rehash.
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
  TypeNode( const Type *t, uint required ) : Node(required), _type(t) {
    init_class_id(Class_Type);
  }
  // Overrides defined out of line (see the corresponding .cpp).
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const;
  virtual       uint  ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

mercurial