Mon, 24 Nov 2014 07:29:03 -0800
8058148: MaxNodeLimit and LiveNodeCountInliningCutoff
Reviewed-by: kvn, roland
duke@435 | 1 | /* |
coleenp@5614 | 2 | * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_OPTO_NODE_HPP |
stefank@2314 | 26 | #define SHARE_VM_OPTO_NODE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "libadt/port.hpp" |
stefank@2314 | 29 | #include "libadt/vectset.hpp" |
stefank@2314 | 30 | #include "opto/compile.hpp" |
stefank@2314 | 31 | #include "opto/type.hpp" |
stefank@2314 | 32 | |
duke@435 | 33 | // Portions of code courtesy of Clifford Click |
duke@435 | 34 | |
duke@435 | 35 | // Optimization - Graph Style |
duke@435 | 36 | |
duke@435 | 37 | |
duke@435 | 38 | class AbstractLockNode; |
duke@435 | 39 | class AddNode; |
duke@435 | 40 | class AddPNode; |
duke@435 | 41 | class AliasInfo; |
duke@435 | 42 | class AllocateArrayNode; |
duke@435 | 43 | class AllocateNode; |
duke@435 | 44 | class Block; |
duke@435 | 45 | class BoolNode; |
duke@435 | 46 | class BoxLockNode; |
duke@435 | 47 | class CMoveNode; |
duke@435 | 48 | class CallDynamicJavaNode; |
duke@435 | 49 | class CallJavaNode; |
duke@435 | 50 | class CallLeafNode; |
duke@435 | 51 | class CallNode; |
duke@435 | 52 | class CallRuntimeNode; |
duke@435 | 53 | class CallStaticJavaNode; |
duke@435 | 54 | class CatchNode; |
duke@435 | 55 | class CatchProjNode; |
duke@435 | 56 | class CheckCastPPNode; |
kvn@1535 | 57 | class ClearArrayNode; |
duke@435 | 58 | class CmpNode; |
duke@435 | 59 | class CodeBuffer; |
duke@435 | 60 | class ConstraintCastNode; |
duke@435 | 61 | class ConNode; |
duke@435 | 62 | class CountedLoopNode; |
duke@435 | 63 | class CountedLoopEndNode; |
roland@4159 | 64 | class DecodeNarrowPtrNode; |
kvn@603 | 65 | class DecodeNNode; |
roland@4159 | 66 | class DecodeNKlassNode; |
roland@4159 | 67 | class EncodeNarrowPtrNode; |
kvn@603 | 68 | class EncodePNode; |
roland@4159 | 69 | class EncodePKlassNode; |
duke@435 | 70 | class FastLockNode; |
duke@435 | 71 | class FastUnlockNode; |
duke@435 | 72 | class IfNode; |
kvn@3040 | 73 | class IfFalseNode; |
kvn@3040 | 74 | class IfTrueNode; |
duke@435 | 75 | class InitializeNode; |
duke@435 | 76 | class JVMState; |
duke@435 | 77 | class JumpNode; |
duke@435 | 78 | class JumpProjNode; |
duke@435 | 79 | class LoadNode; |
duke@435 | 80 | class LoadStoreNode; |
duke@435 | 81 | class LockNode; |
duke@435 | 82 | class LoopNode; |
kvn@3051 | 83 | class MachBranchNode; |
duke@435 | 84 | class MachCallDynamicJavaNode; |
duke@435 | 85 | class MachCallJavaNode; |
duke@435 | 86 | class MachCallLeafNode; |
duke@435 | 87 | class MachCallNode; |
duke@435 | 88 | class MachCallRuntimeNode; |
duke@435 | 89 | class MachCallStaticJavaNode; |
twisti@2350 | 90 | class MachConstantBaseNode; |
twisti@2350 | 91 | class MachConstantNode; |
kvn@3040 | 92 | class MachGotoNode; |
duke@435 | 93 | class MachIfNode; |
duke@435 | 94 | class MachNode; |
duke@435 | 95 | class MachNullCheckNode; |
kvn@3040 | 96 | class MachProjNode; |
duke@435 | 97 | class MachReturnNode; |
duke@435 | 98 | class MachSafePointNode; |
duke@435 | 99 | class MachSpillCopyNode; |
duke@435 | 100 | class MachTempNode; |
duke@435 | 101 | class Matcher; |
duke@435 | 102 | class MemBarNode; |
roland@3392 | 103 | class MemBarStoreStoreNode; |
duke@435 | 104 | class MemNode; |
duke@435 | 105 | class MergeMemNode; |
kvn@3882 | 106 | class MulNode; |
duke@435 | 107 | class MultiNode; |
duke@435 | 108 | class MultiBranchNode; |
duke@435 | 109 | class NeverBranchNode; |
duke@435 | 110 | class Node; |
duke@435 | 111 | class Node_Array; |
duke@435 | 112 | class Node_List; |
duke@435 | 113 | class Node_Stack; |
duke@435 | 114 | class NullCheckNode; |
duke@435 | 115 | class OopMap; |
kvn@468 | 116 | class ParmNode; |
duke@435 | 117 | class PCTableNode; |
duke@435 | 118 | class PhaseCCP; |
duke@435 | 119 | class PhaseGVN; |
duke@435 | 120 | class PhaseIterGVN; |
duke@435 | 121 | class PhaseRegAlloc; |
duke@435 | 122 | class PhaseTransform; |
duke@435 | 123 | class PhaseValues; |
duke@435 | 124 | class PhiNode; |
duke@435 | 125 | class Pipeline; |
duke@435 | 126 | class ProjNode; |
duke@435 | 127 | class RegMask; |
duke@435 | 128 | class RegionNode; |
duke@435 | 129 | class RootNode; |
duke@435 | 130 | class SafePointNode; |
kvn@498 | 131 | class SafePointScalarObjectNode; |
duke@435 | 132 | class StartNode; |
duke@435 | 133 | class State; |
duke@435 | 134 | class StoreNode; |
duke@435 | 135 | class SubNode; |
duke@435 | 136 | class Type; |
duke@435 | 137 | class TypeNode; |
duke@435 | 138 | class UnlockNode; |
kvn@3040 | 139 | class VectorNode; |
kvn@3882 | 140 | class LoadVectorNode; |
kvn@3882 | 141 | class StoreVectorNode; |
duke@435 | 142 | class VectorSet; |
duke@435 | 143 | typedef void (*NFunc)(Node&,void*); |
duke@435 | 144 | extern "C" { |
duke@435 | 145 | typedef int (*C_sort_func_t)(const void *, const void *); |
duke@435 | 146 | } |
duke@435 | 147 | |
duke@435 | 148 | // The type of all node counts and indexes. |
duke@435 | 149 | // It must hold at least 16 bits, but must also be fast to load and store. |
duke@435 | 150 | // This type, if less than 32 bits, could limit the number of possible nodes. |
duke@435 | 151 | // (To make this type platform-specific, move to globalDefinitions_xxx.hpp.) |
duke@435 | 152 | typedef unsigned int node_idx_t; |
duke@435 | 153 | |
duke@435 | 154 | |
duke@435 | 155 | #ifndef OPTO_DU_ITERATOR_ASSERT |
duke@435 | 156 | #ifdef ASSERT |
duke@435 | 157 | #define OPTO_DU_ITERATOR_ASSERT 1 |
duke@435 | 158 | #else |
duke@435 | 159 | #define OPTO_DU_ITERATOR_ASSERT 0 |
duke@435 | 160 | #endif |
duke@435 | 161 | #endif //OPTO_DU_ITERATOR_ASSERT |
duke@435 | 162 | |
duke@435 | 163 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 164 | class DUIterator; |
duke@435 | 165 | class DUIterator_Fast; |
duke@435 | 166 | class DUIterator_Last; |
duke@435 | 167 | #else |
duke@435 | 168 | typedef uint DUIterator; |
duke@435 | 169 | typedef Node** DUIterator_Fast; |
duke@435 | 170 | typedef Node** DUIterator_Last; |
duke@435 | 171 | #endif |
duke@435 | 172 | |
duke@435 | 173 | // Node Sentinel |
duke@435 | 174 | #define NodeSentinel (Node*)-1 |
duke@435 | 175 | |
duke@435 | 176 | // Unknown count frequency |
duke@435 | 177 | #define COUNT_UNKNOWN (-1.0f) |
duke@435 | 178 | |
duke@435 | 179 | //------------------------------Node------------------------------------------- |
duke@435 | 180 | // Nodes define actions in the program. They create values, which have types. |
duke@435 | 181 | // They are both vertices in a directed graph and program primitives. Nodes |
duke@435 | 182 | // are labeled; the label is the "opcode", the primitive function in the lambda |
duke@435 | 183 | // calculus sense that gives meaning to the Node. Node inputs are ordered (so |
duke@435 | 184 | // that "a-b" is different from "b-a"). The inputs to a Node are the inputs to |
duke@435 | 185 | // the Node's function. These inputs also define a Type equation for the Node. |
duke@435 | 186 | // Solving these Type equations amounts to doing dataflow analysis. |
duke@435 | 187 | // Control and data are uniformly represented in the graph. Finally, Nodes |
duke@435 | 188 | // have a unique dense integer index which is used to index into side arrays |
duke@435 | 189 | // whenever I have phase-specific information. |
duke@435 | 190 | |
duke@435 | 191 | class Node { |
never@3138 | 192 | friend class VMStructs; |
never@3138 | 193 | |
duke@435 | 194 | // Lots of restrictions on cloning Nodes |
duke@435 | 195 | Node(const Node&); // not defined; linker error to use these |
duke@435 | 196 | Node &operator=(const Node &rhs); |
duke@435 | 197 | |
duke@435 | 198 | public: |
duke@435 | 199 | friend class Compile; |
duke@435 | 200 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 201 | friend class DUIterator_Common; |
duke@435 | 202 | friend class DUIterator; |
duke@435 | 203 | friend class DUIterator_Fast; |
duke@435 | 204 | friend class DUIterator_Last; |
duke@435 | 205 | #endif |
duke@435 | 206 | |
duke@435 | 207 | // Because Nodes come and go, I define an Arena of Node structures to pull |
duke@435 | 208 | // from. This should allow fast access to node creation & deletion. This |
duke@435 | 209 | // field is a local cache of a value defined in some "program fragment" of |
duke@435 | 210 | // which these Nodes are just a part. |
duke@435 | 211 | |
duke@435 | 212 | // New Operator that takes a Compile pointer, this will eventually |
duke@435 | 213 | // be the "new" New operator. |
coleenp@5614 | 214 | inline void* operator new( size_t x, Compile* C) throw() { |
duke@435 | 215 | Node* n = (Node*)C->node_arena()->Amalloc_D(x); |
duke@435 | 216 | #ifdef ASSERT |
duke@435 | 217 | n->_in = (Node**)n; // magic cookie for assertion check |
duke@435 | 218 | #endif |
duke@435 | 219 | n->_out = (Node**)C; |
duke@435 | 220 | return (void*)n; |
duke@435 | 221 | } |
duke@435 | 222 | |
duke@435 | 223 | // Delete is a NOP |
duke@435 | 224 | void operator delete( void *ptr ) {} |
duke@435 | 225 | // Fancy destructor; eagerly attempt to reclaim Node numberings and storage |
duke@435 | 226 | void destruct(); |
duke@435 | 227 | |
duke@435 | 228 | // Create a new Node. Required is the number of inputs required for |
duke@435 | 229 | // semantic correctness. |
duke@435 | 230 | Node( uint required ); |
duke@435 | 231 | |
duke@435 | 232 | // Create a new Node with given input edges. |
duke@435 | 233 | // This version requires use of the "edge-count" new. |
duke@435 | 234 | // E.g. new (C,3) FooNode( C, NULL, left, right ); |
duke@435 | 235 | Node( Node *n0 ); |
duke@435 | 236 | Node( Node *n0, Node *n1 ); |
duke@435 | 237 | Node( Node *n0, Node *n1, Node *n2 ); |
duke@435 | 238 | Node( Node *n0, Node *n1, Node *n2, Node *n3 ); |
duke@435 | 239 | Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4 ); |
duke@435 | 240 | Node( Node *n0, Node *n1, Node *n2, Node *n3, Node *n4, Node *n5 ); |
duke@435 | 241 | Node( Node *n0, Node *n1, Node *n2, Node *n3, |
duke@435 | 242 | Node *n4, Node *n5, Node *n6 ); |
duke@435 | 243 | |
duke@435 | 244 | // Clone an inherited Node given only the base Node type. |
duke@435 | 245 | Node* clone() const; |
duke@435 | 246 | |
duke@435 | 247 | // Clone a Node, immediately supplying one or two new edges. |
duke@435 | 248 | // The first and second arguments, if non-null, replace in(1) and in(2), |
duke@435 | 249 | // respectively. |
duke@435 | 250 | Node* clone_with_data_edge(Node* in1, Node* in2 = NULL) const { |
duke@435 | 251 | Node* nn = clone(); |
duke@435 | 252 | if (in1 != NULL) nn->set_req(1, in1); |
duke@435 | 253 | if (in2 != NULL) nn->set_req(2, in2); |
duke@435 | 254 | return nn; |
duke@435 | 255 | } |
duke@435 | 256 | |
duke@435 | 257 | private: |
duke@435 | 258 | // Shared setup for the above constructors. |
duke@435 | 259 | // Handles all interactions with Compile::current. |
duke@435 | 260 | // Puts initial values in all Node fields except _idx. |
duke@435 | 261 | // Returns the initial value for _idx, which cannot |
duke@435 | 262 | // be initialized by assignment. |
duke@435 | 263 | inline int Init(int req, Compile* C); |
duke@435 | 264 | |
duke@435 | 265 | //----------------- input edge handling |
duke@435 | 266 | protected: |
duke@435 | 267 | friend class PhaseCFG; // Access to address of _in array elements |
duke@435 | 268 | Node **_in; // Array of use-def references to Nodes |
duke@435 | 269 | Node **_out; // Array of def-use references to Nodes |
duke@435 | 270 | |
twisti@1040 | 271 | // Input edges are split into two categories. Required edges are required |
duke@435 | 272 | // for semantic correctness; order is important and NULLs are allowed. |
duke@435 | 273 | // Precedence edges are used to help determine execution order and are |
duke@435 | 274 | // added, e.g., for scheduling purposes. They are unordered and not |
duke@435 | 275 | // duplicated; they have no embedded NULLs. Edges from 0 to _cnt-1 |
duke@435 | 276 | // are required, from _cnt to _max-1 are precedence edges. |
duke@435 | 277 | node_idx_t _cnt; // Total number of required Node inputs. |
duke@435 | 278 | |
duke@435 | 279 | node_idx_t _max; // Actual length of input array. |
duke@435 | 280 | |
duke@435 | 281 | // Output edges are an unordered list of def-use edges which exactly |
duke@435 | 282 | // correspond to required input edges which point from other nodes |
duke@435 | 283 | // to this one. Thus the count of the output edges is the number of |
duke@435 | 284 | // users of this node. |
duke@435 | 285 | node_idx_t _outcnt; // Total number of Node outputs. |
duke@435 | 286 | |
duke@435 | 287 | node_idx_t _outmax; // Actual length of output array. |
duke@435 | 288 | |
duke@435 | 289 | // Grow the actual input array to the next larger power-of-2 bigger than len. |
duke@435 | 290 | void grow( uint len ); |
duke@435 | 291 | // Grow the output array to the next larger power-of-2 bigger than len. |
duke@435 | 292 | void out_grow( uint len ); |
duke@435 | 293 | |
duke@435 | 294 | public: |
duke@435 | 295 | // Each Node is assigned a unique small/dense number. This number is used |
duke@435 | 296 | // to index into auxiliary arrays of data and bitvectors. |
duke@435 | 297 | // It is declared const to defend against inadvertent assignment, |
duke@435 | 298 | // since it is used by clients as a naked field. |
duke@435 | 299 | const node_idx_t _idx; |
duke@435 | 300 | |
duke@435 | 301 | // Get the (read-only) number of input edges |
duke@435 | 302 | uint req() const { return _cnt; } |
duke@435 | 303 | uint len() const { return _max; } |
duke@435 | 304 | // Get the (read-only) number of output edges |
duke@435 | 305 | uint outcnt() const { return _outcnt; } |
duke@435 | 306 | |
duke@435 | 307 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 308 | // Iterate over the out-edges of this node. Deletions are illegal. |
duke@435 | 309 | inline DUIterator outs() const; |
duke@435 | 310 | // Use this when the out array might have changed to suppress asserts. |
duke@435 | 311 | inline DUIterator& refresh_out_pos(DUIterator& i) const; |
duke@435 | 312 | // Does the node have an out at this position? (Used for iteration.) |
duke@435 | 313 | inline bool has_out(DUIterator& i) const; |
duke@435 | 314 | inline Node* out(DUIterator& i) const; |
duke@435 | 315 | // Iterate over the out-edges of this node. All changes are illegal. |
duke@435 | 316 | inline DUIterator_Fast fast_outs(DUIterator_Fast& max) const; |
duke@435 | 317 | inline Node* fast_out(DUIterator_Fast& i) const; |
duke@435 | 318 | // Iterate over the out-edges of this node, deleting one at a time. |
duke@435 | 319 | inline DUIterator_Last last_outs(DUIterator_Last& min) const; |
duke@435 | 320 | inline Node* last_out(DUIterator_Last& i) const; |
duke@435 | 321 | // The inline bodies of all these methods are after the iterator definitions. |
duke@435 | 322 | #else |
duke@435 | 323 | // Iterate over the out-edges of this node. Deletions are illegal. |
duke@435 | 324 | // This iteration uses integral indexes, to decouple from array reallocations. |
duke@435 | 325 | DUIterator outs() const { return 0; } |
duke@435 | 326 | // Use this when the out array might have changed to suppress asserts. |
duke@435 | 327 | DUIterator refresh_out_pos(DUIterator i) const { return i; } |
duke@435 | 328 | |
duke@435 | 329 | // Reference to the i'th output Node. Error if out of bounds. |
duke@435 | 330 | Node* out(DUIterator i) const { assert(i < _outcnt, "oob"); return _out[i]; } |
duke@435 | 331 | // Does the node have an out at this position? (Used for iteration.) |
duke@435 | 332 | bool has_out(DUIterator i) const { return i < _outcnt; } |
duke@435 | 333 | |
duke@435 | 334 | // Iterate over the out-edges of this node. All changes are illegal. |
duke@435 | 335 | // This iteration uses a pointer internal to the out array. |
duke@435 | 336 | DUIterator_Fast fast_outs(DUIterator_Fast& max) const { |
duke@435 | 337 | Node** out = _out; |
duke@435 | 338 | // Assign a limit pointer to the reference argument: |
duke@435 | 339 | max = out + (ptrdiff_t)_outcnt; |
duke@435 | 340 | // Return the base pointer: |
duke@435 | 341 | return out; |
duke@435 | 342 | } |
duke@435 | 343 | Node* fast_out(DUIterator_Fast i) const { return *i; } |
duke@435 | 344 | // Iterate over the out-edges of this node, deleting one at a time. |
duke@435 | 345 | // This iteration uses a pointer internal to the out array. |
duke@435 | 346 | DUIterator_Last last_outs(DUIterator_Last& min) const { |
duke@435 | 347 | Node** out = _out; |
duke@435 | 348 | // Assign a limit pointer to the reference argument: |
duke@435 | 349 | min = out; |
duke@435 | 350 | // Return the pointer to the start of the iteration: |
duke@435 | 351 | return out + (ptrdiff_t)_outcnt - 1; |
duke@435 | 352 | } |
duke@435 | 353 | Node* last_out(DUIterator_Last i) const { return *i; } |
duke@435 | 354 | #endif |
duke@435 | 355 | |
duke@435 | 356 | // Reference to the i'th input Node. Error if out of bounds. |
kvn@3971 | 357 | Node* in(uint i) const { assert(i < _max, err_msg_res("oob: i=%d, _max=%d", i, _max)); return _in[i]; } |
goetz@6478 | 358 | // Reference to the i'th input Node. NULL if out of bounds. |
goetz@6478 | 359 | Node* lookup(uint i) const { return ((i < _max) ? _in[i] : NULL); } |
duke@435 | 360 | // Reference to the i'th output Node. Error if out of bounds. |
duke@435 | 361 | // Use this accessor sparingly. We are trying to use iterators instead. |
duke@435 | 362 | Node* raw_out(uint i) const { assert(i < _outcnt,"oob"); return _out[i]; } |
duke@435 | 363 | // Return the unique out edge. |
duke@435 | 364 | Node* unique_out() const { assert(_outcnt==1,"not unique"); return _out[0]; } |
duke@435 | 365 | // Delete out edge at position 'i' by moving last out edge to position 'i' |
duke@435 | 366 | void raw_del_out(uint i) { |
duke@435 | 367 | assert(i < _outcnt,"oob"); |
duke@435 | 368 | assert(_outcnt > 0,"oob"); |
duke@435 | 369 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 370 | // Record that a change happened here. |
duke@435 | 371 | debug_only(_last_del = _out[i]; ++_del_tick); |
duke@435 | 372 | #endif |
duke@435 | 373 | _out[i] = _out[--_outcnt]; |
duke@435 | 374 | // Smash the old edge so it can't be used accidentally. |
duke@435 | 375 | debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); |
duke@435 | 376 | } |
duke@435 | 377 | |
duke@435 | 378 | #ifdef ASSERT |
duke@435 | 379 | bool is_dead() const; |
duke@435 | 380 | #define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead())) |
duke@435 | 381 | #endif |
roland@4589 | 382 | // Check whether node has become unreachable |
roland@4589 | 383 | bool is_unreachable(PhaseIterGVN &igvn) const; |
duke@435 | 384 | |
duke@435 | 385 | // Set a required input edge, also updates corresponding output edge |
duke@435 | 386 | void add_req( Node *n ); // Append a NEW required input |
goetz@6478 | 387 | void add_req( Node *n0, Node *n1 ) { |
goetz@6478 | 388 | add_req(n0); add_req(n1); } |
goetz@6478 | 389 | void add_req( Node *n0, Node *n1, Node *n2 ) { |
goetz@6478 | 390 | add_req(n0); add_req(n1); add_req(n2); } |
duke@435 | 391 | void add_req_batch( Node* n, uint m ); // Append m NEW required inputs (all n). |
duke@435 | 392 | void del_req( uint idx ); // Delete required edge & compact |
kvn@5626 | 393 | void del_req_ordered( uint idx ); // Delete required edge & compact with preserved order |
duke@435 | 394 | void ins_req( uint i, Node *n ); // Insert a NEW required input |
duke@435 | 395 | void set_req( uint i, Node *n ) { |
duke@435 | 396 | assert( is_not_dead(n), "can not use dead node"); |
kvn@3971 | 397 | assert( i < _cnt, err_msg_res("oob: i=%d, _cnt=%d", i, _cnt)); |
duke@435 | 398 | assert( !VerifyHashTableKeys || _hash_lock == 0, |
duke@435 | 399 | "remove node from hash table before modifying it"); |
duke@435 | 400 | Node** p = &_in[i]; // cache this._in, across the del_out call |
duke@435 | 401 | if (*p != NULL) (*p)->del_out((Node *)this); |
duke@435 | 402 | (*p) = n; |
duke@435 | 403 | if (n != NULL) n->add_out((Node *)this); |
duke@435 | 404 | } |
duke@435 | 405 | // Light version of set_req() to init inputs after node creation. |
duke@435 | 406 | void init_req( uint i, Node *n ) { |
duke@435 | 407 | assert( i == 0 && this == n || |
duke@435 | 408 | is_not_dead(n), "can not use dead node"); |
duke@435 | 409 | assert( i < _cnt, "oob"); |
duke@435 | 410 | assert( !VerifyHashTableKeys || _hash_lock == 0, |
duke@435 | 411 | "remove node from hash table before modifying it"); |
duke@435 | 412 | assert( _in[i] == NULL, "sanity"); |
duke@435 | 413 | _in[i] = n; |
duke@435 | 414 | if (n != NULL) n->add_out((Node *)this); |
duke@435 | 415 | } |
duke@435 | 416 | // Find first occurrence of n among my edges: |
duke@435 | 417 | int find_edge(Node* n); |
duke@435 | 418 | int replace_edge(Node* old, Node* neww); |
kvn@5110 | 419 | int replace_edges_in_range(Node* old, Node* neww, int start, int end); |
duke@435 | 420 | // NULL out all inputs to eliminate incoming Def-Use edges. |
duke@435 | 421 | // Return the number of edges between 'n' and 'this' |
bharadwaj@4315 | 422 | int disconnect_inputs(Node *n, Compile *c); |
duke@435 | 423 | |
duke@435 | 424 | // Quickly, return true if and only if I am Compile::current()->top(). |
duke@435 | 425 | bool is_top() const { |
duke@435 | 426 | assert((this == (Node*) Compile::current()->top()) == (_out == NULL), ""); |
duke@435 | 427 | return (_out == NULL); |
duke@435 | 428 | } |
duke@435 | 429 | // Reaffirm invariants for is_top. (Only from Compile::set_cached_top_node.) |
duke@435 | 430 | void setup_is_top(); |
duke@435 | 431 | |
duke@435 | 432 | // Strip away casting. (It is depth-limited.) |
duke@435 | 433 | Node* uncast() const; |
kvn@3407 | 434 | // Return whether two Nodes are equivalent, after stripping casting. |
kvn@3407 | 435 | bool eqv_uncast(const Node* n) const { |
kvn@3407 | 436 | return (this->uncast() == n->uncast()); |
kvn@3407 | 437 | } |
duke@435 | 438 | |
duke@435 | 439 | private: |
duke@435 | 440 | static Node* uncast_helper(const Node* n); |
duke@435 | 441 | |
duke@435 | 442 | // Add an output edge to the end of the list |
duke@435 | 443 | void add_out( Node *n ) { |
duke@435 | 444 | if (is_top()) return; |
duke@435 | 445 | if( _outcnt == _outmax ) out_grow(_outcnt); |
duke@435 | 446 | _out[_outcnt++] = n; |
duke@435 | 447 | } |
duke@435 | 448 | // Delete an output edge |
duke@435 | 449 | void del_out( Node *n ) { |
duke@435 | 450 | if (is_top()) return; |
duke@435 | 451 | Node** outp = &_out[_outcnt]; |
duke@435 | 452 | // Find and remove n |
duke@435 | 453 | do { |
duke@435 | 454 | assert(outp > _out, "Missing Def-Use edge"); |
duke@435 | 455 | } while (*--outp != n); |
duke@435 | 456 | *outp = _out[--_outcnt]; |
duke@435 | 457 | // Smash the old edge so it can't be used accidentally. |
duke@435 | 458 | debug_only(_out[_outcnt] = (Node *)(uintptr_t)0xdeadbeef); |
duke@435 | 459 | // Record that a change happened here. |
duke@435 | 460 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 461 | debug_only(_last_del = n; ++_del_tick); |
duke@435 | 462 | #endif |
duke@435 | 463 | } |
duke@435 | 464 | |
duke@435 | 465 | public: |
duke@435 | 466 | // Globally replace this node by a given new node, updating all uses. |
duke@435 | 467 | void replace_by(Node* new_node); |
kvn@603 | 468 | // Globally replace this node by a given new node, updating all uses |
kvn@603 | 469 | // and cutting input edges of old node. |
bharadwaj@4315 | 470 | void subsume_by(Node* new_node, Compile* c) { |
kvn@603 | 471 | replace_by(new_node); |
bharadwaj@4315 | 472 | disconnect_inputs(NULL, c); |
kvn@603 | 473 | } |
duke@435 | 474 | void set_req_X( uint i, Node *n, PhaseIterGVN *igvn ); |
duke@435 | 475 | // Find the one non-null required input. RegionNode only |
duke@435 | 476 | Node *nonnull_req() const; |
duke@435 | 477 | // Add or remove precedence edges |
duke@435 | 478 | void add_prec( Node *n ); |
duke@435 | 479 | void rm_prec( uint i ); |
duke@435 | 480 | void set_prec( uint i, Node *n ) { |
duke@435 | 481 | assert( is_not_dead(n), "can not use dead node"); |
duke@435 | 482 | assert( i >= _cnt, "not a precedence edge"); |
duke@435 | 483 | if (_in[i] != NULL) _in[i]->del_out((Node *)this); |
duke@435 | 484 | _in[i] = n; |
duke@435 | 485 | if (n != NULL) n->add_out((Node *)this); |
duke@435 | 486 | } |
duke@435 | 487 | // Set this node's index, used by cisc_version to replace current node |
duke@435 | 488 | void set_idx(uint new_idx) { |
duke@435 | 489 | const node_idx_t* ref = &_idx; |
duke@435 | 490 | *(node_idx_t*)ref = new_idx; |
duke@435 | 491 | } |
duke@435 | 492 | // Swap input edge order. (Edge indexes i1 and i2 are usually 1 and 2.) |
duke@435 | 493 | void swap_edges(uint i1, uint i2) { |
duke@435 | 494 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); |
duke@435 | 495 | // Def-Use info is unchanged |
duke@435 | 496 | Node* n1 = in(i1); |
duke@435 | 497 | Node* n2 = in(i2); |
duke@435 | 498 | _in[i1] = n2; |
duke@435 | 499 | _in[i2] = n1; |
duke@435 | 500 | // If this node is in the hash table, make sure it doesn't need a rehash. |
duke@435 | 501 | assert(check_hash == NO_HASH || check_hash == hash(), "edge swap must preserve hash code"); |
duke@435 | 502 | } |
duke@435 | 503 | |
duke@435 | 504 | // Iterators over input Nodes for a Node X are written as: |
duke@435 | 505 | // for( i = 0; i < X.req(); i++ ) ... X[i] ... |
duke@435 | 506 | // NOTE: Required edges can contain embedded NULL pointers. |
duke@435 | 507 | |
duke@435 | 508 | //----------------- Other Node Properties |
duke@435 | 509 | |
duke@435 | 510 | // Generate class id for some ideal nodes to avoid virtual query |
duke@435 | 511 | // methods is_<Node>(). |
duke@435 | 512 | // Class id is the set of bits corresponded to the node class and all its |
duke@435 | 513 | // super classes so that queries for super classes are also valid. |
duke@435 | 514 | // Subclasses of the same super class have different assigned bit |
duke@435 | 515 | // (the third parameter in the macro DEFINE_CLASS_ID). |
duke@435 | 516 | // Classes with deeper hierarchy are declared first. |
duke@435 | 517 | // Classes with the same hierarchy depth are sorted by usage frequency. |
duke@435 | 518 | // |
duke@435 | 519 | // The query method masks the bits to cut off bits of subclasses |
duke@435 | 520 | // and then compare the result with the class id |
duke@435 | 521 | // (see the macro DEFINE_CLASS_QUERY below). |
duke@435 | 522 | // |
duke@435 | 523 | // Class_MachCall=30, ClassMask_MachCall=31 |
duke@435 | 524 | // 12 8 4 0 |
duke@435 | 525 | // 0 0 0 0 0 0 0 0 1 1 1 1 0 |
duke@435 | 526 | // | | | | |
duke@435 | 527 | // | | | Bit_Mach=2 |
duke@435 | 528 | // | | Bit_MachReturn=4 |
duke@435 | 529 | // | Bit_MachSafePoint=8 |
duke@435 | 530 | // Bit_MachCall=16 |
duke@435 | 531 | // |
duke@435 | 532 | // Class_CountedLoop=56, ClassMask_CountedLoop=63 |
duke@435 | 533 | // 12 8 4 0 |
duke@435 | 534 | // 0 0 0 0 0 0 0 1 1 1 0 0 0 |
duke@435 | 535 | // | | | |
duke@435 | 536 | // | | Bit_Region=8 |
duke@435 | 537 | // | Bit_Loop=16 |
duke@435 | 538 | // Bit_CountedLoop=32 |
duke@435 | 539 | |
duke@435 | 540 | #define DEFINE_CLASS_ID(cl, supcl, subn) \ |
duke@435 | 541 | Bit_##cl = (Class_##supcl == 0) ? 1 << subn : (Bit_##supcl) << (1 + subn) , \ |
duke@435 | 542 | Class_##cl = Class_##supcl + Bit_##cl , \ |
duke@435 | 543 | ClassMask_##cl = ((Bit_##cl << 1) - 1) , |
duke@435 | 544 | |
duke@435 | 545 | // This enum is used only for C2 ideal and mach nodes with is_<node>() methods |
duke@435 | 546 | // so that its values fit into 16 bits. |
duke@435 | 547 | enum NodeClasses { |
duke@435 | 548 | Bit_Node = 0x0000, |
duke@435 | 549 | Class_Node = 0x0000, |
duke@435 | 550 | ClassMask_Node = 0xFFFF, |
duke@435 | 551 | |
duke@435 | 552 | DEFINE_CLASS_ID(Multi, Node, 0) |
duke@435 | 553 | DEFINE_CLASS_ID(SafePoint, Multi, 0) |
duke@435 | 554 | DEFINE_CLASS_ID(Call, SafePoint, 0) |
duke@435 | 555 | DEFINE_CLASS_ID(CallJava, Call, 0) |
duke@435 | 556 | DEFINE_CLASS_ID(CallStaticJava, CallJava, 0) |
duke@435 | 557 | DEFINE_CLASS_ID(CallDynamicJava, CallJava, 1) |
duke@435 | 558 | DEFINE_CLASS_ID(CallRuntime, Call, 1) |
duke@435 | 559 | DEFINE_CLASS_ID(CallLeaf, CallRuntime, 0) |
duke@435 | 560 | DEFINE_CLASS_ID(Allocate, Call, 2) |
duke@435 | 561 | DEFINE_CLASS_ID(AllocateArray, Allocate, 0) |
duke@435 | 562 | DEFINE_CLASS_ID(AbstractLock, Call, 3) |
duke@435 | 563 | DEFINE_CLASS_ID(Lock, AbstractLock, 0) |
duke@435 | 564 | DEFINE_CLASS_ID(Unlock, AbstractLock, 1) |
duke@435 | 565 | DEFINE_CLASS_ID(MultiBranch, Multi, 1) |
duke@435 | 566 | DEFINE_CLASS_ID(PCTable, MultiBranch, 0) |
duke@435 | 567 | DEFINE_CLASS_ID(Catch, PCTable, 0) |
duke@435 | 568 | DEFINE_CLASS_ID(Jump, PCTable, 1) |
duke@435 | 569 | DEFINE_CLASS_ID(If, MultiBranch, 1) |
duke@435 | 570 | DEFINE_CLASS_ID(CountedLoopEnd, If, 0) |
duke@435 | 571 | DEFINE_CLASS_ID(NeverBranch, MultiBranch, 2) |
duke@435 | 572 | DEFINE_CLASS_ID(Start, Multi, 2) |
duke@435 | 573 | DEFINE_CLASS_ID(MemBar, Multi, 3) |
roland@3392 | 574 | DEFINE_CLASS_ID(Initialize, MemBar, 0) |
roland@3392 | 575 | DEFINE_CLASS_ID(MemBarStoreStore, MemBar, 1) |
duke@435 | 576 | |
duke@435 | 577 | DEFINE_CLASS_ID(Mach, Node, 1) |
duke@435 | 578 | DEFINE_CLASS_ID(MachReturn, Mach, 0) |
duke@435 | 579 | DEFINE_CLASS_ID(MachSafePoint, MachReturn, 0) |
duke@435 | 580 | DEFINE_CLASS_ID(MachCall, MachSafePoint, 0) |
duke@435 | 581 | DEFINE_CLASS_ID(MachCallJava, MachCall, 0) |
duke@435 | 582 | DEFINE_CLASS_ID(MachCallStaticJava, MachCallJava, 0) |
duke@435 | 583 | DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) |
duke@435 | 584 | DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) |
duke@435 | 585 | DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) |
kvn@3051 | 586 | DEFINE_CLASS_ID(MachBranch, Mach, 1) |
kvn@3051 | 587 | DEFINE_CLASS_ID(MachIf, MachBranch, 0) |
kvn@3051 | 588 | DEFINE_CLASS_ID(MachGoto, MachBranch, 1) |
kvn@3051 | 589 | DEFINE_CLASS_ID(MachNullCheck, MachBranch, 2) |
kvn@3051 | 590 | DEFINE_CLASS_ID(MachSpillCopy, Mach, 2) |
kvn@3051 | 591 | DEFINE_CLASS_ID(MachTemp, Mach, 3) |
kvn@3051 | 592 | DEFINE_CLASS_ID(MachConstantBase, Mach, 4) |
kvn@3051 | 593 | DEFINE_CLASS_ID(MachConstant, Mach, 5) |
duke@435 | 594 | |
kvn@3040 | 595 | DEFINE_CLASS_ID(Type, Node, 2) |
duke@435 | 596 | DEFINE_CLASS_ID(Phi, Type, 0) |
duke@435 | 597 | DEFINE_CLASS_ID(ConstraintCast, Type, 1) |
duke@435 | 598 | DEFINE_CLASS_ID(CheckCastPP, Type, 2) |
duke@435 | 599 | DEFINE_CLASS_ID(CMove, Type, 3) |
kvn@498 | 600 | DEFINE_CLASS_ID(SafePointScalarObject, Type, 4) |
roland@4159 | 601 | DEFINE_CLASS_ID(DecodeNarrowPtr, Type, 5) |
roland@4159 | 602 | DEFINE_CLASS_ID(DecodeN, DecodeNarrowPtr, 0) |
roland@4159 | 603 | DEFINE_CLASS_ID(DecodeNKlass, DecodeNarrowPtr, 1) |
roland@4159 | 604 | DEFINE_CLASS_ID(EncodeNarrowPtr, Type, 6) |
roland@4159 | 605 | DEFINE_CLASS_ID(EncodeP, EncodeNarrowPtr, 0) |
roland@4159 | 606 | DEFINE_CLASS_ID(EncodePKlass, EncodeNarrowPtr, 1) |
duke@435 | 607 | |
kvn@3040 | 608 | DEFINE_CLASS_ID(Proj, Node, 3) |
kvn@3040 | 609 | DEFINE_CLASS_ID(CatchProj, Proj, 0) |
kvn@3040 | 610 | DEFINE_CLASS_ID(JumpProj, Proj, 1) |
kvn@3040 | 611 | DEFINE_CLASS_ID(IfTrue, Proj, 2) |
kvn@3040 | 612 | DEFINE_CLASS_ID(IfFalse, Proj, 3) |
kvn@3040 | 613 | DEFINE_CLASS_ID(Parm, Proj, 4) |
kvn@3040 | 614 | DEFINE_CLASS_ID(MachProj, Proj, 5) |
kvn@3040 | 615 | |
kvn@3040 | 616 | DEFINE_CLASS_ID(Mem, Node, 4) |
duke@435 | 617 | DEFINE_CLASS_ID(Load, Mem, 0) |
kvn@3882 | 618 | DEFINE_CLASS_ID(LoadVector, Load, 0) |
duke@435 | 619 | DEFINE_CLASS_ID(Store, Mem, 1) |
kvn@3882 | 620 | DEFINE_CLASS_ID(StoreVector, Store, 0) |
duke@435 | 621 | DEFINE_CLASS_ID(LoadStore, Mem, 2) |
duke@435 | 622 | |
kvn@3040 | 623 | DEFINE_CLASS_ID(Region, Node, 5) |
kvn@3040 | 624 | DEFINE_CLASS_ID(Loop, Region, 0) |
kvn@3040 | 625 | DEFINE_CLASS_ID(Root, Loop, 0) |
kvn@3040 | 626 | DEFINE_CLASS_ID(CountedLoop, Loop, 1) |
kvn@3040 | 627 | |
kvn@3040 | 628 | DEFINE_CLASS_ID(Sub, Node, 6) |
kvn@3040 | 629 | DEFINE_CLASS_ID(Cmp, Sub, 0) |
kvn@3040 | 630 | DEFINE_CLASS_ID(FastLock, Cmp, 0) |
kvn@3040 | 631 | DEFINE_CLASS_ID(FastUnlock, Cmp, 1) |
kvn@3040 | 632 | |
duke@435 | 633 | DEFINE_CLASS_ID(MergeMem, Node, 7) |
duke@435 | 634 | DEFINE_CLASS_ID(Bool, Node, 8) |
duke@435 | 635 | DEFINE_CLASS_ID(AddP, Node, 9) |
duke@435 | 636 | DEFINE_CLASS_ID(BoxLock, Node, 10) |
duke@435 | 637 | DEFINE_CLASS_ID(Add, Node, 11) |
kvn@3882 | 638 | DEFINE_CLASS_ID(Mul, Node, 12) |
kvn@3882 | 639 | DEFINE_CLASS_ID(Vector, Node, 13) |
kvn@3882 | 640 | DEFINE_CLASS_ID(ClearArray, Node, 14) |
duke@435 | 641 | |
kvn@1535 | 642 | _max_classes = ClassMask_ClearArray |
duke@435 | 643 | }; |
duke@435 | 644 | #undef DEFINE_CLASS_ID |
duke@435 | 645 | |
duke@435 | 646 | // Flags are sorted by usage frequency. |
duke@435 | 647 | enum NodeFlags { |
iveresov@6620 | 648 | Flag_is_Copy = 0x01, // should be first bit to avoid shift |
iveresov@6620 | 649 | Flag_rematerialize = Flag_is_Copy << 1, |
duke@435 | 650 | Flag_needs_anti_dependence_check = Flag_rematerialize << 1, |
iveresov@6620 | 651 | Flag_is_macro = Flag_needs_anti_dependence_check << 1, |
iveresov@6620 | 652 | Flag_is_Con = Flag_is_macro << 1, |
iveresov@6620 | 653 | Flag_is_cisc_alternate = Flag_is_Con << 1, |
iveresov@6620 | 654 | Flag_is_dead_loop_safe = Flag_is_cisc_alternate << 1, |
iveresov@6620 | 655 | Flag_may_be_short_branch = Flag_is_dead_loop_safe << 1, |
iveresov@6620 | 656 | Flag_avoid_back_to_back_before = Flag_may_be_short_branch << 1, |
iveresov@6620 | 657 | Flag_avoid_back_to_back_after = Flag_avoid_back_to_back_before << 1, |
iveresov@6620 | 658 | Flag_has_call = Flag_avoid_back_to_back_after << 1, |
iveresov@6620 | 659 | Flag_is_expensive = Flag_has_call << 1, |
roland@4589 | 660 | _max_flags = (Flag_is_expensive << 1) - 1 // allow flags combination |
duke@435 | 661 | }; |
duke@435 | 662 | |
duke@435 | 663 | private: |
duke@435 | 664 | jushort _class_id; |
duke@435 | 665 | jushort _flags; |
duke@435 | 666 | |
duke@435 | 667 | protected: |
duke@435 | 668 | // These methods should be called from constructors only. |
  // Record the concrete Class_* id for this node. Constructor-use only:
  // the id identifies the most-derived node class for the is_*/as_* queries.
  void init_class_id(jushort c) {
    assert(c <= _max_classes, "invalid node class");
    _class_id = c; // cast out const
  }
  // OR the given flag bits into _flags. Constructor-use only; flags
  // accumulate across the constructor chain and are never cleared here.
  void init_flags(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags |= fl;
  }
  // Clear the given flag bits from _flags. Constructor-use only.
  void clear_flag(jushort fl) {
    assert(fl <= _max_flags, "invalid node flag");
    _flags &= ~fl;
  }
duke@435 | 681 | |
duke@435 | 682 | public: |
duke@435 | 683 | const jushort class_id() const { return _class_id; } |
duke@435 | 684 | |
duke@435 | 685 | const jushort flags() const { return _flags; } |
duke@435 | 686 | |
duke@435 | 687 | // Return a dense integer opcode number |
duke@435 | 688 | virtual int Opcode() const; |
duke@435 | 689 | |
duke@435 | 690 | // Virtual inherited Node size |
duke@435 | 691 | virtual uint size_of() const; |
duke@435 | 692 | |
duke@435 | 693 | // Other interesting Node properties |
never@1515 | 694 | #define DEFINE_CLASS_QUERY(type) \ |
never@1515 | 695 | bool is_##type() const { \ |
duke@435 | 696 | return ((_class_id & ClassMask_##type) == Class_##type); \ |
never@1515 | 697 | } \ |
never@1515 | 698 | type##Node *as_##type() const { \ |
never@1515 | 699 | assert(is_##type(), "invalid node class"); \ |
never@1515 | 700 | return (type##Node*)this; \ |
never@1515 | 701 | } \ |
never@1515 | 702 | type##Node* isa_##type() const { \ |
never@1515 | 703 | return (is_##type()) ? as_##type() : NULL; \ |
duke@435 | 704 | } |
duke@435 | 705 | |
duke@435 | 706 | DEFINE_CLASS_QUERY(AbstractLock) |
duke@435 | 707 | DEFINE_CLASS_QUERY(Add) |
duke@435 | 708 | DEFINE_CLASS_QUERY(AddP) |
duke@435 | 709 | DEFINE_CLASS_QUERY(Allocate) |
duke@435 | 710 | DEFINE_CLASS_QUERY(AllocateArray) |
duke@435 | 711 | DEFINE_CLASS_QUERY(Bool) |
duke@435 | 712 | DEFINE_CLASS_QUERY(BoxLock) |
kvn@3040 | 713 | DEFINE_CLASS_QUERY(Call) |
duke@435 | 714 | DEFINE_CLASS_QUERY(CallDynamicJava) |
duke@435 | 715 | DEFINE_CLASS_QUERY(CallJava) |
duke@435 | 716 | DEFINE_CLASS_QUERY(CallLeaf) |
duke@435 | 717 | DEFINE_CLASS_QUERY(CallRuntime) |
duke@435 | 718 | DEFINE_CLASS_QUERY(CallStaticJava) |
duke@435 | 719 | DEFINE_CLASS_QUERY(Catch) |
duke@435 | 720 | DEFINE_CLASS_QUERY(CatchProj) |
duke@435 | 721 | DEFINE_CLASS_QUERY(CheckCastPP) |
duke@435 | 722 | DEFINE_CLASS_QUERY(ConstraintCast) |
kvn@1535 | 723 | DEFINE_CLASS_QUERY(ClearArray) |
duke@435 | 724 | DEFINE_CLASS_QUERY(CMove) |
duke@435 | 725 | DEFINE_CLASS_QUERY(Cmp) |
duke@435 | 726 | DEFINE_CLASS_QUERY(CountedLoop) |
duke@435 | 727 | DEFINE_CLASS_QUERY(CountedLoopEnd) |
roland@4159 | 728 | DEFINE_CLASS_QUERY(DecodeNarrowPtr) |
kvn@603 | 729 | DEFINE_CLASS_QUERY(DecodeN) |
roland@4159 | 730 | DEFINE_CLASS_QUERY(DecodeNKlass) |
roland@4159 | 731 | DEFINE_CLASS_QUERY(EncodeNarrowPtr) |
kvn@603 | 732 | DEFINE_CLASS_QUERY(EncodeP) |
roland@4159 | 733 | DEFINE_CLASS_QUERY(EncodePKlass) |
duke@435 | 734 | DEFINE_CLASS_QUERY(FastLock) |
duke@435 | 735 | DEFINE_CLASS_QUERY(FastUnlock) |
duke@435 | 736 | DEFINE_CLASS_QUERY(If) |
duke@435 | 737 | DEFINE_CLASS_QUERY(IfFalse) |
duke@435 | 738 | DEFINE_CLASS_QUERY(IfTrue) |
duke@435 | 739 | DEFINE_CLASS_QUERY(Initialize) |
duke@435 | 740 | DEFINE_CLASS_QUERY(Jump) |
duke@435 | 741 | DEFINE_CLASS_QUERY(JumpProj) |
duke@435 | 742 | DEFINE_CLASS_QUERY(Load) |
duke@435 | 743 | DEFINE_CLASS_QUERY(LoadStore) |
duke@435 | 744 | DEFINE_CLASS_QUERY(Lock) |
duke@435 | 745 | DEFINE_CLASS_QUERY(Loop) |
duke@435 | 746 | DEFINE_CLASS_QUERY(Mach) |
kvn@3051 | 747 | DEFINE_CLASS_QUERY(MachBranch) |
duke@435 | 748 | DEFINE_CLASS_QUERY(MachCall) |
duke@435 | 749 | DEFINE_CLASS_QUERY(MachCallDynamicJava) |
duke@435 | 750 | DEFINE_CLASS_QUERY(MachCallJava) |
duke@435 | 751 | DEFINE_CLASS_QUERY(MachCallLeaf) |
duke@435 | 752 | DEFINE_CLASS_QUERY(MachCallRuntime) |
duke@435 | 753 | DEFINE_CLASS_QUERY(MachCallStaticJava) |
twisti@2350 | 754 | DEFINE_CLASS_QUERY(MachConstantBase) |
twisti@2350 | 755 | DEFINE_CLASS_QUERY(MachConstant) |
kvn@3040 | 756 | DEFINE_CLASS_QUERY(MachGoto) |
duke@435 | 757 | DEFINE_CLASS_QUERY(MachIf) |
duke@435 | 758 | DEFINE_CLASS_QUERY(MachNullCheck) |
kvn@3040 | 759 | DEFINE_CLASS_QUERY(MachProj) |
duke@435 | 760 | DEFINE_CLASS_QUERY(MachReturn) |
duke@435 | 761 | DEFINE_CLASS_QUERY(MachSafePoint) |
duke@435 | 762 | DEFINE_CLASS_QUERY(MachSpillCopy) |
duke@435 | 763 | DEFINE_CLASS_QUERY(MachTemp) |
duke@435 | 764 | DEFINE_CLASS_QUERY(Mem) |
duke@435 | 765 | DEFINE_CLASS_QUERY(MemBar) |
roland@3392 | 766 | DEFINE_CLASS_QUERY(MemBarStoreStore) |
duke@435 | 767 | DEFINE_CLASS_QUERY(MergeMem) |
kvn@3882 | 768 | DEFINE_CLASS_QUERY(Mul) |
duke@435 | 769 | DEFINE_CLASS_QUERY(Multi) |
duke@435 | 770 | DEFINE_CLASS_QUERY(MultiBranch) |
kvn@468 | 771 | DEFINE_CLASS_QUERY(Parm) |
duke@435 | 772 | DEFINE_CLASS_QUERY(PCTable) |
duke@435 | 773 | DEFINE_CLASS_QUERY(Phi) |
duke@435 | 774 | DEFINE_CLASS_QUERY(Proj) |
duke@435 | 775 | DEFINE_CLASS_QUERY(Region) |
duke@435 | 776 | DEFINE_CLASS_QUERY(Root) |
duke@435 | 777 | DEFINE_CLASS_QUERY(SafePoint) |
kvn@498 | 778 | DEFINE_CLASS_QUERY(SafePointScalarObject) |
duke@435 | 779 | DEFINE_CLASS_QUERY(Start) |
duke@435 | 780 | DEFINE_CLASS_QUERY(Store) |
duke@435 | 781 | DEFINE_CLASS_QUERY(Sub) |
duke@435 | 782 | DEFINE_CLASS_QUERY(Type) |
kvn@3040 | 783 | DEFINE_CLASS_QUERY(Vector) |
kvn@3882 | 784 | DEFINE_CLASS_QUERY(LoadVector) |
kvn@3882 | 785 | DEFINE_CLASS_QUERY(StoreVector) |
duke@435 | 786 | DEFINE_CLASS_QUERY(Unlock) |
duke@435 | 787 | |
duke@435 | 788 | #undef DEFINE_CLASS_QUERY |
duke@435 | 789 | |
duke@435 | 790 | // duplicate of is_MachSpillCopy() |
duke@435 | 791 | bool is_SpillCopy () const { |
duke@435 | 792 | return ((_class_id & ClassMask_MachSpillCopy) == Class_MachSpillCopy); |
duke@435 | 793 | } |
duke@435 | 794 | |
  // True when this node was flagged as a constant at construction time.
  bool is_Con () const { return (_flags & Flag_is_Con) != 0; }
duke@435 | 796 | // The data node which is safe to leave in dead loop during IGVN optimization. |
duke@435 | 797 | bool is_dead_loop_safe() const { |
kvn@561 | 798 | return is_Phi() || (is_Proj() && in(0) == NULL) || |
kvn@561 | 799 | ((_flags & (Flag_is_dead_loop_safe | Flag_is_Con)) != 0 && |
kvn@561 | 800 | (!is_Proj() || !in(0)->is_Allocate())); |
duke@435 | 801 | } |
duke@435 | 802 | |
duke@435 | 803 | // is_Copy() returns copied edge index (0 or 1) |
  // Nonzero when this node is a register copy; Flag_is_Copy is bit 0, so
  // the masked value doubles as the copied-edge index (0 or 1).
  uint is_Copy() const { return (_flags & Flag_is_Copy); }
duke@435 | 805 | |
duke@435 | 806 | virtual bool is_CFG() const { return false; } |
duke@435 | 807 | |
duke@435 | 808 | // If this node is control-dependent on a test, can it be |
duke@435 | 809 | // rerouted to a dominating equivalent test? This is usually |
duke@435 | 810 | // true of non-CFG nodes, but can be false for operations which |
duke@435 | 811 | // depend for their correct sequencing on more than one test. |
duke@435 | 812 | // (In that case, hoisting to a dominating test may silently |
duke@435 | 813 | // skip some other important test.) |
duke@435 | 814 | virtual bool depends_only_on_test() const { assert(!is_CFG(), ""); return true; }; |
duke@435 | 815 | |
duke@435 | 816 | // When building basic blocks, I need to have a notion of block beginning |
duke@435 | 817 | // Nodes, next block selector Nodes (block enders), and next block |
duke@435 | 818 | // projections. These calls need to work on their machine equivalents. The |
duke@435 | 819 | // Ideal beginning Nodes are RootNode, RegionNode and StartNode. |
duke@435 | 820 | bool is_block_start() const { |
duke@435 | 821 | if ( is_Region() ) |
duke@435 | 822 | return this == (const Node*)in(0); |
duke@435 | 823 | else |
kvn@3040 | 824 | return is_Start(); |
duke@435 | 825 | } |
duke@435 | 826 | |
duke@435 | 827 | // The Ideal control projection Nodes are IfTrue/IfFalse, JumpProjNode, Root, |
duke@435 | 828 | // Goto and Return. This call also returns the block ending Node. |
duke@435 | 829 | virtual const Node *is_block_proj() const; |
duke@435 | 830 | |
duke@435 | 831 | // The node is a "macro" node which needs to be expanded before matching |
duke@435 | 832 | bool is_macro() const { return (_flags & Flag_is_macro) != 0; } |
roland@4589 | 833 | // The node is expensive: the best control is set during loop opts |
roland@4589 | 834 | bool is_expensive() const { return (_flags & Flag_is_expensive) != 0 && in(0) != NULL; } |
duke@435 | 835 | |
duke@435 | 836 | //----------------- Optimization |
duke@435 | 837 | |
duke@435 | 838 | // Get the worst-case Type output for this Node. |
duke@435 | 839 | virtual const class Type *bottom_type() const; |
duke@435 | 840 | |
duke@435 | 841 | // If we find a better type for a node, try to record it permanently. |
duke@435 | 842 | // Return true if this node actually changed. |
duke@435 | 843 | // Be sure to do the hash_delete game in the "rehash" variant. |
duke@435 | 844 | void raise_bottom_type(const Type* new_type); |
duke@435 | 845 | |
duke@435 | 846 | // Get the address type with which this node uses and/or defs memory, |
duke@435 | 847 | // or NULL if none. The address type is conservatively wide. |
duke@435 | 848 | // Returns non-null for calls, membars, loads, stores, etc. |
duke@435 | 849 | // Returns TypePtr::BOTTOM if the node touches memory "broadly". |
duke@435 | 850 | virtual const class TypePtr *adr_type() const { return NULL; } |
duke@435 | 851 | |
duke@435 | 852 | // Return an existing node which computes the same function as this node. |
duke@435 | 853 | // The optimistic combined algorithm requires this to return a Node which |
duke@435 | 854 | // is a small number of steps away (e.g., one of my inputs). |
duke@435 | 855 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 856 | |
duke@435 | 857 | // Return the set of values this Node can take on at runtime. |
duke@435 | 858 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 859 | |
duke@435 | 860 | // Return a node which is more "ideal" than the current node. |
duke@435 | 861 | // The invariants on this call are subtle. If in doubt, read the |
duke@435 | 862 | // treatise in node.cpp above the default implemention AND TEST WITH |
duke@435 | 863 | // +VerifyIterativeGVN! |
duke@435 | 864 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 865 | |
duke@435 | 866 | // Some nodes have specific Ideal subgraph transformations only if they are |
duke@435 | 867 | // unique users of specific nodes. Such nodes should be put on IGVN worklist |
duke@435 | 868 | // for the transformations to happen. |
duke@435 | 869 | bool has_special_unique_user() const; |
duke@435 | 870 | |
kvn@554 | 871 | // Skip Proj and CatchProj nodes chains. Check for Null and Top. |
kvn@554 | 872 | Node* find_exact_control(Node* ctrl); |
kvn@554 | 873 | |
kvn@554 | 874 | // Check if 'this' node dominates or equal to 'sub'. |
kvn@554 | 875 | bool dominates(Node* sub, Node_List &nlist); |
kvn@554 | 876 | |
duke@435 | 877 | protected: |
duke@435 | 878 | bool remove_dead_region(PhaseGVN *phase, bool can_reshape); |
duke@435 | 879 | public: |
duke@435 | 880 | |
duke@435 | 881 | // Idealize graph, using DU info. Done after constant propagation |
duke@435 | 882 | virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); |
duke@435 | 883 | |
duke@435 | 884 | // See if there is valid pipeline info |
duke@435 | 885 | static const Pipeline *pipeline_class(); |
duke@435 | 886 | virtual const Pipeline *pipeline() const; |
duke@435 | 887 | |
duke@435 | 888 | // Compute the latency from the def to this instruction of the ith input node |
duke@435 | 889 | uint latency(uint i); |
duke@435 | 890 | |
duke@435 | 891 | // Hash & compare functions, for pessimistic value numbering |
duke@435 | 892 | |
duke@435 | 893 | // If the hash function returns the special sentinel value NO_HASH, |
duke@435 | 894 | // the node is guaranteed never to compare equal to any other node. |
twisti@1040 | 895 | // If we accidentally generate a hash with value NO_HASH the node |
duke@435 | 896 | // won't go into the table and we'll lose a little optimization. |
duke@435 | 897 | enum { NO_HASH = 0 }; |
duke@435 | 898 | virtual uint hash() const; |
duke@435 | 899 | virtual uint cmp( const Node &n ) const; |
duke@435 | 900 | |
duke@435 | 901 | // Operation appears to be iteratively computed (such as an induction variable) |
duke@435 | 902 | // It is possible for this operation to return false for a loop-varying |
duke@435 | 903 | // value, if it appears (by local graph inspection) to be computed by a simple conditional. |
duke@435 | 904 | bool is_iteratively_computed(); |
duke@435 | 905 | |
duke@435 | 906 | // Determine if a node is Counted loop induction variable. |
duke@435 | 907 | // The method is defined in loopnode.cpp. |
duke@435 | 908 | const Node* is_loop_iv() const; |
duke@435 | 909 | |
duke@435 | 910 | // Return a node with opcode "opc" and same inputs as "this" if one can |
duke@435 | 911 | // be found; Otherwise return NULL; |
duke@435 | 912 | Node* find_similar(int opc); |
duke@435 | 913 | |
duke@435 | 914 | // Return the unique control out if only one. Null if none or more than one. |
duke@435 | 915 | Node* unique_ctrl_out(); |
duke@435 | 916 | |
duke@435 | 917 | //----------------- Code Generation |
duke@435 | 918 | |
duke@435 | 919 | // Ideal register class for Matching. Zero means unmatched instruction |
duke@435 | 920 | // (these are cloned instead of converted to machine nodes). |
duke@435 | 921 | virtual uint ideal_reg() const; |
duke@435 | 922 | |
duke@435 | 923 | static const uint NotAMachineReg; // must be > max. machine register |
duke@435 | 924 | |
duke@435 | 925 | // Do we Match on this edge index or not? Generally false for Control |
duke@435 | 926 | // and true for everything else. Weird for calls & returns. |
duke@435 | 927 | virtual uint match_edge(uint idx) const; |
duke@435 | 928 | |
duke@435 | 929 | // Register class output is returned in |
duke@435 | 930 | virtual const RegMask &out_RegMask() const; |
duke@435 | 931 | // Register class input is expected in |
duke@435 | 932 | virtual const RegMask &in_RegMask(uint) const; |
duke@435 | 933 | // Should we clone rather than spill this instruction? |
duke@435 | 934 | bool rematerialize() const; |
duke@435 | 935 | |
duke@435 | 936 | // Return JVM State Object if this Node carries debug info, or NULL otherwise |
duke@435 | 937 | virtual JVMState* jvms() const; |
duke@435 | 938 | |
duke@435 | 939 | // Print as assembly |
duke@435 | 940 | virtual void format( PhaseRegAlloc *, outputStream* st = tty ) const; |
duke@435 | 941 | // Emit bytes starting at parameter 'ptr' |
duke@435 | 942 | // Bump 'ptr' by the number of output bytes |
duke@435 | 943 | virtual void emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const; |
duke@435 | 944 | // Size of instruction in bytes |
duke@435 | 945 | virtual uint size(PhaseRegAlloc *ra_) const; |
duke@435 | 946 | |
duke@435 | 947 | // Convenience function to extract an integer constant from a node. |
duke@435 | 948 | // If it is not an integer constant (either Con, CastII, or Mach), |
duke@435 | 949 | // return value_if_unknown. |
  // Extract this node's integer constant, or return value_if_unknown when
  // no int type is found or the type is not a single constant.
  jint find_int_con(jint value_if_unknown) const {
    const TypeInt* t = find_int_type();
    return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown;
  }
duke@435 | 954 | // Return the constant, knowing it is an integer constant already |
  // Extract the integer constant; the caller guarantees this node is an
  // int constant (guarantee fires otherwise).
  jint get_int() const {
    const TypeInt* t = find_int_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
duke@435 | 960 | // Here's where the work is done. Can produce non-constant int types too. |
duke@435 | 961 | const TypeInt* find_int_type() const; |
duke@435 | 962 | |
duke@435 | 963 | // Same thing for long (and intptr_t, via type.hpp): |
  // Extract the long constant; the caller guarantees this node is a
  // long constant (guarantee fires otherwise).
  jlong get_long() const {
    const TypeLong* t = find_long_type();
    guarantee(t != NULL, "must be con");
    return t->get_con();
  }
duke@435 | 969 | jlong find_long_con(jint value_if_unknown) const { |
duke@435 | 970 | const TypeLong* t = find_long_type(); |
duke@435 | 971 | return (t != NULL && t->is_con()) ? t->get_con() : value_if_unknown; |
duke@435 | 972 | } |
duke@435 | 973 | const TypeLong* find_long_type() const; |
duke@435 | 974 | |
kvn@5111 | 975 | const TypePtr* get_ptr_type() const; |
kvn@5111 | 976 | |
duke@435 | 977 | // These guys are called by code generated by ADLC: |
duke@435 | 978 | intptr_t get_ptr() const; |
coleenp@548 | 979 | intptr_t get_narrowcon() const; |
duke@435 | 980 | jdouble getd() const; |
duke@435 | 981 | jfloat getf() const; |
duke@435 | 982 | |
duke@435 | 983 | // Nodes which are pinned into basic blocks |
duke@435 | 984 | virtual bool pinned() const { return false; } |
duke@435 | 985 | |
duke@435 | 986 | // Nodes which use memory without consuming it, hence need antidependences |
duke@435 | 987 | // More specifically, needs_anti_dependence_check returns true iff the node |
duke@435 | 988 | // (a) does a load, and (b) does not perform a store (except perhaps to a |
duke@435 | 989 | // stack slot or some other unaliased location). |
duke@435 | 990 | bool needs_anti_dependence_check() const; |
duke@435 | 991 | |
duke@435 | 992 | // Return which operand this instruction may cisc-spill. In other words, |
duke@435 | 993 | // return operand position that can convert from reg to memory access |
duke@435 | 994 | virtual int cisc_operand() const { return AdlcVMDeps::Not_cisc_spillable; } |
duke@435 | 995 | bool is_cisc_alternate() const { return (_flags & Flag_is_cisc_alternate) != 0; } |
duke@435 | 996 | |
duke@435 | 997 | //----------------- Graph walking |
duke@435 | 998 | public: |
duke@435 | 999 | // Walk and apply member functions recursively. |
duke@435 | 1000 | // Supplied (this) pointer is root. |
duke@435 | 1001 | void walk(NFunc pre, NFunc post, void *env); |
duke@435 | 1002 | static void nop(Node &, void*); // Dummy empty function |
duke@435 | 1003 | static void packregion( Node &n, void* ); |
duke@435 | 1004 | private: |
duke@435 | 1005 | void walk_(NFunc pre, NFunc post, void *env, VectorSet &visited); |
duke@435 | 1006 | |
duke@435 | 1007 | //----------------- Printing, etc |
duke@435 | 1008 | public: |
duke@435 | 1009 | #ifndef PRODUCT |
duke@435 | 1010 | Node* find(int idx) const; // Search the graph for the given idx. |
duke@435 | 1011 | Node* find_ctrl(int idx) const; // Search control ancestors for the given idx. |
kvn@4478 | 1012 | void dump() const { dump("\n"); } // Print this node. |
kvn@4478 | 1013 | void dump(const char* suffix, outputStream *st = tty) const;// Print this node. |
duke@435 | 1014 | void dump(int depth) const; // Print this node, recursively to depth d |
duke@435 | 1015 | void dump_ctrl(int depth) const; // Print control nodes, to depth d |
kvn@4478 | 1016 | virtual void dump_req(outputStream *st = tty) const; // Print required-edge info |
kvn@4478 | 1017 | virtual void dump_prec(outputStream *st = tty) const; // Print precedence-edge info |
kvn@4478 | 1018 | virtual void dump_out(outputStream *st = tty) const; // Print the output edge info |
duke@435 | 1019 | virtual void dump_spec(outputStream *st) const {}; // Print per-node info |
duke@435 | 1020 | void verify_edges(Unique_Node_List &visited); // Verify bi-directional edges |
duke@435 | 1021 | void verify() const; // Check Def-Use info for my subgraph |
duke@435 | 1022 | static void verify_recur(const Node *n, int verify_depth, VectorSet &old_space, VectorSet &new_space); |
duke@435 | 1023 | |
duke@435 | 1024 | // This call defines a class-unique string used to identify class instances |
duke@435 | 1025 | virtual const char *Name() const; |
duke@435 | 1026 | |
duke@435 | 1027 | void dump_format(PhaseRegAlloc *ra) const; // debug access to MachNode::format(...) |
duke@435 | 1028 | // RegMask Print Functions |
duke@435 | 1029 | void dump_in_regmask(int idx) { in_RegMask(idx).dump(); } |
duke@435 | 1030 | void dump_out_regmask() { out_RegMask().dump(); } |
goetz@6488 | 1031 | static bool in_dump() { return Compile::current()->_in_dump_cnt > 0; } |
  // Minimal one-line dump to tty: node index, class name, and the index of
  // every input (or NULL). Cheaper and safer than dump() -- it touches only
  // this node and its input indices.
  void fast_dump() const {
    tty->print("%4d: %-17s", _idx, Name());
    for (uint i = 0; i < len(); i++)
      if (in(i))
        tty->print(" %4d", in(i)->_idx);
      else
        tty->print(" NULL");
    tty->print("\n");
  }
duke@435 | 1041 | #endif |
duke@435 | 1042 | #ifdef ASSERT |
duke@435 | 1043 | void verify_construction(); |
duke@435 | 1044 | bool verify_jvms(const JVMState* jvms) const; |
duke@435 | 1045 | int _debug_idx; // Unique value assigned to every node. |
duke@435 | 1046 | int debug_idx() const { return _debug_idx; } |
duke@435 | 1047 | void set_debug_idx( int debug_idx ) { _debug_idx = debug_idx; } |
duke@435 | 1048 | |
duke@435 | 1049 | Node* _debug_orig; // Original version of this, if any. |
duke@435 | 1050 | Node* debug_orig() const { return _debug_orig; } |
duke@435 | 1051 | void set_debug_orig(Node* orig); // _debug_orig = orig |
duke@435 | 1052 | |
duke@435 | 1053 | int _hash_lock; // Barrier to modifications of nodes in the hash table |
duke@435 | 1054 | void enter_hash_lock() { ++_hash_lock; assert(_hash_lock < 99, "in too many hash tables?"); } |
duke@435 | 1055 | void exit_hash_lock() { --_hash_lock; assert(_hash_lock >= 0, "mispaired hash locks"); } |
duke@435 | 1056 | |
duke@435 | 1057 | static void init_NodeProperty(); |
duke@435 | 1058 | |
duke@435 | 1059 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 1060 | const Node* _last_del; // The last deleted node. |
duke@435 | 1061 | uint _del_tick; // Bumped when a deletion happens.. |
duke@435 | 1062 | #endif |
duke@435 | 1063 | #endif |
duke@435 | 1064 | }; |
duke@435 | 1065 | |
duke@435 | 1066 | //----------------------------------------------------------------------------- |
duke@435 | 1067 | // Iterators over DU info, and associated Node functions. |
duke@435 | 1068 | |
duke@435 | 1069 | #if OPTO_DU_ITERATOR_ASSERT |
duke@435 | 1070 | |
duke@435 | 1071 | // Common code for assertion checking on DU iterators. |
// Base class holding the shared verification state for all DU iterators.
// In product builds it is empty; under ASSERT it caches a snapshot of the
// iterated node's out-array bookkeeping so mutations mid-iteration can be
// detected.
class DUIterator_Common VALUE_OBJ_CLASS_SPEC {
#ifdef ASSERT
 protected:
  bool         _vdui;               // cached value of VerifyDUIterators
  const Node*  _node;               // the node containing the _out array
  uint         _outcnt;             // cached node->_outcnt
  uint         _del_tick;           // cached node->_del_tick
  Node*        _last;               // last value produced by the iterator

  void sample(const Node* node);    // used by c'tor to set up for verifies
  void verify(const Node* node, bool at_end_ok = false);
  void verify_resync();             // check that backing up over a deletion is legal
  void reset(const DUIterator_Common& that);

// The VDUI_ONLY macro protects code conditionalized on VerifyDUIterators
  #define I_VDUI_ONLY(i,x) { if ((i)._vdui) { x; } }
#else
  #define I_VDUI_ONLY(i,x) { }
#endif //ASSERT
};
duke@435 | 1092 | |
duke@435 | 1093 | #define VDUI_ONLY(x) I_VDUI_ONLY(*this, x) |
duke@435 | 1094 | |
duke@435 | 1095 | // Default DU iterator. Allows appends onto the out array. |
duke@435 | 1096 | // Allows deletion from the out array only at the current point. |
duke@435 | 1097 | // Usage: |
duke@435 | 1098 | // for (DUIterator i = x->outs(); x->has_out(i); i++) { |
duke@435 | 1099 | // Node* y = x->out(i); |
duke@435 | 1100 | // ... |
duke@435 | 1101 | // } |
duke@435 | 1102 | // Compiles in product mode to a unsigned integer index, which indexes |
duke@435 | 1103 | // onto a repeatedly reloaded base pointer of x->_out. The loop predicate |
duke@435 | 1104 | // also reloads x->_outcnt. If you delete, you must perform "--i" just |
duke@435 | 1105 | // before continuing the loop. You must delete only the last-produced |
duke@435 | 1106 | // edge. You must delete only a single copy of the last-produced edge, |
duke@435 | 1107 | // or else you must delete all copies at once (the first time the edge |
duke@435 | 1108 | // is produced by the iterator). |
// Default def-use iterator: an index into the node's _out array, with all
// other members existing only for ASSERT-build verification. Supports
// appends to the out array and deletion at the current position (caller
// performs "--i" after a delete).
class DUIterator : public DUIterator_Common {
  friend class Node;

  // This is the index which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI index is done to this index.  All other fields in
  // this class are used only for assertion checking.
  uint         _idx;

  #ifdef ASSERT
  uint         _refresh_tick;    // Records the refresh activity.

  void sample(const Node* node); // Initialize _refresh_tick etc.
  void verify(const Node* node, bool at_end_ok = false);
  void verify_increment();       // Verify an increment operation.
  void verify_resync();          // Verify that we can back up over a deletion.
  void verify_finish();          // Verify that the loop terminated properly.
  void refresh();                // Resample verification info.
  void reset(const DUIterator& that);  // Resample after assignment.
  #endif

  // Private: only Node::outs() constructs live iterators. The dummy int
  // keeps this from acting as an implicit Node*-to-iterator conversion.
  DUIterator(const Node* node, int dummy_to_avoid_conversion)
    { _idx = 0;                         debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _idx++;                           VDUI_ONLY(verify_increment()); }

  // Used to back up over a just-deleted edge.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_idx; }

  ~DUIterator()
    { VDUI_ONLY(verify_finish()); }

  void operator=(const DUIterator& that)
    { _idx = that._idx;                 debug_only(reset(that)); }
};
duke@435 | 1150 | |
// Begin iteration over this node's uses.
DUIterator Node::outs() const
  { return DUIterator(this, 0); }
// Resample verification state after the out array may have changed.
DUIterator& Node::refresh_out_pos(DUIterator& i) const
  { I_VDUI_ONLY(i, i.refresh());        return i; }
// Loop predicate: true while the index is within the current out count.
bool Node::has_out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this,true));return i._idx < _outcnt; }
// Fetch the current use; in ASSERT builds also records it as _last.
Node*    Node::out(DUIterator& i) const
  { I_VDUI_ONLY(i, i.verify(this));     return debug_only(i._last=) _out[i._idx]; }
duke@435 | 1159 | |
duke@435 | 1160 | |
duke@435 | 1161 | // Faster DU iterator. Disallows insertions into the out array. |
duke@435 | 1162 | // Allows deletion from the out array only at the current point. |
duke@435 | 1163 | // Usage: |
duke@435 | 1164 | // for (DUIterator_Fast imax, i = x->fast_outs(imax); i < imax; i++) { |
duke@435 | 1165 | // Node* y = x->fast_out(i); |
duke@435 | 1166 | // ... |
duke@435 | 1167 | // } |
duke@435 | 1168 | // Compiles in product mode to raw Node** pointer arithmetic, with |
duke@435 | 1169 | // no reloading of pointers from the original node x. If you delete, |
duke@435 | 1170 | // you must perform "--i; --imax" just before continuing the loop. |
duke@435 | 1171 | // If you delete multiple copies of the same edge, you must decrement |
duke@435 | 1172 | // imax, but not i, multiple times: "--i, imax -= num_edges". |
// Fast def-use iterator: raw pointer arithmetic over the node's _out array
// (no reload of _outcnt each step), so insertions are disallowed while
// iterating. Deletion is allowed only at the current point; the caller
// performs "--i; --imax" afterwards.
class DUIterator_Fast : public DUIterator_Common {
  friend class Node;
  friend class DUIterator_Last;

  // This is the pointer which provides the product-mode behavior.
  // Whatever the product-mode version of the system does to the
  // DUI pointer is done to this pointer.  All other fields in
  // this class are used only for assertion checking.
  Node**       _outp;

  #ifdef ASSERT
  void verify(const Node* node, bool at_end_ok = false);
  void verify_limit();
  void verify_resync();
  void verify_relimit(uint n);
  void reset(const DUIterator_Fast& that);
  #endif

  // Note: offset must be signed, since -1 is sometimes passed
  DUIterator_Fast(const Node* node, ptrdiff_t offset)
    { _outp = node->_out + offset;      debug_only(sample(node)); }

 public:
  // initialize to garbage; clear _vdui to disable asserts
  DUIterator_Fast()
    { /*initialize to garbage*/         debug_only(_vdui = false); }

  void operator++(int dummy_to_specify_postfix_op)
    { _outp++;                          VDUI_ONLY(verify(_node, true)); }

  // Used to back up over a just-deleted edge.
  void operator--()
    { VDUI_ONLY(verify_resync());       --_outp; }

  void operator-=(uint n)   // applied to the limit only
    { _outp -= n;                       VDUI_ONLY(verify_relimit(n)); }

  // Loop predicate against the limit iterator produced by fast_outs().
  bool operator<(DUIterator_Fast& limit) {
    I_VDUI_ONLY(*this, this->verify(_node, true));
    I_VDUI_ONLY(limit, limit.verify_limit());
    return _outp < limit._outp;
  }

  void operator=(const DUIterator_Fast& that)
    { _outp = that._outp;               debug_only(reset(that)); }
};
duke@435 | 1218 | |
duke@435 | 1219 | DUIterator_Fast Node::fast_outs(DUIterator_Fast& imax) const { |
duke@435 | 1220 | // Assign a limit pointer to the reference argument: |
duke@435 | 1221 | imax = DUIterator_Fast(this, (ptrdiff_t)_outcnt); |
duke@435 | 1222 | // Return the base pointer: |
duke@435 | 1223 | return DUIterator_Fast(this, 0); |
duke@435 | 1224 | } |
duke@435 | 1225 | Node* Node::fast_out(DUIterator_Fast& i) const { |
duke@435 | 1226 | I_VDUI_ONLY(i, i.verify(this)); |
duke@435 | 1227 | return debug_only(i._last=) *i._outp;  // dereference the cached pointer; no reload from this node |
duke@435 | 1228 | } |
duke@435 | 1229 | |
duke@435 | 1230 | |
duke@435 | 1231 | // Faster DU iterator. Requires each successive edge to be removed. |
duke@435 | 1232 | // Does not allow insertion of any edges. |
duke@435 | 1233 | // Usage: |
duke@435 | 1234 | // for (DUIterator_Last imin, i = x->last_outs(imin); i >= imin; i -= num_edges) { |
duke@435 | 1235 | // Node* y = x->last_out(i); |
duke@435 | 1236 | // ... |
duke@435 | 1237 | // } |
duke@435 | 1238 | // Compiles in product mode to raw Node** pointer arithmetic, with |
duke@435 | 1239 | // no reloading of pointers from the original node x. |
duke@435 | 1240 | class DUIterator_Last : private DUIterator_Fast { |
duke@435 | 1241 | friend class Node; |
duke@435 | 1242 | |
duke@435 | 1243 | #ifdef ASSERT |
duke@435 | 1244 | void verify(const Node* node, bool at_end_ok = false); |
duke@435 | 1245 | void verify_limit(); |
duke@435 | 1246 | void verify_step(uint num_edges); |
duke@435 | 1247 | #endif |
duke@435 | 1248 | |
duke@435 | 1249 | // Note: offset must be signed, since -1 is sometimes passed |
duke@435 | 1250 | DUIterator_Last(const Node* node, ptrdiff_t offset) |
duke@435 | 1251 | : DUIterator_Fast(node, offset) { } |
duke@435 | 1252 | |
duke@435 | 1253 | void operator++(int dummy_to_specify_postfix_op) {} // do not use |
duke@435 | 1254 | void operator<(int) {} // do not use |
duke@435 | 1255 | |
duke@435 | 1256 | public: |
duke@435 | 1257 | DUIterator_Last() { } |
duke@435 | 1258 | // initialize to garbage |
duke@435 | 1259 | |
duke@435 | 1260 | void operator--()  // step down past one removed edge; debug builds verify the step |
duke@435 | 1261 | { _outp--; VDUI_ONLY(verify_step(1)); } |
duke@435 | 1262 | |
duke@435 | 1263 | void operator-=(uint n)  // step down past n copies of the same removed edge |
duke@435 | 1264 | { _outp -= n; VDUI_ONLY(verify_step(n)); } |
duke@435 | 1265 | |
duke@435 | 1266 | bool operator>=(DUIterator_Last& limit) { |
duke@435 | 1267 | I_VDUI_ONLY(*this, this->verify(_node, true)); |
duke@435 | 1268 | I_VDUI_ONLY(limit, limit.verify_limit()); |
duke@435 | 1269 | return _outp >= limit._outp; |
duke@435 | 1270 | } |
duke@435 | 1271 | |
duke@435 | 1272 | void operator=(const DUIterator_Last& that) |
duke@435 | 1273 | { DUIterator_Fast::operator=(that); } |
duke@435 | 1274 | }; |
duke@435 | 1275 | |
duke@435 | 1276 | DUIterator_Last Node::last_outs(DUIterator_Last& imin) const { |
duke@435 | 1277 | // Assign a limit pointer to the reference argument: |
duke@435 | 1278 | imin = DUIterator_Last(this, 0); |
duke@435 | 1279 | // Return the initial pointer: |
duke@435 | 1280 | return DUIterator_Last(this, (ptrdiff_t)_outcnt - 1);  // start at the last out-edge (one before slot 0 when _outcnt == 0) |
duke@435 | 1281 | } |
duke@435 | 1282 | Node* Node::last_out(DUIterator_Last& i) const { |
duke@435 | 1283 | I_VDUI_ONLY(i, i.verify(this)); |
duke@435 | 1284 | return debug_only(i._last=) *i._outp; |
duke@435 | 1285 | } |
duke@435 | 1286 | |
duke@435 | 1287 | #endif //OPTO_DU_ITERATOR_ASSERT |
duke@435 | 1288 | |
duke@435 | 1289 | #undef I_VDUI_ONLY |
duke@435 | 1290 | #undef VDUI_ONLY |
duke@435 | 1291 | |
never@1515 | 1292 | // An Iterator that truly follows the iterator pattern. Doesn't |
never@1515 | 1293 | // support deletion but could be made to. |
never@1515 | 1294 | // |
never@1515 | 1295 | // for (SimpleDUIterator i(n); i.has_next(); i.next()) { |
never@1515 | 1296 | // Node* m = i.get(); |
never@1515 | 1297 | // |
never@1515 | 1298 | class SimpleDUIterator : public StackObj { |
never@1515 | 1299 | private: |
never@1515 | 1300 | Node* node;  // the node whose out-edges are being walked (used by get()) |
never@1515 | 1301 | DUIterator_Fast i; |
never@1515 | 1302 | DUIterator_Fast imax; |
never@1515 | 1303 | public: |
never@1515 | 1304 | SimpleDUIterator(Node* n): node(n), i(n->fast_outs(imax)) {}  // NOTE(review): fast_outs assigns imax while i is initialized, then imax's own default ctor runs (member init order) -- confirm this is benign in debug builds |
never@1515 | 1305 | bool has_next() { return i < imax; } |
never@1515 | 1306 | void next() { i++; } |
never@1515 | 1307 | Node* get() { return node->fast_out(i); } |
never@1515 | 1308 | }; |
never@1515 | 1309 | |
duke@435 | 1310 | |
duke@435 | 1311 | //----------------------------------------------------------------------------- |
duke@435 | 1312 | // Map dense integer indices to Nodes. Uses classic doubling-array trick. |
duke@435 | 1313 | // Abstractly provides an infinite array of Node*'s, initialized to NULL. |
duke@435 | 1314 | // Note that the constructor just zeros things, and since I use Arena |
duke@435 | 1315 | // allocation I do not need a destructor to reclaim storage. |
duke@435 | 1316 | class Node_Array : public ResourceObj { |
never@3138 | 1317 | friend class VMStructs; |
duke@435 | 1318 | protected: |
duke@435 | 1319 | Arena *_a; // Arena to allocate in |
duke@435 | 1320 | uint _max; |
duke@435 | 1321 | Node **_nodes; |
duke@435 | 1322 | void grow( uint i ); // Grow array node to fit |
duke@435 | 1323 | public: |
duke@435 | 1324 | Node_Array(Arena *a) : _a(a), _max(OptoNodeListSize) {  // start with OptoNodeListSize NULL slots |
duke@435 | 1325 | _nodes = NEW_ARENA_ARRAY( a, Node *, OptoNodeListSize ); |
duke@435 | 1326 | for( int i = 0; i < OptoNodeListSize; i++ ) { |
duke@435 | 1327 | _nodes[i] = NULL; |
duke@435 | 1328 | } |
duke@435 | 1329 | } |
duke@435 | 1330 | |
duke@435 | 1331 | Node_Array(Node_Array *na) : _a(na->_a), _max(na->_max), _nodes(na->_nodes) {}  // shallow copy: shares na's backing storage |
duke@435 | 1332 | Node *operator[] ( uint i ) const // Lookup, or NULL for not mapped |
duke@435 | 1333 | { return (i<_max) ? _nodes[i] : (Node*)NULL; } |
duke@435 | 1334 | Node *at( uint i ) const { assert(i<_max,"oob"); return _nodes[i]; }  // like operator[], but asserts instead of returning NULL when out of bounds |
duke@435 | 1335 | Node **adr() { return _nodes; } |
duke@435 | 1336 | // Extend the mapping: index i maps to Node *n. |
duke@435 | 1337 | void map( uint i, Node *n ) { if( i>=_max ) grow(i); _nodes[i] = n; } |
duke@435 | 1338 | void insert( uint i, Node *n ); |
duke@435 | 1339 | void remove( uint i ); // Remove, preserving order |
duke@435 | 1340 | void sort( C_sort_func_t func);  // sort with a C-style comparator |
duke@435 | 1341 | void reset( Arena *new_a ); // Zap mapping to empty; reclaim storage |
duke@435 | 1342 | void clear(); // Set all entries to NULL, keep storage |
duke@435 | 1343 | uint Size() const { return _max; } |
duke@435 | 1344 | void dump() const; |
duke@435 | 1345 | }; |
duke@435 | 1346 | |
duke@435 | 1347 | class Node_List : public Node_Array {  // growable list of nodes: Node_Array plus an element count |
never@3138 | 1348 | friend class VMStructs; |
duke@435 | 1349 | uint _cnt; |
duke@435 | 1350 | public: |
duke@435 | 1351 | Node_List() : Node_Array(Thread::current()->resource_area()), _cnt(0) {} |
duke@435 | 1352 | Node_List(Arena *a) : Node_Array(a), _cnt(0) {} |
goetz@6478 | 1353 | bool contains(const Node* n) const {  // linear scan; O(size()) |
never@1515 | 1354 | for (uint e = 0; e < size(); e++) { |
never@1515 | 1355 | if (at(e) == n) return true; |
never@1515 | 1356 | } |
never@1515 | 1357 | return false; |
never@1515 | 1358 | } |
duke@435 | 1359 | void insert( uint i, Node *n ) { Node_Array::insert(i,n); _cnt++; } |
duke@435 | 1360 | void remove( uint i ) { Node_Array::remove(i); _cnt--; } |
duke@435 | 1361 | void push( Node *b ) { map(_cnt++,b); } |
duke@435 | 1362 | void yank( Node *n ); // Find and remove |
duke@435 | 1363 | Node *pop() { return _nodes[--_cnt]; }  // no underflow check; caller must ensure non-empty |
duke@435 | 1364 | Node *rpop() { Node *b = _nodes[0]; _nodes[0]=_nodes[--_cnt]; return b;}  // remove head by swapping in the last element (O(1), does not preserve order) |
duke@435 | 1365 | void clear() { _cnt = 0; Node_Array::clear(); } // retain storage |
duke@435 | 1366 | uint size() const { return _cnt; } |
duke@435 | 1367 | void dump() const; |
duke@435 | 1368 | }; |
duke@435 | 1369 | |
duke@435 | 1370 | //------------------------------Unique_Node_List------------------------------- |
duke@435 | 1371 | class Unique_Node_List : public Node_List {  // worklist that rejects duplicate pushes via a membership bit-set |
never@3138 | 1372 | friend class VMStructs; |
duke@435 | 1373 | VectorSet _in_worklist;  // membership bits, keyed by node _idx |
duke@435 | 1374 | uint _clock_index; // Index in list where to pop from next |
duke@435 | 1375 | public: |
duke@435 | 1376 | Unique_Node_List() : Node_List(), _in_worklist(Thread::current()->resource_area()), _clock_index(0) {} |
duke@435 | 1377 | Unique_Node_List(Arena *a) : Node_List(a), _in_worklist(a), _clock_index(0) {} |
duke@435 | 1378 | |
duke@435 | 1379 | void remove( Node *n ); |
duke@435 | 1380 | bool member( Node *n ) { return _in_worklist.test(n->_idx) != 0; } |
duke@435 | 1381 | VectorSet &member_set(){ return _in_worklist; } |
duke@435 | 1382 | |
duke@435 | 1383 | void push( Node *b ) {  // no-op if b is already on the worklist |
duke@435 | 1384 | if( !_in_worklist.test_set(b->_idx) ) |
duke@435 | 1385 | Node_List::push(b); |
duke@435 | 1386 | } |
duke@435 | 1387 | Node *pop() {  // round-robin ("clock") pop rather than strict LIFO |
duke@435 | 1388 | if( _clock_index >= size() ) _clock_index = 0; |
duke@435 | 1389 | Node *b = at(_clock_index); |
kvn@835 | 1390 | map( _clock_index, Node_List::pop()); |
kvn@835 | 1391 | if (size() != 0) _clock_index++; // Always start from 0 |
duke@435 | 1392 | _in_worklist >>= b->_idx;  // drop b from the membership set |
duke@435 | 1393 | return b; |
duke@435 | 1394 | } |
duke@435 | 1395 | Node *remove( uint i ) {  // O(1): fills slot i with the last element (does not preserve order) |
duke@435 | 1396 | Node *b = Node_List::at(i); |
duke@435 | 1397 | _in_worklist >>= b->_idx; |
duke@435 | 1398 | map(i,Node_List::pop()); |
duke@435 | 1399 | return b; |
duke@435 | 1400 | } |
duke@435 | 1401 | void yank( Node *n ) { _in_worklist >>= n->_idx; Node_List::yank(n); } |
duke@435 | 1402 | void clear() { |
duke@435 | 1403 | _in_worklist.Clear(); // Discards storage but grows automatically |
duke@435 | 1404 | Node_List::clear(); |
duke@435 | 1405 | _clock_index = 0; |
duke@435 | 1406 | } |
duke@435 | 1407 | |
duke@435 | 1408 | // Used after parsing to remove useless nodes before Iterative GVN |
duke@435 | 1409 | void remove_useless_nodes(VectorSet &useful); |
duke@435 | 1410 | |
duke@435 | 1411 | #ifndef PRODUCT |
duke@435 | 1412 | void print_set() const { _in_worklist.print(); } |
duke@435 | 1413 | #endif |
duke@435 | 1414 | }; |
duke@435 | 1415 | |
duke@435 | 1416 | // Inline definition of Compile::record_for_igvn must be deferred to this point. |
duke@435 | 1417 | inline void Compile::record_for_igvn(Node* n) { |
duke@435 | 1418 | _for_igvn->push(n);  // duplicate pushes are filtered by Unique_Node_List |
duke@435 | 1419 | } |
duke@435 | 1420 | |
duke@435 | 1421 | //------------------------------Node_Stack------------------------------------- |
duke@435 | 1422 | class Node_Stack {  // growable stack of (node, child-index) pairs |
never@3138 | 1423 | friend class VMStructs; |
duke@435 | 1424 | protected: |
duke@435 | 1425 | struct INode { |
duke@435 | 1426 | Node *node; // Processed node |
duke@435 | 1427 | uint indx; // Index of next node's child |
duke@435 | 1428 | }; |
duke@435 | 1429 | INode *_inode_top; // tos, stack grows up |
duke@435 | 1430 | INode *_inode_max; // End of _inodes == _inodes + _max |
duke@435 | 1431 | INode *_inodes; // Array storage for the stack |
duke@435 | 1432 | Arena *_a; // Arena to allocate in |
duke@435 | 1433 | void grow(); |
duke@435 | 1434 | public: |
duke@435 | 1435 | Node_Stack(int size) {  // backed by the current thread's resource area |
duke@435 | 1436 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
duke@435 | 1437 | _a = Thread::current()->resource_area(); |
duke@435 | 1438 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
duke@435 | 1439 | _inode_max = _inodes + max; |
duke@435 | 1440 | _inode_top = _inodes - 1; // stack is empty |
duke@435 | 1441 | } |
duke@435 | 1442 | |
duke@435 | 1443 | Node_Stack(Arena *a, int size) : _a(a) {  // backed by a caller-supplied arena |
duke@435 | 1444 | size_t max = (size > OptoNodeListSize) ? size : OptoNodeListSize; |
duke@435 | 1445 | _inodes = NEW_ARENA_ARRAY( _a, INode, max ); |
duke@435 | 1446 | _inode_max = _inodes + max; |
duke@435 | 1447 | _inode_top = _inodes - 1; // stack is empty |
duke@435 | 1448 | } |
duke@435 | 1449 | |
duke@435 | 1450 | void pop() { |
duke@435 | 1451 | assert(_inode_top >= _inodes, "node stack underflow"); |
duke@435 | 1452 | --_inode_top; |
duke@435 | 1453 | } |
duke@435 | 1454 | void push(Node *n, uint i) {  // push (node, child-index); grows storage when full |
duke@435 | 1455 | ++_inode_top; |
duke@435 | 1456 | if (_inode_top >= _inode_max) grow(); |
duke@435 | 1457 | INode *top = _inode_top; // optimization |
duke@435 | 1458 | top->node = n; |
duke@435 | 1459 | top->indx = i; |
duke@435 | 1460 | } |
duke@435 | 1461 | Node *node() const {  // top-of-stack node; caller must ensure non-empty |
duke@435 | 1462 | return _inode_top->node; |
duke@435 | 1463 | } |
duke@435 | 1464 | Node* node_at(uint i) const { |
duke@435 | 1465 | assert(_inodes + i <= _inode_top, "in range"); |
duke@435 | 1466 | return _inodes[i].node; |
duke@435 | 1467 | } |
duke@435 | 1468 | uint index() const {  // top-of-stack child index; caller must ensure non-empty |
duke@435 | 1469 | return _inode_top->indx; |
duke@435 | 1470 | } |
kvn@682 | 1471 | uint index_at(uint i) const { |
kvn@682 | 1472 | assert(_inodes + i <= _inode_top, "in range"); |
kvn@682 | 1473 | return _inodes[i].indx; |
kvn@682 | 1474 | } |
duke@435 | 1475 | void set_node(Node *n) { |
duke@435 | 1476 | _inode_top->node = n; |
duke@435 | 1477 | } |
duke@435 | 1478 | void set_index(uint i) { |
duke@435 | 1479 | _inode_top->indx = i; |
duke@435 | 1480 | } |
duke@435 | 1481 | uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size |
kvn@475 | 1482 | uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size |
duke@435 | 1483 | bool is_nonempty() const { return (_inode_top >= _inodes); } |
duke@435 | 1484 | bool is_empty() const { return (_inode_top < _inodes); } |
duke@435 | 1485 | void clear() { _inode_top = _inodes - 1; } // retain storage |
kvn@2985 | 1486 | |
kvn@2985 | 1487 | // Node_Stack is used to map nodes. |
kvn@2985 | 1488 | Node* find(uint idx) const; |
duke@435 | 1489 | }; |
duke@435 | 1490 | |
duke@435 | 1491 | |
duke@435 | 1492 | //-----------------------------Node_Notes-------------------------------------- |
duke@435 | 1493 | // Debugging or profiling annotations loosely and sparsely associated |
duke@435 | 1494 | // with some nodes. See Compile::node_notes_at for the accessor. |
duke@435 | 1495 | class Node_Notes VALUE_OBJ_CLASS_SPEC { |
never@3138 | 1496 | friend class VMStructs; |
duke@435 | 1497 | JVMState* _jvms;  // sole payload: the associated JVM state, or NULL when clear |
duke@435 | 1498 | |
duke@435 | 1499 | public: |
duke@435 | 1500 | Node_Notes(JVMState* jvms = NULL) { |
duke@435 | 1501 | _jvms = jvms; |
duke@435 | 1502 | } |
duke@435 | 1503 | |
duke@435 | 1504 | JVMState* jvms() { return _jvms; } |
duke@435 | 1505 | void set_jvms(JVMState* x) { _jvms = x; } |
duke@435 | 1506 | |
duke@435 | 1507 | // True if there is nothing here. |
duke@435 | 1508 | bool is_clear() { |
duke@435 | 1509 | return (_jvms == NULL); |
duke@435 | 1510 | } |
duke@435 | 1511 | |
duke@435 | 1512 | // Make there be nothing here. |
duke@435 | 1513 | void clear() { |
duke@435 | 1514 | _jvms = NULL; |
duke@435 | 1515 | } |
duke@435 | 1516 | |
duke@435 | 1517 | // Make a new, clean node notes. |
duke@435 | 1518 | static Node_Notes* make(Compile* C) { |
duke@435 | 1519 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
duke@435 | 1520 | nn->clear(); |
duke@435 | 1521 | return nn; |
duke@435 | 1522 | } |
duke@435 | 1523 | |
duke@435 | 1524 | Node_Notes* clone(Compile* C) {  // arena-allocated copy of this notes object |
duke@435 | 1525 | Node_Notes* nn = NEW_ARENA_ARRAY(C->comp_arena(), Node_Notes, 1); |
duke@435 | 1526 | (*nn) = (*this); |
duke@435 | 1527 | return nn; |
duke@435 | 1528 | } |
duke@435 | 1529 | |
duke@435 | 1530 | // Absorb any information from source. |
duke@435 | 1531 | bool update_from(Node_Notes* source) {  // returns true iff anything here changed |
duke@435 | 1532 | bool changed = false; |
duke@435 | 1533 | if (source != NULL) { |
duke@435 | 1534 | if (source->jvms() != NULL) { |
duke@435 | 1535 | set_jvms(source->jvms()); |
duke@435 | 1536 | changed = true; |
duke@435 | 1537 | } |
duke@435 | 1538 | } |
duke@435 | 1539 | return changed; |
duke@435 | 1540 | } |
duke@435 | 1541 | }; |
duke@435 | 1542 | |
duke@435 | 1543 | // Inlined accessors for Compile::node_nodes that require the preceding class: |
duke@435 | 1544 | inline Node_Notes* |
duke@435 | 1545 | Compile::locate_node_notes(GrowableArray<Node_Notes*>* arr, |
duke@435 | 1546 | int idx, bool can_grow) { |
duke@435 | 1547 | assert(idx >= 0, "oob"); |
duke@435 | 1548 | int block_idx = (idx >> _log2_node_notes_block_size); |
duke@435 | 1549 | int grow_by = (block_idx - (arr == NULL? 0: arr->length())); |
duke@435 | 1550 | if (grow_by >= 0) {  // block for idx not allocated yet |
duke@435 | 1551 | if (!can_grow) return NULL;  // read-only probe: absent notes stay absent |
duke@435 | 1552 | grow_node_notes(arr, grow_by + 1); |
duke@435 | 1553 | } |
duke@435 | 1554 | // (Every element of arr is a sub-array of length _node_notes_block_size.) |
duke@435 | 1555 | return arr->at(block_idx) + (idx & (_node_notes_block_size-1)); |
duke@435 | 1556 | } |
duke@435 | 1557 | |
duke@435 | 1558 | inline bool |
duke@435 | 1559 | Compile::set_node_notes_at(int idx, Node_Notes* value) {  // merge value into the notes slot for idx, growing storage as needed |
duke@435 | 1560 | if (value == NULL || value->is_clear()) |
duke@435 | 1561 | return false; // nothing to write => write nothing |
duke@435 | 1562 | Node_Notes* loc = locate_node_notes(_node_note_array, idx, true); |
duke@435 | 1563 | assert(loc != NULL, ""); |
duke@435 | 1564 | return loc->update_from(value); |
duke@435 | 1565 | } |
duke@435 | 1566 | |
duke@435 | 1567 | |
duke@435 | 1568 | //------------------------------TypeNode--------------------------------------- |
duke@435 | 1569 | // Node with a Type constant. |
duke@435 | 1570 | class TypeNode : public Node { |
duke@435 | 1571 | protected: |
duke@435 | 1572 | virtual uint hash() const; // Check the type |
duke@435 | 1573 | virtual uint cmp( const Node &n ) const; |
duke@435 | 1574 | virtual uint size_of() const; // Size is bigger |
duke@435 | 1575 | const Type* const _type;  // the node's type; updated in place only via set_type() |
duke@435 | 1576 | public: |
duke@435 | 1577 | void set_type(const Type* t) { |
duke@435 | 1578 | assert(t != NULL, "sanity"); |
duke@435 | 1579 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); |
duke@435 | 1580 | *(const Type**)&_type = t; // cast away const-ness |
duke@435 | 1581 | // If this node is in the hash table, make sure it doesn't need a rehash. |
duke@435 | 1582 | assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); |
duke@435 | 1583 | } |
duke@435 | 1584 | const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; |
duke@435 | 1585 | TypeNode( const Type *t, uint required ) : Node(required), _type(t) { |
duke@435 | 1586 | init_class_id(Class_Type); |
duke@435 | 1587 | } |
duke@435 | 1588 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 1589 | virtual const Type *bottom_type() const; |
duke@435 | 1590 | virtual uint ideal_reg() const; |
duke@435 | 1591 | #ifndef PRODUCT |
duke@435 | 1592 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 1593 | #endif |
duke@435 | 1594 | }; |
stefank@2314 | 1595 | |
stefank@2314 | 1596 | #endif // SHARE_VM_OPTO_NODE_HPP |