Tue, 25 Jul 2017 10:10:41 -0400
8178047: Aliasing problem with raw memory accesses
Summary: Require equal bases when unaliasing offsets for raw accesses
Reviewed-by: thartmann, kvn
duke@435 | 1 | /* |
dbuck@8879 | 2 | * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #ifndef SHARE_VM_OPTO_MEMNODE_HPP |
stefank@2314 | 26 | #define SHARE_VM_OPTO_MEMNODE_HPP |
stefank@2314 | 27 | |
stefank@2314 | 28 | #include "opto/multnode.hpp" |
stefank@2314 | 29 | #include "opto/node.hpp" |
stefank@2314 | 30 | #include "opto/opcodes.hpp" |
stefank@2314 | 31 | #include "opto/type.hpp" |
stefank@2314 | 32 | |
duke@435 | 33 | // Portions of code courtesy of Clifford Click |
duke@435 | 34 | |
duke@435 | 35 | class MultiNode; |
duke@435 | 36 | class PhaseCCP; |
duke@435 | 37 | class PhaseTransform; |
duke@435 | 38 | |
duke@435 | 39 | //------------------------------MemNode---------------------------------------- |
duke@435 | 40 | // Load or Store, possibly throwing a NULL pointer exception |
duke@435 | 41 | class MemNode : public Node { |
shshahma@8653 | 42 | private: |
shshahma@8653 | 43 | bool _unaligned_access; // Unaligned access from unsafe |
shshahma@8653 | 44 | bool _mismatched_access; // Mismatched access from unsafe: byte read in integer array for instance |
duke@435 | 45 | protected: |
duke@435 | 46 | #ifdef ASSERT |
duke@435 | 47 | const TypePtr* _adr_type; // What kind of memory is being addressed? |
duke@435 | 48 | #endif |
shshahma@8653 | 49 | virtual uint size_of() const; |
duke@435 | 50 | public: |
duke@435 | 51 | enum { Control, // When is it safe to do this load? |
duke@435 | 52 | Memory, // Chunk of memory is being loaded from |
duke@435 | 53 | Address, // Actual address, derived from base |
duke@435 | 54 | ValueIn, // Value to store |
duke@435 | 55 | OopStore // Preceding oop store, only in StoreCM |
duke@435 | 56 | }; |
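// [Editor's sketch, not part of this change] These enum values name the
// node's fixed inputs, so for a store node 'st' and a load node 'ld':
//   Node* val = st->in(MemNode::ValueIn);   // value being stored
//   Node* adr = ld->in(MemNode::Address);   // address being loaded from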
goetz@6479 | 57 | typedef enum { unordered = 0, |
goetz@6479 | 58 | acquire, // Load has to acquire or be succeeded by MemBarAcquire. |
goetz@6479 | 59 | release // Store has to release or be preceded by MemBarRelease. |
goetz@6479 | 60 | } MemOrd; |
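// [Editor's sketch, not part of this change] A volatile Java read is
// typically built as a load with MemOrd::acquire via the LoadNode::make()
// factory declared below, while a plain read passes MemNode::unordered:
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
//                             TypeInt::INT, T_INT, MemNode::acquire);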
duke@435 | 61 | protected: |
duke@435 | 62 | MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at ) |
shshahma@8653 | 63 | : Node(c0,c1,c2 ), _unaligned_access(false), _mismatched_access(false) { |
duke@435 | 64 | init_class_id(Class_Mem); |
duke@435 | 65 | debug_only(_adr_type=at; adr_type();) |
duke@435 | 66 | } |
duke@435 | 67 | MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 ) |
shshahma@8653 | 68 | : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) { |
duke@435 | 69 | init_class_id(Class_Mem); |
duke@435 | 70 | debug_only(_adr_type=at; adr_type();) |
duke@435 | 71 | } |
duke@435 | 72 | MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4) |
shshahma@8653 | 73 | : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) { |
duke@435 | 74 | init_class_id(Class_Mem); |
duke@435 | 75 | debug_only(_adr_type=at; adr_type();) |
duke@435 | 76 | } |
duke@435 | 77 | |
dbuck@8879 | 78 | static bool check_if_adr_maybe_raw(Node* adr); |
dbuck@8879 | 79 | |
kvn@468 | 80 | public: |
duke@435 | 81 | // Helpers for the optimizer. Documented in memnode.cpp. |
duke@435 | 82 | static bool detect_ptr_independence(Node* p1, AllocateNode* a1, |
duke@435 | 83 | Node* p2, AllocateNode* a2, |
duke@435 | 84 | PhaseTransform* phase); |
duke@435 | 85 | static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast); |
duke@435 | 86 | |
kvn@5110 | 87 | static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase); |
kvn@5110 | 88 | static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase); |
duke@435 | 89 | // This one should probably be a phase-specific function: |
kvn@520 | 90 | static bool all_controls_dominate(Node* dom, Node* sub); |
duke@435 | 91 | |
kvn@598 | 92 | // Find any cast-away of null-ness and keep its control. |
kvn@598 | 93 | static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr ); |
duke@435 | 94 | virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp ); |
duke@435 | 95 | |
duke@435 | 96 | virtual const class TypePtr *adr_type() const; // returns bottom_type of address |
duke@435 | 97 | |
duke@435 | 98 | // Shared code for Ideal methods: |
duke@435 | 99 | Node *Ideal_common(PhaseGVN *phase, bool can_reshape); // Return -1 for short-circuit NULL. |
duke@435 | 100 | |
duke@435 | 101 | // Helper function for adr_type() implementations. |
duke@435 | 102 | static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL); |
duke@435 | 103 | |
duke@435 | 104 | // Raw access function, to allow copying of adr_type efficiently in |
duke@435 | 105 | // product builds and retain the debug info for debug builds. |
duke@435 | 106 | const TypePtr *raw_adr_type() const { |
duke@435 | 107 | #ifdef ASSERT |
duke@435 | 108 | return _adr_type; |
duke@435 | 109 | #else |
duke@435 | 110 | return 0; |
duke@435 | 111 | #endif |
duke@435 | 112 | } |
duke@435 | 113 | |
duke@435 | 114 | // Map a load or store opcode to its corresponding store opcode. |
duke@435 | 115 | // (Return -1 if unknown.) |
duke@435 | 116 | virtual int store_Opcode() const { return -1; } |
duke@435 | 117 | |
duke@435 | 118 | // What is the type of the value in memory? (T_VOID means "unspecified".) |
duke@435 | 119 | virtual BasicType memory_type() const = 0; |
kvn@464 | 120 | virtual int memory_size() const { |
kvn@464 | 121 | #ifdef ASSERT |
kvn@464 | 122 | return type2aelembytes(memory_type(), true); |
kvn@464 | 123 | #else |
kvn@464 | 124 | return type2aelembytes(memory_type()); |
kvn@464 | 125 | #endif |
kvn@464 | 126 | } |
duke@435 | 127 | |
duke@435 | 128 | // Search through memory states which precede this node (load or store). |
duke@435 | 129 | // Look for an exact match for the address, with no intervening |
duke@435 | 130 | // aliased stores. |
duke@435 | 131 | Node* find_previous_store(PhaseTransform* phase); |
duke@435 | 132 | |
duke@435 | 133 | // Can this node (load or store) accurately see a stored value in |
duke@435 | 134 | // the given memory state? (The state may or may not be in(Memory).) |
duke@435 | 135 | Node* can_see_stored_value(Node* st, PhaseTransform* phase) const; |
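// [Editor's sketch] Intended use: if 'st' is a StoreI of value 'v' to
// address 'adr', and this node is a LoadI from 'adr' whose memory input
// reaches 'st' with no intervening aliased store, then
//   can_see_stored_value(st, phase)
// returns 'v', allowing Identity() to replace the load with 'v'.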
duke@435 | 136 | |
shshahma@8653 | 137 | void set_unaligned_access() { _unaligned_access = true; } |
shshahma@8653 | 138 | bool is_unaligned_access() const { return _unaligned_access; } |
shshahma@8653 | 139 | void set_mismatched_access() { _mismatched_access = true; } |
shshahma@8653 | 140 | bool is_mismatched_access() const { return _mismatched_access; } |
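// [Editor's note, illustrative] A mismatched access arises from unsafe
// code such as the following Java snippet, which performs an int-sized
// read of byte[] memory ('UNSAFE' and 'bytes' are assumed locals):
//   long off = UNSAFE.arrayBaseOffset(byte[].class);
//   int v = UNSAFE.getInt(bytes, off);
// Such accesses are flagged with set_mismatched_access() so that
// type-based alias analysis draws no unsound conclusions from them.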
shshahma@8653 | 141 | |
duke@435 | 142 | #ifndef PRODUCT |
duke@435 | 143 | static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st); |
duke@435 | 144 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 145 | #endif |
duke@435 | 146 | }; |
duke@435 | 147 | |
duke@435 | 148 | //------------------------------LoadNode--------------------------------------- |
duke@435 | 149 | // Load value; requires Memory and Address |
duke@435 | 150 | class LoadNode : public MemNode { |
roland@7859 | 151 | public: |
roland@7859 | 152 | // Some loads (from unsafe) should be pinned: they don't depend only |
roland@7859 | 153 | // on the dominating test. The boolean field _depends_only_on_test |
roland@7859 | 154 | // below records whether that node depends only on the dominating |
roland@7859 | 155 | // test. |
roland@7859 | 156 | // Methods used to build LoadNodes pass an argument of type enum |
roland@7859 | 157 | // ControlDependency instead of a boolean because those methods |
roland@7859 | 158 | // typically have multiple boolean parameters with default values: |
roland@7859 | 159 | // passing the wrong boolean to one of these parameters by mistake |
roland@7859 | 160 | // goes easily unnoticed. Using an enum, the compiler can check that |
roland@7859 | 161 | // the type of a value and the type of the parameter match. |
roland@7859 | 162 | enum ControlDependency { |
roland@7859 | 163 | Pinned, |
roland@7859 | 164 | DependsOnlyOnTest |
roland@7859 | 165 | }; |
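// [Editor's illustration] The mistake this enum prevents: if the control
// dependency were a plain bool, a call such as
//   new LoadLNode(c, mem, adr, at, tl, mo, /*require_atomic*/ true)
// would silently bind 'true' to the control-dependency slot; with the
// enum, the same call only compiles when an explicit LoadNode::Pinned or
// LoadNode::DependsOnlyOnTest reaches the ControlDependency parameter.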
goetz@6479 | 166 | private: |
roland@7859 | 167 | // LoadNode::hash() doesn't take the _depends_only_on_test field |
roland@7859 | 168 | // into account: If the graph already has a non-pinned LoadNode and |
roland@7859 | 169 | // we add a pinned LoadNode with the same inputs, it's safe for GVN |
roland@7859 | 170 | // to replace the pinned LoadNode with the non-pinned LoadNode, |
roland@7859 | 171 | // otherwise it wouldn't be safe to have a non pinned LoadNode with |
roland@7859 | 172 | // those inputs in the first place. If the graph already has a |
roland@7859 | 173 | // pinned LoadNode and we add a non pinned LoadNode with the same |
roland@7859 | 174 | // inputs, it's safe (but suboptimal) for GVN to replace the |
roland@7859 | 175 | // non-pinned LoadNode by the pinned LoadNode. |
roland@7859 | 176 | bool _depends_only_on_test; |
roland@7859 | 177 | |
goetz@6479 | 178 | // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish |
goetz@6479 | 179 | // loads that can be reordered, and such requiring acquire semantics to |
goetz@6479 | 180 | // adhere to the Java specification. The required behaviour is stored in |
goetz@6479 | 181 | // this field. |
goetz@6479 | 182 | const MemOrd _mo; |
goetz@6479 | 183 | |
duke@435 | 184 | protected: |
goetz@6479 | 185 | virtual uint cmp(const Node &n) const; |
duke@435 | 186 | virtual uint size_of() const; // Size is bigger |
zmajo@7341 | 187 | // Should LoadNode::Ideal() attempt to remove control edges? |
zmajo@7341 | 188 | virtual bool can_remove_control() const; |
duke@435 | 189 | const Type* const _type; // What kind of value is loaded? |
duke@435 | 190 | public: |
duke@435 | 191 | |
roland@7859 | 192 | LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency) |
roland@7859 | 193 | : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) { |
duke@435 | 194 | init_class_id(Class_Load); |
duke@435 | 195 | } |
goetz@6479 | 196 | inline bool is_unordered() const { return !is_acquire(); } |
goetz@6479 | 197 | inline bool is_acquire() const { |
goetz@6479 | 198 | assert(_mo == unordered || _mo == acquire, "unexpected"); |
goetz@6479 | 199 | return _mo == acquire; |
goetz@6479 | 200 | } |
duke@435 | 201 | |
duke@435 | 202 | // Polymorphic factory method: |
goetz@6479 | 203 | static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, |
roland@7859 | 204 | const TypePtr* at, const Type *rt, BasicType bt, |
roland@7859 | 205 | MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest); |
duke@435 | 206 | |
duke@435 | 207 | virtual uint hash() const; // Check the type |
duke@435 | 208 | |
duke@435 | 209 | // Handle algebraic identities here. If we have an identity, return the Node |
duke@435 | 210 | // we are equivalent to. We look for Load of a Store. |
duke@435 | 211 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 212 | |
zmajo@7341 | 213 | // If the load is from Field memory and the pointer is non-null, it might be possible to |
duke@435 | 214 | // zero out the control input. |
zmajo@7341 | 215 | // If the offset is constant and the base is an object allocation, |
zmajo@7341 | 216 | // try to hook me up to the exact initializing store. |
duke@435 | 217 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 218 | |
kvn@598 | 219 | // Split instance field load through Phi. |
kvn@598 | 220 | Node* split_through_phi(PhaseGVN *phase); |
kvn@598 | 221 | |
never@452 | 222 | // Recover original value from boxed values |
never@452 | 223 | Node *eliminate_autobox(PhaseGVN *phase); |
never@452 | 224 | |
duke@435 | 225 | // Compute a new Type for this node. Basically we just do the pre-check, |
duke@435 | 226 | // then call the virtual add() to set the type. |
duke@435 | 227 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 228 | |
kvn@599 | 229 | // Common methods for LoadKlass and LoadNKlass nodes. |
kvn@599 | 230 | const Type *klass_value_common( PhaseTransform *phase ) const; |
kvn@599 | 231 | Node *klass_identity_common( PhaseTransform *phase ); |
kvn@599 | 232 | |
duke@435 | 233 | virtual uint ideal_reg() const; |
duke@435 | 234 | virtual const Type *bottom_type() const; |
duke@435 | 235 | // Following method is copied from TypeNode: |
duke@435 | 236 | void set_type(const Type* t) { |
duke@435 | 237 | assert(t != NULL, "sanity"); |
duke@435 | 238 | debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH); |
duke@435 | 239 | *(const Type**)&_type = t; // cast away const-ness |
duke@435 | 240 | // If this node is in the hash table, make sure it doesn't need a rehash. |
duke@435 | 241 | assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code"); |
duke@435 | 242 | } |
duke@435 | 243 | const Type* type() const { assert(_type != NULL, "sanity"); return _type; }; |
duke@435 | 244 | |
duke@435 | 245 | // Do not match memory edge |
duke@435 | 246 | virtual uint match_edge(uint idx) const; |
duke@435 | 247 | |
duke@435 | 248 | // Map a load opcode to its corresponding store opcode. |
duke@435 | 249 | virtual int store_Opcode() const = 0; |
duke@435 | 250 | |
kvn@499 | 251 | // Check if the load's memory input is a Phi node with the same control. |
kvn@499 | 252 | bool is_instance_field_load_with_local_phi(Node* ctrl); |
kvn@499 | 253 | |
duke@435 | 254 | #ifndef PRODUCT |
duke@435 | 255 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 256 | #endif |
kvn@1964 | 257 | #ifdef ASSERT |
kvn@1964 | 258 | // Helper function to allow a raw load without control edge for some cases |
kvn@1964 | 259 | static bool is_immutable_value(Node* adr); |
kvn@1964 | 260 | #endif |
duke@435 | 261 | protected: |
duke@435 | 262 | const Type* load_array_final_field(const TypeKlassPtr *tkls, |
duke@435 | 263 | ciKlass* klass) const; |
iveresov@6070 | 264 | // depends_only_on_test is almost always true, and needs to be almost always |
iveresov@6070 | 265 | // true to enable key hoisting & commoning optimizations. However, for the |
iveresov@6070 | 266 | // special case of RawPtr loads from TLS top & end, and other loads performed by |
iveresov@6070 | 267 | // GC barriers, the control edge carries the dependence preventing hoisting past |
iveresov@6070 | 268 | // a Safepoint instead of the memory edge. (An unfortunate consequence of having |
iveresov@6070 | 269 | // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes |
iveresov@6070 | 270 | // which produce results (new raw memory state) inside of loops preventing all |
iveresov@6070 | 271 | // manner of other optimizations). Basically, it's ugly but so is the alternative. |
iveresov@6070 | 272 | // See comment in macro.cpp, around line 125 expand_allocate_common(). |
roland@7859 | 273 | virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; } |
duke@435 | 274 | }; |
duke@435 | 275 | |
duke@435 | 276 | //------------------------------LoadBNode-------------------------------------- |
duke@435 | 277 | // Load a byte (8bits signed) from memory |
duke@435 | 278 | class LoadBNode : public LoadNode { |
duke@435 | 279 | public: |
roland@7859 | 280 | LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 281 | : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} |
duke@435 | 282 | virtual int Opcode() const; |
duke@435 | 283 | virtual uint ideal_reg() const { return Op_RegI; } |
duke@435 | 284 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@3442 | 285 | virtual const Type *Value(PhaseTransform *phase) const; |
duke@435 | 286 | virtual int store_Opcode() const { return Op_StoreB; } |
duke@435 | 287 | virtual BasicType memory_type() const { return T_BYTE; } |
duke@435 | 288 | }; |
duke@435 | 289 | |
twisti@1059 | 290 | //------------------------------LoadUBNode------------------------------------- |
twisti@1059 | 291 | // Load an unsigned byte (8bits unsigned) from memory |
twisti@1059 | 292 | class LoadUBNode : public LoadNode { |
twisti@1059 | 293 | public: |
roland@7859 | 294 | LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 295 | : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} |
twisti@1059 | 296 | virtual int Opcode() const; |
twisti@1059 | 297 | virtual uint ideal_reg() const { return Op_RegI; } |
twisti@1059 | 298 | virtual Node* Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@3442 | 299 | virtual const Type *Value(PhaseTransform *phase) const; |
twisti@1059 | 300 | virtual int store_Opcode() const { return Op_StoreB; } |
twisti@1059 | 301 | virtual BasicType memory_type() const { return T_BYTE; } |
twisti@1059 | 302 | }; |
twisti@1059 | 303 | |
twisti@993 | 304 | //------------------------------LoadUSNode------------------------------------- |
twisti@993 | 305 | // Load an unsigned short/char (16bits unsigned) from memory |
twisti@993 | 306 | class LoadUSNode : public LoadNode { |
duke@435 | 307 | public: |
roland@7859 | 308 | LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 309 | : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} |
duke@435 | 310 | virtual int Opcode() const; |
duke@435 | 311 | virtual uint ideal_reg() const { return Op_RegI; } |
duke@435 | 312 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@3442 | 313 | virtual const Type *Value(PhaseTransform *phase) const; |
duke@435 | 314 | virtual int store_Opcode() const { return Op_StoreC; } |
duke@435 | 315 | virtual BasicType memory_type() const { return T_CHAR; } |
duke@435 | 316 | }; |
duke@435 | 317 | |
kvn@3442 | 318 | //------------------------------LoadSNode-------------------------------------- |
kvn@3442 | 319 | // Load a short (16bits signed) from memory |
kvn@3442 | 320 | class LoadSNode : public LoadNode { |
kvn@3442 | 321 | public: |
roland@7859 | 322 | LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 323 | : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} |
kvn@3442 | 324 | virtual int Opcode() const; |
kvn@3442 | 325 | virtual uint ideal_reg() const { return Op_RegI; } |
kvn@3442 | 326 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@3442 | 327 | virtual const Type *Value(PhaseTransform *phase) const; |
kvn@3442 | 328 | virtual int store_Opcode() const { return Op_StoreC; } |
kvn@3442 | 329 | virtual BasicType memory_type() const { return T_SHORT; } |
kvn@3442 | 330 | }; |
kvn@3442 | 331 | |
duke@435 | 332 | //------------------------------LoadINode-------------------------------------- |
duke@435 | 333 | // Load an integer from memory |
duke@435 | 334 | class LoadINode : public LoadNode { |
duke@435 | 335 | public: |
roland@7859 | 336 | LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 337 | : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {} |
duke@435 | 338 | virtual int Opcode() const; |
duke@435 | 339 | virtual uint ideal_reg() const { return Op_RegI; } |
duke@435 | 340 | virtual int store_Opcode() const { return Op_StoreI; } |
duke@435 | 341 | virtual BasicType memory_type() const { return T_INT; } |
duke@435 | 342 | }; |
duke@435 | 343 | |
duke@435 | 344 | //------------------------------LoadRangeNode---------------------------------- |
duke@435 | 345 | // Load an array length from the array |
duke@435 | 346 | class LoadRangeNode : public LoadINode { |
duke@435 | 347 | public: |
goetz@6479 | 348 | LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS) |
goetz@6479 | 349 | : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {} |
duke@435 | 350 | virtual int Opcode() const; |
duke@435 | 351 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 352 | virtual Node *Identity( PhaseTransform *phase ); |
rasbold@801 | 353 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 354 | }; |
duke@435 | 355 | |
duke@435 | 356 | //------------------------------LoadLNode-------------------------------------- |
duke@435 | 357 | // Load a long from memory |
duke@435 | 358 | class LoadLNode : public LoadNode { |
duke@435 | 359 | virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } |
duke@435 | 360 | virtual uint cmp( const Node &n ) const { |
duke@435 | 361 | return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access |
duke@435 | 362 | && LoadNode::cmp(n); |
duke@435 | 363 | } |
duke@435 | 364 | virtual uint size_of() const { return sizeof(*this); } |
duke@435 | 365 | const bool _require_atomic_access; // is piecewise load forbidden? |
duke@435 | 366 | |
duke@435 | 367 | public: |
goetz@6479 | 368 | LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl, |
roland@7859 | 369 | MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) |
roland@7859 | 370 | : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {} |
duke@435 | 371 | virtual int Opcode() const; |
duke@435 | 372 | virtual uint ideal_reg() const { return Op_RegL; } |
duke@435 | 373 | virtual int store_Opcode() const { return Op_StoreL; } |
duke@435 | 374 | virtual BasicType memory_type() const { return T_LONG; } |
anoll@7858 | 375 | bool require_atomic_access() const { return _require_atomic_access; } |
goetz@6479 | 376 | static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, |
roland@7859 | 377 | const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest); |
duke@435 | 378 | #ifndef PRODUCT |
duke@435 | 379 | virtual void dump_spec(outputStream *st) const { |
duke@435 | 380 | LoadNode::dump_spec(st); |
duke@435 | 381 | if (_require_atomic_access) st->print(" Atomic!"); |
duke@435 | 382 | } |
duke@435 | 383 | #endif |
duke@435 | 384 | }; |
duke@435 | 385 | |
duke@435 | 386 | //------------------------------LoadL_unalignedNode---------------------------- |
duke@435 | 387 | // Load a long from unaligned memory |
duke@435 | 388 | class LoadL_unalignedNode : public LoadLNode { |
duke@435 | 389 | public: |
roland@7859 | 390 | LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 391 | : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {} |
duke@435 | 392 | virtual int Opcode() const; |
duke@435 | 393 | }; |
duke@435 | 394 | |
duke@435 | 395 | //------------------------------LoadFNode-------------------------------------- |
duke@435 | 396 | // Load a float (32 bits) from memory |
duke@435 | 397 | class LoadFNode : public LoadNode { |
duke@435 | 398 | public: |
roland@7859 | 399 | LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 400 | : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} |
duke@435 | 401 | virtual int Opcode() const; |
duke@435 | 402 | virtual uint ideal_reg() const { return Op_RegF; } |
duke@435 | 403 | virtual int store_Opcode() const { return Op_StoreF; } |
duke@435 | 404 | virtual BasicType memory_type() const { return T_FLOAT; } |
duke@435 | 405 | }; |
duke@435 | 406 | |
duke@435 | 407 | //------------------------------LoadDNode-------------------------------------- |
duke@435 | 408 | // Load a double (64 bits) from memory |
duke@435 | 409 | class LoadDNode : public LoadNode { |
anoll@7858 | 410 | virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; } |
anoll@7858 | 411 | virtual uint cmp( const Node &n ) const { |
anoll@7858 | 412 | return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access |
anoll@7858 | 413 | && LoadNode::cmp(n); |
anoll@7858 | 414 | } |
anoll@7858 | 415 | virtual uint size_of() const { return sizeof(*this); } |
anoll@7858 | 416 | const bool _require_atomic_access; // is piecewise load forbidden? |
anoll@7858 | 417 | |
duke@435 | 418 | public: |
anoll@7858 | 419 | LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, |
roland@7859 | 420 | MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false) |
roland@7859 | 421 | : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {} |
duke@435 | 422 | virtual int Opcode() const; |
duke@435 | 423 | virtual uint ideal_reg() const { return Op_RegD; } |
duke@435 | 424 | virtual int store_Opcode() const { return Op_StoreD; } |
duke@435 | 425 | virtual BasicType memory_type() const { return T_DOUBLE; } |
anoll@7858 | 426 | bool require_atomic_access() const { return _require_atomic_access; } |
anoll@7858 | 427 | static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, |
roland@7859 | 428 | const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest); |
anoll@7858 | 429 | #ifndef PRODUCT |
anoll@7858 | 430 | virtual void dump_spec(outputStream *st) const { |
anoll@7858 | 431 | LoadNode::dump_spec(st); |
anoll@7858 | 432 | if (_require_atomic_access) st->print(" Atomic!"); |
anoll@7858 | 433 | } |
anoll@7858 | 434 | #endif |
duke@435 | 435 | }; |
duke@435 | 436 | |
duke@435 | 437 | //------------------------------LoadD_unalignedNode---------------------------- |
duke@435 | 438 | // Load a double from unaligned memory |
duke@435 | 439 | class LoadD_unalignedNode : public LoadDNode { |
duke@435 | 440 | public: |
roland@7859 | 441 | LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 442 | : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {} |
duke@435 | 443 | virtual int Opcode() const; |
duke@435 | 444 | }; |
duke@435 | 445 | |
duke@435 | 446 | //------------------------------LoadPNode-------------------------------------- |
duke@435 | 447 | // Load a pointer from memory (either object or array) |
duke@435 | 448 | class LoadPNode : public LoadNode { |
duke@435 | 449 | public: |
roland@7859 | 450 | LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 451 | : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} |
duke@435 | 452 | virtual int Opcode() const; |
duke@435 | 453 | virtual uint ideal_reg() const { return Op_RegP; } |
duke@435 | 454 | virtual int store_Opcode() const { return Op_StoreP; } |
duke@435 | 455 | virtual BasicType memory_type() const { return T_ADDRESS; } |
duke@435 | 456 | }; |
duke@435 | 457 | |
coleenp@548 | 458 | |
coleenp@548 | 459 | //------------------------------LoadNNode-------------------------------------- |
coleenp@548 | 460 | // Load a narrow oop from memory (either object or array) |
coleenp@548 | 461 | class LoadNNode : public LoadNode { |
coleenp@548 | 462 | public: |
roland@7859 | 463 | LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest) |
roland@7859 | 464 | : LoadNode(c, mem, adr, at, t, mo, control_dependency) {} |
coleenp@548 | 465 | virtual int Opcode() const; |
coleenp@548 | 466 | virtual uint ideal_reg() const { return Op_RegN; } |
coleenp@548 | 467 | virtual int store_Opcode() const { return Op_StoreN; } |
coleenp@548 | 468 | virtual BasicType memory_type() const { return T_NARROWOOP; } |
coleenp@548 | 469 | }; |
coleenp@548 | 470 | |
duke@435 | 471 | //------------------------------LoadKlassNode---------------------------------- |
duke@435 | 472 | // Load a Klass from an object |
duke@435 | 473 | class LoadKlassNode : public LoadPNode { |
zmajo@7341 | 474 | protected: |
zmajo@7341 | 475 | // In most cases, LoadKlassNode does not have the control input set. If the control |
zmajo@7341 | 476 | // input is set, it must not be removed (by LoadNode::Ideal()). |
zmajo@7341 | 477 | virtual bool can_remove_control() const; |
duke@435 | 478 | public: |
goetz@6479 | 479 | LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo) |
goetz@6479 | 480 | : LoadPNode(c, mem, adr, at, tk, mo) {} |
duke@435 | 481 | virtual int Opcode() const; |
duke@435 | 482 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 483 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 484 | virtual bool depends_only_on_test() const { return true; } |
kvn@599 | 485 | |
kvn@599 | 486 | // Polymorphic factory method: |
zmajo@7341 | 487 | static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at, |
zmajo@7341 | 488 | const TypeKlassPtr* tk = TypeKlassPtr::OBJECT); |
duke@435 | 489 | }; |
duke@435 | 490 | |
kvn@599 | 491 | //------------------------------LoadNKlassNode--------------------------------- |
kvn@599 | 492 | // Load a narrow Klass from an object. |
kvn@599 | 493 | class LoadNKlassNode : public LoadNNode { |
kvn@599 | 494 | public: |
goetz@6479 | 495 | LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo) |
goetz@6479 | 496 | : LoadNNode(c, mem, adr, at, tk, mo) {} |
kvn@599 | 497 | virtual int Opcode() const; |
kvn@599 | 498 | virtual uint ideal_reg() const { return Op_RegN; } |
roland@4159 | 499 | virtual int store_Opcode() const { return Op_StoreNKlass; } |
roland@4159 | 500 | virtual BasicType memory_type() const { return T_NARROWKLASS; } |
kvn@599 | 501 | |
kvn@599 | 502 | virtual const Type *Value( PhaseTransform *phase ) const; |
kvn@599 | 503 | virtual Node *Identity( PhaseTransform *phase ); |
kvn@599 | 504 | virtual bool depends_only_on_test() const { return true; } |
kvn@599 | 505 | }; |
kvn@599 | 506 | |
kvn@599 | 507 | |
duke@435 | 508 | //------------------------------StoreNode-------------------------------------- |
duke@435 | 509 | // Store value; requires Memory, Address and Value |
duke@435 | 510 | class StoreNode : public MemNode { |
goetz@6479 | 511 | private: |
goetz@6479 | 512 | // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish |
goetz@6479 | 513 | // stores that can be reordered, and such requiring release semantics to |
goetz@6479 | 514 | // adhere to the Java specification. The required behaviour is stored in |
goetz@6479 | 515 | // this field. |
goetz@6479 | 516 | const MemOrd _mo; |
goetz@6479 | 517 | // Needed for proper cloning. |
goetz@6479 | 518 | virtual uint size_of() const { return sizeof(*this); } |
duke@435 | 519 | protected: |
duke@435 | 520 | virtual uint cmp( const Node &n ) const; |
duke@435 | 521 | virtual bool depends_only_on_test() const { return false; } |
duke@435 | 522 | |
duke@435 | 523 | Node *Ideal_masked_input (PhaseGVN *phase, uint mask); |
duke@435 | 524 | Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits); |
duke@435 | 525 | |
duke@435 | 526 | public: |
goetz@6479 | 527 | // We must ensure that stores of object references will be visible |
goetz@6479 | 528 | // only after the object's initialization. So the callers of this |
goetz@6479 | 529 | // procedure must indicate that the store requires `release' |
goetz@6479 | 530 | // semantics, if the stored value is an object reference that might |
goetz@6479 | 531 | // point to a new object and may become externally visible. |
goetz@6479 | 532 | StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 533 | : MemNode(c, mem, adr, at, val), _mo(mo) { |
duke@435 | 534 | init_class_id(Class_Store); |
duke@435 | 535 | } |
goetz@6479 | 536 | StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo) |
goetz@6479 | 537 | : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) { |
duke@435 | 538 | init_class_id(Class_Store); |
duke@435 | 539 | } |
duke@435 | 540 | |
goetz@6479 | 541 | inline bool is_unordered() const { return !is_release(); } |
goetz@6479 | 542 | inline bool is_release() const { |
goetz@6479 | 543 | assert((_mo == unordered || _mo == release), "unexpected"); |
goetz@6479 | 544 | return _mo == release; |
goetz@6479 | 545 | } |
goetz@6479 | 546 | |
goetz@6479 | 547 | // Conservatively release stores of object references in order to |
goetz@6479 | 548 | // ensure visibility of object initialization. |
goetz@6479 | 549 | static inline MemOrd release_if_reference(const BasicType t) { |
goetz@6479 | 550 | const MemOrd mo = (t == T_ARRAY || |
goetz@6479 | 551 | t == T_ADDRESS || // Might be the address of an object reference (`boxing'). |
goetz@6479 | 552 | t == T_OBJECT) ? release : unordered; |
goetz@6479 | 553 | return mo; |
goetz@6479 | 554 | } |
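// [Editor's sketch] Typical use when emitting an oop store: pick the
// ordering with this helper so a reference store cannot be reordered
// before the initializing stores of the object it publishes:
//   const MemNode::MemOrd mo = StoreNode::release_if_reference(T_OBJECT);
//   Node* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_OBJECT, mo);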
goetz@6479 | 555 | |
goetz@6479 | 556 | // Polymorphic factory method |
goetz@6479 | 557 | // |
goetz@6479 | 558 | // We must ensure that stores of object references will be visible |
goetz@6479 | 559 | // only after the object's initialization. So the callers of this |
goetz@6479 | 560 | // procedure must indicate that the store requires `release' |
goetz@6479 | 561 | // semantics, if the stored value is an object reference that might |
goetz@6479 | 562 | // point to a new object and may become externally visible. |
goetz@6479 | 563 | static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr, |
goetz@6479 | 564 | const TypePtr* at, Node *val, BasicType bt, MemOrd mo); |
duke@435 | 565 | |
duke@435 | 566 | virtual uint hash() const; // Check the type |
duke@435 | 567 | |
duke@435 | 568 | // If the store is to Field memory and the pointer is non-null, we can |
duke@435 | 569 | // zero out the control input. |
duke@435 | 570 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 571 | |
duke@435 | 572 | // Compute a new Type for this node. Basically we just do the pre-check, |
duke@435 | 573 | // then call the virtual add() to set the type. |
duke@435 | 574 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 575 | |
duke@435 | 576 | // Check for identity function on memory (Load then Store at same address) |
duke@435 | 577 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 578 | |
duke@435 | 579 | // Do not match memory edge |
duke@435 | 580 | virtual uint match_edge(uint idx) const; |
duke@435 | 581 | |
duke@435 | 582 | virtual const Type *bottom_type() const; // returns Type::MEMORY |
duke@435 | 583 | |
duke@435 | 584 | // Map a store opcode to its corresponding own opcode, trivially. |
duke@435 | 585 | virtual int store_Opcode() const { return Opcode(); } |
duke@435 | 586 | |
duke@435 | 587 | // Have all possible loads of the stored value been optimized away? |
duke@435 | 588 | bool value_never_loaded(PhaseTransform *phase) const; |
duke@435 | 589 | }; |
duke@435 | 590 | |
duke@435 | 591 | //------------------------------StoreBNode------------------------------------- |
duke@435 | 592 | // Store byte to memory |
duke@435 | 593 | class StoreBNode : public StoreNode { |
duke@435 | 594 | public: |
goetz@6479 | 595 | StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 596 | : StoreNode(c, mem, adr, at, val, mo) {} |
duke@435 | 597 | virtual int Opcode() const; |
duke@435 | 598 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 599 | virtual BasicType memory_type() const { return T_BYTE; } |
duke@435 | 600 | }; |
duke@435 | 601 | |
duke@435 | 602 | //------------------------------StoreCNode------------------------------------- |
duke@435 | 603 | // Store char/short to memory |
duke@435 | 604 | class StoreCNode : public StoreNode { |
duke@435 | 605 | public: |
goetz@6479 | 606 | StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 607 | : StoreNode(c, mem, adr, at, val, mo) {} |
duke@435 | 608 | virtual int Opcode() const; |
duke@435 | 609 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 610 | virtual BasicType memory_type() const { return T_CHAR; } |
duke@435 | 611 | }; |
duke@435 | 612 | |
duke@435 | 613 | //------------------------------StoreINode------------------------------------- |
duke@435 | 614 | // Store int to memory |
duke@435 | 615 | class StoreINode : public StoreNode { |
duke@435 | 616 | public: |
goetz@6479 | 617 | StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 618 | : StoreNode(c, mem, adr, at, val, mo) {} |
duke@435 | 619 | virtual int Opcode() const; |
duke@435 | 620 | virtual BasicType memory_type() const { return T_INT; } |
duke@435 | 621 | }; |
duke@435 | 622 | |
duke@435 | 623 | //------------------------------StoreLNode------------------------------------- |
duke@435 | 624 | // Store long to memory |
duke@435 | 625 | class StoreLNode : public StoreNode { |
duke@435 | 626 | virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } |
duke@435 | 627 | virtual uint cmp( const Node &n ) const { |
duke@435 | 628 | return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access |
duke@435 | 629 | && StoreNode::cmp(n); |
duke@435 | 630 | } |
duke@435 | 631 | virtual uint size_of() const { return sizeof(*this); } |
duke@435 | 632 | const bool _require_atomic_access; // is piecewise store forbidden? |
duke@435 | 633 | |
duke@435 | 634 | public: |
goetz@6479 | 635 | StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false) |
goetz@6479 | 636 | : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} |
duke@435 | 637 | virtual int Opcode() const; |
duke@435 | 638 | virtual BasicType memory_type() const { return T_LONG; } |
anoll@7858 | 639 | bool require_atomic_access() const { return _require_atomic_access; } |
goetz@6479 | 640 | static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); |
duke@435 | 641 | #ifndef PRODUCT |
duke@435 | 642 | virtual void dump_spec(outputStream *st) const { |
duke@435 | 643 | StoreNode::dump_spec(st); |
duke@435 | 644 | if (_require_atomic_access) st->print(" Atomic!"); |
duke@435 | 645 | } |
duke@435 | 646 | #endif |
duke@435 | 647 | }; |
duke@435 | 648 | |
duke@435 | 649 | //------------------------------StoreFNode------------------------------------- |
duke@435 | 650 | // Store float to memory |
duke@435 | 651 | class StoreFNode : public StoreNode { |
duke@435 | 652 | public: |
goetz@6479 | 653 | StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 654 | : StoreNode(c, mem, adr, at, val, mo) {} |
duke@435 | 655 | virtual int Opcode() const; |
duke@435 | 656 | virtual BasicType memory_type() const { return T_FLOAT; } |
duke@435 | 657 | }; |
duke@435 | 658 | |
duke@435 | 659 | //------------------------------StoreDNode------------------------------------- |
duke@435 | 660 | // Store double to memory |
duke@435 | 661 | class StoreDNode : public StoreNode { |
anoll@7858 | 662 | virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; } |
anoll@7858 | 663 | virtual uint cmp( const Node &n ) const { |
anoll@7858 | 664 | return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access |
anoll@7858 | 665 | && StoreNode::cmp(n); |
anoll@7858 | 666 | } |
anoll@7858 | 667 | virtual uint size_of() const { return sizeof(*this); } |
anoll@7858 | 668 | const bool _require_atomic_access; // is piecewise store forbidden? |
duke@435 | 669 | public: |
anoll@7858 | 670 | StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, |
anoll@7858 | 671 | MemOrd mo, bool require_atomic_access = false) |
anoll@7858 | 672 | : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {} |
duke@435 | 673 | virtual int Opcode() const; |
duke@435 | 674 | virtual BasicType memory_type() const { return T_DOUBLE; } |
anoll@7858 | 675 | bool require_atomic_access() const { return _require_atomic_access; } |
anoll@7858 | 676 | static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo); |
anoll@7858 | 677 | #ifndef PRODUCT |
anoll@7858 | 678 | virtual void dump_spec(outputStream *st) const { |
anoll@7858 | 679 | StoreNode::dump_spec(st); |
anoll@7858 | 680 | if (_require_atomic_access) st->print(" Atomic!"); |
anoll@7858 | 681 | } |
anoll@7858 | 682 | #endif |
anoll@7858 | 683 | |
duke@435 | 684 | }; |
duke@435 | 685 | |
duke@435 | 686 | //------------------------------StorePNode------------------------------------- |
duke@435 | 687 | // Store pointer to memory |
duke@435 | 688 | class StorePNode : public StoreNode { |
duke@435 | 689 | public: |
goetz@6479 | 690 | StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 691 | : StoreNode(c, mem, adr, at, val, mo) {} |
duke@435 | 692 | virtual int Opcode() const; |
duke@435 | 693 | virtual BasicType memory_type() const { return T_ADDRESS; } |
duke@435 | 694 | }; |
duke@435 | 695 | |
coleenp@548 | 696 | //------------------------------StoreNNode------------------------------------- |
coleenp@548 | 697 | // Store narrow oop to memory |
coleenp@548 | 698 | class StoreNNode : public StoreNode { |
coleenp@548 | 699 | public: |
goetz@6479 | 700 | StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 701 | : StoreNode(c, mem, adr, at, val, mo) {} |
coleenp@548 | 702 | virtual int Opcode() const; |
coleenp@548 | 703 | virtual BasicType memory_type() const { return T_NARROWOOP; } |
coleenp@548 | 704 | }; |
coleenp@548 | 705 | |
roland@4159 | 706 | //------------------------------StoreNKlassNode-------------------------------------- |
roland@4159 | 707 | // Store narrow klass to memory |
roland@4159 | 708 | class StoreNKlassNode : public StoreNNode { |
roland@4159 | 709 | public: |
goetz@6479 | 710 | StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo) |
goetz@6479 | 711 | : StoreNNode(c, mem, adr, at, val, mo) {} |
roland@4159 | 712 | virtual int Opcode() const; |
roland@4159 | 713 | virtual BasicType memory_type() const { return T_NARROWKLASS; } |
roland@4159 | 714 | }; |
roland@4159 | 715 | |
duke@435 | 716 | //------------------------------StoreCMNode----------------------------------- |
duke@435 | 717 | // Store card-mark byte to memory for CM |
duke@435 | 718 | // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store |
duke@435 | 719 | // Preceding equivalent StoreCMs may be eliminated. |
duke@435 | 720 | class StoreCMNode : public StoreNode { |
cfang@1420 | 721 | private: |
never@1633 | 722 | virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; } |
never@1633 | 723 | virtual uint cmp( const Node &n ) const { |
never@1633 | 724 | return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx |
never@1633 | 725 | && StoreNode::cmp(n); |
never@1633 | 726 | } |
never@1633 | 727 | virtual uint size_of() const { return sizeof(*this); } |
cfang@1420 | 728 | int _oop_alias_idx; // The alias_idx of OopStore |
never@1633 | 729 | |
duke@435 | 730 | public: |
never@1633 | 731 | StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) : |
goetz@6479 | 732 | StoreNode(c, mem, adr, at, val, oop_store, MemNode::release), |
never@1633 | 733 | _oop_alias_idx(oop_alias_idx) { |
never@1633 | 734 | assert(_oop_alias_idx >= Compile::AliasIdxRaw || |
never@1633 | 735 | _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0, |
never@1633 | 736 | "bad oop alias idx"); |
never@1633 | 737 | } |
duke@435 | 738 | virtual int Opcode() const; |
duke@435 | 739 | virtual Node *Identity( PhaseTransform *phase ); |
cfang@1420 | 740 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 741 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 742 | virtual BasicType memory_type() const { return T_VOID; } // unspecific |
cfang@1420 | 743 | int oop_alias_idx() const { return _oop_alias_idx; } |
duke@435 | 744 | }; |
duke@435 | 745 | |
duke@435 | 746 | //------------------------------LoadPLockedNode--------------------------------- |
duke@435 | 747 | // Load-locked a pointer from memory (either object or array). |
duke@435 | 748 | // On Sparc & Intel this is implemented as a normal pointer load. |
duke@435 | 749 | // On PowerPC and friends it's a real load-locked. |
duke@435 | 750 | class LoadPLockedNode : public LoadPNode { |
duke@435 | 751 | public: |
goetz@6479 | 752 | LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo) |
goetz@6479 | 753 | : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {} |
duke@435 | 754 | virtual int Opcode() const; |
duke@435 | 755 | virtual int store_Opcode() const { return Op_StorePConditional; } |
duke@435 | 756 | virtual bool depends_only_on_test() const { return true; } |
duke@435 | 757 | }; |
duke@435 | 758 | |
duke@435 | 759 | //------------------------------SCMemProjNode--------------------------------------- |
duke@435 | 760 | // This class defines a projection of the memory state of a store conditional node. |
duke@435 | 761 | // These nodes return a value, but also update memory. |
duke@435 | 762 | class SCMemProjNode : public ProjNode { |
duke@435 | 763 | public: |
duke@435 | 764 | enum {SCMEMPROJCON = (uint)-2}; |
duke@435 | 765 | SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { } |
duke@435 | 766 | virtual int Opcode() const; |
duke@435 | 767 | virtual bool is_CFG() const { return false; } |
duke@435 | 768 | virtual const Type *bottom_type() const {return Type::MEMORY;} |
duke@435 | 769 | virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();} |
duke@435 | 770 | virtual uint ideal_reg() const { return 0;} // memory projections don't have a register |
duke@435 | 771 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 772 | #ifndef PRODUCT |
duke@435 | 773 | virtual void dump_spec(outputStream *st) const {}; |
duke@435 | 774 | #endif |
duke@435 | 775 | }; |
duke@435 | 776 | |
duke@435 | 777 | //------------------------------LoadStoreNode--------------------------- |
kvn@688 | 778 | // Note: is_Mem() method returns 'true' for this class. |
duke@435 | 779 | class LoadStoreNode : public Node { |
roland@4106 | 780 | private: |
roland@4106 | 781 | const Type* const _type; // What kind of value is loaded? |
roland@4106 | 782 | const TypePtr* _adr_type; // What kind of memory is being addressed? |
roland@4106 | 783 | virtual uint size_of() const; // Size is bigger |
roland@4106 | 784 | public: |
roland@4106 | 785 | LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required ); |
roland@4106 | 786 | virtual bool depends_only_on_test() const { return false; } |
roland@4106 | 787 | virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; } |
roland@4106 | 788 | |
roland@4106 | 789 | virtual const Type *bottom_type() const { return _type; } |
roland@4106 | 790 | virtual uint ideal_reg() const; |
roland@4106 | 791 | virtual const class TypePtr *adr_type() const { return _adr_type; } // returns bottom_type of address |
roland@4106 | 792 | |
roland@4106 | 793 | bool result_not_used() const; |
roland@4106 | 794 | }; |
roland@4106 | 795 | |
roland@4106 | 796 | class LoadStoreConditionalNode : public LoadStoreNode { |
duke@435 | 797 | public: |
duke@435 | 798 | enum { |
duke@435 | 799 | ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode |
duke@435 | 800 | }; |
roland@4106 | 801 | LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex); |
duke@435 | 802 | }; |
duke@435 | 803 | |
duke@435 | 804 | //------------------------------StorePConditionalNode--------------------------- |
duke@435 | 805 | // Conditionally store pointer to memory, if no change since prior |
duke@435 | 806 | // load-locked. Sets flags for success or failure of the store. |
roland@4106 | 807 | class StorePConditionalNode : public LoadStoreConditionalNode { |
duke@435 | 808 | public: |
roland@4106 | 809 | StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } |
duke@435 | 810 | virtual int Opcode() const; |
duke@435 | 811 | // Produces flags |
duke@435 | 812 | virtual uint ideal_reg() const { return Op_RegFlags; } |
duke@435 | 813 | }; |
duke@435 | 814 | |
kvn@855 | 815 | //------------------------------StoreIConditionalNode--------------------------- |
kvn@855 | 816 | // Conditionally store int to memory, if no change since prior |
kvn@855 | 817 | // load-locked. Sets flags for success or failure of the store. |
roland@4106 | 818 | class StoreIConditionalNode : public LoadStoreConditionalNode { |
kvn@855 | 819 | public: |
roland@4106 | 820 | StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { } |
kvn@855 | 821 | virtual int Opcode() const; |
kvn@855 | 822 | // Produces flags |
kvn@855 | 823 | virtual uint ideal_reg() const { return Op_RegFlags; } |
kvn@855 | 824 | }; |
kvn@855 | 825 | |
duke@435 | 826 | //------------------------------StoreLConditionalNode--------------------------- |
duke@435 | 827 | // Conditionally store long to memory, if no change since prior |
duke@435 | 828 | // load-locked. Sets flags for success or failure of the store. |
roland@4106 | 829 | class StoreLConditionalNode : public LoadStoreConditionalNode { |
duke@435 | 830 | public: |
roland@4106 | 831 | StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { } |
duke@435 | 832 | virtual int Opcode() const; |
kvn@855 | 833 | // Produces flags |
kvn@855 | 834 | virtual uint ideal_reg() const { return Op_RegFlags; } |
duke@435 | 835 | }; |
duke@435 | 836 | |
duke@435 | 837 | |
duke@435 | 838 | //------------------------------CompareAndSwapLNode--------------------------- |
roland@4106 | 839 | class CompareAndSwapLNode : public LoadStoreConditionalNode { |
duke@435 | 840 | public: |
roland@4106 | 841 | CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
duke@435 | 842 | virtual int Opcode() const; |
duke@435 | 843 | }; |
duke@435 | 844 | |
duke@435 | 845 | |
duke@435 | 846 | //------------------------------CompareAndSwapINode--------------------------- |
roland@4106 | 847 | class CompareAndSwapINode : public LoadStoreConditionalNode { |
duke@435 | 848 | public: |
roland@4106 | 849 | CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
duke@435 | 850 | virtual int Opcode() const; |
duke@435 | 851 | }; |
duke@435 | 852 | |
duke@435 | 853 | |
duke@435 | 854 | //------------------------------CompareAndSwapPNode--------------------------- |
roland@4106 | 855 | class CompareAndSwapPNode : public LoadStoreConditionalNode { |
duke@435 | 856 | public: |
roland@4106 | 857 | CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
duke@435 | 858 | virtual int Opcode() const; |
duke@435 | 859 | }; |
duke@435 | 860 | |
coleenp@548 | 861 | //------------------------------CompareAndSwapNNode--------------------------- |
roland@4106 | 862 | class CompareAndSwapNNode : public LoadStoreConditionalNode { |
coleenp@548 | 863 | public: |
roland@4106 | 864 | CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { } |
roland@4106 | 865 | virtual int Opcode() const; |
roland@4106 | 866 | }; |
roland@4106 | 867 | |
roland@4106 | 868 | //------------------------------GetAndAddINode--------------------------- |
roland@4106 | 869 | class GetAndAddINode : public LoadStoreNode { |
roland@4106 | 870 | public: |
roland@4106 | 871 | GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } |
roland@4106 | 872 | virtual int Opcode() const; |
roland@4106 | 873 | }; |
roland@4106 | 874 | |
roland@4106 | 875 | //------------------------------GetAndAddLNode--------------------------- |
roland@4106 | 876 | class GetAndAddLNode : public LoadStoreNode { |
roland@4106 | 877 | public: |
roland@4106 | 878 | GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } |
roland@4106 | 879 | virtual int Opcode() const; |
roland@4106 | 880 | }; |
roland@4106 | 881 | |
roland@4106 | 882 | |
roland@4106 | 883 | //------------------------------GetAndSetINode--------------------------- |
roland@4106 | 884 | class GetAndSetINode : public LoadStoreNode { |
roland@4106 | 885 | public: |
roland@4106 | 886 | GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { } |
roland@4106 | 887 | virtual int Opcode() const; |
roland@4106 | 888 | }; |
roland@4106 | 889 | |
roland@4106 | 890 | //------------------------------GetAndSetLNode--------------------------- |
roland@4106 | 891 | class GetAndSetLNode : public LoadStoreNode { |
roland@4106 | 892 | public: |
roland@4106 | 893 | GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { } |
roland@4106 | 894 | virtual int Opcode() const; |
roland@4106 | 895 | }; |
roland@4106 | 896 | |
roland@4106 | 897 | //------------------------------GetAndSetPNode--------------------------- |
roland@4106 | 898 | class GetAndSetPNode : public LoadStoreNode { |
roland@4106 | 899 | public: |
roland@4106 | 900 | GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } |
roland@4106 | 901 | virtual int Opcode() const; |
roland@4106 | 902 | }; |
roland@4106 | 903 | |
roland@4106 | 904 | //------------------------------GetAndSetNNode--------------------------- |
roland@4106 | 905 | class GetAndSetNNode : public LoadStoreNode { |
roland@4106 | 906 | public: |
roland@4106 | 907 | GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { } |
coleenp@548 | 908 | virtual int Opcode() const; |
coleenp@548 | 909 | }; |
coleenp@548 | 910 | |
duke@435 | 911 | //------------------------------ClearArray------------------------------------- |
duke@435 | 912 | class ClearArrayNode: public Node { |
duke@435 | 913 | public: |
kvn@1535 | 914 | ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) |
kvn@1535 | 915 | : Node(ctrl,arymem,word_cnt,base) { |
kvn@1535 | 916 | init_class_id(Class_ClearArray); |
kvn@1535 | 917 | } |
duke@435 | 918 | virtual int Opcode() const; |
duke@435 | 919 | virtual const Type *bottom_type() const { return Type::MEMORY; } |
duke@435 | 920 | // ClearArray modifies array elements, and so affects only the |
duke@435 | 921 | // array memory addressed by the bottom_type of its base address. |
duke@435 | 922 | virtual const class TypePtr *adr_type() const; |
duke@435 | 923 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 924 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 925 | virtual uint match_edge(uint idx) const; |
duke@435 | 926 | |
duke@435 | 927 | // Clear the given area of an object or array. |
duke@435 | 928 | // The start offset must always be aligned mod BytesPerInt. |
duke@435 | 929 | // The end offset must always be aligned mod BytesPerLong. |
duke@435 | 930 | // Return the new memory. |
duke@435 | 931 | static Node* clear_memory(Node* control, Node* mem, Node* dest, |
duke@435 | 932 | intptr_t start_offset, |
duke@435 | 933 | intptr_t end_offset, |
duke@435 | 934 | PhaseGVN* phase); |
duke@435 | 935 | static Node* clear_memory(Node* control, Node* mem, Node* dest, |
duke@435 | 936 | intptr_t start_offset, |
duke@435 | 937 | Node* end_offset, |
duke@435 | 938 | PhaseGVN* phase); |
duke@435 | 939 | static Node* clear_memory(Node* control, Node* mem, Node* dest, |
duke@435 | 940 | Node* start_offset, |
duke@435 | 941 | Node* end_offset, |
duke@435 | 942 | PhaseGVN* phase); |
kvn@1535 | 943 |   // Return the allocation's input memory edge if it belongs to a different
kvn@1535 | 944 |   // instance, or the node itself if it is the instance we are looking for.
kvn@1535 | 945 | static bool step_through(Node** np, uint instance_id, PhaseTransform* phase); |
duke@435 | 946 | }; |
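// Illustrative sketch (editorial addition, not part of the original source):
// zeroing the body of a newly allocated object with the static helper above,
// assuming 'ctl', 'mem', 'dest' and the offsets come from an allocation being
// expanded (names hypothetical; offsets must satisfy the alignment contract
// documented above):
//
//   mem = ClearArrayNode::clear_memory(ctl, mem, dest,
//                                      start_offset,  // BytesPerInt-aligned
//                                      end_offset,    // BytesPerLong-aligned
//                                      phase);
//
// The helper returns the new memory state, which the caller must thread back
// into its memory graph.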
duke@435 | 947 | |
kvn@2694 | 948 | //------------------------------StrIntrinsic------------------------------- |
kvn@2694 | 949 | // Base class for Ideal nodes used in String intrinsic code.
kvn@2694 | 950 | class StrIntrinsicNode: public Node { |
duke@435 | 951 | public: |
kvn@2694 | 952 | StrIntrinsicNode(Node* control, Node* char_array_mem, |
kvn@2694 | 953 | Node* s1, Node* c1, Node* s2, Node* c2): |
kvn@2694 | 954 | Node(control, char_array_mem, s1, c1, s2, c2) { |
kvn@2694 | 955 | } |
kvn@2694 | 956 | |
kvn@2694 | 957 | StrIntrinsicNode(Node* control, Node* char_array_mem, |
kvn@2694 | 958 | Node* s1, Node* s2, Node* c): |
kvn@2694 | 959 | Node(control, char_array_mem, s1, s2, c) { |
kvn@2694 | 960 | } |
kvn@2694 | 961 | |
kvn@2694 | 962 | StrIntrinsicNode(Node* control, Node* char_array_mem, |
kvn@2694 | 963 | Node* s1, Node* s2): |
kvn@2694 | 964 | Node(control, char_array_mem, s1, s2) { |
kvn@2694 | 965 | } |
kvn@2694 | 966 | |
duke@435 | 967 | virtual bool depends_only_on_test() const { return false; } |
kvn@1421 | 968 | virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; } |
duke@435 | 969 | virtual uint match_edge(uint idx) const; |
duke@435 | 970 | virtual uint ideal_reg() const { return Op_RegI; } |
duke@435 | 971 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@3311 | 972 | virtual const Type *Value(PhaseTransform *phase) const; |
duke@435 | 973 | }; |
duke@435 | 974 | |
kvn@2694 | 975 | //------------------------------StrComp------------------------------------- |
kvn@2694 | 976 | class StrCompNode: public StrIntrinsicNode { |
kvn@2694 | 977 | public: |
kvn@2694 | 978 | StrCompNode(Node* control, Node* char_array_mem, |
kvn@2694 | 979 | Node* s1, Node* c1, Node* s2, Node* c2): |
kvn@2694 | 980 |     StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}
kvn@2694 | 981 | virtual int Opcode() const; |
kvn@2694 | 982 | virtual const Type* bottom_type() const { return TypeInt::INT; } |
kvn@2694 | 983 | }; |
kvn@2694 | 984 | |
cfang@1116 | 985 | //------------------------------StrEquals------------------------------------- |
kvn@2694 | 986 | class StrEqualsNode: public StrIntrinsicNode { |
cfang@1116 | 987 | public: |
kvn@1421 | 988 | StrEqualsNode(Node* control, Node* char_array_mem, |
kvn@2694 | 989 | Node* s1, Node* s2, Node* c): |
kvn@2694 | 990 |     StrIntrinsicNode(control, char_array_mem, s1, s2, c) {}
cfang@1116 | 991 | virtual int Opcode() const; |
cfang@1116 | 992 | virtual const Type* bottom_type() const { return TypeInt::BOOL; } |
cfang@1116 | 993 | }; |
cfang@1116 | 994 | |
cfang@1116 | 995 | //------------------------------StrIndexOf------------------------------------- |
kvn@2694 | 996 | class StrIndexOfNode: public StrIntrinsicNode { |
cfang@1116 | 997 | public: |
kvn@1421 | 998 | StrIndexOfNode(Node* control, Node* char_array_mem, |
kvn@2694 | 999 | Node* s1, Node* c1, Node* s2, Node* c2): |
kvn@2694 | 1000 |     StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}
cfang@1116 | 1001 | virtual int Opcode() const; |
cfang@1116 | 1002 | virtual const Type* bottom_type() const { return TypeInt::INT; } |
cfang@1116 | 1003 | }; |
cfang@1116 | 1004 | |
rasbold@604 | 1005 | //------------------------------AryEq--------------------------------------- |
kvn@2694 | 1006 | class AryEqNode: public StrIntrinsicNode { |
rasbold@604 | 1007 | public: |
kvn@2694 | 1008 | AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2): |
kvn@2694 | 1009 |     StrIntrinsicNode(control, char_array_mem, s1, s2) {}
rasbold@604 | 1010 | virtual int Opcode() const; |
rasbold@604 | 1011 | virtual const Type* bottom_type() const { return TypeInt::BOOL; } |
rasbold@604 | 1012 | }; |
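// Illustrative sketch (editorial addition, not part of the original source):
// constructing one of the string intrinsics above in the style of the
// intrinsic expansion code, assuming 'control', 'char_mem' (the CHARS memory
// slice), the two string bodies and the character count are in hand (names
// hypothetical):
//
//   Node* eq = new (C) StrEqualsNode(control, char_mem,
//                                    str1_start, str2_start, count);
//   Node* result = _gvn.transform(eq);   // TypeInt::BOOL per bottom_type()
//
// All of these nodes consume only the char-array slice, as the base class's
// adr_type() (TypeAryPtr::CHARS) advertises.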
rasbold@604 | 1013 | |
kvn@4479 | 1014 | |
kvn@4479 | 1015 | //------------------------------EncodeISOArray-------------------------------- |
kvn@4479 | 1016 | // encode char[] to byte[] in ISO_8859_1 |
kvn@4479 | 1017 | class EncodeISOArrayNode: public Node { |
kvn@4479 | 1018 | public: |
kvn@4479 | 1019 |   EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {}
kvn@4479 | 1020 | virtual int Opcode() const; |
kvn@4479 | 1021 | virtual bool depends_only_on_test() const { return false; } |
kvn@4479 | 1022 | virtual const Type* bottom_type() const { return TypeInt::INT; } |
kvn@4479 | 1023 | virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; } |
kvn@4479 | 1024 | virtual uint match_edge(uint idx) const; |
kvn@4479 | 1025 | virtual uint ideal_reg() const { return Op_RegI; } |
kvn@4479 | 1026 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
kvn@4479 | 1027 | virtual const Type *Value(PhaseTransform *phase) const; |
kvn@4479 | 1028 | }; |
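// Illustrative sketch (editorial addition, not part of the original source):
// the shape an intrinsic expander might build for an ISO-8859-1 encode,
// assuming 'src_start', 'dst_start' and 'length' have been computed (names
// hypothetical):
//
//   Node* enc = new (C) EncodeISOArrayNode(control, arymem,
//                                          src_start, dst_start, length);
//   Node* encoded_cnt = _gvn.transform(enc);  // int result, per bottom_type()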
kvn@4479 | 1029 | |
duke@435 | 1030 | //------------------------------MemBar----------------------------------------- |
duke@435 | 1031 | // There are different flavors of Memory Barriers to match the Java Memory |
duke@435 | 1032 | // Model.  Monitor-enter and volatile-load act as Acquires: no following ref
duke@435 | 1033 | // can be moved to before them. We insert a MemBar-Acquire after a FastLock or |
duke@435 | 1034 | // volatile-load. Monitor-exit and volatile-store act as Release: no |
twisti@1040 | 1035 | // preceding ref can be moved to after them. We insert a MemBar-Release |
duke@435 | 1036 | // before a FastUnlock or volatile-store. All volatiles need to be |
duke@435 | 1037 | // serialized, so we follow all volatile-stores with a MemBar-Volatile to |
twisti@1040 | 1038 | // separate them from any following volatile-load.
duke@435 | 1039 | class MemBarNode: public MultiNode { |
duke@435 | 1040 | virtual uint hash() const ; // { return NO_HASH; } |
duke@435 | 1041 | virtual uint cmp( const Node &n ) const ; // Always fail, except on self |
duke@435 | 1042 | |
duke@435 | 1043 | virtual uint size_of() const { return sizeof(*this); } |
duke@435 | 1044 | // Memory type this node is serializing. Usually either rawptr or bottom. |
duke@435 | 1045 | const TypePtr* _adr_type; |
duke@435 | 1046 | |
duke@435 | 1047 | public: |
duke@435 | 1048 | enum { |
duke@435 | 1049 | Precedent = TypeFunc::Parms // optional edge to force precedence |
duke@435 | 1050 | }; |
duke@435 | 1051 | MemBarNode(Compile* C, int alias_idx, Node* precedent); |
duke@435 | 1052 | virtual int Opcode() const = 0; |
duke@435 | 1053 | virtual const class TypePtr *adr_type() const { return _adr_type; } |
duke@435 | 1054 | virtual const Type *Value( PhaseTransform *phase ) const; |
duke@435 | 1055 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 1056 | virtual uint match_edge(uint idx) const { return 0; } |
duke@435 | 1057 | virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; } |
duke@435 | 1058 | virtual Node *match( const ProjNode *proj, const Matcher *m ); |
duke@435 | 1059 | // Factory method. Builds a wide or narrow membar. |
duke@435 | 1060 | // Optional 'precedent' becomes an extra edge if not null. |
duke@435 | 1061 | static MemBarNode* make(Compile* C, int opcode, |
duke@435 | 1062 | int alias_idx = Compile::AliasIdxBot, |
duke@435 | 1063 | Node* precedent = NULL); |
duke@435 | 1064 | }; |
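// Illustrative sketch (editorial addition, not part of the original source):
// barriers are normally created through the factory above so the concrete
// subclass is chosen from the opcode:
//
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire,
//                                     Compile::AliasIdxBot, precedent);
//
// 'precedent' may be NULL; when non-NULL it becomes the optional Precedent
// edge that forces ordering against a particular node.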
duke@435 | 1065 | |
duke@435 | 1066 | // "Acquire" - no following ref can move before (but earlier refs can |
duke@435 | 1067 | // follow, like an early Load stalled in cache). Requires multi-cpu |
roland@3047 | 1068 | // visibility. Inserted after a volatile load. |
duke@435 | 1069 | class MemBarAcquireNode: public MemBarNode { |
duke@435 | 1070 | public: |
duke@435 | 1071 | MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent) |
duke@435 | 1072 | : MemBarNode(C, alias_idx, precedent) {} |
duke@435 | 1073 | virtual int Opcode() const; |
duke@435 | 1074 | }; |
duke@435 | 1075 | |
goetz@6489 | 1076 | // "Acquire" - no following ref can move before (but earlier refs can |
goetz@6489 | 1077 | // follow, like an early Load stalled in cache). Requires multi-cpu |
goetz@6489 | 1078 | // visibility.  Inserted independent of any load, as required
goetz@6489 | 1079 | // for intrinsic sun.misc.Unsafe.loadFence(). |
goetz@6489 | 1080 | class LoadFenceNode: public MemBarNode { |
goetz@6489 | 1081 | public: |
goetz@6489 | 1082 | LoadFenceNode(Compile* C, int alias_idx, Node* precedent) |
goetz@6489 | 1083 | : MemBarNode(C, alias_idx, precedent) {} |
goetz@6489 | 1084 | virtual int Opcode() const; |
goetz@6489 | 1085 | }; |
goetz@6489 | 1086 | |
duke@435 | 1087 | // "Release" - no earlier ref can move after (but later refs can move |
duke@435 | 1088 | // up, like a speculative pipelined cache-hitting Load). Requires |
roland@3047 | 1089 | // multi-cpu visibility. Inserted before a volatile store. |
duke@435 | 1090 | class MemBarReleaseNode: public MemBarNode { |
duke@435 | 1091 | public: |
duke@435 | 1092 | MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent) |
duke@435 | 1093 | : MemBarNode(C, alias_idx, precedent) {} |
duke@435 | 1094 | virtual int Opcode() const; |
duke@435 | 1095 | }; |
duke@435 | 1096 | |
goetz@6489 | 1097 | // "Release" - no earlier ref can move after (but later refs can move |
goetz@6489 | 1098 | // up, like a speculative pipelined cache-hitting Load). Requires |
goetz@6489 | 1099 | // multi-cpu visibility. Inserted independent of any store, as required |
goetz@6489 | 1100 | // for intrinsic sun.misc.Unsafe.storeFence(). |
goetz@6489 | 1101 | class StoreFenceNode: public MemBarNode { |
goetz@6489 | 1102 | public: |
goetz@6489 | 1103 | StoreFenceNode(Compile* C, int alias_idx, Node* precedent) |
goetz@6489 | 1104 | : MemBarNode(C, alias_idx, precedent) {} |
goetz@6489 | 1105 | virtual int Opcode() const; |
goetz@6489 | 1106 | }; |
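// Illustrative sketch (editorial addition, not part of the original source):
// the Unsafe fence intrinsics map onto the two fence nodes above through the
// same factory, e.g. for sun.misc.Unsafe.loadFence() (assuming the generated
// Op_LoadFence opcode constant):
//
//   MemBarNode* fence = MemBarNode::make(C, Op_LoadFence,
//                                        Compile::AliasIdxBot, NULL);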
goetz@6489 | 1107 | |
roland@3047 | 1108 | // "Acquire" - no following ref can move before (but earlier refs can |
roland@3047 | 1109 | // follow, like an early Load stalled in cache). Requires multi-cpu |
roland@3047 | 1110 | // visibility. Inserted after a FastLock. |
roland@3047 | 1111 | class MemBarAcquireLockNode: public MemBarNode { |
roland@3047 | 1112 | public: |
roland@3047 | 1113 | MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent) |
roland@3047 | 1114 | : MemBarNode(C, alias_idx, precedent) {} |
roland@3047 | 1115 | virtual int Opcode() const; |
roland@3047 | 1116 | }; |
roland@3047 | 1117 | |
roland@3047 | 1118 | // "Release" - no earlier ref can move after (but later refs can move |
roland@3047 | 1119 | // up, like a speculative pipelined cache-hitting Load). Requires |
roland@3047 | 1120 | // multi-cpu visibility.  Inserted before a FastUnlock.
roland@3047 | 1121 | class MemBarReleaseLockNode: public MemBarNode { |
roland@3047 | 1122 | public: |
roland@3047 | 1123 | MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent) |
roland@3047 | 1124 | : MemBarNode(C, alias_idx, precedent) {} |
roland@3047 | 1125 | virtual int Opcode() const; |
roland@3047 | 1126 | }; |
roland@3047 | 1127 | |
roland@3392 | 1128 | class MemBarStoreStoreNode: public MemBarNode { |
roland@3392 | 1129 | public: |
roland@3392 | 1130 | MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent) |
roland@3392 | 1131 | : MemBarNode(C, alias_idx, precedent) { |
roland@3392 | 1132 | init_class_id(Class_MemBarStoreStore); |
roland@3392 | 1133 | } |
roland@3392 | 1134 | virtual int Opcode() const; |
roland@3392 | 1135 | }; |
roland@3392 | 1136 | |
duke@435 | 1137 | // Ordering between a volatile store and a following volatile load. |
duke@435 | 1138 | // Requires multi-CPU visibility? |
duke@435 | 1139 | class MemBarVolatileNode: public MemBarNode { |
duke@435 | 1140 | public: |
duke@435 | 1141 | MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent) |
duke@435 | 1142 | : MemBarNode(C, alias_idx, precedent) {} |
duke@435 | 1143 | virtual int Opcode() const; |
duke@435 | 1144 | }; |
duke@435 | 1145 | |
duke@435 | 1146 | // Ordering within the same CPU. Used to order unsafe memory references |
duke@435 | 1147 | // inside the compiler when we lack alias info. Not needed "outside" the |
duke@435 | 1148 | // compiler because the CPU does all the ordering for us. |
duke@435 | 1149 | class MemBarCPUOrderNode: public MemBarNode { |
duke@435 | 1150 | public: |
duke@435 | 1151 | MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent) |
duke@435 | 1152 | : MemBarNode(C, alias_idx, precedent) {} |
duke@435 | 1153 | virtual int Opcode() const; |
duke@435 | 1154 | virtual uint ideal_reg() const { return 0; } // not matched in the AD file |
duke@435 | 1155 | }; |
duke@435 | 1156 | |
duke@435 | 1157 | // Isolation of object setup after an AllocateNode and before next safepoint. |
duke@435 | 1158 | // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.) |
duke@435 | 1159 | class InitializeNode: public MemBarNode { |
duke@435 | 1160 | friend class AllocateNode; |
duke@435 | 1161 | |
kvn@3157 | 1162 | enum { |
kvn@3157 | 1163 | Incomplete = 0, |
kvn@3157 | 1164 | Complete = 1, |
kvn@3157 | 1165 | WithArraycopy = 2 |
kvn@3157 | 1166 | }; |
kvn@3157 | 1167 | int _is_complete; |
duke@435 | 1168 | |
roland@3392 | 1169 | bool _does_not_escape; |
roland@3392 | 1170 | |
duke@435 | 1171 | public: |
duke@435 | 1172 | enum { |
duke@435 | 1173 | Control = TypeFunc::Control, |
duke@435 | 1174 | Memory = TypeFunc::Memory, // MergeMem for states affected by this op |
duke@435 | 1175 | RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address |
duke@435 | 1176 | RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP) |
duke@435 | 1177 | }; |
duke@435 | 1178 | |
duke@435 | 1179 | InitializeNode(Compile* C, int adr_type, Node* rawoop); |
duke@435 | 1180 | virtual int Opcode() const; |
duke@435 | 1181 | virtual uint size_of() const { return sizeof(*this); } |
duke@435 | 1182 | virtual uint ideal_reg() const { return 0; } // not matched in the AD file |
duke@435 | 1183 | virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress |
duke@435 | 1184 | |
duke@435 | 1185 | // Manage incoming memory edges via a MergeMem on in(Memory): |
duke@435 | 1186 | Node* memory(uint alias_idx); |
duke@435 | 1187 | |
duke@435 | 1188 | // The raw memory edge coming directly from the Allocation. |
duke@435 | 1189 | // The contents of this memory are *always* all-zero-bits. |
duke@435 | 1190 | Node* zero_memory() { return memory(Compile::AliasIdxRaw); } |
duke@435 | 1191 | |
duke@435 | 1192 | // Return the corresponding allocation for this initialization (or null if none). |
duke@435 | 1193 | // (Note: Both InitializeNode::allocation and AllocateNode::initialization |
duke@435 | 1194 | // are defined in graphKit.cpp, which sets up the bidirectional relation.) |
duke@435 | 1195 | AllocateNode* allocation(); |
duke@435 | 1196 | |
duke@435 | 1197 | // Anything other than zeroing in this init? |
duke@435 | 1198 | bool is_non_zero(); |
duke@435 | 1199 | |
duke@435 | 1200 |   // An InitializeNode must be completed before macro expansion is done.
duke@435 | 1201 |   // Completion requires that the AllocateNode be followed by
duke@435 | 1202 |   // initialization of the new memory to zero, then by any initializing stores.
kvn@3157 | 1203 | bool is_complete() { return _is_complete != Incomplete; } |
kvn@3157 | 1204 | bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; } |
duke@435 | 1205 | |
duke@435 | 1206 | // Mark complete. (Must not yet be complete.) |
duke@435 | 1207 | void set_complete(PhaseGVN* phase); |
kvn@3157 | 1208 | void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; } |
duke@435 | 1209 | |
roland@3392 | 1210 | bool does_not_escape() { return _does_not_escape; } |
roland@3392 | 1211 | void set_does_not_escape() { _does_not_escape = true; } |
roland@3392 | 1212 | |
duke@435 | 1213 | #ifdef ASSERT |
duke@435 | 1214 | // ensure all non-degenerate stores are ordered and non-overlapping |
duke@435 | 1215 | bool stores_are_sane(PhaseTransform* phase); |
duke@435 | 1216 | #endif //ASSERT |
duke@435 | 1217 | |
duke@435 | 1218 | // See if this store can be captured; return offset where it initializes. |
duke@435 | 1219 | // Return 0 if the store cannot be moved (any sort of problem). |
roland@4657 | 1220 | intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape); |
duke@435 | 1221 | |
duke@435 | 1222 | // Capture another store; reformat it to write my internal raw memory. |
duke@435 | 1223 | // Return the captured copy, else NULL if there is some sort of problem. |
roland@4657 | 1224 | Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape); |
duke@435 | 1225 | |
duke@435 | 1226 | // Find captured store which corresponds to the range [start..start+size). |
duke@435 | 1227 | // Return my own memory projection (meaning the initial zero bits) |
duke@435 | 1228 | // if there is no such store. Return NULL if there is a problem. |
duke@435 | 1229 | Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase); |
duke@435 | 1230 | |
duke@435 | 1231 | // Called when the associated AllocateNode is expanded into CFG. |
duke@435 | 1232 | Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr, |
duke@435 | 1233 | intptr_t header_size, Node* size_in_bytes, |
duke@435 | 1234 | PhaseGVN* phase); |
duke@435 | 1235 | |
duke@435 | 1236 | private: |
duke@435 | 1237 | void remove_extra_zeroes(); |
duke@435 | 1238 | |
duke@435 | 1239 | // Find out where a captured store should be placed (or already is placed). |
duke@435 | 1240 | int captured_store_insertion_point(intptr_t start, int size_in_bytes, |
duke@435 | 1241 | PhaseTransform* phase); |
duke@435 | 1242 | |
duke@435 | 1243 | static intptr_t get_store_offset(Node* st, PhaseTransform* phase); |
duke@435 | 1244 | |
duke@435 | 1245 | Node* make_raw_address(intptr_t offset, PhaseTransform* phase); |
duke@435 | 1246 | |
kvn@5110 | 1247 | bool detect_init_independence(Node* n, int& count); |
duke@435 | 1248 | |
duke@435 | 1249 | void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes, |
duke@435 | 1250 | PhaseGVN* phase); |
duke@435 | 1251 | |
duke@435 | 1252 | intptr_t find_next_fullword_store(uint i, PhaseGVN* phase); |
duke@435 | 1253 | }; |
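// Illustrative sketch (editorial addition, not part of the original source):
// how a caller might drive the store-capture protocol declared above,
// assuming 'init' is the InitializeNode and 'st' a candidate StoreNode:
//
//   intptr_t start = init->can_capture_store(st, phase, can_reshape);
//   if (start > 0) {   // 0 means the store cannot be moved
//     Node* captured = init->capture_store(st, start, phase, can_reshape);
//     // 'captured' is NULL if capture failed after all; otherwise it
//     // now writes the initialization's raw memory.
//   }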
duke@435 | 1254 | |
duke@435 | 1255 | //------------------------------MergeMem--------------------------------------- |
duke@435 | 1256 | // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.) |
duke@435 | 1257 | class MergeMemNode: public Node { |
duke@435 | 1258 | virtual uint hash() const ; // { return NO_HASH; } |
duke@435 | 1259 | virtual uint cmp( const Node &n ) const ; // Always fail, except on self |
duke@435 | 1260 | friend class MergeMemStream; |
duke@435 | 1261 | MergeMemNode(Node* def); // clients use MergeMemNode::make |
duke@435 | 1262 | |
duke@435 | 1263 | public: |
duke@435 | 1264 | // If the input is a whole memory state, clone it with all its slices intact. |
duke@435 | 1265 | // Otherwise, make a new memory state with just that base memory input. |
duke@435 | 1266 | // In either case, the result is a newly created MergeMem. |
duke@435 | 1267 | static MergeMemNode* make(Compile* C, Node* base_memory); |
duke@435 | 1268 | |
duke@435 | 1269 | virtual int Opcode() const; |
duke@435 | 1270 | virtual Node *Identity( PhaseTransform *phase ); |
duke@435 | 1271 | virtual Node *Ideal(PhaseGVN *phase, bool can_reshape); |
duke@435 | 1272 | virtual uint ideal_reg() const { return NotAMachineReg; } |
duke@435 | 1273 | virtual uint match_edge(uint idx) const { return 0; } |
duke@435 | 1274 | virtual const RegMask &out_RegMask() const; |
duke@435 | 1275 | virtual const Type *bottom_type() const { return Type::MEMORY; } |
duke@435 | 1276 | virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; } |
duke@435 | 1277 | // sparse accessors |
duke@435 | 1278 | // Fetch the previously stored "set_memory_at", or else the base memory. |
duke@435 | 1279 | // (Caller should clone it if it is a phi-nest.) |
duke@435 | 1280 | Node* memory_at(uint alias_idx) const; |
duke@435 | 1281 | // set the memory, regardless of its previous value |
duke@435 | 1282 | void set_memory_at(uint alias_idx, Node* n); |
duke@435 | 1283 | // the "base" is the memory that provides the non-finite support |
duke@435 | 1284 | Node* base_memory() const { return in(Compile::AliasIdxBot); } |
duke@435 | 1285 | // warning: setting the base can implicitly set any of the other slices too |
duke@435 | 1286 | void set_base_memory(Node* def); |
duke@435 | 1287 | // sentinel value which denotes a copy of the base memory: |
duke@435 | 1288 | Node* empty_memory() const { return in(Compile::AliasIdxTop); } |
duke@435 | 1289 | static Node* make_empty_memory(); // where the sentinel comes from |
duke@435 | 1290 | bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); } |
duke@435 | 1291 | // hook for the iterator, to perform any necessary setup |
duke@435 | 1292 | void iteration_setup(const MergeMemNode* other = NULL); |
duke@435 | 1293 | // push sentinels until I am at least as long as the other (semantic no-op) |
duke@435 | 1294 | void grow_to_match(const MergeMemNode* other); |
duke@435 | 1295 | bool verify_sparse() const PRODUCT_RETURN0; |
duke@435 | 1296 | #ifndef PRODUCT |
duke@435 | 1297 | virtual void dump_spec(outputStream *st) const; |
duke@435 | 1298 | #endif |
duke@435 | 1299 | }; |
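// Illustrative sketch (editorial addition, not part of the original source):
// the typical read-modify-write of a single memory slice through the sparse
// accessors above, assuming 'alias_idx' identifies the slice and 'new_slice'
// is its replacement (names hypothetical):
//
//   MergeMemNode* mm = MergeMemNode::make(C, base_mem);
//   Node* old_slice  = mm->memory_at(alias_idx);   // slice, or base memory
//   mm->set_memory_at(alias_idx, new_slice);
//
// Note that set_base_memory() can implicitly change every slice that is
// still just a copy of the base.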
duke@435 | 1300 | |
duke@435 | 1301 | class MergeMemStream : public StackObj { |
duke@435 | 1302 | private: |
duke@435 | 1303 | MergeMemNode* _mm; |
duke@435 | 1304 | const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations |
duke@435 | 1305 | Node* _mm_base; // loop-invariant base memory of _mm |
duke@435 | 1306 | int _idx; |
duke@435 | 1307 | int _cnt; |
duke@435 | 1308 | Node* _mem; |
duke@435 | 1309 | Node* _mem2; |
duke@435 | 1310 | int _cnt2; |
duke@435 | 1311 | |
duke@435 | 1312 | void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) { |
duke@435 | 1313 | // subsume_node will break sparseness at times, whenever a memory slice |
duke@435 | 1314 | // folds down to a copy of the base ("fat") memory. In such a case, |
duke@435 | 1315 | // the raw edge will update to base, although it should be top. |
duke@435 | 1316 | // This iterator will recognize either top or base_memory as an |
duke@435 | 1317 | // "empty" slice. See is_empty, is_empty2, and next below. |
duke@435 | 1318 | // |
duke@435 | 1319 | // The sparseness property is repaired in MergeMemNode::Ideal. |
duke@435 | 1320 | // As long as access to a MergeMem goes through this iterator |
duke@435 | 1321 | // or the memory_at accessor, flaws in the sparseness will |
duke@435 | 1322 | // never be observed. |
duke@435 | 1323 | // |
duke@435 | 1324 | // Also, iteration_setup repairs sparseness. |
duke@435 | 1325 | assert(mm->verify_sparse(), "please, no dups of base"); |
duke@435 | 1326 | assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base"); |
duke@435 | 1327 | |
duke@435 | 1328 | _mm = mm; |
duke@435 | 1329 | _mm_base = mm->base_memory(); |
duke@435 | 1330 | _mm2 = mm2; |
duke@435 | 1331 | _cnt = mm->req(); |
duke@435 | 1332 | _idx = Compile::AliasIdxBot-1; // start at the base memory |
duke@435 | 1333 | _mem = NULL; |
duke@435 | 1334 | _mem2 = NULL; |
duke@435 | 1335 | } |
duke@435 | 1336 | |
duke@435 | 1337 | #ifdef ASSERT |
duke@435 | 1338 | Node* check_memory() const { |
duke@435 | 1339 | if (at_base_memory()) |
duke@435 | 1340 | return _mm->base_memory(); |
duke@435 | 1341 | else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top()) |
duke@435 | 1342 | return _mm->memory_at(_idx); |
duke@435 | 1343 | else |
duke@435 | 1344 | return _mm_base; |
duke@435 | 1345 | } |
duke@435 | 1346 | Node* check_memory2() const { |
duke@435 | 1347 | return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx); |
duke@435 | 1348 | } |
duke@435 | 1349 | #endif |
duke@435 | 1350 | |
duke@435 | 1351 | static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0; |
duke@435 | 1352 | void assert_synch() const { |
duke@435 | 1353 | assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx), |
duke@435 | 1354 | "no side-effects except through the stream"); |
duke@435 | 1355 | } |
duke@435 | 1356 | |
duke@435 | 1357 | public: |
duke@435 | 1358 | |
duke@435 | 1359 | // expected usages: |
duke@435 | 1360 |   //  for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
duke@435 | 1361 |   //  for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
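  //
  // Illustrative sketch (editorial addition, not part of the original
  // source): a fuller version of the first usage, rewriting every
  // non-empty slice (assuming 'mem' is known to be a MergeMem and
  // 'transform_slice' is a hypothetical helper):
  //
  //  for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
  //    Node* repl = transform_slice(mms.memory(), mms.alias_idx());
  //    mms.set_memory(repl);    // writes back through the stream
  //  }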
duke@435 | 1362 | |
duke@435 | 1363 | // iterate over one merge |
duke@435 | 1364 | MergeMemStream(MergeMemNode* mm) { |
duke@435 | 1365 | mm->iteration_setup(); |
duke@435 | 1366 | init(mm); |
duke@435 | 1367 | debug_only(_cnt2 = 999); |
duke@435 | 1368 | } |
duke@435 | 1369 | // iterate in parallel over two merges |
duke@435 | 1370 | // only iterates through non-empty elements of mm2 |
duke@435 | 1371 | MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) { |
duke@435 | 1372 | assert(mm2, "second argument must be a MergeMem also"); |
duke@435 | 1373 | ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state |
duke@435 | 1374 | mm->iteration_setup(mm2); |
duke@435 | 1375 | init(mm, mm2); |
duke@435 | 1376 | _cnt2 = mm2->req(); |
duke@435 | 1377 | } |
duke@435 | 1378 | #ifdef ASSERT |
duke@435 | 1379 | ~MergeMemStream() { |
duke@435 | 1380 | assert_synch(); |
duke@435 | 1381 | } |
duke@435 | 1382 | #endif |
duke@435 | 1383 | |
duke@435 | 1384 | MergeMemNode* all_memory() const { |
duke@435 | 1385 | return _mm; |
duke@435 | 1386 | } |
duke@435 | 1387 | Node* base_memory() const { |
duke@435 | 1388 | assert(_mm_base == _mm->base_memory(), "no update to base memory, please"); |
duke@435 | 1389 | return _mm_base; |
duke@435 | 1390 | } |
duke@435 | 1391 | const MergeMemNode* all_memory2() const { |
duke@435 | 1392 | assert(_mm2 != NULL, ""); |
duke@435 | 1393 | return _mm2; |
duke@435 | 1394 | } |
duke@435 | 1395 | bool at_base_memory() const { |
duke@435 | 1396 | return _idx == Compile::AliasIdxBot; |
duke@435 | 1397 | } |
duke@435 | 1398 | int alias_idx() const { |
duke@435 | 1399 | assert(_mem, "must call next 1st"); |
duke@435 | 1400 | return _idx; |
duke@435 | 1401 | } |
duke@435 | 1402 | |
duke@435 | 1403 | const TypePtr* adr_type() const { |
duke@435 | 1404 | return Compile::current()->get_adr_type(alias_idx()); |
duke@435 | 1405 | } |
duke@435 | 1406 | |
duke@435 | 1407 | const TypePtr* adr_type(Compile* C) const { |
duke@435 | 1408 | return C->get_adr_type(alias_idx()); |
duke@435 | 1409 | } |
duke@435 | 1410 | bool is_empty() const { |
duke@435 | 1411 | assert(_mem, "must call next 1st"); |
duke@435 | 1412 | assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel"); |
duke@435 | 1413 | return _mem->is_top(); |
duke@435 | 1414 | } |
duke@435 | 1415 | bool is_empty2() const { |
duke@435 | 1416 | assert(_mem2, "must call next 1st"); |
duke@435 | 1417 | assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel"); |
duke@435 | 1418 | return _mem2->is_top(); |
duke@435 | 1419 | } |
duke@435 | 1420 | Node* memory() const { |
duke@435 | 1421 | assert(!is_empty(), "must not be empty"); |
duke@435 | 1422 | assert_synch(); |
duke@435 | 1423 | return _mem; |
duke@435 | 1424 | } |
duke@435 | 1425 | // get the current memory, regardless of empty or non-empty status |
duke@435 | 1426 | Node* force_memory() const { |
duke@435 | 1427 | assert(!is_empty() || !at_base_memory(), ""); |
duke@435 | 1428 | // Use _mm_base to defend against updates to _mem->base_memory(). |
duke@435 | 1429 | Node *mem = _mem->is_top() ? _mm_base : _mem; |
duke@435 | 1430 | assert(mem == check_memory(), ""); |
duke@435 | 1431 | return mem; |
duke@435 | 1432 | } |
duke@435 | 1433 | Node* memory2() const { |
duke@435 | 1434 | assert(_mem2 == check_memory2(), ""); |
duke@435 | 1435 | return _mem2; |
duke@435 | 1436 | } |
duke@435 | 1437 | void set_memory(Node* mem) { |
duke@435 | 1438 | if (at_base_memory()) { |
duke@435 | 1439 | // Note that this does not change the invariant _mm_base. |
duke@435 | 1440 | _mm->set_base_memory(mem); |
duke@435 | 1441 | } else { |
duke@435 | 1442 | _mm->set_memory_at(_idx, mem); |
duke@435 | 1443 | } |
duke@435 | 1444 | _mem = mem; |
duke@435 | 1445 | assert_synch(); |
duke@435 | 1446 | } |
duke@435 | 1447 | |
duke@435 | 1448 | // Recover from a side effect to the MergeMemNode. |
duke@435 | 1449 | void set_memory() { |
duke@435 | 1450 | _mem = _mm->in(_idx); |
duke@435 | 1451 | } |
duke@435 | 1452 | |
duke@435 | 1453 | bool next() { return next(false); } |
duke@435 | 1454 | bool next2() { return next(true); } |
duke@435 | 1455 | |
duke@435 | 1456 | bool next_non_empty() { return next_non_empty(false); } |
duke@435 | 1457 | bool next_non_empty2() { return next_non_empty(true); } |
duke@435 | 1458 | // next_non_empty2 can yield states where is_empty() is true |
duke@435 | 1459 | |
duke@435 | 1460 | private: |
duke@435 | 1461 | // find the next item, which might be empty |
duke@435 | 1462 | bool next(bool have_mm2) { |
duke@435 | 1463 | assert((_mm2 != NULL) == have_mm2, "use other next"); |
duke@435 | 1464 | assert_synch(); |
duke@435 | 1465 | if (++_idx < _cnt) { |
duke@435 | 1466 | // Note: This iterator allows _mm to be non-sparse. |
duke@435 | 1467 | // It behaves the same whether _mem is top or base_memory. |
duke@435 | 1468 | _mem = _mm->in(_idx); |
duke@435 | 1469 | if (have_mm2) |
duke@435 | 1470 | _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop); |
duke@435 | 1471 | return true; |
duke@435 | 1472 | } |
duke@435 | 1473 | return false; |
duke@435 | 1474 | } |
duke@435 | 1475 | |
duke@435 | 1476 | // find the next non-empty item |
duke@435 | 1477 | bool next_non_empty(bool have_mm2) { |
duke@435 | 1478 | while (next(have_mm2)) { |
duke@435 | 1479 | if (!is_empty()) { |
duke@435 | 1480 | // make sure _mem2 is filled in sensibly |
duke@435 | 1481 | if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory(); |
duke@435 | 1482 | return true; |
duke@435 | 1483 | } else if (have_mm2 && !is_empty2()) { |
duke@435 | 1484 | return true; // is_empty() == true |
duke@435 | 1485 | } |
duke@435 | 1486 | } |
duke@435 | 1487 | return false; |
duke@435 | 1488 | } |
duke@435 | 1489 | }; |
duke@435 | 1490 | |
duke@435 | 1491 | //------------------------------Prefetch--------------------------------------- |
duke@435 | 1492 | |
duke@435 | 1493 | // Non-faulting prefetch load. Prefetch for many reads. |
duke@435 | 1494 | class PrefetchReadNode : public Node { |
duke@435 | 1495 | public: |
duke@435 | 1496 | PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {} |
duke@435 | 1497 | virtual int Opcode() const; |
duke@435 | 1498 | virtual uint ideal_reg() const { return NotAMachineReg; } |
duke@435 | 1499 | virtual uint match_edge(uint idx) const { return idx==2; } |
duke@435 | 1500 | virtual const Type *bottom_type() const { return Type::ABIO; } |
duke@435 | 1501 | }; |
duke@435 | 1502 | |
duke@435 | 1503 | // Non-faulting prefetch load. Prefetch for many reads & many writes. |
duke@435 | 1504 | class PrefetchWriteNode : public Node { |
duke@435 | 1505 | public: |
duke@435 | 1506 | PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {} |
duke@435 | 1507 | virtual int Opcode() const; |
duke@435 | 1508 | virtual uint ideal_reg() const { return NotAMachineReg; } |
duke@435 | 1509 | virtual uint match_edge(uint idx) const { return idx==2; } |
kvn@3052 | 1510 | virtual const Type *bottom_type() const { return Type::ABIO; } |
kvn@3052 | 1511 | }; |
kvn@3052 | 1512 | |
kvn@3052 | 1513 | // Allocation prefetch which may fault; the TLAB size has to be adjusted.
kvn@3052 | 1514 | class PrefetchAllocationNode : public Node { |
kvn@3052 | 1515 | public: |
kvn@3052 | 1516 | PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {} |
kvn@3052 | 1517 | virtual int Opcode() const; |
kvn@3052 | 1518 | virtual uint ideal_reg() const { return NotAMachineReg; } |
kvn@3052 | 1519 | virtual uint match_edge(uint idx) const { return idx==2; } |
kvn@1802 | 1520 | virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; } |
duke@435 | 1521 | }; |
stefank@2314 | 1522 | |
stefank@2314 | 1523 | #endif // SHARE_VM_OPTO_MEMNODE_HPP |