src/share/vm/opto/memnode.hpp

author:      kvn
date:        Wed, 21 May 2008 13:46:23 -0700
changeset:   599:c436414a719e
parent:      598:885ed790ecf0
child:       604:9148c65abefc
permissions: -rw-r--r--

6703890: Compressed Oops: add LoadNKlass node to generate narrow oops (32-bits) compare instructions
Summary: Add LoadNKlass and CMoveN nodes, use CmpN and ConN nodes to generate narrow oops compare instructions.
Reviewed-by: never, rasbold
/*
 * Copyright 1997-2007 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
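
// Illustrative sketch (editorial, not part of the original source): the fixed
// input edges declared in the enum above are accessed positionally, e.g.
//   Node* ctl = n->in(MemNode::Control);
//   Node* mem = n->in(MemNode::Memory);
//   Node* adr = n->in(MemNode::Address);
// and a store additionally carries n->in(MemNode::ValueIn), the stored value.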

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here.  If we have an identity, return the Node
  // we are equivalent to.  We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;   // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
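
// Illustrative use of the factory above (editorial sketch; 'gvn', 'ctl',
// 'mem', 'adr' and 'adr_type' are assumed to be in scope):
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
// make() selects the concrete subclass (here a LoadINode) from the BasicType.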

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadCNode--------------------------------------
// Load a char (16 bits unsigned) from memory
class LoadCNode : public LoadNode {
public:
  LoadCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations.  However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge.  (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations).  Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test: same caveat as for LoadPNode above (RawPtr loads
  // from TLS top & end carry their anti-hoisting dependence on the control
  // edge rather than the memory edge).
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};


//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowOop *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};


//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
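
// Illustrative use of the factory above (editorial sketch; names assumed
// to be in scope):
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
// make() selects the concrete subclass (here a StoreINode) from the BasicType.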

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store ) : StoreNode(c,mem,adr,at,val,oop_store) {}
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------LoadLLockedNode---------------------------------
// Load-locked a long from memory.
// On Sparc & Intel this is implemented as a normal long load.
class LoadLLockedNode : public LoadLNode {
public:
  LoadLLockedNode( Node *c, Node *mem, Node *adr )
    : LoadLNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeLong::LONG) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StoreLConditional; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
class LoadStoreNode : public Node {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex);
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }
};
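
// Editorial note: the conditional-store and CompareAndSwap nodes below take
// the "expected"/load-locked value as the extra input at
// in(LoadStoreNode::ExpectedIn); their boolean success result has
// bottom_type() TypeInt::BOOL, and the updated memory state is exposed
// through an SCMemProjNode projection (see above).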

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};


//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base ) : Node(ctrl,arymem,word_cnt,base) {}
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
};
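
// Illustrative call (editorial sketch; hypothetical names, constant offsets):
//   mem = ClearArrayNode::clear_memory(ctl, mem, rawptr,
//                                      header_size_in_bytes, // BytesPerInt-aligned
//                                      end_offset,           // BytesPerLong-aligned
//                                      phase);
// The returned node is the new memory state covering the zeroed range.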

//------------------------------StrComp-------------------------------------
class StrCompNode: public Node {
public:
  StrCompNode(Node *control,
              Node* char_array_mem,
              Node* value_mem,
              Node* count_mem,
              Node* offset_mem,
              Node* s1, Node* s2): Node(control,
                                        char_array_mem,
                                        value_mem,
                                        count_mem,
                                        offset_mem,
                                        s1, s2) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  // a StrCompNode (conservatively) aliases with everything:
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate them from any following volatile-load.
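// For example, per the rules above, a volatile store is bracketed roughly as
//   MemBarRelease; Store; MemBarVolatile
// and a volatile load is followed by a MemBarAcquire (illustrative summary,
// not part of the original comment).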
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
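
// Illustrative use of the factory (editorial sketch; Op_MemBarAcquire is
// assumed to be the opcode constant for an acquire barrier):
//   MemBarNode* mb = MemBarNode::make(C, Op_MemBarAcquire);
// With the defaults this builds a wide (AliasIdxBot) barrier with no
// precedent edge.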

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load or FastLock.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store or FastUnLock.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  bool _is_complete;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();
  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode be followed by initialization
  // of the new memory to zero, then by any explicit initializers.
  bool is_complete() { return _is_complete; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, bool st_is_pinned, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
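
// Illustrative capture flow (editorial sketch, not verbatim from memnode.cpp):
//   intptr_t off = init->can_capture_store(st, phase);
//   if (off > 0)  init->capture_store(st, off, phase);
// A zero return from can_capture_store means the store must stay where it is.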

//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;  // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
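
// Illustrative slice access (editorial sketch; alias_idx assumed valid):
//   Node* slice = mm->memory_at(alias_idx);   // falls back to base_memory()
//   mm->set_memory_at(alias_idx, new_state);  // install an updated slice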

class MergeMemStream : public StackObj {
private:
  MergeMemNode*       _mm;
  const MergeMemNode* _mm2;      // optional second guy, contributes non-empty iterations
  Node*               _mm_base;  // loop-invariant base memory of _mm
  int                 _idx;
  int                 _cnt;
  Node*               _mem;
  Node*               _mem2;
  int                 _cnt2;

  void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
    // subsume_node will break sparseness at times, whenever a memory slice
    // folds down to a copy of the base ("fat") memory.  In such a case,
    // the raw edge will update to base, although it should be top.
    // This iterator will recognize either top or base_memory as an
    // "empty" slice.  See is_empty, is_empty2, and next below.
    //
    // The sparseness property is repaired in MergeMemNode::Ideal.
    // As long as access to a MergeMem goes through this iterator
    // or the memory_at accessor, flaws in the sparseness will
    // never be observed.
    //
    // Also, iteration_setup repairs sparseness.
    assert(mm->verify_sparse(), "please, no dups of base");
    assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");

    _mm  = mm;
    _mm_base = mm->base_memory();
    _mm2 = mm2;
    _cnt = mm->req();
    _idx = Compile::AliasIdxBot-1; // start at the base memory
    _mem = NULL;
    _mem2 = NULL;
  }

#ifdef ASSERT
  Node* check_memory() const {
    if (at_base_memory())
      return _mm->base_memory();
    else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
      return _mm->memory_at(_idx);
    else
      return _mm_base;
  }
  Node* check_memory2() const {
    return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
  }
#endif

  static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
  void assert_synch() const {
    assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
           "no side-effects except through the stream");
  }

public:

  // expected usages:
  // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
  // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note:  This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top())  _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;   // is_empty() == true
      }
    }
    return false;
  }
};
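
// Illustrative iteration (editorial sketch, expanding the "expected usages"
// comment above): visit every non-empty slice of a MergeMem:
//   for (MergeMemStream mms(merged_memory); mms.next_non_empty(); ) {
//     Node* old_mem = mms.memory();
//     // ... transform old_mem, then install the result if it changed:
//     // mms.set_memory(new_mem);
//   }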

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load.  Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load.  Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};
