Fri, 15 Jan 2016 22:33:15 +0000
8132051: Better byte behavior
Reviewed-by: coleenp, roland
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,   // When is it safe to do this load?
         Memory,    // Chunk of memory is being loaded from
         Address,   // Actually address, derived from base
         ValueIn,   // Value to store
         OopStore   // Preceding oop store, only in StoreCM
       };
  typedef enum { unordered = 0,
                 acquire,    // Load has to acquire or be succeeded by MemBarAcquire.
                 release     // Store has to release or be preceded by MemBarRelease.
               } MemOrd;
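  // Example (a hedged sketch, not part of the original header): a caller
  // building a load for a Java volatile field would request acquire
  // semantics through the LoadNode factory declared below; 'gvn', 'ctl',
  // 'mem', 'adr' and 'adr_type' are assumed to be in scope:
  //
  //   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type,
  //                             TypeInt::INT, T_INT, MemNode::acquire);
  //
  // A plain (non-volatile) read would pass MemNode::unordered instead.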
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer. Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state? (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;
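  // Example (a hedged sketch, not part of the original header):
  // can_see_stored_value() is what lets a load fold to the value of a
  // dominating store to the same address, e.g.
  //
  //   mem2 = StoreI(ctl, mem, adr, v)
  //   x    = LoadI(ctl, mem2, adr)    // Identity() can replace x with v
  //
  // provided no aliased store intervenes between the two.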
#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
public:
  // Some loads (from unsafe) should be pinned: they don't depend only
  // on the dominating test. The boolean field _depends_only_on_test
  // below records whether that node depends only on the dominating
  // test.
  // Methods used to build LoadNodes pass an argument of type enum
  // ControlDependency instead of a boolean because those methods
  // typically have multiple boolean parameters with default values:
  // passing the wrong boolean to one of these parameters by mistake
  // goes easily unnoticed. Using an enum, the compiler can check that
  // the type of a value and the type of the parameter match.
  enum ControlDependency {
    Pinned,
    DependsOnlyOnTest
  };
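  // Example (a hedged sketch, not part of the original header): with the
  // enum, a mixed-up argument is a compile-time type error, e.g.
  //
  //   LoadNode::make(gvn, ctl, mem, adr, at, TypeInt::INT, T_INT,
  //                  MemNode::unordered, LoadNode::Pinned);
  //
  // whereas a trailing 'bool pinned' argument could silently bind to a
  // neighboring boolean parameter with a default value.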
private:
  // LoadNode::hash() doesn't take the _depends_only_on_test field
  // into account: If the graph already has a non-pinned LoadNode and
  // we add a pinned LoadNode with the same inputs, it's safe for GVN
  // to replace the pinned LoadNode with the non-pinned LoadNode,
  // otherwise it wouldn't be safe to have a non-pinned LoadNode with
  // those inputs in the first place. If the graph already has a
  // pinned LoadNode and we add a non-pinned LoadNode with the same
  // inputs, it's safe (but suboptimal) for GVN to replace the
  // non-pinned LoadNode by the pinned LoadNode.
  bool _depends_only_on_test;

  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // between loads that can be reordered and loads that require acquire
  // semantics to adhere to the Java specification. The required behaviour
  // is stored in this field.
  const MemOrd _mo;

protected:
  virtual uint cmp(const Node &n) const;
  virtual uint size_of() const; // Size is bigger
  // Should LoadNode::Ideal() attempt to remove control edges?
  virtual bool can_remove_control() const;
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
    : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
    init_class_id(Class_Load);
  }
  inline bool is_unordered() const { return !is_acquire(); }
  inline bool is_acquire() const {
    assert(_mo == unordered || _mo == acquire, "unexpected");
    return _mo == acquire;
  }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                    const TypePtr* at, const Type *rt, BasicType bt,
                    MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here. If we have an identity, return the Node
  // we are equivalent to. We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, it might be possible to
  // zero out the control input.
  // If the offset is constant and the base is an object allocation,
  // try to hook me up to the exact initializing store.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t;  // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; };

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations. However, for the
  // special case of RawPtr loads from TLS top & end, and other loads performed by
  // GC barriers, the control edge carries the dependence preventing hoisting past
  // a Safepoint instead of the memory edge. (An unfortunate consequence of having
  // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
  // which produce results (new raw memory state) inside of loops preventing all
  // manner of other optimizations). Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
};

//------------------------------LoadBNode--------------------------------------
// Load a byte (8 bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8 bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16 bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16 bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Memory, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // between stores that can be reordered and stores that require release
  // semantics to adhere to the Java specification. The required behaviour
  // is stored in this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);
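  // Example (a hedged sketch, not part of the original header): these helpers
  // let narrow stores drop redundant masking or sign-extension of the value
  // input, since e.g. a StoreB keeps only the low 8 bits anyway:
  //
  //   StoreB(ctl, mem, adr, AndI(v, 0xFF))               ==> StoreB(ctl, mem, adr, v)
  //   StoreC(ctl, mem, adr, RShiftI(LShiftI(v, 16), 16)) ==> StoreC(ctl, mem, adr, v)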

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }
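  // Example (a hedged sketch, not part of the original header): a caller
  // emitting a store of a value whose BasicType 'bt' is only known at parse
  // time would typically write
  //
  //   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, at, val, bt,
  //                                   StoreNode::release_if_reference(bt));
  //
  // so oop/array/address stores get release semantics while primitive
  // stores stay unordered.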
  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;  // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const {return Type::MEMORY;}
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
  virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {};
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
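  // Example (a hedged sketch, not part of the original header): to zero the
  // body of a freshly allocated array, with 'hdr' the BytesPerInt-aligned
  // header size and 'end' the BytesPerLong-aligned end offset, one might
  // write:
  //
  //   mem = ClearArrayNode::clear_memory(ctl, mem, dest, hdr, end, &gvn);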
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {};
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
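// Example (a hedged sketch, not part of the original header): a Java
// volatile store followed elsewhere by a volatile load is thus bracketed
// roughly as
//
//   MemBarRelease
//   StoreX             // the volatile store, MemNode::release
//   MemBarVolatile
//   ...
//   LoadX              // the volatile load, MemNode::acquire
//   MemBarAcquire
//
// (the exact shape is chosen by the parser and intrinsics, not by this
// header).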
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;  // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache). Requires multi-cpu
// visibility. Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load). Requires
// multi-cpu visibility. Inserted before a FastUnLock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};

// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};

// Ordering within the same CPU. Used to order unsafe memory references
// inside the compiler when we lack alias info. Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};

// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,   // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,  // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1   // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();
  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete. (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store. Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
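  // Example (a hedged sketch, not part of the original header): if the graph
  // contains
  //
  //   init = InitializeNode for a fresh allocation 'oop'
  //   st   = StoreI(ctl, mem, AddP(oop, oop, #12), ConI(42))
  //
  // and can_capture_store(st, ...) returns offset 12, capture_store()
  // rewrites st to write init's raw memory, making the constant part of the
  // object's initial state rather than a separate store.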
1221 // Called when the associated AllocateNode is expanded into CFG.
1222 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1223 intptr_t header_size, Node* size_in_bytes,
1224 PhaseGVN* phase);
1226 private:
1227 void remove_extra_zeroes();
1229 // Find out where a captured store should be placed (or already is placed).
1230 int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1231 PhaseTransform* phase);
1233 static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
1235 Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
1237 bool detect_init_independence(Node* n, int& count);
1239 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1240 PhaseGVN* phase);
1242 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1243 };
1245 //------------------------------MergeMem---------------------------------------
1246 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1247 class MergeMemNode: public Node {
1248 virtual uint hash() const ; // { return NO_HASH; }
1249 virtual uint cmp( const Node &n ) const ; // Always fail, except on self
1250 friend class MergeMemStream;
1251 MergeMemNode(Node* def); // clients use MergeMemNode::make
1253 public:
1254 // If the input is a whole memory state, clone it with all its slices intact.
1255 // Otherwise, make a new memory state with just that base memory input.
1256 // In either case, the result is a newly created MergeMem.
1257 static MergeMemNode* make(Compile* C, Node* base_memory);
1259 virtual int Opcode() const;
1260 virtual Node *Identity( PhaseTransform *phase );
1261 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1262 virtual uint ideal_reg() const { return NotAMachineReg; }
1263 virtual uint match_edge(uint idx) const { return 0; }
1264 virtual const RegMask &out_RegMask() const;
1265 virtual const Type *bottom_type() const { return Type::MEMORY; }
1266 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1267 // sparse accessors
1268 // Fetch the previously stored "set_memory_at", or else the base memory.
1269 // (Caller should clone it if it is a phi-nest.)
1270 Node* memory_at(uint alias_idx) const;
1271 // set the memory, regardless of its previous value
1272 void set_memory_at(uint alias_idx, Node* n);
1273 // the "base" is the memory that provides the non-finite support
1274 Node* base_memory() const { return in(Compile::AliasIdxBot); }
1275 // warning: setting the base can implicitly set any of the other slices too
1276 void set_base_memory(Node* def);
1277 // sentinel value which denotes a copy of the base memory:
1278 Node* empty_memory() const { return in(Compile::AliasIdxTop); }
1279 static Node* make_empty_memory(); // where the sentinel comes from
1280 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1281 // hook for the iterator, to perform any necessary setup
1282 void iteration_setup(const MergeMemNode* other = NULL);
1283 // push sentinels until I am at least as long as the other (semantic no-op)
1284 void grow_to_match(const MergeMemNode* other);
1285 bool verify_sparse() const PRODUCT_RETURN0;
1286 #ifndef PRODUCT
1287 virtual void dump_spec(outputStream *st) const;
1288 #endif
1289 };
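// A minimal sketch of the sparse accessors above, assuming a Compile* 'C',
// a memory state 'mem', an alias index 'alias_idx', and a replacement slice
// 'new_slice' from the surrounding context:
//
//   MergeMemNode* mm = MergeMemNode::make(C, mem); // fresh merge over 'mem'
//   Node* slice = mm->memory_at(alias_idx);        // explicit slice, else base
//   mm->set_memory_at(alias_idx, new_slice);       // override just this slice
//   // Alias categories never set explicitly keep tracking base_memory().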
1291 class MergeMemStream : public StackObj {
1292 private:
1293 MergeMemNode* _mm;
1294 const MergeMemNode* _mm2; // optional second merge; contributes the non-empty iterations
1295 Node* _mm_base; // loop-invariant base memory of _mm
1296 int _idx;
1297 int _cnt;
1298 Node* _mem;
1299 Node* _mem2;
1300 int _cnt2;
1302 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
1303 // subsume_node will break sparseness at times, whenever a memory slice
1304 // folds down to a copy of the base ("fat") memory. In such a case,
1305 // the raw edge will update to base, although it should be top.
1306 // This iterator will recognize either top or base_memory as an
1307 // "empty" slice. See is_empty, is_empty2, and next below.
1308 //
1309 // The sparseness property is repaired in MergeMemNode::Ideal.
1310 // As long as access to a MergeMem goes through this iterator
1311 // or the memory_at accessor, flaws in the sparseness will
1312 // never be observed.
1313 //
1314 // Also, iteration_setup repairs sparseness.
1315 assert(mm->verify_sparse(), "please, no dups of base");
1316 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
1318 _mm = mm;
1319 _mm_base = mm->base_memory();
1320 _mm2 = mm2;
1321 _cnt = mm->req();
1322 _idx = Compile::AliasIdxBot-1; // start at the base memory
1323 _mem = NULL;
1324 _mem2 = NULL;
1325 }
1327 #ifdef ASSERT
1328 Node* check_memory() const {
1329 if (at_base_memory())
1330 return _mm->base_memory();
1331 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1332 return _mm->memory_at(_idx);
1333 else
1334 return _mm_base;
1335 }
1336 Node* check_memory2() const {
1337 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1338 }
1339 #endif
1341 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1342 void assert_synch() const {
1343 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1344 "no side-effects except through the stream");
1345 }
1347 public:
1349 // expected usages (a fuller sketch follows this class):
1350 // for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) { ... }
1351 // for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) { ... }
1353 // iterate over one merge
1354 MergeMemStream(MergeMemNode* mm) {
1355 mm->iteration_setup();
1356 init(mm);
1357 debug_only(_cnt2 = 999);
1358 }
1359 // iterate in parallel over two merges
1360 // only iterates through non-empty elements of mm2
1361 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1362 assert(mm2, "second argument must be a MergeMem also");
1363 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
1364 mm->iteration_setup(mm2);
1365 init(mm, mm2);
1366 _cnt2 = mm2->req();
1367 }
1368 #ifdef ASSERT
1369 ~MergeMemStream() {
1370 assert_synch();
1371 }
1372 #endif
1374 MergeMemNode* all_memory() const {
1375 return _mm;
1376 }
1377 Node* base_memory() const {
1378 assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1379 return _mm_base;
1380 }
1381 const MergeMemNode* all_memory2() const {
1382 assert(_mm2 != NULL, "");
1383 return _mm2;
1384 }
1385 bool at_base_memory() const {
1386 return _idx == Compile::AliasIdxBot;
1387 }
1388 int alias_idx() const {
1389 assert(_mem, "must call next 1st");
1390 return _idx;
1391 }
1393 const TypePtr* adr_type() const {
1394 return Compile::current()->get_adr_type(alias_idx());
1395 }
1397 const TypePtr* adr_type(Compile* C) const {
1398 return C->get_adr_type(alias_idx());
1399 }
1400 bool is_empty() const {
1401 assert(_mem, "must call next 1st");
1402 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1403 return _mem->is_top();
1404 }
1405 bool is_empty2() const {
1406 assert(_mem2, "must call next 1st");
1407 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1408 return _mem2->is_top();
1409 }
1410 Node* memory() const {
1411 assert(!is_empty(), "must not be empty");
1412 assert_synch();
1413 return _mem;
1414 }
1415 // get the current memory, regardless of empty or non-empty status
1416 Node* force_memory() const {
1417 assert(!is_empty() || !at_base_memory(), "");
1418 // Use _mm_base to defend against updates to _mm->base_memory().
1419 Node *mem = _mem->is_top() ? _mm_base : _mem;
1420 assert(mem == check_memory(), "");
1421 return mem;
1422 }
1423 Node* memory2() const {
1424 assert(_mem2 == check_memory2(), "");
1425 return _mem2;
1426 }
1427 void set_memory(Node* mem) {
1428 if (at_base_memory()) {
1429 // Note that this does not change the invariant _mm_base.
1430 _mm->set_base_memory(mem);
1431 } else {
1432 _mm->set_memory_at(_idx, mem);
1433 }
1434 _mem = mem;
1435 assert_synch();
1436 }
1438 // Recover from a side effect to the MergeMemNode.
1439 void set_memory() {
1440 _mem = _mm->in(_idx);
1441 }
1443 bool next() { return next(false); }
1444 bool next2() { return next(true); }
1446 bool next_non_empty() { return next_non_empty(false); }
1447 bool next_non_empty2() { return next_non_empty(true); }
1448 // next_non_empty2 can yield states where is_empty() is true
1450 private:
1451 // find the next item, which might be empty
1452 bool next(bool have_mm2) {
1453 assert((_mm2 != NULL) == have_mm2, "use other next");
1454 assert_synch();
1455 if (++_idx < _cnt) {
1456 // Note: This iterator allows _mm to be non-sparse.
1457 // It behaves the same whether _mem is top or base_memory.
1458 _mem = _mm->in(_idx);
1459 if (have_mm2)
1460 _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
1461 return true;
1462 }
1463 return false;
1464 }
1466 // find the next non-empty item
1467 bool next_non_empty(bool have_mm2) {
1468 while (next(have_mm2)) {
1469 if (!is_empty()) {
1470 // make sure _mem2 is filled in sensibly
1471 if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
1472 return true;
1473 } else if (have_mm2 && !is_empty2()) {
1474 return true; // is_empty() == true
1475 }
1476 }
1477 return false;
1478 }
1479 };
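// A fuller sketch of the single-merge idiom from "expected usages" above;
// 'mem' and 'transform_slice' are assumed names:
//
//   for (MergeMemStream mms(mem->as_MergeMem()); mms.next_non_empty(); ) {
//     Node* old_slice = mms.memory();     // non-empty slice at mms.alias_idx()
//     Node* new_slice = transform_slice(old_slice);
//     if (new_slice != old_slice)
//       mms.set_memory(new_slice);        // write back through the stream
//   }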
1481 //------------------------------Prefetch---------------------------------------
1483 // Non-faulting prefetch load. Prefetch for many reads.
1484 class PrefetchReadNode : public Node {
1485 public:
1486 PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1487 virtual int Opcode() const;
1488 virtual uint ideal_reg() const { return NotAMachineReg; }
1489 virtual uint match_edge(uint idx) const { return idx==2; }
1490 virtual const Type *bottom_type() const { return Type::ABIO; }
1491 };
1493 // Non-faulting prefetch load. Prefetch for many reads & many writes.
1494 class PrefetchWriteNode : public Node {
1495 public:
1496 PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1497 virtual int Opcode() const;
1498 virtual uint ideal_reg() const { return NotAMachineReg; }
1499 virtual uint match_edge(uint idx) const { return idx==2; }
1500 virtual const Type *bottom_type() const { return Type::ABIO; }
1501 };
1503 // Allocation prefetch which may fault; the TLAB size has to be adjusted (wiring sketch below).
1504 class PrefetchAllocationNode : public Node {
1505 public:
1506 PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
1507 virtual int Opcode() const;
1508 virtual uint ideal_reg() const { return NotAMachineReg; }
1509 virtual uint match_edge(uint idx) const { return idx==2; }
1510 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1511 };
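// A rough sketch of how an allocation prefetch is wired during macro
// expansion; 'C', 'i_o', and 'prefetch_adr' are assumed names, and the
// jdk8-era 'new (C)' node allocation is assumed:
//
//   Node* prefetch = new (C) PrefetchAllocationNode(i_o, prefetch_adr);
//   // With AllocatePrefetchStyle == 3 the node is placed on the raw-memory
//   // chain instead of the i_o chain (hence Type::MEMORY in bottom_type above).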
1513 #endif // SHARE_VM_OPTO_MEMNODE_HPP