Mon, 25 May 2020 14:24:27 +0800
8244407: JVM crashes after transformation in C2 IdealLoopTree::split_fall_in
Reviewed-by: thartmann, kvn, andrew
Contributed-by: zhouyong44@huawei.com
1 /*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_OPTO_MEMNODE_HPP
26 #define SHARE_VM_OPTO_MEMNODE_HPP
28 #include "opto/multnode.hpp"
29 #include "opto/node.hpp"
30 #include "opto/opcodes.hpp"
31 #include "opto/type.hpp"
33 // Portions of code courtesy of Clifford Click
35 class MultiNode;
36 class PhaseCCP;
37 class PhaseTransform;
//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception.
// Common base class for all memory-accessing ideal nodes; it fixes the
// meaning of the first few input edges (Control, Memory, Address, ...) for
// every subclass and carries the (debug-only) address type.
class MemNode : public Node {
private:
  bool _unaligned_access;     // Unaligned access from unsafe
  bool _mismatched_access;    // Mismatched access from unsafe: byte read in integer array for instance
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;   // What kind of memory is being addressed?
#endif
  virtual uint size_of() const;
public:
  // Fixed input-edge indices shared by all memory nodes.
  enum { Control,             // When is it safe to do this load?
         Memory,              // Chunk of memory is being loaded from
         Address,             // Actually address, derived from base
         ValueIn,             // Value to store
         OopStore             // Preceding oop store, only in StoreCM
  };
  // Required memory-ordering semantics for this access; checked by
  // is_acquire()/is_release() in the Load/Store subclasses.
  typedef enum { unordered = 0,
                 acquire,     // Load has to acquire or be succeeded by MemBarAcquire.
                 release      // Store has to release or be preceded by MemBarRelease.
  } MemOrd;
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2   ), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    // Debug builds record the address type and sanity-check it immediately.
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4), _unaligned_access(false), _mismatched_access(false) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

  static bool check_if_adr_maybe_raw(Node* adr);

public:
  // Helpers for the optimizer.  Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static  Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory?  (T_VOID mean "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state?  (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

  void set_unaligned_access() { _unaligned_access = true; }
  bool is_unaligned_access() const { return _unaligned_access; }
  void set_mismatched_access() { _mismatched_access = true; }
  bool is_mismatched_access() const { return _mismatched_access; }

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};
148 //------------------------------LoadNode---------------------------------------
149 // Load value; requires Memory and Address
150 class LoadNode : public MemNode {
151 public:
152 // Some loads (from unsafe) should be pinned: they don't depend only
153 // on the dominating test. The boolean field _depends_only_on_test
154 // below records whether that node depends only on the dominating
155 // test.
156 // Methods used to build LoadNodes pass an argument of type enum
157 // ControlDependency instead of a boolean because those methods
158 // typically have multiple boolean parameters with default values:
159 // passing the wrong boolean to one of these parameters by mistake
160 // goes easily unnoticed. Using an enum, the compiler can check that
161 // the type of a value and the type of the parameter match.
162 enum ControlDependency {
163 Pinned,
164 DependsOnlyOnTest
165 };
166 private:
167 // LoadNode::hash() doesn't take the _depends_only_on_test field
168 // into account: If the graph already has a non-pinned LoadNode and
169 // we add a pinned LoadNode with the same inputs, it's safe for GVN
170 // to replace the pinned LoadNode with the non-pinned LoadNode,
171 // otherwise it wouldn't be safe to have a non pinned LoadNode with
172 // those inputs in the first place. If the graph already has a
173 // pinned LoadNode and we add a non pinned LoadNode with the same
174 // inputs, it's safe (but suboptimal) for GVN to replace the
175 // non-pinned LoadNode by the pinned LoadNode.
176 bool _depends_only_on_test;
178 // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
179 // loads that can be reordered, and such requiring acquire semantics to
180 // adhere to the Java specification. The required behaviour is stored in
181 // this field.
182 const MemOrd _mo;
184 protected:
185 virtual uint cmp(const Node &n) const;
186 virtual uint size_of() const; // Size is bigger
187 // Should LoadNode::Ideal() attempt to remove control edges?
188 virtual bool can_remove_control() const;
189 const Type* const _type; // What kind of value is loaded?
190 public:
192 LoadNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt, MemOrd mo, ControlDependency control_dependency)
193 : MemNode(c,mem,adr,at), _type(rt), _mo(mo), _depends_only_on_test(control_dependency == DependsOnlyOnTest) {
194 init_class_id(Class_Load);
195 }
196 inline bool is_unordered() const { return !is_acquire(); }
197 inline bool is_acquire() const {
198 assert(_mo == unordered || _mo == acquire, "unexpected");
199 return _mo == acquire;
200 }
202 // Polymorphic factory method:
203 static Node* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
204 const TypePtr* at, const Type *rt, BasicType bt,
205 MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
207 virtual uint hash() const; // Check the type
209 // Handle algebraic identities here. If we have an identity, return the Node
210 // we are equivalent to. We look for Load of a Store.
211 virtual Node *Identity( PhaseTransform *phase );
213 // If the load is from Field memory and the pointer is non-null, it might be possible to
214 // zero out the control input.
215 // If the offset is constant and the base is an object allocation,
216 // try to hook me up to the exact initializing store.
217 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
219 // Split instance field load through Phi.
220 Node* split_through_phi(PhaseGVN *phase);
222 // Recover original value from boxed values
223 Node *eliminate_autobox(PhaseGVN *phase);
225 // Compute a new Type for this node. Basically we just do the pre-check,
226 // then call the virtual add() to set the type.
227 virtual const Type *Value( PhaseTransform *phase ) const;
229 // Common methods for LoadKlass and LoadNKlass nodes.
230 const Type *klass_value_common( PhaseTransform *phase ) const;
231 Node *klass_identity_common( PhaseTransform *phase );
233 virtual uint ideal_reg() const;
234 virtual const Type *bottom_type() const;
235 // Following method is copied from TypeNode:
236 void set_type(const Type* t) {
237 assert(t != NULL, "sanity");
238 debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
239 *(const Type**)&_type = t; // cast away const-ness
240 // If this node is in the hash table, make sure it doesn't need a rehash.
241 assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
242 }
243 const Type* type() const { assert(_type != NULL, "sanity"); return _type; };
245 // Do not match memory edge
246 virtual uint match_edge(uint idx) const;
248 // Map a load opcode to its corresponding store opcode.
249 virtual int store_Opcode() const = 0;
251 // Check if the load's memory input is a Phi node with the same control.
252 bool is_instance_field_load_with_local_phi(Node* ctrl);
254 #ifndef PRODUCT
255 virtual void dump_spec(outputStream *st) const;
256 #endif
257 #ifdef ASSERT
258 // Helper function to allow a raw load without control edge for some cases
259 static bool is_immutable_value(Node* adr);
260 #endif
261 protected:
262 const Type* load_array_final_field(const TypeKlassPtr *tkls,
263 ciKlass* klass) const;
264 // depends_only_on_test is almost always true, and needs to be almost always
265 // true to enable key hoisting & commoning optimizations. However, for the
266 // special case of RawPtr loads from TLS top & end, and other loads performed by
267 // GC barriers, the control edge carries the dependence preventing hoisting past
268 // a Safepoint instead of the memory edge. (An unfortunate consequence of having
269 // Safepoints not set Raw Memory; itself an unfortunate consequence of having Nodes
270 // which produce results (new raw memory state) inside of loops preventing all
271 // manner of other optimizations). Basically, it's ugly but so is the alternative.
272 // See comment in macro.cpp, around line 125 expand_allocate_common().
273 virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM && _depends_only_on_test; }
274 };
//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }  // result lives in an int register
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }  // matching store opcode
  virtual BasicType memory_type() const { return T_BYTE; }
};
//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }  // result lives in an int register
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }  // same store as the signed byte load
  virtual BasicType memory_type() const { return T_BYTE; }
};
//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }  // result lives in an int register
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }  // same store as the signed short load
  virtual BasicType memory_type() const { return T_CHAR; }
};
//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }  // result lives in an int register
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }  // matching store opcode
  virtual BasicType memory_type() const { return T_SHORT; }
};
//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, ti, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }  // matching store opcode
  virtual BasicType memory_type() const { return T_INT; }
};
//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  // Array lengths are non-negative, hence the TypeInt::POS default type;
  // the length always lives at the fixed RANGE slice (TypeAryPtr::RANGE).
  LoadRangeNode(Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS)
    : LoadINode(c, mem, adr, TypeAryPtr::RANGE, ti, MemNode::unordered) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};
//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  // _require_atomic_access participates in hash() and cmp() so GVN never
  // merges an atomic long load with a non-atomic one.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeLong *tl,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, tl, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }  // matching store opcode
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a long load that must not be split into two 32-bit loads.
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadLNode(c, mem, adr, at, TypeLong::LONG, mo, control_dependency) {}
  virtual int Opcode() const;  // only the opcode differs from LoadLNode
};
//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }  // matching store opcode
  virtual BasicType memory_type() const { return T_FLOAT; }
};
//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
  // _require_atomic_access participates in hash() and cmp() so GVN never
  // merges an atomic double load with a non-atomic one.
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadDNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t,
            MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest, bool require_atomic_access = false)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }  // matching store opcode
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a double load that must not be split into two 32-bit loads.
  static LoadDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type,
                                const Type* rt, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode(Node *c, Node *mem, Node *adr, const TypePtr* at, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadDNode(c, mem, adr, at, Type::DOUBLE, mo, control_dependency) {}
  virtual int Opcode() const;  // only the opcode differs from LoadDNode
};
//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }  // matching store opcode
  virtual BasicType memory_type() const { return T_ADDRESS; }
};
//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t, MemOrd mo, ControlDependency control_dependency = DependsOnlyOnTest)
    : LoadNode(c, mem, adr, at, t, mo, control_dependency) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }  // matching store opcode
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};
//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
protected:
  // In most cases, LoadKlassNode does not have the control input set. If the control
  // input is set, it must not be removed (by LoadNode::Ideal()).
  virtual bool can_remove_control() const;
public:
  LoadKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk, MemOrd mo)
    : LoadPNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  // The loaded klass is immutable, so any control dependence is only the test.
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make(PhaseGVN& gvn, Node* ctl, Node* mem, Node* adr, const TypePtr* at,
                    const TypeKlassPtr* tk = TypeKlassPtr::OBJECT);
};
//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk, MemOrd mo)
    : LoadNNode(c, mem, adr, at, tk, mo) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }  // matching store opcode
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  // The loaded klass is immutable, so any control dependence is only the test.
  virtual bool depends_only_on_test() const { return true; }
};
//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
private:
  // On platforms with weak memory ordering (e.g., PPC, Ia64) we distinguish
  // stores that can be reordered, and such requiring release semantics to
  // adhere to the Java specification.  The required behaviour is stored in
  // this field.
  const MemOrd _mo;
  // Needed for proper cloning.
  virtual uint size_of() const { return sizeof(*this); }
protected:
  virtual uint cmp( const Node &n ) const;
  // Stores are never hoisted above their dominating test; always false.
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int  num_bits);

public:
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : MemNode(c, mem, adr, at, val), _mo(mo) {
    init_class_id(Class_Store);
  }
  StoreNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, MemOrd mo)
    : MemNode(c, mem, adr, at, val, oop_store), _mo(mo) {
    init_class_id(Class_Store);
  }

  inline bool is_unordered() const { return !is_release(); }
  inline bool is_release() const {
    assert((_mo == unordered || _mo == release), "unexpected");
    return _mo == release;
  }

  // Conservatively release stores of object references in order to
  // ensure visibility of object initialization.
  static inline MemOrd release_if_reference(const BasicType t) {
    const MemOrd mo = (t == T_ARRAY ||
                       t == T_ADDRESS || // Might be the address of an object reference (`boxing').
                       t == T_OBJECT) ? release : unordered;
    return mo;
  }

  // Polymorphic factory method
  //
  // We must ensure that stores of object references will be visible
  // only after the object's initialization. So the callers of this
  // procedure must indicate that the store requires `release'
  // semantics, if the stored value is an object reference that might
  // point to a new object and may become externally visible.
  static StoreNode* make(PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                         const TypePtr* at, Node *val, BasicType bt, MemOrd mo);

  virtual uint hash() const;    // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node.  Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};
//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};
//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};
//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  // _require_atomic_access participates in hash() and cmp() so GVN never
  // merges an atomic long store with a non-atomic one.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a long store that must not be split into two 32-bit stores.
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif
};
//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};
//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
  // _require_atomic_access participates in hash() and cmp() so GVN never
  // merges an atomic double store with a non-atomic one.
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreDNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?
public:
  StoreDNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
             MemOrd mo, bool require_atomic_access = false)
    : StoreNode(c, mem, adr, at, val, mo), _require_atomic_access(require_atomic_access) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
  bool require_atomic_access() const { return _require_atomic_access; }
  // Factory for a double store that must not be split into two 32-bit stores.
  static StoreDNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val, MemOrd mo);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access)  st->print(" Atomic!");
  }
#endif

};
//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};
//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};
//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
// Inherits StoreNNode behavior; only the memory_type differs.
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode(Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, MemOrd mo)
    : StoreNNode(c, mem, adr, at, val, mo) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};
716 //------------------------------StoreCMNode-----------------------------------
717 // Store card-mark byte to memory for CM
718 // The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
719 // Preceeding equivalent StoreCMs may be eliminated.
720 class StoreCMNode : public StoreNode {
721 private:
722 virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
723 virtual uint cmp( const Node &n ) const {
724 return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
725 && StoreNode::cmp(n);
726 }
727 virtual uint size_of() const { return sizeof(*this); }
728 int _oop_alias_idx; // The alias_idx of OopStore
730 public:
731 StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
732 StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
733 _oop_alias_idx(oop_alias_idx) {
734 assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
735 _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
736 "bad oop alias idx");
737 }
738 virtual int Opcode() const;
739 virtual Node *Identity( PhaseTransform *phase );
740 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
741 virtual const Type *Value( PhaseTransform *phase ) const;
742 virtual BasicType memory_type() const { return T_VOID; } // unspecific
743 int oop_alias_idx() const { return _oop_alias_idx; }
744 };
//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode(Node *c, Node *mem, Node *adr, MemOrd mo)
    : LoadPNode(c, mem, adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM, mo) {}
  virtual int Opcode() const;
  // The matching conditional store for this load-locked.
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};
759 //------------------------------SCMemProjNode---------------------------------------
760 // This class defines a projection of the memory state of a store conditional node.
761 // These nodes return a value, but also update memory.
762 class SCMemProjNode : public ProjNode {
763 public:
764 enum {SCMEMPROJCON = (uint)-2};
765 SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
766 virtual int Opcode() const;
767 virtual bool is_CFG() const { return false; }
768 virtual const Type *bottom_type() const {return Type::MEMORY;}
769 virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type();}
770 virtual uint ideal_reg() const { return 0;} // memory projections don't have a register
771 virtual const Type *Value( PhaseTransform *phase ) const;
772 #ifndef PRODUCT
773 virtual void dump_spec(outputStream *st) const {};
774 #endif
775 };
//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
// Base for combined load/store operations (CAS, get-and-add/set).
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  // Address and new-value edges participate in matching.
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  // True when the loaded (old) value has no users; defined in memnode.cpp.
  bool result_not_used() const;
};
// A LoadStoreNode with an extra "expected" input edge, used by the
// conditional (compare-and-...) flavors below.
class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};
//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};
//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};
//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked.  Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};
//------------------------------CompareAndSwapLNode---------------------------
// Compare-and-swap of a long; the expected value arrives on the extra
// ExpectedIn edge (see LoadStoreConditionalNode).
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
//------------------------------CompareAndSwapINode---------------------------
// Compare-and-swap of an int; expected value on the ExpectedIn edge.
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
//------------------------------CompareAndSwapPNode---------------------------
// Compare-and-swap of a pointer; expected value on the ExpectedIn edge.
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
//------------------------------CompareAndSwapNNode---------------------------
// Compare-and-swap of a narrow oop; expected value on the ExpectedIn edge.
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};
//------------------------------GetAndAddINode---------------------------
// Get-and-add on an int location; result type is TypeInt::INT.
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndAddLNode---------------------------
// Get-and-add on a long location; result type is TypeLong::LONG.
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndSetINode---------------------------
// Get-and-set (swap) of an int; result type is TypeInt::INT.
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndSetLNode---------------------------
// Get-and-set (swap) of a long; result type is TypeLong::LONG.
// (Header previously said "GetAndSetINode" — copy-paste typo, fixed.)
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndSetPNode---------------------------
// Get-and-set (swap) of a pointer; caller supplies the result type 't'.
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
//------------------------------GetAndSetNNode---------------------------
// Get-and-set (swap) of a narrow oop; caller supplies the result type 't'.
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};
//------------------------------ClearArray-------------------------------------
// Zeroes a span of memory words (used for array/object zeroing after allocation).
class ClearArrayNode: public Node {
public:
  // Inputs: control, array memory, word count, base address.
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  // Variant with a variable (node) end offset.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Variant with variable (node) start and end offsets.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  // Six-input form: two (string, count) pairs.
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  // Five-input form: two strings plus one count.
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  // Four-input form: two strings only.
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  // All string intrinsics operate on char[] memory.
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};
975 //------------------------------StrComp-------------------------------------
976 class StrCompNode: public StrIntrinsicNode {
977 public:
978 StrCompNode(Node* control, Node* char_array_mem,
979 Node* s1, Node* c1, Node* s2, Node* c2):
980 StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
981 virtual int Opcode() const;
982 virtual const Type* bottom_type() const { return TypeInt::INT; }
983 };
985 //------------------------------StrEquals-------------------------------------
986 class StrEqualsNode: public StrIntrinsicNode {
987 public:
988 StrEqualsNode(Node* control, Node* char_array_mem,
989 Node* s1, Node* s2, Node* c):
990 StrIntrinsicNode(control, char_array_mem, s1, s2, c) {};
991 virtual int Opcode() const;
992 virtual const Type* bottom_type() const { return TypeInt::BOOL; }
993 };
995 //------------------------------StrIndexOf-------------------------------------
996 class StrIndexOfNode: public StrIntrinsicNode {
997 public:
998 StrIndexOfNode(Node* control, Node* char_array_mem,
999 Node* s1, Node* c1, Node* s2, Node* c2):
1000 StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {};
1001 virtual int Opcode() const;
1002 virtual const Type* bottom_type() const { return TypeInt::INT; }
1003 };
1005 //------------------------------AryEq---------------------------------------
1006 class AryEqNode: public StrIntrinsicNode {
1007 public:
1008 AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
1009 StrIntrinsicNode(control, char_array_mem, s1, s2) {};
1010 virtual int Opcode() const;
1011 virtual const Type* bottom_type() const { return TypeInt::BOOL; }
1012 };
1015 //------------------------------EncodeISOArray--------------------------------
1016 // encode char[] to byte[] in ISO_8859_1
1017 class EncodeISOArrayNode: public Node {
1018 public:
1019 EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {};
1020 virtual int Opcode() const;
1021 virtual bool depends_only_on_test() const { return false; }
1022 virtual const Type* bottom_type() const { return TypeInt::INT; }
1023 virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
1024 virtual uint match_edge(uint idx) const;
1025 virtual uint ideal_reg() const { return Op_RegI; }
1026 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1027 virtual const Type *Value(PhaseTransform *phase) const;
1028 };
//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model.  Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them.  We insert a MemBar-Acquire after a FastLock or
// volatile-load.  Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them.  We insert a MemBar-Release
// before a FastUnlock or volatile-store.  All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing.  Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method.  Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a volatile load.
class MemBarAcquireNode: public MemBarNode {
public:
  MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted independent of any load, as required
// for intrinsic sun.misc.Unsafe.loadFence().
class LoadFenceNode: public MemBarNode {
public:
  LoadFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a volatile store.
class MemBarReleaseNode: public MemBarNode {
public:
  MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted independent of any store, as required
// for intrinsic sun.misc.Unsafe.storeFence().
class StoreFenceNode: public MemBarNode {
public:
  StoreFenceNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// "Acquire" - no following ref can move before (but earlier refs can
// follow, like an early Load stalled in cache).  Requires multi-cpu
// visibility.  Inserted after a FastLock.
class MemBarAcquireLockNode: public MemBarNode {
public:
  MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// "Release" - no earlier ref can move after (but later refs can move
// up, like a speculative pipelined cache-hitting Load).  Requires
// multi-cpu visibility.  Inserted before a FastUnlock.
class MemBarReleaseLockNode: public MemBarNode {
public:
  MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// Store-store barrier; carries its own class id so it can be
// recognized via is_MemBarStoreStore() elsewhere in the compiler.
class MemBarStoreStoreNode: public MemBarNode {
public:
  MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {
    init_class_id(Class_MemBarStoreStore);
  }
  virtual int Opcode() const;
};
// Ordering between a volatile store and a following volatile load.
// Requires multi-CPU visibility?
class MemBarVolatileNode: public MemBarNode {
public:
  MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
};
// Ordering within the same CPU.  Used to order unsafe memory references
// inside the compiler when we lack alias info.  Not needed "outside" the
// compiler because the CPU does all the ordering for us.
class MemBarCPUOrderNode: public MemBarNode {
public:
  MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
    : MemBarNode(C, alias_idx, precedent) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
};
// Isolation of object setup after an AllocateNode and before next safepoint.
// (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
class InitializeNode: public MemBarNode {
  friend class AllocateNode;

  // Completion states for _is_complete (bit flags: Complete may be
  // combined with WithArraycopy, see set_complete_with_arraycopy).
  enum {
    Incomplete    = 0,
    Complete      = 1,
    WithArraycopy = 2
  };
  int _is_complete;

  bool _does_not_escape;

public:
  enum {
    Control    = TypeFunc::Control,
    Memory     = TypeFunc::Memory,     // MergeMem for states affected by this op
    RawAddress = TypeFunc::Parms+0,    // the newly-allocated raw address
    RawStores  = TypeFunc::Parms+1     // zero or more stores (or TOP)
  };

  InitializeNode(Compile* C, int adr_type, Node* rawoop);
  virtual int Opcode() const;
  virtual uint size_of() const { return sizeof(*this); }
  virtual uint ideal_reg() const { return 0; } // not matched in the AD file
  virtual const RegMask &in_RegMask(uint) const;  // mask for RawAddress

  // Manage incoming memory edges via a MergeMem on in(Memory):
  Node* memory(uint alias_idx);

  // The raw memory edge coming directly from the Allocation.
  // The contents of this memory are *always* all-zero-bits.
  Node* zero_memory() { return memory(Compile::AliasIdxRaw); }

  // Return the corresponding allocation for this initialization (or null if none).
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  AllocateNode* allocation();

  // Anything other than zeroing in this init?
  bool is_non_zero();

  // An InitializeNode must be completed before macro expansion is done.
  // Completion requires that the AllocateNode must be followed by
  // initialization of the new memory to zero, then to any initializers.
  bool is_complete() { return _is_complete != Incomplete; }
  bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }

  // Mark complete.  (Must not yet be complete.)
  void set_complete(PhaseGVN* phase);
  void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }

  bool does_not_escape() { return _does_not_escape; }
  void set_does_not_escape() { _does_not_escape = true; }

#ifdef ASSERT
  // ensure all non-degenerate stores are ordered and non-overlapping
  bool stores_are_sane(PhaseTransform* phase);
#endif //ASSERT

  // See if this store can be captured; return offset where it initializes.
  // Return 0 if the store cannot be moved (any sort of problem).
  intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

  // Capture another store; reformat it to write my internal raw memory.
  // Return the captured copy, else NULL if there is some sort of problem.
  Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

  // Find captured store which corresponds to the range [start..start+size).
  // Return my own memory projection (meaning the initial zero bits)
  // if there is no such store.  Return NULL if there is a problem.
  Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);

  // Called when the associated AllocateNode is expanded into CFG.
  Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
                        intptr_t header_size, Node* size_in_bytes,
                        PhaseGVN* phase);

 private:
  void remove_extra_zeroes();

  // Find out where a captured store should be placed (or already is placed).
  int captured_store_insertion_point(intptr_t start, int size_in_bytes,
                                     PhaseTransform* phase);

  static intptr_t get_store_offset(Node* st, PhaseTransform* phase);

  Node* make_raw_address(intptr_t offset, PhaseTransform* phase);

  bool detect_init_independence(Node* n, int& count);

  void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
                               PhaseGVN* phase);

  intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
};
//------------------------------MergeMem---------------------------------------
// (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
class MergeMemNode: public Node {
  virtual uint hash() const ;                  // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;    // Always fail, except on self
  friend class MergeMemStream;
  MergeMemNode(Node* def);  // clients use MergeMemNode::make

public:
  // If the input is a whole memory state, clone it with all its slices intact.
  // Otherwise, make a new memory state with just that base memory input.
  // In either case, the result is a newly created MergeMem.
  static MergeMemNode* make(Compile* C, Node* base_memory);

  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const RegMask &out_RegMask() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  // sparse accessors
  // Fetch the previously stored "set_memory_at", or else the base memory.
  // (Caller should clone it if it is a phi-nest.)
  Node* memory_at(uint alias_idx) const;
  // set the memory, regardless of its previous value
  void set_memory_at(uint alias_idx, Node* n);
  // the "base" is the memory that provides the non-finite support
  Node* base_memory() const { return in(Compile::AliasIdxBot); }
  // warning: setting the base can implicitly set any of the other slices too
  void set_base_memory(Node* def);
  // sentinel value which denotes a copy of the base memory:
  Node* empty_memory() const { return in(Compile::AliasIdxTop); }
  static Node* make_empty_memory(); // where the sentinel comes from
  bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
  // hook for the iterator, to perform any necessary setup
  void iteration_setup(const MergeMemNode* other = NULL);
  // push sentinels until I am at least as long as the other (semantic no-op)
  void grow_to_match(const MergeMemNode* other);
  bool verify_sparse() const PRODUCT_RETURN0;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};
1301 class MergeMemStream : public StackObj {
1302 private:
1303 MergeMemNode* _mm;
1304 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
1305 Node* _mm_base; // loop-invariant base memory of _mm
1306 int _idx;
1307 int _cnt;
1308 Node* _mem;
1309 Node* _mem2;
1310 int _cnt2;
1312 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
1313 // subsume_node will break sparseness at times, whenever a memory slice
1314 // folds down to a copy of the base ("fat") memory. In such a case,
1315 // the raw edge will update to base, although it should be top.
1316 // This iterator will recognize either top or base_memory as an
1317 // "empty" slice. See is_empty, is_empty2, and next below.
1318 //
1319 // The sparseness property is repaired in MergeMemNode::Ideal.
1320 // As long as access to a MergeMem goes through this iterator
1321 // or the memory_at accessor, flaws in the sparseness will
1322 // never be observed.
1323 //
1324 // Also, iteration_setup repairs sparseness.
1325 assert(mm->verify_sparse(), "please, no dups of base");
1326 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
1328 _mm = mm;
1329 _mm_base = mm->base_memory();
1330 _mm2 = mm2;
1331 _cnt = mm->req();
1332 _idx = Compile::AliasIdxBot-1; // start at the base memory
1333 _mem = NULL;
1334 _mem2 = NULL;
1335 }
1337 #ifdef ASSERT
1338 Node* check_memory() const {
1339 if (at_base_memory())
1340 return _mm->base_memory();
1341 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1342 return _mm->memory_at(_idx);
1343 else
1344 return _mm_base;
1345 }
1346 Node* check_memory2() const {
1347 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1348 }
1349 #endif
1351 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1352 void assert_synch() const {
1353 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1354 "no side-effects except through the stream");
1355 }
1357 public:
1359 // expected usages:
1360 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
1361 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
1363 // iterate over one merge
1364 MergeMemStream(MergeMemNode* mm) {
1365 mm->iteration_setup();
1366 init(mm);
1367 debug_only(_cnt2 = 999);
1368 }
1369 // iterate in parallel over two merges
1370 // only iterates through non-empty elements of mm2
1371 MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
1372 assert(mm2, "second argument must be a MergeMem also");
1373 ((MergeMemNode*)mm2)->iteration_setup(); // update hidden state
1374 mm->iteration_setup(mm2);
1375 init(mm, mm2);
1376 _cnt2 = mm2->req();
1377 }
1378 #ifdef ASSERT
1379 ~MergeMemStream() {
1380 assert_synch();
1381 }
1382 #endif
1384 MergeMemNode* all_memory() const {
1385 return _mm;
1386 }
1387 Node* base_memory() const {
1388 assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
1389 return _mm_base;
1390 }
1391 const MergeMemNode* all_memory2() const {
1392 assert(_mm2 != NULL, "");
1393 return _mm2;
1394 }
1395 bool at_base_memory() const {
1396 return _idx == Compile::AliasIdxBot;
1397 }
1398 int alias_idx() const {
1399 assert(_mem, "must call next 1st");
1400 return _idx;
1401 }
1403 const TypePtr* adr_type() const {
1404 return Compile::current()->get_adr_type(alias_idx());
1405 }
1407 const TypePtr* adr_type(Compile* C) const {
1408 return C->get_adr_type(alias_idx());
1409 }
1410 bool is_empty() const {
1411 assert(_mem, "must call next 1st");
1412 assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
1413 return _mem->is_top();
1414 }
1415 bool is_empty2() const {
1416 assert(_mem2, "must call next 1st");
1417 assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
1418 return _mem2->is_top();
1419 }
1420 Node* memory() const {
1421 assert(!is_empty(), "must not be empty");
1422 assert_synch();
1423 return _mem;
1424 }
1425 // get the current memory, regardless of empty or non-empty status
1426 Node* force_memory() const {
1427 assert(!is_empty() || !at_base_memory(), "");
1428 // Use _mm_base to defend against updates to _mem->base_memory().
1429 Node *mem = _mem->is_top() ? _mm_base : _mem;
1430 assert(mem == check_memory(), "");
1431 return mem;
1432 }
1433 Node* memory2() const {
1434 assert(_mem2 == check_memory2(), "");
1435 return _mem2;
1436 }
1437 void set_memory(Node* mem) {
1438 if (at_base_memory()) {
1439 // Note that this does not change the invariant _mm_base.
1440 _mm->set_base_memory(mem);
1441 } else {
1442 _mm->set_memory_at(_idx, mem);
1443 }
1444 _mem = mem;
1445 assert_synch();
1446 }
1448 // Recover from a side effect to the MergeMemNode.
1449 void set_memory() {
1450 _mem = _mm->in(_idx);
1451 }
1453 bool next() { return next(false); }
1454 bool next2() { return next(true); }
1456 bool next_non_empty() { return next_non_empty(false); }
1457 bool next_non_empty2() { return next_non_empty(true); }
1458 // next_non_empty2 can yield states where is_empty() is true
 private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note: This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        // If the second merge is shorter than the first, read the top
        // sentinel slot (AliasIdxTop) so _mem2 reports as empty.
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }
  // find the next non-empty item
  // Stops at slices non-empty in the first merge, or (in dual iteration)
  // non-empty in the second merge even when the first is empty.
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true; // is_empty() == true
      }
    }
    return false;
  }
1489 };
1491 //------------------------------Prefetch---------------------------------------
1493 // Non-faulting prefetch load. Prefetch for many reads.
1494 class PrefetchReadNode : public Node {
1495 public:
1496 PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1497 virtual int Opcode() const;
1498 virtual uint ideal_reg() const { return NotAMachineReg; }
1499 virtual uint match_edge(uint idx) const { return idx==2; }
1500 virtual const Type *bottom_type() const { return Type::ABIO; }
1501 };
1503 // Non-faulting prefetch load. Prefetch for many reads & many writes.
1504 class PrefetchWriteNode : public Node {
1505 public:
1506 PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
1507 virtual int Opcode() const;
1508 virtual uint ideal_reg() const { return NotAMachineReg; }
1509 virtual uint match_edge(uint idx) const { return idx==2; }
1510 virtual const Type *bottom_type() const { return Type::ABIO; }
1511 };
1513 // Allocation prefetch which may fault, TLAB size have to be adjusted.
1514 class PrefetchAllocationNode : public Node {
1515 public:
1516 PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
1517 virtual int Opcode() const;
1518 virtual uint ideal_reg() const { return NotAMachineReg; }
1519 virtual uint match_edge(uint idx) const { return idx==2; }
1520 virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
1521 };
1523 #endif // SHARE_VM_OPTO_MEMNODE_HPP