Wed, 18 Sep 2013 14:34:56 -0700
8024342: PPC64 (part 111): Support for C calling conventions that require 64-bit ints.
Summary: Some platforms, such as PPC and s390x/zArch, require that 32-bit ints be passed as 64-bit values to C functions. This change adds support for adapting the signature and issuing the proper casts in C2-compiled stubs. The new functions are used in generate_native_wrapper(). The signature used by the compiler is adapted as in PhaseIdealLoop::intrinsify_fill().
Reviewed-by: kvn
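
For context, a minimal sketch of the kind of adaptation described above (an illustration only, with hypothetical names; not the actual code added by this change): on an ABI where every integer argument occupies a full 64-bit slot, each T_INT in an outgoing C signature is widened to T_LONG, and the wrapper then sign-extends the value before the call.

    // Hypothetical sketch only -- names and shape are illustrative, not HotSpot's API.
    // Widen every 32-bit int in an outgoing C signature to a 64-bit long.
    static void adapt_c_signature(BasicType* sig, int length) {
      for (int i = 0; i < length; i++) {
        if (sig[i] == T_INT) {
          sig[i] = T_LONG;  // the C ABI expects ints sign-extended to 64 bits
        }
      }
    }

generate_native_wrapper() would then emit the matching sign-extension (the "proper cast") for each widened argument before the native call.
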
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_MEMNODE_HPP
#define SHARE_VM_OPTO_MEMNODE_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/opcodes.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

class MultiNode;
class PhaseCCP;
class PhaseTransform;

//------------------------------MemNode----------------------------------------
// Load or Store, possibly throwing a NULL pointer exception
class MemNode : public Node {
protected:
#ifdef ASSERT
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
#endif
  virtual uint size_of() const; // Size is bigger (ASSERT only)
public:
  enum { Control,               // When is it safe to do this load?
         Memory,                // Chunk of memory is being loaded from
         Address,               // Actually address, derived from base
         ValueIn,               // Value to store
         OopStore               // Preceding oop store, only in StoreCM
  };
protected:
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at )
    : Node(c0,c1,c2) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3 )
    : Node(c0,c1,c2,c3) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }
  MemNode( Node *c0, Node *c1, Node *c2, const TypePtr* at, Node *c3, Node *c4)
    : Node(c0,c1,c2,c3,c4) {
    init_class_id(Class_Mem);
    debug_only(_adr_type=at; adr_type();)
  }

public:
  // Helpers for the optimizer. Documented in memnode.cpp.
  static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                      Node* p2, AllocateNode* a2,
                                      PhaseTransform* phase);
  static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

  static Node *optimize_simple_memory_chain(Node *mchain, const TypeOopPtr *t_oop, Node *load, PhaseGVN *phase);
  static Node *optimize_memory_chain(Node *mchain, const TypePtr *t_adr, Node *load, PhaseGVN *phase);
  // This one should probably be a phase-specific function:
  static bool all_controls_dominate(Node* dom, Node* sub);

  // Find any cast-away of null-ness and keep its control.
  static Node *Ideal_common_DU_postCCP( PhaseCCP *ccp, Node* n, Node* adr );
  virtual Node *Ideal_DU_postCCP( PhaseCCP *ccp );

  virtual const class TypePtr *adr_type() const;  // returns bottom_type of address

  // Shared code for Ideal methods:
  Node *Ideal_common(PhaseGVN *phase, bool can_reshape);  // Return -1 for short-circuit NULL.

  // Helper function for adr_type() implementations.
  static const TypePtr* calculate_adr_type(const Type* t, const TypePtr* cross_check = NULL);

  // Raw access function, to allow copying of adr_type efficiently in
  // product builds and retain the debug info for debug builds.
  const TypePtr *raw_adr_type() const {
#ifdef ASSERT
    return _adr_type;
#else
    return 0;
#endif
  }

  // Map a load or store opcode to its corresponding store opcode.
  // (Return -1 if unknown.)
  virtual int store_Opcode() const { return -1; }

  // What is the type of the value in memory? (T_VOID means "unspecified".)
  virtual BasicType memory_type() const = 0;
  virtual int memory_size() const {
#ifdef ASSERT
    return type2aelembytes(memory_type(), true);
#else
    return type2aelembytes(memory_type());
#endif
  }

  // Search through memory states which precede this node (load or store).
  // Look for an exact match for the address, with no intervening
  // aliased stores.
  Node* find_previous_store(PhaseTransform* phase);

  // Can this node (load or store) accurately see a stored value in
  // the given memory state? (The state may or may not be in(Memory).)
  Node* can_see_stored_value(Node* st, PhaseTransform* phase) const;

#ifndef PRODUCT
  static void dump_adr_type(const Node* mem, const TypePtr* adr_type, outputStream *st);
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------LoadNode---------------------------------------
// Load value; requires Memory and Address
class LoadNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
  const Type* const _type;      // What kind of value is loaded?
public:

  LoadNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *rt )
    : MemNode(c,mem,adr,at), _type(rt) {
    init_class_id(Class_Load);
  }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                     const TypePtr* at, const Type *rt, BasicType bt );

  virtual uint hash() const;  // Check the type

  // Handle algebraic identities here. If we have an identity, return the Node
  // we are equivalent to. We look for Load of a Store.
  virtual Node *Identity( PhaseTransform *phase );

  // If the load is from Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Split instance field load through Phi.
  Node* split_through_phi(PhaseGVN *phase);

  // Recover original value from boxed values
  Node *eliminate_autobox(PhaseGVN *phase);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Common methods for LoadKlass and LoadNKlass nodes.
  const Type *klass_value_common( PhaseTransform *phase ) const;
  Node *klass_identity_common( PhaseTransform *phase );

  virtual uint ideal_reg() const;
  virtual const Type *bottom_type() const;
  // Following method is copied from TypeNode:
  void set_type(const Type* t) {
    assert(t != NULL, "sanity");
    debug_only(uint check_hash = (VerifyHashTableKeys && _hash_lock) ? hash() : NO_HASH);
    *(const Type**)&_type = t; // cast away const-ness
    // If this node is in the hash table, make sure it doesn't need a rehash.
    assert(check_hash == NO_HASH || check_hash == hash(), "type change must preserve hash code");
  }
  const Type* type() const { assert(_type != NULL, "sanity"); return _type; }

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  // Map a load opcode to its corresponding store opcode.
  virtual int store_Opcode() const = 0;

  // Check if the load's memory input is a Phi node with the same control.
  bool is_instance_field_load_with_local_phi(Node* ctrl);

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
#ifdef ASSERT
  // Helper function to allow a raw load without control edge for some cases
  static bool is_immutable_value(Node* adr);
#endif
protected:
  const Type* load_array_final_field(const TypeKlassPtr *tkls,
                                     ciKlass* klass) const;
};
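
// Example (informal sketch, editor's note): clients typically create loads
// through the factory above rather than instantiating subclasses directly, e.g.
//   Node* ld = LoadNode::make(gvn, ctl, mem, adr, adr_type, TypeInt::INT, T_INT);
// which picks the matching subclass (here LoadINode) from the BasicType.
// The names ctl/mem/adr/adr_type stand for the caller's control, memory,
// address and alias-type values.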

//------------------------------LoadBNode--------------------------------------
// Load a byte (8bits signed) from memory
class LoadBNode : public LoadNode {
public:
  LoadBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::BYTE )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUBNode-------------------------------------
// Load an unsigned byte (8bits unsigned) from memory
class LoadUBNode : public LoadNode {
public:
  LoadUBNode(Node* c, Node* mem, Node* adr, const TypePtr* at, const TypeInt* ti = TypeInt::UBYTE )
    : LoadNode(c, mem, adr, at, ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node* Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreB; }
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------LoadUSNode-------------------------------------
// Load an unsigned short/char (16bits unsigned) from memory
class LoadUSNode : public LoadNode {
public:
  LoadUSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::CHAR )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------LoadSNode--------------------------------------
// Load a short (16bits signed) from memory
class LoadSNode : public LoadNode {
public:
  LoadSNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::SHORT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
  virtual int store_Opcode() const { return Op_StoreC; }
  virtual BasicType memory_type() const { return T_SHORT; }
};

//------------------------------LoadINode--------------------------------------
// Load an integer from memory
class LoadINode : public LoadNode {
public:
  LoadINode( Node *c, Node *mem, Node *adr, const TypePtr* at, const TypeInt *ti = TypeInt::INT )
    : LoadNode(c,mem,adr,at,ti) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual int store_Opcode() const { return Op_StoreI; }
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------LoadRangeNode----------------------------------
// Load an array length from the array
class LoadRangeNode : public LoadINode {
public:
  LoadRangeNode( Node *c, Node *mem, Node *adr, const TypeInt *ti = TypeInt::POS )
    : LoadINode(c,mem,adr,TypeAryPtr::RANGE,ti) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
};

//------------------------------LoadLNode--------------------------------------
// Load a long from memory
class LoadLNode : public LoadNode {
  virtual uint hash() const { return LoadNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((LoadLNode&)n)._require_atomic_access
      && LoadNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise load forbidden?

public:
  LoadLNode( Node *c, Node *mem, Node *adr, const TypePtr* at,
             const TypeLong *tl = TypeLong::LONG,
             bool require_atomic_access = false )
    : LoadNode(c,mem,adr,at,tl)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegL; }
  virtual int store_Opcode() const { return Op_StoreL; }
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static LoadLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, const Type* rt);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    LoadNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------LoadL_unalignedNode----------------------------
// Load a long from unaligned memory
class LoadL_unalignedNode : public LoadLNode {
public:
  LoadL_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadLNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadFNode--------------------------------------
// Load a float (32 bits) from memory
class LoadFNode : public LoadNode {
public:
  LoadFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::FLOAT )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegF; }
  virtual int store_Opcode() const { return Op_StoreF; }
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------LoadDNode--------------------------------------
// Load a double (64 bits) from memory
class LoadDNode : public LoadNode {
public:
  LoadDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, const Type *t = Type::DOUBLE )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegD; }
  virtual int store_Opcode() const { return Op_StoreD; }
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------LoadD_unalignedNode----------------------------
// Load a double from unaligned memory
class LoadD_unalignedNode : public LoadDNode {
public:
  LoadD_unalignedNode( Node *c, Node *mem, Node *adr, const TypePtr* at )
    : LoadDNode(c,mem,adr,at) {}
  virtual int Opcode() const;
};

//------------------------------LoadPNode--------------------------------------
// Load a pointer from memory (either object or array)
class LoadPNode : public LoadNode {
public:
  LoadPNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypePtr* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual int store_Opcode() const { return Op_StoreP; }
  virtual BasicType memory_type() const { return T_ADDRESS; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations. However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge. (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations). Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadNNode--------------------------------------
// Load a narrow oop from memory (either object or array)
class LoadNNode : public LoadNode {
public:
  LoadNNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const Type* t )
    : LoadNode(c,mem,adr,at,t) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreN; }
  virtual BasicType memory_type() const { return T_NARROWOOP; }
  // depends_only_on_test is almost always true, and needs to be almost always
  // true to enable key hoisting & commoning optimizations. However, for the
  // special case of RawPtr loads from TLS top & end, the control edge carries
  // the dependence preventing hoisting past a Safepoint instead of the memory
  // edge. (An unfortunate consequence of having Safepoints not set Raw
  // Memory; itself an unfortunate consequence of having Nodes which produce
  // results (new raw memory state) inside of loops preventing all manner of
  // other optimizations). Basically, it's ugly but so is the alternative.
  // See comment in macro.cpp, around line 125 expand_allocate_common().
  virtual bool depends_only_on_test() const { return adr_type() != TypeRawPtr::BOTTOM; }
};

//------------------------------LoadKlassNode----------------------------------
// Load a Klass from an object
class LoadKlassNode : public LoadPNode {
public:
  LoadKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeKlassPtr *tk )
    : LoadPNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }

  // Polymorphic factory method:
  static Node* make( PhaseGVN& gvn, Node *mem, Node *adr, const TypePtr* at,
                     const TypeKlassPtr *tk = TypeKlassPtr::OBJECT );
};

//------------------------------LoadNKlassNode---------------------------------
// Load a narrow Klass from an object.
class LoadNKlassNode : public LoadNNode {
public:
  LoadNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr *at, const TypeNarrowKlass *tk )
    : LoadNNode(c,mem,adr,at,tk) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegN; }
  virtual int store_Opcode() const { return Op_StoreNKlass; }
  virtual BasicType memory_type() const { return T_NARROWKLASS; }

  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------StoreNode--------------------------------------
// Store value; requires Store, Address and Value
class StoreNode : public MemNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual bool depends_only_on_test() const { return false; }

  Node *Ideal_masked_input       (PhaseGVN *phase, uint mask);
  Node *Ideal_sign_extended_input(PhaseGVN *phase, int num_bits);

public:
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val )
    : MemNode(c,mem,adr,at,val) {
    init_class_id(Class_Store);
  }
  StoreNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store )
    : MemNode(c,mem,adr,at,val,oop_store) {
    init_class_id(Class_Store);
  }

  // Polymorphic factory method:
  static StoreNode* make( PhaseGVN& gvn, Node *c, Node *mem, Node *adr,
                          const TypePtr* at, Node *val, BasicType bt );

  virtual uint hash() const;  // Check the type

  // If the store is to Field memory and the pointer is non-null, we can
  // zero out the control input.
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Compute a new Type for this node. Basically we just do the pre-check,
  // then call the virtual add() to set the type.
  virtual const Type *Value( PhaseTransform *phase ) const;

  // Check for identity function on memory (Load then Store at same address)
  virtual Node *Identity( PhaseTransform *phase );

  // Do not match memory edge
  virtual uint match_edge(uint idx) const;

  virtual const Type *bottom_type() const;  // returns Type::MEMORY

  // Map a store opcode to its corresponding own opcode, trivially.
  virtual int store_Opcode() const { return Opcode(); }

  // have all possible loads of the value stored been optimized away?
  bool value_never_loaded(PhaseTransform *phase) const;
};
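
// Example (informal sketch, editor's note): stores are likewise created
// through the factory above, e.g.
//   StoreNode* st = StoreNode::make(gvn, ctl, mem, adr, adr_type, val, T_INT);
// which selects the matching subclass (here StoreINode) from the BasicType.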

//------------------------------StoreBNode-------------------------------------
// Store byte to memory
class StoreBNode : public StoreNode {
public:
  StoreBNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_BYTE; }
};

//------------------------------StoreCNode-------------------------------------
// Store char/short to memory
class StoreCNode : public StoreNode {
public:
  StoreCNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual BasicType memory_type() const { return T_CHAR; }
};

//------------------------------StoreINode-------------------------------------
// Store int to memory
class StoreINode : public StoreNode {
public:
  StoreINode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_INT; }
};

//------------------------------StoreLNode-------------------------------------
// Store long to memory
class StoreLNode : public StoreNode {
  virtual uint hash() const { return StoreNode::hash() + _require_atomic_access; }
  virtual uint cmp( const Node &n ) const {
    return _require_atomic_access == ((StoreLNode&)n)._require_atomic_access
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  const bool _require_atomic_access;  // is piecewise store forbidden?

public:
  StoreLNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val,
              bool require_atomic_access = false )
    : StoreNode(c,mem,adr,at,val)
    , _require_atomic_access(require_atomic_access)
  {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_LONG; }
  bool require_atomic_access() { return _require_atomic_access; }
  static StoreLNode* make_atomic(Compile *C, Node* ctl, Node* mem, Node* adr, const TypePtr* adr_type, Node* val);
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {
    StoreNode::dump_spec(st);
    if (_require_atomic_access) st->print(" Atomic!");
  }
#endif
};

//------------------------------StoreFNode-------------------------------------
// Store float to memory
class StoreFNode : public StoreNode {
public:
  StoreFNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_FLOAT; }
};

//------------------------------StoreDNode-------------------------------------
// Store double to memory
class StoreDNode : public StoreNode {
public:
  StoreDNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_DOUBLE; }
};

//------------------------------StorePNode-------------------------------------
// Store pointer to memory
class StorePNode : public StoreNode {
public:
  StorePNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_ADDRESS; }
};

//------------------------------StoreNNode-------------------------------------
// Store narrow oop to memory
class StoreNNode : public StoreNode {
public:
  StoreNNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWOOP; }
};

//------------------------------StoreNKlassNode--------------------------------------
// Store narrow klass to memory
class StoreNKlassNode : public StoreNNode {
public:
  StoreNKlassNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val ) : StoreNNode(c,mem,adr,at,val) {}
  virtual int Opcode() const;
  virtual BasicType memory_type() const { return T_NARROWKLASS; }
};

//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
  virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
  virtual uint cmp( const Node &n ) const {
    return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
      && StoreNode::cmp(n);
  }
  virtual uint size_of() const { return sizeof(*this); }
  int _oop_alias_idx;  // The alias_idx of OopStore

public:
  StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
    StoreNode(c,mem,adr,at,val,oop_store),
    _oop_alias_idx(oop_alias_idx) {
    assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
           _oop_alias_idx == Compile::AliasIdxBot && Compile::current()->AliasLevel() == 0,
           "bad oop alias idx");
  }
  virtual int Opcode() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual BasicType memory_type() const { return T_VOID; } // unspecific
  int oop_alias_idx() const { return _oop_alias_idx; }
};

//------------------------------LoadPLockedNode---------------------------------
// Load-locked a pointer from memory (either object or array).
// On Sparc & Intel this is implemented as a normal pointer load.
// On PowerPC and friends it's a real load-locked.
class LoadPLockedNode : public LoadPNode {
public:
  LoadPLockedNode( Node *c, Node *mem, Node *adr )
    : LoadPNode(c,mem,adr,TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM) {}
  virtual int Opcode() const;
  virtual int store_Opcode() const { return Op_StorePConditional; }
  virtual bool depends_only_on_test() const { return true; }
};

//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
class SCMemProjNode : public ProjNode {
public:
  enum {SCMEMPROJCON = (uint)-2};
  SCMemProjNode( Node *src) : ProjNode( src, SCMEMPROJCON) { }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return false; }
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  virtual const TypePtr *adr_type() const { return in(0)->in(MemNode::Memory)->adr_type(); }
  virtual uint ideal_reg() const { return 0; } // memory projections don't have a register
  virtual const Type *Value( PhaseTransform *phase ) const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const {}
#endif
};

//------------------------------LoadStoreNode---------------------------
// Note: is_Mem() method returns 'true' for this class.
class LoadStoreNode : public Node {
private:
  const Type* const _type;      // What kind of value is loaded?
  const TypePtr* _adr_type;     // What kind of memory is being addressed?
  virtual uint size_of() const; // Size is bigger
public:
  LoadStoreNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* rt, uint required );
  virtual bool depends_only_on_test() const { return false; }
  virtual uint match_edge(uint idx) const { return idx == MemNode::Address || idx == MemNode::ValueIn; }

  virtual const Type *bottom_type() const { return _type; }
  virtual uint ideal_reg() const;
  virtual const class TypePtr *adr_type() const { return _adr_type; }  // returns bottom_type of address

  bool result_not_used() const;
};

class LoadStoreConditionalNode : public LoadStoreNode {
public:
  enum {
    ExpectedIn = MemNode::ValueIn+1 // One more input than MemNode
  };
  LoadStoreConditionalNode(Node *c, Node *mem, Node *adr, Node *val, Node *ex);
};

//------------------------------StorePConditionalNode---------------------------
// Conditionally store pointer to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StorePConditionalNode : public LoadStoreConditionalNode {
public:
  StorePConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreIConditionalNode---------------------------
// Conditionally store int to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreIConditionalNode : public LoadStoreConditionalNode {
public:
  StoreIConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ii ) : LoadStoreConditionalNode(c, mem, adr, val, ii) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------StoreLConditionalNode---------------------------
// Conditionally store long to memory, if no change since prior
// load-locked. Sets flags for success or failure of the store.
class StoreLConditionalNode : public LoadStoreConditionalNode {
public:
  StoreLConditionalNode( Node *c, Node *mem, Node *adr, Node *val, Node *ll ) : LoadStoreConditionalNode(c, mem, adr, val, ll) { }
  virtual int Opcode() const;
  // Produces flags
  virtual uint ideal_reg() const { return Op_RegFlags; }
};

//------------------------------CompareAndSwapLNode---------------------------
class CompareAndSwapLNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapLNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapINode---------------------------
class CompareAndSwapINode : public LoadStoreConditionalNode {
public:
  CompareAndSwapINode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapPNode---------------------------
class CompareAndSwapPNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapPNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------CompareAndSwapNNode---------------------------
class CompareAndSwapNNode : public LoadStoreConditionalNode {
public:
  CompareAndSwapNNode( Node *c, Node *mem, Node *adr, Node *val, Node *ex) : LoadStoreConditionalNode(c, mem, adr, val, ex) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddINode---------------------------
class GetAndAddINode : public LoadStoreNode {
public:
  GetAndAddINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndAddLNode---------------------------
class GetAndAddLNode : public LoadStoreNode {
public:
  GetAndAddLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetINode---------------------------
class GetAndSetINode : public LoadStoreNode {
public:
  GetAndSetINode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeInt::INT, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetLNode---------------------------
class GetAndSetLNode : public LoadStoreNode {
public:
  GetAndSetLNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at ) : LoadStoreNode(c, mem, adr, val, at, TypeLong::LONG, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetPNode---------------------------
class GetAndSetPNode : public LoadStoreNode {
public:
  GetAndSetPNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------GetAndSetNNode---------------------------
class GetAndSetNNode : public LoadStoreNode {
public:
  GetAndSetNNode( Node *c, Node *mem, Node *adr, Node *val, const TypePtr* at, const Type* t ) : LoadStoreNode(c, mem, adr, val, at, t, 4) { }
  virtual int Opcode() const;
};

//------------------------------ClearArray-------------------------------------
class ClearArrayNode: public Node {
public:
  ClearArrayNode( Node *ctrl, Node *arymem, Node *word_cnt, Node *base )
    : Node(ctrl,arymem,word_cnt,base) {
    init_class_id(Class_ClearArray);
  }
  virtual int Opcode() const;
  virtual const Type *bottom_type() const { return Type::MEMORY; }
  // ClearArray modifies array elements, and so affects only the
  // array memory addressed by the bottom_type of its base address.
  virtual const class TypePtr *adr_type() const;
  virtual Node *Identity( PhaseTransform *phase );
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const;

  // Clear the given area of an object or array.
  // The start offset must always be aligned mod BytesPerInt.
  // The end offset must always be aligned mod BytesPerLong.
  // Return the new memory.
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            intptr_t end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            intptr_t start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  static Node* clear_memory(Node* control, Node* mem, Node* dest,
                            Node* start_offset,
                            Node* end_offset,
                            PhaseGVN* phase);
  // Return allocation input memory edge if it is different instance
  // or itself if it is the one we are looking for.
  static bool step_through(Node** np, uint instance_id, PhaseTransform* phase);
};
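
// Example (informal sketch, editor's note): after expanding an allocation,
// zeroing the new object body between a constant header offset and a
// variable end offset could look like
//   mem = ClearArrayNode::clear_memory(ctl, mem, rawoop,
//                                      header_size, size_in_bytes, &gvn);
// where rawoop, header_size and size_in_bytes stand for the allocation's raw
// address and layout values (the names here are illustrative only).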

//------------------------------StrIntrinsic-------------------------------
// Base class for Ideal nodes used in String intrinsic code.
class StrIntrinsicNode: public Node {
public:
  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* c1, Node* s2, Node* c2):
    Node(control, char_array_mem, s1, c1, s2, c2) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2, Node* c):
    Node(control, char_array_mem, s1, s2, c) {
  }

  StrIntrinsicNode(Node* control, Node* char_array_mem,
                   Node* s1, Node* s2):
    Node(control, char_array_mem, s1, s2) {
  }

  virtual bool depends_only_on_test() const { return false; }
  virtual const TypePtr* adr_type() const { return TypeAryPtr::CHARS; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------StrComp-------------------------------------
class StrCompNode: public StrIntrinsicNode {
public:
  StrCompNode(Node* control, Node* char_array_mem,
              Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------StrEquals-------------------------------------
class StrEqualsNode: public StrIntrinsicNode {
public:
  StrEqualsNode(Node* control, Node* char_array_mem,
                Node* s1, Node* s2, Node* c):
    StrIntrinsicNode(control, char_array_mem, s1, s2, c) {}
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------StrIndexOf-------------------------------------
class StrIndexOfNode: public StrIntrinsicNode {
public:
  StrIndexOfNode(Node* control, Node* char_array_mem,
                 Node* s1, Node* c1, Node* s2, Node* c2):
    StrIntrinsicNode(control, char_array_mem, s1, c1, s2, c2) {}
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::INT; }
};

//------------------------------AryEq---------------------------------------
class AryEqNode: public StrIntrinsicNode {
public:
  AryEqNode(Node* control, Node* char_array_mem, Node* s1, Node* s2):
    StrIntrinsicNode(control, char_array_mem, s1, s2) {}
  virtual int Opcode() const;
  virtual const Type* bottom_type() const { return TypeInt::BOOL; }
};

//------------------------------EncodeISOArray--------------------------------
// encode char[] to byte[] in ISO_8859_1
class EncodeISOArrayNode: public Node {
public:
  EncodeISOArrayNode(Node *control, Node* arymem, Node* s1, Node* s2, Node* c): Node(control, arymem, s1, s2, c) {}
  virtual int Opcode() const;
  virtual bool depends_only_on_test() const { return false; }
  virtual const Type* bottom_type() const { return TypeInt::INT; }
  virtual const TypePtr* adr_type() const { return TypePtr::BOTTOM; }
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return Op_RegI; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value(PhaseTransform *phase) const;
};

//------------------------------MemBar-----------------------------------------
// There are different flavors of Memory Barriers to match the Java Memory
// Model. Monitor-enter and volatile-load act as Acquires: no following ref
// can be moved to before them. We insert a MemBar-Acquire after a FastLock or
// volatile-load. Monitor-exit and volatile-store act as Release: no
// preceding ref can be moved to after them. We insert a MemBar-Release
// before a FastUnlock or volatile-store. All volatiles need to be
// serialized, so we follow all volatile-stores with a MemBar-Volatile to
// separate it from any following volatile-load.
class MemBarNode: public MultiNode {
  virtual uint hash() const ;                // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const ;  // Always fail, except on self

  virtual uint size_of() const { return sizeof(*this); }
  // Memory type this node is serializing. Usually either rawptr or bottom.
  const TypePtr* _adr_type;

public:
  enum {
    Precedent = TypeFunc::Parms  // optional edge to force precedence
  };
  MemBarNode(Compile* C, int alias_idx, Node* precedent);
  virtual int Opcode() const = 0;
  virtual const class TypePtr *adr_type() const { return _adr_type; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual uint match_edge(uint idx) const { return 0; }
  virtual const Type *bottom_type() const { return TypeTuple::MEMBAR; }
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  // Factory method. Builds a wide or narrow membar.
  // Optional 'precedent' becomes an extra edge if not null.
  static MemBarNode* make(Compile* C, int opcode,
                          int alias_idx = Compile::AliasIdxBot,
                          Node* precedent = NULL);
};
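
// Example (informal, editor's note): per the rules above, C2 conceptually
// brackets Java volatile accesses like this:
//   volatile load:   load;  MemBarAcquire
//   volatile store:  MemBarRelease;  store;  MemBarVolatile
// so the barrier subclasses below line up one-to-one with those insertion
// points (and FastLock/FastUnlock use the *LockNode variants).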
943 // "Acquire" - no following ref can move before (but earlier refs can
944 // follow, like an early Load stalled in cache). Requires multi-cpu
945 // visibility. Inserted after a volatile load.
946 class MemBarAcquireNode: public MemBarNode {
947 public:
948 MemBarAcquireNode(Compile* C, int alias_idx, Node* precedent)
949 : MemBarNode(C, alias_idx, precedent) {}
950 virtual int Opcode() const;
951 };
953 // "Release" - no earlier ref can move after (but later refs can move
954 // up, like a speculative pipelined cache-hitting Load). Requires
955 // multi-cpu visibility. Inserted before a volatile store.
956 class MemBarReleaseNode: public MemBarNode {
957 public:
958 MemBarReleaseNode(Compile* C, int alias_idx, Node* precedent)
959 : MemBarNode(C, alias_idx, precedent) {}
960 virtual int Opcode() const;
961 };
963 // "Acquire" - no following ref can move before (but earlier refs can
964 // follow, like an early Load stalled in cache). Requires multi-cpu
965 // visibility. Inserted after a FastLock.
966 class MemBarAcquireLockNode: public MemBarNode {
967 public:
968 MemBarAcquireLockNode(Compile* C, int alias_idx, Node* precedent)
969 : MemBarNode(C, alias_idx, precedent) {}
970 virtual int Opcode() const;
971 };
973 // "Release" - no earlier ref can move after (but later refs can move
974 // up, like a speculative pipelined cache-hitting Load). Requires
975 // multi-cpu visibility. Inserted before a FastUnLock.
976 class MemBarReleaseLockNode: public MemBarNode {
977 public:
978 MemBarReleaseLockNode(Compile* C, int alias_idx, Node* precedent)
979 : MemBarNode(C, alias_idx, precedent) {}
980 virtual int Opcode() const;
981 };
983 class MemBarStoreStoreNode: public MemBarNode {
984 public:
985 MemBarStoreStoreNode(Compile* C, int alias_idx, Node* precedent)
986 : MemBarNode(C, alias_idx, precedent) {
987 init_class_id(Class_MemBarStoreStore);
988 }
989 virtual int Opcode() const;
990 };
992 // Ordering between a volatile store and a following volatile load.
993 // Requires multi-CPU visibility?
994 class MemBarVolatileNode: public MemBarNode {
995 public:
996 MemBarVolatileNode(Compile* C, int alias_idx, Node* precedent)
997 : MemBarNode(C, alias_idx, precedent) {}
998 virtual int Opcode() const;
999 };
1001 // Ordering within the same CPU. Used to order unsafe memory references
1002 // inside the compiler when we lack alias info. Not needed "outside" the
1003 // compiler because the CPU does all the ordering for us.
1004 class MemBarCPUOrderNode: public MemBarNode {
1005 public:
1006 MemBarCPUOrderNode(Compile* C, int alias_idx, Node* precedent)
1007 : MemBarNode(C, alias_idx, precedent) {}
1008 virtual int Opcode() const;
1009 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1010 };
1012 // Isolation of object setup after an AllocateNode and before next safepoint.
1013 // (See comment in memnode.cpp near InitializeNode::InitializeNode for semantics.)
1014 class InitializeNode: public MemBarNode {
1015 friend class AllocateNode;
1017 enum {
1018 Incomplete = 0,
1019 Complete = 1,
1020 WithArraycopy = 2
1021 };
1022 int _is_complete;
1024 bool _does_not_escape;
1026 public:
1027 enum {
1028 Control = TypeFunc::Control,
1029 Memory = TypeFunc::Memory, // MergeMem for states affected by this op
1030 RawAddress = TypeFunc::Parms+0, // the newly-allocated raw address
1031 RawStores = TypeFunc::Parms+1 // zero or more stores (or TOP)
1032 };
1034 InitializeNode(Compile* C, int adr_type, Node* rawoop);
1035 virtual int Opcode() const;
1036 virtual uint size_of() const { return sizeof(*this); }
1037 virtual uint ideal_reg() const { return 0; } // not matched in the AD file
1038 virtual const RegMask &in_RegMask(uint) const; // mask for RawAddress
1040 // Manage incoming memory edges via a MergeMem on in(Memory):
1041 Node* memory(uint alias_idx);
1043 // The raw memory edge coming directly from the Allocation.
1044 // The contents of this memory are *always* all-zero-bits.
1045 Node* zero_memory() { return memory(Compile::AliasIdxRaw); }
1047 // Return the corresponding allocation for this initialization (or null if none).
1048 // (Note: Both InitializeNode::allocation and AllocateNode::initialization
1049 // are defined in graphKit.cpp, which sets up the bidirectional relation.)
1050 AllocateNode* allocation();
1052 // Anything other than zeroing in this init?
1053 bool is_non_zero();
1055 // An InitializeNode must completed before macro expansion is done.
1056 // Completion requires that the AllocateNode must be followed by
1057 // initialization of the new memory to zero, then to any initializers.
1058 bool is_complete() { return _is_complete != Incomplete; }
1059 bool is_complete_with_arraycopy() { return (_is_complete & WithArraycopy) != 0; }
1061 // Mark complete. (Must not yet be complete.)
1062 void set_complete(PhaseGVN* phase);
1063 void set_complete_with_arraycopy() { _is_complete = Complete | WithArraycopy; }
1065 bool does_not_escape() { return _does_not_escape; }
1066 void set_does_not_escape() { _does_not_escape = true; }
1068 #ifdef ASSERT
1069 // ensure all non-degenerate stores are ordered and non-overlapping
1070 bool stores_are_sane(PhaseTransform* phase);
1071 #endif //ASSERT
1073 // See if this store can be captured; return offset where it initializes.
1074 // Return 0 if the store cannot be moved (any sort of problem).
1075 intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);
1077 // Capture another store; reformat it to write my internal raw memory.
1078 // Return the captured copy, else NULL if there is some sort of problem.
1079 Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);
1081 // Find captured store which corresponds to the range [start..start+size).
1082 // Return my own memory projection (meaning the initial zero bits)
1083 // if there is no such store. Return NULL if there is a problem.
1084 Node* find_captured_store(intptr_t start, int size_in_bytes, PhaseTransform* phase);
1086 // Called when the associated AllocateNode is expanded into CFG.
1087 Node* complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
1088 intptr_t header_size, Node* size_in_bytes,
1089 PhaseGVN* phase);
1091 private:
1092 void remove_extra_zeroes();
1094 // Find out where a captured store should be placed (or already is placed).
1095 int captured_store_insertion_point(intptr_t start, int size_in_bytes,
1096 PhaseTransform* phase);
1098 static intptr_t get_store_offset(Node* st, PhaseTransform* phase);
1100 Node* make_raw_address(intptr_t offset, PhaseTransform* phase);
1102 bool detect_init_independence(Node* n, int& count);
1104 void coalesce_subword_stores(intptr_t header_size, Node* size_in_bytes,
1105 PhaseGVN* phase);
1107 intptr_t find_next_fullword_store(uint i, PhaseGVN* phase);
1108 };
1110 //------------------------------MergeMem---------------------------------------
1111 // (See comment in memnode.cpp near MergeMemNode::MergeMemNode for semantics.)
1112 class MergeMemNode: public Node {
1113 virtual uint hash() const ; // { return NO_HASH; }
1114 virtual uint cmp( const Node &n ) const ; // Always fail, except on self
1115 friend class MergeMemStream;
1116 MergeMemNode(Node* def); // clients use MergeMemNode::make
1118 public:
1119 // If the input is a whole memory state, clone it with all its slices intact.
1120 // Otherwise, make a new memory state with just that base memory input.
1121 // In either case, the result is a newly created MergeMem.
1122 static MergeMemNode* make(Compile* C, Node* base_memory);
1124 virtual int Opcode() const;
1125 virtual Node *Identity( PhaseTransform *phase );
1126 virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
1127 virtual uint ideal_reg() const { return NotAMachineReg; }
1128 virtual uint match_edge(uint idx) const { return 0; }
1129 virtual const RegMask &out_RegMask() const;
1130 virtual const Type *bottom_type() const { return Type::MEMORY; }
1131 virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
1132 // sparse accessors
1133 // Fetch the previously stored "set_memory_at", or else the base memory.
1134 // (Caller should clone it if it is a phi-nest.)
1135 Node* memory_at(uint alias_idx) const;
1136 // set the memory, regardless of its previous value
1137 void set_memory_at(uint alias_idx, Node* n);
1138 // the "base" is the memory that provides the non-finite support
1139 Node* base_memory() const { return in(Compile::AliasIdxBot); }
1140 // warning: setting the base can implicitly set any of the other slices too
1141 void set_base_memory(Node* def);
1142 // sentinel value which denotes a copy of the base memory:
1143 Node* empty_memory() const { return in(Compile::AliasIdxTop); }
1144 static Node* make_empty_memory(); // where the sentinel comes from
1145 bool is_empty_memory(Node* n) const { assert((n == empty_memory()) == n->is_top(), "sanity"); return n->is_top(); }
1146 // hook for the iterator, to perform any necessary setup
1147 void iteration_setup(const MergeMemNode* other = NULL);
1148 // push sentinels until I am at least as long as the other (semantic no-op)
1149 void grow_to_match(const MergeMemNode* other);
1150 bool verify_sparse() const PRODUCT_RETURN0;
1151 #ifndef PRODUCT
1152 virtual void dump_spec(outputStream *st) const;
1153 #endif
1154 };
1156 class MergeMemStream : public StackObj {
1157 private:
1158 MergeMemNode* _mm;
1159 const MergeMemNode* _mm2; // optional second guy, contributes non-empty iterations
1160 Node* _mm_base; // loop-invariant base memory of _mm
1161 int _idx;
1162 int _cnt;
1163 Node* _mem;
1164 Node* _mem2;
1165 int _cnt2;
1167 void init(MergeMemNode* mm, const MergeMemNode* mm2 = NULL) {
1168 // subsume_node will break sparseness at times, whenever a memory slice
1169 // folds down to a copy of the base ("fat") memory. In such a case,
1170 // the raw edge will update to base, although it should be top.
1171 // This iterator will recognize either top or base_memory as an
1172 // "empty" slice. See is_empty, is_empty2, and next below.
1173 //
1174 // The sparseness property is repaired in MergeMemNode::Ideal.
1175 // As long as access to a MergeMem goes through this iterator
1176 // or the memory_at accessor, flaws in the sparseness will
1177 // never be observed.
1178 //
1179 // Also, iteration_setup repairs sparseness.
1180 assert(mm->verify_sparse(), "please, no dups of base");
1181 assert(mm2==NULL || mm2->verify_sparse(), "please, no dups of base");
1183 _mm = mm;
1184 _mm_base = mm->base_memory();
1185 _mm2 = mm2;
1186 _cnt = mm->req();
1187 _idx = Compile::AliasIdxBot-1; // start at the base memory
1188 _mem = NULL;
1189 _mem2 = NULL;
1190 }
1192 #ifdef ASSERT
1193 Node* check_memory() const {
1194 if (at_base_memory())
1195 return _mm->base_memory();
1196 else if ((uint)_idx < _mm->req() && !_mm->in(_idx)->is_top())
1197 return _mm->memory_at(_idx);
1198 else
1199 return _mm_base;
1200 }
1201 Node* check_memory2() const {
1202 return at_base_memory()? _mm2->base_memory(): _mm2->memory_at(_idx);
1203 }
1204 #endif
1206 static bool match_memory(Node* mem, const MergeMemNode* mm, int idx) PRODUCT_RETURN0;
1207 void assert_synch() const {
1208 assert(!_mem || _idx >= _cnt || match_memory(_mem, _mm, _idx),
1209 "no side-effects except through the stream");
1210 }
1212 public:
1214 // expected usages:
1215 // for (MergeMemStream mms(mem->is_MergeMem()); next_non_empty(); ) { ... }
1216 // for (MergeMemStream mms(mem1, mem2); next_non_empty2(); ) { ... }
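  //
  // (informal sketch, editor's note: a loop that copies every non-empty
  //  slice of mem2 into mem1 might read
  //    for (MergeMemStream mms(mem1, mem2); mms.next_non_empty2(); ) {
  //      mms.set_memory(mms.memory2());
  //    }
  //  where set_memory writes the slice back into mem1 at the current index)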

  // iterate over one merge
  MergeMemStream(MergeMemNode* mm) {
    mm->iteration_setup();
    init(mm);
    debug_only(_cnt2 = 999);
  }
  // iterate in parallel over two merges
  // only iterates through non-empty elements of mm2
  MergeMemStream(MergeMemNode* mm, const MergeMemNode* mm2) {
    assert(mm2, "second argument must be a MergeMem also");
    ((MergeMemNode*)mm2)->iteration_setup();  // update hidden state
    mm->iteration_setup(mm2);
    init(mm, mm2);
    _cnt2 = mm2->req();
  }
#ifdef ASSERT
  ~MergeMemStream() {
    assert_synch();
  }
#endif

  MergeMemNode* all_memory() const {
    return _mm;
  }
  Node* base_memory() const {
    assert(_mm_base == _mm->base_memory(), "no update to base memory, please");
    return _mm_base;
  }
  const MergeMemNode* all_memory2() const {
    assert(_mm2 != NULL, "");
    return _mm2;
  }
  bool at_base_memory() const {
    return _idx == Compile::AliasIdxBot;
  }
  int alias_idx() const {
    assert(_mem, "must call next 1st");
    return _idx;
  }

  const TypePtr* adr_type() const {
    return Compile::current()->get_adr_type(alias_idx());
  }

  const TypePtr* adr_type(Compile* C) const {
    return C->get_adr_type(alias_idx());
  }
  bool is_empty() const {
    assert(_mem, "must call next 1st");
    assert(_mem->is_top() == (_mem==_mm->empty_memory()), "correct sentinel");
    return _mem->is_top();
  }
  bool is_empty2() const {
    assert(_mem2, "must call next 1st");
    assert(_mem2->is_top() == (_mem2==_mm2->empty_memory()), "correct sentinel");
    return _mem2->is_top();
  }
  Node* memory() const {
    assert(!is_empty(), "must not be empty");
    assert_synch();
    return _mem;
  }
  // get the current memory, regardless of empty or non-empty status
  Node* force_memory() const {
    assert(!is_empty() || !at_base_memory(), "");
    // Use _mm_base to defend against updates to _mem->base_memory().
    Node *mem = _mem->is_top() ? _mm_base : _mem;
    assert(mem == check_memory(), "");
    return mem;
  }
  Node* memory2() const {
    assert(_mem2 == check_memory2(), "");
    return _mem2;
  }
  void set_memory(Node* mem) {
    if (at_base_memory()) {
      // Note that this does not change the invariant _mm_base.
      _mm->set_base_memory(mem);
    } else {
      _mm->set_memory_at(_idx, mem);
    }
    _mem = mem;
    assert_synch();
  }

  // Recover from a side effect to the MergeMemNode.
  void set_memory() {
    _mem = _mm->in(_idx);
  }

  bool next()  { return next(false); }
  bool next2() { return next(true); }

  bool next_non_empty()  { return next_non_empty(false); }
  bool next_non_empty2() { return next_non_empty(true); }
  // next_non_empty2 can yield states where is_empty() is true

private:
  // find the next item, which might be empty
  bool next(bool have_mm2) {
    assert((_mm2 != NULL) == have_mm2, "use other next");
    assert_synch();
    if (++_idx < _cnt) {
      // Note: This iterator allows _mm to be non-sparse.
      // It behaves the same whether _mem is top or base_memory.
      _mem = _mm->in(_idx);
      if (have_mm2)
        _mem2 = _mm2->in((_idx < _cnt2) ? _idx : Compile::AliasIdxTop);
      return true;
    }
    return false;
  }

  // find the next non-empty item
  bool next_non_empty(bool have_mm2) {
    while (next(have_mm2)) {
      if (!is_empty()) {
        // make sure _mem2 is filled in sensibly
        if (have_mm2 && _mem2->is_top()) _mem2 = _mm2->base_memory();
        return true;
      } else if (have_mm2 && !is_empty2()) {
        return true;  // is_empty() == true
      }
    }
    return false;
  }
};

//------------------------------Prefetch---------------------------------------

// Non-faulting prefetch load. Prefetch for many reads.
class PrefetchReadNode : public Node {
public:
  PrefetchReadNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Non-faulting prefetch load. Prefetch for many reads & many writes.
class PrefetchWriteNode : public Node {
public:
  PrefetchWriteNode(Node *abio, Node *adr) : Node(0,abio,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return Type::ABIO; }
};

// Allocation prefetch which may fault; the TLAB size has to be adjusted.
class PrefetchAllocationNode : public Node {
public:
  PrefetchAllocationNode(Node *mem, Node *adr) : Node(0,mem,adr) {}
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const { return idx==2; }
  virtual const Type *bottom_type() const { return ( AllocatePrefetchStyle == 3 ) ? Type::MEMORY : Type::ABIO; }
};

#endif // SHARE_VM_OPTO_MEMNODE_HPP