Thu, 20 Sep 2012 16:49:17 +0200
7023898: Intrinsify AtomicLongFieldUpdater.getAndIncrement()
Summary: use shorter instruction sequences for atomic add and atomic exchange when possible.
Reviewed-by: kvn, jrose
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_CALLNODE_HPP
#define SHARE_VM_OPTO_CALLNODE_HPP

#include "opto/connode.hpp"
#include "opto/mulnode.hpp"
#include "opto/multnode.hpp"
#include "opto/opcodes.hpp"
#include "opto/phaseX.hpp"
#include "opto/type.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; }  // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
class JVMState : public ResourceObj {
  friend class VMStructs;
public:
  typedef enum {
    Reexecute_Undefined = -1, // not defined -- will be translated into false later
    Reexecute_False     =  0, // false -- do not reexecute
    Reexecute_True      =  1  // true -- reexecute the bytecode
  } ReexecuteState; // Reexecute State

private:
  JVMState*         _caller;    // List pointer for forming scope chains
  uint              _depth;     // One more than caller depth, or one.
  uint              _locoff;    // Offset to locals in input edge mapping
  uint              _stkoff;    // Offset to stack in input edge mapping
  uint              _monoff;    // Offset to monitors in input edge mapping
  uint              _scloff;    // Offset to fields of scalar objs in input edge mapping
  uint              _endoff;    // Offset to end of input edge mapping
  uint              _sp;        // Java Expression Stack Pointer for this state
  int               _bci;       // Byte Code Index of this JVM point
  ReexecuteState    _reexecute; // Whether this bytecode needs to be re-executed
  ciMethod*         _method;    // Method Pointer
  SafePointNode*    _map;       // Map node associated with this scope
public:
  friend class Compile;
  friend class PreserveReexecuteState;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  // ... --|--- loc ---|--- stk ---|--- arg ---|--- mon ---|--- scl ---|
  //       \ locoff    \ stkoff    \ argoff    \ monoff    \ scloff    \ endoff
  uint           locoff() const { return _locoff; }
  uint           stkoff() const { return _stkoff; }
  uint           argoff() const { return _stkoff + _sp; }
  uint           monoff() const { return _monoff; }
  uint           scloff() const { return _scloff; }
  uint           endoff() const { return _endoff; }
  uint           oopoff() const { return debug_end(); }

  int          loc_size() const { return stkoff() - locoff(); }
  int          stk_size() const { return monoff() - stkoff(); }
  int          arg_size() const { return monoff() - argoff(); }
  int          mon_size() const { return scloff() - monoff(); }
  int          scl_size() const { return endoff() - scloff(); }
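
  // Worked example (illustrative only, not part of this change): a root frame
  // with 3 locals, a max stack of 4, sp() == 2, one monitor and no scalarized
  // fields, starting at locoff == L, lays out as
  //   locoff == L, stkoff == L+3, argoff == L+5 (stkoff + sp),
  //   monoff == L+7, scloff == L+9, endoff == L+9,
  // so loc_size() == 3, stk_size() == 4 and mon_size() == 2 (each monitor
  // contributes a (box, obj) pair of edges).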

  bool         is_loc(uint i) const { return locoff() <= i && i < stkoff(); }
  bool         is_stk(uint i) const { return stkoff() <= i && i < monoff(); }
  bool         is_mon(uint i) const { return monoff() <= i && i < scloff(); }
  bool         is_scl(uint i) const { return scloff() <= i && i < endoff(); }

  uint                      sp() const { return _sp; }
  int                      bci() const { return _bci; }
  bool        should_reexecute() const { return _reexecute == Reexecute_True; }
  bool  is_reexecute_undefined() const { return _reexecute == Reexecute_Undefined; }
  bool              has_method() const { return _method != NULL; }
  ciMethod*             method() const { assert(has_method(), ""); return _method; }
  JVMState*             caller() const { return _caller; }
  SafePointNode*           map() const { return _map; }
  uint                   depth() const { return _depth; }
  uint             debug_start() const; // returns locoff of root caller
  uint               debug_end() const; // returns endoff of self
  uint              debug_size() const {
    return loc_size() + sp() + mon_size() + scl_size();
  }
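  // (Note: debug_size() counts only the live part of the stack -- sp() --
  // rather than the full stk_size(), so dead stack slots above sp are not
  // reported as debug info.)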
  uint             debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int  nof_monitors()              const { return mon_size() >> logMonitorEdges; }
  int  monitor_depth()             const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int  monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int  monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
  bool is_monitor_box(uint off)    const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off)    const { return (is_mon(off)
                                                   && is_monitor_box(off))
                                                   || (caller() && caller()->is_monitor_use(off)); }
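
  // Layout note (illustrative): with logMonitorEdges == 1 each monitor
  // occupies two consecutive edges, so monitor idx lives at
  // monoff() + 2*idx (box) and monoff() + 2*idx + 1 (obj); even offsets
  // within the monitor area are therefore always box edges, which is
  // exactly what is_monitor_box() tests above.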

  // Initialization functions for the JVM
  void              set_locoff(uint off) { _locoff = off; }
  void              set_stkoff(uint off) { _stkoff = off; }
  void              set_monoff(uint off) { _monoff = off; }
  void              set_scloff(uint off) { _scloff = off; }
  void              set_endoff(uint off) { _endoff = off; }
  void              set_offsets(uint off) {
    _locoff = _stkoff = _monoff = _scloff = _endoff = off;
  }
  void              set_map(SafePointNode *map) { _map = map; }
  void              set_sp(uint sp) { _sp = sp; }
  // _reexecute is initialized to "undefined" for a new bci
  void              set_bci(int bci) { if (_bci != bci) _reexecute = Reexecute_Undefined; _bci = bci; }
  void              set_should_reexecute(bool reexec) { _reexecute = reexec ? Reexecute_True : Reexecute_False; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void      format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void      dump_spec(outputStream *st) const;
  void      dump_on(outputStream* st) const;
  void      dump() const {
    dump_on(tty);
  }
#endif
};
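
// A sketch of walking an inline chain from the youngest scope out to the
// root, in the style of the printing and debug-info code (assumed usage,
// not part of this change):
//
//   for (const JVMState* jvms = youngest; jvms != NULL; jvms = jvms->caller()) {
//     if (jvms->has_method()) {
//       tty->print_cr("%s @ bci %d", jvms->method()->name()->as_utf8(), jvms->bci());
//     }
//   }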

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;   // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;      // Pointer to list of JVM State objects
  const TypePtr*  _adr_type;  // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s;  // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control  () const { return in(TypeFunc::Control  ); }
  Node *i_o      () const { return in(TypeFunc::I_O      ); }
  Node *memory   () const { return in(TypeFunc::Memory   ); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr () const { return in(TypeFunc::FramePtr ); }

  void set_control  ( Node *c ) { set_req(TypeFunc::Control,c); }
  void set_i_o      ( Node *c ) { set_req(TypeFunc::I_O    ,c); }
  void set_memory   ( Node *c ) { set_req(TypeFunc::Memory ,c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode*         next_exception() const;
  void               set_next_exception(SafePointNode* n);
  bool                   has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int            Opcode() const;
  virtual bool           pinned() const { return true; }
  virtual const Type    *Value( PhaseTransform *phase ) const;
  virtual const Type    *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node          *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node          *Identity( PhaseTransform *phase );
  virtual uint           ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  static bool            needs_polling_address_input();

#ifndef PRODUCT
  virtual void           dump_spec(outputStream *st) const;
#endif
};

//------------------------------SafePointScalarObjectNode----------------------
// A SafePointScalarObjectNode represents the state of a scalarized object
// at a safepoint.

class SafePointScalarObjectNode: public TypeNode {
  uint _first_index; // First input edge index of a SafePoint node where
                     // states of the scalarized object fields are collected.
  uint _n_fields;    // Number of non-static fields of the scalarized object.
  DEBUG_ONLY(AllocateNode* _alloc;)

  virtual uint hash() const; // { return NO_HASH; }
  virtual uint cmp( const Node &n ) const;

public:
  SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                            AllocateNode* alloc,
#endif
                            uint first_index, uint n_fields);
  virtual int Opcode() const;
  virtual uint           ideal_reg() const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint           match_edge(uint idx) const;

  uint first_index() const { return _first_index; }
  uint n_fields()    const { return _n_fields; }

#ifdef ASSERT
  AllocateNode* alloc() const { return _alloc; }
#endif

  virtual uint size_of() const { return sizeof(*this); }

  // Assumes that "this" is an argument to a safepoint node "s", and that
  // "new_call" is being created to correspond to "s". But the difference
  // between the start index of the jvmstates of "new_call" and "s" is
  // "jvms_adj". Produce and return a SafePointScalarObjectNode that
  // corresponds appropriately to "this" in "new_call". Assumes that
  // "sosn_map" is a map, specific to the translation of "s" to "new_call",
  // mapping old SafePointScalarObjectNodes to new, to avoid multiple copies.
  SafePointScalarObjectNode* clone(int jvms_adj, Dict* sosn_map) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


// Simple container for the outgoing projections of a call. Useful
// for serious surgery on calls.
class CallProjections : public StackObj {
public:
  Node* fallthrough_proj;
  Node* fallthrough_catchproj;
  Node* fallthrough_memproj;
  Node* fallthrough_ioproj;
  Node* catchall_catchproj;
  Node* catchall_memproj;
  Node* catchall_ioproj;
  Node* resproj;
  Node* exobj;
};


//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
  friend class VMStructs;
public:
  const TypeFunc *_tf;          // Function type
  address         _entry_point; // Address of method being called
  float           _cnt;         // Estimate of number of times called

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
  }

  const TypeFunc* tf()          const { return _tf; }
  const address   entry_point() const { return _entry_point; }
  const float     cnt()         const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c)           { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint  cmp( const Node &n ) const;
  virtual uint  size_of() const = 0;
  virtual void  calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint  ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool  guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void  clone_jvms() { } // default is not to clone

  // Returns true if the call may modify the memory addressed by addr_t
  virtual bool  may_modify(const TypePtr *addr_t, PhaseTransform *phase);
  // Does this node have a use of n other than in debug information?
  bool          has_non_debug_use(Node *n);
  // Returns the unique CheckCastPP of a call,
  // or the result projection if there are several CheckCastPPs,
  // or NULL if there is none.
  Node *result_cast();
  // Does this node return a pointer?
  bool returns_pointer() const {
    const TypeTuple *r = tf()->range();
    return (r->cnt() > TypeFunc::Parms &&
            r->field_at(TypeFunc::Parms)->isa_ptr());
  }

  // Collect all the interesting edges from a call for use in
  // replacing the call by something else. Used by macro expansion
  // and the late inlining support.
  void extract_projections(CallProjections* projs, bool separate_io_proj);

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req() const;
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
  friend class VMStructs;
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  bool      _method_handle_invoke;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _optimized_virtual(false),
      _method_handle_invoke(false),
      _method(method), _bci(bci)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const               { return _method; }
  void  set_method(ciMethod *m)          { _method = m; }
  void  set_optimized_virtual(bool f)    { _optimized_virtual = f; }
  bool  is_optimized_virtual() const     { return _optimized_virtual; }
  void  set_method_handle_invoke(bool f) { _method_handle_invoke = f; }
  bool  is_method_handle_invoke() const  { return _method_handle_invoke; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name; // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf, address addr, ciMethod* method, int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int  Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int  Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress  = TypeFunc::Parms,  // the newly-allocated raw address
    // Inputs:
    AllocSize   = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                      // type (maybe dynamic) of the obj.
    InitialTest,                    // slow-path test (may be constant)
    ALength,                        // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }
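
  // (ALength is typed TypeInt::INT rather than TypeInt::POS above because a
  // negative "bad" length must still be able to flow into the node --
  // presumably so the slow path can throw NegativeArraySizeException; see the
  // "can be a bad length" comment on the field initialization.)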

  bool _is_scalar_replaceable; // Result of Escape Analysis

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int  Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // allocations do not modify their arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? arrayOopDesc::base_offset_in_bytes(T_BYTE) :
                                instanceOopDesc::base_offset_in_bytes();
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Return the corresponding storestore barrier (or null if none).
  // Walks out edges to find it...
  MemBarStoreStoreNode* storestore();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val
                    )
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);

  // Dig the length operand out of an array allocation site.
  Node* Ideal_length() {
    return in(AllocateNode::ALength);
  }

  // Dig the length operand out of an array allocation site and narrow the
  // type with a CastII, if necessary
  Node* make_ideal_length(const TypeOopPtr* ary_type, PhaseTransform *phase, bool can_create = true);

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  enum {
    Regular = 0,  // Normal lock
    NonEscObj,    // Lock is used for non-escaping object
    Coarsened,    // Lock was coarsened
    Nested        // Nested lock
  } _kind;
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

  // Update the counter to indicate that this lock was eliminated.
  void set_eliminated_lock_counter() PRODUCT_RETURN;

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _kind(Regular)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node* obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node* box_node() const      { return in(TypeFunc::Parms + 1); }
  Node* fastlock_node() const { return in(TypeFunc::Parms + 2); }
  void  set_box_node(Node* box) { set_req(TypeFunc::Parms + 1, box); }

  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated()  const { return (_kind != Regular); }
  bool is_non_esc_obj() const { return (_kind == NonEscObj); }
  bool is_coarsened()   const { return (_kind == Coarsened); }
  bool is_nested()      const { return (_kind == Nested); }

  void set_non_esc_obj() { _kind = NonEscObj; set_eliminated_lock_counter(); }
  void set_coarsened()   { _kind = Coarsened; set_eliminated_lock_counter(); }
  void set_nested()      { _kind = Nested; set_eliminated_lock_counter(); }

  // locking does not modify its arguments
  virtual bool may_modify(const TypePtr *addr_t, PhaseTransform *phase) { return false; }

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }
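
  // The three Parms in the domain above line up with AbstractLockNode's
  // accessors: obj_node() reads Parms+0, box_node() reads Parms+1, and
  // fastlock_node() reads the FastLock result at Parms+2.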

  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }

  bool is_nested_lock_region(); // Is this Lock nested?
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int  Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};

#endif // SHARE_VM_OPTO_CALLNODE_HPP