Thu, 06 Mar 2008 10:30:17 -0800
6667610: (Escape Analysis) retry compilation without EA if it fails
Summary: During split unique types, EA could exceed the node limit and fail the method compilation.
Reviewed-by: rasbold
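
A sketch of the retry shape the summary describes (names and signatures below
are illustrative assumptions, not the actual patch): when the node budget is
exceeded while splitting unique types, compile the method again with escape
analysis turned off.

    // Illustrative sketch only; hypothetical driver code, not the patch.
    // A real driver would also check *why* the compile failed.
    bool do_escape_analysis = DoEscapeAnalysis;
    while (true) {
      Compile C(env, target, osr_bci, /*subsume_loads=*/ true, do_escape_analysis);
      if (C.failing() && do_escape_analysis) {
        do_escape_analysis = false;  // retry the same method without EA
        continue;
      }
      break;  // success, or a failure that dropping EA cannot fix
    }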
/*
 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

class Chaitin;
class NamedCounter;
class MultiNode;
class SafePointNode;
class CallNode;
class CallJavaNode;
class CallStaticJavaNode;
class CallDynamicJavaNode;
class CallRuntimeNode;
class CallLeafNode;
class CallLeafNoFPNode;
class AllocateNode;
class AllocateArrayNode;
class LockNode;
class UnlockNode;
class JVMState;
class OopMap;
class State;
class StartNode;
class MachCallNode;
class FastLockNode;

//------------------------------StartNode--------------------------------------
// The method start node
class StartNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  const TypeTuple *_domain;
  StartNode( Node *root, const TypeTuple *domain ) : MultiNode(2), _domain(domain) {
    init_class_id(Class_Start);
    init_flags(Flag_is_block_start);
    init_req(0,this);
    init_req(1,root);
  }
  virtual int Opcode() const;
  virtual bool pinned() const { return true; };
  virtual const Type *bottom_type() const;
  virtual const TypePtr *adr_type() const { return TypePtr::BOTTOM; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_reg, uint length ) const;
  virtual const RegMask &in_RegMask(uint) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return 0; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------StartOSRNode-----------------------------------
// The method start node for on stack replacement code
class StartOSRNode : public StartNode {
public:
  StartOSRNode( Node *root, const TypeTuple *domain ) : StartNode(root, domain) {}
  virtual int Opcode() const;
  static const TypeTuple *osr_domain();
};


//------------------------------ParmNode---------------------------------------
// Incoming parameters
class ParmNode : public ProjNode {
  static const char * const names[TypeFunc::Parms+1];
public:
  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
    init_class_id(Class_Parm);
  }
  virtual int Opcode() const;
  virtual bool is_CFG() const { return (_con == TypeFunc::Control); }
  virtual uint ideal_reg() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};


//------------------------------ReturnNode-------------------------------------
// Return from subroutine node
class ReturnNode : public Node {
public:
  ReturnNode( uint edges, Node *cntrl, Node *i_o, Node *memory, Node *retadr, Node *frameptr );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
  virtual uint match_edge(uint idx) const;
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------RethrowNode------------------------------------
// Rethrow of exception at call site. Ends a procedure before rethrowing;
// ends the current basic block like a ReturnNode. Restores registers and
// unwinds stack. Rethrow happens in the caller's method.
class RethrowNode : public Node {
public:
  RethrowNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *ret_adr, Node *exception );
  virtual int Opcode() const;
  virtual bool is_CFG() const { return true; }
  virtual uint hash() const { return NO_HASH; } // CFG nodes do not hash
  virtual bool depends_only_on_test() const { return false; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual uint match_edge(uint idx) const;
  virtual uint ideal_reg() const { return NotAMachineReg; }
#ifndef PRODUCT
  virtual void dump_req() const;
#endif
};


//------------------------------TailCallNode-----------------------------------
// Pop stack frame and jump indirect
class TailCallNode : public ReturnNode {
public:
  TailCallNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr, Node *target, Node *moop )
    : ReturnNode( TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, retadr ) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, moop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//------------------------------TailJumpNode-----------------------------------
// Pop stack frame and jump indirect
class TailJumpNode : public ReturnNode {
public:
  TailJumpNode( Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *target, Node *ex_oop)
    : ReturnNode(TypeFunc::Parms+2, cntrl, i_o, memory, frameptr, Compile::current()->top()) {
    init_req(TypeFunc::Parms, target);
    init_req(TypeFunc::Parms+1, ex_oop);
  }

  virtual int Opcode() const;
  virtual uint match_edge(uint idx) const;
};

//-------------------------------JVMState-------------------------------------
// A linked list of JVMState nodes captures the whole interpreter state,
// plus GC roots, for all active calls at some call site in this compilation
// unit. (If there is no inlining, then the list has exactly one link.)
// This provides a way to map the optimized program back into the interpreter,
// or to let the GC mark the stack.
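//
// For example (an illustrative sketch, not part of this interface): with
// a() inlined into b() inlined into c(), the youngest JVMState links to two
// caller states, and the chain can be walked via caller():
//
//   for (JVMState* jvms = map->jvms(); jvms != NULL; jvms = jvms->caller()) {
//     if (jvms->has_method())
//       tty->print_cr("bci %d in %s", jvms->bci(),
//                     jvms->method()->name()->as_utf8());
//   }
//
// where 'map' stands for some SafePointNode of interest.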
class JVMState : public ResourceObj {
private:
  JVMState*      _caller;  // List pointer for forming scope chains
  uint           _depth;   // One more than caller depth, or one.
  uint           _locoff;  // Offset to locals in input edge mapping
  uint           _stkoff;  // Offset to stack in input edge mapping
  uint           _monoff;  // Offset to monitors in input edge mapping
  uint           _endoff;  // Offset to end of input edge mapping
  uint           _sp;      // Java Expression Stack Pointer for this state
  int            _bci;     // Byte Code Index of this JVM point
  ciMethod*      _method;  // Method Pointer
  SafePointNode* _map;     // Map node associated with this scope
public:
  friend class Compile;

  // Because JVMState objects live over the entire lifetime of the
  // Compile object, they are allocated into the comp_arena, which
  // does not get resource marked or reset during the compile process
  void *operator new( size_t x, Compile* C ) { return C->comp_arena()->Amalloc(x); }
  void operator delete( void * ) { } // fast deallocation

  // Create a new JVMState, ready for abstract interpretation.
  JVMState(ciMethod* method, JVMState* caller);
  JVMState(int stack_size); // root state; has a null method

  // Access functions for the JVM
  uint locoff() const { return _locoff; }
  uint stkoff() const { return _stkoff; }
  uint argoff() const { return _stkoff + _sp; }
  uint monoff() const { return _monoff; }
  uint endoff() const { return _endoff; }
  uint oopoff() const { return debug_end(); }

  int loc_size() const { return _stkoff - _locoff; }
  int stk_size() const { return _monoff - _stkoff; }
  int mon_size() const { return _endoff - _monoff; }

  bool is_loc(uint i) const { return i >= _locoff && i < _stkoff; }
  bool is_stk(uint i) const { return i >= _stkoff && i < _monoff; }
  bool is_mon(uint i) const { return i >= _monoff && i < _endoff; }

  uint sp() const { return _sp; }
  int bci() const { return _bci; }
  bool has_method() const { return _method != NULL; }
  ciMethod* method() const { assert(has_method(), ""); return _method; }
  JVMState* caller() const { return _caller; }
  SafePointNode* map() const { return _map; }
  uint depth() const { return _depth; }
  uint debug_start() const; // returns locoff of root caller
  uint debug_end() const;   // returns endoff of self
  uint debug_size() const { return loc_size() + sp() + mon_size(); }
  uint debug_depth() const; // returns sum of debug_size values at all depths

  // Returns the JVM state at the desired depth (1 == root).
  JVMState* of_depth(int d) const;

  // Tells if two JVM states have the same call chain (depth, methods, & bcis).
  bool same_calls_as(const JVMState* that) const;

  // Monitors (monitors are stored as (boxNode, objNode) pairs)
  enum { logMonitorEdges = 1 };
  int nof_monitors() const { return mon_size() >> logMonitorEdges; }
  int monitor_depth() const { return nof_monitors() + (caller() ? caller()->monitor_depth() : 0); }
  int monitor_box_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 0; }
  int monitor_obj_offset(int idx) const { return monoff() + (idx << logMonitorEdges) + 1; }
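  // For example (an illustrative note, not from the source): with
  // logMonitorEdges == 1 each monitor occupies two input edges, so monitor
  // idx == 1 has its BoxLock edge at monoff() + 2 and its object edge at
  // monoff() + 3.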
  bool is_monitor_box(uint off) const {
    assert(is_mon(off), "should be called only for monitor edge");
    return (0 == bitfield(off - monoff(), 0, logMonitorEdges));
  }
  bool is_monitor_use(uint off) const { return (is_mon(off)
                                                && is_monitor_box(off))
                                               || (caller() && caller()->is_monitor_use(off)); }

  // Initialization functions for the JVM
  void set_locoff(uint off) { _locoff = off; }
  void set_stkoff(uint off) { _stkoff = off; }
  void set_monoff(uint off) { _monoff = off; }
  void set_endoff(uint off) { _endoff = off; }
  void set_offsets(uint off) { _locoff = _stkoff = _monoff = _endoff = off; }
  void set_map(SafePointNode *map) { _map = map; }
  void set_sp(uint sp) { _sp = sp; }
  void set_bci(int bci) { _bci = bci; }

  // Miscellaneous utility functions
  JVMState* clone_deep(Compile* C) const;    // recursively clones caller chain
  JVMState* clone_shallow(Compile* C) const; // retains uncloned caller

#ifndef PRODUCT
  void format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const;
  void dump_spec(outputStream *st) const;
  void dump_on(outputStream* st) const;
  void dump() const {
    dump_on(tty);
  }
#endif
};

//------------------------------SafePointNode----------------------------------
// A SafePointNode is a subclass of a MultiNode for convenience (and
// potential code sharing) only - conceptually it is independent of
// the Node semantics.
class SafePointNode : public MultiNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

public:
  SafePointNode(uint edges, JVMState* jvms,
                // A plain safepoint advertises no memory effects (NULL):
                const TypePtr* adr_type = NULL)
    : MultiNode( edges ),
      _jvms(jvms),
      _oop_map(NULL),
      _adr_type(adr_type)
  {
    init_class_id(Class_SafePoint);
  }

  OopMap*         _oop_map;  // Array of OopMap info (8-bit char) for GC
  JVMState* const _jvms;     // Pointer to list of JVM State objects
  const TypePtr*  _adr_type; // What type of memory does this node produce?

  // Many calls take *all* of memory as input,
  // but some produce a limited subset of that memory as output.
  // The adr_type reports the call's behavior as a store, not a load.

  virtual JVMState* jvms() const { return _jvms; }
  void set_jvms(JVMState* s) {
    *(JVMState**)&_jvms = s; // override const attribute in the accessor
  }
  OopMap *oop_map() const { return _oop_map; }
  void set_oop_map(OopMap *om) { _oop_map = om; }

  // Functionality from old debug nodes which has changed
  Node *local(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->locoff() + idx);
  }
  Node *stack(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->stkoff() + idx);
  }
  Node *argument(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->argoff() + idx);
  }
  Node *monitor_box(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_box_offset(idx));
  }
  Node *monitor_obj(JVMState* jvms, uint idx) const {
    assert(verify_jvms(jvms), "jvms must match");
    return in(jvms->monitor_obj_offset(idx));
  }

  void set_local(JVMState* jvms, uint idx, Node *c);

  void set_stack(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->stkoff() + idx, c);
  }
  void set_argument(JVMState* jvms, uint idx, Node *c) {
    assert(verify_jvms(jvms), "jvms must match");
    set_req(jvms->argoff() + idx, c);
  }
  void ensure_stack(JVMState* jvms, uint stk_size) {
    assert(verify_jvms(jvms), "jvms must match");
    int grow_by = (int)stk_size - (int)jvms->stk_size();
    if (grow_by > 0) grow_stack(jvms, grow_by);
  }
  void grow_stack(JVMState* jvms, uint grow_by);
  // Handle monitor stack
  void push_monitor( const FastLockNode *lock );
  void pop_monitor();
  Node *peek_monitor_box() const;
  Node *peek_monitor_obj() const;

  // Access functions for the JVM
  Node *control()   const { return in(TypeFunc::Control); }
  Node *i_o()       const { return in(TypeFunc::I_O); }
  Node *memory()    const { return in(TypeFunc::Memory); }
  Node *returnadr() const { return in(TypeFunc::ReturnAdr); }
  Node *frameptr()  const { return in(TypeFunc::FramePtr); }

  void set_control( Node *c ) { set_req(TypeFunc::Control, c); }
  void set_i_o    ( Node *c ) { set_req(TypeFunc::I_O, c); }
  void set_memory ( Node *c ) { set_req(TypeFunc::Memory, c); }

  MergeMemNode* merged_memory() const {
    return in(TypeFunc::Memory)->as_MergeMem();
  }

  // The parser marks useless maps as dead when it's done with them:
  bool is_killed() { return in(TypeFunc::Control) == NULL; }

  // Exception states bubbling out of subgraphs such as inlined calls
  // are recorded here. (There might be more than one, hence the "next".)
  // This feature is used only for safepoints which serve as "maps"
  // for JVM states during parsing, intrinsic expansion, etc.
  SafePointNode* next_exception() const;
  void set_next_exception(SafePointNode* n);
  bool has_exceptions() const { return next_exception() != NULL; }

  // Standard Node stuff
  virtual int Opcode() const;
  virtual bool pinned() const { return true; }
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual const Type *bottom_type() const { return Type::CONTROL; }
  virtual const TypePtr *adr_type() const { return _adr_type; }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  virtual Node *Identity( PhaseTransform *phase );
  virtual uint ideal_reg() const { return 0; }
  virtual const RegMask &in_RegMask(uint) const;
  virtual const RegMask &out_RegMask() const;
  virtual uint match_edge(uint idx) const;

  static bool needs_polling_address_input();

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallNode---------------------------------------
// Call nodes now subsume the function of debug nodes at callsites, so they
// contain the functionality of a full scope chain of debug nodes.
class CallNode : public SafePointNode {
public:
  const TypeFunc *_tf;  // Function type
  address _entry_point; // Address of method being called
  float   _cnt;         // Estimate of number of times called
  PointsToNode::EscapeState _escape_state;

  CallNode(const TypeFunc* tf, address addr, const TypePtr* adr_type)
    : SafePointNode(tf->domain()->cnt(), NULL, adr_type),
      _tf(tf),
      _entry_point(addr),
      _cnt(COUNT_UNKNOWN)
  {
    init_class_id(Class_Call);
    init_flags(Flag_is_Call);
    _escape_state = PointsToNode::UnknownEscape;
  }

  const TypeFunc* tf() const { return _tf; }
  const address entry_point() const { return _entry_point; }
  const float cnt() const { return _cnt; }

  void set_tf(const TypeFunc* tf) { _tf = tf; }
  void set_entry_point(address p) { _entry_point = p; }
  void set_cnt(float c) { _cnt = c; }

  virtual const Type *bottom_type() const;
  virtual const Type *Value( PhaseTransform *phase ) const;
  virtual Node *Identity( PhaseTransform *phase ) { return this; }
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const = 0;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;
  virtual Node *match( const ProjNode *proj, const Matcher *m );
  virtual uint ideal_reg() const { return NotAMachineReg; }
  // Are we guaranteed that this node is a safepoint? Not true for leaf calls and
  // for some macro nodes whose expansion does not have a safepoint on the fast path.
  virtual bool guaranteed_safepoint() { return true; }
  // For macro nodes, the JVMState gets modified during expansion, so when cloning
  // the node the JVMState must be cloned.
  virtual void clone_jvms() { } // default is not to clone

  virtual uint match_edge(uint idx) const;

#ifndef PRODUCT
  virtual void dump_req() const;
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallJavaNode-----------------------------------
// Make a static or dynamic subroutine call node using Java calling
// convention. (The "Java" calling convention is the compiler's calling
// convention, as opposed to the interpreter's or that of native C.)
class CallJavaNode : public CallNode {
protected:
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger

  bool      _optimized_virtual;
  ciMethod* _method;            // Method being direct called
public:
  const int _bci;               // Byte Code Index of call byte code
  CallJavaNode(const TypeFunc* tf , address addr, ciMethod* method, int bci)
    : CallNode(tf, addr, TypePtr::BOTTOM),
      _method(method), _bci(bci), _optimized_virtual(false)
  {
    init_class_id(Class_CallJava);
  }

  virtual int Opcode() const;
  ciMethod* method() const { return _method; }
  void set_method(ciMethod *m) { _method = m; }
  void set_optimized_virtual(bool f) { _optimized_virtual = f; }
  bool is_optimized_virtual() const { return _optimized_virtual; }

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallStaticJavaNode-----------------------------
// Make a direct subroutine call using Java calling convention (for static
// calls and optimized virtual calls, plus calls to wrappers for run-time
// routines); generates static stub.
class CallStaticJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallStaticJavaNode(const TypeFunc* tf, address addr, ciMethod* method, int bci)
    : CallJavaNode(tf, addr, method, bci), _name(NULL) {
    init_class_id(Class_CallStaticJava);
  }
  CallStaticJavaNode(const TypeFunc* tf, address addr, const char* name, int bci,
                     const TypePtr* adr_type)
    : CallJavaNode(tf, addr, NULL, bci), _name(name) {
    init_class_id(Class_CallStaticJava);
    // This node calls a runtime stub, which often has narrow memory effects.
    _adr_type = adr_type;
  }
  const char *_name; // Runtime wrapper name

  // If this is an uncommon trap, return the request code, else zero.
  int uncommon_trap_request() const;
  static int extract_uncommon_trap_request(const Node* call);

  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallDynamicJavaNode----------------------------
// Make a dispatched call using Java calling convention.
class CallDynamicJavaNode : public CallJavaNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallDynamicJavaNode( const TypeFunc *tf , address addr, ciMethod* method,
                       int vtable_index, int bci )
    : CallJavaNode(tf, addr, method, bci), _vtable_index(vtable_index) {
    init_class_id(Class_CallDynamicJava);
  }

  int _vtable_index;
  virtual int Opcode() const;
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallRuntimeNode--------------------------------
// Make a direct subroutine call node into compiled C++ code.
class CallRuntimeNode : public CallNode {
  virtual uint cmp( const Node &n ) const;
  virtual uint size_of() const; // Size is bigger
public:
  CallRuntimeNode(const TypeFunc* tf, address addr, const char* name,
                  const TypePtr* adr_type)
    : CallNode(tf, addr, adr_type),
      _name(name)
  {
    init_class_id(Class_CallRuntime);
  }

  const char *_name; // Printable name, if _method is NULL
  virtual int Opcode() const;
  virtual void calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const;

#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNode-----------------------------------
// Make a direct subroutine call node into compiled C++ code, without
// safepoints
class CallLeafNode : public CallRuntimeNode {
public:
  CallLeafNode(const TypeFunc* tf, address addr, const char* name,
               const TypePtr* adr_type)
    : CallRuntimeNode(tf, addr, name, adr_type)
  {
    init_class_id(Class_CallLeaf);
  }
  virtual int Opcode() const;
  virtual bool guaranteed_safepoint() { return false; }
#ifndef PRODUCT
  virtual void dump_spec(outputStream *st) const;
#endif
};

//------------------------------CallLeafNoFPNode-------------------------------
// CallLeafNode, not using floating point or using it in the same manner as
// the generated code
class CallLeafNoFPNode : public CallLeafNode {
public:
  CallLeafNoFPNode(const TypeFunc* tf, address addr, const char* name,
                   const TypePtr* adr_type)
    : CallLeafNode(tf, addr, name, adr_type)
  {
  }
  virtual int Opcode() const;
};


//------------------------------Allocate---------------------------------------
// High-level memory allocation
//
// AllocateNode and AllocateArrayNode are subclasses of CallNode because they will
// get expanded into a code sequence containing a call. Unlike other CallNodes,
// they have 2 memory projections and 2 i_o projections (which are distinguished by
// the _is_io_use flag in the projection.) This is needed when expanding the node in
// order to differentiate the uses of the projection on the normal control path from
// those on the exception return path.
//
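// As an illustrative sketch (not part of this interface), the i_o projection
// used on the exception path could be located like this, where 'alloc' is
// some AllocateNode of interest:
//
//   for (DUIterator_Fast imax, i = alloc->fast_outs(imax); i < imax; i++) {
//     ProjNode* pn = alloc->fast_out(i)->isa_Proj();
//     if (pn != NULL && pn->_con == TypeFunc::I_O && pn->_is_io_use)
//       return pn; // the exception-path i_o use
//   }
//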
class AllocateNode : public CallNode {
public:
  enum {
    // Output:
    RawAddress = TypeFunc::Parms, // the newly-allocated raw address
    // Inputs:
    AllocSize = TypeFunc::Parms,  // size (in bytes) of the new object
    KlassNode,                    // type (maybe dynamic) of the obj.
    InitialTest,                  // slow-path test (may be constant)
    ALength,                      // array length (or TOP if none)
    ParmLimit
  };

  static const TypeFunc* alloc_type() {
    const Type** fields = TypeTuple::fields(ParmLimit - TypeFunc::Parms);
    fields[AllocSize]   = TypeInt::POS;
    fields[KlassNode]   = TypeInstPtr::NOTNULL;
    fields[InitialTest] = TypeInt::BOOL;
    fields[ALength]     = TypeInt::INT; // length (can be a bad length)

    const TypeTuple *domain = TypeTuple::make(ParmLimit, fields);

    // create result type (range)
    fields = TypeTuple::fields(1);
    fields[TypeFunc::Parms+0] = TypeRawPtr::NOTNULL; // Returned oop

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+1, fields);

    return TypeFunc::make(domain, range);
  }

  virtual uint size_of() const; // Size is bigger
  AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
               Node *size, Node *klass_node, Node *initial_test);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
  virtual int Opcode() const;
  virtual uint ideal_reg() const { return Op_RegP; }
  virtual bool guaranteed_safepoint() { return false; }

  // Pattern-match a possible usage of AllocateNode.
  // Return null if no allocation is recognized.
  // The operand is the pointer produced by the (possible) allocation.
  // It must be a projection of the Allocate or its subsequent CastPP.
  // (Note: This function is defined in file graphKit.cpp, near
  // GraphKit::new_instance/new_array, whose output it recognizes.)
  // The 'ptr' may not have an offset unless the 'offset' argument is given.
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase);

  // Fancy version which uses AddPNode::Ideal_base_and_offset to strip
  // an offset, which is reported back to the caller.
  // (Note: AllocateNode::Ideal_allocation is defined in graphKit.cpp.)
  static AllocateNode* Ideal_allocation(Node* ptr, PhaseTransform* phase,
                                        intptr_t& offset);

  // Dig the klass operand out of a (possible) allocation site.
  static Node* Ideal_klass(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(KlassNode);
  }

  // Conservatively small estimate of offset of first non-header byte.
  int minimum_header_size() {
    return is_AllocateArray() ? sizeof(arrayOopDesc) : sizeof(oopDesc);
  }

  // Return the corresponding initialization barrier (or null if none).
  // Walks out edges to find it...
  // (Note: Both InitializeNode::allocation and AllocateNode::initialization
  // are defined in graphKit.cpp, which sets up the bidirectional relation.)
  InitializeNode* initialization();

  // Convenience for initialization->maybe_set_complete(phase)
  bool maybe_set_complete(PhaseGVN* phase);
};

//------------------------------AllocateArray---------------------------------
//
// High-level array allocation
//
class AllocateArrayNode : public AllocateNode {
public:
  AllocateArrayNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                    Node* size, Node* klass_node, Node* initial_test,
                    Node* count_val)
    : AllocateNode(C, atype, ctrl, mem, abio, size, klass_node,
                   initial_test)
  {
    init_class_id(Class_AllocateArray);
    set_req(AllocateNode::ALength, count_val);
  }
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger

  // Pattern-match a possible usage of AllocateArrayNode.
  // Return null if no allocation is recognized.
  static AllocateArrayNode* Ideal_array_allocation(Node* ptr, PhaseTransform* phase) {
    AllocateNode* allo = Ideal_allocation(ptr, phase);
    return (allo == NULL || !allo->is_AllocateArray())
           ? NULL : allo->as_AllocateArray();
  }

  // Dig the length operand out of a (possible) array allocation site.
  static Node* Ideal_length(Node* ptr, PhaseTransform* phase) {
    AllocateArrayNode* allo = Ideal_array_allocation(ptr, phase);
    return (allo == NULL) ? NULL : allo->in(AllocateNode::ALength);
  }
};

//------------------------------AbstractLockNode-----------------------------------
class AbstractLockNode: public CallNode {
private:
  bool _eliminate; // indicates this lock can be safely eliminated
#ifndef PRODUCT
  NamedCounter* _counter;
#endif

protected:
  // helper functions for lock elimination
  //

  bool find_matching_unlock(const Node* ctrl, LockNode* lock,
                            GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                       GrowableArray<AbstractLockNode*> &lock_ops);
  bool find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops);
  LockNode *find_matching_lock(UnlockNode* unlock);

public:
  AbstractLockNode(const TypeFunc *tf)
    : CallNode(tf, NULL, TypeRawPtr::BOTTOM),
      _eliminate(false)
  {
#ifndef PRODUCT
    _counter = NULL;
#endif
  }
  virtual int Opcode() const = 0;
  Node *obj_node() const      { return in(TypeFunc::Parms + 0); }
  Node *box_node() const      { return in(TypeFunc::Parms + 1); }
  Node *fastlock_node() const { return in(TypeFunc::Parms + 2); }
  const Type *sub(const Type *t1, const Type *t2) const { return TypeInt::CC; }

  virtual uint size_of() const { return sizeof(*this); }

  bool is_eliminated() { return _eliminate; }
  // mark node as eliminated and update the counter if there is one
  void set_eliminated();

#ifndef PRODUCT
  void create_lock_counter(JVMState* s);
  NamedCounter* counter() const { return _counter; }
#endif
};

//------------------------------Lock---------------------------------------
// High-level lock operation
//
// This is a subclass of CallNode because it is a macro node which gets expanded
// into a code sequence containing a call. This node takes 3 "parameters":
//    0 - object to lock
//    1 - a BoxLockNode
//    2 - a FastLockNode
//
class LockNode : public AbstractLockNode {
public:

  static const TypeFunc *lock_type() {
    // create input type (domain)
    const Type **fields = TypeTuple::fields(3);
    fields[TypeFunc::Parms+0] = TypeInstPtr::NOTNULL; // Object to be Locked
    fields[TypeFunc::Parms+1] = TypeRawPtr::BOTTOM;   // Address of stack location for lock
    fields[TypeFunc::Parms+2] = TypeInt::BOOL;        // FastLock
    const TypeTuple *domain = TypeTuple::make(TypeFunc::Parms+3, fields);

    // create result type (range)
    fields = TypeTuple::fields(0);

    const TypeTuple *range = TypeTuple::make(TypeFunc::Parms+0, fields);

    return TypeFunc::make(domain, range);
  }

  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  LockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Lock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual bool guaranteed_safepoint() { return false; }

  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // Expansion modifies the JVMState, so we need to clone it
  virtual void clone_jvms() {
    set_jvms(jvms()->clone_deep(Compile::current()));
  }
};

//------------------------------Unlock---------------------------------------
// High-level unlock operation
class UnlockNode : public AbstractLockNode {
public:
  virtual int Opcode() const;
  virtual uint size_of() const; // Size is bigger
  UnlockNode(Compile* C, const TypeFunc *tf) : AbstractLockNode( tf ) {
    init_class_id(Class_Unlock);
    init_flags(Flag_is_macro);
    C->add_macro_node(this);
  }
  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
  // unlock is never a safepoint
  virtual bool guaranteed_safepoint() { return false; }
};