Thu, 13 Mar 2008 16:06:34 -0700
6671807: (Escape Analysis) Add new ideal node to represent the state of a scalarized object at a safepoint
Summary: Values of non-static fields of a scalarized object should be saved in debug info to reallocate the object during deoptimization.
Reviewed-by: never
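
For context, a hedged illustration (not part of the changeset; Point is a
hypothetical class) of the kind of code this affects:

    Point p = new Point(x, y);   // non-escaping allocation, scalar replaced by EA
    ...                          // at a safepoint here, the values of x and y are
    ...                          // recorded so deoptimization can reallocate p

The new node records, for each eliminated object, where those field values
sit in the safepoint's debug info.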
/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_callnode.cpp.incl"

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C, 1) ConNode(Type::TOP);
      uint ideal_reg = Matcher::base2reg[t->base()];
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print(names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return Matcher::base2reg[t->base()];
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *ReturnNode::Value( PhaseTransform *phase ) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void ReturnNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("returns");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *RethrowNode::Value( PhaseTransform *phase ) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("exception");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
}

//=============================================================================
JVMState::JVMState(ciMethod* method, JVMState* caller) {
  assert(method != NULL, "must be valid call site");
  _method = method;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

JVMState::JVMState(int stack_size) {
  _method = NULL;
  _bci = InvocationEntryBci;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
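
// A hedged sketch (not an original comment) of the debug-info edge layout
// implied by the offsets computed in the two constructors above:
//
//   [ locoff .. stkoff )  locals
//   [ stkoff .. monoff )  expression stack
//   [ monoff .. scloff )  monitor box/obj pairs
//   [ scloff .. endoff )  scalarized-object field values (new in this change)
//
// At construction scloff == endoff == monoff; monitor edges and
// scalarized-object entries are appended later (see push_monitor below and
// SafePointScalarObjectNode).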
//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci != q->_bci)          return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR, "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::KlassPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if( _method ) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for( i = 0; i < (uint)loc_size(); i++ )
      format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
        while( !box->is_BoxLock() )  box = box->in(1);
        format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
      } else {
        OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      format_helper( regalloc, st, obj, "MON-OBJ[", i, &scobjs );
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->print_cr("");
      st->print(" # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]=", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciType* cie = cik->as_array_klass()->base_element_type();
        int ndim = 1;
        while (cie->is_obj_array_klass()) {
          ndim += 1;
          cie = cie->as_array_klass()->base_element_type();
        }
        cie->print_name_on(st);
        while (ndim-- > 0) {
          st->print("[]");
        }
        st->print("[%d]=", spobj->n_fields());
      }
      st->print("{");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index();
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
        } else {
          format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper( regalloc, st, fld_node, ":", j, &scobjs );
          } else {
            format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
          }
        }
      }
      st->print(" }");
    }
  }
  st->print_cr("");
  if (caller() != NULL)  caller()->format(regalloc, n, st);
}

void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}

void JVMState::dump_on(outputStream* st) const {
  if (_map && !((uintptr_t)_map & 1)) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(2);
  }
  st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=",
            depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci());
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("(");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
  tty->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type *CallNode::Value(PhaseTransform *phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}

//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(_in_dump_cnt != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}
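
// Note (an observation, not an original comment): the TypeFunc::Parms input
// of an uncommon-trap call is an integer constant that packs the
// deoptimization reason and action; dump_spec below decodes it via
// Deoptimization::format_trap_request for readability.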
#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------

//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}
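
// For example (a sketch, not from the source): if L[3] holds a long, its
// second half is the top value in L[4]. Storing a new int into L[4] must
// also kill L[3], otherwise the debug info would describe half of a stale
// long as if it were still live.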
uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}

//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}

//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;

  return NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}

void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    add_req(NULL);
    add_req(NULL);
  }
  jvms()->set_scloff(nextmon+MonitorEdges);
  jvms()->set_endoff(req());
}
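
// Note on push_monitor above (an observation, not an original comment):
// add_req appends at the end of the edge list, which is correct here only
// because at this point scloff == endoff == req() -- scalarized-object
// entries are appended to the debug info later, by scalar replacement,
// after all monitors are in place.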
void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}
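
// Note (an observation, not an original comment): a SafePointScalarObjectNode
// does not hold field values itself; it is recorded in a SafePoint's debug
// info as a placeholder for an allocation eliminated by escape analysis.
// The values of the object's non-static fields live as debug edges of the
// owning SafePoint at indices [_first_index, _first_index + _n_fields);
// deoptimization reads them back to reallocate the object (see the changeset
// summary above and JVMState::format, which prints them as "ScObj" entries).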
uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}
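
// Cloning helper (an observation, not an original comment): when debug info
// is copied into a different JVMState, the jvms_adj delta re-bases
// _first_index so it still points at the right debug edges, and sosn_map
// memoizes clones so that a scalar object referenced from several debug
// slots is cloned exactly once and stays shared.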
SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  Compile* C = Compile::current();
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  res->_first_index += jvms_adj;
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}

#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
            first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

//=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant meaning that we don't introduce new lock operations on
// some paths so as to be able to eliminate them on others, a la PRE.
// This would probably require some more extensive graph manipulation
// to guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow.
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//               <obj>
//                 | \\------+
//                 |  \       \
//                 |  BoxLock   \
//                 |  |   |      \
//                 |  |    \      \
//                 |  |    FastLock
//                 |  |   /
//                 |  |  /
//                 |  |  |
//
//               Lock
//                 |
//               Proj #0
//                 |
//               MembarAcquire
//                 |
//               Proj #0
//
//               MembarRelease
//                 |
//               Proj #0
//                 |
//               Unlock
//                 |
//               Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate, they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//  - copy regions.  (These may not have been optimized away yet.)
//  - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// is operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      if ((lock->obj_node() == unlock->obj_node()) &&
          (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if ((lock->obj_node() == unlock->obj_node()) &&
        (lock->box_node() == unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if ((lock->obj_node() == lock1->obj_node()) &&
            (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}
#endif

void AbstractLockNode::set_eliminated() {
  _eliminate = true;
#ifndef PRODUCT
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
#endif
}

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node *result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL) {

      GrowableArray<AbstractLockNode*>   lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region() ) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock) locks++;
            else                           unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated to update any counters
          lock->set_eliminated();
        }
      } else if (result != NULL && ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node * result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    // We can eliminate them if there are no safepoints in the locked region.
    //
    ConnectionGraph *cgr = Compile::current()->congraph();
    if (cgr != NULL && cgr->escape_state(obj_node(), phase) == PointsToNode::NoEscape) {
      GrowableArray<AbstractLockNode*>   lock_ops;
      LockNode *lock = find_matching_lock(this);
      if (lock != NULL) {
        lock_ops.append(this);
        lock_ops.append(lock);
        // find other unlocks which pair with the lock we found and add them
        // to the list
        Node * box = box_node();

        for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) {
          Node *use = box->fast_out(i);
          if (use->is_Unlock() && use != this) {
            UnlockNode *unlock1 = use->as_Unlock();
            if (!unlock1->is_eliminated()) {
              LockNode *lock1 = find_matching_lock(unlock1);
              if (lock == lock1)
                lock_ops.append(unlock1);
              else if (lock1 == NULL) {
                // we can't find a matching lock, we must assume the worst
                lock_ops.trunc_to(0);
                break;
              }
            }
          }
        }
        if (lock_ops.length() > 0) {

#ifndef PRODUCT
          if (PrintEliminateLocks) {
            int locks = 0;
            int unlocks = 0;
            for (int i = 0; i < lock_ops.length(); i++) {
              AbstractLockNode* lock = lock_ops.at(i);
              if (lock->Opcode() == Op_Lock) locks++;
              else                           unlocks++;
              if (Verbose) {
                lock->dump(1);
              }
            }
            tty->print_cr("***Eliminated %d unescaped unlocks and %d unescaped locks", unlocks, locks);
          }
#endif

          // for each of the identified locks, mark them
          // as eliminatable
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);

            // Mark it eliminated to update any counters
            lock->set_eliminated();
          }
        }
      }
    }
  }
  return result;
}