Fri, 07 Mar 2008 11:09:13 -0800
6667605: (Escape Analysis) inline java constructors when EA is on
Summary: Java constructors should be inlined to make it possible to scalar replace a new object
Reviewed-by: rasbold
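
A minimal Java sketch of the pattern this fix targets (class and names are
illustrative): only once the constructor call is inlined can escape analysis
prove the allocation never escapes and scalar replace it, so no Point object
is ever created on the heap.

    class Point {
        int x, y;
        Point(int x, int y) { this.x = x; this.y = y; }  // must be inlined for EA
    }

    static int distSq(int dx, int dy) {
        Point p = new Point(dx, dy);     // candidate for scalar replacement
        return p.x * p.x + p.y * p.y;    // p never escapes: fields become locals
    }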
/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

#include "incls/_precompiled.incl"
#include "incls/_callnode.cpp.incl"

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C, 1) ConNode(Type::TOP);
      uint ideal_reg = Matcher::base2reg[t->base()];
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print(names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
      // Type of argument being passed
      const Type *t = in(0)->as_Start()->_domain->field_at(_con);
      return Matcher::base2reg[t->base()];
    }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *ReturnNode::Value( PhaseTransform *phase ) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void ReturnNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("returns");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *RethrowNode::Value( PhaseTransform *phase ) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("exception");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
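// A JVMState records the interpreter state captured at a safepoint.  As the
// constructors below show, the debug info edges of the owning SafePointNode
// are laid out as [ locals | expression stack | monitors ], with _locoff,
// _stkoff and _monoff marking the start of each section and _endoff one past
// the last monitor edge.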
JVMState::JVMState(ciMethod* method, JVMState* caller) {
  assert(method != NULL, "must be valid call site");
  _method = method;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) {
  _method = NULL;
  _bci = InvocationEntryBci;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci != q->_bci)          return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
#ifndef PRODUCT
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR, "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::KlassPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}
#endif

//------------------------------format-----------------------------------------
#ifndef PRODUCT
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print(" #");
  if( _method ) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for( i = 0; i < (uint)loc_size(); i++ )
      format_helper( regalloc, st, mcall->local(this, i), "L[", i );
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
        format_helper( regalloc, st, mcall->stack(this, i), "STK[", i );
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
        while( !box->is_BoxLock() )  box = box->in(1);
        format_helper( regalloc, st, box, "MON-BOX[", i );
      } else {
        OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
        st->print(" MON-BOX%d=%s+%d",
                  i,
                  OptoReg::regname(OptoReg::c_frame_pointer),
                  regalloc->reg2offset(box_reg));
      }
      format_helper( regalloc, st, obj, "MON-OBJ[", i );
    }
  }
  st->print_cr("");
  if (caller() != NULL)  caller()->format(regalloc, n, st);
}
#endif

#ifndef PRODUCT
void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}
#endif

#ifndef PRODUCT
void JVMState::dump_on(outputStream* st) const {
  if (_map && !((uintptr_t)_map & 1)) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(2);
  }
  st->print("JVMS depth=%d loc=%d stk=%d mon=%d end=%d mondepth=%d sp=%d bci=%d method=",
            depth(), locoff(), stkoff(), monoff(), endoff(), monitor_depth(), sp(), bci());
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print(" bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req() const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for( i=0; i<req(); i++ ) {    // For all required inputs
    if( i == TypeFunc::Parms ) tty->print("(");
    if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else tty->print("_ ");
  }
  tty->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type *CallNode::Value(PhaseTransform *phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}

//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(_in_dump_cnt != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                Deoptimization::format_trap_request(buf, sizeof(buf),
                                                    trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------

//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
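// Exception states are chained off a SafePointNode through a single
// precedence edge in slot req(): len() == req() means the node has no
// next exception state.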
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}

//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}

//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;

  return NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if he is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}

void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  if (GenerateSynchronizationCode) {
    add_req(lock->box_node());
    add_req(lock->obj_node());
  } else {
    add_req(NULL);
    add_req(NULL);
  }
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = (1<<JVMState::logMonitorEdges);
  int endoff = jvms()->endoff();
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_endoff(new_endoff);
  while (endoff > new_endoff)  del_req(--endoff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

//=============================================================================
uint AllocateArrayNode::size_of() const { return sizeof(*this); }

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations be fully
// redundant, meaning that we don't introduce new lock operations on
// some paths in order to be able to eliminate them on others, a la
// PRE.  This would probably require some more extensive graph
// manipulation to guarantee that the memory edges were all handled
// correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//   f();
//   s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between an unlock and a lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//       <obj>
//         | \\------+
//         |  \       \
//         |  BoxLock   \
//         |  |   |      \
//         |  |    \      \
//         |  |    FastLock
//         |  |   /
//         |  |  /
//         |  |  |
//
//         Lock
//          |
//         Proj #0
//          |
//         MembarAcquire
//          |
//         Proj #0
//
//         MembarRelease
//          |
//         Proj #0
//          |
//         Unlock
//          |
//         Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP.
//
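// As a source-level sketch of what this achieves (the method names are
// illustrative, not from this code), marking the back-to-back unlock/lock
// pair of
//
//   synchronized (o) { a(); }
//   synchronized (o) { b(); }
//
// as eliminated leaves behavior equivalent to
//
//   synchronized (o) { a(); b(); }
//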
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break;  // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock that
// operates on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      if ((lock->obj_node() == unlock->obj_node()) &&
          (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if ((lock->obj_node() == unlock->obj_node()) &&
        (lock->box_node() == unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if ((lock->obj_node() == lock1->obj_node()) &&
            (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}
#endif

void AbstractLockNode::set_eliminated() {
  _eliminate = true;
#ifndef PRODUCT
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
#endif
}

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node *result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region() ) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found lock preceded by multiple unlocks along all paths
          // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessors merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

#ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock) locks++;
            else unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
#endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated to update any counters
          lock->set_eliminated();
        }
      } else if (result != NULL && ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first
  Node * result = SafePointNode::Ideal(phase, can_reshape);

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (EliminateLocks && !is_eliminated()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock pair is
    // unnecessary.  We can eliminate it if there are no safepoints in the
    // locked region.
    //
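    // A source-level sketch of the pattern (the local name is illustrative):
    //
    //   Object buf = new Object();   // never escapes this method
    //   synchronized (buf) { ... }   // lock and unlock can be eliminated
    //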
    ConnectionGraph *cgr = Compile::current()->congraph();
    if (cgr != NULL && cgr->escape_state(obj_node(), phase) == PointsToNode::NoEscape) {
      GrowableArray<AbstractLockNode*> lock_ops;
      LockNode *lock = find_matching_lock(this);
      if (lock != NULL) {
        lock_ops.append(this);
        lock_ops.append(lock);
        // find other unlocks which pair with the lock we found and add them
        // to the list
        Node * box = box_node();

        for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) {
          Node *use = box->fast_out(i);
          if (use->is_Unlock() && use != this) {
            UnlockNode *unlock1 = use->as_Unlock();
            if (!unlock1->is_eliminated()) {
              LockNode *lock1 = find_matching_lock(unlock1);
              if (lock == lock1)
                lock_ops.append(unlock1);
              else if (lock1 == NULL) {
                // we can't find a matching lock, we must assume the worst
                lock_ops.trunc_to(0);
                break;
              }
            }
          }
        }
        if (lock_ops.length() > 0) {

#ifndef PRODUCT
          if (PrintEliminateLocks) {
            int locks = 0;
            int unlocks = 0;
            for (int i = 0; i < lock_ops.length(); i++) {
              AbstractLockNode* lock = lock_ops.at(i);
              if (lock->Opcode() == Op_Lock) locks++;
              else unlocks++;
              if (Verbose) {
                lock->dump(1);
              }
            }
            tty->print_cr("***Eliminated %d unescaped unlocks and %d unescaped locks", unlocks, locks);
          }
#endif

          // for each of the identified locks, mark them
          // as eliminatable
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);

            // Mark it eliminated to update any counters
            lock->set_eliminated();
          }
        }
      }
    }
  }
  return result;
}