Tue, 24 Jun 2008 10:43:29 -0700
6710487: More than half of JDI Regression tests hang with COOPs in -Xcomp mode
Summary: Remove DecodeNNode::decode() and EncodePNode::encode() methods.
Reviewed-by: rasbold, never
1 /*
2 * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // Portions of code courtesy of Clifford Click
27 // Optimization - Graph Style
29 #include "incls/_precompiled.incl"
30 #include "incls/_callnode.cpp.incl"
32 //=============================================================================
33 uint StartNode::size_of() const { return sizeof(*this); }
34 uint StartNode::cmp( const Node &n ) const
35 { return _domain == ((StartNode&)n)._domain; }
36 const Type *StartNode::bottom_type() const { return _domain; }
37 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
38 #ifndef PRODUCT
39 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
40 #endif
42 //------------------------------Ideal------------------------------------------
43 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
44 return remove_dead_region(phase, can_reshape) ? this : NULL;
45 }
47 //------------------------------calling_convention-----------------------------
48 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
49 Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
50 }
52 //------------------------------Registers--------------------------------------
53 const RegMask &StartNode::in_RegMask(uint) const {
54 return RegMask::Empty;
55 }
57 //------------------------------match------------------------------------------
58 // Construct projections for incoming parameters, and their RegMask info
59 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
60 switch (proj->_con) {
61 case TypeFunc::Control:
62 case TypeFunc::I_O:
63 case TypeFunc::Memory:
64 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
65 case TypeFunc::FramePtr:
66 return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
67 case TypeFunc::ReturnAdr:
68 return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
69 case TypeFunc::Parms:
70 default: {
71 uint parm_num = proj->_con - TypeFunc::Parms;
72 const Type *t = _domain->field_at(proj->_con);
73 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
74 return new (match->C, 1) ConNode(Type::TOP);
75 uint ideal_reg = Matcher::base2reg[t->base()];
76 RegMask &rm = match->_calling_convention_mask[parm_num];
77 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
78 }
79 }
80 return NULL;
81 }
83 //------------------------------StartOSRNode----------------------------------
84 // The method start node for an on stack replacement adapter
86 //------------------------------osr_domain-----------------------------
87 const TypeTuple *StartOSRNode::osr_domain() {
88 const Type **fields = TypeTuple::fields(2);
89 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
91 return TypeTuple::make(TypeFunc::Parms+1, fields);
92 }
94 //=============================================================================
95 const char * const ParmNode::names[TypeFunc::Parms+1] = {
96 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
97 };
99 #ifndef PRODUCT
100 void ParmNode::dump_spec(outputStream *st) const {
101 if( _con < TypeFunc::Parms ) {
102 st->print(names[_con]);
103 } else {
104 st->print("Parm%d: ",_con-TypeFunc::Parms);
105 // Verbose and WizardMode dump bottom_type for all nodes
106 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
107 }
108 }
109 #endif
111 uint ParmNode::ideal_reg() const {
112 switch( _con ) {
113 case TypeFunc::Control : // fall through
114 case TypeFunc::I_O : // fall through
115 case TypeFunc::Memory : return 0;
116 case TypeFunc::FramePtr : // fall through
117 case TypeFunc::ReturnAdr: return Op_RegP;
118 default : assert( _con > TypeFunc::Parms, "" );
119 // fall through
120 case TypeFunc::Parms : {
121 // Type of argument being passed
122 const Type *t = in(0)->as_Start()->_domain->field_at(_con);
123 return Matcher::base2reg[t->base()];
124 }
125 }
126 ShouldNotReachHere();
127 return 0;
128 }
130 //=============================================================================
131 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
132 init_req(TypeFunc::Control,cntrl);
133 init_req(TypeFunc::I_O,i_o);
134 init_req(TypeFunc::Memory,memory);
135 init_req(TypeFunc::FramePtr,frameptr);
136 init_req(TypeFunc::ReturnAdr,retadr);
137 }
139 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
140 return remove_dead_region(phase, can_reshape) ? this : NULL;
141 }
143 const Type *ReturnNode::Value( PhaseTransform *phase ) const {
144 return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
145 ? Type::TOP
146 : Type::BOTTOM;
147 }
149 // Do we Match on this edge index or not? No edges on return nodes
150 uint ReturnNode::match_edge(uint idx) const {
151 return 0;
152 }
155 #ifndef PRODUCT
156 void ReturnNode::dump_req() const {
157   // Dump the required inputs, printing "returns" before the return value input
158 uint i; // Exit value of loop
159 for( i=0; i<req(); i++ ) { // For all required inputs
160 if( i == TypeFunc::Parms ) tty->print("returns");
161 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
162 else tty->print("_ ");
163 }
164 }
165 #endif
167 //=============================================================================
168 RethrowNode::RethrowNode(
169 Node* cntrl,
170 Node* i_o,
171 Node* memory,
172 Node* frameptr,
173 Node* ret_adr,
174 Node* exception
175 ) : Node(TypeFunc::Parms + 1) {
176 init_req(TypeFunc::Control , cntrl );
177 init_req(TypeFunc::I_O , i_o );
178 init_req(TypeFunc::Memory , memory );
179 init_req(TypeFunc::FramePtr , frameptr );
180 init_req(TypeFunc::ReturnAdr, ret_adr);
181 init_req(TypeFunc::Parms , exception);
182 }
184 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
185 return remove_dead_region(phase, can_reshape) ? this : NULL;
186 }
188 const Type *RethrowNode::Value( PhaseTransform *phase ) const {
189 return (phase->type(in(TypeFunc::Control)) == Type::TOP)
190 ? Type::TOP
191 : Type::BOTTOM;
192 }
194 uint RethrowNode::match_edge(uint idx) const {
195 return 0;
196 }
198 #ifndef PRODUCT
199 void RethrowNode::dump_req() const {
200   // Dump the required inputs, printing "exception" before the exception input
201 uint i; // Exit value of loop
202 for( i=0; i<req(); i++ ) { // For all required inputs
203 if( i == TypeFunc::Parms ) tty->print("exception");
204 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
205 else tty->print("_ ");
206 }
207 }
208 #endif
210 //=============================================================================
211 // Do we Match on this edge index or not? Match only target address & method
212 uint TailCallNode::match_edge(uint idx) const {
213 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
214 }
216 //=============================================================================
217 // Do we Match on this edge index or not? Match only target address & oop
218 uint TailJumpNode::match_edge(uint idx) const {
219 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
220 }
222 //=============================================================================
223 JVMState::JVMState(ciMethod* method, JVMState* caller) {
224 assert(method != NULL, "must be valid call site");
225 _method = method;
226 debug_only(_bci = -99); // random garbage value
227 debug_only(_map = (SafePointNode*)-1);
228 _caller = caller;
229 _depth = 1 + (caller == NULL ? 0 : caller->depth());
230 _locoff = TypeFunc::Parms;
231 _stkoff = _locoff + _method->max_locals();
232 _monoff = _stkoff + _method->max_stack();
233 _scloff = _monoff;
234 _endoff = _monoff;
235 _sp = 0;
236 }
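// An illustrative sketch, not part of the original file: how the offsets set
// up above partition a SafePointNode's inputs for one JVMState frame (the
// ranges follow from the constructors here and from grow_stack(),
// push_monitor() and pop_monitor() below):
//
//   in(0) .. in(TypeFunc::Parms-1)   control, I/O, memory, frame ptr, ret adr
//   [locoff, stkoff)                 locals            (max_locals entries)
//   [stkoff, monoff)                 expression stack  (max_stack entries)
//   [monoff, scloff)                 monitor box/obj pairs (2 edges each)
//   [scloff, endoff)                 scalar-replaced object markers
//
// For inlined calls the caller's frames occupy the slots before the youngest
// frame, so the youngest JVMState's ranges come last (see debug_start() and
// debug_end() below).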
237 JVMState::JVMState(int stack_size) {
238 _method = NULL;
239 _bci = InvocationEntryBci;
240 debug_only(_map = (SafePointNode*)-1);
241 _caller = NULL;
242 _depth = 1;
243 _locoff = TypeFunc::Parms;
244 _stkoff = _locoff;
245 _monoff = _stkoff + stack_size;
246 _scloff = _monoff;
247 _endoff = _monoff;
248 _sp = 0;
249 }
251 //--------------------------------of_depth-------------------------------------
252 JVMState* JVMState::of_depth(int d) const {
253 const JVMState* jvmp = this;
254 assert(0 < d && (uint)d <= depth(), "oob");
255 for (int skip = depth() - d; skip > 0; skip--) {
256 jvmp = jvmp->caller();
257 }
258 assert(jvmp->depth() == (uint)d, "found the right one");
259 return (JVMState*)jvmp;
260 }
262 //-----------------------------same_calls_as-----------------------------------
263 bool JVMState::same_calls_as(const JVMState* that) const {
264 if (this == that) return true;
265 if (this->depth() != that->depth()) return false;
266 const JVMState* p = this;
267 const JVMState* q = that;
268 for (;;) {
269 if (p->_method != q->_method) return false;
270 if (p->_method == NULL) return true; // bci is irrelevant
271 if (p->_bci != q->_bci) return false;
272 p = p->caller();
273 q = q->caller();
274 if (p == q) return true;
275 assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
276 }
277 }
279 //------------------------------debug_start------------------------------------
280 uint JVMState::debug_start() const {
281 debug_only(JVMState* jvmroot = of_depth(1));
282 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
283 return of_depth(1)->locoff();
284 }
286 //-------------------------------debug_end-------------------------------------
287 uint JVMState::debug_end() const {
288 debug_only(JVMState* jvmroot = of_depth(1));
289 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
290 return endoff();
291 }
293 //------------------------------debug_depth------------------------------------
294 uint JVMState::debug_depth() const {
295 uint total = 0;
296 for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
297 total += jvmp->debug_size();
298 }
299 return total;
300 }
302 #ifndef PRODUCT
304 //------------------------------format_helper----------------------------------
305 // Given a register allocation (a Chaitin object) and a Node, decide if the Node carries
306 // any defined value or not. If it does, print out the register or constant.
307 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
308 if (n == NULL) { st->print(" NULL"); return; }
309 if (n->is_SafePointScalarObject()) {
310 // Scalar replacement.
311 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
312 scobjs->append_if_missing(spobj);
313 int sco_n = scobjs->find(spobj);
314 assert(sco_n >= 0, "");
315 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
316 return;
317 }
318 if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
319 char buf[50];
320 regalloc->dump_register(n,buf);
321 st->print(" %s%d]=%s",msg,i,buf);
322 } else { // No register, but might be constant
323 const Type *t = n->bottom_type();
324 switch (t->base()) {
325 case Type::Int:
326 st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
327 break;
328 case Type::AnyPtr:
329 assert( t == TypePtr::NULL_PTR, "" );
330 st->print(" %s%d]=#NULL",msg,i);
331 break;
332 case Type::AryPtr:
333 case Type::KlassPtr:
334 case Type::InstPtr:
335 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
336 break;
337 case Type::RawPtr:
338 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
339 break;
340 case Type::DoubleCon:
341 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
342 break;
343 case Type::FloatCon:
344 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
345 break;
346 case Type::Long:
347 st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
348 break;
349 case Type::Half:
350 case Type::Top:
351 st->print(" %s%d]=_",msg,i);
352 break;
353 default: ShouldNotReachHere();
354 }
355 }
356 }
358 //------------------------------format-----------------------------------------
359 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
360 st->print(" #");
361 if( _method ) {
362 _method->print_short_name(st);
363 st->print(" @ bci:%d ",_bci);
364 } else {
365 st->print_cr(" runtime stub ");
366 return;
367 }
368 if (n->is_MachSafePoint()) {
369 GrowableArray<SafePointScalarObjectNode*> scobjs;
370 MachSafePointNode *mcall = n->as_MachSafePoint();
371 uint i;
372 // Print locals
373 for( i = 0; i < (uint)loc_size(); i++ )
374 format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
375 // Print stack
376 for (i = 0; i < (uint)stk_size(); i++) {
377 if ((uint)(_stkoff + i) >= mcall->len())
378 st->print(" oob ");
379 else
380 format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
381 }
382 for (i = 0; (int)i < nof_monitors(); i++) {
383 Node *box = mcall->monitor_box(this, i);
384 Node *obj = mcall->monitor_obj(this, i);
385 if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
386 while( !box->is_BoxLock() ) box = box->in(1);
387 format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
388 } else {
389 OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
390 st->print(" MON-BOX%d=%s+%d",
391 i,
392 OptoReg::regname(OptoReg::c_frame_pointer),
393 regalloc->reg2offset(box_reg));
394 }
395 format_helper( regalloc, st, obj, "MON-OBJ[", i, &scobjs );
396 }
398 for (i = 0; i < (uint)scobjs.length(); i++) {
399 // Scalar replaced objects.
400 st->print_cr("");
401 st->print(" # ScObj" INT32_FORMAT " ", i);
402 SafePointScalarObjectNode* spobj = scobjs.at(i);
403 ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
404 assert(cik->is_instance_klass() ||
405 cik->is_array_klass(), "Not supported allocation.");
406 ciInstanceKlass *iklass = NULL;
407 if (cik->is_instance_klass()) {
408 cik->print_name_on(st);
409 iklass = cik->as_instance_klass();
410 } else if (cik->is_type_array_klass()) {
411 cik->as_array_klass()->base_element_type()->print_name_on(st);
412 st->print("[%d]=", spobj->n_fields());
413 } else if (cik->is_obj_array_klass()) {
414 ciType* cie = cik->as_array_klass()->base_element_type();
415 int ndim = 1;
416 while (cie->is_obj_array_klass()) {
417 ndim += 1;
418 cie = cie->as_array_klass()->base_element_type();
419 }
420 cie->print_name_on(st);
421 while (ndim-- > 0) {
422 st->print("[]");
423 }
424 st->print("[%d]=", spobj->n_fields());
425 }
426 st->print("{");
427 uint nf = spobj->n_fields();
428 if (nf > 0) {
429 uint first_ind = spobj->first_index();
430 Node* fld_node = mcall->in(first_ind);
431 ciField* cifield;
432 if (iklass != NULL) {
433 st->print(" [");
434 cifield = iklass->nonstatic_field_at(0);
435 cifield->print_name_on(st);
436 format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
437 } else {
438 format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
439 }
440 for (uint j = 1; j < nf; j++) {
441 fld_node = mcall->in(first_ind+j);
442 if (iklass != NULL) {
443 st->print(", [");
444 cifield = iklass->nonstatic_field_at(j);
445 cifield->print_name_on(st);
446 format_helper( regalloc, st, fld_node, ":", j, &scobjs );
447 } else {
448 format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
449 }
450 }
451 }
452 st->print(" }");
453 }
454 }
455 st->print_cr("");
456 if (caller() != NULL) caller()->format(regalloc, n, st);
457 }
460 void JVMState::dump_spec(outputStream *st) const {
461 if (_method != NULL) {
462 bool printed = false;
463 if (!Verbose) {
464 // The JVMS dumps make really, really long lines.
465 // Take out the most boring parts, which are the package prefixes.
466 char buf[500];
467 stringStream namest(buf, sizeof(buf));
468 _method->print_short_name(&namest);
469 if (namest.count() < sizeof(buf)) {
470 const char* name = namest.base();
471 if (name[0] == ' ') ++name;
472 const char* endcn = strchr(name, ':'); // end of class name
473 if (endcn == NULL) endcn = strchr(name, '(');
474 if (endcn == NULL) endcn = name + strlen(name);
475 while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
476 --endcn;
477 st->print(" %s", endcn);
478 printed = true;
479 }
480 }
481 if (!printed)
482 _method->print_short_name(st);
483 st->print(" @ bci:%d",_bci);
484 } else {
485 st->print(" runtime stub");
486 }
487 if (caller() != NULL) caller()->dump_spec(st);
488 }
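// Illustrative example, not from the original file: with Verbose off, the
// package prefix is trimmed above, so a frame for java.lang.String::hashCode
// at bci 12 would be dumped roughly as " String::hashCode @ bci:12".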
491 void JVMState::dump_on(outputStream* st) const {
492 if (_map && !((uintptr_t)_map & 1)) {
493 if (_map->len() > _map->req()) { // _map->has_exceptions()
494 Node* ex = _map->in(_map->req()); // _map->next_exception()
495 // skip the first one; it's already being printed
496 while (ex != NULL && ex->len() > ex->req()) {
497 ex = ex->in(ex->req()); // ex->next_exception()
498 ex->dump(1);
499 }
500 }
501 _map->dump(2);
502 }
503 st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=",
504 depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci());
505 if (_method == NULL) {
506 st->print_cr("(none)");
507 } else {
508 _method->print_name(st);
509 st->cr();
510 if (bci() >= 0 && bci() < _method->code_size()) {
511 st->print(" bc: ");
512 _method->print_codes_on(bci(), bci()+1, st);
513 }
514 }
515 if (caller() != NULL) {
516 caller()->dump_on(st);
517 }
518 }
520 // Extra way to dump a jvms from the debugger,
521 // to avoid a bug with C++ member function calls.
522 void dump_jvms(JVMState* jvms) {
523 jvms->dump();
524 }
525 #endif
527 //--------------------------clone_shallow--------------------------------------
528 JVMState* JVMState::clone_shallow(Compile* C) const {
529 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
530 n->set_bci(_bci);
531 n->set_locoff(_locoff);
532 n->set_stkoff(_stkoff);
533 n->set_monoff(_monoff);
534 n->set_scloff(_scloff);
535 n->set_endoff(_endoff);
536 n->set_sp(_sp);
537 n->set_map(_map);
538 return n;
539 }
541 //---------------------------clone_deep----------------------------------------
542 JVMState* JVMState::clone_deep(Compile* C) const {
543 JVMState* n = clone_shallow(C);
544 for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
545 p->_caller = p->_caller->clone_shallow(C);
546 }
547 assert(n->depth() == depth(), "sanity");
548 assert(n->debug_depth() == debug_depth(), "sanity");
549 return n;
550 }
552 //=============================================================================
553 uint CallNode::cmp( const Node &n ) const
554 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
555 #ifndef PRODUCT
556 void CallNode::dump_req() const {
557 // Dump the required inputs, enclosed in '(' and ')'
558 uint i; // Exit value of loop
559 for( i=0; i<req(); i++ ) { // For all required inputs
560 if( i == TypeFunc::Parms ) tty->print("(");
561 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
562 else tty->print("_ ");
563 }
564 tty->print(")");
565 }
567 void CallNode::dump_spec(outputStream *st) const {
568 st->print(" ");
569 tf()->dump_on(st);
570 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
571 if (jvms() != NULL) jvms()->dump_spec(st);
572 }
573 #endif
575 const Type *CallNode::bottom_type() const { return tf()->range(); }
576 const Type *CallNode::Value(PhaseTransform *phase) const {
577 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
578 return tf()->range();
579 }
581 //------------------------------calling_convention-----------------------------
582 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
583 // Use the standard compiler calling convention
584 Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
585 }
588 //------------------------------match------------------------------------------
589 // Construct projections for control, I/O, memory-fields, ..., and
590 // return result(s) along with their RegMask info
591 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
592 switch (proj->_con) {
593 case TypeFunc::Control:
594 case TypeFunc::I_O:
595 case TypeFunc::Memory:
596 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
598 case TypeFunc::Parms+1: // For LONG & DOUBLE returns
599 assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
600 // 2nd half of doubles and longs
601 return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
603 case TypeFunc::Parms: { // Normal returns
604 uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
605 OptoRegPair regs = is_CallRuntime()
606 ? match->c_return_value(ideal_reg,true) // Calls into C runtime
607 : match-> return_value(ideal_reg,true); // Calls into compiled Java code
608 RegMask rm = RegMask(regs.first());
609 if( OptoReg::is_valid(regs.second()) )
610 rm.Insert( regs.second() );
611 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
612 }
614 case TypeFunc::ReturnAdr:
615 case TypeFunc::FramePtr:
616 default:
617 ShouldNotReachHere();
618 }
619 return NULL;
620 }
622 // Do we Match on this edge index or not? Match no edges
623 uint CallNode::match_edge(uint idx) const {
624 return 0;
625 }
627 //
628 // Determine whether the call could modify the field of the specified
629 // instance at the specified offset.
630 //
631 bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
632 const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
634 // if not an InstPtr or not an instance type, assume the worst
635 if (adrInst_t == NULL || !adrInst_t->is_instance_field()) {
636 return true;
637 }
638 Compile *C = phase->C;
639 int offset = adrInst_t->offset();
640 assert(adrInst_t->klass_is_exact() && offset >= 0, "should be valid offset");
641 ciKlass* adr_k = adrInst_t->klass();
642 assert(adr_k->is_loaded() &&
643 adr_k->is_java_klass() &&
644 !adr_k->is_interface(),
645 "only non-abstract classes are expected");
647 int base_idx = C->get_alias_index(adrInst_t);
648 int size = BytesPerLong; // If we don't know the size, assume largest.
649 if (adrInst_t->isa_instptr()) {
650 ciField* field = C->alias_type(base_idx)->field();
651 if (field != NULL) {
652 size = field->size_in_bytes();
653 }
654 } else {
655 assert(adrInst_t->isa_aryptr(), "only arrays are expected");
656 size = type2aelembytes(adr_k->as_array_klass()->element_type()->basic_type());
657 }
659 ciMethod * meth = is_CallStaticJava() ? as_CallStaticJava()->method() : NULL;
660 BCEscapeAnalyzer *bcea = (meth != NULL) ? meth->get_bcea() : NULL;
662 const TypeTuple * d = tf()->domain();
663 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
664 const Type* t = d->field_at(i);
665 Node *arg = in(i);
666 const Type *at = phase->type(arg);
667 if (at == TypePtr::NULL_PTR || at == Type::TOP)
668 continue; // null can't affect anything
670 const TypeOopPtr *at_ptr = at->isa_oopptr();
671 if (!arg->is_top() && (t->isa_oopptr() != NULL ||
672 t->isa_ptr() && at_ptr != NULL)) {
673 assert(at_ptr != NULL, "expecting an OopPtr");
674 ciKlass* at_k = at_ptr->klass();
675 if ((adrInst_t->base() == at_ptr->base()) &&
676 at_k->is_loaded() &&
677 at_k->is_java_klass()) {
678 // If we have found an argument matching addr_t, check if the field
679 // at the specified offset is modified.
680 if ((at_k->is_interface() || adr_k == at_k ||
681 adr_k->is_subclass_of(at_k) && !at_ptr->klass_is_exact()) &&
682 (bcea == NULL ||
683 bcea->is_arg_modified(i - TypeFunc::Parms, offset, size))) {
684 return true;
685 }
686 }
687 }
688 }
689 return false;
690 }
692 // Does this call have a direct reference to n other than debug information?
693 bool CallNode::has_non_debug_use(Node *n) {
694 const TypeTuple * d = tf()->domain();
695 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
696 Node *arg = in(i);
697 if (arg == n) {
698 return true;
699 }
700 }
701 return false;
702 }
704 // Returns the unique CheckCastPP of a call
705 // or 'this' if there are several CheckCastPPs,
706 // or NULL if there is none.
707 Node *CallNode::result_cast() {
708 Node *cast = NULL;
710 Node *p = proj_out(TypeFunc::Parms);
711 if (p == NULL)
712 return NULL;
714 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
715 Node *use = p->fast_out(i);
716 if (use->is_CheckCastPP()) {
717 if (cast != NULL) {
718 return this; // more than 1 CheckCastPP
719 }
720 cast = use;
721 }
722 }
723 return cast;
724 }
727 //=============================================================================
728 uint CallJavaNode::size_of() const { return sizeof(*this); }
729 uint CallJavaNode::cmp( const Node &n ) const {
730 CallJavaNode &call = (CallJavaNode&)n;
731 return CallNode::cmp(call) && _method == call._method;
732 }
733 #ifndef PRODUCT
734 void CallJavaNode::dump_spec(outputStream *st) const {
735 if( _method ) _method->print_short_name(st);
736 CallNode::dump_spec(st);
737 }
738 #endif
740 //=============================================================================
741 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
742 uint CallStaticJavaNode::cmp( const Node &n ) const {
743 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
744 return CallJavaNode::cmp(call);
745 }
747 //----------------------------uncommon_trap_request----------------------------
748 // If this is an uncommon trap, return the request code, else zero.
749 int CallStaticJavaNode::uncommon_trap_request() const {
750 if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
751 return extract_uncommon_trap_request(this);
752 }
753 return 0;
754 }
755 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
756 #ifndef PRODUCT
757 if (!(call->req() > TypeFunc::Parms &&
758 call->in(TypeFunc::Parms) != NULL &&
759 call->in(TypeFunc::Parms)->is_Con())) {
760 assert(_in_dump_cnt != 0, "OK if dumping");
761 tty->print("[bad uncommon trap]");
762 return 0;
763 }
764 #endif
765 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
766 }
768 #ifndef PRODUCT
769 void CallStaticJavaNode::dump_spec(outputStream *st) const {
770 st->print("# Static ");
771 if (_name != NULL) {
772 st->print("%s", _name);
773 int trap_req = uncommon_trap_request();
774 if (trap_req != 0) {
775 char buf[100];
776 st->print("(%s)",
777 Deoptimization::format_trap_request(buf, sizeof(buf),
778 trap_req));
779 }
780 st->print(" ");
781 }
782 CallJavaNode::dump_spec(st);
783 }
784 #endif
786 //=============================================================================
787 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
788 uint CallDynamicJavaNode::cmp( const Node &n ) const {
789 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
790 return CallJavaNode::cmp(call);
791 }
792 #ifndef PRODUCT
793 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
794 st->print("# Dynamic ");
795 CallJavaNode::dump_spec(st);
796 }
797 #endif
799 //=============================================================================
800 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
801 uint CallRuntimeNode::cmp( const Node &n ) const {
802 CallRuntimeNode &call = (CallRuntimeNode&)n;
803 return CallNode::cmp(call) && !strcmp(_name,call._name);
804 }
805 #ifndef PRODUCT
806 void CallRuntimeNode::dump_spec(outputStream *st) const {
807 st->print("# ");
808 st->print(_name);
809 CallNode::dump_spec(st);
810 }
811 #endif
813 //------------------------------calling_convention-----------------------------
814 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
815 Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
816 }
818 //=============================================================================
819 //------------------------------calling_convention-----------------------------
822 //=============================================================================
823 #ifndef PRODUCT
824 void CallLeafNode::dump_spec(outputStream *st) const {
825 st->print("# ");
826 st->print(_name);
827 CallNode::dump_spec(st);
828 }
829 #endif
831 //=============================================================================
833 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
834 assert(verify_jvms(jvms), "jvms must match");
835 int loc = jvms->locoff() + idx;
836 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
837   // If the current local idx is top then local idx - 1 could
838   // be a long/double that needs to be killed since top could
839   // represent the 2nd half of the long/double.
840 uint ideal = in(loc -1)->ideal_reg();
841 if (ideal == Op_RegD || ideal == Op_RegL) {
842 // set other (low index) half to top
843 set_req(loc - 1, in(loc));
844 }
845 }
846 set_req(loc, c);
847 }
849 uint SafePointNode::size_of() const { return sizeof(*this); }
850 uint SafePointNode::cmp( const Node &n ) const {
851 return (&n == this); // Always fail except on self
852 }
854 //-------------------------set_next_exception----------------------------------
855 void SafePointNode::set_next_exception(SafePointNode* n) {
856 assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
857 if (len() == req()) {
858 if (n != NULL) add_prec(n);
859 } else {
860 set_prec(req(), n);
861 }
862 }
865 //----------------------------next_exception-----------------------------------
866 SafePointNode* SafePointNode::next_exception() const {
867 if (len() == req()) {
868 return NULL;
869 } else {
870 Node* n = in(req());
871 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
872 return (SafePointNode*) n;
873 }
874 }
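// A minimal sketch, not from the original file, of walking the exception
// chain threaded through the precedence edge by set_next_exception() above;
// the variable name "map" is hypothetical:
//
//   for (SafePointNode* ex = map->next_exception();
//        ex != NULL;
//        ex = ex->next_exception()) {
//     // each ex is another SafePoint carrying a pending exception state
//   }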
877 //------------------------------Ideal------------------------------------------
878 // Skip over any collapsed Regions
879 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
880 if (remove_dead_region(phase, can_reshape)) return this;
882 return NULL;
883 }
885 //------------------------------Identity---------------------------------------
886 // Remove obviously duplicate safepoints
887 Node *SafePointNode::Identity( PhaseTransform *phase ) {
889 // If you have back to back safepoints, remove one
890 if( in(TypeFunc::Control)->is_SafePoint() )
891 return in(TypeFunc::Control);
893 if( in(0)->is_Proj() ) {
894 Node *n0 = in(0)->in(0);
895     // Check if it is a call projection (except Leaf Call)
896 if( n0->is_Catch() ) {
897 n0 = n0->in(0)->in(0);
898 assert( n0->is_Call(), "expect a call here" );
899 }
900 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
901 // Useless Safepoint, so remove it
902 return in(TypeFunc::Control);
903 }
904 }
906 return this;
907 }
909 //------------------------------Value------------------------------------------
910 const Type *SafePointNode::Value( PhaseTransform *phase ) const {
911 if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
912 if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
913 return Type::CONTROL;
914 }
916 #ifndef PRODUCT
917 void SafePointNode::dump_spec(outputStream *st) const {
918 st->print(" SafePoint ");
919 }
920 #endif
922 const RegMask &SafePointNode::in_RegMask(uint idx) const {
923 if( idx < TypeFunc::Parms ) return RegMask::Empty;
924 // Values outside the domain represent debug info
925 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
926 }
927 const RegMask &SafePointNode::out_RegMask() const {
928 return RegMask::Empty;
929 }
932 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
933 assert((int)grow_by > 0, "sanity");
934 int monoff = jvms->monoff();
935 int scloff = jvms->scloff();
936 int endoff = jvms->endoff();
937 assert(endoff == (int)req(), "no other states or debug info after me");
938 Node* top = Compile::current()->top();
939 for (uint i = 0; i < grow_by; i++) {
940 ins_req(monoff, top);
941 }
942 jvms->set_monoff(monoff + grow_by);
943 jvms->set_scloff(scloff + grow_by);
944 jvms->set_endoff(endoff + grow_by);
945 }
947 void SafePointNode::push_monitor(const FastLockNode *lock) {
948   // Add the monitor's BoxLockNode (the stack space for the monitor) and
949   // the Object being locked to this safepoint's debug info.
950 const int MonitorEdges = 2;
951 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
952 assert(req() == jvms()->endoff(), "correct sizing");
953 int nextmon = jvms()->scloff();
954 if (GenerateSynchronizationCode) {
955 add_req(lock->box_node());
956 add_req(lock->obj_node());
957 } else {
958 add_req(NULL);
959 add_req(NULL);
960 }
961 jvms()->set_scloff(nextmon+MonitorEdges);
962 jvms()->set_endoff(req());
963 }
965 void SafePointNode::pop_monitor() {
966 // Delete last monitor from debug info
967 debug_only(int num_before_pop = jvms()->nof_monitors());
968 const int MonitorEdges = (1<<JVMState::logMonitorEdges);
969 int scloff = jvms()->scloff();
970 int endoff = jvms()->endoff();
971 int new_scloff = scloff - MonitorEdges;
972 int new_endoff = endoff - MonitorEdges;
973 jvms()->set_scloff(new_scloff);
974 jvms()->set_endoff(new_endoff);
975 while (scloff > new_scloff) del_req(--scloff);
976 assert(jvms()->nof_monitors() == num_before_pop-1, "");
977 }
979 Node *SafePointNode::peek_monitor_box() const {
980 int mon = jvms()->nof_monitors() - 1;
981   assert(mon >= 0, "must have a monitor");
982 return monitor_box(jvms(), mon);
983 }
985 Node *SafePointNode::peek_monitor_obj() const {
986 int mon = jvms()->nof_monitors() - 1;
987   assert(mon >= 0, "must have a monitor");
988 return monitor_obj(jvms(), mon);
989 }
991 // Do we Match on this edge index or not? Match no edges
992 uint SafePointNode::match_edge(uint idx) const {
993 if( !needs_polling_address_input() )
994 return 0;
996 return (TypeFunc::Parms == idx);
997 }
999 //============== SafePointScalarObjectNode ==============
1001 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1002 #ifdef ASSERT
1003 AllocateNode* alloc,
1004 #endif
1005 uint first_index,
1006 uint n_fields) :
1007 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1008 #ifdef ASSERT
1009 _alloc(alloc),
1010 #endif
1011 _first_index(first_index),
1012 _n_fields(n_fields)
1013 {
1014 init_class_id(Class_SafePointScalarObject);
1015 }
1018 uint SafePointScalarObjectNode::ideal_reg() const {
1019 return 0; // No matching to machine instruction
1020 }
1022 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1023 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1024 }
1026 const RegMask &SafePointScalarObjectNode::out_RegMask() const {
1027 return RegMask::Empty;
1028 }
1030 uint SafePointScalarObjectNode::match_edge(uint idx) const {
1031 return 0;
1032 }
1034 SafePointScalarObjectNode*
1035 SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
1036 void* cached = (*sosn_map)[(void*)this];
1037 if (cached != NULL) {
1038 return (SafePointScalarObjectNode*)cached;
1039 }
1040 Compile* C = Compile::current();
1041 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1042 res->_first_index += jvms_adj;
1043 sosn_map->Insert((void*)this, (void*)res);
1044 return res;
1045 }
1048 #ifndef PRODUCT
1049 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1050 st->print(" # fields@[%d..%d]", first_index(),
1051 first_index() + n_fields() - 1);
1052 }
1054 #endif
1056 //=============================================================================
1057 uint AllocateNode::size_of() const { return sizeof(*this); }
1059 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1060 Node *ctrl, Node *mem, Node *abio,
1061 Node *size, Node *klass_node, Node *initial_test)
1062 : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1063 {
1064 init_class_id(Class_Allocate);
1065 init_flags(Flag_is_macro);
1066 _is_scalar_replaceable = false;
1067 Node *topnode = C->top();
1069 init_req( TypeFunc::Control , ctrl );
1070 init_req( TypeFunc::I_O , abio );
1071 init_req( TypeFunc::Memory , mem );
1072 init_req( TypeFunc::ReturnAdr, topnode );
1073 init_req( TypeFunc::FramePtr , topnode );
1074 init_req( AllocSize , size);
1075 init_req( KlassNode , klass_node);
1076 init_req( InitialTest , initial_test);
1077 init_req( ALength , topnode);
1078 C->add_macro_node(this);
1079 }
1081 //=============================================================================
1082 uint AllocateArrayNode::size_of() const { return sizeof(*this); }
1084 //=============================================================================
1085 uint LockNode::size_of() const { return sizeof(*this); }
1087 // Redundant lock elimination
1088 //
1089 // There are various patterns of locking where we release and
1090 // immediately reacquire a lock in a piece of code where no operations
1091 // occur in between that would be observable. In those cases we can
1092 // skip releasing and reacquiring the lock without violating any
1093 // fairness requirements. Doing this around a loop could cause a lock
1094 // to be held for a very long time so we concentrate on non-looping
1095 // control flow. We also require that the operations are fully
1096 // redundant, meaning that we don't introduce new lock operations on
1097 // some paths just to be able to eliminate them on others, as in PRE. This
1098 // would probably require some more extensive graph manipulation to
1099 // guarantee that the memory edges were all handled correctly.
1100 //
1101 // Assuming p is a simple predicate which can't trap in any way and s
1102 // is a synchronized method consider this code:
1103 //
1104 // s();
1105 // if (p)
1106 // s();
1107 // else
1108 // s();
1109 // s();
1110 //
1111 // 1. The unlocks of the first call to s can be eliminated if the
1112 // locks inside the then and else branches are eliminated.
1113 //
1114 // 2. The unlocks of the then and else branches can be eliminated if
1115 // the lock of the final call to s is eliminated.
1116 //
1117 // Either of these cases subsumes the simple case of sequential control flow
1118 //
1119 // Additionally we can eliminate versions without the else case:
1120 //
1121 // s();
1122 // if (p)
1123 // s();
1124 // s();
1125 //
1126 // 3. In this case we eliminate the unlock of the first s, the lock
1127 // and unlock in the then case and the lock in the final s.
1128 //
1129 // Note also that in all these cases the then/else pieces don't have
1130 // to be trivial as long as they begin and end with synchronization
1131 // operations.
1132 //
1133 // s();
1134 // if (p)
1135 // s();
1136 // f();
1137 // s();
1138 // s();
1139 //
1140 // The code will work properly for this case, leaving in the unlock
1141 // before the call to f and the relock after it.
1142 //
1143 // A potentially interesting case which isn't handled here is when the
1144 // locking is partially redundant.
1145 //
1146 // s();
1147 // if (p)
1148 // s();
1149 //
1150 // This could be eliminated by putting unlocking on the else case and
1151 // eliminating the first unlock and the lock in the then side.
1152 // Alternatively the unlock could be moved out of the then side so it
1153 // was after the merge and the first unlock and second lock
1154 // eliminated. This might require less manipulation of the memory
1155 // state to get correct.
1156 //
1157 // Additionally we might allow work between an unlock and lock before
1158 // giving up eliminating the locks. The current code disallows any
1159 // conditional control flow between these operations. A formulation
1160 // similar to partial redundancy elimination computing the
1161 // availability of unlocking and the anticipatability of locking at a
1162 // program point would allow detection of fully redundant locking with
1163 // some amount of work in between. I'm not sure how often I really
1164 // think that would occur though. Most of the cases I've seen
1165 // indicate it's likely non-trivial work would occur in between.
1166 // There may be other more complicated constructs where we could
1167 // eliminate locking but I haven't seen any others appear as hot or
1168 // interesting.
1169 //
1170 // Locking and unlocking have a canonical form in ideal that looks
1171 // roughly like this:
1172 //
1173 // <obj>
1174 // | \\------+
1175 // | \ \
1176 // | BoxLock \
1177 // | | | \
1178 // | | \ \
1179 // | | FastLock
1180 // | | /
1181 // | | /
1182 // | | |
1183 //
1184 // Lock
1185 // |
1186 // Proj #0
1187 // |
1188 // MembarAcquire
1189 // |
1190 // Proj #0
1191 //
1192 // MembarRelease
1193 // |
1194 // Proj #0
1195 // |
1196 // Unlock
1197 // |
1198 // Proj #0
1199 //
1200 //
1201 // This code proceeds by processing Lock nodes during PhaseIterGVN
1202 // and searching back through its control for the proper code
1203 // patterns. Once it finds a set of lock and unlock operations to
1204 // eliminate, they are marked as eliminatable, which causes the
1205 // expansion of the Lock and Unlock macro nodes to reduce them to NOPs.
1206 //
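// An illustrative example, not part of the original comment: the trivial
// "unlock directly preceding a lock" case handled below typically comes from
// back-to-back synchronization on the same object in Java source, e.g.
//
//   synchronized (x) { a(); }
//   synchronized (x) { b(); }
//
// where the Unlock ending the first block and the Lock starting the second
// are both marked eliminated, so the monitor stays held across a() and b().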
1207 //=============================================================================
1209 //
1210 // Utility function to skip over uninteresting control nodes. Nodes skipped are:
1211 // - copy regions. (These may not have been optimized away yet.)
1212 // - eliminated locking nodes
1213 //
1214 static Node *next_control(Node *ctrl) {
1215 if (ctrl == NULL)
1216 return NULL;
1217 while (1) {
1218 if (ctrl->is_Region()) {
1219 RegionNode *r = ctrl->as_Region();
1220 Node *n = r->is_copy();
1221 if (n == NULL)
1222 break; // hit a region, return it
1223 else
1224 ctrl = n;
1225 } else if (ctrl->is_Proj()) {
1226 Node *in0 = ctrl->in(0);
1227 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
1228 ctrl = in0->in(0);
1229 } else {
1230 break;
1231 }
1232 } else {
1233 break; // found an interesting control
1234 }
1235 }
1236 return ctrl;
1237 }
1238 //
1239 // Given a control, see if it's the control projection of an Unlock which
1240 // operates on the same object as the lock.
1241 //
1242 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1243 GrowableArray<AbstractLockNode*> &lock_ops) {
1244 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1245 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1246 Node *n = ctrl_proj->in(0);
1247 if (n != NULL && n->is_Unlock()) {
1248 UnlockNode *unlock = n->as_Unlock();
1249 if ((lock->obj_node() == unlock->obj_node()) &&
1250 (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
1251 lock_ops.append(unlock);
1252 return true;
1253 }
1254 }
1255 }
1256 return false;
1257 }
1259 //
1260 // Find the lock matching an unlock. Returns null if a safepoint
1261 // or complicated control is encountered first.
1262 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1263 LockNode *lock_result = NULL;
1264 // find the matching lock, or an intervening safepoint
1265 Node *ctrl = next_control(unlock->in(0));
1266 while (1) {
1267 assert(ctrl != NULL, "invalid control graph");
1268 assert(!ctrl->is_Start(), "missing lock for unlock");
1269 if (ctrl->is_top()) break; // dead control path
1270 if (ctrl->is_Proj()) ctrl = ctrl->in(0);
1271 if (ctrl->is_SafePoint()) {
1272 break; // found a safepoint (may be the lock we are searching for)
1273 } else if (ctrl->is_Region()) {
1274 // Check for a simple diamond pattern. Punt on anything more complicated
1275 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1276 Node *in1 = next_control(ctrl->in(1));
1277 Node *in2 = next_control(ctrl->in(2));
1278 if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1279 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1280 ctrl = next_control(in1->in(0)->in(0));
1281 } else {
1282 break;
1283 }
1284 } else {
1285 break;
1286 }
1287 } else {
1288 ctrl = next_control(ctrl->in(0)); // keep searching
1289 }
1290 }
1291 if (ctrl->is_Lock()) {
1292 LockNode *lock = ctrl->as_Lock();
1293 if ((lock->obj_node() == unlock->obj_node()) &&
1294 (lock->box_node() == unlock->box_node())) {
1295 lock_result = lock;
1296 }
1297 }
1298 return lock_result;
1299 }
1301 // This code corresponds to case 3 above.
1303 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1304 GrowableArray<AbstractLockNode*> &lock_ops) {
1305 Node* if_node = node->in(0);
1306 bool if_true = node->is_IfTrue();
1308 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
1309 Node *lock_ctrl = next_control(if_node->in(0));
1310 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
1311 Node* lock1_node = NULL;
1312 ProjNode* proj = if_node->as_If()->proj_out(!if_true);
1313 if (if_true) {
1314 if (proj->is_IfFalse() && proj->outcnt() == 1) {
1315 lock1_node = proj->unique_out();
1316 }
1317 } else {
1318 if (proj->is_IfTrue() && proj->outcnt() == 1) {
1319 lock1_node = proj->unique_out();
1320 }
1321 }
1322 if (lock1_node != NULL && lock1_node->is_Lock()) {
1323 LockNode *lock1 = lock1_node->as_Lock();
1324 if ((lock->obj_node() == lock1->obj_node()) &&
1325 (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
1326 lock_ops.append(lock1);
1327 return true;
1328 }
1329 }
1330 }
1331 }
1333 lock_ops.trunc_to(0);
1334 return false;
1335 }
1337 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1338 GrowableArray<AbstractLockNode*> &lock_ops) {
1339 // check each control merging at this point for a matching unlock.
1340 // in(0) should be self edge so skip it.
1341 for (int i = 1; i < (int)region->req(); i++) {
1342 Node *in_node = next_control(region->in(i));
1343 if (in_node != NULL) {
1344 if (find_matching_unlock(in_node, lock, lock_ops)) {
1345 // found a match so keep on checking.
1346 continue;
1347 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
1348 continue;
1349 }
1351 // If we fall through to here then it was some kind of node we
1352 // don't understand or there wasn't a matching unlock, so give
1353 // up trying to merge locks.
1354 lock_ops.trunc_to(0);
1355 return false;
1356 }
1357 }
1358 return true;
1360 }
1362 #ifndef PRODUCT
1363 //
1364 // Create a counter which counts the number of times this lock is acquired
1365 //
1366 void AbstractLockNode::create_lock_counter(JVMState* state) {
1367 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
1368 }
1369 #endif
1371 void AbstractLockNode::set_eliminated() {
1372 _eliminate = true;
1373 #ifndef PRODUCT
1374 if (_counter) {
1375 // Update the counter to indicate that this lock was eliminated.
1376 // The counter update code will stay around even though the
1377 // optimizer will eliminate the lock operation itself.
1378 _counter->set_tag(NamedCounter::EliminatedLockCounter);
1379 }
1380 #endif
1381 }
1383 //=============================================================================
1384 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1386 // perform any generic optimizations first (returns 'this' or NULL)
1387 Node *result = SafePointNode::Ideal(phase, can_reshape);
1389 // Now see if we can optimize away this lock. We don't actually
1390 // remove the locking here, we simply set the _eliminate flag which
1391 // prevents macro expansion from expanding the lock. Since we don't
1392 // modify the graph, the value returned from this function is the
1393 // one computed above.
1394 if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
1395 //
1396 // If we are locking an unescaped object, the lock/unlock is unnecessary
1397 //
1398 ConnectionGraph *cgr = Compile::current()->congraph();
1399 PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
1400 if (cgr != NULL)
1401 es = cgr->escape_state(obj_node(), phase);
1402 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1403 // Mark it eliminated to update any counters
1404 this->set_eliminated();
1405 return result;
1406 }
1408 //
1409 // Try lock coarsening
1410 //
1411 PhaseIterGVN* iter = phase->is_IterGVN();
1412 if (iter != NULL) {
1414 GrowableArray<AbstractLockNode*> lock_ops;
1416 Node *ctrl = next_control(in(0));
1418 // now search back for a matching Unlock
1419 if (find_matching_unlock(ctrl, this, lock_ops)) {
1420 // found an unlock directly preceding this lock. This is the
1421 // case of single unlock directly control dependent on a
1422 // single lock which is the trivial version of case 1 or 2.
1423 } else if (ctrl->is_Region() ) {
1424 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
1425 // found lock preceded by multiple unlocks along all paths
1426 // joining at this point which is case 3 in description above.
1427 }
1428 } else {
1429       // see if this lock comes from either half of an if, where the
1430       // predecessor merges unlocks and the other half of the if
1431 // performs a lock.
1432 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
1433 // found unlock splitting to an if with locks on both branches.
1434 }
1435 }
1437 if (lock_ops.length() > 0) {
1438 // add ourselves to the list of locks to be eliminated.
1439 lock_ops.append(this);
1441 #ifndef PRODUCT
1442 if (PrintEliminateLocks) {
1443 int locks = 0;
1444 int unlocks = 0;
1445 for (int i = 0; i < lock_ops.length(); i++) {
1446 AbstractLockNode* lock = lock_ops.at(i);
1447 if (lock->Opcode() == Op_Lock)
1448 locks++;
1449 else
1450 unlocks++;
1451 if (Verbose) {
1452 lock->dump(1);
1453 }
1454 }
1455 tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1456 }
1457 #endif
1459 // for each of the identified locks, mark them
1460 // as eliminatable
1461 for (int i = 0; i < lock_ops.length(); i++) {
1462 AbstractLockNode* lock = lock_ops.at(i);
1464 // Mark it eliminated to update any counters
1465 lock->set_eliminated();
1466 }
1467 } else if (result != NULL && ctrl->is_Region() &&
1468 iter->_worklist.member(ctrl)) {
1469 // We weren't able to find any opportunities but the region this
1470 // lock is control dependent on hasn't been processed yet so put
1471 // this lock back on the worklist so we can check again once any
1472 // region simplification has occurred.
1473 iter->_worklist.push(this);
1474 }
1475 }
1476 }
1478 return result;
1479 }
1481 //=============================================================================
1482 uint UnlockNode::size_of() const { return sizeof(*this); }
1484 //=============================================================================
1485 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1487 // perform any generic optimizations first (returns 'this' or NULL)
1488 Node * result = SafePointNode::Ideal(phase, can_reshape);
1490 // Now see if we can optimize away this unlock. We don't actually
1491 // remove the unlocking here, we simply set the _eliminate flag which
1492 // prevents macro expansion from expanding the unlock. Since we don't
1493 // modify the graph, the value returned from this function is the
1494 // one computed above.
1495 // Escape state is defined after Parse phase.
1496 if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
1497 //
1498 // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1499 //
1500 ConnectionGraph *cgr = Compile::current()->congraph();
1501 PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
1502 if (cgr != NULL)
1503 es = cgr->escape_state(obj_node(), phase);
1504 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1505 // Mark it eliminated to update any counters
1506 this->set_eliminated();
1507 }
1508 }
1509 return result;
1510 }