Fri, 07 Nov 2008 09:29:38 -0800
6462850: generate biased locking code in C2 ideal graph
Summary: Inline biased locking code in C2 ideal graph during macro nodes expansion
Reviewed-by: never
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // Portions of code courtesy of Clifford Click
27 // Optimization - Graph Style
29 #include "incls/_precompiled.incl"
30 #include "incls/_callnode.cpp.incl"
32 //=============================================================================
33 uint StartNode::size_of() const { return sizeof(*this); }
34 uint StartNode::cmp( const Node &n ) const
35 { return _domain == ((StartNode&)n)._domain; }
36 const Type *StartNode::bottom_type() const { return _domain; }
37 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
38 #ifndef PRODUCT
39 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
40 #endif
42 //------------------------------Ideal------------------------------------------
43 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
44 return remove_dead_region(phase, can_reshape) ? this : NULL;
45 }
47 //------------------------------calling_convention-----------------------------
48 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
49 Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
50 }
52 //------------------------------Registers--------------------------------------
53 const RegMask &StartNode::in_RegMask(uint) const {
54 return RegMask::Empty;
55 }
57 //------------------------------match------------------------------------------
58 // Construct projections for incoming parameters, and their RegMask info
59 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
60 switch (proj->_con) {
61 case TypeFunc::Control:
62 case TypeFunc::I_O:
63 case TypeFunc::Memory:
64 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
65 case TypeFunc::FramePtr:
66 return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
67 case TypeFunc::ReturnAdr:
68 return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
69 case TypeFunc::Parms:
70 default: {
71 uint parm_num = proj->_con - TypeFunc::Parms;
72 const Type *t = _domain->field_at(proj->_con);
73 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
74 return new (match->C, 1) ConNode(Type::TOP);
75 uint ideal_reg = Matcher::base2reg[t->base()];
76 RegMask &rm = match->_calling_convention_mask[parm_num];
77 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
78 }
79 }
80 return NULL;
81 }
83 //------------------------------StartOSRNode----------------------------------
84 // The method start node for an on stack replacement adapter
86 //------------------------------osr_domain-----------------------------
87 const TypeTuple *StartOSRNode::osr_domain() {
88 const Type **fields = TypeTuple::fields(2);
89 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
91 return TypeTuple::make(TypeFunc::Parms+1, fields);
92 }
94 //=============================================================================
95 const char * const ParmNode::names[TypeFunc::Parms+1] = {
96 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
97 };
99 #ifndef PRODUCT
100 void ParmNode::dump_spec(outputStream *st) const {
101 if( _con < TypeFunc::Parms ) {
102 st->print(names[_con]);
103 } else {
104 st->print("Parm%d: ",_con-TypeFunc::Parms);
105 // Verbose and WizardMode dump bottom_type for all nodes
106 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
107 }
108 }
109 #endif
111 uint ParmNode::ideal_reg() const {
112 switch( _con ) {
113 case TypeFunc::Control : // fall through
114 case TypeFunc::I_O : // fall through
115 case TypeFunc::Memory : return 0;
116 case TypeFunc::FramePtr : // fall through
117 case TypeFunc::ReturnAdr: return Op_RegP;
118 default : assert( _con > TypeFunc::Parms, "" );
119 // fall through
120 case TypeFunc::Parms : {
121 // Type of argument being passed
122 const Type *t = in(0)->as_Start()->_domain->field_at(_con);
123 return Matcher::base2reg[t->base()];
124 }
125 }
126 ShouldNotReachHere();
127 return 0;
128 }
130 //=============================================================================
131 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
132 init_req(TypeFunc::Control,cntrl);
133 init_req(TypeFunc::I_O,i_o);
134 init_req(TypeFunc::Memory,memory);
135 init_req(TypeFunc::FramePtr,frameptr);
136 init_req(TypeFunc::ReturnAdr,retadr);
137 }
139 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
140 return remove_dead_region(phase, can_reshape) ? this : NULL;
141 }
143 const Type *ReturnNode::Value( PhaseTransform *phase ) const {
144 return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
145 ? Type::TOP
146 : Type::BOTTOM;
147 }
149 // Do we Match on this edge index or not? No edges on return nodes
150 uint ReturnNode::match_edge(uint idx) const {
151 return 0;
152 }
155 #ifndef PRODUCT
156 void ReturnNode::dump_req() const {
157 // Dump the required inputs; the return value inputs are prefixed by "returns"
158 uint i; // Exit value of loop
159 for( i=0; i<req(); i++ ) { // For all required inputs
160 if( i == TypeFunc::Parms ) tty->print("returns");
161 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
162 else tty->print("_ ");
163 }
164 }
165 #endif
167 //=============================================================================
168 RethrowNode::RethrowNode(
169 Node* cntrl,
170 Node* i_o,
171 Node* memory,
172 Node* frameptr,
173 Node* ret_adr,
174 Node* exception
175 ) : Node(TypeFunc::Parms + 1) {
176 init_req(TypeFunc::Control , cntrl );
177 init_req(TypeFunc::I_O , i_o );
178 init_req(TypeFunc::Memory , memory );
179 init_req(TypeFunc::FramePtr , frameptr );
180 init_req(TypeFunc::ReturnAdr, ret_adr);
181 init_req(TypeFunc::Parms , exception);
182 }
184 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
185 return remove_dead_region(phase, can_reshape) ? this : NULL;
186 }
188 const Type *RethrowNode::Value( PhaseTransform *phase ) const {
189 return (phase->type(in(TypeFunc::Control)) == Type::TOP)
190 ? Type::TOP
191 : Type::BOTTOM;
192 }
194 uint RethrowNode::match_edge(uint idx) const {
195 return 0;
196 }
198 #ifndef PRODUCT
199 void RethrowNode::dump_req() const {
200 // Dump the required inputs; the exception input is prefixed by "exception"
201 uint i; // Exit value of loop
202 for( i=0; i<req(); i++ ) { // For all required inputs
203 if( i == TypeFunc::Parms ) tty->print("exception");
204 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
205 else tty->print("_ ");
206 }
207 }
208 #endif
210 //=============================================================================
211 // Do we Match on this edge index or not? Match only target address & method
212 uint TailCallNode::match_edge(uint idx) const {
213 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
214 }
216 //=============================================================================
217 // Do we Match on this edge index or not? Match only target address & oop
218 uint TailJumpNode::match_edge(uint idx) const {
219 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
220 }
222 //=============================================================================
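// A JVMState records the interpreter state of one activation at a safepoint:
// locals, expression stack, monitors and (after escape analysis) scalar-replaced
// objects.  The debug-info edges of the owning SafePointNode are laid out as
//   [ locals | stack | monitors | scalar objects ]
// delimited by _locoff, _stkoff, _monoff, _scloff and _endoff.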
223 JVMState::JVMState(ciMethod* method, JVMState* caller) {
224 assert(method != NULL, "must be valid call site");
225 _method = method;
226 debug_only(_bci = -99); // random garbage value
227 debug_only(_map = (SafePointNode*)-1);
228 _caller = caller;
229 _depth = 1 + (caller == NULL ? 0 : caller->depth());
230 _locoff = TypeFunc::Parms;
231 _stkoff = _locoff + _method->max_locals();
232 _monoff = _stkoff + _method->max_stack();
233 _scloff = _monoff;
234 _endoff = _monoff;
235 _sp = 0;
236 }
237 JVMState::JVMState(int stack_size) {
238 _method = NULL;
239 _bci = InvocationEntryBci;
240 debug_only(_map = (SafePointNode*)-1);
241 _caller = NULL;
242 _depth = 1;
243 _locoff = TypeFunc::Parms;
244 _stkoff = _locoff;
245 _monoff = _stkoff + stack_size;
246 _scloff = _monoff;
247 _endoff = _monoff;
248 _sp = 0;
249 }
251 //--------------------------------of_depth-------------------------------------
252 JVMState* JVMState::of_depth(int d) const {
253 const JVMState* jvmp = this;
254 assert(0 < d && (uint)d <= depth(), "oob");
255 for (int skip = depth() - d; skip > 0; skip--) {
256 jvmp = jvmp->caller();
257 }
258 assert(jvmp->depth() == (uint)d, "found the right one");
259 return (JVMState*)jvmp;
260 }
262 //-----------------------------same_calls_as-----------------------------------
263 bool JVMState::same_calls_as(const JVMState* that) const {
264 if (this == that) return true;
265 if (this->depth() != that->depth()) return false;
266 const JVMState* p = this;
267 const JVMState* q = that;
268 for (;;) {
269 if (p->_method != q->_method) return false;
270 if (p->_method == NULL) return true; // bci is irrelevant
271 if (p->_bci != q->_bci) return false;
272 p = p->caller();
273 q = q->caller();
274 if (p == q) return true;
275 assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
276 }
277 }
279 //------------------------------debug_start------------------------------------
280 uint JVMState::debug_start() const {
281 debug_only(JVMState* jvmroot = of_depth(1));
282 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
283 return of_depth(1)->locoff();
284 }
286 //-------------------------------debug_end-------------------------------------
287 uint JVMState::debug_end() const {
288 debug_only(JVMState* jvmroot = of_depth(1));
289 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
290 return endoff();
291 }
293 //------------------------------debug_depth------------------------------------
294 uint JVMState::debug_depth() const {
295 uint total = 0;
296 for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
297 total += jvmp->debug_size();
298 }
299 return total;
300 }
302 #ifndef PRODUCT
304 //------------------------------format_helper----------------------------------
305 // Given an allocation (a Chaitin object) and a Node decide if the Node carries
306 // any defined value or not. If it does, print out the register or constant.
307 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
308 if (n == NULL) { st->print(" NULL"); return; }
309 if (n->is_SafePointScalarObject()) {
310 // Scalar replacement.
311 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
312 scobjs->append_if_missing(spobj);
313 int sco_n = scobjs->find(spobj);
314 assert(sco_n >= 0, "");
315 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
316 return;
317 }
318 if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
319 char buf[50];
320 regalloc->dump_register(n,buf);
321 st->print(" %s%d]=%s",msg,i,buf);
322 } else { // No register, but might be constant
323 const Type *t = n->bottom_type();
324 switch (t->base()) {
325 case Type::Int:
326 st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
327 break;
328 case Type::AnyPtr:
329 assert( t == TypePtr::NULL_PTR, "" );
330 st->print(" %s%d]=#NULL",msg,i);
331 break;
332 case Type::AryPtr:
333 case Type::KlassPtr:
334 case Type::InstPtr:
335 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
336 break;
337 case Type::NarrowOop:
338 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop());
339 break;
340 case Type::RawPtr:
341 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
342 break;
343 case Type::DoubleCon:
344 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
345 break;
346 case Type::FloatCon:
347 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
348 break;
349 case Type::Long:
350 st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
351 break;
352 case Type::Half:
353 case Type::Top:
354 st->print(" %s%d]=_",msg,i);
355 break;
356 default: ShouldNotReachHere();
357 }
358 }
359 }
361 //------------------------------format-----------------------------------------
362 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
363 st->print(" #");
364 if( _method ) {
365 _method->print_short_name(st);
366 st->print(" @ bci:%d ",_bci);
367 } else {
368 st->print_cr(" runtime stub ");
369 return;
370 }
371 if (n->is_MachSafePoint()) {
372 GrowableArray<SafePointScalarObjectNode*> scobjs;
373 MachSafePointNode *mcall = n->as_MachSafePoint();
374 uint i;
375 // Print locals
376 for( i = 0; i < (uint)loc_size(); i++ )
377 format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
378 // Print stack
379 for (i = 0; i < (uint)stk_size(); i++) {
380 if ((uint)(_stkoff + i) >= mcall->len())
381 st->print(" oob ");
382 else
383 format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
384 }
385 for (i = 0; (int)i < nof_monitors(); i++) {
386 Node *box = mcall->monitor_box(this, i);
387 Node *obj = mcall->monitor_obj(this, i);
388 if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
389 while( !box->is_BoxLock() ) box = box->in(1);
390 format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
391 } else {
392 OptoReg::Name box_reg = BoxLockNode::stack_slot(box);
393 st->print(" MON-BOX%d=%s+%d",
394 i,
395 OptoReg::regname(OptoReg::c_frame_pointer),
396 regalloc->reg2offset(box_reg));
397 }
398 format_helper( regalloc, st, obj, "MON-OBJ[", i, &scobjs );
399 }
401 for (i = 0; i < (uint)scobjs.length(); i++) {
402 // Scalar replaced objects.
403 st->print_cr("");
404 st->print(" # ScObj" INT32_FORMAT " ", i);
405 SafePointScalarObjectNode* spobj = scobjs.at(i);
406 ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
407 assert(cik->is_instance_klass() ||
408 cik->is_array_klass(), "Not supported allocation.");
409 ciInstanceKlass *iklass = NULL;
410 if (cik->is_instance_klass()) {
411 cik->print_name_on(st);
412 iklass = cik->as_instance_klass();
413 } else if (cik->is_type_array_klass()) {
414 cik->as_array_klass()->base_element_type()->print_name_on(st);
415 st->print("[%d]=", spobj->n_fields());
416 } else if (cik->is_obj_array_klass()) {
417 ciType* cie = cik->as_array_klass()->base_element_type();
418 int ndim = 1;
419 while (cie->is_obj_array_klass()) {
420 ndim += 1;
421 cie = cie->as_array_klass()->base_element_type();
422 }
423 cie->print_name_on(st);
424 while (ndim-- > 0) {
425 st->print("[]");
426 }
427 st->print("[%d]=", spobj->n_fields());
428 }
429 st->print("{");
430 uint nf = spobj->n_fields();
431 if (nf > 0) {
432 uint first_ind = spobj->first_index();
433 Node* fld_node = mcall->in(first_ind);
434 ciField* cifield;
435 if (iklass != NULL) {
436 st->print(" [");
437 cifield = iklass->nonstatic_field_at(0);
438 cifield->print_name_on(st);
439 format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
440 } else {
441 format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
442 }
443 for (uint j = 1; j < nf; j++) {
444 fld_node = mcall->in(first_ind+j);
445 if (iklass != NULL) {
446 st->print(", [");
447 cifield = iklass->nonstatic_field_at(j);
448 cifield->print_name_on(st);
449 format_helper( regalloc, st, fld_node, ":", j, &scobjs );
450 } else {
451 format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
452 }
453 }
454 }
455 st->print(" }");
456 }
457 }
458 st->print_cr("");
459 if (caller() != NULL) caller()->format(regalloc, n, st);
460 }
463 void JVMState::dump_spec(outputStream *st) const {
464 if (_method != NULL) {
465 bool printed = false;
466 if (!Verbose) {
467 // The JVMS dumps make really, really long lines.
468 // Take out the most boring parts, which are the package prefixes.
469 char buf[500];
470 stringStream namest(buf, sizeof(buf));
471 _method->print_short_name(&namest);
472 if (namest.count() < sizeof(buf)) {
473 const char* name = namest.base();
474 if (name[0] == ' ') ++name;
475 const char* endcn = strchr(name, ':'); // end of class name
476 if (endcn == NULL) endcn = strchr(name, '(');
477 if (endcn == NULL) endcn = name + strlen(name);
478 while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
479 --endcn;
480 st->print(" %s", endcn);
481 printed = true;
482 }
483 }
484 if (!printed)
485 _method->print_short_name(st);
486 st->print(" @ bci:%d",_bci);
487 } else {
488 st->print(" runtime stub");
489 }
490 if (caller() != NULL) caller()->dump_spec(st);
491 }
494 void JVMState::dump_on(outputStream* st) const {
495 if (_map && !((uintptr_t)_map & 1)) {
496 if (_map->len() > _map->req()) { // _map->has_exceptions()
497 Node* ex = _map->in(_map->req()); // _map->next_exception()
498 // skip the first one; it's already being printed
499 while (ex != NULL && ex->len() > ex->req()) {
500 ex = ex->in(ex->req()); // ex->next_exception()
501 ex->dump(1);
502 }
503 }
504 _map->dump(2);
505 }
506 st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d method=",
507 depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci());
508 if (_method == NULL) {
509 st->print_cr("(none)");
510 } else {
511 _method->print_name(st);
512 st->cr();
513 if (bci() >= 0 && bci() < _method->code_size()) {
514 st->print(" bc: ");
515 _method->print_codes_on(bci(), bci()+1, st);
516 }
517 }
518 if (caller() != NULL) {
519 caller()->dump_on(st);
520 }
521 }
523 // Extra way to dump a jvms from the debugger,
524 // to avoid a bug with C++ member function calls.
525 void dump_jvms(JVMState* jvms) {
526 jvms->dump();
527 }
528 #endif
530 //--------------------------clone_shallow--------------------------------------
531 JVMState* JVMState::clone_shallow(Compile* C) const {
532 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
533 n->set_bci(_bci);
534 n->set_locoff(_locoff);
535 n->set_stkoff(_stkoff);
536 n->set_monoff(_monoff);
537 n->set_scloff(_scloff);
538 n->set_endoff(_endoff);
539 n->set_sp(_sp);
540 n->set_map(_map);
541 return n;
542 }
544 //---------------------------clone_deep----------------------------------------
545 JVMState* JVMState::clone_deep(Compile* C) const {
546 JVMState* n = clone_shallow(C);
547 for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
548 p->_caller = p->_caller->clone_shallow(C);
549 }
550 assert(n->depth() == depth(), "sanity");
551 assert(n->debug_depth() == debug_depth(), "sanity");
552 return n;
553 }
555 //=============================================================================
556 uint CallNode::cmp( const Node &n ) const
557 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
558 #ifndef PRODUCT
559 void CallNode::dump_req() const {
560 // Dump the required inputs, enclosed in '(' and ')'
561 uint i; // Exit value of loop
562 for( i=0; i<req(); i++ ) { // For all required inputs
563 if( i == TypeFunc::Parms ) tty->print("(");
564 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
565 else tty->print("_ ");
566 }
567 tty->print(")");
568 }
570 void CallNode::dump_spec(outputStream *st) const {
571 st->print(" ");
572 tf()->dump_on(st);
573 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
574 if (jvms() != NULL) jvms()->dump_spec(st);
575 }
576 #endif
578 const Type *CallNode::bottom_type() const { return tf()->range(); }
579 const Type *CallNode::Value(PhaseTransform *phase) const {
580 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
581 return tf()->range();
582 }
584 //------------------------------calling_convention-----------------------------
585 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
586 // Use the standard compiler calling convention
587 Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
588 }
591 //------------------------------match------------------------------------------
592 // Construct projections for control, I/O, memory-fields, ..., and
593 // return result(s) along with their RegMask info
594 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
595 switch (proj->_con) {
596 case TypeFunc::Control:
597 case TypeFunc::I_O:
598 case TypeFunc::Memory:
599 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
601 case TypeFunc::Parms+1: // For LONG & DOUBLE returns
602 assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
603 // 2nd half of doubles and longs
604 return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
606 case TypeFunc::Parms: { // Normal returns
607 uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()];
608 OptoRegPair regs = is_CallRuntime()
609 ? match->c_return_value(ideal_reg,true) // Calls into C runtime
610 : match-> return_value(ideal_reg,true); // Calls into compiled Java code
611 RegMask rm = RegMask(regs.first());
612 if( OptoReg::is_valid(regs.second()) )
613 rm.Insert( regs.second() );
614 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
615 }
617 case TypeFunc::ReturnAdr:
618 case TypeFunc::FramePtr:
619 default:
620 ShouldNotReachHere();
621 }
622 return NULL;
623 }
625 // Do we Match on this edge index or not? Match no edges
626 uint CallNode::match_edge(uint idx) const {
627 return 0;
628 }
630 //
631 // Determine whether the call could modify the field of the specified
632 // instance at the specified offset.
633 //
634 bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
635 const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
637 // If not an OopPtr or not an instance type, assume the worst.
638 // Note: currently this method is called only for instance types.
639 if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
640 return true;
641 }
642 // The instance_id is set only for scalar-replaceable allocations which
643 // are not passed as arguments according to Escape Analysis.
644 return false;
645 }
647 // Does this call have a direct reference to n other than debug information?
648 bool CallNode::has_non_debug_use(Node *n) {
649 const TypeTuple * d = tf()->domain();
650 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
651 Node *arg = in(i);
652 if (arg == n) {
653 return true;
654 }
655 }
656 return false;
657 }
659 // Returns the unique CheckCastPP of a call
660 // or 'this' if there are several CheckCastPPs,
661 // or NULL if there is none.
662 Node *CallNode::result_cast() {
663 Node *cast = NULL;
665 Node *p = proj_out(TypeFunc::Parms);
666 if (p == NULL)
667 return NULL;
669 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
670 Node *use = p->fast_out(i);
671 if (use->is_CheckCastPP()) {
672 if (cast != NULL) {
673 return this; // more than 1 CheckCastPP
674 }
675 cast = use;
676 }
677 }
678 return cast;
679 }
682 //=============================================================================
683 uint CallJavaNode::size_of() const { return sizeof(*this); }
684 uint CallJavaNode::cmp( const Node &n ) const {
685 CallJavaNode &call = (CallJavaNode&)n;
686 return CallNode::cmp(call) && _method == call._method;
687 }
688 #ifndef PRODUCT
689 void CallJavaNode::dump_spec(outputStream *st) const {
690 if( _method ) _method->print_short_name(st);
691 CallNode::dump_spec(st);
692 }
693 #endif
695 //=============================================================================
696 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
697 uint CallStaticJavaNode::cmp( const Node &n ) const {
698 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
699 return CallJavaNode::cmp(call);
700 }
702 //----------------------------uncommon_trap_request----------------------------
703 // If this is an uncommon trap, return the request code, else zero.
704 int CallStaticJavaNode::uncommon_trap_request() const {
705 if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
706 return extract_uncommon_trap_request(this);
707 }
708 return 0;
709 }
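// Note: the request code encodes a deoptimization reason and action; see
// Deoptimization::format_trap_request(), used in dump_spec() below, for decoding.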
710 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
711 #ifndef PRODUCT
712 if (!(call->req() > TypeFunc::Parms &&
713 call->in(TypeFunc::Parms) != NULL &&
714 call->in(TypeFunc::Parms)->is_Con())) {
715 assert(_in_dump_cnt != 0, "OK if dumping");
716 tty->print("[bad uncommon trap]");
717 return 0;
718 }
719 #endif
720 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
721 }
723 #ifndef PRODUCT
724 void CallStaticJavaNode::dump_spec(outputStream *st) const {
725 st->print("# Static ");
726 if (_name != NULL) {
727 st->print("%s", _name);
728 int trap_req = uncommon_trap_request();
729 if (trap_req != 0) {
730 char buf[100];
731 st->print("(%s)",
732 Deoptimization::format_trap_request(buf, sizeof(buf),
733 trap_req));
734 }
735 st->print(" ");
736 }
737 CallJavaNode::dump_spec(st);
738 }
739 #endif
741 //=============================================================================
742 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
743 uint CallDynamicJavaNode::cmp( const Node &n ) const {
744 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
745 return CallJavaNode::cmp(call);
746 }
747 #ifndef PRODUCT
748 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
749 st->print("# Dynamic ");
750 CallJavaNode::dump_spec(st);
751 }
752 #endif
754 //=============================================================================
755 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
756 uint CallRuntimeNode::cmp( const Node &n ) const {
757 CallRuntimeNode &call = (CallRuntimeNode&)n;
758 return CallNode::cmp(call) && !strcmp(_name,call._name);
759 }
760 #ifndef PRODUCT
761 void CallRuntimeNode::dump_spec(outputStream *st) const {
762 st->print("# ");
763 st->print(_name);
764 CallNode::dump_spec(st);
765 }
766 #endif
768 //------------------------------calling_convention-----------------------------
769 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
770 Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
771 }
773 //=============================================================================
774 //------------------------------calling_convention-----------------------------
777 //=============================================================================
778 #ifndef PRODUCT
779 void CallLeafNode::dump_spec(outputStream *st) const {
780 st->print("# ");
781 st->print(_name);
782 CallNode::dump_spec(st);
783 }
784 #endif
786 //=============================================================================
788 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
789 assert(verify_jvms(jvms), "jvms must match");
790 int loc = jvms->locoff() + idx;
791 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
792 // If current local idx is top then local idx - 1 could
793 // be a long/double that needs to be killed since top could
794 // represent the 2nd half of the long/double.
795 uint ideal = in(loc -1)->ideal_reg();
796 if (ideal == Op_RegD || ideal == Op_RegL) {
797 // set other (low index) half to top
798 set_req(loc - 1, in(loc));
799 }
800 }
801 set_req(loc, c);
802 }
804 uint SafePointNode::size_of() const { return sizeof(*this); }
805 uint SafePointNode::cmp( const Node &n ) const {
806 return (&n == this); // Always fail except on self
807 }
809 //-------------------------set_next_exception----------------------------------
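// Exception states are chained off a safepoint map through a precedence edge
// (slot req() and beyond); len() > req() therefore means this map has a
// pending exception state attached.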
810 void SafePointNode::set_next_exception(SafePointNode* n) {
811 assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
812 if (len() == req()) {
813 if (n != NULL) add_prec(n);
814 } else {
815 set_prec(req(), n);
816 }
817 }
820 //----------------------------next_exception-----------------------------------
821 SafePointNode* SafePointNode::next_exception() const {
822 if (len() == req()) {
823 return NULL;
824 } else {
825 Node* n = in(req());
826 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
827 return (SafePointNode*) n;
828 }
829 }
832 //------------------------------Ideal------------------------------------------
833 // Skip over any collapsed Regions
834 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
835 return remove_dead_region(phase, can_reshape) ? this : NULL;
836 }
838 //------------------------------Identity---------------------------------------
839 // Remove obviously duplicate safepoints
840 Node *SafePointNode::Identity( PhaseTransform *phase ) {
842 // If you have back to back safepoints, remove one
843 if( in(TypeFunc::Control)->is_SafePoint() )
844 return in(TypeFunc::Control);
846 if( in(0)->is_Proj() ) {
847 Node *n0 = in(0)->in(0);
848 // Check if it is a call projection (except Leaf Call)
849 if( n0->is_Catch() ) {
850 n0 = n0->in(0)->in(0);
851 assert( n0->is_Call(), "expect a call here" );
852 }
853 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
854 // Useless Safepoint, so remove it
855 return in(TypeFunc::Control);
856 }
857 }
859 return this;
860 }
862 //------------------------------Value------------------------------------------
863 const Type *SafePointNode::Value( PhaseTransform *phase ) const {
864 if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
865 if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
866 return Type::CONTROL;
867 }
869 #ifndef PRODUCT
870 void SafePointNode::dump_spec(outputStream *st) const {
871 st->print(" SafePoint ");
872 }
873 #endif
875 const RegMask &SafePointNode::in_RegMask(uint idx) const {
876 if( idx < TypeFunc::Parms ) return RegMask::Empty;
877 // Values outside the domain represent debug info
878 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
879 }
880 const RegMask &SafePointNode::out_RegMask() const {
881 return RegMask::Empty;
882 }
885 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
886 assert((int)grow_by > 0, "sanity");
887 int monoff = jvms->monoff();
888 int scloff = jvms->scloff();
889 int endoff = jvms->endoff();
890 assert(endoff == (int)req(), "no other states or debug info after me");
891 Node* top = Compile::current()->top();
892 for (uint i = 0; i < grow_by; i++) {
893 ins_req(monoff, top);
894 }
895 jvms->set_monoff(monoff + grow_by);
896 jvms->set_scloff(scloff + grow_by);
897 jvms->set_endoff(endoff + grow_by);
898 }
900 void SafePointNode::push_monitor(const FastLockNode *lock) {
901 // Record a monitor in the debug info: both the original BoxLockNode (the
902 // stack space for the monitor) and the Object being locked.
903 const int MonitorEdges = 2;
904 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
905 assert(req() == jvms()->endoff(), "correct sizing");
906 int nextmon = jvms()->scloff();
907 if (GenerateSynchronizationCode) {
908 add_req(lock->box_node());
909 add_req(lock->obj_node());
910 } else {
911 add_req(NULL);
912 add_req(NULL);
913 }
914 jvms()->set_scloff(nextmon+MonitorEdges);
915 jvms()->set_endoff(req());
916 }
918 void SafePointNode::pop_monitor() {
919 // Delete last monitor from debug info
920 debug_only(int num_before_pop = jvms()->nof_monitors());
921 const int MonitorEdges = (1<<JVMState::logMonitorEdges);
922 int scloff = jvms()->scloff();
923 int endoff = jvms()->endoff();
924 int new_scloff = scloff - MonitorEdges;
925 int new_endoff = endoff - MonitorEdges;
926 jvms()->set_scloff(new_scloff);
927 jvms()->set_endoff(new_endoff);
928 while (scloff > new_scloff) del_req(--scloff);
929 assert(jvms()->nof_monitors() == num_before_pop-1, "");
930 }
932 Node *SafePointNode::peek_monitor_box() const {
933 int mon = jvms()->nof_monitors() - 1;
934 assert(mon >= 0, "must have a monitor");
935 return monitor_box(jvms(), mon);
936 }
938 Node *SafePointNode::peek_monitor_obj() const {
939 int mon = jvms()->nof_monitors() - 1;
940 assert(mon >= 0, "must have a monitor");
941 return monitor_obj(jvms(), mon);
942 }
944 // Do we Match on this edge index or not? Match no edges
945 uint SafePointNode::match_edge(uint idx) const {
946 if( !needs_polling_address_input() )
947 return 0;
949 return (TypeFunc::Parms == idx);
950 }
952 //============== SafePointScalarObjectNode ==============
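// A SafePointScalarObjectNode stands in for a scalar-replaced (eliminated)
// allocation in the debug info of a safepoint: _first_index and _n_fields
// locate the field values among the safepoint's inputs so the object can be
// reallocated if we deoptimize here.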
954 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
955 #ifdef ASSERT
956 AllocateNode* alloc,
957 #endif
958 uint first_index,
959 uint n_fields) :
960 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
961 #ifdef ASSERT
962 _alloc(alloc),
963 #endif
964 _first_index(first_index),
965 _n_fields(n_fields)
966 {
967 init_class_id(Class_SafePointScalarObject);
968 }
970 bool SafePointScalarObjectNode::pinned() const { return true; }
972 uint SafePointScalarObjectNode::ideal_reg() const {
973 return 0; // No matching to machine instruction
974 }
976 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
977 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
978 }
980 const RegMask &SafePointScalarObjectNode::out_RegMask() const {
981 return RegMask::Empty;
982 }
984 uint SafePointScalarObjectNode::match_edge(uint idx) const {
985 return 0;
986 }
988 SafePointScalarObjectNode*
989 SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
990 void* cached = (*sosn_map)[(void*)this];
991 if (cached != NULL) {
992 return (SafePointScalarObjectNode*)cached;
993 }
994 Compile* C = Compile::current();
995 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
996 res->_first_index += jvms_adj;
997 sosn_map->Insert((void*)this, (void*)res);
998 return res;
999 }
1002 #ifndef PRODUCT
1003 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1004 st->print(" # fields@[%d..%d]", first_index(),
1005 first_index() + n_fields() - 1);
1006 }
1008 #endif
1010 //=============================================================================
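// AllocateNode (and AllocateArrayNode below) are macro nodes: the constructor
// registers the node with the compile via add_macro_node(), and PhaseMacroExpand
// later expands it into the actual fast/slow-path allocation code.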
1011 uint AllocateNode::size_of() const { return sizeof(*this); }
1013 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1014 Node *ctrl, Node *mem, Node *abio,
1015 Node *size, Node *klass_node, Node *initial_test)
1016 : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1017 {
1018 init_class_id(Class_Allocate);
1019 init_flags(Flag_is_macro);
1020 _is_scalar_replaceable = false;
1021 Node *topnode = C->top();
1023 init_req( TypeFunc::Control , ctrl );
1024 init_req( TypeFunc::I_O , abio );
1025 init_req( TypeFunc::Memory , mem );
1026 init_req( TypeFunc::ReturnAdr, topnode );
1027 init_req( TypeFunc::FramePtr , topnode );
1028 init_req( AllocSize , size);
1029 init_req( KlassNode , klass_node);
1030 init_req( InitialTest , initial_test);
1031 init_req( ALength , topnode);
1032 C->add_macro_node(this);
1033 }
1035 //=============================================================================
1036 uint AllocateArrayNode::size_of() const { return sizeof(*this); }
1038 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1039 // CastII, if appropriate. If we are not allowed to create new nodes, and
1040 // a CastII is appropriate, return NULL.
1041 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1042 Node *length = in(AllocateNode::ALength);
1043 assert(length != NULL, "length is not null");
1045 const TypeInt* length_type = phase->find_int_type(length);
1046 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1048 if (ary_type != NULL && length_type != NULL) {
1049 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1050 if (narrow_length_type != length_type) {
1051 // Assert one of:
1052 // - the narrow_length is 0
1053 // - the narrow_length is not wider than length
1054 assert(narrow_length_type == TypeInt::ZERO ||
1055 (narrow_length_type->_hi <= length_type->_hi &&
1056 narrow_length_type->_lo >= length_type->_lo),
1057 "narrow type must be narrower than length type");
1059 // Return NULL if new nodes are not allowed
1060 if (!allow_new_nodes) return NULL;
1061 // Create a cast which is control dependent on the initialization to
1062 // propagate the fact that the array length must be positive.
1063 length = new (phase->C, 2) CastIINode(length, narrow_length_type);
1064 length->set_req(0, initialization()->proj_out(0));
1065 }
1066 }
1068 return length;
1069 }
1071 //=============================================================================
1072 uint LockNode::size_of() const { return sizeof(*this); }
1074 // Redundant lock elimination
1075 //
1076 // There are various patterns of locking where we release and
1077 // immediately reacquire a lock in a piece of code where no operations
1078 // occur in between that would be observable. In those cases we can
1079 // skip releasing and reacquiring the lock without violating any
1080 // fairness requirements. Doing this around a loop could cause a lock
1081 // to be held for a very long time so we concentrate on non-looping
1082 // control flow. We also require that the operations are fully
1083 // redundant, meaning that we don't introduce new lock operations on
1084 // some paths just to be able to eliminate them on others, a la PRE. This
1085 // would probably require some more extensive graph manipulation to
1086 // guarantee that the memory edges were all handled correctly.
1087 //
1088 // Assuming p is a simple predicate which can't trap in any way and s
1089 // is a synchronized method consider this code:
1090 //
1091 // s();
1092 // if (p)
1093 // s();
1094 // else
1095 // s();
1096 // s();
1097 //
1098 // 1. The unlocks of the first call to s can be eliminated if the
1099 // locks inside the then and else branches are eliminated.
1100 //
1101 // 2. The unlocks of the then and else branches can be eliminated if
1102 // the lock of the final call to s is eliminated.
1103 //
1104 // Either of these cases subsumes the simple case of sequential control flow
1105 //
1106 // Additionally we can eliminate versions without the else case:
1107 //
1108 // s();
1109 // if (p)
1110 // s();
1111 // s();
1112 //
1113 // 3. In this case we eliminate the unlock of the first s, the lock
1114 // and unlock in the then case and the lock in the final s.
1115 //
1116 // Note also that in all these cases the then/else pieces don't have
1117 // to be trivial as long as they begin and end with synchronization
1118 // operations.
1119 //
1120 // s();
1121 // if (p)
1122 // s();
1123 // f();
1124 // s();
1125 // s();
1126 //
1127 // The code will work properly for this case, leaving in the unlock
1128 // before the call to f and the relock after it.
1129 //
1130 // A potentially interesting case which isn't handled here is when the
1131 // locking is partially redundant.
1132 //
1133 // s();
1134 // if (p)
1135 // s();
1136 //
1137 // This could be eliminated by putting an unlock on the else path and
1138 // eliminating the first unlock and the lock in the then side.
1139 // Alternatively the unlock could be moved out of the then side so it
1140 // was after the merge and the first unlock and second lock
1141 // eliminated. This might require less manipulation of the memory
1142 // state to get correct.
1143 //
1144 // Additionally we might allow work between an unlock and a lock before
1145 // giving up eliminating the locks. The current code disallows any
1146 // conditional control flow between these operations. A formulation
1147 // similar to partial redundancy elimination computing the
1148 // availability of unlocking and the anticipatability of locking at a
1149 // program point would allow detection of fully redundant locking with
1150 // some amount of work in between. I'm not sure how often I really
1151 // think that would occur though. Most of the cases I've seen
1152 // indicate it's likely non-trivial work would occur in between.
1153 // There may be other more complicated constructs where we could
1154 // eliminate locking but I haven't seen any others appear as hot or
1155 // interesting.
1156 //
1157 // Locking and unlocking have a canonical form in ideal that looks
1158 // roughly like this:
1159 //
1160 // <obj>
1161 // | \\------+
1162 // | \ \
1163 // | BoxLock \
1164 // | | | \
1165 // | | \ \
1166 // | | FastLock
1167 // | | /
1168 // | | /
1169 // | | |
1170 //
1171 // Lock
1172 // |
1173 // Proj #0
1174 // |
1175 // MembarAcquire
1176 // |
1177 // Proj #0
1178 //
1179 // MembarRelease
1180 // |
1181 // Proj #0
1182 // |
1183 // Unlock
1184 // |
1185 // Proj #0
1186 //
1187 //
1188 // This code proceeds by processing Lock nodes during PhaseIterGVN
1189 // and searching back through its control for the proper code
1190 // patterns. Once it finds a set of lock and unlock operations to
1191 // eliminate, they are marked as eliminated, which causes the
1192 // expansion of the Lock and Unlock macro nodes to make the operation a NOP.
1193 //
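// For example, with back-to-back synchronized regions on the same object
//
//    synchronized (o) { a(); }
//    synchronized (o) { b(); }
//
// the unlock ending the first region and the lock starting the second are
// both marked eliminated, so after macro expansion the two regions behave
// like a single synchronized region covering both calls.
//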
1194 //=============================================================================
1196 //
1197 // Utility function to skip over uninteresting control nodes. Nodes skipped are:
1198 // - copy regions. (These may not have been optimized away yet.)
1199 // - eliminated locking nodes
1200 //
1201 static Node *next_control(Node *ctrl) {
1202 if (ctrl == NULL)
1203 return NULL;
1204 while (1) {
1205 if (ctrl->is_Region()) {
1206 RegionNode *r = ctrl->as_Region();
1207 Node *n = r->is_copy();
1208 if (n == NULL)
1209 break; // hit a region, return it
1210 else
1211 ctrl = n;
1212 } else if (ctrl->is_Proj()) {
1213 Node *in0 = ctrl->in(0);
1214 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
1215 ctrl = in0->in(0);
1216 } else {
1217 break;
1218 }
1219 } else {
1220 break; // found an interesting control
1221 }
1222 }
1223 return ctrl;
1224 }
1225 //
1226 // Given a control, see if it's the control projection of an Unlock which
1227 // operates on the same object as lock.
1228 //
1229 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1230 GrowableArray<AbstractLockNode*> &lock_ops) {
1231 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1232 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1233 Node *n = ctrl_proj->in(0);
1234 if (n != NULL && n->is_Unlock()) {
1235 UnlockNode *unlock = n->as_Unlock();
1236 if ((lock->obj_node() == unlock->obj_node()) &&
1237 (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) {
1238 lock_ops.append(unlock);
1239 return true;
1240 }
1241 }
1242 }
1243 return false;
1244 }
1246 //
1247 // Find the lock matching an unlock. Returns null if a safepoint
1248 // or complicated control is encountered first.
1249 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1250 LockNode *lock_result = NULL;
1251 // find the matching lock, or an intervening safepoint
1252 Node *ctrl = next_control(unlock->in(0));
1253 while (1) {
1254 assert(ctrl != NULL, "invalid control graph");
1255 assert(!ctrl->is_Start(), "missing lock for unlock");
1256 if (ctrl->is_top()) break; // dead control path
1257 if (ctrl->is_Proj()) ctrl = ctrl->in(0);
1258 if (ctrl->is_SafePoint()) {
1259 break; // found a safepoint (may be the lock we are searching for)
1260 } else if (ctrl->is_Region()) {
1261 // Check for a simple diamond pattern. Punt on anything more complicated
1262 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1263 Node *in1 = next_control(ctrl->in(1));
1264 Node *in2 = next_control(ctrl->in(2));
1265 if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1266 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1267 ctrl = next_control(in1->in(0)->in(0));
1268 } else {
1269 break;
1270 }
1271 } else {
1272 break;
1273 }
1274 } else {
1275 ctrl = next_control(ctrl->in(0)); // keep searching
1276 }
1277 }
1278 if (ctrl->is_Lock()) {
1279 LockNode *lock = ctrl->as_Lock();
1280 if ((lock->obj_node() == unlock->obj_node()) &&
1281 (lock->box_node() == unlock->box_node())) {
1282 lock_result = lock;
1283 }
1284 }
1285 return lock_result;
1286 }
1288 // This code corresponds to case 3 above.
1290 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1291 GrowableArray<AbstractLockNode*> &lock_ops) {
1292 Node* if_node = node->in(0);
1293 bool if_true = node->is_IfTrue();
1295 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
1296 Node *lock_ctrl = next_control(if_node->in(0));
1297 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
1298 Node* lock1_node = NULL;
1299 ProjNode* proj = if_node->as_If()->proj_out(!if_true);
1300 if (if_true) {
1301 if (proj->is_IfFalse() && proj->outcnt() == 1) {
1302 lock1_node = proj->unique_out();
1303 }
1304 } else {
1305 if (proj->is_IfTrue() && proj->outcnt() == 1) {
1306 lock1_node = proj->unique_out();
1307 }
1308 }
1309 if (lock1_node != NULL && lock1_node->is_Lock()) {
1310 LockNode *lock1 = lock1_node->as_Lock();
1311 if ((lock->obj_node() == lock1->obj_node()) &&
1312 (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) {
1313 lock_ops.append(lock1);
1314 return true;
1315 }
1316 }
1317 }
1318 }
1320 lock_ops.trunc_to(0);
1321 return false;
1322 }
1324 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1325 GrowableArray<AbstractLockNode*> &lock_ops) {
1326 // check each control merging at this point for a matching unlock.
1327 // in(0) should be self edge so skip it.
1328 for (int i = 1; i < (int)region->req(); i++) {
1329 Node *in_node = next_control(region->in(i));
1330 if (in_node != NULL) {
1331 if (find_matching_unlock(in_node, lock, lock_ops)) {
1332 // found a match so keep on checking.
1333 continue;
1334 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
1335 continue;
1336 }
1338 // If we fall through to here then it was some kind of node we
1339 // don't understand or there wasn't a matching unlock, so give
1340 // up trying to merge locks.
1341 lock_ops.trunc_to(0);
1342 return false;
1343 }
1344 }
1345 return true;
1347 }
1349 #ifndef PRODUCT
1350 //
1351 // Create a counter which counts the number of times this lock is acquired
1352 //
1353 void AbstractLockNode::create_lock_counter(JVMState* state) {
1354 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
1355 }
1356 #endif
1358 void AbstractLockNode::set_eliminated() {
1359 _eliminate = true;
1360 #ifndef PRODUCT
1361 if (_counter) {
1362 // Update the counter to indicate that this lock was eliminated.
1363 // The counter update code will stay around even though the
1364 // optimizer will eliminate the lock operation itself.
1365 _counter->set_tag(NamedCounter::EliminatedLockCounter);
1366 }
1367 #endif
1368 }
1370 //=============================================================================
1371 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1373 // perform any generic optimizations first (returns 'this' or NULL)
1374 Node *result = SafePointNode::Ideal(phase, can_reshape);
1376 // Now see if we can optimize away this lock. We don't actually
1377 // remove the locking here, we simply set the _eliminate flag which
1378 // prevents macro expansion from expanding the lock. Since we don't
1379 // modify the graph, the value returned from this function is the
1380 // one computed above.
1381 if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
1382 //
1383 // If we are locking an unescaped object, the lock/unlock is unnecessary
1384 //
1385 ConnectionGraph *cgr = Compile::current()->congraph();
1386 PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
1387 if (cgr != NULL)
1388 es = cgr->escape_state(obj_node(), phase);
1389 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1390 // Mark it eliminated to update any counters
1391 this->set_eliminated();
1392 return result;
1393 }
1395 //
1396 // Try lock coarsening
1397 //
1398 PhaseIterGVN* iter = phase->is_IterGVN();
1399 if (iter != NULL) {
1401 GrowableArray<AbstractLockNode*> lock_ops;
1403 Node *ctrl = next_control(in(0));
1405 // now search back for a matching Unlock
1406 if (find_matching_unlock(ctrl, this, lock_ops)) {
1407 // found an unlock directly preceding this lock. This is the
1408 // case of single unlock directly control dependent on a
1409 // single lock which is the trivial version of case 1 or 2.
1410 } else if (ctrl->is_Region() ) {
1411 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
1412 // found lock preceded by multiple unlocks along all paths
1413 // joining at this point which is case 3 in description above.
1414 }
1415 } else {
1416 // see if this lock comes from either half of an if and the
1417 // predecessor merges unlocks and the other half of the if
1418 // performs a lock.
1419 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
1420 // found unlock splitting to an if with locks on both branches.
1421 }
1422 }
1424 if (lock_ops.length() > 0) {
1425 // add ourselves to the list of locks to be eliminated.
1426 lock_ops.append(this);
1428 #ifndef PRODUCT
1429 if (PrintEliminateLocks) {
1430 int locks = 0;
1431 int unlocks = 0;
1432 for (int i = 0; i < lock_ops.length(); i++) {
1433 AbstractLockNode* lock = lock_ops.at(i);
1434 if (lock->Opcode() == Op_Lock)
1435 locks++;
1436 else
1437 unlocks++;
1438 if (Verbose) {
1439 lock->dump(1);
1440 }
1441 }
1442 tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1443 }
1444 #endif
1446 // for each of the identified locks, mark them
1447 // as eliminated
1448 for (int i = 0; i < lock_ops.length(); i++) {
1449 AbstractLockNode* lock = lock_ops.at(i);
1451 // Mark it eliminated to update any counters
1452 lock->set_eliminated();
1453 }
1454 } else if (result != NULL && ctrl->is_Region() &&
1455 iter->_worklist.member(ctrl)) {
1456 // We weren't able to find any opportunities but the region this
1457 // lock is control dependent on hasn't been processed yet so put
1458 // this lock back on the worklist so we can check again once any
1459 // region simplification has occurred.
1460 iter->_worklist.push(this);
1461 }
1462 }
1463 }
1465 return result;
1466 }
1468 //=============================================================================
1469 uint UnlockNode::size_of() const { return sizeof(*this); }
1471 //=============================================================================
1472 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1474 // perform any generic optimizations first (returns 'this' or NULL)
1475 Node * result = SafePointNode::Ideal(phase, can_reshape);
1477 // Now see if we can optimize away this unlock. We don't actually
1478 // remove the unlocking here, we simply set the _eliminate flag which
1479 // prevents macro expansion from expanding the unlock. Since we don't
1480 // modify the graph, the value returned from this function is the
1481 // one computed above.
1482 // Escape state is defined after Parse phase.
1483 if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) {
1484 //
1485 // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1486 //
1487 ConnectionGraph *cgr = Compile::current()->congraph();
1488 PointsToNode::EscapeState es = PointsToNode::GlobalEscape;
1489 if (cgr != NULL)
1490 es = cgr->escape_state(obj_node(), phase);
1491 if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) {
1492 // Mark it eliminated to update any counters
1493 this->set_eliminated();
1494 }
1495 }
1496 return result;
1497 }