Tue, 11 Sep 2012 16:20:57 +0200
7195816: NPG: Crash in c1_ValueType - ShouldNotReachHere
Summary: C1 needs knowledge of T_METADATA at the LIR level.
Reviewed-by: kvn, coleenp
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "ci/bcEscapeAnalyzer.hpp"
27 #include "compiler/oopMap.hpp"
28 #include "opto/callnode.hpp"
29 #include "opto/escape.hpp"
30 #include "opto/locknode.hpp"
31 #include "opto/machnode.hpp"
32 #include "opto/matcher.hpp"
33 #include "opto/parse.hpp"
34 #include "opto/regalloc.hpp"
35 #include "opto/regmask.hpp"
36 #include "opto/rootnode.hpp"
37 #include "opto/runtime.hpp"
39 // Portions of code courtesy of Clifford Click
41 // Optimization - Graph Style
43 //=============================================================================
44 uint StartNode::size_of() const { return sizeof(*this); }
45 uint StartNode::cmp( const Node &n ) const
46 { return _domain == ((StartNode&)n)._domain; }
47 const Type *StartNode::bottom_type() const { return _domain; }
48 const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
49 #ifndef PRODUCT
50 void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
51 #endif
53 //------------------------------Ideal------------------------------------------
54 Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
55 return remove_dead_region(phase, can_reshape) ? this : NULL;
56 }
58 //------------------------------calling_convention-----------------------------
59 void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
60 Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
61 }
63 //------------------------------Registers--------------------------------------
64 const RegMask &StartNode::in_RegMask(uint) const {
65 return RegMask::Empty;
66 }
68 //------------------------------match------------------------------------------
69 // Construct projections for incoming parameters, and their RegMask info
70 Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
71 switch (proj->_con) {
72 case TypeFunc::Control:
73 case TypeFunc::I_O:
74 case TypeFunc::Memory:
75 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
76 case TypeFunc::FramePtr:
77 return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
78 case TypeFunc::ReturnAdr:
79 return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
80 case TypeFunc::Parms:
81 default: {
82 uint parm_num = proj->_con - TypeFunc::Parms;
83 const Type *t = _domain->field_at(proj->_con);
84 if (t->base() == Type::Half) // 2nd half of Longs and Doubles
85 return new (match->C, 1) ConNode(Type::TOP);
86 uint ideal_reg = t->ideal_reg();
87 RegMask &rm = match->_calling_convention_mask[parm_num];
88 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
89 }
90 }
91 return NULL;
92 }
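// Rough summary of the matching above: Control, I_O and Memory become
// unmatched machine projections, FramePtr and ReturnAdr get pointer
// projections constrained by the frame-pointer and return-address masks, and
// each incoming Java argument becomes a MachProjNode carrying the mask that
// Matcher::calling_convention() assigned to its slot; the second half of a
// long/double carries no separate value, hence the TOP constant.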
94 //------------------------------StartOSRNode----------------------------------
95 // The method start node for an on stack replacement adapter
97 //------------------------------osr_domain-----------------------------
98 const TypeTuple *StartOSRNode::osr_domain() {
99 const Type **fields = TypeTuple::fields(2);
100 fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer
102 return TypeTuple::make(TypeFunc::Parms+1, fields);
103 }
105 //=============================================================================
106 const char * const ParmNode::names[TypeFunc::Parms+1] = {
107 "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
108 };
110 #ifndef PRODUCT
111 void ParmNode::dump_spec(outputStream *st) const {
112 if( _con < TypeFunc::Parms ) {
113 st->print(names[_con]);
114 } else {
115 st->print("Parm%d: ",_con-TypeFunc::Parms);
116 // Verbose and WizardMode dump bottom_type for all nodes
117 if( !Verbose && !WizardMode ) bottom_type()->dump_on(st);
118 }
119 }
120 #endif
122 uint ParmNode::ideal_reg() const {
123 switch( _con ) {
124 case TypeFunc::Control : // fall through
125 case TypeFunc::I_O : // fall through
126 case TypeFunc::Memory : return 0;
127 case TypeFunc::FramePtr : // fall through
128 case TypeFunc::ReturnAdr: return Op_RegP;
129 default : assert( _con > TypeFunc::Parms, "" );
130 // fall through
131 case TypeFunc::Parms : {
132 // Type of argument being passed
133 const Type *t = in(0)->as_Start()->_domain->field_at(_con);
134 return t->ideal_reg();
135 }
136 }
137 ShouldNotReachHere();
138 return 0;
139 }
141 //=============================================================================
142 ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
143 init_req(TypeFunc::Control,cntrl);
144 init_req(TypeFunc::I_O,i_o);
145 init_req(TypeFunc::Memory,memory);
146 init_req(TypeFunc::FramePtr,frameptr);
147 init_req(TypeFunc::ReturnAdr,retadr);
148 }
150 Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
151 return remove_dead_region(phase, can_reshape) ? this : NULL;
152 }
154 const Type *ReturnNode::Value( PhaseTransform *phase ) const {
155 return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
156 ? Type::TOP
157 : Type::BOTTOM;
158 }
160 // Do we Match on this edge index or not? No edges on return nodes
161 uint ReturnNode::match_edge(uint idx) const {
162 return 0;
163 }
166 #ifndef PRODUCT
167 void ReturnNode::dump_req() const {
168 // Dump the required inputs; the return value input follows a "returns" marker
169 uint i; // Exit value of loop
170 for( i=0; i<req(); i++ ) { // For all required inputs
171 if( i == TypeFunc::Parms ) tty->print("returns");
172 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
173 else tty->print("_ ");
174 }
175 }
176 #endif
178 //=============================================================================
179 RethrowNode::RethrowNode(
180 Node* cntrl,
181 Node* i_o,
182 Node* memory,
183 Node* frameptr,
184 Node* ret_adr,
185 Node* exception
186 ) : Node(TypeFunc::Parms + 1) {
187 init_req(TypeFunc::Control , cntrl );
188 init_req(TypeFunc::I_O , i_o );
189 init_req(TypeFunc::Memory , memory );
190 init_req(TypeFunc::FramePtr , frameptr );
191 init_req(TypeFunc::ReturnAdr, ret_adr);
192 init_req(TypeFunc::Parms , exception);
193 }
195 Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
196 return remove_dead_region(phase, can_reshape) ? this : NULL;
197 }
199 const Type *RethrowNode::Value( PhaseTransform *phase ) const {
200 return (phase->type(in(TypeFunc::Control)) == Type::TOP)
201 ? Type::TOP
202 : Type::BOTTOM;
203 }
205 uint RethrowNode::match_edge(uint idx) const {
206 return 0;
207 }
209 #ifndef PRODUCT
210 void RethrowNode::dump_req() const {
211 // Dump the required inputs; the exception input follows an "exception" marker
212 uint i; // Exit value of loop
213 for( i=0; i<req(); i++ ) { // For all required inputs
214 if( i == TypeFunc::Parms ) tty->print("exception");
215 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
216 else tty->print("_ ");
217 }
218 }
219 #endif
221 //=============================================================================
222 // Do we Match on this edge index or not? Match only target address & method
223 uint TailCallNode::match_edge(uint idx) const {
224 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
225 }
227 //=============================================================================
228 // Do we Match on this edge index or not? Match only target address & oop
229 uint TailJumpNode::match_edge(uint idx) const {
230 return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1;
231 }
233 //=============================================================================
234 JVMState::JVMState(ciMethod* method, JVMState* caller) :
235 _method(method) {
236 assert(method != NULL, "must be valid call site");
237 _reexecute = Reexecute_Undefined;
238 debug_only(_bci = -99); // random garbage value
239 debug_only(_map = (SafePointNode*)-1);
240 _caller = caller;
241 _depth = 1 + (caller == NULL ? 0 : caller->depth());
242 _locoff = TypeFunc::Parms;
243 _stkoff = _locoff + _method->max_locals();
244 _monoff = _stkoff + _method->max_stack();
245 _scloff = _monoff;
246 _endoff = _monoff;
247 _sp = 0;
248 }
249 JVMState::JVMState(int stack_size) :
250 _method(NULL) {
251 _bci = InvocationEntryBci;
252 _reexecute = Reexecute_Undefined;
253 debug_only(_map = (SafePointNode*)-1);
254 _caller = NULL;
255 _depth = 1;
256 _locoff = TypeFunc::Parms;
257 _stkoff = _locoff;
258 _monoff = _stkoff + stack_size;
259 _scloff = _monoff;
260 _endoff = _monoff;
261 _sp = 0;
262 }
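// Sketch of the debug-info layout these offsets describe within a
// SafePointNode's inputs (see grow_stack() and push_monitor() further down):
//
//   [locoff, stkoff)   locals
//   [stkoff, monoff)   expression stack
//   [monoff, scloff)   monitors, two edges each (BoxLock, locked object)
//   [scloff, endoff)   fields of scalar-replaced objects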
264 //--------------------------------of_depth-------------------------------------
265 JVMState* JVMState::of_depth(int d) const {
266 const JVMState* jvmp = this;
267 assert(0 < d && (uint)d <= depth(), "oob");
268 for (int skip = depth() - d; skip > 0; skip--) {
269 jvmp = jvmp->caller();
270 }
271 assert(jvmp->depth() == (uint)d, "found the right one");
272 return (JVMState*)jvmp;
273 }
275 //-----------------------------same_calls_as-----------------------------------
276 bool JVMState::same_calls_as(const JVMState* that) const {
277 if (this == that) return true;
278 if (this->depth() != that->depth()) return false;
279 const JVMState* p = this;
280 const JVMState* q = that;
281 for (;;) {
282 if (p->_method != q->_method) return false;
283 if (p->_method == NULL) return true; // bci is irrelevant
284 if (p->_bci != q->_bci) return false;
285 if (p->_reexecute != q->_reexecute) return false;
286 p = p->caller();
287 q = q->caller();
288 if (p == q) return true;
289 assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
290 }
291 }
293 //------------------------------debug_start------------------------------------
294 uint JVMState::debug_start() const {
295 debug_only(JVMState* jvmroot = of_depth(1));
296 assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
297 return of_depth(1)->locoff();
298 }
300 //-------------------------------debug_end-------------------------------------
301 uint JVMState::debug_end() const {
302 debug_only(JVMState* jvmroot = of_depth(1));
303 assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
304 return endoff();
305 }
307 //------------------------------debug_depth------------------------------------
308 uint JVMState::debug_depth() const {
309 uint total = 0;
310 for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
311 total += jvmp->debug_size();
312 }
313 return total;
314 }
316 #ifndef PRODUCT
318 //------------------------------format_helper----------------------------------
319 // Given an allocation (a Chaitin object) and a Node decide if the Node carries
320 // any defined value or not. If it does, print out the register or constant.
321 static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
322 if (n == NULL) { st->print(" NULL"); return; }
323 if (n->is_SafePointScalarObject()) {
324 // Scalar replacement.
325 SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
326 scobjs->append_if_missing(spobj);
327 int sco_n = scobjs->find(spobj);
328 assert(sco_n >= 0, "");
329 st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
330 return;
331 }
332 if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
333 char buf[50];
334 regalloc->dump_register(n,buf);
335 st->print(" %s%d]=%s",msg,i,buf);
336 } else { // No register, but might be constant
337 const Type *t = n->bottom_type();
338 switch (t->base()) {
339 case Type::Int:
340 st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
341 break;
342 case Type::AnyPtr:
343 assert( t == TypePtr::NULL_PTR, "" );
344 st->print(" %s%d]=#NULL",msg,i);
345 break;
346 case Type::AryPtr:
347 case Type::InstPtr:
348 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
349 break;
350 case Type::KlassPtr:
351 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_klassptr()->klass());
352 break;
353 case Type::MetadataPtr:
354 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_metadataptr()->metadata());
355 break;
356 case Type::NarrowOop:
357 st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop());
358 break;
359 case Type::RawPtr:
360 st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
361 break;
362 case Type::DoubleCon:
363 st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
364 break;
365 case Type::FloatCon:
366 st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
367 break;
368 case Type::Long:
369 st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
370 break;
371 case Type::Half:
372 case Type::Top:
373 st->print(" %s%d]=_",msg,i);
374 break;
375 default: ShouldNotReachHere();
376 }
377 }
378 }
380 //------------------------------format-----------------------------------------
381 void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
382 st->print(" #");
383 if( _method ) {
384 _method->print_short_name(st);
385 st->print(" @ bci:%d ",_bci);
386 } else {
387 st->print_cr(" runtime stub ");
388 return;
389 }
390 if (n->is_MachSafePoint()) {
391 GrowableArray<SafePointScalarObjectNode*> scobjs;
392 MachSafePointNode *mcall = n->as_MachSafePoint();
393 uint i;
394 // Print locals
395 for( i = 0; i < (uint)loc_size(); i++ )
396 format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs );
397 // Print stack
398 for (i = 0; i < (uint)stk_size(); i++) {
399 if ((uint)(_stkoff + i) >= mcall->len())
400 st->print(" oob ");
401 else
402 format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs );
403 }
404 for (i = 0; (int)i < nof_monitors(); i++) {
405 Node *box = mcall->monitor_box(this, i);
406 Node *obj = mcall->monitor_obj(this, i);
407 if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) {
408 box = BoxLockNode::box_node(box);
409 format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs );
410 } else {
411 OptoReg::Name box_reg = BoxLockNode::reg(box);
412 st->print(" MON-BOX%d=%s+%d",
413 i,
414 OptoReg::regname(OptoReg::c_frame_pointer),
415 regalloc->reg2offset(box_reg));
416 }
417 const char* obj_msg = "MON-OBJ[";
418 if (EliminateLocks) {
419 if (BoxLockNode::box_node(box)->is_eliminated())
420 obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
421 }
422 format_helper( regalloc, st, obj, obj_msg, i, &scobjs );
423 }
425 for (i = 0; i < (uint)scobjs.length(); i++) {
426 // Scalar replaced objects.
427 st->print_cr("");
428 st->print(" # ScObj" INT32_FORMAT " ", i);
429 SafePointScalarObjectNode* spobj = scobjs.at(i);
430 ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
431 assert(cik->is_instance_klass() ||
432 cik->is_array_klass(), "Not supported allocation.");
433 ciInstanceKlass *iklass = NULL;
434 if (cik->is_instance_klass()) {
435 cik->print_name_on(st);
436 iklass = cik->as_instance_klass();
437 } else if (cik->is_type_array_klass()) {
438 cik->as_array_klass()->base_element_type()->print_name_on(st);
439 st->print("[%d]", spobj->n_fields());
440 } else if (cik->is_obj_array_klass()) {
441 ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
442 if (cie->is_instance_klass()) {
443 cie->print_name_on(st);
444 } else if (cie->is_type_array_klass()) {
445 cie->as_array_klass()->base_element_type()->print_name_on(st);
446 } else {
447 ShouldNotReachHere();
448 }
449 st->print("[%d]", spobj->n_fields());
450 int ndim = cik->as_array_klass()->dimension() - 1;
451 while (ndim-- > 0) {
452 st->print("[]");
453 }
454 }
455 st->print("={");
456 uint nf = spobj->n_fields();
457 if (nf > 0) {
458 uint first_ind = spobj->first_index();
459 Node* fld_node = mcall->in(first_ind);
460 ciField* cifield;
461 if (iklass != NULL) {
462 st->print(" [");
463 cifield = iklass->nonstatic_field_at(0);
464 cifield->print_name_on(st);
465 format_helper( regalloc, st, fld_node, ":", 0, &scobjs );
466 } else {
467 format_helper( regalloc, st, fld_node, "[", 0, &scobjs );
468 }
469 for (uint j = 1; j < nf; j++) {
470 fld_node = mcall->in(first_ind+j);
471 if (iklass != NULL) {
472 st->print(", [");
473 cifield = iklass->nonstatic_field_at(j);
474 cifield->print_name_on(st);
475 format_helper( regalloc, st, fld_node, ":", j, &scobjs );
476 } else {
477 format_helper( regalloc, st, fld_node, ", [", j, &scobjs );
478 }
479 }
480 }
481 st->print(" }");
482 }
483 }
484 st->print_cr("");
485 if (caller() != NULL) caller()->format(regalloc, n, st);
486 }
489 void JVMState::dump_spec(outputStream *st) const {
490 if (_method != NULL) {
491 bool printed = false;
492 if (!Verbose) {
493 // The JVMS dumps make really, really long lines.
494 // Take out the most boring parts, which are the package prefixes.
495 char buf[500];
496 stringStream namest(buf, sizeof(buf));
497 _method->print_short_name(&namest);
498 if (namest.count() < sizeof(buf)) {
499 const char* name = namest.base();
500 if (name[0] == ' ') ++name;
501 const char* endcn = strchr(name, ':'); // end of class name
502 if (endcn == NULL) endcn = strchr(name, '(');
503 if (endcn == NULL) endcn = name + strlen(name);
504 while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
505 --endcn;
506 st->print(" %s", endcn);
507 printed = true;
508 }
509 }
510 if (!printed)
511 _method->print_short_name(st);
512 st->print(" @ bci:%d",_bci);
513 if(_reexecute == Reexecute_True)
514 st->print(" reexecute");
515 } else {
516 st->print(" runtime stub");
517 }
518 if (caller() != NULL) caller()->dump_spec(st);
519 }
522 void JVMState::dump_on(outputStream* st) const {
523 if (_map && !((uintptr_t)_map & 1)) {
524 if (_map->len() > _map->req()) { // _map->has_exceptions()
525 Node* ex = _map->in(_map->req()); // _map->next_exception()
526 // skip the first one; it's already being printed
527 while (ex != NULL && ex->len() > ex->req()) {
528 ex = ex->in(ex->req()); // ex->next_exception()
529 ex->dump(1);
530 }
531 }
532 _map->dump(2);
533 }
534 st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
535 depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
536 if (_method == NULL) {
537 st->print_cr("(none)");
538 } else {
539 _method->print_name(st);
540 st->cr();
541 if (bci() >= 0 && bci() < _method->code_size()) {
542 st->print(" bc: ");
543 _method->print_codes_on(bci(), bci()+1, st);
544 }
545 }
546 if (caller() != NULL) {
547 caller()->dump_on(st);
548 }
549 }
551 // Extra way to dump a jvms from the debugger,
552 // to avoid a bug with C++ member function calls.
553 void dump_jvms(JVMState* jvms) {
554 jvms->dump();
555 }
556 #endif
558 //--------------------------clone_shallow--------------------------------------
559 JVMState* JVMState::clone_shallow(Compile* C) const {
560 JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
561 n->set_bci(_bci);
562 n->_reexecute = _reexecute;
563 n->set_locoff(_locoff);
564 n->set_stkoff(_stkoff);
565 n->set_monoff(_monoff);
566 n->set_scloff(_scloff);
567 n->set_endoff(_endoff);
568 n->set_sp(_sp);
569 n->set_map(_map);
570 return n;
571 }
573 //---------------------------clone_deep----------------------------------------
574 JVMState* JVMState::clone_deep(Compile* C) const {
575 JVMState* n = clone_shallow(C);
576 for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
577 p->_caller = p->_caller->clone_shallow(C);
578 }
579 assert(n->depth() == depth(), "sanity");
580 assert(n->debug_depth() == debug_depth(), "sanity");
581 return n;
582 }
584 //=============================================================================
585 uint CallNode::cmp( const Node &n ) const
586 { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
587 #ifndef PRODUCT
588 void CallNode::dump_req() const {
589 // Dump the required inputs, enclosed in '(' and ')'
590 uint i; // Exit value of loop
591 for( i=0; i<req(); i++ ) { // For all required inputs
592 if( i == TypeFunc::Parms ) tty->print("(");
593 if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
594 else tty->print("_ ");
595 }
596 tty->print(")");
597 }
599 void CallNode::dump_spec(outputStream *st) const {
600 st->print(" ");
601 tf()->dump_on(st);
602 if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt);
603 if (jvms() != NULL) jvms()->dump_spec(st);
604 }
605 #endif
607 const Type *CallNode::bottom_type() const { return tf()->range(); }
608 const Type *CallNode::Value(PhaseTransform *phase) const {
609 if (phase->type(in(0)) == Type::TOP) return Type::TOP;
610 return tf()->range();
611 }
613 //------------------------------calling_convention-----------------------------
614 void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
615 // Use the standard compiler calling convention
616 Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
617 }
620 //------------------------------match------------------------------------------
621 // Construct projections for control, I/O, memory-fields, ..., and
622 // return result(s) along with their RegMask info
623 Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
624 switch (proj->_con) {
625 case TypeFunc::Control:
626 case TypeFunc::I_O:
627 case TypeFunc::Memory:
628 return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
630 case TypeFunc::Parms+1: // For LONG & DOUBLE returns
631 assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
632 // 2nd half of doubles and longs
633 return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);
635 case TypeFunc::Parms: { // Normal returns
636 uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
637 OptoRegPair regs = is_CallRuntime()
638 ? match->c_return_value(ideal_reg,true) // Calls into C runtime
639 : match-> return_value(ideal_reg,true); // Calls into compiled Java code
640 RegMask rm = RegMask(regs.first());
641 if( OptoReg::is_valid(regs.second()) )
642 rm.Insert( regs.second() );
643 return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg);
644 }
646 case TypeFunc::ReturnAdr:
647 case TypeFunc::FramePtr:
648 default:
649 ShouldNotReachHere();
650 }
651 return NULL;
652 }
654 // Do we Match on this edge index or not? Match no edges
655 uint CallNode::match_edge(uint idx) const {
656 return 0;
657 }
659 //
660 // Determine whether the call could modify the field of the specified
661 // instance at the specified offset.
662 //
663 bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) {
664 const TypeOopPtr *adrInst_t = addr_t->isa_oopptr();
666 // If not an OopPtr or not an instance type, assume the worst.
667 // Note: currently this method is called only for instance types.
668 if (adrInst_t == NULL || !adrInst_t->is_known_instance()) {
669 return true;
670 }
671 // The instance_id is set only for scalar-replaceable allocations which
672 // are not passed as arguments according to Escape Analysis.
673 return false;
674 }
676 // Does this call have a direct reference to n other than debug information?
677 bool CallNode::has_non_debug_use(Node *n) {
678 const TypeTuple * d = tf()->domain();
679 for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
680 Node *arg = in(i);
681 if (arg == n) {
682 return true;
683 }
684 }
685 return false;
686 }
688 // Returns the unique CheckCastPP of a call,
689 // or 'this' if there are several CheckCastPPs,
690 // or NULL if there is none.
691 Node *CallNode::result_cast() {
692 Node *cast = NULL;
694 Node *p = proj_out(TypeFunc::Parms);
695 if (p == NULL)
696 return NULL;
698 for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
699 Node *use = p->fast_out(i);
700 if (use->is_CheckCastPP()) {
701 if (cast != NULL) {
702 return this; // more than 1 CheckCastPP
703 }
704 cast = use;
705 }
706 }
707 return cast;
708 }
711 void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
712 projs->fallthrough_proj = NULL;
713 projs->fallthrough_catchproj = NULL;
714 projs->fallthrough_ioproj = NULL;
715 projs->catchall_ioproj = NULL;
716 projs->catchall_catchproj = NULL;
717 projs->fallthrough_memproj = NULL;
718 projs->catchall_memproj = NULL;
719 projs->resproj = NULL;
720 projs->exobj = NULL;
722 for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
723 ProjNode *pn = fast_out(i)->as_Proj();
724 if (pn->outcnt() == 0) continue;
725 switch (pn->_con) {
726 case TypeFunc::Control:
727 {
728 // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
729 projs->fallthrough_proj = pn;
730 DUIterator_Fast jmax, j = pn->fast_outs(jmax);
731 const Node *cn = pn->fast_out(j);
732 if (cn->is_Catch()) {
733 ProjNode *cpn = NULL;
734 for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
735 cpn = cn->fast_out(k)->as_Proj();
736 assert(cpn->is_CatchProj(), "must be a CatchProjNode");
737 if (cpn->_con == CatchProjNode::fall_through_index)
738 projs->fallthrough_catchproj = cpn;
739 else {
740 assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
741 projs->catchall_catchproj = cpn;
742 }
743 }
744 }
745 break;
746 }
747 case TypeFunc::I_O:
748 if (pn->_is_io_use)
749 projs->catchall_ioproj = pn;
750 else
751 projs->fallthrough_ioproj = pn;
752 for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
753 Node* e = pn->out(j);
754 if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
755 assert(projs->exobj == NULL, "only one");
756 projs->exobj = e;
757 }
758 }
759 break;
760 case TypeFunc::Memory:
761 if (pn->_is_io_use)
762 projs->catchall_memproj = pn;
763 else
764 projs->fallthrough_memproj = pn;
765 break;
766 case TypeFunc::Parms:
767 projs->resproj = pn;
768 break;
769 default:
770 assert(false, "unexpected projection from allocation node.");
771 }
772 }
774 // The resproj may not exist because the result could be ignored
775 // and the exception object may not exist if an exception handler
776 // swallows the exception, but all the others must exist and be found.
777 assert(projs->fallthrough_proj != NULL, "must be found");
778 assert(projs->fallthrough_catchproj != NULL, "must be found");
779 assert(projs->fallthrough_memproj != NULL, "must be found");
780 assert(projs->fallthrough_ioproj != NULL, "must be found");
781 assert(projs->catchall_catchproj != NULL, "must be found");
782 if (separate_io_proj) {
783 assert(projs->catchall_memproj != NULL, "must be found");
784 assert(projs->catchall_ioproj != NULL, "must be found");
785 }
786 }
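// For orientation, the projection shape walked above for a call with an
// exception edge is roughly:
//
//   Call
//    |-- Proj(Control) -> Catch -> CatchProj(fall_through) / CatchProj(catch_all)
//    |-- Proj(I_O)     -> fallthrough use (plus a separate catch-all I/O proj,
//    |                    and possibly a CreateEx, on the exception path)
//    |-- Proj(Memory)  -> fallthrough / catch-all, as for I_O
//    '-- Proj(Parms)   -> the result, if anything uses it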
789 //=============================================================================
790 uint CallJavaNode::size_of() const { return sizeof(*this); }
791 uint CallJavaNode::cmp( const Node &n ) const {
792 CallJavaNode &call = (CallJavaNode&)n;
793 return CallNode::cmp(call) && _method == call._method;
794 }
795 #ifndef PRODUCT
796 void CallJavaNode::dump_spec(outputStream *st) const {
797 if( _method ) _method->print_short_name(st);
798 CallNode::dump_spec(st);
799 }
800 #endif
802 //=============================================================================
803 uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
804 uint CallStaticJavaNode::cmp( const Node &n ) const {
805 CallStaticJavaNode &call = (CallStaticJavaNode&)n;
806 return CallJavaNode::cmp(call);
807 }
809 //----------------------------uncommon_trap_request----------------------------
810 // If this is an uncommon trap, return the request code, else zero.
811 int CallStaticJavaNode::uncommon_trap_request() const {
812 if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
813 return extract_uncommon_trap_request(this);
814 }
815 return 0;
816 }
817 int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
818 #ifndef PRODUCT
819 if (!(call->req() > TypeFunc::Parms &&
820 call->in(TypeFunc::Parms) != NULL &&
821 call->in(TypeFunc::Parms)->is_Con())) {
822 assert(_in_dump_cnt != 0, "OK if dumping");
823 tty->print("[bad uncommon trap]");
824 return 0;
825 }
826 #endif
827 return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
828 }
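// The integer constant fetched above is the packed uncommon-trap request
// (deoptimization reason and action); Deoptimization::format_trap_request()
// in dump_spec() below turns it back into readable text.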
830 #ifndef PRODUCT
831 void CallStaticJavaNode::dump_spec(outputStream *st) const {
832 st->print("# Static ");
833 if (_name != NULL) {
834 st->print("%s", _name);
835 int trap_req = uncommon_trap_request();
836 if (trap_req != 0) {
837 char buf[100];
838 st->print("(%s)",
839 Deoptimization::format_trap_request(buf, sizeof(buf),
840 trap_req));
841 }
842 st->print(" ");
843 }
844 CallJavaNode::dump_spec(st);
845 }
846 #endif
848 //=============================================================================
849 uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
850 uint CallDynamicJavaNode::cmp( const Node &n ) const {
851 CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
852 return CallJavaNode::cmp(call);
853 }
854 #ifndef PRODUCT
855 void CallDynamicJavaNode::dump_spec(outputStream *st) const {
856 st->print("# Dynamic ");
857 CallJavaNode::dump_spec(st);
858 }
859 #endif
861 //=============================================================================
862 uint CallRuntimeNode::size_of() const { return sizeof(*this); }
863 uint CallRuntimeNode::cmp( const Node &n ) const {
864 CallRuntimeNode &call = (CallRuntimeNode&)n;
865 return CallNode::cmp(call) && !strcmp(_name,call._name);
866 }
867 #ifndef PRODUCT
868 void CallRuntimeNode::dump_spec(outputStream *st) const {
869 st->print("# ");
870 st->print(_name);
871 CallNode::dump_spec(st);
872 }
873 #endif
875 //------------------------------calling_convention-----------------------------
876 void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
877 Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
878 }
880 //=============================================================================
881 //------------------------------calling_convention-----------------------------
884 //=============================================================================
885 #ifndef PRODUCT
886 void CallLeafNode::dump_spec(outputStream *st) const {
887 st->print("# ");
888 st->print(_name);
889 CallNode::dump_spec(st);
890 }
891 #endif
893 //=============================================================================
895 void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
896 assert(verify_jvms(jvms), "jvms must match");
897 int loc = jvms->locoff() + idx;
898 if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
899 // If current local idx is top then local idx - 1 could
900 // be a long/double that needs to be killed since top could
901 // represent the 2nd half of the long/double.
902 uint ideal = in(loc -1)->ideal_reg();
903 if (ideal == Op_RegD || ideal == Op_RegL) {
904 // set other (low index) half to top
905 set_req(loc - 1, in(loc));
906 }
907 }
908 set_req(loc, c);
909 }
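// Example of the kill above: if local #2 holds a long (so local #3 is top,
// standing in for its second half) and an int is now stored into local #3,
// the stale long in local #2 is overwritten with top as well, because half
// of it has just been clobbered.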
911 uint SafePointNode::size_of() const { return sizeof(*this); }
912 uint SafePointNode::cmp( const Node &n ) const {
913 return (&n == this); // Always fail except on self
914 }
916 //-------------------------set_next_exception----------------------------------
917 void SafePointNode::set_next_exception(SafePointNode* n) {
918 assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
919 if (len() == req()) {
920 if (n != NULL) add_prec(n);
921 } else {
922 set_prec(req(), n);
923 }
924 }
927 //----------------------------next_exception-----------------------------------
928 SafePointNode* SafePointNode::next_exception() const {
929 if (len() == req()) {
930 return NULL;
931 } else {
932 Node* n = in(req());
933 assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
934 return (SafePointNode*) n;
935 }
936 }
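// Exception states are chained off a SafePoint through a single precedence
// edge in slot req(), so len() == req() means "no exceptions"; this is the
// same test JVMState::dump_on() performs with _map->len() > _map->req().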
939 //------------------------------Ideal------------------------------------------
940 // Skip over any collapsed Regions
941 Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
942 return remove_dead_region(phase, can_reshape) ? this : NULL;
943 }
945 //------------------------------Identity---------------------------------------
946 // Remove obviously duplicate safepoints
947 Node *SafePointNode::Identity( PhaseTransform *phase ) {
949 // If you have back to back safepoints, remove one
950 if( in(TypeFunc::Control)->is_SafePoint() )
951 return in(TypeFunc::Control);
953 if( in(0)->is_Proj() ) {
954 Node *n0 = in(0)->in(0);
955 // Check if it is a call projection (except Leaf Call)
956 if( n0->is_Catch() ) {
957 n0 = n0->in(0)->in(0);
958 assert( n0->is_Call(), "expect a call here" );
959 }
960 if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
961 // Useless Safepoint, so remove it
962 return in(TypeFunc::Control);
963 }
964 }
966 return this;
967 }
969 //------------------------------Value------------------------------------------
970 const Type *SafePointNode::Value( PhaseTransform *phase ) const {
971 if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
972 if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
973 return Type::CONTROL;
974 }
976 #ifndef PRODUCT
977 void SafePointNode::dump_spec(outputStream *st) const {
978 st->print(" SafePoint ");
979 }
980 #endif
982 const RegMask &SafePointNode::in_RegMask(uint idx) const {
983 if( idx < TypeFunc::Parms ) return RegMask::Empty;
984 // Values outside the domain represent debug info
985 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
986 }
987 const RegMask &SafePointNode::out_RegMask() const {
988 return RegMask::Empty;
989 }
992 void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
993 assert((int)grow_by > 0, "sanity");
994 int monoff = jvms->monoff();
995 int scloff = jvms->scloff();
996 int endoff = jvms->endoff();
997 assert(endoff == (int)req(), "no other states or debug info after me");
998 Node* top = Compile::current()->top();
999 for (uint i = 0; i < grow_by; i++) {
1000 ins_req(monoff, top);
1001 }
1002 jvms->set_monoff(monoff + grow_by);
1003 jvms->set_scloff(scloff + grow_by);
1004 jvms->set_endoff(endoff + grow_by);
1005 }
1007 void SafePointNode::push_monitor(const FastLockNode *lock) {
1008 // Add a LockNode, which points to both the original BoxLockNode (the
1009 // stack space for the monitor) and the Object being locked.
1010 const int MonitorEdges = 2;
1011 assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
1012 assert(req() == jvms()->endoff(), "correct sizing");
1013 int nextmon = jvms()->scloff();
1014 if (GenerateSynchronizationCode) {
1015 add_req(lock->box_node());
1016 add_req(lock->obj_node());
1017 } else {
1018 Node* top = Compile::current()->top();
1019 add_req(top);
1020 add_req(top);
1021 }
1022 jvms()->set_scloff(nextmon+MonitorEdges);
1023 jvms()->set_endoff(req());
1024 }
1026 void SafePointNode::pop_monitor() {
1027 // Delete last monitor from debug info
1028 debug_only(int num_before_pop = jvms()->nof_monitors());
1029 const int MonitorEdges = (1<<JVMState::logMonitorEdges);
1030 int scloff = jvms()->scloff();
1031 int endoff = jvms()->endoff();
1032 int new_scloff = scloff - MonitorEdges;
1033 int new_endoff = endoff - MonitorEdges;
1034 jvms()->set_scloff(new_scloff);
1035 jvms()->set_endoff(new_endoff);
1036 while (scloff > new_scloff) del_req(--scloff);
1037 assert(jvms()->nof_monitors() == num_before_pop-1, "");
1038 }
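// Each monitor thus occupies MonitorEdges (= 2) debug inputs, the BoxLockNode
// followed by the locked object; push_monitor() and pop_monitor() keep
// jvms()->scloff() and jvms()->endoff() in step with those edges.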
1040 Node *SafePointNode::peek_monitor_box() const {
1041 int mon = jvms()->nof_monitors() - 1;
1042 assert(mon >= 0, "must have a monitor");
1043 return monitor_box(jvms(), mon);
1044 }
1046 Node *SafePointNode::peek_monitor_obj() const {
1047 int mon = jvms()->nof_monitors() - 1;
1048 assert(mon >= 0, "must have a monitor");
1049 return monitor_obj(jvms(), mon);
1050 }
1052 // Do we Match on this edge index or not? Match no edges
1053 uint SafePointNode::match_edge(uint idx) const {
1054 if( !needs_polling_address_input() )
1055 return 0;
1057 return (TypeFunc::Parms == idx);
1058 }
1060 //============== SafePointScalarObjectNode ==============
1062 SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
1063 #ifdef ASSERT
1064 AllocateNode* alloc,
1065 #endif
1066 uint first_index,
1067 uint n_fields) :
1068 TypeNode(tp, 1), // 1 control input -- seems required. Get from root.
1069 #ifdef ASSERT
1070 _alloc(alloc),
1071 #endif
1072 _first_index(first_index),
1073 _n_fields(n_fields)
1074 {
1075 init_class_id(Class_SafePointScalarObject);
1076 }
1078 // Do not allow value-numbering for SafePointScalarObject node.
1079 uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
1080 uint SafePointScalarObjectNode::cmp( const Node &n ) const {
1081 return (&n == this); // Always fail except on self
1082 }
1084 uint SafePointScalarObjectNode::ideal_reg() const {
1085 return 0; // No matching to machine instruction
1086 }
1088 const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
1089 return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
1090 }
1092 const RegMask &SafePointScalarObjectNode::out_RegMask() const {
1093 return RegMask::Empty;
1094 }
1096 uint SafePointScalarObjectNode::match_edge(uint idx) const {
1097 return 0;
1098 }
1100 SafePointScalarObjectNode*
1101 SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const {
1102 void* cached = (*sosn_map)[(void*)this];
1103 if (cached != NULL) {
1104 return (SafePointScalarObjectNode*)cached;
1105 }
1106 SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
1107 res->_first_index += jvms_adj;
1108 sosn_map->Insert((void*)this, (void*)res);
1109 return res;
1110 }
1113 #ifndef PRODUCT
1114 void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
1115 st->print(" # fields@[%d..%d]", first_index(),
1116 first_index() + n_fields() - 1);
1117 }
1119 #endif
1121 //=============================================================================
1122 uint AllocateNode::size_of() const { return sizeof(*this); }
1124 AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
1125 Node *ctrl, Node *mem, Node *abio,
1126 Node *size, Node *klass_node, Node *initial_test)
1127 : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
1128 {
1129 init_class_id(Class_Allocate);
1130 init_flags(Flag_is_macro);
1131 _is_scalar_replaceable = false;
1132 Node *topnode = C->top();
1134 init_req( TypeFunc::Control , ctrl );
1135 init_req( TypeFunc::I_O , abio );
1136 init_req( TypeFunc::Memory , mem );
1137 init_req( TypeFunc::ReturnAdr, topnode );
1138 init_req( TypeFunc::FramePtr , topnode );
1139 init_req( AllocSize , size);
1140 init_req( KlassNode , klass_node);
1141 init_req( InitialTest , initial_test);
1142 init_req( ALength , topnode);
1143 C->add_macro_node(this);
1144 }
1146 //=============================================================================
1147 uint AllocateArrayNode::size_of() const { return sizeof(*this); }
1149 Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1150 if (remove_dead_region(phase, can_reshape)) return this;
1151 // Don't bother trying to transform a dead node
1152 if (in(0) && in(0)->is_top()) return NULL;
1154 const Type* type = phase->type(Ideal_length());
1155 if (type->isa_int() && type->is_int()->_hi < 0) {
1156 if (can_reshape) {
1157 PhaseIterGVN *igvn = phase->is_IterGVN();
1158 // Unreachable fall through path (negative array length),
1159 // the allocation can only throw so disconnect it.
1160 Node* proj = proj_out(TypeFunc::Control);
1161 Node* catchproj = NULL;
1162 if (proj != NULL) {
1163 for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
1164 Node *cn = proj->fast_out(i);
1165 if (cn->is_Catch()) {
1166 catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
1167 break;
1168 }
1169 }
1170 }
1171 if (catchproj != NULL && catchproj->outcnt() > 0 &&
1172 (catchproj->outcnt() > 1 ||
1173 catchproj->unique_out()->Opcode() != Op_Halt)) {
1174 assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
1175 Node* nproj = catchproj->clone();
1176 igvn->register_new_node_with_optimizer(nproj);
1178 Node *frame = new (phase->C, 1) ParmNode( phase->C->start(), TypeFunc::FramePtr );
1179 frame = phase->transform(frame);
1180 // Halt & Catch Fire
1181 Node *halt = new (phase->C, TypeFunc::Parms) HaltNode( nproj, frame );
1182 phase->C->root()->add_req(halt);
1183 phase->transform(halt);
1185 igvn->replace_node(catchproj, phase->C->top());
1186 return this;
1187 }
1188 } else {
1189 // Can't correct it during regular GVN so register for IGVN
1190 phase->C->record_for_igvn(this);
1191 }
1192 }
1193 return NULL;
1194 }
1196 // Retrieve the length from the AllocateArrayNode. Narrow the type with a
1197 // CastII, if appropriate. If we are not allowed to create new nodes, and
1198 // a CastII is appropriate, return NULL.
1199 Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
1200 Node *length = in(AllocateNode::ALength);
1201 assert(length != NULL, "length is not null");
1203 const TypeInt* length_type = phase->find_int_type(length);
1204 const TypeAryPtr* ary_type = oop_type->isa_aryptr();
1206 if (ary_type != NULL && length_type != NULL) {
1207 const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
1208 if (narrow_length_type != length_type) {
1209 // Assert one of:
1210 // - the narrow_length is 0
1211 // - the narrow_length is not wider than length
1212 assert(narrow_length_type == TypeInt::ZERO ||
1213 (narrow_length_type->_hi <= length_type->_hi &&
1214 narrow_length_type->_lo >= length_type->_lo),
1215 "narrow type must be narrower than length type");
1217 // Return NULL if new nodes are not allowed
1218 if (!allow_new_nodes) return NULL;
1219 // Create a cast which is control dependent on the initialization to
1220 // propagate the fact that the array length must be positive.
1221 length = new (phase->C, 2) CastIINode(length, narrow_length_type);
1222 length->set_req(0, initialization()->proj_out(0));
1223 }
1224 }
1226 return length;
1227 }
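// Put differently: once the allocation's initialization projection is reached,
// users of the length obtained through this node see a CastII pinned to that
// control, so they may rely on the narrowed (valid array size) range rather
// than the raw int that fed the allocation.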
1229 //=============================================================================
1230 uint LockNode::size_of() const { return sizeof(*this); }
1232 // Redundant lock elimination
1233 //
1234 // There are various patterns of locking where we release and
1235 // immediately reacquire a lock in a piece of code where no operations
1236 // occur in between that would be observable. In those cases we can
1237 // skip releasing and reacquiring the lock without violating any
1238 // fairness requirements. Doing this around a loop could cause a lock
1239 // to be held for a very long time so we concentrate on non-looping
1240 // control flow. We also require that the operations are fully
1241 // redundant, meaning that we don't introduce new lock operations on
1242 // some paths so as to be able to eliminate them on others, a la PRE. This
1243 // would probably require some more extensive graph manipulation to
1244 // guarantee that the memory edges were all handled correctly.
1245 //
1246 // Assuming p is a simple predicate which can't trap in any way and s
1247 // is a synchronized method consider this code:
1248 //
1249 // s();
1250 // if (p)
1251 // s();
1252 // else
1253 // s();
1254 // s();
1255 //
1256 // 1. The unlocks of the first call to s can be eliminated if the
1257 // locks inside the then and else branches are eliminated.
1258 //
1259 // 2. The unlocks of the then and else branches can be eliminated if
1260 // the lock of the final call to s is eliminated.
1261 //
1262 // Either of these cases subsumes the simple case of sequential control flow
1263 //
1264 // Additionally we can eliminate versions without the else case:
1265 //
1266 // s();
1267 // if (p)
1268 // s();
1269 // s();
1270 //
1271 // 3. In this case we eliminate the unlock of the first s, the lock
1272 // and unlock in the then case and the lock in the final s.
1273 //
1274 // Note also that in all these cases the then/else pieces don't have
1275 // to be trivial as long as they begin and end with synchronization
1276 // operations.
1277 //
1278 // s();
1279 // if (p)
1280 // s();
1281 // f();
1282 // s();
1283 // s();
1284 //
1285 // The code will work properly for this case, leaving in the unlock
1286 // before the call to f and the relock after it.
1287 //
1288 // A potentially interesting case which isn't handled here is when the
1289 // locking is partially redundant.
1290 //
1291 // s();
1292 // if (p)
1293 // s();
1294 //
1295 // This could be eliminated by putting unlocking on the else case and
1296 // eliminating the first unlock and the lock in the then side.
1297 // Alternatively the unlock could be moved out of the then side so it
1298 // was after the merge and the first unlock and second lock
1299 // eliminated. This might require less manipulation of the memory
1300 // state to get correct.
1301 //
1302 // Additionally we might allow work between an unlock and a lock before
1303 // giving up eliminating the locks. The current code disallows any
1304 // conditional control flow between these operations. A formulation
1305 // similar to partial redundancy elimination computing the
1306 // availability of unlocking and the anticipatability of locking at a
1307 // program point would allow detection of fully redundant locking with
1308 // some amount of work in between. I'm not sure how often I really
1309 // think that would occur though. Most of the cases I've seen
1310 // indicate it's likely non-trivial work would occur in between.
1311 // There may be other more complicated constructs where we could
1312 // eliminate locking but I haven't seen any others appear as hot or
1313 // interesting.
1314 //
1315 // Locking and unlocking have a canonical form in ideal that looks
1316 // roughly like this:
1317 //
1318 // <obj>
1319 // | \\------+
1320 // | \ \
1321 // | BoxLock \
1322 // | | | \
1323 // | | \ \
1324 // | | FastLock
1325 // | | /
1326 // | | /
1327 // | | |
1328 //
1329 // Lock
1330 // |
1331 // Proj #0
1332 // |
1333 // MembarAcquire
1334 // |
1335 // Proj #0
1336 //
1337 // MembarRelease
1338 // |
1339 // Proj #0
1340 // |
1341 // Unlock
1342 // |
1343 // Proj #0
1344 //
1345 //
1346 // This code proceeds by processing Lock nodes during PhaseIterGVN
1347 // and searching back through its control for the proper code
1348 // patterns. Once it finds a set of lock and unlock operations to
1349 // eliminate, they are marked as eliminatable, which causes the
1350 // expansion of the Lock and Unlock macro nodes to make the operation a NOP
1351 //
1352 //=============================================================================
1354 //
1355 // Utility function to skip over uninteresting control nodes. Nodes skipped are:
1356 // - copy regions. (These may not have been optimized away yet.)
1357 // - eliminated locking nodes
1358 //
1359 static Node *next_control(Node *ctrl) {
1360 if (ctrl == NULL)
1361 return NULL;
1362 while (1) {
1363 if (ctrl->is_Region()) {
1364 RegionNode *r = ctrl->as_Region();
1365 Node *n = r->is_copy();
1366 if (n == NULL)
1367 break; // hit a region, return it
1368 else
1369 ctrl = n;
1370 } else if (ctrl->is_Proj()) {
1371 Node *in0 = ctrl->in(0);
1372 if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
1373 ctrl = in0->in(0);
1374 } else {
1375 break;
1376 }
1377 } else {
1378 break; // found an interesting control
1379 }
1380 }
1381 return ctrl;
1382 }
1383 //
1384 // Given a control, see if it's the control projection of an Unlock which
1385 // is operating on the same object as lock.
1386 //
1387 bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
1388 GrowableArray<AbstractLockNode*> &lock_ops) {
1389 ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
1390 if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
1391 Node *n = ctrl_proj->in(0);
1392 if (n != NULL && n->is_Unlock()) {
1393 UnlockNode *unlock = n->as_Unlock();
1394 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1395 BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
1396 !unlock->is_eliminated()) {
1397 lock_ops.append(unlock);
1398 return true;
1399 }
1400 }
1401 }
1402 return false;
1403 }
1405 //
1406 // Find the lock matching an unlock. Returns null if a safepoint
1407 // or complicated control is encountered first.
1408 LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
1409 LockNode *lock_result = NULL;
1410 // find the matching lock, or an intervening safepoint
1411 Node *ctrl = next_control(unlock->in(0));
1412 while (1) {
1413 assert(ctrl != NULL, "invalid control graph");
1414 assert(!ctrl->is_Start(), "missing lock for unlock");
1415 if (ctrl->is_top()) break; // dead control path
1416 if (ctrl->is_Proj()) ctrl = ctrl->in(0);
1417 if (ctrl->is_SafePoint()) {
1418 break; // found a safepoint (may be the lock we are searching for)
1419 } else if (ctrl->is_Region()) {
1420 // Check for a simple diamond pattern. Punt on anything more complicated
1421 if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
1422 Node *in1 = next_control(ctrl->in(1));
1423 Node *in2 = next_control(ctrl->in(2));
1424 if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
1425 (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
1426 ctrl = next_control(in1->in(0)->in(0));
1427 } else {
1428 break;
1429 }
1430 } else {
1431 break;
1432 }
1433 } else {
1434 ctrl = next_control(ctrl->in(0)); // keep searching
1435 }
1436 }
1437 if (ctrl->is_Lock()) {
1438 LockNode *lock = ctrl->as_Lock();
1439 if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
1440 BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
1441 lock_result = lock;
1442 }
1443 }
1444 return lock_result;
1445 }
1447 // This code corresponds to case 3 above.
1449 bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
1450 GrowableArray<AbstractLockNode*> &lock_ops) {
1451 Node* if_node = node->in(0);
1452 bool if_true = node->is_IfTrue();
1454 if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
1455 Node *lock_ctrl = next_control(if_node->in(0));
1456 if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
1457 Node* lock1_node = NULL;
1458 ProjNode* proj = if_node->as_If()->proj_out(!if_true);
1459 if (if_true) {
1460 if (proj->is_IfFalse() && proj->outcnt() == 1) {
1461 lock1_node = proj->unique_out();
1462 }
1463 } else {
1464 if (proj->is_IfTrue() && proj->outcnt() == 1) {
1465 lock1_node = proj->unique_out();
1466 }
1467 }
1468 if (lock1_node != NULL && lock1_node->is_Lock()) {
1469 LockNode *lock1 = lock1_node->as_Lock();
1470 if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
1471 BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
1472 !lock1->is_eliminated()) {
1473 lock_ops.append(lock1);
1474 return true;
1475 }
1476 }
1477 }
1478 }
1480 lock_ops.trunc_to(0);
1481 return false;
1482 }
1484 bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
1485 GrowableArray<AbstractLockNode*> &lock_ops) {
1486 // check each control merging at this point for a matching unlock.
1487 // in(0) should be self edge so skip it.
1488 for (int i = 1; i < (int)region->req(); i++) {
1489 Node *in_node = next_control(region->in(i));
1490 if (in_node != NULL) {
1491 if (find_matching_unlock(in_node, lock, lock_ops)) {
1492 // found a match so keep on checking.
1493 continue;
1494 } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
1495 continue;
1496 }
1498 // If we fall through to here then it was some kind of node we
1499 // don't understand or there wasn't a matching unlock, so give
1500 // up trying to merge locks.
1501 lock_ops.trunc_to(0);
1502 return false;
1503 }
1504 }
1505 return true;
1507 }
1509 #ifndef PRODUCT
1510 //
1511 // Create a counter which counts the number of times this lock is acquired
1512 //
1513 void AbstractLockNode::create_lock_counter(JVMState* state) {
1514 _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
1515 }
1517 void AbstractLockNode::set_eliminated_lock_counter() {
1518 if (_counter) {
1519 // Update the counter to indicate that this lock was eliminated.
1520 // The counter update code will stay around even though the
1521 // optimizer will eliminate the lock operation itself.
1522 _counter->set_tag(NamedCounter::EliminatedLockCounter);
1523 }
1524 }
1525 #endif
1527 //=============================================================================
1528 Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1530 // perform any generic optimizations first (returns 'this' or NULL)
1531 Node *result = SafePointNode::Ideal(phase, can_reshape);
1532 if (result != NULL) return result;
1533 // Don't bother trying to transform a dead node
1534 if (in(0) && in(0)->is_top()) return NULL;
1536 // Now see if we can optimize away this lock. We don't actually
1537 // remove the locking here, we simply set the _eliminate flag which
1538 // prevents macro expansion from expanding the lock. Since we don't
1539 // modify the graph, the value returned from this function is the
1540 // one computed above.
1541 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1542 //
1543 // If we are locking an unescaped object, the lock/unlock is unnecessary
1544 //
1545 ConnectionGraph *cgr = phase->C->congraph();
1546 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1547 assert(!is_eliminated() || is_coarsened(), "sanity");
1548 // The lock could be marked eliminated by lock coarsening
1549 // code during the first IGVN pass before EA. Replace the coarsened flag
1550 // with the non-escaping-object flag so that all associated locks/unlocks are eliminated.
1551 this->set_non_esc_obj();
1552 return result;
1553 }
1555 //
1556 // Try lock coarsening
1557 //
1558 PhaseIterGVN* iter = phase->is_IterGVN();
1559 if (iter != NULL && !is_eliminated()) {
1561 GrowableArray<AbstractLockNode*> lock_ops;
1563 Node *ctrl = next_control(in(0));
1565 // now search back for a matching Unlock
1566 if (find_matching_unlock(ctrl, this, lock_ops)) {
1567 // found an unlock directly preceding this lock. This is the
1568 // case of single unlock directly control dependent on a
1569 // single lock which is the trivial version of case 1 or 2.
1570 } else if (ctrl->is_Region() ) {
1571 if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
1572 // found lock preceded by multiple unlocks along all paths
1573 // joining at this point which is case 3 in description above.
1574 }
1575 } else {
1576 // see if this lock comes from either half of an if and the
1577 // predecessor merges unlocks and the other half of the if
1578 // performs a lock.
1579 if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
1580 // found unlock splitting to an if with locks on both branches.
1581 }
1582 }
1584 if (lock_ops.length() > 0) {
1585 // add ourselves to the list of locks to be eliminated.
1586 lock_ops.append(this);
1588 #ifndef PRODUCT
1589 if (PrintEliminateLocks) {
1590 int locks = 0;
1591 int unlocks = 0;
1592 for (int i = 0; i < lock_ops.length(); i++) {
1593 AbstractLockNode* lock = lock_ops.at(i);
1594 if (lock->Opcode() == Op_Lock)
1595 locks++;
1596 else
1597 unlocks++;
1598 if (Verbose) {
1599 lock->dump(1);
1600 }
1601 }
1602 tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
1603 }
1604 #endif
1606 // for each of the identified locks, mark them
1607 // as eliminatable
1608 for (int i = 0; i < lock_ops.length(); i++) {
1609 AbstractLockNode* lock = lock_ops.at(i);
1611 // Mark it eliminated by coarsening and update any counters
1612 lock->set_coarsened();
1613 }
1614 } else if (ctrl->is_Region() &&
1615 iter->_worklist.member(ctrl)) {
1616 // We weren't able to find any opportunities but the region this
1617 // lock is control dependent on hasn't been processed yet so put
1618 // this lock back on the worklist so we can check again once any
1619 // region simplification has occurred.
1620 iter->_worklist.push(this);
1621 }
1622 }
1623 }
1625 return result;
1626 }
1628 //=============================================================================
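// Illustrative (hypothetical) Java shape that is_nested_lock_region() below is
// probing for: an inner lock on an object whose outer monitor, recorded in this
// safepoint's JVMState chain, lives in an older (smaller) stack slot.
//
//   synchronized (obj) {       // outer monitor, smaller stack slot
//     ...
//     synchronized (obj) {     // this inner LockNode forms a nested lock region
//       ...
//     }
//   }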
1629 bool LockNode::is_nested_lock_region() {
1630 BoxLockNode* box = box_node()->as_BoxLock();
1631 int stk_slot = box->stack_slot();
1632 if (stk_slot <= 0)
1633 return false; // External lock, or it is not a Box (Phi node).
1635 // Ignore complex cases: merged locks or multiple locks.
1636 Node* obj = obj_node();
1637 LockNode* unique_lock = NULL;
1638 if (!box->is_simple_lock_region(&unique_lock, obj) ||
1639 (unique_lock != this)) {
1640 return false;
1641 }
1643 // Look for an external lock on the same object.
1644 SafePointNode* sfn = this->as_SafePoint();
1645 JVMState* youngest_jvms = sfn->jvms();
1646 int max_depth = youngest_jvms->depth();
1647 for (int depth = 1; depth <= max_depth; depth++) {
1648 JVMState* jvms = youngest_jvms->of_depth(depth);
1649 int num_mon = jvms->nof_monitors();
1650 // Loop over monitors
1651 for (int idx = 0; idx < num_mon; idx++) {
1652 Node* obj_node = sfn->monitor_obj(jvms, idx);
1653 BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
1654 if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
1655 return true;
1656 }
1657 }
1658 }
1659 return false;
1660 }
1662 //=============================================================================
1663 uint UnlockNode::size_of() const { return sizeof(*this); }
1665 //=============================================================================
1666 Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {
1668 // perform any generic optimizations first (returns 'this' or NULL)
1669 Node *result = SafePointNode::Ideal(phase, can_reshape);
1670 if (result != NULL) return result;
1671 // Don't bother trying to transform a dead node
1672 if (in(0) && in(0)->is_top()) return NULL;
1674 // Now see if we can optimize away this unlock. We don't actually
1675 // remove the unlocking here, we simply set the _eliminate flag which
1676 // prevents macro expansion from expanding the unlock. Since we don't
1677 // modify the graph, the value returned from this function is the
1678 // one computed above.
1679 // Escape state is defined after Parse phase.
1680 if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
1681 //
1682 // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
1683 //
1684 ConnectionGraph *cgr = phase->C->congraph();
1685 if (cgr != NULL && cgr->not_global_escape(obj_node())) {
1686 assert(!is_eliminated() || is_coarsened(), "sanity");
1687 // The lock could be marked eliminated by lock coarsening
1688 // code during the first IGVN pass before EA. Replace the coarsened flag
1689 // with the non-escaping-object flag so that all associated locks/unlocks are eliminated.
1690 this->set_non_esc_obj();
1691 }
1692 }
1693 return result;
1694 }