src/share/vm/opto/callnode.cpp

changeset:   6500:4345c6a92f35
author:      goetz
date:        Mon, 06 Jan 2014 11:02:21 +0100
parent:      6499:ad3b94907eed
child:       6503:a9becfeecd1b
permissions: -rw-r--r--

8031188: Fix for 8029015: PPC64 (part 216): opto: trap based null and range checks
Summary: Swap the Projs in the block list so that the new block is added behind the proper node.
Reviewed-by: kvn
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "ci/bcEscapeAnalyzer.hpp"
#include "compiler/oopMap.hpp"
#include "opto/callGenerator.hpp"
#include "opto/callnode.hpp"
#include "opto/escape.hpp"
#include "opto/locknode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/parse.hpp"
#include "opto/regalloc.hpp"
#include "opto/regmask.hpp"
#include "opto/rootnode.hpp"
#include "opto/runtime.hpp"

// Portions of code courtesy of Clifford Click

// Optimization - Graph Style

//=============================================================================
uint StartNode::size_of() const { return sizeof(*this); }
uint StartNode::cmp( const Node &n ) const
{ return _domain == ((StartNode&)n)._domain; }
const Type *StartNode::bottom_type() const { return _domain; }
const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; }
#ifndef PRODUCT
void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);}
#endif

//------------------------------Ideal------------------------------------------
Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------calling_convention-----------------------------
void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, false );
}

//------------------------------Registers--------------------------------------
const RegMask &StartNode::in_RegMask(uint) const {
  return RegMask::Empty;
}

//------------------------------match------------------------------------------
// Construct projections for incoming parameters, and their RegMask info
Node *StartNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);
  case TypeFunc::FramePtr:
    return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP);
  case TypeFunc::ReturnAdr:
    return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP);
  case TypeFunc::Parms:
  default: {
      uint parm_num = proj->_con - TypeFunc::Parms;
      const Type *t = _domain->field_at(proj->_con);
      if (t->base() == Type::Half)  // 2nd half of Longs and Doubles
        return new (match->C) ConNode(Type::TOP);
      uint ideal_reg = t->ideal_reg();
      RegMask &rm = match->_calling_convention_mask[parm_num];
      return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
    }
  }
  return NULL;
}

//------------------------------StartOSRNode----------------------------------
// The method start node for an on stack replacement adapter

//------------------------------osr_domain-----------------------------
const TypeTuple *StartOSRNode::osr_domain() {
  const Type **fields = TypeTuple::fields(2);
  fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM;  // address of osr buffer

  return TypeTuple::make(TypeFunc::Parms+1, fields);
}

//=============================================================================
const char * const ParmNode::names[TypeFunc::Parms+1] = {
  "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms"
};

#ifndef PRODUCT
void ParmNode::dump_spec(outputStream *st) const {
  if( _con < TypeFunc::Parms ) {
    st->print(names[_con]);
  } else {
    st->print("Parm%d: ",_con-TypeFunc::Parms);
    // Verbose and WizardMode dump bottom_type for all nodes
    if( !Verbose && !WizardMode )   bottom_type()->dump_on(st);
  }
}
#endif

uint ParmNode::ideal_reg() const {
  switch( _con ) {
  case TypeFunc::Control  : // fall through
  case TypeFunc::I_O      : // fall through
  case TypeFunc::Memory   : return 0;
  case TypeFunc::FramePtr : // fall through
  case TypeFunc::ReturnAdr: return Op_RegP;
  default                 : assert( _con > TypeFunc::Parms, "" );
    // fall through
  case TypeFunc::Parms    : {
    // Type of argument being passed
    const Type *t = in(0)->as_Start()->_domain->field_at(_con);
    return t->ideal_reg();
  }
  }
  ShouldNotReachHere();
  return 0;
}

//=============================================================================
ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) {
  init_req(TypeFunc::Control,cntrl);
  init_req(TypeFunc::I_O,i_o);
  init_req(TypeFunc::Memory,memory);
  init_req(TypeFunc::FramePtr,frameptr);
  init_req(TypeFunc::ReturnAdr,retadr);
}

Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *ReturnNode::Value( PhaseTransform *phase ) const {
  return ( phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

// Do we Match on this edge index or not?  No edges on return nodes
uint ReturnNode::match_edge(uint idx) const {
  return 0;
}


#ifndef PRODUCT
void ReturnNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("returns");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
RethrowNode::RethrowNode(
  Node* cntrl,
  Node* i_o,
  Node* memory,
  Node* frameptr,
  Node* ret_adr,
  Node* exception
) : Node(TypeFunc::Parms + 1) {
  init_req(TypeFunc::Control  , cntrl    );
  init_req(TypeFunc::I_O      , i_o      );
  init_req(TypeFunc::Memory   , memory   );
  init_req(TypeFunc::FramePtr , frameptr );
  init_req(TypeFunc::ReturnAdr, ret_adr);
  init_req(TypeFunc::Parms    , exception);
}

Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

const Type *RethrowNode::Value( PhaseTransform *phase ) const {
  return (phase->type(in(TypeFunc::Control)) == Type::TOP)
    ? Type::TOP
    : Type::BOTTOM;
}

uint RethrowNode::match_edge(uint idx) const {
  return 0;
}

#ifndef PRODUCT
void RethrowNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("exception");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
}
#endif

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & method
uint TailCallNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
// Do we Match on this edge index or not?  Match only target address & oop
uint TailJumpNode::match_edge(uint idx) const {
  return TypeFunc::Parms <= idx  &&  idx <= TypeFunc::Parms+1;
}

//=============================================================================
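// A JVMState records where one frame's debug info lives inside a
// SafePointNode's input array: locals begin at _locoff, the expression
// stack at _stkoff, monitors at _monoff, scalar-replaced objects at
// _scloff, and _endoff marks the end of the debug info.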
JVMState::JVMState(ciMethod* method, JVMState* caller) :
  _method(method) {
  assert(method != NULL, "must be valid call site");
  _reexecute = Reexecute_Undefined;
  debug_only(_bci = -99);  // random garbage value
  debug_only(_map = (SafePointNode*)-1);
  _caller = caller;
  _depth  = 1 + (caller == NULL ? 0 : caller->depth());
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff + _method->max_locals();
  _monoff = _stkoff + _method->max_stack();
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}
JVMState::JVMState(int stack_size) :
  _method(NULL) {
  _bci = InvocationEntryBci;
  _reexecute = Reexecute_Undefined;
  debug_only(_map = (SafePointNode*)-1);
  _caller = NULL;
  _depth  = 1;
  _locoff = TypeFunc::Parms;
  _stkoff = _locoff;
  _monoff = _stkoff + stack_size;
  _scloff = _monoff;
  _endoff = _monoff;
  _sp = 0;
}

//--------------------------------of_depth-------------------------------------
JVMState* JVMState::of_depth(int d) const {
  const JVMState* jvmp = this;
  assert(0 < d && (uint)d <= depth(), "oob");
  for (int skip = depth() - d; skip > 0; skip--) {
    jvmp = jvmp->caller();
  }
  assert(jvmp->depth() == (uint)d, "found the right one");
  return (JVMState*)jvmp;
}

//-----------------------------same_calls_as-----------------------------------
bool JVMState::same_calls_as(const JVMState* that) const {
  if (this == that)                    return true;
  if (this->depth() != that->depth())  return false;
  const JVMState* p = this;
  const JVMState* q = that;
  for (;;) {
    if (p->_method != q->_method)    return false;
    if (p->_method == NULL)          return true;   // bci is irrelevant
    if (p->_bci    != q->_bci)       return false;
    if (p->_reexecute != q->_reexecute)  return false;
    p = p->caller();
    q = q->caller();
    if (p == q)                      return true;
    assert(p != NULL && q != NULL, "depth check ensures we don't run off end");
  }
}

//------------------------------debug_start------------------------------------
uint JVMState::debug_start()  const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last");
  return of_depth(1)->locoff();
}

//-------------------------------debug_end-------------------------------------
uint JVMState::debug_end() const {
  debug_only(JVMState* jvmroot = of_depth(1));
  assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last");
  return endoff();
}

//------------------------------debug_depth------------------------------------
uint JVMState::debug_depth() const {
  uint total = 0;
  for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) {
    total += jvmp->debug_size();
  }
  return total;
}

#ifndef PRODUCT

//------------------------------format_helper----------------------------------
// Given an allocation (a Chaitin object) and a Node decide if the Node carries
// any defined value or not.  If it does, print out the register or constant.
static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) {
  if (n == NULL) { st->print(" NULL"); return; }
  if (n->is_SafePointScalarObject()) {
    // Scalar replacement.
    SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject();
    scobjs->append_if_missing(spobj);
    int sco_n = scobjs->find(spobj);
    assert(sco_n >= 0, "");
    st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n);
    return;
  }
  if (regalloc->node_regs_max_index() > 0 &&
      OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined
    char buf[50];
    regalloc->dump_register(n,buf);
    st->print(" %s%d]=%s",msg,i,buf);
  } else {                      // No register, but might be constant
    const Type *t = n->bottom_type();
    switch (t->base()) {
    case Type::Int:
      st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con());
      break;
    case Type::AnyPtr:
      assert( t == TypePtr::NULL_PTR || n->in_dump(), "" );
      st->print(" %s%d]=#NULL",msg,i);
      break;
    case Type::AryPtr:
    case Type::InstPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop());
      break;
    case Type::KlassPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_klassptr()->klass());
      break;
    case Type::MetadataPtr:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_metadataptr()->metadata());
      break;
    case Type::NarrowOop:
      st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop());
      break;
    case Type::RawPtr:
      st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr());
      break;
    case Type::DoubleCon:
      st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d);
      break;
    case Type::FloatCon:
      st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f);
      break;
    case Type::Long:
      st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con());
      break;
    case Type::Half:
    case Type::Top:
      st->print(" %s%d]=_",msg,i);
      break;
    default: ShouldNotReachHere();
    }
  }
}

//------------------------------format-----------------------------------------
void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const {
  st->print("        #");
  if (_method) {
    _method->print_short_name(st);
    st->print(" @ bci:%d ",_bci);
  } else {
    st->print_cr(" runtime stub ");
    return;
  }
  if (n->is_MachSafePoint()) {
    GrowableArray<SafePointScalarObjectNode*> scobjs;
    MachSafePointNode *mcall = n->as_MachSafePoint();
    uint i;
    // Print locals
    for (i = 0; i < (uint)loc_size(); i++)
      format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs);
    // Print stack
    for (i = 0; i < (uint)stk_size(); i++) {
      if ((uint)(_stkoff + i) >= mcall->len())
        st->print(" oob ");
      else
       format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs);
    }
    for (i = 0; (int)i < nof_monitors(); i++) {
      Node *box = mcall->monitor_box(this, i);
      Node *obj = mcall->monitor_obj(this, i);
      if (regalloc->node_regs_max_index() > 0 &&
          OptoReg::is_valid(regalloc->get_reg_first(box))) {
        box = BoxLockNode::box_node(box);
        format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs);
      } else {
        OptoReg::Name box_reg = BoxLockNode::reg(box);
        st->print(" MON-BOX%d=%s+%d",
                   i,
                   OptoReg::regname(OptoReg::c_frame_pointer),
                   regalloc->reg2offset(box_reg));
      }
      const char* obj_msg = "MON-OBJ[";
      if (EliminateLocks) {
        if (BoxLockNode::box_node(box)->is_eliminated())
          obj_msg = "MON-OBJ(LOCK ELIMINATED)[";
      }
      format_helper(regalloc, st, obj, obj_msg, i, &scobjs);
    }

    for (i = 0; i < (uint)scobjs.length(); i++) {
      // Scalar replaced objects.
      st->print_cr("");
      st->print("        # ScObj" INT32_FORMAT " ", i);
      SafePointScalarObjectNode* spobj = scobjs.at(i);
      ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass();
      assert(cik->is_instance_klass() ||
             cik->is_array_klass(), "Not supported allocation.");
      ciInstanceKlass *iklass = NULL;
      if (cik->is_instance_klass()) {
        cik->print_name_on(st);
        iklass = cik->as_instance_klass();
      } else if (cik->is_type_array_klass()) {
        cik->as_array_klass()->base_element_type()->print_name_on(st);
        st->print("[%d]", spobj->n_fields());
      } else if (cik->is_obj_array_klass()) {
        ciKlass* cie = cik->as_obj_array_klass()->base_element_klass();
        if (cie->is_instance_klass()) {
          cie->print_name_on(st);
        } else if (cie->is_type_array_klass()) {
          cie->as_array_klass()->base_element_type()->print_name_on(st);
        } else {
          ShouldNotReachHere();
        }
        st->print("[%d]", spobj->n_fields());
        int ndim = cik->as_array_klass()->dimension() - 1;
        while (ndim-- > 0) {
          st->print("[]");
        }
      }
      st->print("={");
      uint nf = spobj->n_fields();
      if (nf > 0) {
        uint first_ind = spobj->first_index(mcall->jvms());
        Node* fld_node = mcall->in(first_ind);
        ciField* cifield;
        if (iklass != NULL) {
          st->print(" [");
          cifield = iklass->nonstatic_field_at(0);
          cifield->print_name_on(st);
          format_helper(regalloc, st, fld_node, ":", 0, &scobjs);
        } else {
          format_helper(regalloc, st, fld_node, "[", 0, &scobjs);
        }
        for (uint j = 1; j < nf; j++) {
          fld_node = mcall->in(first_ind+j);
          if (iklass != NULL) {
            st->print(", [");
            cifield = iklass->nonstatic_field_at(j);
            cifield->print_name_on(st);
            format_helper(regalloc, st, fld_node, ":", j, &scobjs);
          } else {
            format_helper(regalloc, st, fld_node, ", [", j, &scobjs);
          }
        }
      }
      st->print(" }");
    }
  }
  st->print_cr("");
  if (caller() != NULL) caller()->format(regalloc, n, st);
}


void JVMState::dump_spec(outputStream *st) const {
  if (_method != NULL) {
    bool printed = false;
    if (!Verbose) {
      // The JVMS dumps make really, really long lines.
      // Take out the most boring parts, which are the package prefixes.
      char buf[500];
      stringStream namest(buf, sizeof(buf));
      _method->print_short_name(&namest);
      if (namest.count() < sizeof(buf)) {
        const char* name = namest.base();
        if (name[0] == ' ')  ++name;
        const char* endcn = strchr(name, ':');  // end of class name
        if (endcn == NULL)  endcn = strchr(name, '(');
        if (endcn == NULL)  endcn = name + strlen(name);
        while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/')
          --endcn;
        st->print(" %s", endcn);
        printed = true;
      }
    }
    if (!printed)
      _method->print_short_name(st);
    st->print(" @ bci:%d",_bci);
    if(_reexecute == Reexecute_True)
      st->print(" reexecute");
  } else {
    st->print(" runtime stub");
  }
  if (caller() != NULL)  caller()->dump_spec(st);
}


void JVMState::dump_on(outputStream* st) const {
  bool print_map = _map && !((uintptr_t)_map & 1) &&
                  ((caller() == NULL) || (caller()->map() != _map));
  if (print_map) {
    if (_map->len() > _map->req()) {  // _map->has_exceptions()
      Node* ex = _map->in(_map->req());  // _map->next_exception()
      // skip the first one; it's already being printed
      while (ex != NULL && ex->len() > ex->req()) {
        ex = ex->in(ex->req());  // ex->next_exception()
        ex->dump(1);
      }
    }
    _map->dump(Verbose ? 2 : 1);
  }
  if (caller() != NULL) {
    caller()->dump_on(st);
  }
  st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=",
             depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false");
  if (_method == NULL) {
    st->print_cr("(none)");
  } else {
    _method->print_name(st);
    st->cr();
    if (bci() >= 0 && bci() < _method->code_size()) {
      st->print("    bc: ");
      _method->print_codes_on(bci(), bci()+1, st);
    }
  }
}

// Extra way to dump a jvms from the debugger,
// to avoid a bug with C++ member function calls.
void dump_jvms(JVMState* jvms) {
  jvms->dump();
}
#endif

//--------------------------clone_shallow--------------------------------------
JVMState* JVMState::clone_shallow(Compile* C) const {
  JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0);
  n->set_bci(_bci);
  n->_reexecute = _reexecute;
  n->set_locoff(_locoff);
  n->set_stkoff(_stkoff);
  n->set_monoff(_monoff);
  n->set_scloff(_scloff);
  n->set_endoff(_endoff);
  n->set_sp(_sp);
  n->set_map(_map);
  return n;
}

//---------------------------clone_deep----------------------------------------
JVMState* JVMState::clone_deep(Compile* C) const {
  JVMState* n = clone_shallow(C);
  for (JVMState* p = n; p->_caller != NULL; p = p->_caller) {
    p->_caller = p->_caller->clone_shallow(C);
  }
  assert(n->depth() == depth(), "sanity");
  assert(n->debug_depth() == debug_depth(), "sanity");
  return n;
}

/**
 * Reset map for all callers
 */
void JVMState::set_map_deep(SafePointNode* map) {
  for (JVMState* p = this; p->_caller != NULL; p = p->_caller) {
    p->set_map(map);
  }
}

// Adapt offsets in in-array after adding or removing an edge.
// Prerequisite is that the JVMState is used by only one node.
void JVMState::adapt_position(int delta) {
  for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) {
    jvms->set_locoff(jvms->locoff() + delta);
    jvms->set_stkoff(jvms->stkoff() + delta);
    jvms->set_monoff(jvms->monoff() + delta);
    jvms->set_scloff(jvms->scloff() + delta);
    jvms->set_endoff(jvms->endoff() + delta);
  }
}

//=============================================================================
uint CallNode::cmp( const Node &n ) const
{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; }
#ifndef PRODUCT
void CallNode::dump_req(outputStream *st) const {
  // Dump the required inputs, enclosed in '(' and ')'
  uint i;                       // Exit value of loop
  for (i = 0; i < req(); i++) {    // For all required inputs
    if (i == TypeFunc::Parms) st->print("(");
    if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx);
    else st->print("_ ");
  }
  st->print(")");
}

void CallNode::dump_spec(outputStream *st) const {
  st->print(" ");
  tf()->dump_on(st);
  if (_cnt != COUNT_UNKNOWN)  st->print(" C=%f",_cnt);
  if (jvms() != NULL)  jvms()->dump_spec(st);
}
#endif

const Type *CallNode::bottom_type() const { return tf()->range(); }
const Type *CallNode::Value(PhaseTransform *phase) const {
  if (phase->type(in(0)) == Type::TOP)  return Type::TOP;
  return tf()->range();
}

//------------------------------calling_convention-----------------------------
void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  // Use the standard compiler calling convention
  Matcher::calling_convention( sig_bt, parm_regs, argcnt, true );
}


//------------------------------match------------------------------------------
// Construct projections for control, I/O, memory-fields, ..., and
// return result(s) along with their RegMask info
Node *CallNode::match( const ProjNode *proj, const Matcher *match ) {
  switch (proj->_con) {
  case TypeFunc::Control:
  case TypeFunc::I_O:
  case TypeFunc::Memory:
    return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj);

  case TypeFunc::Parms+1:       // For LONG & DOUBLE returns
    assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, "");
    // 2nd half of doubles and longs
    return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad);

  case TypeFunc::Parms: {       // Normal returns
    uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg();
    OptoRegPair regs = is_CallRuntime()
      ? match->c_return_value(ideal_reg,true)  // Calls into C runtime
      : match->  return_value(ideal_reg,true); // Calls into compiled Java code
    RegMask rm = RegMask(regs.first());
    if( OptoReg::is_valid(regs.second()) )
      rm.Insert( regs.second() );
    return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg);
  }

  case TypeFunc::ReturnAdr:
  case TypeFunc::FramePtr:
  default:
    ShouldNotReachHere();
  }
  return NULL;
}

// Do we Match on this edge index or not?  Match no edges
uint CallNode::match_edge(uint idx) const {
  return 0;
}

//
// Determine whether the call could modify the field of the specified
// instance at the specified offset.
//
bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) {
  assert((t_oop != NULL), "sanity");
  if (t_oop->is_known_instance()) {
    // The instance_id is set only for scalar-replaceable allocations which
    // are not passed as arguments according to Escape Analysis.
    return false;
  }
  if (t_oop->is_ptr_to_boxed_value()) {
    ciKlass* boxing_klass = t_oop->klass();
    if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) {
      // Skip unrelated boxing methods.
      Node* proj = proj_out(TypeFunc::Parms);
      if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) {
        return false;
      }
    }
    if (is_CallJava() && as_CallJava()->method() != NULL) {
      ciMethod* meth = as_CallJava()->method();
      if (meth->is_accessor()) {
        return false;
      }
      // May modify (by reflection) if a boxing object is passed
      // as argument or returned.
      if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) {
        Node* proj = proj_out(TypeFunc::Parms);
        const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      const TypeTuple* d = tf()->domain();
      for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
        const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr();
        if ((inst_t != NULL) && (!inst_t->klass_is_exact() ||
                                 (inst_t->klass() == boxing_klass))) {
          return true;
        }
      }
      return false;
    }
  }
  return true;
}

// Does this call have a direct reference to n other than debug information?
bool CallNode::has_non_debug_use(Node *n) {
  const TypeTuple * d = tf()->domain();
  for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
    Node *arg = in(i);
    if (arg == n) {
      return true;
    }
  }
  return false;
}

// Returns the unique CheckCastPP of a call,
// 'this' if there are several CheckCastPPs,
// or NULL if there is none.
Node *CallNode::result_cast() {
  Node *cast = NULL;

  Node *p = proj_out(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this;  // more than 1 CheckCastPP
      }
      cast = use;
    }
  }
  return cast;
}
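
// Collect the projections hanging off this call into 'projs': the control,
// i/o and memory projections of the fall-through and catch-all paths, the
// result projection, and the exception oop, where present.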
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
  projs->fallthrough_proj      = NULL;
  projs->fallthrough_catchproj = NULL;
  projs->fallthrough_ioproj    = NULL;
  projs->catchall_ioproj       = NULL;
  projs->catchall_catchproj    = NULL;
  projs->fallthrough_memproj   = NULL;
  projs->catchall_memproj      = NULL;
  projs->resproj               = NULL;
  projs->exobj                 = NULL;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;
    switch (pn->_con) {
    case TypeFunc::Control:
      {
        // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
        projs->fallthrough_proj = pn;
        DUIterator_Fast jmax, j = pn->fast_outs(jmax);
        const Node *cn = pn->fast_out(j);
        if (cn->is_Catch()) {
          ProjNode *cpn = NULL;
          for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
            cpn = cn->fast_out(k)->as_Proj();
            assert(cpn->is_CatchProj(), "must be a CatchProjNode");
            if (cpn->_con == CatchProjNode::fall_through_index)
              projs->fallthrough_catchproj = cpn;
            else {
              assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
              projs->catchall_catchproj = cpn;
            }
          }
        }
        break;
      }
    case TypeFunc::I_O:
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the others must exist and be found.
  assert(projs->fallthrough_proj      != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj   != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj    != NULL, "must be found");
    assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj     != NULL, "must be found");
  }
}
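
// Late inlining of method handle calls: once the MethodHandle receiver of
// an invokeBasic, or the trailing member argument of the other method
// handle intrinsics, becomes a constant, queue the CallGenerator again
// for late inlining.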
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this method handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}


//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print(_name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    if (n != NULL)  add_prec(n);
  } else {
    set_prec(req(), n);
  }
}

//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}

//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if it is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  while (scloff > new_scloff)  del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "must have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  return (TypeFunc::Parms == idx);
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
uint SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this); // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
             first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);
  C->add_macro_node(this);
}

//=============================================================================
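// An array allocation whose length is provably negative can only throw,
// so its fall-through control is unreachable; Ideal disconnects it below.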
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node *halt = new (phase->C) HaltNode( nproj, frame );
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             length_type->is_con() && narrow_length_type->is_con() &&
                (narrow_length_type->_hi <= length_type->_lo) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes) return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      length = new (phase->C) CastIINode(length, narrow_length_type);
      length->set_req(0, initialization()->proj_out(0));
    }
  }

  return length;
}

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }
  1313 // Redundant lock elimination
  1314 //
  1315 // There are various patterns of locking where we release and
  1316 // immediately reacquire a lock in a piece of code where no operations
  1317 // occur in between that would be observable.  In those cases we can
  1318 // skip releasing and reacquiring the lock without violating any
  1319 // fairness requirements.  Doing this around a loop could cause a lock
  1320 // to be held for a very long time so we concentrate on non-looping
  1321 // control flow.  We also require that the operations are fully
  1322 // redundant meaning that we don't introduce new lock operations on
  1323 // some paths so to be able to eliminate it on others ala PRE.  This
  1324 // would probably require some more extensive graph manipulation to
  1325 // guarantee that the memory edges were all handled correctly.
  1326 //
  1327 // Assuming p is a simple predicate which can't trap in any way and s
  1328 // is a synchronized method consider this code:
  1329 //
  1330 //   s();
  1331 //   if (p)
  1332 //     s();
  1333 //   else
  1334 //     s();
  1335 //   s();
  1336 //
  1337 // 1. The unlocks of the first call to s can be eliminated if the
  1338 // locks inside the then and else branches are eliminated.
  1339 //
  1340 // 2. The unlocks of the then and else branches can be eliminated if
  1341 // the lock of the final call to s is eliminated.
  1342 //
  1343 // Either of these cases subsumes the simple case of sequential control flow
  1344 //
  1345 // Addtionally we can eliminate versions without the else case:
  1346 //
  1347 //   s();
  1348 //   if (p)
  1349 //     s();
  1350 //   s();
  1351 //
  1352 // 3. In this case we eliminate the unlock of the first s, the lock
  1353 // and unlock in the then case and the lock in the final s.
  1354 //
  1355 // Note also that in all these cases the then/else pieces don't have
  1356 // to be trivial as long as they begin and end with synchronization
  1357 // operations.
  1358 //
  1359 //   s();
  1360 //   if (p)
  1361 //     s();
  1362 //     f();
  1363 //     s();
  1364 //   s();
  1365 //
  1366 // The code will work properly for this case, leaving in the unlock
  1367 // before the call to f and the relock after it.
  1368 //
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated by placing an unlock on the else path and
// then eliminating the first unlock and the lock in the then side.
// Alternatively, the unlock could be moved out of the then side so
// that it falls after the merge, and then the first unlock and the
// second lock could be eliminated.  That might require less
// manipulation of the memory state to get correct.
//
// Additionally we might allow work between an unlock and a lock
// before giving up on eliminating the locks.  The current code
// disallows any conditional control flow between these operations.  A
// formulation similar to partial redundancy elimination, computing the
// availability of unlocking and the anticipatability of locking at a
// program point, would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen indicate
// it's likely that non-trivial work would occur in between.  There may
// be other, more complicated constructs where we could eliminate
// locking but I haven't seen any others appear as hot or interesting.
//
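// A hypothetical instance of "work in between" (a hedged sketch; this
// case is not handled today because of the conditional control flow):
//
//   s();
//   if (p)
//     x = y;
//   s();
//
// The unlock of the first s() is available at the merge and the lock
// of the second s() is anticipable above the split, so a PRE-style
// formulation could still coarsen them.
//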
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                | BoxLock   \
//                |  |   |     \
//                |  |    \     \
//                |  |   FastLock
//                |  |   /
//                |  |  /
//                |  |  |
//
//               Lock
//                |
//            Proj #0
//                |
//            MembarAcquire
//                |
//            Proj #0
//
//            MembarRelease
//                |
//            Proj #0
//                |
//              Unlock
//                |
//            Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through their control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable, which causes the
// expansion of the Lock and Unlock macro nodes to make the operation
// a NOP.
//
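// A hedged sketch of the end result, in bytecode-level pseudocode
// (not actual output of this pass):
//
//   monitorenter x; a(); monitorexit x;   // exit marked eliminated
//   monitorenter x; b(); monitorexit x;   // enter marked eliminated
//
// expands as if it were
//
//   monitorenter x; a(); b(); monitorexit x;
//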
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
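//
// Usage sketch (illustrative; this mirrors the callers below):
//
//   Node *ctrl = next_control(lock->in(0));
//
// returns the nearest "interesting" control above the lock: copy
// regions and control projections of already-eliminated Lock/Unlock
// nodes are skipped, so the pattern matchers below never see them.
//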
//
// Given a control, see if it's the control projection of an Unlock which
// operates on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top()) break;  // dead control path
    if (ctrl->is_Proj()) ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated.
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

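//
// The only merge shape find_matching_lock() walks through is a simple
// diamond (sketch):
//
//             If
//            /  \
//      IfTrue    IfFalse
//            \  /
//           Region   (req() == 3)
//
// Any other region shape ends the search, as does reaching a
// safepoint that turns out not to be the matching Lock (in which
// case NULL is returned).
//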
// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  lock_ops.trunc_to(0);
  return false;
}

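// In source terms, the shape matched above is one arm of case 3
// (illustrative):
//
//   s();      // supplies the unlock found above the If
//   if (p)
//     s();    // supplies the lock found on the other projection
//   s();      // the lock the search started from, below the merge
//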
bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;
}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*> lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of a single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region()) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
          // found a lock preceded by multiple unlocks along all paths
          // joining at this point, which is case 3 in the description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessor merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found an unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

  #ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
  #endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

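// The net effect of coarsening, in source terms (a hedged sketch):
//
//   synchronized (x) { a(); }
//   synchronized (x) { b(); }
//
// The unlock of the first region and the lock of the second are both
// marked coarsened and expand to nothing, leaving what behaves like a
// single region covering a() and b().
//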
//=============================================================================
bool LockNode::is_nested_lock_region() {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0)
    return false; // External lock or it is not Box (Phi node).

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj) ||
      (unique_lock != this)) {
    return false;
  }

  // Look for external lock for the same object.
  SafePointNode* sfn = this->as_SafePoint();
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int num_mon  = jvms->nof_monitors();
    // Loop over monitors
    for (int idx = 0; idx < num_mon; idx++) {
      Node* obj_node = sfn->monitor_obj(jvms, idx);
      BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock();
      if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) {
        return true;
      }
    }
  }
  return false;
}

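// In source terms, a nested lock region looks like this (illustrative):
//
//   synchronized (x) {      // outer monitor, lower stack slot
//     synchronized (x) {    // this Lock: nested
//       ...
//     }
//   }
//
// The inner Lock qualifies because an enclosing JVMState holds a
// monitor on the same (uncasted) object at a smaller stack slot.
//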
//=============================================================================
uint UnlockNode::size_of() const { return sizeof(*this); }

//=============================================================================
Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this unlock.  We don't actually
  // remove the unlocking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the unlock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  // Escape state is defined after Parse phase.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are unlocking an unescaped object, the lock/unlock is unnecessary.
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
    }
  }
  return result;
}
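// The non-escaping case, in source terms (illustrative):
//
//   Object x = new Object();  // x never escapes this method
//   synchronized (x) { ... }  // both Lock and Unlock are marked
//                             // non-esc and expand to nothing
//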
