1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 1.2 +++ b/src/share/vm/opto/callnode.cpp Wed Apr 27 01:25:04 2016 +0800 1.3 @@ -0,0 +1,1808 @@ 1.4 +/* 1.5 + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. 1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 1.7 + * 1.8 + * This code is free software; you can redistribute it and/or modify it 1.9 + * under the terms of the GNU General Public License version 2 only, as 1.10 + * published by the Free Software Foundation. 1.11 + * 1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 1.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 1.15 + * version 2 for more details (a copy is included in the LICENSE file that 1.16 + * accompanied this code). 1.17 + * 1.18 + * You should have received a copy of the GNU General Public License version 1.19 + * 2 along with this work; if not, write to the Free Software Foundation, 1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 1.21 + * 1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 1.23 + * or visit www.oracle.com if you need additional information or have any 1.24 + * questions. 
1.25 + * 1.26 + */ 1.27 + 1.28 +#include "precompiled.hpp" 1.29 +#include "ci/bcEscapeAnalyzer.hpp" 1.30 +#include "compiler/oopMap.hpp" 1.31 +#include "opto/callGenerator.hpp" 1.32 +#include "opto/callnode.hpp" 1.33 +#include "opto/escape.hpp" 1.34 +#include "opto/locknode.hpp" 1.35 +#include "opto/machnode.hpp" 1.36 +#include "opto/matcher.hpp" 1.37 +#include "opto/parse.hpp" 1.38 +#include "opto/regalloc.hpp" 1.39 +#include "opto/regmask.hpp" 1.40 +#include "opto/rootnode.hpp" 1.41 +#include "opto/runtime.hpp" 1.42 + 1.43 +// Portions of code courtesy of Clifford Click 1.44 + 1.45 +// Optimization - Graph Style 1.46 + 1.47 +//============================================================================= 1.48 +uint StartNode::size_of() const { return sizeof(*this); } 1.49 +uint StartNode::cmp( const Node &n ) const 1.50 +{ return _domain == ((StartNode&)n)._domain; } 1.51 +const Type *StartNode::bottom_type() const { return _domain; } 1.52 +const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; } 1.53 +#ifndef PRODUCT 1.54 +void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);} 1.55 +#endif 1.56 + 1.57 +//------------------------------Ideal------------------------------------------ 1.58 +Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ 1.59 + return remove_dead_region(phase, can_reshape) ? 
this : NULL; 1.60 +} 1.61 + 1.62 +//------------------------------calling_convention----------------------------- 1.63 +void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 1.64 + Matcher::calling_convention( sig_bt, parm_regs, argcnt, false ); 1.65 +} 1.66 + 1.67 +//------------------------------Registers-------------------------------------- 1.68 +const RegMask &StartNode::in_RegMask(uint) const { 1.69 + return RegMask::Empty; 1.70 +} 1.71 + 1.72 +//------------------------------match------------------------------------------ 1.73 +// Construct projections for incoming parameters, and their RegMask info 1.74 +Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { 1.75 + switch (proj->_con) { 1.76 + case TypeFunc::Control: 1.77 + case TypeFunc::I_O: 1.78 + case TypeFunc::Memory: 1.79 + return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 1.80 + case TypeFunc::FramePtr: 1.81 + return new (match->C) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); 1.82 + case TypeFunc::ReturnAdr: 1.83 + return new (match->C) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); 1.84 + case TypeFunc::Parms: 1.85 + default: { 1.86 + uint parm_num = proj->_con - TypeFunc::Parms; 1.87 + const Type *t = _domain->field_at(proj->_con); 1.88 + if (t->base() == Type::Half) // 2nd half of Longs and Doubles 1.89 + return new (match->C) ConNode(Type::TOP); 1.90 + uint ideal_reg = t->ideal_reg(); 1.91 + RegMask &rm = match->_calling_convention_mask[parm_num]; 1.92 + return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); 1.93 + } 1.94 + } 1.95 + return NULL; 1.96 +} 1.97 + 1.98 +//------------------------------StartOSRNode---------------------------------- 1.99 +// The method start node for an on stack replacement adapter 1.100 + 1.101 +//------------------------------osr_domain----------------------------- 1.102 +const TypeTuple 
*StartOSRNode::osr_domain() { 1.103 + const Type **fields = TypeTuple::fields(2); 1.104 + fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer 1.105 + 1.106 + return TypeTuple::make(TypeFunc::Parms+1, fields); 1.107 +} 1.108 + 1.109 +//============================================================================= 1.110 +const char * const ParmNode::names[TypeFunc::Parms+1] = { 1.111 + "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms" 1.112 +}; 1.113 + 1.114 +#ifndef PRODUCT 1.115 +void ParmNode::dump_spec(outputStream *st) const { 1.116 + if( _con < TypeFunc::Parms ) { 1.117 + st->print("%s", names[_con]); 1.118 + } else { 1.119 + st->print("Parm%d: ",_con-TypeFunc::Parms); 1.120 + // Verbose and WizardMode dump bottom_type for all nodes 1.121 + if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); 1.122 + } 1.123 +} 1.124 +#endif 1.125 + 1.126 +uint ParmNode::ideal_reg() const { 1.127 + switch( _con ) { 1.128 + case TypeFunc::Control : // fall through 1.129 + case TypeFunc::I_O : // fall through 1.130 + case TypeFunc::Memory : return 0; 1.131 + case TypeFunc::FramePtr : // fall through 1.132 + case TypeFunc::ReturnAdr: return Op_RegP; 1.133 + default : assert( _con > TypeFunc::Parms, "" ); 1.134 + // fall through 1.135 + case TypeFunc::Parms : { 1.136 + // Type of argument being passed 1.137 + const Type *t = in(0)->as_Start()->_domain->field_at(_con); 1.138 + return t->ideal_reg(); 1.139 + } 1.140 + } 1.141 + ShouldNotReachHere(); 1.142 + return 0; 1.143 +} 1.144 + 1.145 +//============================================================================= 1.146 +ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { 1.147 + init_req(TypeFunc::Control,cntrl); 1.148 + init_req(TypeFunc::I_O,i_o); 1.149 + init_req(TypeFunc::Memory,memory); 1.150 + init_req(TypeFunc::FramePtr,frameptr); 1.151 + init_req(TypeFunc::ReturnAdr,retadr); 1.152 +} 1.153 + 1.154 +Node 
*ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ 1.155 + return remove_dead_region(phase, can_reshape) ? this : NULL; 1.156 +} 1.157 + 1.158 +const Type *ReturnNode::Value( PhaseTransform *phase ) const { 1.159 + return ( phase->type(in(TypeFunc::Control)) == Type::TOP) 1.160 + ? Type::TOP 1.161 + : Type::BOTTOM; 1.162 +} 1.163 + 1.164 +// Do we Match on this edge index or not? No edges on return nodes 1.165 +uint ReturnNode::match_edge(uint idx) const { 1.166 + return 0; 1.167 +} 1.168 + 1.169 + 1.170 +#ifndef PRODUCT 1.171 +void ReturnNode::dump_req(outputStream *st) const { 1.172 + // Dump the required inputs, enclosed in '(' and ')' 1.173 + uint i; // Exit value of loop 1.174 + for (i = 0; i < req(); i++) { // For all required inputs 1.175 + if (i == TypeFunc::Parms) st->print("returns"); 1.176 + if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 1.177 + else st->print("_ "); 1.178 + } 1.179 +} 1.180 +#endif 1.181 + 1.182 +//============================================================================= 1.183 +RethrowNode::RethrowNode( 1.184 + Node* cntrl, 1.185 + Node* i_o, 1.186 + Node* memory, 1.187 + Node* frameptr, 1.188 + Node* ret_adr, 1.189 + Node* exception 1.190 +) : Node(TypeFunc::Parms + 1) { 1.191 + init_req(TypeFunc::Control , cntrl ); 1.192 + init_req(TypeFunc::I_O , i_o ); 1.193 + init_req(TypeFunc::Memory , memory ); 1.194 + init_req(TypeFunc::FramePtr , frameptr ); 1.195 + init_req(TypeFunc::ReturnAdr, ret_adr); 1.196 + init_req(TypeFunc::Parms , exception); 1.197 +} 1.198 + 1.199 +Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ 1.200 + return remove_dead_region(phase, can_reshape) ? this : NULL; 1.201 +} 1.202 + 1.203 +const Type *RethrowNode::Value( PhaseTransform *phase ) const { 1.204 + return (phase->type(in(TypeFunc::Control)) == Type::TOP) 1.205 + ? 
Type::TOP 1.206 + : Type::BOTTOM; 1.207 +} 1.208 + 1.209 +uint RethrowNode::match_edge(uint idx) const { 1.210 + return 0; 1.211 +} 1.212 + 1.213 +#ifndef PRODUCT 1.214 +void RethrowNode::dump_req(outputStream *st) const { 1.215 + // Dump the required inputs, enclosed in '(' and ')' 1.216 + uint i; // Exit value of loop 1.217 + for (i = 0; i < req(); i++) { // For all required inputs 1.218 + if (i == TypeFunc::Parms) st->print("exception"); 1.219 + if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 1.220 + else st->print("_ "); 1.221 + } 1.222 +} 1.223 +#endif 1.224 + 1.225 +//============================================================================= 1.226 +// Do we Match on this edge index or not? Match only target address & method 1.227 +uint TailCallNode::match_edge(uint idx) const { 1.228 + return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 1.229 +} 1.230 + 1.231 +//============================================================================= 1.232 +// Do we Match on this edge index or not? Match only target address & oop 1.233 +uint TailJumpNode::match_edge(uint idx) const { 1.234 + return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; 1.235 +} 1.236 + 1.237 +//============================================================================= 1.238 +JVMState::JVMState(ciMethod* method, JVMState* caller) : 1.239 + _method(method) { 1.240 + assert(method != NULL, "must be valid call site"); 1.241 + _reexecute = Reexecute_Undefined; 1.242 + debug_only(_bci = -99); // random garbage value 1.243 + debug_only(_map = (SafePointNode*)-1); 1.244 + _caller = caller; 1.245 + _depth = 1 + (caller == NULL ? 
0 : caller->depth()); 1.246 + _locoff = TypeFunc::Parms; 1.247 + _stkoff = _locoff + _method->max_locals(); 1.248 + _monoff = _stkoff + _method->max_stack(); 1.249 + _scloff = _monoff; 1.250 + _endoff = _monoff; 1.251 + _sp = 0; 1.252 +} 1.253 +JVMState::JVMState(int stack_size) : 1.254 + _method(NULL) { 1.255 + _bci = InvocationEntryBci; 1.256 + _reexecute = Reexecute_Undefined; 1.257 + debug_only(_map = (SafePointNode*)-1); 1.258 + _caller = NULL; 1.259 + _depth = 1; 1.260 + _locoff = TypeFunc::Parms; 1.261 + _stkoff = _locoff; 1.262 + _monoff = _stkoff + stack_size; 1.263 + _scloff = _monoff; 1.264 + _endoff = _monoff; 1.265 + _sp = 0; 1.266 +} 1.267 + 1.268 +//--------------------------------of_depth------------------------------------- 1.269 +JVMState* JVMState::of_depth(int d) const { 1.270 + const JVMState* jvmp = this; 1.271 + assert(0 < d && (uint)d <= depth(), "oob"); 1.272 + for (int skip = depth() - d; skip > 0; skip--) { 1.273 + jvmp = jvmp->caller(); 1.274 + } 1.275 + assert(jvmp->depth() == (uint)d, "found the right one"); 1.276 + return (JVMState*)jvmp; 1.277 +} 1.278 + 1.279 +//-----------------------------same_calls_as----------------------------------- 1.280 +bool JVMState::same_calls_as(const JVMState* that) const { 1.281 + if (this == that) return true; 1.282 + if (this->depth() != that->depth()) return false; 1.283 + const JVMState* p = this; 1.284 + const JVMState* q = that; 1.285 + for (;;) { 1.286 + if (p->_method != q->_method) return false; 1.287 + if (p->_method == NULL) return true; // bci is irrelevant 1.288 + if (p->_bci != q->_bci) return false; 1.289 + if (p->_reexecute != q->_reexecute) return false; 1.290 + p = p->caller(); 1.291 + q = q->caller(); 1.292 + if (p == q) return true; 1.293 + assert(p != NULL && q != NULL, "depth check ensures we don't run off end"); 1.294 + } 1.295 +} 1.296 + 1.297 +//------------------------------debug_start------------------------------------ 1.298 +uint JVMState::debug_start() const { 1.299 + 
debug_only(JVMState* jvmroot = of_depth(1)); 1.300 + assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); 1.301 + return of_depth(1)->locoff(); 1.302 +} 1.303 + 1.304 +//-------------------------------debug_end------------------------------------- 1.305 +uint JVMState::debug_end() const { 1.306 + debug_only(JVMState* jvmroot = of_depth(1)); 1.307 + assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); 1.308 + return endoff(); 1.309 +} 1.310 + 1.311 +//------------------------------debug_depth------------------------------------ 1.312 +uint JVMState::debug_depth() const { 1.313 + uint total = 0; 1.314 + for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { 1.315 + total += jvmp->debug_size(); 1.316 + } 1.317 + return total; 1.318 +} 1.319 + 1.320 +#ifndef PRODUCT 1.321 + 1.322 +//------------------------------format_helper---------------------------------- 1.323 +// Given an allocation (a Chaitin object) and a Node decide if the Node carries 1.324 +// any defined value or not. If it does, print out the register or constant. 1.325 +static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { 1.326 + if (n == NULL) { st->print(" NULL"); return; } 1.327 + if (n->is_SafePointScalarObject()) { 1.328 + // Scalar replacement. 
1.329 + SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); 1.330 + scobjs->append_if_missing(spobj); 1.331 + int sco_n = scobjs->find(spobj); 1.332 + assert(sco_n >= 0, ""); 1.333 + st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); 1.334 + return; 1.335 + } 1.336 + if (regalloc->node_regs_max_index() > 0 && 1.337 + OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined 1.338 + char buf[50]; 1.339 + regalloc->dump_register(n,buf); 1.340 + st->print(" %s%d]=%s",msg,i,buf); 1.341 + } else { // No register, but might be constant 1.342 + const Type *t = n->bottom_type(); 1.343 + switch (t->base()) { 1.344 + case Type::Int: 1.345 + st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con()); 1.346 + break; 1.347 + case Type::AnyPtr: 1.348 + assert( t == TypePtr::NULL_PTR || n->in_dump(), "" ); 1.349 + st->print(" %s%d]=#NULL",msg,i); 1.350 + break; 1.351 + case Type::AryPtr: 1.352 + case Type::InstPtr: 1.353 + st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->isa_oopptr()->const_oop())); 1.354 + break; 1.355 + case Type::KlassPtr: 1.356 + st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_klassptr()->klass())); 1.357 + break; 1.358 + case Type::MetadataPtr: 1.359 + st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_metadataptr()->metadata())); 1.360 + break; 1.361 + case Type::NarrowOop: 1.362 + st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,p2i(t->make_ptr()->isa_oopptr()->const_oop())); 1.363 + break; 1.364 + case Type::RawPtr: 1.365 + st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,p2i(t->is_rawptr())); 1.366 + break; 1.367 + case Type::DoubleCon: 1.368 + st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d); 1.369 + break; 1.370 + case Type::FloatCon: 1.371 + st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f); 1.372 + break; 1.373 + case Type::Long: 1.374 + st->print(" %s%d]=#"INT64_FORMAT,msg,i,(int64_t)(t->is_long()->get_con())); 1.375 + break; 1.376 + case Type::Half: 1.377 + case 
Type::Top: 1.378 + st->print(" %s%d]=_",msg,i); 1.379 + break; 1.380 + default: ShouldNotReachHere(); 1.381 + } 1.382 + } 1.383 +} 1.384 + 1.385 +//------------------------------format----------------------------------------- 1.386 +void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { 1.387 + st->print(" #"); 1.388 + if (_method) { 1.389 + _method->print_short_name(st); 1.390 + st->print(" @ bci:%d ",_bci); 1.391 + } else { 1.392 + st->print_cr(" runtime stub "); 1.393 + return; 1.394 + } 1.395 + if (n->is_MachSafePoint()) { 1.396 + GrowableArray<SafePointScalarObjectNode*> scobjs; 1.397 + MachSafePointNode *mcall = n->as_MachSafePoint(); 1.398 + uint i; 1.399 + // Print locals 1.400 + for (i = 0; i < (uint)loc_size(); i++) 1.401 + format_helper(regalloc, st, mcall->local(this, i), "L[", i, &scobjs); 1.402 + // Print stack 1.403 + for (i = 0; i < (uint)stk_size(); i++) { 1.404 + if ((uint)(_stkoff + i) >= mcall->len()) 1.405 + st->print(" oob "); 1.406 + else 1.407 + format_helper(regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs); 1.408 + } 1.409 + for (i = 0; (int)i < nof_monitors(); i++) { 1.410 + Node *box = mcall->monitor_box(this, i); 1.411 + Node *obj = mcall->monitor_obj(this, i); 1.412 + if (regalloc->node_regs_max_index() > 0 && 1.413 + OptoReg::is_valid(regalloc->get_reg_first(box))) { 1.414 + box = BoxLockNode::box_node(box); 1.415 + format_helper(regalloc, st, box, "MON-BOX[", i, &scobjs); 1.416 + } else { 1.417 + OptoReg::Name box_reg = BoxLockNode::reg(box); 1.418 + st->print(" MON-BOX%d=%s+%d", 1.419 + i, 1.420 + OptoReg::regname(OptoReg::c_frame_pointer), 1.421 + regalloc->reg2offset(box_reg)); 1.422 + } 1.423 + const char* obj_msg = "MON-OBJ["; 1.424 + if (EliminateLocks) { 1.425 + if (BoxLockNode::box_node(box)->is_eliminated()) 1.426 + obj_msg = "MON-OBJ(LOCK ELIMINATED)["; 1.427 + } 1.428 + format_helper(regalloc, st, obj, obj_msg, i, &scobjs); 1.429 + } 1.430 + 1.431 + for (i = 0; i < 
(uint)scobjs.length(); i++) { 1.432 + // Scalar replaced objects. 1.433 + st->cr(); 1.434 + st->print(" # ScObj" INT32_FORMAT " ", i); 1.435 + SafePointScalarObjectNode* spobj = scobjs.at(i); 1.436 + ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); 1.437 + assert(cik->is_instance_klass() || 1.438 + cik->is_array_klass(), "Not supported allocation."); 1.439 + ciInstanceKlass *iklass = NULL; 1.440 + if (cik->is_instance_klass()) { 1.441 + cik->print_name_on(st); 1.442 + iklass = cik->as_instance_klass(); 1.443 + } else if (cik->is_type_array_klass()) { 1.444 + cik->as_array_klass()->base_element_type()->print_name_on(st); 1.445 + st->print("[%d]", spobj->n_fields()); 1.446 + } else if (cik->is_obj_array_klass()) { 1.447 + ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); 1.448 + if (cie->is_instance_klass()) { 1.449 + cie->print_name_on(st); 1.450 + } else if (cie->is_type_array_klass()) { 1.451 + cie->as_array_klass()->base_element_type()->print_name_on(st); 1.452 + } else { 1.453 + ShouldNotReachHere(); 1.454 + } 1.455 + st->print("[%d]", spobj->n_fields()); 1.456 + int ndim = cik->as_array_klass()->dimension() - 1; 1.457 + while (ndim-- > 0) { 1.458 + st->print("[]"); 1.459 + } 1.460 + } 1.461 + st->print("={"); 1.462 + uint nf = spobj->n_fields(); 1.463 + if (nf > 0) { 1.464 + uint first_ind = spobj->first_index(mcall->jvms()); 1.465 + Node* fld_node = mcall->in(first_ind); 1.466 + ciField* cifield; 1.467 + if (iklass != NULL) { 1.468 + st->print(" ["); 1.469 + cifield = iklass->nonstatic_field_at(0); 1.470 + cifield->print_name_on(st); 1.471 + format_helper(regalloc, st, fld_node, ":", 0, &scobjs); 1.472 + } else { 1.473 + format_helper(regalloc, st, fld_node, "[", 0, &scobjs); 1.474 + } 1.475 + for (uint j = 1; j < nf; j++) { 1.476 + fld_node = mcall->in(first_ind+j); 1.477 + if (iklass != NULL) { 1.478 + st->print(", ["); 1.479 + cifield = iklass->nonstatic_field_at(j); 1.480 + cifield->print_name_on(st); 1.481 + format_helper(regalloc, 
st, fld_node, ":", j, &scobjs); 1.482 + } else { 1.483 + format_helper(regalloc, st, fld_node, ", [", j, &scobjs); 1.484 + } 1.485 + } 1.486 + } 1.487 + st->print(" }"); 1.488 + } 1.489 + } 1.490 + st->cr(); 1.491 + if (caller() != NULL) caller()->format(regalloc, n, st); 1.492 +} 1.493 + 1.494 + 1.495 +void JVMState::dump_spec(outputStream *st) const { 1.496 + if (_method != NULL) { 1.497 + bool printed = false; 1.498 + if (!Verbose) { 1.499 + // The JVMS dumps make really, really long lines. 1.500 + // Take out the most boring parts, which are the package prefixes. 1.501 + char buf[500]; 1.502 + stringStream namest(buf, sizeof(buf)); 1.503 + _method->print_short_name(&namest); 1.504 + if (namest.count() < sizeof(buf)) { 1.505 + const char* name = namest.base(); 1.506 + if (name[0] == ' ') ++name; 1.507 + const char* endcn = strchr(name, ':'); // end of class name 1.508 + if (endcn == NULL) endcn = strchr(name, '('); 1.509 + if (endcn == NULL) endcn = name + strlen(name); 1.510 + while (endcn > name && endcn[-1] != '.' 
&& endcn[-1] != '/') 1.511 + --endcn; 1.512 + st->print(" %s", endcn); 1.513 + printed = true; 1.514 + } 1.515 + } 1.516 + if (!printed) 1.517 + _method->print_short_name(st); 1.518 + st->print(" @ bci:%d",_bci); 1.519 + if(_reexecute == Reexecute_True) 1.520 + st->print(" reexecute"); 1.521 + } else { 1.522 + st->print(" runtime stub"); 1.523 + } 1.524 + if (caller() != NULL) caller()->dump_spec(st); 1.525 +} 1.526 + 1.527 + 1.528 +void JVMState::dump_on(outputStream* st) const { 1.529 + bool print_map = _map && !((uintptr_t)_map & 1) && 1.530 + ((caller() == NULL) || (caller()->map() != _map)); 1.531 + if (print_map) { 1.532 + if (_map->len() > _map->req()) { // _map->has_exceptions() 1.533 + Node* ex = _map->in(_map->req()); // _map->next_exception() 1.534 + // skip the first one; it's already being printed 1.535 + while (ex != NULL && ex->len() > ex->req()) { 1.536 + ex = ex->in(ex->req()); // ex->next_exception() 1.537 + ex->dump(1); 1.538 + } 1.539 + } 1.540 + _map->dump(Verbose ? 2 : 1); 1.541 + } 1.542 + if (caller() != NULL) { 1.543 + caller()->dump_on(st); 1.544 + } 1.545 + st->print("JVMS depth=%d loc=%d stk=%d arg=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", 1.546 + depth(), locoff(), stkoff(), argoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); 1.547 + if (_method == NULL) { 1.548 + st->print_cr("(none)"); 1.549 + } else { 1.550 + _method->print_name(st); 1.551 + st->cr(); 1.552 + if (bci() >= 0 && bci() < _method->code_size()) { 1.553 + st->print(" bc: "); 1.554 + _method->print_codes_on(bci(), bci()+1, st); 1.555 + } 1.556 + } 1.557 +} 1.558 + 1.559 +// Extra way to dump a jvms from the debugger, 1.560 +// to avoid a bug with C++ member function calls. 
1.561 +void dump_jvms(JVMState* jvms) { 1.562 + jvms->dump(); 1.563 +} 1.564 +#endif 1.565 + 1.566 +//--------------------------clone_shallow-------------------------------------- 1.567 +JVMState* JVMState::clone_shallow(Compile* C) const { 1.568 + JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); 1.569 + n->set_bci(_bci); 1.570 + n->_reexecute = _reexecute; 1.571 + n->set_locoff(_locoff); 1.572 + n->set_stkoff(_stkoff); 1.573 + n->set_monoff(_monoff); 1.574 + n->set_scloff(_scloff); 1.575 + n->set_endoff(_endoff); 1.576 + n->set_sp(_sp); 1.577 + n->set_map(_map); 1.578 + return n; 1.579 +} 1.580 + 1.581 +//---------------------------clone_deep---------------------------------------- 1.582 +JVMState* JVMState::clone_deep(Compile* C) const { 1.583 + JVMState* n = clone_shallow(C); 1.584 + for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { 1.585 + p->_caller = p->_caller->clone_shallow(C); 1.586 + } 1.587 + assert(n->depth() == depth(), "sanity"); 1.588 + assert(n->debug_depth() == debug_depth(), "sanity"); 1.589 + return n; 1.590 +} 1.591 + 1.592 +/** 1.593 + * Reset map for all callers 1.594 + */ 1.595 +void JVMState::set_map_deep(SafePointNode* map) { 1.596 + for (JVMState* p = this; p->_caller != NULL; p = p->_caller) { 1.597 + p->set_map(map); 1.598 + } 1.599 +} 1.600 + 1.601 +// Adapt offsets in in-array after adding or removing an edge. 1.602 +// Prerequisite is that the JVMState is used by only one node. 
1.603 +void JVMState::adapt_position(int delta) { 1.604 + for (JVMState* jvms = this; jvms != NULL; jvms = jvms->caller()) { 1.605 + jvms->set_locoff(jvms->locoff() + delta); 1.606 + jvms->set_stkoff(jvms->stkoff() + delta); 1.607 + jvms->set_monoff(jvms->monoff() + delta); 1.608 + jvms->set_scloff(jvms->scloff() + delta); 1.609 + jvms->set_endoff(jvms->endoff() + delta); 1.610 + } 1.611 +} 1.612 + 1.613 +// Mirror the stack size calculation in the deopt code 1.614 +// How much stack space would we need at this point in the program in 1.615 +// case of deoptimization? 1.616 +int JVMState::interpreter_frame_size() const { 1.617 + const JVMState* jvms = this; 1.618 + int size = 0; 1.619 + int callee_parameters = 0; 1.620 + int callee_locals = 0; 1.621 + int extra_args = method()->max_stack() - stk_size(); 1.622 + 1.623 + while (jvms != NULL) { 1.624 + int locks = jvms->nof_monitors(); 1.625 + int temps = jvms->stk_size(); 1.626 + bool is_top_frame = (jvms == this); 1.627 + ciMethod* method = jvms->method(); 1.628 + 1.629 + int frame_size = BytesPerWord * Interpreter::size_activation(method->max_stack(), 1.630 + temps + callee_parameters, 1.631 + extra_args, 1.632 + locks, 1.633 + callee_parameters, 1.634 + callee_locals, 1.635 + is_top_frame); 1.636 + size += frame_size; 1.637 + 1.638 + callee_parameters = method->size_of_parameters(); 1.639 + callee_locals = method->max_locals(); 1.640 + extra_args = 0; 1.641 + jvms = jvms->caller(); 1.642 + } 1.643 + return size + Deoptimization::last_frame_adjust(0, callee_locals) * BytesPerWord; 1.644 +} 1.645 + 1.646 +//============================================================================= 1.647 +uint CallNode::cmp( const Node &n ) const 1.648 +{ return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } 1.649 +#ifndef PRODUCT 1.650 +void CallNode::dump_req(outputStream *st) const { 1.651 + // Dump the required inputs, enclosed in '(' and ')' 1.652 + uint i; // Exit value of loop 1.653 + for (i = 0; i < req(); 
i++) { // For all required inputs 1.654 + if (i == TypeFunc::Parms) st->print("("); 1.655 + if (in(i)) st->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); 1.656 + else st->print("_ "); 1.657 + } 1.658 + st->print(")"); 1.659 +} 1.660 + 1.661 +void CallNode::dump_spec(outputStream *st) const { 1.662 + st->print(" "); 1.663 + tf()->dump_on(st); 1.664 + if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); 1.665 + if (jvms() != NULL) jvms()->dump_spec(st); 1.666 +} 1.667 +#endif 1.668 + 1.669 +const Type *CallNode::bottom_type() const { return tf()->range(); } 1.670 +const Type *CallNode::Value(PhaseTransform *phase) const { 1.671 + if (phase->type(in(0)) == Type::TOP) return Type::TOP; 1.672 + return tf()->range(); 1.673 +} 1.674 + 1.675 +//------------------------------calling_convention----------------------------- 1.676 +void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { 1.677 + // Use the standard compiler calling convention 1.678 + Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); 1.679 +} 1.680 + 1.681 + 1.682 +//------------------------------match------------------------------------------ 1.683 +// Construct projections for control, I/O, memory-fields, ..., and 1.684 +// return result(s) along with their RegMask info 1.685 +Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { 1.686 + switch (proj->_con) { 1.687 + case TypeFunc::Control: 1.688 + case TypeFunc::I_O: 1.689 + case TypeFunc::Memory: 1.690 + return new (match->C) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); 1.691 + 1.692 + case TypeFunc::Parms+1: // For LONG & DOUBLE returns 1.693 + assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, ""); 1.694 + // 2nd half of doubles and longs 1.695 + return new (match->C) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); 1.696 + 1.697 + case TypeFunc::Parms: { // Normal returns 1.698 + 
uint ideal_reg = tf()->range()->field_at(TypeFunc::Parms)->ideal_reg(); 1.699 + OptoRegPair regs = is_CallRuntime() 1.700 + ? match->c_return_value(ideal_reg,true) // Calls into C runtime 1.701 + : match-> return_value(ideal_reg,true); // Calls into compiled Java code 1.702 + RegMask rm = RegMask(regs.first()); 1.703 + if( OptoReg::is_valid(regs.second()) ) 1.704 + rm.Insert( regs.second() ); 1.705 + return new (match->C) MachProjNode(this,proj->_con,rm,ideal_reg); 1.706 + } 1.707 + 1.708 + case TypeFunc::ReturnAdr: 1.709 + case TypeFunc::FramePtr: 1.710 + default: 1.711 + ShouldNotReachHere(); 1.712 + } 1.713 + return NULL; 1.714 +} 1.715 + 1.716 +// Do we Match on this edge index or not? Match no edges 1.717 +uint CallNode::match_edge(uint idx) const { 1.718 + return 0; 1.719 +} 1.720 + 1.721 +// 1.722 +// Determine whether the call could modify the field of the specified 1.723 +// instance at the specified offset. 1.724 +// 1.725 +bool CallNode::may_modify(const TypeOopPtr *t_oop, PhaseTransform *phase) { 1.726 + assert((t_oop != NULL), "sanity"); 1.727 + if (t_oop->is_known_instance()) { 1.728 + // The instance_id is set only for scalar-replaceable allocations which 1.729 + // are not passed as arguments according to Escape Analysis. 1.730 + return false; 1.731 + } 1.732 + if (t_oop->is_ptr_to_boxed_value()) { 1.733 + ciKlass* boxing_klass = t_oop->klass(); 1.734 + if (is_CallStaticJava() && as_CallStaticJava()->is_boxing_method()) { 1.735 + // Skip unrelated boxing methods. 1.736 + Node* proj = proj_out(TypeFunc::Parms); 1.737 + if ((proj == NULL) || (phase->type(proj)->is_instptr()->klass() != boxing_klass)) { 1.738 + return false; 1.739 + } 1.740 + } 1.741 + if (is_CallJava() && as_CallJava()->method() != NULL) { 1.742 + ciMethod* meth = as_CallJava()->method(); 1.743 + if (meth->is_accessor()) { 1.744 + return false; 1.745 + } 1.746 + // May modify (by reflection) if an boxing object is passed 1.747 + // as argument or returned. 
1.748 + if (returns_pointer() && (proj_out(TypeFunc::Parms) != NULL)) { 1.749 + Node* proj = proj_out(TypeFunc::Parms); 1.750 + const TypeInstPtr* inst_t = phase->type(proj)->isa_instptr(); 1.751 + if ((inst_t != NULL) && (!inst_t->klass_is_exact() || 1.752 + (inst_t->klass() == boxing_klass))) { 1.753 + return true; 1.754 + } 1.755 + } 1.756 + const TypeTuple* d = tf()->domain(); 1.757 + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1.758 + const TypeInstPtr* inst_t = d->field_at(i)->isa_instptr(); 1.759 + if ((inst_t != NULL) && (!inst_t->klass_is_exact() || 1.760 + (inst_t->klass() == boxing_klass))) { 1.761 + return true; 1.762 + } 1.763 + } 1.764 + return false; 1.765 + } 1.766 + } 1.767 + return true; 1.768 +} 1.769 + 1.770 +// Does this call have a direct reference to n other than debug information? 1.771 +bool CallNode::has_non_debug_use(Node *n) { 1.772 + const TypeTuple * d = tf()->domain(); 1.773 + for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { 1.774 + Node *arg = in(i); 1.775 + if (arg == n) { 1.776 + return true; 1.777 + } 1.778 + } 1.779 + return false; 1.780 +} 1.781 + 1.782 +// Returns the unique CheckCastPP of a call 1.783 +// or 'this' if there are several CheckCastPP 1.784 +// or returns NULL if there is no one. 
Node *CallNode::result_cast() {
  Node *cast = NULL;

  // The call's result, if any, is its Parms projection.
  Node *p = proj_out(TypeFunc::Parms);
  if (p == NULL)
    return NULL;

  // Scan result users for CheckCastPP nodes.
  for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) {
    Node *use = p->fast_out(i);
    if (use->is_CheckCastPP()) {
      if (cast != NULL) {
        return this; // more than 1 CheckCastPP
      }
      cast = use;
    }
  }
  return cast;
}


// Collect and classify all projections hanging off this call into 'projs':
// control/io/memory edges, split into fall-through vs. catch-all (exception)
// paths, plus the result projection and the CreateEx exception object.
void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) {
  projs->fallthrough_proj      = NULL;
  projs->fallthrough_catchproj = NULL;
  projs->fallthrough_ioproj    = NULL;
  projs->catchall_ioproj       = NULL;
  projs->catchall_catchproj    = NULL;
  projs->fallthrough_memproj   = NULL;
  projs->catchall_memproj      = NULL;
  projs->resproj               = NULL;
  projs->exobj                 = NULL;

  for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) {
    ProjNode *pn = fast_out(i)->as_Proj();
    if (pn->outcnt() == 0) continue;  // skip dead projections
    switch (pn->_con) {
    case TypeFunc::Control:
    {
      // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj
      projs->fallthrough_proj = pn;
      DUIterator_Fast jmax, j = pn->fast_outs(jmax);
      const Node *cn = pn->fast_out(j);
      if (cn->is_Catch()) {
        ProjNode *cpn = NULL;
        for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) {
          cpn = cn->fast_out(k)->as_Proj();
          assert(cpn->is_CatchProj(), "must be a CatchProjNode");
          if (cpn->_con == CatchProjNode::fall_through_index)
            projs->fallthrough_catchproj = cpn;
          else {
            assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index.");
            projs->catchall_catchproj = cpn;
          }
        }
      }
      break;
    }
    case TypeFunc::I_O:
      // _is_io_use marks the projection on the exceptional (catch-all) path.
      if (pn->_is_io_use)
        projs->catchall_ioproj = pn;
      else
        projs->fallthrough_ioproj = pn;
      // The exception object hangs off the I_O projection as a CreateEx user.
      for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
        Node* e = pn->out(j);
        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
          assert(projs->exobj == NULL, "only one");
          projs->exobj = e;
        }
      }
      break;
    case TypeFunc::Memory:
      if (pn->_is_io_use)
        projs->catchall_memproj = pn;
      else
        projs->fallthrough_memproj = pn;
      break;
    case TypeFunc::Parms:
      projs->resproj = pn;
      break;
    default:
      assert(false, "unexpected projection from allocation node.");
    }
  }

  // The resproj may not exist because the result could be ignored
  // and the exception object may not exist if an exception handler
  // swallows the exception but all the other must exist and be found.
  assert(projs->fallthrough_proj != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_catchproj != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_memproj   != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->fallthrough_ioproj    != NULL, "must be found");
  assert(Compile::current()->inlining_incrementally() || projs->catchall_catchproj    != NULL, "must be found");
  if (separate_io_proj) {
    assert(Compile::current()->inlining_incrementally() || projs->catchall_memproj    != NULL, "must be found");
    assert(Compile::current()->inlining_incrementally() || projs->catchall_ioproj     != NULL, "must be found");
  }
}

// If this call wraps a not-yet-attempted MethodHandle late-inline candidate
// whose receiver/member argument has become a constant, queue it for
// incremental inlining; then fall through to the generic safepoint Ideal.
Node *CallNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  CallGenerator* cg = generator();
  if (can_reshape && cg != NULL && cg->is_mh_late_inline() && !cg->already_attempted()) {
    // Check whether this MH handle call becomes a candidate for inlining
    ciMethod* callee = cg->method();
    vmIntrinsics::ID iid = callee->intrinsic_id();
    if (iid == vmIntrinsics::_invokeBasic) {
      // invokeBasic: the MethodHandle receiver is the first argument.
      if (in(TypeFunc::Parms)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    } else {
      // linkTo*: the MemberName is appended as the last argument.
      assert(callee->has_member_arg(), "wrong type of call?");
      if (in(TypeFunc::Parms + callee->arg_size() - 1)->Opcode() == Op_ConP) {
        phase->C->prepend_late_inline(cg);
        set_generator(NULL);
      }
    }
  }
  return SafePointNode::Ideal(phase, can_reshape);
}


//=============================================================================
uint CallJavaNode::size_of() const { return sizeof(*this); }
uint CallJavaNode::cmp( const Node &n ) const {
  CallJavaNode &call = (CallJavaNode&)n;
  // Java calls are equal only if the base call matches and the target
  // method is the same.
  return CallNode::cmp(call) && _method == call._method;
}
#ifndef PRODUCT
void CallJavaNode::dump_spec(outputStream *st) const {
  if( _method ) _method->print_short_name(st);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallStaticJavaNode::size_of() const { return sizeof(*this); }
uint CallStaticJavaNode::cmp( const Node &n ) const {
  CallStaticJavaNode &call = (CallStaticJavaNode&)n;
  return CallJavaNode::cmp(call);
}

//----------------------------uncommon_trap_request----------------------------
// If this is an uncommon trap, return the request code, else zero.
int CallStaticJavaNode::uncommon_trap_request() const {
  // Uncommon traps are recognized by their runtime stub name.
  if (_name != NULL && !strcmp(_name, "uncommon_trap")) {
    return extract_uncommon_trap_request(this);
  }
  return 0;
}
// Pull the trap-request constant out of the first argument of an
// uncommon_trap call.  Returns 0 (with a diagnostic) on a malformed call.
int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) {
#ifndef PRODUCT
  if (!(call->req() > TypeFunc::Parms &&
        call->in(TypeFunc::Parms) != NULL &&
        call->in(TypeFunc::Parms)->is_Con())) {
    assert(in_dump() != 0, "OK if dumping");
    tty->print("[bad uncommon trap]");
    return 0;
  }
#endif
  return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con();
}

#ifndef PRODUCT
void CallStaticJavaNode::dump_spec(outputStream *st) const {
  st->print("# Static ");
  if (_name != NULL) {
    st->print("%s", _name);
    int trap_req = uncommon_trap_request();
    if (trap_req != 0) {
      char buf[100];
      st->print("(%s)",
                 Deoptimization::format_trap_request(buf, sizeof(buf),
                                                     trap_req));
    }
    st->print(" ");
  }
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallDynamicJavaNode::size_of() const { return sizeof(*this); }
uint CallDynamicJavaNode::cmp( const Node &n ) const {
  CallDynamicJavaNode &call = (CallDynamicJavaNode&)n;
  return CallJavaNode::cmp(call);
}
#ifndef PRODUCT
void CallDynamicJavaNode::dump_spec(outputStream *st) const {
  st->print("# Dynamic ");
  CallJavaNode::dump_spec(st);
}
#endif

//=============================================================================
uint CallRuntimeNode::size_of() const { return sizeof(*this); }
uint CallRuntimeNode::cmp( const Node &n ) const {
  CallRuntimeNode &call = (CallRuntimeNode&)n;
  // Runtime calls are distinguished by their stub name.
  return CallNode::cmp(call) && !strcmp(_name,call._name);
}
#ifndef PRODUCT
void CallRuntimeNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//------------------------------calling_convention-----------------------------
// Runtime calls use the C calling convention of the target platform.
void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const {
  Matcher::c_calling_convention( sig_bt, parm_regs, argcnt );
}

//=============================================================================
//------------------------------calling_convention-----------------------------


//=============================================================================
#ifndef PRODUCT
void CallLeafNode::dump_spec(outputStream *st) const {
  st->print("# ");
  st->print("%s", _name);
  CallNode::dump_spec(st);
}
#endif

//=============================================================================

// Replace the debug-info edge for local slot 'idx' with 'c', taking care of
// the second half of a two-slot (long/double) value that may overlap it.
void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) {
  assert(verify_jvms(jvms), "jvms must match");
  int loc = jvms->locoff() + idx;
  if (in(loc)->is_top() && idx > 0 && !c->is_top() ) {
    // If current local idx is top then local idx - 1 could
    // be a long/double that needs to be killed since top could
    // represent the 2nd half of the long/double.
    uint ideal = in(loc -1)->ideal_reg();
    if (ideal == Op_RegD || ideal == Op_RegL) {
      // set other (low index) half to top
      set_req(loc - 1, in(loc));
    }
  }
  set_req(loc, c);
}

uint SafePointNode::size_of() const { return sizeof(*this); }
uint SafePointNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

//-------------------------set_next_exception----------------------------------
// Chain safepoints with exceptions via a precedence edge past req().
void SafePointNode::set_next_exception(SafePointNode* n) {
  assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception");
  if (len() == req()) {
    // No precedence edge yet: add one (unless clearing to NULL).
    if (n != NULL) add_prec(n);
  } else {
    // Overwrite the existing precedence slot.
    set_prec(req(), n);
  }
}


//----------------------------next_exception-----------------------------------
SafePointNode* SafePointNode::next_exception() const {
  if (len() == req()) {
    return NULL;
  } else {
    Node* n = in(req());
    assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges");
    return (SafePointNode*) n;
  }
}


//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  return remove_dead_region(phase, can_reshape) ? this : NULL;
}

//------------------------------Identity---------------------------------------
// Remove obviously duplicate safepoints
Node *SafePointNode::Identity( PhaseTransform *phase ) {

  // If you have back to back safepoints, remove one
  if( in(TypeFunc::Control)->is_SafePoint() )
    return in(TypeFunc::Control);

  if( in(0)->is_Proj() ) {
    Node *n0 = in(0)->in(0);
    // Check if he is a call projection (except Leaf Call)
    if( n0->is_Catch() ) {
      n0 = n0->in(0)->in(0);
      assert( n0->is_Call(), "expect a call here" );
    }
    if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) {
      // Useless Safepoint, so remove it
      return in(TypeFunc::Control);
    }
  }

  return this;
}

//------------------------------Value------------------------------------------
const Type *SafePointNode::Value( PhaseTransform *phase ) const {
  if( phase->type(in(0)) == Type::TOP ) return Type::TOP;
  if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop
  return Type::CONTROL;
}

#ifndef PRODUCT
void SafePointNode::dump_spec(outputStream *st) const {
  st->print(" SafePoint ");
}
#endif

const RegMask &SafePointNode::in_RegMask(uint idx) const {
  if( idx < TypeFunc::Parms ) return RegMask::Empty;
  // Values outside the domain represent debug info
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}
const RegMask &SafePointNode::out_RegMask() const {
  return RegMask::Empty;
}


// Insert 'grow_by' top edges at the monitor offset, shifting the monitor,
// scalar-object and end offsets of the JVMState accordingly (used when the
// expression stack must grow, e.g. for reexecution).
void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) {
  assert((int)grow_by > 0, "sanity");
  int monoff = jvms->monoff();
  int scloff = jvms->scloff();
  int endoff = jvms->endoff();
  assert(endoff == (int)req(), "no other states or debug info after me");
  Node* top = Compile::current()->top();
  for (uint i = 0; i < grow_by; i++) {
    ins_req(monoff, top);
  }
  jvms->set_monoff(monoff + grow_by);
  jvms->set_scloff(scloff + grow_by);
  jvms->set_endoff(endoff + grow_by);
}

void SafePointNode::push_monitor(const FastLockNode *lock) {
  // Add a LockNode, which points to both the original BoxLockNode (the
  // stack space for the monitor) and the Object being locked.
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  assert(req() == jvms()->endoff(), "correct sizing");
  int nextmon = jvms()->scloff();
  if (GenerateSynchronizationCode) {
    ins_req(nextmon,   lock->box_node());
    ins_req(nextmon+1, lock->obj_node());
  } else {
    // Synchronization elided: keep the slot layout with two top edges.
    Node* top = Compile::current()->top();
    ins_req(nextmon, top);
    ins_req(nextmon, top);
  }
  jvms()->set_scloff(nextmon + MonitorEdges);
  jvms()->set_endoff(req());
}

void SafePointNode::pop_monitor() {
  // Delete last monitor from debug info
  debug_only(int num_before_pop = jvms()->nof_monitors());
  const int MonitorEdges = 2;
  assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges");
  int scloff = jvms()->scloff();
  int endoff = jvms()->endoff();
  int new_scloff = scloff - MonitorEdges;
  int new_endoff = endoff - MonitorEdges;
  jvms()->set_scloff(new_scloff);
  jvms()->set_endoff(new_endoff);
  // Remove the box/obj edge pair, preserving the order of remaining edges.
  while (scloff > new_scloff) del_req_ordered(--scloff);
  assert(jvms()->nof_monitors() == num_before_pop-1, "");
}

Node *SafePointNode::peek_monitor_box() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "most have a monitor");
  return monitor_box(jvms(), mon);
}

Node *SafePointNode::peek_monitor_obj() const {
  int mon = jvms()->nof_monitors() - 1;
  assert(mon >= 0, "most have a monitor");
  return monitor_obj(jvms(), mon);
}

// Do we Match on this edge index or not?  Match no edges
uint SafePointNode::match_edge(uint idx) const {
  if( !needs_polling_address_input() )
    return 0;

  // Only the polling-page address input is matched, when one is required.
  return (TypeFunc::Parms == idx);
}

//==============  SafePointScalarObjectNode  ==============

SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp,
#ifdef ASSERT
                                                     AllocateNode* alloc,
#endif
                                                     uint first_index,
                                                     uint n_fields) :
  TypeNode(tp, 1), // 1 control input -- seems required.  Get from root.
#ifdef ASSERT
  _alloc(alloc),
#endif
  _first_index(first_index),
  _n_fields(n_fields)
{
  init_class_id(Class_SafePointScalarObject);
}

// Do not allow value-numbering for SafePointScalarObject node.
uint SafePointScalarObjectNode::hash() const { return NO_HASH; }
uint SafePointScalarObjectNode::cmp( const Node &n ) const {
  return (&n == this);          // Always fail except on self
}

uint SafePointScalarObjectNode::ideal_reg() const {
  return 0; // No matching to machine instruction
}

const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const {
  // Inputs are debug info only; use the debug mask of the input's ideal reg.
  return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]);
}

const RegMask &SafePointScalarObjectNode::out_RegMask() const {
  return RegMask::Empty;
}

uint SafePointScalarObjectNode::match_edge(uint idx) const {
  return 0;
}

// Clone with memoization: 'sosn_map' caches this-node -> clone so that a
// scalar object shared by several safepoints is cloned only once.
SafePointScalarObjectNode*
SafePointScalarObjectNode::clone(Dict* sosn_map) const {
  void* cached = (*sosn_map)[(void*)this];
  if (cached != NULL) {
    return (SafePointScalarObjectNode*)cached;
  }
  SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone();
  sosn_map->Insert((void*)this, (void*)res);
  return res;
}


#ifndef PRODUCT
void SafePointScalarObjectNode::dump_spec(outputStream *st) const {
  st->print(" # fields@[%d..%d]", first_index(),
             first_index() + n_fields() - 1);
}

#endif

//=============================================================================
uint AllocateNode::size_of() const { return sizeof(*this); }

AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
                           Node *ctrl, Node *mem, Node *abio,
                           Node *size, Node *klass_node, Node *initial_test)
  : CallNode(atype, NULL, TypeRawPtr::BOTTOM)
{
  init_class_id(Class_Allocate);
  init_flags(Flag_is_macro);     // expanded later by PhaseMacroExpand
  _is_scalar_replaceable = false;
  _is_non_escaping = false;
  Node *topnode = C->top();

  init_req( TypeFunc::Control  , ctrl );
  init_req( TypeFunc::I_O      , abio );
  init_req( TypeFunc::Memory   , mem );
  init_req( TypeFunc::ReturnAdr, topnode );
  init_req( TypeFunc::FramePtr , topnode );
  init_req( AllocSize          , size);
  init_req( KlassNode          , klass_node);
  init_req( InitialTest        , initial_test);
  init_req( ALength            , topnode);    // array length set by subclass
  C->add_macro_node(this);
}

//=============================================================================
Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) {
  if (remove_dead_region(phase, can_reshape))  return this;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  const Type* type = phase->type(Ideal_length());
  if (type->isa_int() && type->is_int()->_hi < 0) {
    // Length is provably negative: the allocation always throws
    // NegativeArraySizeException.
    if (can_reshape) {
      PhaseIterGVN *igvn = phase->is_IterGVN();
      // Unreachable fall through path (negative array length),
      // the allocation can only throw so disconnect it.
      Node* proj = proj_out(TypeFunc::Control);
      Node* catchproj = NULL;
      if (proj != NULL) {
        for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) {
          Node *cn = proj->fast_out(i);
          if (cn->is_Catch()) {
            catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index);
            break;
          }
        }
      }
      if (catchproj != NULL && catchproj->outcnt() > 0 &&
          (catchproj->outcnt() > 1 ||
           catchproj->unique_out()->Opcode() != Op_Halt)) {
        assert(catchproj->is_CatchProj(), "must be a CatchProjNode");
        Node* nproj = catchproj->clone();
        igvn->register_new_node_with_optimizer(nproj);

        Node *frame = new (phase->C) ParmNode( phase->C->start(), TypeFunc::FramePtr );
        frame = phase->transform(frame);
        // Halt & Catch Fire
        Node *halt = new (phase->C) HaltNode( nproj, frame );
        phase->C->root()->add_req(halt);
        phase->transform(halt);

        igvn->replace_node(catchproj, phase->C->top());
        return this;
      }
    } else {
      // Can't correct it during regular GVN so register for IGVN
      phase->C->record_for_igvn(this);
    }
  }
  return NULL;
}

// Retrieve the length from the AllocateArrayNode. Narrow the type with a
// CastII, if appropriate.  If we are not allowed to create new nodes, and
// a CastII is appropriate, return NULL.
Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) {
  Node *length = in(AllocateNode::ALength);
  assert(length != NULL, "length is not null");

  const TypeInt* length_type = phase->find_int_type(length);
  const TypeAryPtr* ary_type = oop_type->isa_aryptr();

  if (ary_type != NULL && length_type != NULL) {
    // Clip the length type to what the array type can actually hold.
    const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type);
    if (narrow_length_type != length_type) {
      // Assert one of:
      //   - the narrow_length is 0
      //   - the narrow_length is not wider than length
      assert(narrow_length_type == TypeInt::ZERO ||
             length_type->is_con() && narrow_length_type->is_con() &&
                (narrow_length_type->_hi <= length_type->_lo) ||
             (narrow_length_type->_hi <= length_type->_hi &&
              narrow_length_type->_lo >= length_type->_lo),
             "narrow type must be narrower than length type");

      // Return NULL if new nodes are not allowed
      if (!allow_new_nodes) return NULL;
      // Create a cast which is control dependent on the initialization to
      // propagate the fact that the array length must be positive.
      length = new (phase->C) CastIINode(length, narrow_length_type);
      length->set_req(0, initialization()->proj_out(0));
    }
  }

  return length;
}

//=============================================================================
uint LockNode::size_of() const { return sizeof(*this); }

// Redundant lock elimination
//
// There are various patterns of locking where we release and
// immediately reacquire a lock in a piece of code where no operations
// occur in between that would be observable.  In those cases we can
// skip releasing and reacquiring the lock without violating any
// fairness requirements.  Doing this around a loop could cause a lock
// to be held for a very long time so we concentrate on non-looping
// control flow.  We also require that the operations are fully
// redundant meaning that we don't introduce new lock operations on
// some paths so to be able to eliminate it on others ala PRE.  This
// would probably require some more extensive graph manipulation to
// guarantee that the memory edges were all handled correctly.
//
// Assuming p is a simple predicate which can't trap in any way and s
// is a synchronized method consider this code:
//
//   s();
//   if (p)
//     s();
//   else
//     s();
//   s();
//
// 1. The unlocks of the first call to s can be eliminated if the
// locks inside the then and else branches are eliminated.
//
// 2. The unlocks of the then and else branches can be eliminated if
// the lock of the final call to s is eliminated.
//
// Either of these cases subsumes the simple case of sequential control flow
//
// Additionally we can eliminate versions without the else case:
//
//   s();
//   if (p)
//     s();
//   s();
//
// 3. In this case we eliminate the unlock of the first s, the lock
// and unlock in the then case and the lock in the final s.
//
// Note also that in all these cases the then/else pieces don't have
// to be trivial as long as they begin and end with synchronization
// operations.
//
//   s();
//   if (p)
//     s();
//     f();
//     s();
//   s();
//
// The code will work properly for this case, leaving in the unlock
// before the call to f and the relock after it.
//
// A potentially interesting case which isn't handled here is when the
// locking is partially redundant.
//
//   s();
//   if (p)
//     s();
//
// This could be eliminated putting unlocking on the else case and
// eliminating the first unlock and the lock in the then side.
// Alternatively the unlock could be moved out of the then side so it
// was after the merge and the first unlock and second lock
// eliminated.  This might require less manipulation of the memory
// state to get correct.
//
// Additionally we might allow work between a unlock and lock before
// giving up eliminating the locks.  The current code disallows any
// conditional control flow between these operations.  A formulation
// similar to partial redundancy elimination computing the
// availability of unlocking and the anticipatability of locking at a
// program point would allow detection of fully redundant locking with
// some amount of work in between.  I'm not sure how often I really
// think that would occur though.  Most of the cases I've seen
// indicate it's likely non-trivial work would occur in between.
// There may be other more complicated constructs where we could
// eliminate locking but I haven't seen any others appear as hot or
// interesting.
//
// Locking and unlocking have a canonical form in ideal that looks
// roughly like this:
//
//              <obj>
//                | \\------+
//                |  \       \
//                |  BoxLock   \
//                |  |  |        \
//                |  |   \        \
//                |  |    FastLock
//                |  |    /
//                |  |   /
//                |  |  |
//
//                Lock
//                  |
//               Proj #0
//                  |
//             MembarAcquire
//                  |
//               Proj #0
//
//             MembarRelease
//                  |
//               Proj #0
//                  |
//                Unlock
//                  |
//               Proj #0
//
//
// This code proceeds by processing Lock nodes during PhaseIterGVN
// and searching back through its control for the proper code
// patterns.  Once it finds a set of lock and unlock operations to
// eliminate they are marked as eliminatable which causes the
// expansion of the Lock and Unlock macro nodes to make the operation a NOP
//
//=============================================================================

//
// Utility function to skip over uninteresting control nodes.  Nodes skipped are:
//   - copy regions.  (These may not have been optimized away yet.)
//   - eliminated locking nodes
//
static Node *next_control(Node *ctrl) {
  if (ctrl == NULL)
    return NULL;
  while (1) {
    if (ctrl->is_Region()) {
      RegionNode *r = ctrl->as_Region();
      Node *n = r->is_copy();
      if (n == NULL)
        break;  // hit a region, return it
      else
        ctrl = n;
    } else if (ctrl->is_Proj()) {
      Node *in0 = ctrl->in(0);
      if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) {
        ctrl = in0->in(0);
      } else {
        break;
      }
    } else {
      break; // found an interesting control
    }
  }
  return ctrl;
}
//
// Given a control, see if it's the control projection of an Unlock which
// operating on the same object as lock.
//
bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock,
                                            GrowableArray<AbstractLockNode*> &lock_ops) {
  ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL;
  if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) {
    Node *n = ctrl_proj->in(0);
    if (n != NULL && n->is_Unlock()) {
      UnlockNode *unlock = n->as_Unlock();
      // Must be the same object and the same stack slot, and not already
      // claimed by a previous elimination round.
      if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
          BoxLockNode::same_slot(lock->box_node(), unlock->box_node()) &&
          !unlock->is_eliminated()) {
        lock_ops.append(unlock);
        return true;
      }
    }
  }
  return false;
}

//
// Find the lock matching an unlock.  Returns null if a safepoint
// or complicated control is encountered first.
LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) {
  LockNode *lock_result = NULL;
  // find the matching lock, or an intervening safepoint
  Node *ctrl = next_control(unlock->in(0));
  while (1) {
    assert(ctrl != NULL, "invalid control graph");
    assert(!ctrl->is_Start(), "missing lock for unlock");
    if (ctrl->is_top())  break;  // dead control path
    if (ctrl->is_Proj())  ctrl = ctrl->in(0);
    if (ctrl->is_SafePoint()) {
      break;  // found a safepoint (may be the lock we are searching for)
    } else if (ctrl->is_Region()) {
      // Check for a simple diamond pattern.  Punt on anything more complicated
      if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) {
        Node *in1 = next_control(ctrl->in(1));
        Node *in2 = next_control(ctrl->in(2));
        if (((in1->is_IfTrue() && in2->is_IfFalse()) ||
             (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) {
          // Both region inputs come from the same If: walk above it.
          ctrl = next_control(in1->in(0)->in(0));
        } else {
          break;
        }
      } else {
        break;
      }
    } else {
      ctrl = next_control(ctrl->in(0));  // keep searching
    }
  }
  if (ctrl->is_Lock()) {
    LockNode *lock = ctrl->as_Lock();
    if (lock->obj_node()->eqv_uncast(unlock->obj_node()) &&
        BoxLockNode::same_slot(lock->box_node(), unlock->box_node())) {
      lock_result = lock;
    }
  }
  return lock_result;
}

// This code corresponds to case 3 above.

bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock,
                                                       GrowableArray<AbstractLockNode*> &lock_ops) {
  Node* if_node = node->in(0);
  bool  if_true = node->is_IfTrue();

  if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) {
    Node *lock_ctrl = next_control(if_node->in(0));
    if (find_matching_unlock(lock_ctrl, lock, lock_ops)) {
      // Unlock found above the If; now look for a Lock on the other branch.
      Node* lock1_node = NULL;
      ProjNode* proj = if_node->as_If()->proj_out(!if_true);
      if (if_true) {
        if (proj->is_IfFalse() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      } else {
        if (proj->is_IfTrue() && proj->outcnt() == 1) {
          lock1_node = proj->unique_out();
        }
      }
      if (lock1_node != NULL && lock1_node->is_Lock()) {
        LockNode *lock1 = lock1_node->as_Lock();
        if (lock->obj_node()->eqv_uncast(lock1->obj_node()) &&
            BoxLockNode::same_slot(lock->box_node(), lock1->box_node()) &&
            !lock1->is_eliminated()) {
          lock_ops.append(lock1);
          return true;
        }
      }
    }
  }

  // No match: discard any partially collected candidates.
  lock_ops.trunc_to(0);
  return false;
}

bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock,
                                               GrowableArray<AbstractLockNode*> &lock_ops) {
  // check each control merging at this point for a matching unlock.
  // in(0) should be self edge so skip it.
  for (int i = 1; i < (int)region->req(); i++) {
    Node *in_node = next_control(region->in(i));
    if (in_node != NULL) {
      if (find_matching_unlock(in_node, lock, lock_ops)) {
        // found a match so keep on checking.
        continue;
      } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) {
        continue;
      }

      // If we fall through to here then it was some kind of node we
      // don't understand or there wasn't a matching unlock, so give
      // up trying to merge locks.
      lock_ops.trunc_to(0);
      return false;
    }
  }
  return true;

}

#ifndef PRODUCT
//
// Create a counter which counts the number of times this lock is acquired
//
void AbstractLockNode::create_lock_counter(JVMState* state) {
  _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter);
}

void AbstractLockNode::set_eliminated_lock_counter() {
  if (_counter) {
    // Update the counter to indicate that this lock was eliminated.
    // The counter update code will stay around even though the
    // optimizer will eliminate the lock operation itself.
    _counter->set_tag(NamedCounter::EliminatedLockCounter);
  }
}
#endif

//=============================================================================
Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) {

  // perform any generic optimizations first (returns 'this' or NULL)
  Node *result = SafePointNode::Ideal(phase, can_reshape);
  if (result != NULL)  return result;
  // Don't bother trying to transform a dead node
  if (in(0) && in(0)->is_top())  return NULL;

  // Now see if we can optimize away this lock.  We don't actually
  // remove the locking here, we simply set the _eliminate flag which
  // prevents macro expansion from expanding the lock.  Since we don't
  // modify the graph, the value returned from this function is the
  // one computed above.
  if (can_reshape && EliminateLocks && !is_non_esc_obj()) {
    //
    // If we are locking an unescaped object, the lock/unlock is unnecessary
    //
    ConnectionGraph *cgr = phase->C->congraph();
    if (cgr != NULL && cgr->not_global_escape(obj_node())) {
      assert(!is_eliminated() || is_coarsened(), "sanity");
      // The lock could be marked eliminated by lock coarsening
      // code during first IGVN before EA. Replace coarsened flag
      // to eliminate all associated locks/unlocks.
      this->set_non_esc_obj();
      return result;
    }

    //
    // Try lock coarsening
    //
    PhaseIterGVN* iter = phase->is_IterGVN();
    if (iter != NULL && !is_eliminated()) {

      GrowableArray<AbstractLockNode*>   lock_ops;

      Node *ctrl = next_control(in(0));

      // now search back for a matching Unlock
      if (find_matching_unlock(ctrl, this, lock_ops)) {
        // found an unlock directly preceding this lock.  This is the
        // case of single unlock directly control dependent on a
        // single lock which is the trivial version of case 1 or 2.
      } else if (ctrl->is_Region() ) {
        if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) {
        // found lock preceded by multiple unlocks along all paths
        // joining at this point which is case 3 in description above.
        }
      } else {
        // see if this lock comes from either half of an if and the
        // predecessors merges unlocks and the other half of the if
        // performs a lock.
        if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) {
          // found unlock splitting to an if with locks on both branches.
        }
      }

      if (lock_ops.length() > 0) {
        // add ourselves to the list of locks to be eliminated.
        lock_ops.append(this);

  #ifndef PRODUCT
        if (PrintEliminateLocks) {
          int locks = 0;
          int unlocks = 0;
          for (int i = 0; i < lock_ops.length(); i++) {
            AbstractLockNode* lock = lock_ops.at(i);
            if (lock->Opcode() == Op_Lock)
              locks++;
            else
              unlocks++;
            if (Verbose) {
              lock->dump(1);
            }
          }
          tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks);
        }
  #endif

        // for each of the identified locks, mark them
        // as eliminatable
        for (int i = 0; i < lock_ops.length(); i++) {
          AbstractLockNode* lock = lock_ops.at(i);

          // Mark it eliminated by coarsening and update any counters
          lock->set_coarsened();
        }
      } else if (ctrl->is_Region() &&
                 iter->_worklist.member(ctrl)) {
        // We weren't able to find any opportunities but the region this
        // lock is control dependent on hasn't been processed yet so put
        // this lock back on the worklist so we can check again once any
        // region simplification has occurred.
        iter->_worklist.push(this);
      }
    }
  }

  return result;
}

//=============================================================================
bool LockNode::is_nested_lock_region() {
  BoxLockNode* box = box_node()->as_BoxLock();
  int stk_slot = box->stack_slot();
  if (stk_slot <= 0)
    return false; // External lock or it is not Box (Phi node).

  // Ignore complex cases: merged locks or multiple locks.
  Node* obj = obj_node();
  LockNode* unique_lock = NULL;
  if (!box->is_simple_lock_region(&unique_lock, obj) ||
      (unique_lock != this)) {
    return false;
  }

  // Look for external lock for the same object.
1.1761 + SafePointNode* sfn = this->as_SafePoint(); 1.1762 + JVMState* youngest_jvms = sfn->jvms(); 1.1763 + int max_depth = youngest_jvms->depth(); 1.1764 + for (int depth = 1; depth <= max_depth; depth++) { 1.1765 + JVMState* jvms = youngest_jvms->of_depth(depth); 1.1766 + int num_mon = jvms->nof_monitors(); 1.1767 + // Loop over monitors 1.1768 + for (int idx = 0; idx < num_mon; idx++) { 1.1769 + Node* obj_node = sfn->monitor_obj(jvms, idx); 1.1770 + BoxLockNode* box_node = sfn->monitor_box(jvms, idx)->as_BoxLock(); 1.1771 + if ((box_node->stack_slot() < stk_slot) && obj_node->eqv_uncast(obj)) { 1.1772 + return true; 1.1773 + } 1.1774 + } 1.1775 + } 1.1776 + return false; 1.1777 +} 1.1778 + 1.1779 +//============================================================================= 1.1780 +uint UnlockNode::size_of() const { return sizeof(*this); } 1.1781 + 1.1782 +//============================================================================= 1.1783 +Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { 1.1784 + 1.1785 + // perform any generic optimizations first (returns 'this' or NULL) 1.1786 + Node *result = SafePointNode::Ideal(phase, can_reshape); 1.1787 + if (result != NULL) return result; 1.1788 + // Don't bother trying to transform a dead node 1.1789 + if (in(0) && in(0)->is_top()) return NULL; 1.1790 + 1.1791 + // Now see if we can optimize away this unlock. We don't actually 1.1792 + // remove the unlocking here, we simply set the _eliminate flag which 1.1793 + // prevents macro expansion from expanding the unlock. Since we don't 1.1794 + // modify the graph, the value returned from this function is the 1.1795 + // one computed above. 1.1796 + // Escape state is defined after Parse phase. 1.1797 + if (can_reshape && EliminateLocks && !is_non_esc_obj()) { 1.1798 + // 1.1799 + // If we are unlocking an unescaped object, the lock/unlock is unnecessary. 
1.1800 + // 1.1801 + ConnectionGraph *cgr = phase->C->congraph(); 1.1802 + if (cgr != NULL && cgr->not_global_escape(obj_node())) { 1.1803 + assert(!is_eliminated() || is_coarsened(), "sanity"); 1.1804 + // The lock could be marked eliminated by lock coarsening 1.1805 + // code during first IGVN before EA. Replace coarsened flag 1.1806 + // to eliminate all associated locks/unlocks. 1.1807 + this->set_non_esc_obj(); 1.1808 + } 1.1809 + } 1.1810 + return result; 1.1811 +}