Mon, 26 Sep 2011 10:24:05 -0700
7081933: Use zeroing elimination optimization for large arrays
Summary: Don't zero a new typeArray during the runtime call if the allocation is followed by an arraycopy into it.
Reviewed-by: twisti
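
Illustration (a sketch, not part of the changeset): the pattern this change targets is a fresh array allocation whose contents are immediately and fully overwritten, which makes the zeroing done by the slow-path runtime allocation redundant. In Java source it looks roughly like:

    // Hypothetical example; names are illustrative.
    static byte[] duplicate(byte[] src) {
        byte[] dst = new byte[src.length];             // large array: allocated via a runtime call
        System.arraycopy(src, 0, dst, 0, src.length);  // overwrites every element of dst,
        return dst;                                    // so pre-zeroing dst can be elided
    }
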
duke@435 | 1 | /* |
stefank@2314 | 2 | * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "ci/bcEscapeAnalyzer.hpp" |
stefank@2314 | 27 | #include "compiler/oopMap.hpp" |
stefank@2314 | 28 | #include "opto/callnode.hpp" |
stefank@2314 | 29 | #include "opto/escape.hpp" |
stefank@2314 | 30 | #include "opto/locknode.hpp" |
stefank@2314 | 31 | #include "opto/machnode.hpp" |
stefank@2314 | 32 | #include "opto/matcher.hpp" |
stefank@2314 | 33 | #include "opto/parse.hpp" |
stefank@2314 | 34 | #include "opto/regalloc.hpp" |
stefank@2314 | 35 | #include "opto/regmask.hpp" |
stefank@2314 | 36 | #include "opto/rootnode.hpp" |
stefank@2314 | 37 | #include "opto/runtime.hpp" |
stefank@2314 | 38 | |
duke@435 | 39 | // Portions of code courtesy of Clifford Click |
duke@435 | 40 | |
duke@435 | 41 | // Optimization - Graph Style |
duke@435 | 42 | |
duke@435 | 43 | //============================================================================= |
duke@435 | 44 | uint StartNode::size_of() const { return sizeof(*this); } |
duke@435 | 45 | uint StartNode::cmp( const Node &n ) const |
duke@435 | 46 | { return _domain == ((StartNode&)n)._domain; } |
duke@435 | 47 | const Type *StartNode::bottom_type() const { return _domain; } |
duke@435 | 48 | const Type *StartNode::Value(PhaseTransform *phase) const { return _domain; } |
duke@435 | 49 | #ifndef PRODUCT |
duke@435 | 50 | void StartNode::dump_spec(outputStream *st) const { st->print(" #"); _domain->dump_on(st);} |
duke@435 | 51 | #endif |
duke@435 | 52 | |
duke@435 | 53 | //------------------------------Ideal------------------------------------------ |
duke@435 | 54 | Node *StartNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
duke@435 | 55 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
duke@435 | 56 | } |
duke@435 | 57 | |
duke@435 | 58 | //------------------------------calling_convention----------------------------- |
duke@435 | 59 | void StartNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
duke@435 | 60 | Matcher::calling_convention( sig_bt, parm_regs, argcnt, false ); |
duke@435 | 61 | } |
duke@435 | 62 | |
duke@435 | 63 | //------------------------------Registers-------------------------------------- |
duke@435 | 64 | const RegMask &StartNode::in_RegMask(uint) const { |
duke@435 | 65 | return RegMask::Empty; |
duke@435 | 66 | } |
duke@435 | 67 | |
duke@435 | 68 | //------------------------------match------------------------------------------ |
duke@435 | 69 | // Construct projections for incoming parameters, and their RegMask info |
duke@435 | 70 | Node *StartNode::match( const ProjNode *proj, const Matcher *match ) { |
duke@435 | 71 | switch (proj->_con) { |
duke@435 | 72 | case TypeFunc::Control: |
duke@435 | 73 | case TypeFunc::I_O: |
duke@435 | 74 | case TypeFunc::Memory: |
duke@435 | 75 | return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); |
duke@435 | 76 | case TypeFunc::FramePtr: |
duke@435 | 77 | return new (match->C, 1) MachProjNode(this,proj->_con,Matcher::c_frame_ptr_mask, Op_RegP); |
duke@435 | 78 | case TypeFunc::ReturnAdr: |
duke@435 | 79 | return new (match->C, 1) MachProjNode(this,proj->_con,match->_return_addr_mask,Op_RegP); |
duke@435 | 80 | case TypeFunc::Parms: |
duke@435 | 81 | default: { |
duke@435 | 82 | uint parm_num = proj->_con - TypeFunc::Parms; |
duke@435 | 83 | const Type *t = _domain->field_at(proj->_con); |
duke@435 | 84 | if (t->base() == Type::Half) // 2nd half of Longs and Doubles |
duke@435 | 85 | return new (match->C, 1) ConNode(Type::TOP); |
duke@435 | 86 | uint ideal_reg = Matcher::base2reg[t->base()]; |
duke@435 | 87 | RegMask &rm = match->_calling_convention_mask[parm_num]; |
duke@435 | 88 | return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg); |
duke@435 | 89 | } |
duke@435 | 90 | } |
duke@435 | 91 | return NULL; |
duke@435 | 92 | } |
duke@435 | 93 | |
duke@435 | 94 | //------------------------------StartOSRNode---------------------------------- |
duke@435 | 95 | // The method start node for an on stack replacement adapter |
duke@435 | 96 | |
duke@435 | 97 | //------------------------------osr_domain----------------------------- |
duke@435 | 98 | const TypeTuple *StartOSRNode::osr_domain() { |
duke@435 | 99 | const Type **fields = TypeTuple::fields(2); |
duke@435 | 100 | fields[TypeFunc::Parms+0] = TypeRawPtr::BOTTOM; // address of osr buffer |
duke@435 | 101 | |
duke@435 | 102 | return TypeTuple::make(TypeFunc::Parms+1, fields); |
duke@435 | 103 | } |
duke@435 | 104 | |
duke@435 | 105 | //============================================================================= |
duke@435 | 106 | const char * const ParmNode::names[TypeFunc::Parms+1] = { |
duke@435 | 107 | "Control", "I_O", "Memory", "FramePtr", "ReturnAdr", "Parms" |
duke@435 | 108 | }; |
duke@435 | 109 | |
duke@435 | 110 | #ifndef PRODUCT |
duke@435 | 111 | void ParmNode::dump_spec(outputStream *st) const { |
duke@435 | 112 | if( _con < TypeFunc::Parms ) { |
duke@435 | 113 | st->print(names[_con]); |
duke@435 | 114 | } else { |
duke@435 | 115 | st->print("Parm%d: ",_con-TypeFunc::Parms); |
duke@435 | 116 | // Verbose and WizardMode dump bottom_type for all nodes |
duke@435 | 117 | if( !Verbose && !WizardMode ) bottom_type()->dump_on(st); |
duke@435 | 118 | } |
duke@435 | 119 | } |
duke@435 | 120 | #endif |
duke@435 | 121 | |
duke@435 | 122 | uint ParmNode::ideal_reg() const { |
duke@435 | 123 | switch( _con ) { |
duke@435 | 124 | case TypeFunc::Control : // fall through |
duke@435 | 125 | case TypeFunc::I_O : // fall through |
duke@435 | 126 | case TypeFunc::Memory : return 0; |
duke@435 | 127 | case TypeFunc::FramePtr : // fall through |
duke@435 | 128 | case TypeFunc::ReturnAdr: return Op_RegP; |
duke@435 | 129 | default : assert( _con > TypeFunc::Parms, "" ); |
duke@435 | 130 | // fall through |
duke@435 | 131 | case TypeFunc::Parms : { |
duke@435 | 132 | // Type of argument being passed |
duke@435 | 133 | const Type *t = in(0)->as_Start()->_domain->field_at(_con); |
duke@435 | 134 | return Matcher::base2reg[t->base()]; |
duke@435 | 135 | } |
duke@435 | 136 | } |
duke@435 | 137 | ShouldNotReachHere(); |
duke@435 | 138 | return 0; |
duke@435 | 139 | } |
duke@435 | 140 | |
duke@435 | 141 | //============================================================================= |
duke@435 | 142 | ReturnNode::ReturnNode(uint edges, Node *cntrl, Node *i_o, Node *memory, Node *frameptr, Node *retadr ) : Node(edges) { |
duke@435 | 143 | init_req(TypeFunc::Control,cntrl); |
duke@435 | 144 | init_req(TypeFunc::I_O,i_o); |
duke@435 | 145 | init_req(TypeFunc::Memory,memory); |
duke@435 | 146 | init_req(TypeFunc::FramePtr,frameptr); |
duke@435 | 147 | init_req(TypeFunc::ReturnAdr,retadr); |
duke@435 | 148 | } |
duke@435 | 149 | |
duke@435 | 150 | Node *ReturnNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
duke@435 | 151 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
duke@435 | 152 | } |
duke@435 | 153 | |
duke@435 | 154 | const Type *ReturnNode::Value( PhaseTransform *phase ) const { |
duke@435 | 155 | return ( phase->type(in(TypeFunc::Control)) == Type::TOP) |
duke@435 | 156 | ? Type::TOP |
duke@435 | 157 | : Type::BOTTOM; |
duke@435 | 158 | } |
duke@435 | 159 | |
duke@435 | 160 | // Do we Match on this edge index or not? No edges on return nodes |
duke@435 | 161 | uint ReturnNode::match_edge(uint idx) const { |
duke@435 | 162 | return 0; |
duke@435 | 163 | } |
duke@435 | 164 | |
duke@435 | 165 | |
duke@435 | 166 | #ifndef PRODUCT |
duke@435 | 167 | void ReturnNode::dump_req() const { |
duke@435 | 168 | // Dump the required inputs, printing "returns" before the value inputs |
duke@435 | 169 | uint i; // Exit value of loop |
duke@435 | 170 | for( i=0; i<req(); i++ ) { // For all required inputs |
duke@435 | 171 | if( i == TypeFunc::Parms ) tty->print("returns"); |
duke@435 | 172 | if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
duke@435 | 173 | else tty->print("_ "); |
duke@435 | 174 | } |
duke@435 | 175 | } |
duke@435 | 176 | #endif |
duke@435 | 177 | |
duke@435 | 178 | //============================================================================= |
duke@435 | 179 | RethrowNode::RethrowNode( |
duke@435 | 180 | Node* cntrl, |
duke@435 | 181 | Node* i_o, |
duke@435 | 182 | Node* memory, |
duke@435 | 183 | Node* frameptr, |
duke@435 | 184 | Node* ret_adr, |
duke@435 | 185 | Node* exception |
duke@435 | 186 | ) : Node(TypeFunc::Parms + 1) { |
duke@435 | 187 | init_req(TypeFunc::Control , cntrl ); |
duke@435 | 188 | init_req(TypeFunc::I_O , i_o ); |
duke@435 | 189 | init_req(TypeFunc::Memory , memory ); |
duke@435 | 190 | init_req(TypeFunc::FramePtr , frameptr ); |
duke@435 | 191 | init_req(TypeFunc::ReturnAdr, ret_adr); |
duke@435 | 192 | init_req(TypeFunc::Parms , exception); |
duke@435 | 193 | } |
duke@435 | 194 | |
duke@435 | 195 | Node *RethrowNode::Ideal(PhaseGVN *phase, bool can_reshape){ |
duke@435 | 196 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
duke@435 | 197 | } |
duke@435 | 198 | |
duke@435 | 199 | const Type *RethrowNode::Value( PhaseTransform *phase ) const { |
duke@435 | 200 | return (phase->type(in(TypeFunc::Control)) == Type::TOP) |
duke@435 | 201 | ? Type::TOP |
duke@435 | 202 | : Type::BOTTOM; |
duke@435 | 203 | } |
duke@435 | 204 | |
duke@435 | 205 | uint RethrowNode::match_edge(uint idx) const { |
duke@435 | 206 | return 0; |
duke@435 | 207 | } |
duke@435 | 208 | |
duke@435 | 209 | #ifndef PRODUCT |
duke@435 | 210 | void RethrowNode::dump_req() const { |
duke@435 | 211 | // Dump the required inputs, printing "exception" before the value inputs |
duke@435 | 212 | uint i; // Exit value of loop |
duke@435 | 213 | for( i=0; i<req(); i++ ) { // For all required inputs |
duke@435 | 214 | if( i == TypeFunc::Parms ) tty->print("exception"); |
duke@435 | 215 | if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
duke@435 | 216 | else tty->print("_ "); |
duke@435 | 217 | } |
duke@435 | 218 | } |
duke@435 | 219 | #endif |
duke@435 | 220 | |
duke@435 | 221 | //============================================================================= |
duke@435 | 222 | // Do we Match on this edge index or not? Match only target address & method |
duke@435 | 223 | uint TailCallNode::match_edge(uint idx) const { |
duke@435 | 224 | return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; |
duke@435 | 225 | } |
duke@435 | 226 | |
duke@435 | 227 | //============================================================================= |
duke@435 | 228 | // Do we Match on this edge index or not? Match only target address & oop |
duke@435 | 229 | uint TailJumpNode::match_edge(uint idx) const { |
duke@435 | 230 | return TypeFunc::Parms <= idx && idx <= TypeFunc::Parms+1; |
duke@435 | 231 | } |
duke@435 | 232 | |
duke@435 | 233 | //============================================================================= |
duke@435 | 234 | JVMState::JVMState(ciMethod* method, JVMState* caller) { |
duke@435 | 235 | assert(method != NULL, "must be valid call site"); |
duke@435 | 236 | _method = method; |
cfang@1335 | 237 | _reexecute = Reexecute_Undefined; |
duke@435 | 238 | debug_only(_bci = -99); // random garbage value |
duke@435 | 239 | debug_only(_map = (SafePointNode*)-1); |
duke@435 | 240 | _caller = caller; |
duke@435 | 241 | _depth = 1 + (caller == NULL ? 0 : caller->depth()); |
duke@435 | 242 | _locoff = TypeFunc::Parms; |
duke@435 | 243 | _stkoff = _locoff + _method->max_locals(); |
duke@435 | 244 | _monoff = _stkoff + _method->max_stack(); |
kvn@498 | 245 | _scloff = _monoff; |
duke@435 | 246 | _endoff = _monoff; |
duke@435 | 247 | _sp = 0; |
duke@435 | 248 | } |
duke@435 | 249 | JVMState::JVMState(int stack_size) { |
duke@435 | 250 | _method = NULL; |
duke@435 | 251 | _bci = InvocationEntryBci; |
cfang@1335 | 252 | _reexecute = Reexecute_Undefined; |
duke@435 | 253 | debug_only(_map = (SafePointNode*)-1); |
duke@435 | 254 | _caller = NULL; |
duke@435 | 255 | _depth = 1; |
duke@435 | 256 | _locoff = TypeFunc::Parms; |
duke@435 | 257 | _stkoff = _locoff; |
duke@435 | 258 | _monoff = _stkoff + stack_size; |
kvn@498 | 259 | _scloff = _monoff; |
duke@435 | 260 | _endoff = _monoff; |
duke@435 | 261 | _sp = 0; |
duke@435 | 262 | } |
duke@435 | 263 | |
duke@435 | 264 | //--------------------------------of_depth------------------------------------- |
duke@435 | 265 | JVMState* JVMState::of_depth(int d) const { |
duke@435 | 266 | const JVMState* jvmp = this; |
duke@435 | 267 | assert(0 < d && (uint)d <= depth(), "oob"); |
duke@435 | 268 | for (int skip = depth() - d; skip > 0; skip--) { |
duke@435 | 269 | jvmp = jvmp->caller(); |
duke@435 | 270 | } |
duke@435 | 271 | assert(jvmp->depth() == (uint)d, "found the right one"); |
duke@435 | 272 | return (JVMState*)jvmp; |
duke@435 | 273 | } |
duke@435 | 274 | |
duke@435 | 275 | //-----------------------------same_calls_as----------------------------------- |
duke@435 | 276 | bool JVMState::same_calls_as(const JVMState* that) const { |
duke@435 | 277 | if (this == that) return true; |
duke@435 | 278 | if (this->depth() != that->depth()) return false; |
duke@435 | 279 | const JVMState* p = this; |
duke@435 | 280 | const JVMState* q = that; |
duke@435 | 281 | for (;;) { |
duke@435 | 282 | if (p->_method != q->_method) return false; |
duke@435 | 283 | if (p->_method == NULL) return true; // bci is irrelevant |
duke@435 | 284 | if (p->_bci != q->_bci) return false; |
cfang@1335 | 285 | if (p->_reexecute != q->_reexecute) return false; |
duke@435 | 286 | p = p->caller(); |
duke@435 | 287 | q = q->caller(); |
duke@435 | 288 | if (p == q) return true; |
duke@435 | 289 | assert(p != NULL && q != NULL, "depth check ensures we don't run off end"); |
duke@435 | 290 | } |
duke@435 | 291 | } |
duke@435 | 292 | |
duke@435 | 293 | //------------------------------debug_start------------------------------------ |
duke@435 | 294 | uint JVMState::debug_start() const { |
duke@435 | 295 | debug_only(JVMState* jvmroot = of_depth(1)); |
duke@435 | 296 | assert(jvmroot->locoff() <= this->locoff(), "youngest JVMState must be last"); |
duke@435 | 297 | return of_depth(1)->locoff(); |
duke@435 | 298 | } |
duke@435 | 299 | |
duke@435 | 300 | //-------------------------------debug_end------------------------------------- |
duke@435 | 301 | uint JVMState::debug_end() const { |
duke@435 | 302 | debug_only(JVMState* jvmroot = of_depth(1)); |
duke@435 | 303 | assert(jvmroot->endoff() <= this->endoff(), "youngest JVMState must be last"); |
duke@435 | 304 | return endoff(); |
duke@435 | 305 | } |
duke@435 | 306 | |
duke@435 | 307 | //------------------------------debug_depth------------------------------------ |
duke@435 | 308 | uint JVMState::debug_depth() const { |
duke@435 | 309 | uint total = 0; |
duke@435 | 310 | for (const JVMState* jvmp = this; jvmp != NULL; jvmp = jvmp->caller()) { |
duke@435 | 311 | total += jvmp->debug_size(); |
duke@435 | 312 | } |
duke@435 | 313 | return total; |
duke@435 | 314 | } |
duke@435 | 315 | |
kvn@498 | 316 | #ifndef PRODUCT |
kvn@498 | 317 | |
duke@435 | 318 | //------------------------------format_helper---------------------------------- |
duke@435 | 319 | // Given an allocation (a Chaitin object) and a Node decide if the Node carries |
duke@435 | 320 | // any defined value or not. If it does, print out the register or constant. |
kvn@498 | 321 | static void format_helper( PhaseRegAlloc *regalloc, outputStream* st, Node *n, const char *msg, uint i, GrowableArray<SafePointScalarObjectNode*> *scobjs ) { |
duke@435 | 322 | if (n == NULL) { st->print(" NULL"); return; } |
kvn@498 | 323 | if (n->is_SafePointScalarObject()) { |
kvn@498 | 324 | // Scalar replacement. |
kvn@498 | 325 | SafePointScalarObjectNode* spobj = n->as_SafePointScalarObject(); |
kvn@498 | 326 | scobjs->append_if_missing(spobj); |
kvn@498 | 327 | int sco_n = scobjs->find(spobj); |
kvn@498 | 328 | assert(sco_n >= 0, ""); |
kvn@498 | 329 | st->print(" %s%d]=#ScObj" INT32_FORMAT, msg, i, sco_n); |
kvn@498 | 330 | return; |
kvn@498 | 331 | } |
duke@435 | 332 | if( OptoReg::is_valid(regalloc->get_reg_first(n))) { // Check for undefined |
duke@435 | 333 | char buf[50]; |
duke@435 | 334 | regalloc->dump_register(n,buf); |
duke@435 | 335 | st->print(" %s%d]=%s",msg,i,buf); |
duke@435 | 336 | } else { // No register, but might be constant |
duke@435 | 337 | const Type *t = n->bottom_type(); |
duke@435 | 338 | switch (t->base()) { |
duke@435 | 339 | case Type::Int: |
duke@435 | 340 | st->print(" %s%d]=#"INT32_FORMAT,msg,i,t->is_int()->get_con()); |
duke@435 | 341 | break; |
duke@435 | 342 | case Type::AnyPtr: |
duke@435 | 343 | assert( t == TypePtr::NULL_PTR, "" ); |
duke@435 | 344 | st->print(" %s%d]=#NULL",msg,i); |
duke@435 | 345 | break; |
duke@435 | 346 | case Type::AryPtr: |
duke@435 | 347 | case Type::KlassPtr: |
duke@435 | 348 | case Type::InstPtr: |
duke@435 | 349 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->isa_oopptr()->const_oop()); |
duke@435 | 350 | break; |
kvn@766 | 351 | case Type::NarrowOop: |
kvn@766 | 352 | st->print(" %s%d]=#Ptr" INTPTR_FORMAT,msg,i,t->make_ptr()->isa_oopptr()->const_oop()); |
kvn@766 | 353 | break; |
duke@435 | 354 | case Type::RawPtr: |
duke@435 | 355 | st->print(" %s%d]=#Raw" INTPTR_FORMAT,msg,i,t->is_rawptr()); |
duke@435 | 356 | break; |
duke@435 | 357 | case Type::DoubleCon: |
duke@435 | 358 | st->print(" %s%d]=#%fD",msg,i,t->is_double_constant()->_d); |
duke@435 | 359 | break; |
duke@435 | 360 | case Type::FloatCon: |
duke@435 | 361 | st->print(" %s%d]=#%fF",msg,i,t->is_float_constant()->_f); |
duke@435 | 362 | break; |
duke@435 | 363 | case Type::Long: |
duke@435 | 364 | st->print(" %s%d]=#"INT64_FORMAT,msg,i,t->is_long()->get_con()); |
duke@435 | 365 | break; |
duke@435 | 366 | case Type::Half: |
duke@435 | 367 | case Type::Top: |
duke@435 | 368 | st->print(" %s%d]=_",msg,i); |
duke@435 | 369 | break; |
duke@435 | 370 | default: ShouldNotReachHere(); |
duke@435 | 371 | } |
duke@435 | 372 | } |
duke@435 | 373 | } |
duke@435 | 374 | |
duke@435 | 375 | //------------------------------format----------------------------------------- |
duke@435 | 376 | void JVMState::format(PhaseRegAlloc *regalloc, const Node *n, outputStream* st) const { |
duke@435 | 377 | st->print(" #"); |
duke@435 | 378 | if( _method ) { |
duke@435 | 379 | _method->print_short_name(st); |
duke@435 | 380 | st->print(" @ bci:%d ",_bci); |
duke@435 | 381 | } else { |
duke@435 | 382 | st->print_cr(" runtime stub "); |
duke@435 | 383 | return; |
duke@435 | 384 | } |
duke@435 | 385 | if (n->is_MachSafePoint()) { |
kvn@498 | 386 | GrowableArray<SafePointScalarObjectNode*> scobjs; |
duke@435 | 387 | MachSafePointNode *mcall = n->as_MachSafePoint(); |
duke@435 | 388 | uint i; |
duke@435 | 389 | // Print locals |
duke@435 | 390 | for( i = 0; i < (uint)loc_size(); i++ ) |
kvn@498 | 391 | format_helper( regalloc, st, mcall->local(this, i), "L[", i, &scobjs ); |
duke@435 | 392 | // Print stack |
duke@435 | 393 | for (i = 0; i < (uint)stk_size(); i++) { |
duke@435 | 394 | if ((uint)(_stkoff + i) >= mcall->len()) |
duke@435 | 395 | st->print(" oob "); |
duke@435 | 396 | else |
kvn@498 | 397 | format_helper( regalloc, st, mcall->stack(this, i), "STK[", i, &scobjs ); |
duke@435 | 398 | } |
duke@435 | 399 | for (i = 0; (int)i < nof_monitors(); i++) { |
duke@435 | 400 | Node *box = mcall->monitor_box(this, i); |
duke@435 | 401 | Node *obj = mcall->monitor_obj(this, i); |
duke@435 | 402 | if ( OptoReg::is_valid(regalloc->get_reg_first(box)) ) { |
duke@435 | 403 | while( !box->is_BoxLock() ) box = box->in(1); |
kvn@498 | 404 | format_helper( regalloc, st, box, "MON-BOX[", i, &scobjs ); |
duke@435 | 405 | } else { |
duke@435 | 406 | OptoReg::Name box_reg = BoxLockNode::stack_slot(box); |
duke@435 | 407 | st->print(" MON-BOX%d=%s+%d", |
duke@435 | 408 | i, |
duke@435 | 409 | OptoReg::regname(OptoReg::c_frame_pointer), |
duke@435 | 410 | regalloc->reg2offset(box_reg)); |
duke@435 | 411 | } |
kvn@895 | 412 | const char* obj_msg = "MON-OBJ["; |
kvn@895 | 413 | if (EliminateLocks) { |
kvn@895 | 414 | while( !box->is_BoxLock() ) box = box->in(1); |
kvn@895 | 415 | if (box->as_BoxLock()->is_eliminated()) |
kvn@895 | 416 | obj_msg = "MON-OBJ(LOCK ELIMINATED)["; |
kvn@895 | 417 | } |
kvn@895 | 418 | format_helper( regalloc, st, obj, obj_msg, i, &scobjs ); |
kvn@498 | 419 | } |
kvn@498 | 420 | |
kvn@498 | 421 | for (i = 0; i < (uint)scobjs.length(); i++) { |
kvn@498 | 422 | // Scalar replaced objects. |
kvn@498 | 423 | st->print_cr(""); |
kvn@498 | 424 | st->print(" # ScObj" INT32_FORMAT " ", i); |
kvn@498 | 425 | SafePointScalarObjectNode* spobj = scobjs.at(i); |
kvn@498 | 426 | ciKlass* cik = spobj->bottom_type()->is_oopptr()->klass(); |
kvn@498 | 427 | assert(cik->is_instance_klass() || |
kvn@498 | 428 | cik->is_array_klass(), "Not supported allocation."); |
kvn@498 | 429 | ciInstanceKlass *iklass = NULL; |
kvn@498 | 430 | if (cik->is_instance_klass()) { |
kvn@498 | 431 | cik->print_name_on(st); |
kvn@498 | 432 | iklass = cik->as_instance_klass(); |
kvn@498 | 433 | } else if (cik->is_type_array_klass()) { |
kvn@498 | 434 | cik->as_array_klass()->base_element_type()->print_name_on(st); |
kvn@1475 | 435 | st->print("[%d]", spobj->n_fields()); |
kvn@498 | 436 | } else if (cik->is_obj_array_klass()) { |
kvn@1475 | 437 | ciKlass* cie = cik->as_obj_array_klass()->base_element_klass(); |
kvn@1475 | 438 | if (cie->is_instance_klass()) { |
kvn@1475 | 439 | cie->print_name_on(st); |
kvn@1475 | 440 | } else if (cie->is_type_array_klass()) { |
kvn@1475 | 441 | cie->as_array_klass()->base_element_type()->print_name_on(st); |
kvn@1475 | 442 | } else { |
kvn@1475 | 443 | ShouldNotReachHere(); |
kvn@498 | 444 | } |
kvn@1475 | 445 | st->print("[%d]", spobj->n_fields()); |
kvn@1475 | 446 | int ndim = cik->as_array_klass()->dimension() - 1; |
kvn@498 | 447 | while (ndim-- > 0) { |
kvn@498 | 448 | st->print("[]"); |
kvn@498 | 449 | } |
kvn@498 | 450 | } |
kvn@1475 | 451 | st->print("={"); |
kvn@498 | 452 | uint nf = spobj->n_fields(); |
kvn@498 | 453 | if (nf > 0) { |
kvn@498 | 454 | uint first_ind = spobj->first_index(); |
kvn@498 | 455 | Node* fld_node = mcall->in(first_ind); |
kvn@498 | 456 | ciField* cifield; |
kvn@498 | 457 | if (iklass != NULL) { |
kvn@498 | 458 | st->print(" ["); |
kvn@498 | 459 | cifield = iklass->nonstatic_field_at(0); |
kvn@498 | 460 | cifield->print_name_on(st); |
kvn@498 | 461 | format_helper( regalloc, st, fld_node, ":", 0, &scobjs ); |
kvn@498 | 462 | } else { |
kvn@498 | 463 | format_helper( regalloc, st, fld_node, "[", 0, &scobjs ); |
kvn@498 | 464 | } |
kvn@498 | 465 | for (uint j = 1; j < nf; j++) { |
kvn@498 | 466 | fld_node = mcall->in(first_ind+j); |
kvn@498 | 467 | if (iklass != NULL) { |
kvn@498 | 468 | st->print(", ["); |
kvn@498 | 469 | cifield = iklass->nonstatic_field_at(j); |
kvn@498 | 470 | cifield->print_name_on(st); |
kvn@498 | 471 | format_helper( regalloc, st, fld_node, ":", j, &scobjs ); |
kvn@498 | 472 | } else { |
kvn@498 | 473 | format_helper( regalloc, st, fld_node, ", [", j, &scobjs ); |
kvn@498 | 474 | } |
kvn@498 | 475 | } |
kvn@498 | 476 | } |
kvn@498 | 477 | st->print(" }"); |
duke@435 | 478 | } |
duke@435 | 479 | } |
duke@435 | 480 | st->print_cr(""); |
duke@435 | 481 | if (caller() != NULL) caller()->format(regalloc, n, st); |
duke@435 | 482 | } |
duke@435 | 483 | |
kvn@498 | 484 | |
duke@435 | 485 | void JVMState::dump_spec(outputStream *st) const { |
duke@435 | 486 | if (_method != NULL) { |
duke@435 | 487 | bool printed = false; |
duke@435 | 488 | if (!Verbose) { |
duke@435 | 489 | // The JVMS dumps make really, really long lines. |
duke@435 | 490 | // Take out the most boring parts, which are the package prefixes. |
duke@435 | 491 | char buf[500]; |
duke@435 | 492 | stringStream namest(buf, sizeof(buf)); |
duke@435 | 493 | _method->print_short_name(&namest); |
duke@435 | 494 | if (namest.count() < sizeof(buf)) { |
duke@435 | 495 | const char* name = namest.base(); |
duke@435 | 496 | if (name[0] == ' ') ++name; |
duke@435 | 497 | const char* endcn = strchr(name, ':'); // end of class name |
duke@435 | 498 | if (endcn == NULL) endcn = strchr(name, '('); |
duke@435 | 499 | if (endcn == NULL) endcn = name + strlen(name); |
duke@435 | 500 | while (endcn > name && endcn[-1] != '.' && endcn[-1] != '/') |
duke@435 | 501 | --endcn; |
duke@435 | 502 | st->print(" %s", endcn); |
duke@435 | 503 | printed = true; |
duke@435 | 504 | } |
duke@435 | 505 | } |
duke@435 | 506 | if (!printed) |
duke@435 | 507 | _method->print_short_name(st); |
duke@435 | 508 | st->print(" @ bci:%d",_bci); |
cfang@1366 | 509 | if(_reexecute == Reexecute_True) |
cfang@1366 | 510 | st->print(" reexecute"); |
duke@435 | 511 | } else { |
duke@435 | 512 | st->print(" runtime stub"); |
duke@435 | 513 | } |
duke@435 | 514 | if (caller() != NULL) caller()->dump_spec(st); |
duke@435 | 515 | } |
duke@435 | 516 | |
kvn@498 | 517 | |
duke@435 | 518 | void JVMState::dump_on(outputStream* st) const { |
duke@435 | 519 | if (_map && !((uintptr_t)_map & 1)) { |
duke@435 | 520 | if (_map->len() > _map->req()) { // _map->has_exceptions() |
duke@435 | 521 | Node* ex = _map->in(_map->req()); // _map->next_exception() |
duke@435 | 522 | // skip the first one; it's already being printed |
duke@435 | 523 | while (ex != NULL && ex->len() > ex->req()) { |
duke@435 | 524 | ex = ex->in(ex->req()); // ex->next_exception() |
duke@435 | 525 | ex->dump(1); |
duke@435 | 526 | } |
duke@435 | 527 | } |
duke@435 | 528 | _map->dump(2); |
duke@435 | 529 | } |
cfang@1335 | 530 | st->print("JVMS depth=%d loc=%d stk=%d mon=%d scalar=%d end=%d mondepth=%d sp=%d bci=%d reexecute=%s method=", |
cfang@1335 | 531 | depth(), locoff(), stkoff(), monoff(), scloff(), endoff(), monitor_depth(), sp(), bci(), should_reexecute()?"true":"false"); |
duke@435 | 532 | if (_method == NULL) { |
duke@435 | 533 | st->print_cr("(none)"); |
duke@435 | 534 | } else { |
duke@435 | 535 | _method->print_name(st); |
duke@435 | 536 | st->cr(); |
duke@435 | 537 | if (bci() >= 0 && bci() < _method->code_size()) { |
duke@435 | 538 | st->print(" bc: "); |
duke@435 | 539 | _method->print_codes_on(bci(), bci()+1, st); |
duke@435 | 540 | } |
duke@435 | 541 | } |
duke@435 | 542 | if (caller() != NULL) { |
duke@435 | 543 | caller()->dump_on(st); |
duke@435 | 544 | } |
duke@435 | 545 | } |
duke@435 | 546 | |
duke@435 | 547 | // Extra way to dump a jvms from the debugger, |
duke@435 | 548 | // to avoid a bug with C++ member function calls. |
duke@435 | 549 | void dump_jvms(JVMState* jvms) { |
duke@435 | 550 | jvms->dump(); |
duke@435 | 551 | } |
duke@435 | 552 | #endif |
duke@435 | 553 | |
duke@435 | 554 | //--------------------------clone_shallow-------------------------------------- |
duke@435 | 555 | JVMState* JVMState::clone_shallow(Compile* C) const { |
duke@435 | 556 | JVMState* n = has_method() ? new (C) JVMState(_method, _caller) : new (C) JVMState(0); |
duke@435 | 557 | n->set_bci(_bci); |
cfang@1335 | 558 | n->_reexecute = _reexecute; |
duke@435 | 559 | n->set_locoff(_locoff); |
duke@435 | 560 | n->set_stkoff(_stkoff); |
duke@435 | 561 | n->set_monoff(_monoff); |
kvn@498 | 562 | n->set_scloff(_scloff); |
duke@435 | 563 | n->set_endoff(_endoff); |
duke@435 | 564 | n->set_sp(_sp); |
duke@435 | 565 | n->set_map(_map); |
duke@435 | 566 | return n; |
duke@435 | 567 | } |
duke@435 | 568 | |
duke@435 | 569 | //---------------------------clone_deep---------------------------------------- |
duke@435 | 570 | JVMState* JVMState::clone_deep(Compile* C) const { |
duke@435 | 571 | JVMState* n = clone_shallow(C); |
duke@435 | 572 | for (JVMState* p = n; p->_caller != NULL; p = p->_caller) { |
duke@435 | 573 | p->_caller = p->_caller->clone_shallow(C); |
duke@435 | 574 | } |
duke@435 | 575 | assert(n->depth() == depth(), "sanity"); |
duke@435 | 576 | assert(n->debug_depth() == debug_depth(), "sanity"); |
duke@435 | 577 | return n; |
duke@435 | 578 | } |
duke@435 | 579 | |
duke@435 | 580 | //============================================================================= |
duke@435 | 581 | uint CallNode::cmp( const Node &n ) const |
duke@435 | 582 | { return _tf == ((CallNode&)n)._tf && _jvms == ((CallNode&)n)._jvms; } |
duke@435 | 583 | #ifndef PRODUCT |
duke@435 | 584 | void CallNode::dump_req() const { |
duke@435 | 585 | // Dump the required inputs, enclosed in '(' and ')' |
duke@435 | 586 | uint i; // Exit value of loop |
duke@435 | 587 | for( i=0; i<req(); i++ ) { // For all required inputs |
duke@435 | 588 | if( i == TypeFunc::Parms ) tty->print("("); |
duke@435 | 589 | if( in(i) ) tty->print("%c%d ", Compile::current()->node_arena()->contains(in(i)) ? ' ' : 'o', in(i)->_idx); |
duke@435 | 590 | else tty->print("_ "); |
duke@435 | 591 | } |
duke@435 | 592 | tty->print(")"); |
duke@435 | 593 | } |
duke@435 | 594 | |
duke@435 | 595 | void CallNode::dump_spec(outputStream *st) const { |
duke@435 | 596 | st->print(" "); |
duke@435 | 597 | tf()->dump_on(st); |
duke@435 | 598 | if (_cnt != COUNT_UNKNOWN) st->print(" C=%f",_cnt); |
duke@435 | 599 | if (jvms() != NULL) jvms()->dump_spec(st); |
duke@435 | 600 | } |
duke@435 | 601 | #endif |
duke@435 | 602 | |
duke@435 | 603 | const Type *CallNode::bottom_type() const { return tf()->range(); } |
duke@435 | 604 | const Type *CallNode::Value(PhaseTransform *phase) const { |
duke@435 | 605 | if (phase->type(in(0)) == Type::TOP) return Type::TOP; |
duke@435 | 606 | return tf()->range(); |
duke@435 | 607 | } |
duke@435 | 608 | |
duke@435 | 609 | //------------------------------calling_convention----------------------------- |
duke@435 | 610 | void CallNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
duke@435 | 611 | // Use the standard compiler calling convention |
duke@435 | 612 | Matcher::calling_convention( sig_bt, parm_regs, argcnt, true ); |
duke@435 | 613 | } |
duke@435 | 614 | |
duke@435 | 615 | |
duke@435 | 616 | //------------------------------match------------------------------------------ |
duke@435 | 617 | // Construct projections for control, I/O, memory-fields, ..., and |
duke@435 | 618 | // return result(s) along with their RegMask info |
duke@435 | 619 | Node *CallNode::match( const ProjNode *proj, const Matcher *match ) { |
duke@435 | 620 | switch (proj->_con) { |
duke@435 | 621 | case TypeFunc::Control: |
duke@435 | 622 | case TypeFunc::I_O: |
duke@435 | 623 | case TypeFunc::Memory: |
duke@435 | 624 | return new (match->C, 1) MachProjNode(this,proj->_con,RegMask::Empty,MachProjNode::unmatched_proj); |
duke@435 | 625 | |
duke@435 | 626 | case TypeFunc::Parms+1: // For LONG & DOUBLE returns |
duke@435 | 627 | assert(tf()->_range->field_at(TypeFunc::Parms+1) == Type::HALF, ""); |
duke@435 | 628 | // 2nd half of doubles and longs |
duke@435 | 629 | return new (match->C, 1) MachProjNode(this,proj->_con, RegMask::Empty, (uint)OptoReg::Bad); |
duke@435 | 630 | |
duke@435 | 631 | case TypeFunc::Parms: { // Normal returns |
duke@435 | 632 | uint ideal_reg = Matcher::base2reg[tf()->range()->field_at(TypeFunc::Parms)->base()]; |
duke@435 | 633 | OptoRegPair regs = is_CallRuntime() |
duke@435 | 634 | ? match->c_return_value(ideal_reg,true) // Calls into C runtime |
duke@435 | 635 | : match-> return_value(ideal_reg,true); // Calls into compiled Java code |
duke@435 | 636 | RegMask rm = RegMask(regs.first()); |
duke@435 | 637 | if( OptoReg::is_valid(regs.second()) ) |
duke@435 | 638 | rm.Insert( regs.second() ); |
duke@435 | 639 | return new (match->C, 1) MachProjNode(this,proj->_con,rm,ideal_reg); |
duke@435 | 640 | } |
duke@435 | 641 | |
duke@435 | 642 | case TypeFunc::ReturnAdr: |
duke@435 | 643 | case TypeFunc::FramePtr: |
duke@435 | 644 | default: |
duke@435 | 645 | ShouldNotReachHere(); |
duke@435 | 646 | } |
duke@435 | 647 | return NULL; |
duke@435 | 648 | } |
duke@435 | 649 | |
duke@435 | 650 | // Do we Match on this edge index or not? Match no edges |
duke@435 | 651 | uint CallNode::match_edge(uint idx) const { |
duke@435 | 652 | return 0; |
duke@435 | 653 | } |
duke@435 | 654 | |
kvn@500 | 655 | // |
kvn@509 | 656 | // Determine whether the call could modify the field of the specified |
kvn@509 | 657 | // instance at the specified offset. |
kvn@500 | 658 | // |
kvn@500 | 659 | bool CallNode::may_modify(const TypePtr *addr_t, PhaseTransform *phase) { |
kvn@500 | 660 | const TypeOopPtr *adrInst_t = addr_t->isa_oopptr(); |
kvn@500 | 661 | |
kvn@682 | 662 | // If not an OopPtr or not an instance type, assume the worst. |
kvn@682 | 663 | // Note: currently this method is called only for instance types. |
kvn@682 | 664 | if (adrInst_t == NULL || !adrInst_t->is_known_instance()) { |
kvn@500 | 665 | return true; |
kvn@500 | 666 | } |
kvn@682 | 667 | // The instance_id is set only for scalar-replaceable allocations which |
kvn@682 | 668 | // are not passed as arguments according to Escape Analysis. |
kvn@500 | 669 | return false; |
kvn@500 | 670 | } |
kvn@500 | 671 | |
kvn@500 | 672 | // Does this call have a direct reference to n other than debug information? |
kvn@500 | 673 | bool CallNode::has_non_debug_use(Node *n) { |
kvn@500 | 674 | const TypeTuple * d = tf()->domain(); |
kvn@500 | 675 | for (uint i = TypeFunc::Parms; i < d->cnt(); i++) { |
kvn@500 | 676 | Node *arg = in(i); |
kvn@500 | 677 | if (arg == n) { |
kvn@500 | 678 | return true; |
kvn@500 | 679 | } |
kvn@500 | 680 | } |
kvn@500 | 681 | return false; |
kvn@500 | 682 | } |
kvn@500 | 683 | |
kvn@500 | 684 | // Returns the unique CheckCastPP of a call, |
kvn@500 | 685 | // or 'this' if there are several CheckCastPPs, |
kvn@500 | 686 | // or NULL if there is none. |
kvn@500 | 687 | Node *CallNode::result_cast() { |
kvn@500 | 688 | Node *cast = NULL; |
kvn@500 | 689 | |
kvn@500 | 690 | Node *p = proj_out(TypeFunc::Parms); |
kvn@500 | 691 | if (p == NULL) |
kvn@500 | 692 | return NULL; |
kvn@500 | 693 | |
kvn@500 | 694 | for (DUIterator_Fast imax, i = p->fast_outs(imax); i < imax; i++) { |
kvn@500 | 695 | Node *use = p->fast_out(i); |
kvn@500 | 696 | if (use->is_CheckCastPP()) { |
kvn@500 | 697 | if (cast != NULL) { |
kvn@500 | 698 | return this; // more than 1 CheckCastPP |
kvn@500 | 699 | } |
kvn@500 | 700 | cast = use; |
kvn@500 | 701 | } |
kvn@500 | 702 | } |
kvn@500 | 703 | return cast; |
kvn@500 | 704 | } |
kvn@500 | 705 | |
kvn@500 | 706 | |
never@1515 | 707 | void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj) { |
never@1515 | 708 | projs->fallthrough_proj = NULL; |
never@1515 | 709 | projs->fallthrough_catchproj = NULL; |
never@1515 | 710 | projs->fallthrough_ioproj = NULL; |
never@1515 | 711 | projs->catchall_ioproj = NULL; |
never@1515 | 712 | projs->catchall_catchproj = NULL; |
never@1515 | 713 | projs->fallthrough_memproj = NULL; |
never@1515 | 714 | projs->catchall_memproj = NULL; |
never@1515 | 715 | projs->resproj = NULL; |
never@1515 | 716 | projs->exobj = NULL; |
never@1515 | 717 | |
never@1515 | 718 | for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { |
never@1515 | 719 | ProjNode *pn = fast_out(i)->as_Proj(); |
never@1515 | 720 | if (pn->outcnt() == 0) continue; |
never@1515 | 721 | switch (pn->_con) { |
never@1515 | 722 | case TypeFunc::Control: |
never@1515 | 723 | { |
never@1515 | 724 | // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj |
never@1515 | 725 | projs->fallthrough_proj = pn; |
never@1515 | 726 | DUIterator_Fast jmax, j = pn->fast_outs(jmax); |
never@1515 | 727 | const Node *cn = pn->fast_out(j); |
never@1515 | 728 | if (cn->is_Catch()) { |
never@1515 | 729 | ProjNode *cpn = NULL; |
never@1515 | 730 | for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { |
never@1515 | 731 | cpn = cn->fast_out(k)->as_Proj(); |
never@1515 | 732 | assert(cpn->is_CatchProj(), "must be a CatchProjNode"); |
never@1515 | 733 | if (cpn->_con == CatchProjNode::fall_through_index) |
never@1515 | 734 | projs->fallthrough_catchproj = cpn; |
never@1515 | 735 | else { |
never@1515 | 736 | assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index."); |
never@1515 | 737 | projs->catchall_catchproj = cpn; |
never@1515 | 738 | } |
never@1515 | 739 | } |
never@1515 | 740 | } |
never@1515 | 741 | break; |
never@1515 | 742 | } |
never@1515 | 743 | case TypeFunc::I_O: |
never@1515 | 744 | if (pn->_is_io_use) |
never@1515 | 745 | projs->catchall_ioproj = pn; |
never@1515 | 746 | else |
never@1515 | 747 | projs->fallthrough_ioproj = pn; |
never@1515 | 748 | for (DUIterator j = pn->outs(); pn->has_out(j); j++) { |
never@1515 | 749 | Node* e = pn->out(j); |
never@1515 | 750 | if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) { |
never@1515 | 751 | assert(projs->exobj == NULL, "only one"); |
never@1515 | 752 | projs->exobj = e; |
never@1515 | 753 | } |
never@1515 | 754 | } |
never@1515 | 755 | break; |
never@1515 | 756 | case TypeFunc::Memory: |
never@1515 | 757 | if (pn->_is_io_use) |
never@1515 | 758 | projs->catchall_memproj = pn; |
never@1515 | 759 | else |
never@1515 | 760 | projs->fallthrough_memproj = pn; |
never@1515 | 761 | break; |
never@1515 | 762 | case TypeFunc::Parms: |
never@1515 | 763 | projs->resproj = pn; |
never@1515 | 764 | break; |
never@1515 | 765 | default: |
never@1515 | 766 | assert(false, "unexpected projection from call node."); |
never@1515 | 767 | } |
never@1515 | 768 | } |
never@1515 | 769 | |
never@1515 | 770 | // The resproj may not exist because the result could be ignored |
never@1515 | 771 | // and the exception object may not exist if an exception handler |
never@1515 | 772 | // swallows the exception, but all the others must exist and be found. |
never@1515 | 773 | assert(projs->fallthrough_proj != NULL, "must be found"); |
never@1515 | 774 | assert(projs->fallthrough_catchproj != NULL, "must be found"); |
never@1515 | 775 | assert(projs->fallthrough_memproj != NULL, "must be found"); |
never@1515 | 776 | assert(projs->fallthrough_ioproj != NULL, "must be found"); |
never@1515 | 777 | assert(projs->catchall_catchproj != NULL, "must be found"); |
never@1515 | 778 | if (separate_io_proj) { |
never@1515 | 779 | assert(projs->catchall_memproj != NULL, "must be found"); |
never@1515 | 780 | assert(projs->catchall_ioproj != NULL, "must be found"); |
never@1515 | 781 | } |
never@1515 | 782 | } |
never@1515 | 783 | |
never@1515 | 784 | |
duke@435 | 785 | //============================================================================= |
duke@435 | 786 | uint CallJavaNode::size_of() const { return sizeof(*this); } |
duke@435 | 787 | uint CallJavaNode::cmp( const Node &n ) const { |
duke@435 | 788 | CallJavaNode &call = (CallJavaNode&)n; |
duke@435 | 789 | return CallNode::cmp(call) && _method == call._method; |
duke@435 | 790 | } |
duke@435 | 791 | #ifndef PRODUCT |
duke@435 | 792 | void CallJavaNode::dump_spec(outputStream *st) const { |
duke@435 | 793 | if( _method ) _method->print_short_name(st); |
duke@435 | 794 | CallNode::dump_spec(st); |
duke@435 | 795 | } |
duke@435 | 796 | #endif |
duke@435 | 797 | |
duke@435 | 798 | //============================================================================= |
duke@435 | 799 | uint CallStaticJavaNode::size_of() const { return sizeof(*this); } |
duke@435 | 800 | uint CallStaticJavaNode::cmp( const Node &n ) const { |
duke@435 | 801 | CallStaticJavaNode &call = (CallStaticJavaNode&)n; |
duke@435 | 802 | return CallJavaNode::cmp(call); |
duke@435 | 803 | } |
duke@435 | 804 | |
duke@435 | 805 | //----------------------------uncommon_trap_request---------------------------- |
duke@435 | 806 | // If this is an uncommon trap, return the request code, else zero. |
duke@435 | 807 | int CallStaticJavaNode::uncommon_trap_request() const { |
duke@435 | 808 | if (_name != NULL && !strcmp(_name, "uncommon_trap")) { |
duke@435 | 809 | return extract_uncommon_trap_request(this); |
duke@435 | 810 | } |
duke@435 | 811 | return 0; |
duke@435 | 812 | } |
duke@435 | 813 | int CallStaticJavaNode::extract_uncommon_trap_request(const Node* call) { |
duke@435 | 814 | #ifndef PRODUCT |
duke@435 | 815 | if (!(call->req() > TypeFunc::Parms && |
duke@435 | 816 | call->in(TypeFunc::Parms) != NULL && |
duke@435 | 817 | call->in(TypeFunc::Parms)->is_Con())) { |
duke@435 | 818 | assert(_in_dump_cnt != 0, "OK if dumping"); |
duke@435 | 819 | tty->print("[bad uncommon trap]"); |
duke@435 | 820 | return 0; |
duke@435 | 821 | } |
duke@435 | 822 | #endif |
duke@435 | 823 | return call->in(TypeFunc::Parms)->bottom_type()->is_int()->get_con(); |
duke@435 | 824 | } |
duke@435 | 825 | |
duke@435 | 826 | #ifndef PRODUCT |
duke@435 | 827 | void CallStaticJavaNode::dump_spec(outputStream *st) const { |
duke@435 | 828 | st->print("# Static "); |
duke@435 | 829 | if (_name != NULL) { |
duke@435 | 830 | st->print("%s", _name); |
duke@435 | 831 | int trap_req = uncommon_trap_request(); |
duke@435 | 832 | if (trap_req != 0) { |
duke@435 | 833 | char buf[100]; |
duke@435 | 834 | st->print("(%s)", |
duke@435 | 835 | Deoptimization::format_trap_request(buf, sizeof(buf), |
duke@435 | 836 | trap_req)); |
duke@435 | 837 | } |
duke@435 | 838 | st->print(" "); |
duke@435 | 839 | } |
duke@435 | 840 | CallJavaNode::dump_spec(st); |
duke@435 | 841 | } |
duke@435 | 842 | #endif |
duke@435 | 843 | |
duke@435 | 844 | //============================================================================= |
duke@435 | 845 | uint CallDynamicJavaNode::size_of() const { return sizeof(*this); } |
duke@435 | 846 | uint CallDynamicJavaNode::cmp( const Node &n ) const { |
duke@435 | 847 | CallDynamicJavaNode &call = (CallDynamicJavaNode&)n; |
duke@435 | 848 | return CallJavaNode::cmp(call); |
duke@435 | 849 | } |
duke@435 | 850 | #ifndef PRODUCT |
duke@435 | 851 | void CallDynamicJavaNode::dump_spec(outputStream *st) const { |
duke@435 | 852 | st->print("# Dynamic "); |
duke@435 | 853 | CallJavaNode::dump_spec(st); |
duke@435 | 854 | } |
duke@435 | 855 | #endif |
duke@435 | 856 | |
duke@435 | 857 | //============================================================================= |
duke@435 | 858 | uint CallRuntimeNode::size_of() const { return sizeof(*this); } |
duke@435 | 859 | uint CallRuntimeNode::cmp( const Node &n ) const { |
duke@435 | 860 | CallRuntimeNode &call = (CallRuntimeNode&)n; |
duke@435 | 861 | return CallNode::cmp(call) && !strcmp(_name,call._name); |
duke@435 | 862 | } |
duke@435 | 863 | #ifndef PRODUCT |
duke@435 | 864 | void CallRuntimeNode::dump_spec(outputStream *st) const { |
duke@435 | 865 | st->print("# "); |
duke@435 | 866 | st->print(_name); |
duke@435 | 867 | CallNode::dump_spec(st); |
duke@435 | 868 | } |
duke@435 | 869 | #endif |
duke@435 | 870 | |
duke@435 | 871 | //------------------------------calling_convention----------------------------- |
duke@435 | 872 | void CallRuntimeNode::calling_convention( BasicType* sig_bt, VMRegPair *parm_regs, uint argcnt ) const { |
duke@435 | 873 | Matcher::c_calling_convention( sig_bt, parm_regs, argcnt ); |
duke@435 | 874 | } |
duke@435 | 875 | |
duke@435 | 876 | //============================================================================= |
duke@435 | 877 | //------------------------------calling_convention----------------------------- |
duke@435 | 878 | |
duke@435 | 879 | |
duke@435 | 880 | //============================================================================= |
duke@435 | 881 | #ifndef PRODUCT |
duke@435 | 882 | void CallLeafNode::dump_spec(outputStream *st) const { |
duke@435 | 883 | st->print("# "); |
duke@435 | 884 | st->print(_name); |
duke@435 | 885 | CallNode::dump_spec(st); |
duke@435 | 886 | } |
duke@435 | 887 | #endif |
duke@435 | 888 | |
duke@435 | 889 | //============================================================================= |
duke@435 | 890 | |
duke@435 | 891 | void SafePointNode::set_local(JVMState* jvms, uint idx, Node *c) { |
duke@435 | 892 | assert(verify_jvms(jvms), "jvms must match"); |
duke@435 | 893 | int loc = jvms->locoff() + idx; |
duke@435 | 894 | if (in(loc)->is_top() && idx > 0 && !c->is_top() ) { |
duke@435 | 895 | // If current local idx is top then local idx - 1 could |
duke@435 | 896 | // be a long/double that needs to be killed since top could |
duke@435 | 897 | // represent the 2nd half of the long/double. |
duke@435 | 898 | uint ideal = in(loc -1)->ideal_reg(); |
duke@435 | 899 | if (ideal == Op_RegD || ideal == Op_RegL) { |
duke@435 | 900 | // set other (low index) half to top |
duke@435 | 901 | set_req(loc - 1, in(loc)); |
duke@435 | 902 | } |
duke@435 | 903 | } |
duke@435 | 904 | set_req(loc, c); |
duke@435 | 905 | } |
duke@435 | 906 | |
duke@435 | 907 | uint SafePointNode::size_of() const { return sizeof(*this); } |
duke@435 | 908 | uint SafePointNode::cmp( const Node &n ) const { |
duke@435 | 909 | return (&n == this); // Always fail except on self |
duke@435 | 910 | } |
duke@435 | 911 | |
duke@435 | 912 | //-------------------------set_next_exception---------------------------------- |
duke@435 | 913 | void SafePointNode::set_next_exception(SafePointNode* n) { |
duke@435 | 914 | assert(n == NULL || n->Opcode() == Op_SafePoint, "correct value for next_exception"); |
duke@435 | 915 | if (len() == req()) { |
duke@435 | 916 | if (n != NULL) add_prec(n); |
duke@435 | 917 | } else { |
duke@435 | 918 | set_prec(req(), n); |
duke@435 | 919 | } |
duke@435 | 920 | } |
duke@435 | 921 | |
duke@435 | 922 | |
duke@435 | 923 | //----------------------------next_exception----------------------------------- |
duke@435 | 924 | SafePointNode* SafePointNode::next_exception() const { |
duke@435 | 925 | if (len() == req()) { |
duke@435 | 926 | return NULL; |
duke@435 | 927 | } else { |
duke@435 | 928 | Node* n = in(req()); |
duke@435 | 929 | assert(n == NULL || n->Opcode() == Op_SafePoint, "no other uses of prec edges"); |
duke@435 | 930 | return (SafePointNode*) n; |
duke@435 | 931 | } |
duke@435 | 932 | } |
duke@435 | 933 | |
duke@435 | 934 | |
duke@435 | 935 | //------------------------------Ideal------------------------------------------ |
duke@435 | 936 | // Skip over any collapsed Regions |
duke@435 | 937 | Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
kvn@740 | 938 | return remove_dead_region(phase, can_reshape) ? this : NULL; |
duke@435 | 939 | } |
duke@435 | 940 | |
duke@435 | 941 | //------------------------------Identity--------------------------------------- |
duke@435 | 942 | // Remove obviously duplicate safepoints |
duke@435 | 943 | Node *SafePointNode::Identity( PhaseTransform *phase ) { |
duke@435 | 944 | |
duke@435 | 945 | // If you have back to back safepoints, remove one |
duke@435 | 946 | if( in(TypeFunc::Control)->is_SafePoint() ) |
duke@435 | 947 | return in(TypeFunc::Control); |
duke@435 | 948 | |
duke@435 | 949 | if( in(0)->is_Proj() ) { |
duke@435 | 950 | Node *n0 = in(0)->in(0); |
duke@435 | 951 | // Check if it is a call projection (except Leaf Call) |
duke@435 | 952 | if( n0->is_Catch() ) { |
duke@435 | 953 | n0 = n0->in(0)->in(0); |
duke@435 | 954 | assert( n0->is_Call(), "expect a call here" ); |
duke@435 | 955 | } |
duke@435 | 956 | if( n0->is_Call() && n0->as_Call()->guaranteed_safepoint() ) { |
duke@435 | 957 | // Useless Safepoint, so remove it |
duke@435 | 958 | return in(TypeFunc::Control); |
duke@435 | 959 | } |
duke@435 | 960 | } |
duke@435 | 961 | |
duke@435 | 962 | return this; |
duke@435 | 963 | } |
duke@435 | 964 | |
duke@435 | 965 | //------------------------------Value------------------------------------------ |
duke@435 | 966 | const Type *SafePointNode::Value( PhaseTransform *phase ) const { |
duke@435 | 967 | if( phase->type(in(0)) == Type::TOP ) return Type::TOP; |
duke@435 | 968 | if( phase->eqv( in(0), this ) ) return Type::TOP; // Dead infinite loop |
duke@435 | 969 | return Type::CONTROL; |
duke@435 | 970 | } |
duke@435 | 971 | |
duke@435 | 972 | #ifndef PRODUCT |
duke@435 | 973 | void SafePointNode::dump_spec(outputStream *st) const { |
duke@435 | 974 | st->print(" SafePoint "); |
duke@435 | 975 | } |
duke@435 | 976 | #endif |
duke@435 | 977 | |
duke@435 | 978 | const RegMask &SafePointNode::in_RegMask(uint idx) const { |
duke@435 | 979 | if( idx < TypeFunc::Parms ) return RegMask::Empty; |
duke@435 | 980 | // Values outside the domain represent debug info |
duke@435 | 981 | return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); |
duke@435 | 982 | } |
duke@435 | 983 | const RegMask &SafePointNode::out_RegMask() const { |
duke@435 | 984 | return RegMask::Empty; |
duke@435 | 985 | } |
duke@435 | 986 | |
duke@435 | 987 | |
duke@435 | 988 | void SafePointNode::grow_stack(JVMState* jvms, uint grow_by) { |
duke@435 | 989 | assert((int)grow_by > 0, "sanity"); |
duke@435 | 990 | int monoff = jvms->monoff(); |
kvn@498 | 991 | int scloff = jvms->scloff(); |
duke@435 | 992 | int endoff = jvms->endoff(); |
duke@435 | 993 | assert(endoff == (int)req(), "no other states or debug info after me"); |
duke@435 | 994 | Node* top = Compile::current()->top(); |
duke@435 | 995 | for (uint i = 0; i < grow_by; i++) { |
duke@435 | 996 | ins_req(monoff, top); |
duke@435 | 997 | } |
duke@435 | 998 | jvms->set_monoff(monoff + grow_by); |
kvn@498 | 999 | jvms->set_scloff(scloff + grow_by); |
duke@435 | 1000 | jvms->set_endoff(endoff + grow_by); |
duke@435 | 1001 | } |
duke@435 | 1002 | |
duke@435 | 1003 | void SafePointNode::push_monitor(const FastLockNode *lock) { |
duke@435 | 1004 | // Add a LockNode, which points to both the original BoxLockNode (the |
duke@435 | 1005 | // stack space for the monitor) and the Object being locked. |
duke@435 | 1006 | const int MonitorEdges = 2; |
duke@435 | 1007 | assert(JVMState::logMonitorEdges == exact_log2(MonitorEdges), "correct MonitorEdges"); |
duke@435 | 1008 | assert(req() == jvms()->endoff(), "correct sizing"); |
kvn@498 | 1009 | int nextmon = jvms()->scloff(); |
duke@435 | 1010 | if (GenerateSynchronizationCode) { |
duke@435 | 1011 | add_req(lock->box_node()); |
duke@435 | 1012 | add_req(lock->obj_node()); |
duke@435 | 1013 | } else { |
kvn@895 | 1014 | Node* top = Compile::current()->top(); |
kvn@895 | 1015 | add_req(top); |
kvn@895 | 1016 | add_req(top); |
duke@435 | 1017 | } |
kvn@498 | 1018 | jvms()->set_scloff(nextmon+MonitorEdges); |
duke@435 | 1019 | jvms()->set_endoff(req()); |
duke@435 | 1020 | } |
duke@435 | 1021 | |
duke@435 | 1022 | void SafePointNode::pop_monitor() { |
duke@435 | 1023 | // Delete last monitor from debug info |
duke@435 | 1024 | debug_only(int num_before_pop = jvms()->nof_monitors()); |
duke@435 | 1025 | const int MonitorEdges = (1<<JVMState::logMonitorEdges); |
kvn@498 | 1026 | int scloff = jvms()->scloff(); |
duke@435 | 1027 | int endoff = jvms()->endoff(); |
kvn@498 | 1028 | int new_scloff = scloff - MonitorEdges; |
duke@435 | 1029 | int new_endoff = endoff - MonitorEdges; |
kvn@498 | 1030 | jvms()->set_scloff(new_scloff); |
duke@435 | 1031 | jvms()->set_endoff(new_endoff); |
kvn@498 | 1032 | while (scloff > new_scloff) del_req(--scloff); |
duke@435 | 1033 | assert(jvms()->nof_monitors() == num_before_pop-1, ""); |
duke@435 | 1034 | } |
duke@435 | 1035 | |
duke@435 | 1036 | Node *SafePointNode::peek_monitor_box() const { |
duke@435 | 1037 | int mon = jvms()->nof_monitors() - 1; |
duke@435 | 1038 | assert(mon >= 0, "must have a monitor"); |
duke@435 | 1039 | return monitor_box(jvms(), mon); |
duke@435 | 1040 | } |
duke@435 | 1041 | |
duke@435 | 1042 | Node *SafePointNode::peek_monitor_obj() const { |
duke@435 | 1043 | int mon = jvms()->nof_monitors() - 1; |
duke@435 | 1044 | assert(mon >= 0, "must have a monitor"); |
duke@435 | 1045 | return monitor_obj(jvms(), mon); |
duke@435 | 1046 | } |
duke@435 | 1047 | |
duke@435 | 1048 | // Do we Match on this edge index or not? Match no edges |
duke@435 | 1049 | uint SafePointNode::match_edge(uint idx) const { |
duke@435 | 1050 | if( !needs_polling_address_input() ) |
duke@435 | 1051 | return 0; |
duke@435 | 1052 | |
duke@435 | 1053 | return (TypeFunc::Parms == idx); |
duke@435 | 1054 | } |
duke@435 | 1055 | |
kvn@498 | 1056 | //============== SafePointScalarObjectNode ============== |
kvn@498 | 1057 | |
kvn@498 | 1058 | SafePointScalarObjectNode::SafePointScalarObjectNode(const TypeOopPtr* tp, |
kvn@498 | 1059 | #ifdef ASSERT |
kvn@498 | 1060 | AllocateNode* alloc, |
kvn@498 | 1061 | #endif |
kvn@498 | 1062 | uint first_index, |
kvn@498 | 1063 | uint n_fields) : |
kvn@498 | 1064 | TypeNode(tp, 1), // 1 control input -- seems required. Get from root. |
kvn@498 | 1065 | #ifdef ASSERT |
kvn@498 | 1066 | _alloc(alloc), |
kvn@498 | 1067 | #endif |
kvn@498 | 1068 | _first_index(first_index), |
kvn@498 | 1069 | _n_fields(n_fields) |
kvn@498 | 1070 | { |
kvn@498 | 1071 | init_class_id(Class_SafePointScalarObject); |
kvn@498 | 1072 | } |
kvn@498 | 1073 | |
kvn@855 | 1074 | bool SafePointScalarObjectNode::pinned() const { return true; } |
kvn@1036 | 1075 | bool SafePointScalarObjectNode::depends_only_on_test() const { return false; } |
kvn@498 | 1076 | |
kvn@498 | 1077 | uint SafePointScalarObjectNode::ideal_reg() const { |
kvn@498 | 1078 | return 0; // No matching to machine instruction |
kvn@498 | 1079 | } |
kvn@498 | 1080 | |
kvn@498 | 1081 | const RegMask &SafePointScalarObjectNode::in_RegMask(uint idx) const { |
kvn@498 | 1082 | return *(Compile::current()->matcher()->idealreg2debugmask[in(idx)->ideal_reg()]); |
kvn@498 | 1083 | } |
kvn@498 | 1084 | |
kvn@498 | 1085 | const RegMask &SafePointScalarObjectNode::out_RegMask() const { |
kvn@498 | 1086 | return RegMask::Empty; |
kvn@498 | 1087 | } |
kvn@498 | 1088 | |
kvn@498 | 1089 | uint SafePointScalarObjectNode::match_edge(uint idx) const { |
kvn@498 | 1090 | return 0; |
kvn@498 | 1091 | } |
kvn@498 | 1092 | |
kvn@498 | 1093 | SafePointScalarObjectNode* |
kvn@498 | 1094 | SafePointScalarObjectNode::clone(int jvms_adj, Dict* sosn_map) const { |
kvn@498 | 1095 | void* cached = (*sosn_map)[(void*)this]; |
kvn@498 | 1096 | if (cached != NULL) { |
kvn@498 | 1097 | return (SafePointScalarObjectNode*)cached; |
kvn@498 | 1098 | } |
kvn@498 | 1099 | Compile* C = Compile::current(); |
kvn@498 | 1100 | SafePointScalarObjectNode* res = (SafePointScalarObjectNode*)Node::clone(); |
kvn@498 | 1101 | res->_first_index += jvms_adj; |
kvn@498 | 1102 | sosn_map->Insert((void*)this, (void*)res); |
kvn@498 | 1103 | return res; |
kvn@498 | 1104 | } |
kvn@498 | 1105 | |
kvn@498 | 1106 | |
kvn@498 | 1107 | #ifndef PRODUCT |
kvn@498 | 1108 | void SafePointScalarObjectNode::dump_spec(outputStream *st) const { |
kvn@498 | 1109 | st->print(" # fields@[%d..%d]", first_index(), |
kvn@498 | 1110 | first_index() + n_fields() - 1); |
kvn@498 | 1111 | } |
kvn@498 | 1112 | |
kvn@498 | 1113 | #endif |
kvn@498 | 1114 | |
duke@435 | 1115 | //============================================================================= |
duke@435 | 1116 | uint AllocateNode::size_of() const { return sizeof(*this); } |
duke@435 | 1117 | |
duke@435 | 1118 | AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, |
duke@435 | 1119 | Node *ctrl, Node *mem, Node *abio, |
duke@435 | 1120 | Node *size, Node *klass_node, Node *initial_test) |
duke@435 | 1121 | : CallNode(atype, NULL, TypeRawPtr::BOTTOM) |
duke@435 | 1122 | { |
duke@435 | 1123 | init_class_id(Class_Allocate); |
duke@435 | 1124 | init_flags(Flag_is_macro); |
kvn@474 | 1125 | _is_scalar_replaceable = false; |
duke@435 | 1126 | Node *topnode = C->top(); |
duke@435 | 1127 | |
duke@435 | 1128 | init_req( TypeFunc::Control , ctrl ); |
duke@435 | 1129 | init_req( TypeFunc::I_O , abio ); |
duke@435 | 1130 | init_req( TypeFunc::Memory , mem ); |
duke@435 | 1131 | init_req( TypeFunc::ReturnAdr, topnode ); |
duke@435 | 1132 | init_req( TypeFunc::FramePtr , topnode ); |
duke@435 | 1133 | init_req( AllocSize , size); |
duke@435 | 1134 | init_req( KlassNode , klass_node); |
duke@435 | 1135 | init_req( InitialTest , initial_test); |
duke@435 | 1136 | init_req( ALength , topnode); |
duke@435 | 1137 | C->add_macro_node(this); |
duke@435 | 1138 | } |
duke@435 | 1139 | |
duke@435 | 1140 | //============================================================================= |
duke@435 | 1141 | uint AllocateArrayNode::size_of() const { return sizeof(*this); } |
duke@435 | 1142 | |
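kvn@1139 |      | // If type analysis proves the array length negative (e.g. an inlined
kvn@1139 |      | // "new int[n]" where n is known to be < 0), the fall-through path after
kvn@1139 |      | // the allocation is unreachable: the call can only throw
kvn@1139 |      | // NegativeArraySizeException. Hang a Halt node off a clone of the dead
kvn@1139 |      | // fall-through CatchProj and replace the original with top so the
kvn@1139 |      | // unreachable code downstream can be removed.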
kvn@1139 | 1143 | Node* AllocateArrayNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
kvn@1139 | 1144 | if (remove_dead_region(phase, can_reshape)) return this; |
kvn@1139 | 1145 | |
kvn@1139 | 1146 | const Type* type = phase->type(Ideal_length()); |
kvn@1139 | 1147 | if (type->isa_int() && type->is_int()->_hi < 0) { |
kvn@1139 | 1148 | if (can_reshape) { |
kvn@1139 | 1149 | PhaseIterGVN *igvn = phase->is_IterGVN(); |
kvn@1139 | 1150 | // Unreachable fall-through path (negative array length);
kvn@1139 | 1151 | // the allocation can only throw, so disconnect it.
kvn@1139 | 1152 | Node* proj = proj_out(TypeFunc::Control); |
kvn@1139 | 1153 | Node* catchproj = NULL; |
kvn@1139 | 1154 | if (proj != NULL) { |
kvn@1139 | 1155 | for (DUIterator_Fast imax, i = proj->fast_outs(imax); i < imax; i++) { |
kvn@1139 | 1156 | Node *cn = proj->fast_out(i); |
kvn@1139 | 1157 | if (cn->is_Catch()) { |
kvn@1139 | 1158 | catchproj = cn->as_Multi()->proj_out(CatchProjNode::fall_through_index); |
kvn@1139 | 1159 | break; |
kvn@1139 | 1160 | } |
kvn@1139 | 1161 | } |
kvn@1139 | 1162 | } |
kvn@1139 | 1163 | if (catchproj != NULL && catchproj->outcnt() > 0 && |
kvn@1139 | 1164 | (catchproj->outcnt() > 1 || |
kvn@1139 | 1165 | catchproj->unique_out()->Opcode() != Op_Halt)) { |
kvn@1139 | 1166 | assert(catchproj->is_CatchProj(), "must be a CatchProjNode"); |
kvn@1139 | 1167 | Node* nproj = catchproj->clone(); |
kvn@1139 | 1168 | igvn->register_new_node_with_optimizer(nproj); |
kvn@1139 | 1169 | |
kvn@1139 | 1170 | Node *frame = new (phase->C, 1) ParmNode( phase->C->start(), TypeFunc::FramePtr ); |
kvn@1139 | 1171 | frame = phase->transform(frame); |
kvn@1139 | 1172 | // Halt & Catch Fire |
kvn@1139 | 1173 | Node *halt = new (phase->C, TypeFunc::Parms) HaltNode( nproj, frame ); |
kvn@1139 | 1174 | phase->C->root()->add_req(halt); |
kvn@1139 | 1175 | phase->transform(halt); |
kvn@1139 | 1176 | |
kvn@1139 | 1177 | igvn->replace_node(catchproj, phase->C->top()); |
kvn@1139 | 1178 | return this; |
kvn@1139 | 1179 | } |
kvn@1139 | 1180 | } else { |
kvn@1139 | 1181 | // Can't correct it during regular GVN so register for IGVN |
kvn@1139 | 1182 | phase->C->record_for_igvn(this); |
kvn@1139 | 1183 | } |
kvn@1139 | 1184 | } |
kvn@1139 | 1185 | return NULL; |
kvn@1139 | 1186 | } |
kvn@1139 | 1187 | |
rasbold@801 | 1188 | // Retrieve the length from the AllocateArrayNode. Narrow the type with a |
rasbold@801 | 1189 | // CastII, if appropriate. If we are not allowed to create new nodes, and |
rasbold@801 | 1190 | // a CastII is appropriate, return NULL. |
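rasbold@801 |      | // For example, a length whose type is a plain int can usually be narrowed
rasbold@801 |      | // to the valid array-size range (roughly [0..max array length]), since any
rasbold@801 |      | // other value would have thrown before the initialization completed; the
rasbold@801 |      | // CastII records that fact for downstream optimizations.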
rasbold@801 | 1191 | Node *AllocateArrayNode::make_ideal_length(const TypeOopPtr* oop_type, PhaseTransform *phase, bool allow_new_nodes) { |
rasbold@801 | 1192 | Node *length = in(AllocateNode::ALength); |
rasbold@801 | 1193 | assert(length != NULL, "length is not null"); |
rasbold@801 | 1194 | |
rasbold@801 | 1195 | const TypeInt* length_type = phase->find_int_type(length); |
rasbold@801 | 1196 | const TypeAryPtr* ary_type = oop_type->isa_aryptr(); |
rasbold@801 | 1197 | |
rasbold@801 | 1198 | if (ary_type != NULL && length_type != NULL) { |
rasbold@801 | 1199 | const TypeInt* narrow_length_type = ary_type->narrow_size_type(length_type); |
rasbold@801 | 1200 | if (narrow_length_type != length_type) { |
rasbold@801 | 1201 | // Assert one of: |
rasbold@801 | 1202 | // - the narrow_length is 0 |
rasbold@801 | 1203 | // - the narrow_length is not wider than length |
rasbold@801 | 1204 | assert(narrow_length_type == TypeInt::ZERO || |
rasbold@801 | 1205 | (narrow_length_type->_hi <= length_type->_hi && |
rasbold@801 | 1206 | narrow_length_type->_lo >= length_type->_lo), |
rasbold@801 | 1207 | "narrow type must be narrower than length type"); |
rasbold@801 | 1208 | |
rasbold@801 | 1209 | // Return NULL if new nodes are not allowed |
rasbold@801 | 1210 | if (!allow_new_nodes) return NULL; |
rasbold@801 | 1211 | // Create a cast which is control dependent on the initialization to |
rasbold@801 | 1212 | // propagate the fact that the array length must be positive. |
rasbold@801 | 1213 | length = new (phase->C, 2) CastIINode(length, narrow_length_type); |
rasbold@801 | 1214 | length->set_req(0, initialization()->proj_out(0)); |
rasbold@801 | 1215 | } |
rasbold@801 | 1216 | } |
rasbold@801 | 1217 | |
rasbold@801 | 1218 | return length; |
rasbold@801 | 1219 | } |
rasbold@801 | 1220 | |
duke@435 | 1221 | //============================================================================= |
duke@435 | 1222 | uint LockNode::size_of() const { return sizeof(*this); } |
duke@435 | 1223 | |
duke@435 | 1224 | // Redundant lock elimination |
duke@435 | 1225 | // |
duke@435 | 1226 | // There are various patterns of locking where we release and |
duke@435 | 1227 | // immediately reacquire a lock in a piece of code where no operations |
duke@435 | 1228 | // occur in between that would be observable. In those cases we can |
duke@435 | 1229 | // skip releasing and reacquiring the lock without violating any |
duke@435 | 1230 | // fairness requirements. Doing this around a loop could cause a lock |
duke@435 | 1231 | // to be held for a very long time so we concentrate on non-looping |
duke@435 | 1232 | // control flow. We also require that the operations be fully
duke@435 | 1233 | // redundant, meaning that we don't introduce new lock operations on
duke@435 | 1234 | // some paths just to be able to eliminate them on others, a la PRE. This
duke@435 | 1235 | // would probably require some more extensive graph manipulation to |
duke@435 | 1236 | // guarantee that the memory edges were all handled correctly. |
duke@435 | 1237 | // |
duke@435 | 1238 | // Assuming p is a simple predicate which can't trap in any way and s |
duke@435 | 1239 | // is a synchronized method consider this code: |
duke@435 | 1240 | // |
duke@435 | 1241 | // s(); |
duke@435 | 1242 | // if (p) |
duke@435 | 1243 | // s(); |
duke@435 | 1244 | // else |
duke@435 | 1245 | // s(); |
duke@435 | 1246 | // s(); |
duke@435 | 1247 | // |
duke@435 | 1248 | // 1. The unlocks of the first call to s can be eliminated if the |
duke@435 | 1249 | // locks inside the then and else branches are eliminated. |
duke@435 | 1250 | // |
duke@435 | 1251 | // 2. The unlocks of the then and else branches can be eliminated if |
duke@435 | 1252 | // the lock of the final call to s is eliminated. |
duke@435 | 1253 | // |
duke@435 | 1254 | // Either of these cases subsumes the simple case of sequential control flow |
duke@435 | 1255 | // |
duke@435 | 1256 | // Additionally we can eliminate versions without the else case:
duke@435 | 1257 | // |
duke@435 | 1258 | // s(); |
duke@435 | 1259 | // if (p) |
duke@435 | 1260 | // s(); |
duke@435 | 1261 | // s(); |
duke@435 | 1262 | // |
duke@435 | 1263 | // 3. In this case we eliminate the unlock of the first s, the lock |
duke@435 | 1264 | // and unlock in the then case and the lock in the final s. |
duke@435 | 1265 | // |
duke@435 | 1266 | // Note also that in all these cases the then/else pieces don't have |
duke@435 | 1267 | // to be trivial as long as they begin and end with synchronization |
duke@435 | 1268 | // operations. |
duke@435 | 1269 | // |
duke@435 | 1270 | // s(); |
duke@435 | 1271 | // if (p) |
duke@435 | 1272 | // s(); |
duke@435 | 1273 | // f(); |
duke@435 | 1274 | // s(); |
duke@435 | 1275 | // s(); |
duke@435 | 1276 | // |
duke@435 | 1277 | // The code will work properly for this case, leaving in the unlock |
duke@435 | 1278 | // before the call to f and the relock after it. |
duke@435 | 1279 | // |
duke@435 | 1280 | // A potentially interesting case which isn't handled here is when the |
duke@435 | 1281 | // locking is partially redundant. |
duke@435 | 1282 | // |
duke@435 | 1283 | // s(); |
duke@435 | 1284 | // if (p) |
duke@435 | 1285 | // s(); |
duke@435 | 1286 | // |
duke@435 | 1287 | // This could be eliminated by putting an unlock on the else path and
duke@435 | 1288 | // eliminating the first unlock and the lock in the then side.
duke@435 | 1289 | // Alternatively, the unlock could be moved out of the then side so it
duke@435 | 1290 | // came after the merge, and then the first unlock and the second lock
duke@435 | 1291 | // could be eliminated. This might require less manipulation of the
duke@435 | 1292 | // memory state to get correct.
duke@435 | 1293 | // |
duke@435 | 1294 | // Additionally we might allow work between an unlock and a lock before
duke@435 | 1295 | // giving up eliminating the locks. The current code disallows any |
duke@435 | 1296 | // conditional control flow between these operations. A formulation |
duke@435 | 1297 | // similar to partial redundancy elimination computing the |
duke@435 | 1298 | // availability of unlocking and the anticipatability of locking at a |
duke@435 | 1299 | // program point would allow detection of fully redundant locking with |
duke@435 | 1300 | // some amount of work in between. I'm not sure how often I really |
duke@435 | 1301 | // think that would occur though. Most of the cases I've seen |
duke@435 | 1302 | // indicate it's likely non-trivial work would occur in between. |
duke@435 | 1303 | // There may be other more complicated constructs where we could |
duke@435 | 1304 | // eliminate locking but I haven't seen any others appear as hot or |
duke@435 | 1305 | // interesting. |
duke@435 | 1306 | // |
duke@435 | 1307 | // Locking and unlocking have a canonical form in ideal that looks |
duke@435 | 1308 | // roughly like this: |
duke@435 | 1309 | // |
duke@435 | 1310 | // <obj> |
duke@435 | 1311 | // | \\------+ |
duke@435 | 1312 | // | \ \ |
duke@435 | 1313 | // | BoxLock \ |
duke@435 | 1314 | // | | | \ |
duke@435 | 1315 | // | | \ \ |
duke@435 | 1316 | // | | FastLock |
duke@435 | 1317 | // | | / |
duke@435 | 1318 | // | | / |
duke@435 | 1319 | // | | | |
duke@435 | 1320 | // |
duke@435 | 1321 | // Lock |
duke@435 | 1322 | // | |
duke@435 | 1323 | // Proj #0 |
duke@435 | 1324 | // | |
duke@435 | 1325 | // MembarAcquire |
duke@435 | 1326 | // | |
duke@435 | 1327 | // Proj #0 |
duke@435 | 1328 | // |
duke@435 | 1329 | // MembarRelease |
duke@435 | 1330 | // | |
duke@435 | 1331 | // Proj #0 |
duke@435 | 1332 | // | |
duke@435 | 1333 | // Unlock |
duke@435 | 1334 | // | |
duke@435 | 1335 | // Proj #0 |
duke@435 | 1336 | // |
duke@435 | 1337 | // |
duke@435 | 1338 | // This code proceeds by processing each Lock node during PhaseIterGVN
duke@435 | 1339 | // and searching back through its control for the proper code
duke@435 | 1340 | // patterns. Once it finds a set of lock and unlock operations to
duke@435 | 1341 | // eliminate, they are marked as eliminatable, which causes the
duke@435 | 1342 | // expansion of the Lock and Unlock macro nodes to turn the operations into NOPs.
duke@435 | 1343 | // |
duke@435 | 1344 | //============================================================================= |
duke@435 | 1345 | |
duke@435 | 1346 | // |
duke@435 | 1347 | // Utility function to skip over uninteresting control nodes. Nodes skipped are: |
duke@435 | 1348 | // - copy regions. (These may not have been optimized away yet.) |
duke@435 | 1349 | // - eliminated locking nodes |
duke@435 | 1350 | // |
duke@435 | 1351 | static Node *next_control(Node *ctrl) { |
duke@435 | 1352 | if (ctrl == NULL) |
duke@435 | 1353 | return NULL; |
duke@435 | 1354 | while (1) { |
duke@435 | 1355 | if (ctrl->is_Region()) { |
duke@435 | 1356 | RegionNode *r = ctrl->as_Region(); |
duke@435 | 1357 | Node *n = r->is_copy(); |
duke@435 | 1358 | if (n == NULL) |
duke@435 | 1359 | break; // hit a region, return it |
duke@435 | 1360 | else |
duke@435 | 1361 | ctrl = n; |
duke@435 | 1362 | } else if (ctrl->is_Proj()) { |
duke@435 | 1363 | Node *in0 = ctrl->in(0); |
duke@435 | 1364 | if (in0->is_AbstractLock() && in0->as_AbstractLock()->is_eliminated()) { |
duke@435 | 1365 | ctrl = in0->in(0); |
duke@435 | 1366 | } else { |
duke@435 | 1367 | break; |
duke@435 | 1368 | } |
duke@435 | 1369 | } else { |
duke@435 | 1370 | break; // found an interesting control |
duke@435 | 1371 | } |
duke@435 | 1372 | } |
duke@435 | 1373 | return ctrl; |
duke@435 | 1374 | } |
duke@435 | 1375 | // |
duke@435 | 1376 | // Given a control, see if it's the control projection of an Unlock which
duke@435 | 1377 | // is operating on the same object as lock.
duke@435 | 1378 | // |
duke@435 | 1379 | bool AbstractLockNode::find_matching_unlock(const Node* ctrl, LockNode* lock, |
duke@435 | 1380 | GrowableArray<AbstractLockNode*> &lock_ops) { |
duke@435 | 1381 | ProjNode *ctrl_proj = (ctrl->is_Proj()) ? ctrl->as_Proj() : NULL; |
duke@435 | 1382 | if (ctrl_proj != NULL && ctrl_proj->_con == TypeFunc::Control) { |
duke@435 | 1383 | Node *n = ctrl_proj->in(0); |
duke@435 | 1384 | if (n != NULL && n->is_Unlock()) { |
duke@435 | 1385 | UnlockNode *unlock = n->as_Unlock(); |
duke@435 | 1386 | if ((lock->obj_node() == unlock->obj_node()) && |
duke@435 | 1387 | (lock->box_node() == unlock->box_node()) && !unlock->is_eliminated()) { |
duke@435 | 1388 | lock_ops.append(unlock); |
duke@435 | 1389 | return true; |
duke@435 | 1390 | } |
duke@435 | 1391 | } |
duke@435 | 1392 | } |
duke@435 | 1393 | return false; |
duke@435 | 1394 | } |
duke@435 | 1395 | |
duke@435 | 1396 | // |
duke@435 | 1397 | // Find the lock matching an unlock. Returns null if a safepoint |
duke@435 | 1398 | // or complicated control is encountered first. |
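duke@435 |      | // The search walks the control chain upwards, skipping projections and
duke@435 |      | // collapsing simple if/else diamonds (both arms of a single If merging at
duke@435 |      | // a two-input Region), and stops at the first SafePoint-class node found,
duke@435 |      | // which may be the Lock being sought.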
duke@435 | 1399 | LockNode *AbstractLockNode::find_matching_lock(UnlockNode* unlock) { |
duke@435 | 1400 | LockNode *lock_result = NULL; |
duke@435 | 1401 | // find the matching lock, or an intervening safepoint |
duke@435 | 1402 | Node *ctrl = next_control(unlock->in(0)); |
duke@435 | 1403 | while (1) { |
duke@435 | 1404 | assert(ctrl != NULL, "invalid control graph"); |
duke@435 | 1405 | assert(!ctrl->is_Start(), "missing lock for unlock"); |
duke@435 | 1406 | if (ctrl->is_top()) break; // dead control path |
duke@435 | 1407 | if (ctrl->is_Proj()) ctrl = ctrl->in(0); |
duke@435 | 1408 | if (ctrl->is_SafePoint()) { |
duke@435 | 1409 | break; // found a safepoint (may be the lock we are searching for) |
duke@435 | 1410 | } else if (ctrl->is_Region()) { |
duke@435 | 1411 | // Check for a simple diamond pattern. Punt on anything more complicated |
duke@435 | 1412 | if (ctrl->req() == 3 && ctrl->in(1) != NULL && ctrl->in(2) != NULL) { |
duke@435 | 1413 | Node *in1 = next_control(ctrl->in(1)); |
duke@435 | 1414 | Node *in2 = next_control(ctrl->in(2)); |
duke@435 | 1415 | if (((in1->is_IfTrue() && in2->is_IfFalse()) || |
duke@435 | 1416 | (in2->is_IfTrue() && in1->is_IfFalse())) && (in1->in(0) == in2->in(0))) { |
duke@435 | 1417 | ctrl = next_control(in1->in(0)->in(0)); |
duke@435 | 1418 | } else { |
duke@435 | 1419 | break; |
duke@435 | 1420 | } |
duke@435 | 1421 | } else { |
duke@435 | 1422 | break; |
duke@435 | 1423 | } |
duke@435 | 1424 | } else { |
duke@435 | 1425 | ctrl = next_control(ctrl->in(0)); // keep searching |
duke@435 | 1426 | } |
duke@435 | 1427 | } |
duke@435 | 1428 | if (ctrl->is_Lock()) { |
duke@435 | 1429 | LockNode *lock = ctrl->as_Lock(); |
duke@435 | 1430 | if ((lock->obj_node() == unlock->obj_node()) && |
duke@435 | 1431 | (lock->box_node() == unlock->box_node())) { |
duke@435 | 1432 | lock_result = lock; |
duke@435 | 1433 | } |
duke@435 | 1434 | } |
duke@435 | 1435 | return lock_result; |
duke@435 | 1436 | } |
duke@435 | 1437 | |
duke@435 | 1438 | // This code corresponds to case 3 above. |
duke@435 | 1439 | |
duke@435 | 1440 | bool AbstractLockNode::find_lock_and_unlock_through_if(Node* node, LockNode* lock, |
duke@435 | 1441 | GrowableArray<AbstractLockNode*> &lock_ops) { |
duke@435 | 1442 | Node* if_node = node->in(0); |
duke@435 | 1443 | bool if_true = node->is_IfTrue(); |
duke@435 | 1444 | |
duke@435 | 1445 | if (if_node->is_If() && if_node->outcnt() == 2 && (if_true || node->is_IfFalse())) { |
duke@435 | 1446 | Node *lock_ctrl = next_control(if_node->in(0)); |
duke@435 | 1447 | if (find_matching_unlock(lock_ctrl, lock, lock_ops)) { |
duke@435 | 1448 | Node* lock1_node = NULL; |
duke@435 | 1449 | ProjNode* proj = if_node->as_If()->proj_out(!if_true); |
duke@435 | 1450 | if (if_true) { |
duke@435 | 1451 | if (proj->is_IfFalse() && proj->outcnt() == 1) { |
duke@435 | 1452 | lock1_node = proj->unique_out(); |
duke@435 | 1453 | } |
duke@435 | 1454 | } else { |
duke@435 | 1455 | if (proj->is_IfTrue() && proj->outcnt() == 1) { |
duke@435 | 1456 | lock1_node = proj->unique_out(); |
duke@435 | 1457 | } |
duke@435 | 1458 | } |
duke@435 | 1459 | if (lock1_node != NULL && lock1_node->is_Lock()) { |
duke@435 | 1460 | LockNode *lock1 = lock1_node->as_Lock(); |
duke@435 | 1461 | if ((lock->obj_node() == lock1->obj_node()) && |
duke@435 | 1462 | (lock->box_node() == lock1->box_node()) && !lock1->is_eliminated()) { |
duke@435 | 1463 | lock_ops.append(lock1); |
duke@435 | 1464 | return true; |
duke@435 | 1465 | } |
duke@435 | 1466 | } |
duke@435 | 1467 | } |
duke@435 | 1468 | } |
duke@435 | 1469 | |
duke@435 | 1470 | lock_ops.trunc_to(0); |
duke@435 | 1471 | return false; |
duke@435 | 1472 | } |
duke@435 | 1473 | |
duke@435 | 1474 | bool AbstractLockNode::find_unlocks_for_region(const RegionNode* region, LockNode* lock, |
duke@435 | 1475 | GrowableArray<AbstractLockNode*> &lock_ops) { |
duke@435 | 1476 | // check each control merging at this point for a matching unlock. |
duke@435 | 1477 | // in(0) should be self edge so skip it. |
duke@435 | 1478 | for (int i = 1; i < (int)region->req(); i++) { |
duke@435 | 1479 | Node *in_node = next_control(region->in(i)); |
duke@435 | 1480 | if (in_node != NULL) { |
duke@435 | 1481 | if (find_matching_unlock(in_node, lock, lock_ops)) { |
duke@435 | 1482 | // found a match so keep on checking. |
duke@435 | 1483 | continue; |
duke@435 | 1484 | } else if (find_lock_and_unlock_through_if(in_node, lock, lock_ops)) { |
duke@435 | 1485 | continue; |
duke@435 | 1486 | } |
duke@435 | 1487 | |
duke@435 | 1488 | // If we fall through to here then it was some kind of node we |
duke@435 | 1489 | // don't understand or there wasn't a matching unlock, so give |
duke@435 | 1490 | // up trying to merge locks. |
duke@435 | 1491 | lock_ops.trunc_to(0); |
duke@435 | 1492 | return false; |
duke@435 | 1493 | } |
duke@435 | 1494 | } |
duke@435 | 1495 | return true; |
duke@435 | 1497 | }
duke@435 | 1498 | |
duke@435 | 1499 | #ifndef PRODUCT |
duke@435 | 1500 | // |
duke@435 | 1501 | // Create a counter which counts the number of times this lock is acquired |
duke@435 | 1502 | // |
duke@435 | 1503 | void AbstractLockNode::create_lock_counter(JVMState* state) { |
duke@435 | 1504 | _counter = OptoRuntime::new_named_counter(state, NamedCounter::LockCounter); |
duke@435 | 1505 | } |
duke@435 | 1506 | #endif |
duke@435 | 1507 | |
duke@435 | 1508 | void AbstractLockNode::set_eliminated() { |
duke@435 | 1509 | _eliminate = true; |
duke@435 | 1510 | #ifndef PRODUCT |
duke@435 | 1511 | if (_counter) { |
duke@435 | 1512 | // Update the counter to indicate that this lock was eliminated. |
duke@435 | 1513 | // The counter update code will stay around even though the |
duke@435 | 1514 | // optimizer will eliminate the lock operation itself. |
duke@435 | 1515 | _counter->set_tag(NamedCounter::EliminatedLockCounter); |
duke@435 | 1516 | } |
duke@435 | 1517 | #endif |
duke@435 | 1518 | } |
duke@435 | 1519 | |
duke@435 | 1520 | //============================================================================= |
duke@435 | 1521 | Node *LockNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
duke@435 | 1522 | |
kvn@501 | 1523 | // perform any generic optimizations first (returns 'this' or NULL) |
duke@435 | 1524 | Node *result = SafePointNode::Ideal(phase, can_reshape); |
duke@435 | 1525 | |
duke@435 | 1526 | // Now see if we can optimize away this lock. We don't actually |
duke@435 | 1527 | // remove the locking here, we simply set the _eliminate flag which |
duke@435 | 1528 | // prevents macro expansion from expanding the lock. Since we don't |
duke@435 | 1529 | // modify the graph, the value returned from this function is the |
duke@435 | 1530 | // one computed above. |
kvn@501 | 1531 | if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) { |
kvn@501 | 1532 | // |
kvn@501 | 1533 | // If we are locking an unescaped object, the lock/unlock is unnecessary |
kvn@501 | 1534 | // |
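kvn@501 |      | // A classic (illustrative) example is a method-local buffer that never
kvn@501 |      | // escapes the compiled scope:
kvn@501 |      | //
kvn@501 |      | //   StringBuffer sb = new StringBuffer();  // does not escape
kvn@501 |      | //   sb.append("a");                        // append() is synchronized,
kvn@501 |      | //                                          // but the lock can be elided
kvn@501 |      | //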
kvn@895 | 1535 | ConnectionGraph *cgr = phase->C->congraph(); |
kvn@501 | 1536 | PointsToNode::EscapeState es = PointsToNode::GlobalEscape; |
kvn@501 | 1537 | if (cgr != NULL) |
kvn@1989 | 1538 | es = cgr->escape_state(obj_node()); |
kvn@501 | 1539 | if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { |
kvn@501 | 1540 | // Mark it eliminated to update any counters |
kvn@501 | 1541 | this->set_eliminated(); |
kvn@501 | 1542 | return result; |
kvn@501 | 1543 | } |
kvn@501 | 1544 | |
duke@435 | 1545 | // |
duke@435 | 1546 | // Try lock coarsening |
duke@435 | 1547 | // |
duke@435 | 1548 | PhaseIterGVN* iter = phase->is_IterGVN(); |
duke@435 | 1549 | if (iter != NULL) { |
duke@435 | 1550 | |
duke@435 | 1551 | GrowableArray<AbstractLockNode*> lock_ops; |
duke@435 | 1552 | |
duke@435 | 1553 | Node *ctrl = next_control(in(0)); |
duke@435 | 1554 | |
duke@435 | 1555 | // now search back for a matching Unlock |
duke@435 | 1556 | if (find_matching_unlock(ctrl, this, lock_ops)) { |
duke@435 | 1557 | // found an unlock directly preceding this lock. This is the |
duke@435 | 1558 | // case of single unlock directly control dependent on a |
duke@435 | 1559 | // single lock which is the trivial version of case 1 or 2. |
duke@435 | 1560 | } else if (ctrl->is_Region()) {
duke@435 | 1561 | if (find_unlocks_for_region(ctrl->as_Region(), this, lock_ops)) { |
duke@435 | 1562 | // found lock preceded by multiple unlocks along all paths |
duke@435 | 1563 | // joining at this point which is case 3 in description above. |
duke@435 | 1564 | } |
duke@435 | 1565 | } else { |
duke@435 | 1566 | // see if this lock comes from either half of an if whose
duke@435 | 1567 | // predecessor performs a matching unlock and whose other half
duke@435 | 1568 | // performs a lock.
duke@435 | 1569 | if (find_lock_and_unlock_through_if(ctrl, this, lock_ops)) { |
duke@435 | 1570 | // found unlock splitting to an if with locks on both branches. |
duke@435 | 1571 | } |
duke@435 | 1572 | } |
duke@435 | 1573 | |
duke@435 | 1574 | if (lock_ops.length() > 0) { |
duke@435 | 1575 | // add ourselves to the list of locks to be eliminated. |
duke@435 | 1576 | lock_ops.append(this); |
duke@435 | 1577 | |
duke@435 | 1578 | #ifndef PRODUCT |
duke@435 | 1579 | if (PrintEliminateLocks) { |
duke@435 | 1580 | int locks = 0; |
duke@435 | 1581 | int unlocks = 0; |
duke@435 | 1582 | for (int i = 0; i < lock_ops.length(); i++) { |
duke@435 | 1583 | AbstractLockNode* lock = lock_ops.at(i); |
kvn@501 | 1584 | if (lock->Opcode() == Op_Lock) |
kvn@501 | 1585 | locks++; |
kvn@501 | 1586 | else |
kvn@501 | 1587 | unlocks++; |
duke@435 | 1588 | if (Verbose) { |
duke@435 | 1589 | lock->dump(1); |
duke@435 | 1590 | } |
duke@435 | 1591 | } |
duke@435 | 1592 | tty->print_cr("***Eliminated %d unlocks and %d locks", unlocks, locks); |
duke@435 | 1593 | } |
duke@435 | 1594 | #endif |
duke@435 | 1595 | |
duke@435 | 1596 | // mark each of the identified lock operations
duke@435 | 1597 | // as eliminatable
duke@435 | 1598 | for (int i = 0; i < lock_ops.length(); i++) { |
duke@435 | 1599 | AbstractLockNode* lock = lock_ops.at(i); |
duke@435 | 1600 | |
duke@435 | 1601 | // Mark it eliminated to update any counters |
duke@435 | 1602 | lock->set_eliminated(); |
kvn@895 | 1603 | lock->set_coarsened(); |
duke@435 | 1604 | } |
duke@435 | 1605 | } else if (result != NULL && ctrl->is_Region() && |
duke@435 | 1606 | iter->_worklist.member(ctrl)) { |
duke@435 | 1607 | // We weren't able to find any opportunities but the region this |
duke@435 | 1608 | // lock is control dependent on hasn't been processed yet so put |
duke@435 | 1609 | // this lock back on the worklist so we can check again once any |
duke@435 | 1610 | // region simplification has occurred. |
duke@435 | 1611 | iter->_worklist.push(this); |
duke@435 | 1612 | } |
duke@435 | 1613 | } |
duke@435 | 1614 | } |
duke@435 | 1615 | |
duke@435 | 1616 | return result; |
duke@435 | 1617 | } |
duke@435 | 1618 | |
duke@435 | 1619 | //============================================================================= |
duke@435 | 1620 | uint UnlockNode::size_of() const { return sizeof(*this); } |
duke@435 | 1621 | |
duke@435 | 1622 | //============================================================================= |
duke@435 | 1623 | Node *UnlockNode::Ideal(PhaseGVN *phase, bool can_reshape) { |
duke@435 | 1624 | |
kvn@501 | 1625 | // perform any generic optimizations first (returns 'this' or NULL) |
duke@435 | 1626 | Node * result = SafePointNode::Ideal(phase, can_reshape); |
duke@435 | 1627 | |
duke@435 | 1628 | // Now see if we can optimize away this unlock. We don't actually |
duke@435 | 1629 | // remove the unlocking here, we simply set the _eliminate flag which |
duke@435 | 1630 | // prevents macro expansion from expanding the unlock. Since we don't |
duke@435 | 1631 | // modify the graph, the value returned from this function is the |
duke@435 | 1632 | // one computed above. |
kvn@501 | 1633 | // Escape state is defined after Parse phase. |
kvn@501 | 1634 | if (result == NULL && can_reshape && EliminateLocks && !is_eliminated()) { |
duke@435 | 1635 | // |
kvn@501 | 1636 | // If we are unlocking an unescaped object, the lock/unlock is unnecessary. |
duke@435 | 1637 | // |
kvn@895 | 1638 | ConnectionGraph *cgr = phase->C->congraph(); |
kvn@501 | 1639 | PointsToNode::EscapeState es = PointsToNode::GlobalEscape; |
kvn@501 | 1640 | if (cgr != NULL) |
kvn@1989 | 1641 | es = cgr->escape_state(obj_node()); |
kvn@501 | 1642 | if (es != PointsToNode::UnknownEscape && es != PointsToNode::GlobalEscape) { |
kvn@501 | 1643 | // Mark it eliminated to update any counters |
kvn@501 | 1644 | this->set_eliminated(); |
duke@435 | 1645 | } |
duke@435 | 1646 | } |
duke@435 | 1647 | return result; |
duke@435 | 1648 | } |