Thu, 26 Jun 2008 11:43:34 -0700
6718811: Mismerge of 6680469:macro.cpp
Summary: Fixed the mismerge by deleting the lines that were inadvertently left in place.
Reviewed-by: iveresov
duke@435 | 1 | /* |
duke@435 | 2 | * Copyright 2005-2007 Sun Microsystems, Inc. All Rights Reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
duke@435 | 19 | * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, |
duke@435 | 20 | * CA 95054 USA or visit www.sun.com if you need additional information or |
duke@435 | 21 | * have any questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
duke@435 | 25 | #include "incls/_precompiled.incl" |
duke@435 | 26 | #include "incls/_macro.cpp.incl" |
duke@435 | 27 | |
duke@435 | 28 | |
duke@435 | 29 | // |
duke@435 | 30 | // Replace any references to "oldref" in inputs to "use" with "newref". |
duke@435 | 31 | // Returns the number of replacements made. |
duke@435 | 32 | // |
duke@435 | 33 | int PhaseMacroExpand::replace_input(Node *use, Node *oldref, Node *newref) { |
duke@435 | 34 | int nreplacements = 0; |
duke@435 | 35 | uint req = use->req(); |
duke@435 | 36 | for (uint j = 0; j < use->len(); j++) { |
duke@435 | 37 | Node *uin = use->in(j); |
duke@435 | 38 | if (uin == oldref) { |
duke@435 | 39 | if (j < req) |
duke@435 | 40 | use->set_req(j, newref); |
duke@435 | 41 | else |
duke@435 | 42 | use->set_prec(j, newref); |
duke@435 | 43 | nreplacements++; |
duke@435 | 44 | } else if (j >= req && uin == NULL) { |
duke@435 | 45 | break; |
duke@435 | 46 | } |
duke@435 | 47 | } |
duke@435 | 48 | return nreplacements; |
duke@435 | 49 | } |
duke@435 | 50 | |
duke@435 | 51 | void PhaseMacroExpand::copy_call_debug_info(CallNode *oldcall, CallNode * newcall) { |
duke@435 | 52 | // Copy debug information and adjust JVMState information |
duke@435 | 53 | uint old_dbg_start = oldcall->tf()->domain()->cnt(); |
duke@435 | 54 | uint new_dbg_start = newcall->tf()->domain()->cnt(); |
duke@435 | 55 | int jvms_adj = new_dbg_start - old_dbg_start; |
duke@435 | 56 | assert (new_dbg_start == newcall->req(), "argument count mismatch"); |
kvn@498 | 57 | |
kvn@498 | 58 | Dict* sosn_map = new Dict(cmpkey,hashkey); |
duke@435 | 59 | for (uint i = old_dbg_start; i < oldcall->req(); i++) { |
kvn@498 | 60 | Node* old_in = oldcall->in(i); |
kvn@498 | 61 | // Clone old SafePointScalarObjectNodes, adjusting their field contents. |
kvn@498 | 62 | if (old_in->is_SafePointScalarObject()) { |
kvn@498 | 63 | SafePointScalarObjectNode* old_sosn = old_in->as_SafePointScalarObject(); |
kvn@498 | 64 | uint old_unique = C->unique(); |
kvn@498 | 65 | Node* new_in = old_sosn->clone(jvms_adj, sosn_map); |
kvn@498 | 66 | if (old_unique != C->unique()) { |
kvn@498 | 67 | new_in = transform_later(new_in); // Register new node. |
kvn@498 | 68 | } |
kvn@498 | 69 | old_in = new_in; |
kvn@498 | 70 | } |
kvn@498 | 71 | newcall->add_req(old_in); |
duke@435 | 72 | } |
kvn@498 | 73 | |
duke@435 | 74 | newcall->set_jvms(oldcall->jvms()); |
duke@435 | 75 | for (JVMState *jvms = newcall->jvms(); jvms != NULL; jvms = jvms->caller()) { |
duke@435 | 76 | jvms->set_map(newcall); |
duke@435 | 77 | jvms->set_locoff(jvms->locoff()+jvms_adj); |
duke@435 | 78 | jvms->set_stkoff(jvms->stkoff()+jvms_adj); |
duke@435 | 79 | jvms->set_monoff(jvms->monoff()+jvms_adj); |
kvn@498 | 80 | jvms->set_scloff(jvms->scloff()+jvms_adj); |
duke@435 | 81 | jvms->set_endoff(jvms->endoff()+jvms_adj); |
duke@435 | 82 | } |
duke@435 | 83 | } |
duke@435 | 84 | |
duke@435 | 85 | Node* PhaseMacroExpand::opt_iff(Node* region, Node* iff) { |
duke@435 | 86 | IfNode *opt_iff = transform_later(iff)->as_If(); |
duke@435 | 87 | |
duke@435 | 88 | // Fast path taken; set region slot 2 |
duke@435 | 89 | Node *fast_taken = transform_later( new (C, 1) IfFalseNode(opt_iff) ); |
duke@435 | 90 | region->init_req(2,fast_taken); // Capture fast-control |
duke@435 | 91 | |
duke@435 | 92 | // Fast path not-taken, i.e. slow path |
duke@435 | 93 | Node *slow_taken = transform_later( new (C, 1) IfTrueNode(opt_iff) ); |
duke@435 | 94 | return slow_taken; |
duke@435 | 95 | } |
duke@435 | 96 | |
duke@435 | 97 | //--------------------copy_predefined_input_for_runtime_call-------------------- |
duke@435 | 98 | void PhaseMacroExpand::copy_predefined_input_for_runtime_call(Node * ctrl, CallNode* oldcall, CallNode* call) { |
duke@435 | 99 | // Set fixed predefined input arguments |
duke@435 | 100 | call->init_req( TypeFunc::Control, ctrl ); |
duke@435 | 101 | call->init_req( TypeFunc::I_O , oldcall->in( TypeFunc::I_O) ); |
duke@435 | 102 | call->init_req( TypeFunc::Memory , oldcall->in( TypeFunc::Memory ) ); // ????? |
duke@435 | 103 | call->init_req( TypeFunc::ReturnAdr, oldcall->in( TypeFunc::ReturnAdr ) ); |
duke@435 | 104 | call->init_req( TypeFunc::FramePtr, oldcall->in( TypeFunc::FramePtr ) ); |
duke@435 | 105 | } |
duke@435 | 106 | |
duke@435 | 107 | //------------------------------make_slow_call--------------------------------- |
duke@435 | 108 | CallNode* PhaseMacroExpand::make_slow_call(CallNode *oldcall, const TypeFunc* slow_call_type, address slow_call, const char* leaf_name, Node* slow_path, Node* parm0, Node* parm1) { |
duke@435 | 109 | |
duke@435 | 110 | // Slow-path call |
duke@435 | 111 | int size = slow_call_type->domain()->cnt(); |
duke@435 | 112 | CallNode *call = leaf_name |
duke@435 | 113 | ? (CallNode*)new (C, size) CallLeafNode ( slow_call_type, slow_call, leaf_name, TypeRawPtr::BOTTOM ) |
duke@435 | 114 | : (CallNode*)new (C, size) CallStaticJavaNode( slow_call_type, slow_call, OptoRuntime::stub_name(slow_call), oldcall->jvms()->bci(), TypeRawPtr::BOTTOM ); |
duke@435 | 115 | |
duke@435 | 116 | // Slow path call has no side-effects, uses few values |
duke@435 | 117 | copy_predefined_input_for_runtime_call(slow_path, oldcall, call ); |
duke@435 | 118 | if (parm0 != NULL) call->init_req(TypeFunc::Parms+0, parm0); |
duke@435 | 119 | if (parm1 != NULL) call->init_req(TypeFunc::Parms+1, parm1); |
duke@435 | 120 | copy_call_debug_info(oldcall, call); |
duke@435 | 121 | call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. |
duke@435 | 122 | _igvn.hash_delete(oldcall); |
duke@435 | 123 | _igvn.subsume_node(oldcall, call); |
duke@435 | 124 | transform_later(call); |
duke@435 | 125 | |
duke@435 | 126 | return call; |
duke@435 | 127 | } |
duke@435 | 128 | |
duke@435 | 129 | void PhaseMacroExpand::extract_call_projections(CallNode *call) { |
duke@435 | 130 | _fallthroughproj = NULL; |
duke@435 | 131 | _fallthroughcatchproj = NULL; |
duke@435 | 132 | _ioproj_fallthrough = NULL; |
duke@435 | 133 | _ioproj_catchall = NULL; |
duke@435 | 134 | _catchallcatchproj = NULL; |
duke@435 | 135 | _memproj_fallthrough = NULL; |
duke@435 | 136 | _memproj_catchall = NULL; |
duke@435 | 137 | _resproj = NULL; |
duke@435 | 138 | for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { |
duke@435 | 139 | ProjNode *pn = call->fast_out(i)->as_Proj(); |
duke@435 | 140 | switch (pn->_con) { |
duke@435 | 141 | case TypeFunc::Control: |
duke@435 | 142 | { |
duke@435 | 143 | // For Control (fallthrough) and I_O (catch_all_index) we have CatchProj -> Catch -> Proj |
duke@435 | 144 | _fallthroughproj = pn; |
duke@435 | 145 | DUIterator_Fast jmax, j = pn->fast_outs(jmax); |
duke@435 | 146 | const Node *cn = pn->fast_out(j); |
duke@435 | 147 | if (cn->is_Catch()) { |
duke@435 | 148 | ProjNode *cpn = NULL; |
duke@435 | 149 | for (DUIterator_Fast kmax, k = cn->fast_outs(kmax); k < kmax; k++) { |
duke@435 | 150 | cpn = cn->fast_out(k)->as_Proj(); |
duke@435 | 151 | assert(cpn->is_CatchProj(), "must be a CatchProjNode"); |
duke@435 | 152 | if (cpn->_con == CatchProjNode::fall_through_index) |
duke@435 | 153 | _fallthroughcatchproj = cpn; |
duke@435 | 154 | else { |
duke@435 | 155 | assert(cpn->_con == CatchProjNode::catch_all_index, "must be correct index."); |
duke@435 | 156 | _catchallcatchproj = cpn; |
duke@435 | 157 | } |
duke@435 | 158 | } |
duke@435 | 159 | } |
duke@435 | 160 | break; |
duke@435 | 161 | } |
duke@435 | 162 | case TypeFunc::I_O: |
duke@435 | 163 | if (pn->_is_io_use) |
duke@435 | 164 | _ioproj_catchall = pn; |
duke@435 | 165 | else |
duke@435 | 166 | _ioproj_fallthrough = pn; |
duke@435 | 167 | break; |
duke@435 | 168 | case TypeFunc::Memory: |
duke@435 | 169 | if (pn->_is_io_use) |
duke@435 | 170 | _memproj_catchall = pn; |
duke@435 | 171 | else |
duke@435 | 172 | _memproj_fallthrough = pn; |
duke@435 | 173 | break; |
duke@435 | 174 | case TypeFunc::Parms: |
duke@435 | 175 | _resproj = pn; |
duke@435 | 176 | break; |
duke@435 | 177 | default: |
duke@435 | 178 | assert(false, "unexpected projection from allocation node."); |
duke@435 | 179 | } |
duke@435 | 180 | } |
duke@435 | 181 | |
duke@435 | 182 | } |
duke@435 | 183 | |
kvn@508 | 184 | // Eliminate a card mark sequence. p2x is a ConvP2XNode |
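// As a sketch (exact node kinds depend on the barrier implementation), the
// sequence being removed below has this shape:
//
//   CastP2X (object address as an integer)
//     --> URShiftX (address >> card_shift)
//           --> AddP (card table base + card index)
//                 --> StoreB (write the "dirty" value into the card)
//
// Each store found under the AddP is replaced by its memory input, which
// unlinks the whole chain.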
kvn@508 | 185 | void PhaseMacroExpand::eliminate_card_mark(Node *p2x) { |
kvn@508 | 186 | assert(p2x->Opcode() == Op_CastP2X, "ConvP2XNode required"); |
kvn@508 | 187 | Node *shift = p2x->unique_out(); |
kvn@508 | 188 | Node *addp = shift->unique_out(); |
kvn@508 | 189 | for (DUIterator_Last jmin, j = addp->last_outs(jmin); j >= jmin; --j) { |
kvn@508 | 190 | Node *st = addp->last_out(j); |
kvn@508 | 191 | assert(st->is_Store(), "store required"); |
kvn@508 | 192 | _igvn.replace_node(st, st->in(MemNode::Memory)); |
kvn@508 | 193 | } |
kvn@508 | 194 | } |
kvn@508 | 195 | |
kvn@508 | 196 | // Search for a memory operation for the specified memory slice. |
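// In outline, the loop below walks the memory graph backward from "mem":
// it selects our slice out of any MergeMem and skips over safepoints, calls,
// locks and membars (safe because the object is already known to be
// eliminable), stopping at the Initialize of this allocation, at a store to
// the same instance field, or at one of the two sentinels (start_mem,
// alloc_mem), which mean no store was found on this chain.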
kvn@508 | 197 | static Node *scan_mem_chain(Node *mem, int alias_idx, int offset, Node *start_mem, Node *alloc) { |
kvn@508 | 198 | Node *orig_mem = mem; |
kvn@508 | 199 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@508 | 200 | while (true) { |
kvn@508 | 201 | if (mem == alloc_mem || mem == start_mem ) { |
kvn@508 | 202 | return mem; // hit one of our sentinels |
kvn@508 | 203 | } else if (mem->is_MergeMem()) { |
kvn@508 | 204 | mem = mem->as_MergeMem()->memory_at(alias_idx); |
kvn@508 | 205 | } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) { |
kvn@508 | 206 | Node *in = mem->in(0); |
kvn@508 | 207 | // we can safely skip over safepoints, calls, locks and membars because we |
kvn@508 | 208 | // already know that the object is safe to eliminate. |
kvn@508 | 209 | if (in->is_Initialize() && in->as_Initialize()->allocation() == alloc) { |
kvn@508 | 210 | return in; |
kvn@508 | 211 | } else if (in->is_Call() || in->is_MemBar()) { |
kvn@508 | 212 | mem = in->in(TypeFunc::Memory); |
kvn@508 | 213 | } else { |
kvn@508 | 214 | assert(false, "unexpected projection"); |
kvn@508 | 215 | } |
kvn@508 | 216 | } else if (mem->is_Store()) { |
kvn@508 | 217 | const TypePtr* atype = mem->as_Store()->adr_type(); |
kvn@508 | 218 | int adr_idx = Compile::current()->get_alias_index(atype); |
kvn@508 | 219 | if (adr_idx == alias_idx) { |
kvn@508 | 220 | assert(atype->isa_oopptr(), "address type must be oopptr"); |
kvn@508 | 221 | int adr_offset = atype->offset(); |
kvn@508 | 222 | uint adr_iid = atype->is_oopptr()->instance_id(); |
kvn@508 | 223 | // Array element references have the same alias_idx |
kvn@508 | 224 | // but different offsets and different instance_ids. |
kvn@508 | 225 | if (adr_offset == offset && adr_iid == alloc->_idx) |
kvn@508 | 226 | return mem; |
kvn@508 | 227 | } else { |
kvn@508 | 228 | assert(adr_idx == Compile::AliasIdxRaw, "address must match or be raw"); |
kvn@508 | 229 | } |
kvn@508 | 230 | mem = mem->in(MemNode::Memory); |
kvn@508 | 231 | } else { |
kvn@508 | 232 | return mem; |
kvn@508 | 233 | } |
kvn@508 | 234 | if (mem == orig_mem) |
kvn@508 | 235 | return mem; |
kvn@508 | 236 | } |
kvn@508 | 237 | } |
kvn@508 | 238 | |
kvn@508 | 239 | // |
kvn@508 | 240 | // Given a Memory Phi, compute a value Phi containing the values from stores |
kvn@508 | 241 | // on the input paths. |
kvn@508 | 242 | // Note: this function is recursive; its depth is limited by the "level" argument. |
kvn@508 | 243 | // Returns the computed Phi, or NULL if it cannot compute it. |
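//
// An illustrative example: given a memory Phi
//   mem = Phi(Region, mem1, mem2)
// where the chain from mem1 ends in a store of v1 to this field and the
// chain from mem2 reaches the allocation itself, the computed value Phi is
//   Phi(Region, v1, zerocon(ft))
// since a path that reaches the allocation still holds the default value.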
kvn@508 | 244 | Node *PhaseMacroExpand::value_from_mem_phi(Node *mem, BasicType ft, const Type *phi_type, const TypeOopPtr *adr_t, Node *alloc, int level) { |
kvn@508 | 245 | |
kvn@508 | 246 | if (level <= 0) { |
kvn@508 | 247 | return NULL; |
kvn@508 | 248 | } |
kvn@508 | 249 | int alias_idx = C->get_alias_index(adr_t); |
kvn@508 | 250 | int offset = adr_t->offset(); |
kvn@508 | 251 | int instance_id = adr_t->instance_id(); |
kvn@508 | 252 | |
kvn@508 | 253 | Node *start_mem = C->start()->proj_out(TypeFunc::Memory); |
kvn@508 | 254 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@508 | 255 | |
kvn@508 | 256 | uint length = mem->req(); |
kvn@508 | 257 | GrowableArray <Node *> values(length, length, NULL); |
kvn@508 | 258 | |
kvn@508 | 259 | for (uint j = 1; j < length; j++) { |
kvn@508 | 260 | Node *in = mem->in(j); |
kvn@508 | 261 | if (in == NULL || in->is_top()) { |
kvn@508 | 262 | values.at_put(j, in); |
kvn@508 | 263 | } else { |
kvn@508 | 264 | Node *val = scan_mem_chain(in, alias_idx, offset, start_mem, alloc); |
kvn@508 | 265 | if (val == start_mem || val == alloc_mem) { |
kvn@508 | 266 | // hit a sentinel, return appropriate 0 value |
kvn@508 | 267 | values.at_put(j, _igvn.zerocon(ft)); |
kvn@508 | 268 | continue; |
kvn@508 | 269 | } |
kvn@508 | 270 | if (val->is_Initialize()) { |
kvn@508 | 271 | val = val->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); |
kvn@508 | 272 | } |
kvn@508 | 273 | if (val == NULL) { |
kvn@508 | 274 | return NULL; // can't find a value on this path |
kvn@508 | 275 | } |
kvn@508 | 276 | if (val == mem) { |
kvn@508 | 277 | values.at_put(j, mem); |
kvn@508 | 278 | } else if (val->is_Store()) { |
kvn@508 | 279 | values.at_put(j, val->in(MemNode::ValueIn)); |
kvn@508 | 280 | } else if (val->is_Proj() && val->in(0) == alloc) { |
kvn@508 | 281 | values.at_put(j, _igvn.zerocon(ft)); |
kvn@508 | 282 | } else if (val->is_Phi()) { |
kvn@508 | 283 | // Check if an appropriate node already exists. |
kvn@508 | 284 | Node* region = val->in(0); |
kvn@508 | 285 | Node* old_phi = NULL; |
kvn@508 | 286 | for (DUIterator_Fast kmax, k = region->fast_outs(kmax); k < kmax; k++) { |
kvn@508 | 287 | Node* phi = region->fast_out(k); |
kvn@508 | 288 | if (phi->is_Phi() && phi != val && |
kvn@508 | 289 | phi->as_Phi()->is_same_inst_field(phi_type, instance_id, alias_idx, offset)) { |
kvn@508 | 290 | old_phi = phi; |
kvn@508 | 291 | break; |
kvn@508 | 292 | } |
kvn@508 | 293 | } |
kvn@508 | 294 | if (old_phi == NULL) { |
kvn@508 | 295 | val = value_from_mem_phi(val, ft, phi_type, adr_t, alloc, level-1); |
kvn@508 | 296 | if (val == NULL) { |
kvn@508 | 297 | return NULL; |
kvn@508 | 298 | } |
kvn@508 | 299 | values.at_put(j, val); |
kvn@508 | 300 | } else { |
kvn@508 | 301 | values.at_put(j, old_phi); |
kvn@508 | 302 | } |
kvn@508 | 303 | } else { |
kvn@508 | 304 | return NULL; // unknown node on this path |
kvn@508 | 305 | } |
kvn@508 | 306 | } |
kvn@508 | 307 | } |
kvn@508 | 308 | // create a new Phi for the value |
kvn@508 | 309 | PhiNode *phi = new (C, length) PhiNode(mem->in(0), phi_type, NULL, instance_id, alias_idx, offset); |
kvn@508 | 310 | for (uint j = 1; j < length; j++) { |
kvn@508 | 311 | if (values.at(j) == mem) { |
kvn@508 | 312 | phi->init_req(j, phi); |
kvn@508 | 313 | } else { |
kvn@508 | 314 | phi->init_req(j, values.at(j)); |
kvn@508 | 315 | } |
kvn@508 | 316 | } |
kvn@508 | 317 | transform_later(phi); |
kvn@508 | 318 | return phi; |
kvn@508 | 319 | } |
kvn@508 | 320 | |
kvn@508 | 321 | // Search for the last value stored into the object's field. |
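// The two sentinels play the same role as in scan_mem_chain(): reaching
// start_mem or alloc_mem means no store to this field occurred between the
// allocation and the safepoint, so the field still holds its default
// (zero/null) value.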
kvn@508 | 322 | Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, Node *alloc) { |
kvn@508 | 323 | assert(adr_t->is_instance_field(), "instance required"); |
kvn@508 | 324 | uint instance_id = adr_t->instance_id(); |
kvn@508 | 325 | assert(instance_id == alloc->_idx, "wrong allocation"); |
kvn@508 | 326 | |
kvn@508 | 327 | int alias_idx = C->get_alias_index(adr_t); |
kvn@508 | 328 | int offset = adr_t->offset(); |
kvn@508 | 329 | Node *start_mem = C->start()->proj_out(TypeFunc::Memory); |
kvn@508 | 330 | Node *alloc_ctrl = alloc->in(TypeFunc::Control); |
kvn@508 | 331 | Node *alloc_mem = alloc->in(TypeFunc::Memory); |
kvn@508 | 332 | VectorSet visited(Thread::current()->resource_area()); |
kvn@508 | 333 | |
kvn@508 | 334 | |
kvn@508 | 335 | bool done = sfpt_mem == alloc_mem; |
kvn@508 | 336 | Node *mem = sfpt_mem; |
kvn@508 | 337 | while (!done) { |
kvn@508 | 338 | if (visited.test_set(mem->_idx)) { |
kvn@508 | 339 | return NULL; // found a loop, give up |
kvn@508 | 340 | } |
kvn@508 | 341 | mem = scan_mem_chain(mem, alias_idx, offset, start_mem, alloc); |
kvn@508 | 342 | if (mem == start_mem || mem == alloc_mem) { |
kvn@508 | 343 | done = true; // hit a sentinel, return appropriate 0 value |
kvn@508 | 344 | } else if (mem->is_Initialize()) { |
kvn@508 | 345 | mem = mem->as_Initialize()->find_captured_store(offset, type2aelembytes(ft), &_igvn); |
kvn@508 | 346 | if (mem == NULL) { |
kvn@508 | 347 | done = true; // Something went wrong. |
kvn@508 | 348 | } else if (mem->is_Store()) { |
kvn@508 | 349 | const TypePtr* atype = mem->as_Store()->adr_type(); |
kvn@508 | 350 | assert(C->get_alias_index(atype) == Compile::AliasIdxRaw, "store is correct memory slice"); |
kvn@508 | 351 | done = true; |
kvn@508 | 352 | } |
kvn@508 | 353 | } else if (mem->is_Store()) { |
kvn@508 | 354 | const TypeOopPtr* atype = mem->as_Store()->adr_type()->isa_oopptr(); |
kvn@508 | 355 | assert(atype != NULL, "address type must be oopptr"); |
kvn@508 | 356 | assert(C->get_alias_index(atype) == alias_idx && |
kvn@508 | 357 | atype->is_instance_field() && atype->offset() == offset && |
kvn@508 | 358 | atype->instance_id() == instance_id, "store is correct memory slice"); |
kvn@508 | 359 | done = true; |
kvn@508 | 360 | } else if (mem->is_Phi()) { |
kvn@508 | 361 | // try to find a phi's unique input |
kvn@508 | 362 | Node *unique_input = NULL; |
kvn@508 | 363 | Node *top = C->top(); |
kvn@508 | 364 | for (uint i = 1; i < mem->req(); i++) { |
kvn@508 | 365 | Node *n = scan_mem_chain(mem->in(i), alias_idx, offset, start_mem, alloc); |
kvn@508 | 366 | if (n == NULL || n == top || n == mem) { |
kvn@508 | 367 | continue; |
kvn@508 | 368 | } else if (unique_input == NULL) { |
kvn@508 | 369 | unique_input = n; |
kvn@508 | 370 | } else if (unique_input != n) { |
kvn@508 | 371 | unique_input = top; |
kvn@508 | 372 | break; |
kvn@508 | 373 | } |
kvn@508 | 374 | } |
kvn@508 | 375 | if (unique_input != NULL && unique_input != top) { |
kvn@508 | 376 | mem = unique_input; |
kvn@508 | 377 | } else { |
kvn@508 | 378 | done = true; |
kvn@508 | 379 | } |
kvn@508 | 380 | } else { |
kvn@508 | 381 | assert(false, "unexpected node"); |
kvn@508 | 382 | } |
kvn@508 | 383 | } |
kvn@508 | 384 | if (mem != NULL) { |
kvn@508 | 385 | if (mem == start_mem || mem == alloc_mem) { |
kvn@508 | 386 | // hit a sentinel, return appropriate 0 value |
kvn@508 | 387 | return _igvn.zerocon(ft); |
kvn@508 | 388 | } else if (mem->is_Store()) { |
kvn@508 | 389 | return mem->in(MemNode::ValueIn); |
kvn@508 | 390 | } else if (mem->is_Phi()) { |
kvn@508 | 391 | // attempt to produce a Phi reflecting the values on the input paths of the Phi |
kvn@508 | 392 | Node * phi = value_from_mem_phi(mem, ft, ftype, adr_t, alloc, 8); |
kvn@508 | 393 | if (phi != NULL) { |
kvn@508 | 394 | return phi; |
kvn@508 | 395 | } |
kvn@508 | 396 | } |
kvn@508 | 397 | } |
kvn@508 | 398 | // Something went wrong. |
kvn@508 | 399 | return NULL; |
kvn@508 | 400 | } |
kvn@508 | 401 | |
kvn@508 | 402 | // Check the possibility of scalar replacement. |
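// In short, the allocation survives only if every use of its result is an
// AddP with a known offset whose users are all stores (or the CastP2X of a
// card mark), or a safepoint that sees the object only in its debug info.
// A field load, a call argument, a Phi or a return disqualifies it; the
// reason is recorded for -XX:+PrintEliminateAllocations output.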
kvn@508 | 403 | bool PhaseMacroExpand::can_eliminate_allocation(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) { |
kvn@508 | 404 | // Scan the uses of the allocation to check for anything that would |
kvn@508 | 405 | // prevent us from eliminating it. |
kvn@508 | 406 | NOT_PRODUCT( const char* fail_eliminate = NULL; ) |
kvn@508 | 407 | DEBUG_ONLY( Node* disq_node = NULL; ) |
kvn@508 | 408 | bool can_eliminate = true; |
kvn@508 | 409 | |
kvn@508 | 410 | Node* res = alloc->result_cast(); |
kvn@508 | 411 | const TypeOopPtr* res_type = NULL; |
kvn@508 | 412 | if (res == NULL) { |
kvn@508 | 413 | // All users were eliminated. |
kvn@508 | 414 | } else if (!res->is_CheckCastPP()) { |
kvn@508 | 415 | alloc->_is_scalar_replaceable = false; // don't try again |
kvn@508 | 416 | NOT_PRODUCT(fail_eliminate = "Allocation does not have unique CheckCastPP";) |
kvn@508 | 417 | can_eliminate = false; |
kvn@508 | 418 | } else { |
kvn@508 | 419 | res_type = _igvn.type(res)->isa_oopptr(); |
kvn@508 | 420 | if (res_type == NULL) { |
kvn@508 | 421 | NOT_PRODUCT(fail_eliminate = "Neither instance nor array allocation";) |
kvn@508 | 422 | can_eliminate = false; |
kvn@508 | 423 | } else if (res_type->isa_aryptr()) { |
kvn@508 | 424 | int length = alloc->in(AllocateNode::ALength)->find_int_con(-1); |
kvn@508 | 425 | if (length < 0) { |
kvn@508 | 426 | NOT_PRODUCT(fail_eliminate = "Array's size is not constant";) |
kvn@508 | 427 | can_eliminate = false; |
kvn@508 | 428 | } |
kvn@508 | 429 | } |
kvn@508 | 430 | } |
kvn@508 | 431 | |
kvn@508 | 432 | if (can_eliminate && res != NULL) { |
kvn@508 | 433 | for (DUIterator_Fast jmax, j = res->fast_outs(jmax); |
kvn@508 | 434 | j < jmax && can_eliminate; j++) { |
kvn@508 | 435 | Node* use = res->fast_out(j); |
kvn@508 | 436 | |
kvn@508 | 437 | if (use->is_AddP()) { |
kvn@508 | 438 | const TypePtr* addp_type = _igvn.type(use)->is_ptr(); |
kvn@508 | 439 | int offset = addp_type->offset(); |
kvn@508 | 440 | |
kvn@508 | 441 | if (offset == Type::OffsetTop || offset == Type::OffsetBot) { |
kvn@508 | 442 | NOT_PRODUCT(fail_eliminate = "Undefined field reference";) |
kvn@508 | 443 | can_eliminate = false; |
kvn@508 | 444 | break; |
kvn@508 | 445 | } |
kvn@508 | 446 | for (DUIterator_Fast kmax, k = use->fast_outs(kmax); |
kvn@508 | 447 | k < kmax && can_eliminate; k++) { |
kvn@508 | 448 | Node* n = use->fast_out(k); |
kvn@508 | 449 | if (!n->is_Store() && n->Opcode() != Op_CastP2X) { |
kvn@508 | 450 | DEBUG_ONLY(disq_node = n;) |
kvn@508 | 451 | if (n->is_Load()) { |
kvn@508 | 452 | NOT_PRODUCT(fail_eliminate = "Field load";) |
kvn@508 | 453 | } else { |
kvn@508 | 454 | NOT_PRODUCT(fail_eliminate = "Non-store field reference";) |
kvn@508 | 455 | } |
kvn@508 | 456 | can_eliminate = false; |
kvn@508 | 457 | } |
kvn@508 | 458 | } |
kvn@508 | 459 | } else if (use->is_SafePoint()) { |
kvn@508 | 460 | SafePointNode* sfpt = use->as_SafePoint(); |
kvn@603 | 461 | if (sfpt->is_Call() && sfpt->as_Call()->has_non_debug_use(res)) { |
kvn@508 | 462 | // Object is passed as argument. |
kvn@508 | 463 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 464 | NOT_PRODUCT(fail_eliminate = "Object is passed as argument";) |
kvn@508 | 465 | can_eliminate = false; |
kvn@508 | 466 | } |
kvn@508 | 467 | Node* sfptMem = sfpt->memory(); |
kvn@508 | 468 | if (sfptMem == NULL || sfptMem->is_top()) { |
kvn@508 | 469 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 470 | NOT_PRODUCT(fail_eliminate = "NULL or TOP memory";) |
kvn@508 | 471 | can_eliminate = false; |
kvn@508 | 472 | } else { |
kvn@508 | 473 | safepoints.append_if_missing(sfpt); |
kvn@508 | 474 | } |
kvn@508 | 475 | } else if (use->Opcode() != Op_CastP2X) { // CastP2X is used by card mark |
kvn@508 | 476 | if (use->is_Phi()) { |
kvn@508 | 477 | if (use->outcnt() == 1 && use->unique_out()->Opcode() == Op_Return) { |
kvn@508 | 478 | NOT_PRODUCT(fail_eliminate = "Object is return value";) |
kvn@508 | 479 | } else { |
kvn@508 | 480 | NOT_PRODUCT(fail_eliminate = "Object is referenced by Phi";) |
kvn@508 | 481 | } |
kvn@508 | 482 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 483 | } else { |
kvn@508 | 484 | if (use->Opcode() == Op_Return) { |
kvn@508 | 485 | NOT_PRODUCT(fail_eliminate = "Object is return value";) |
kvn@508 | 486 | } else { |
kvn@508 | 487 | NOT_PRODUCT(fail_eliminate = "Object is referenced by node";) |
kvn@508 | 488 | } |
kvn@508 | 489 | DEBUG_ONLY(disq_node = use;) |
kvn@508 | 490 | } |
kvn@508 | 491 | can_eliminate = false; |
kvn@508 | 492 | } |
kvn@508 | 493 | } |
kvn@508 | 494 | } |
kvn@508 | 495 | |
kvn@508 | 496 | #ifndef PRODUCT |
kvn@508 | 497 | if (PrintEliminateAllocations) { |
kvn@508 | 498 | if (can_eliminate) { |
kvn@508 | 499 | tty->print("Scalar "); |
kvn@508 | 500 | if (res == NULL) |
kvn@508 | 501 | alloc->dump(); |
kvn@508 | 502 | else |
kvn@508 | 503 | res->dump(); |
kvn@508 | 504 | } else { |
kvn@508 | 505 | tty->print("NotScalar (%s)", fail_eliminate); |
kvn@508 | 506 | if (res == NULL) |
kvn@508 | 507 | alloc->dump(); |
kvn@508 | 508 | else |
kvn@508 | 509 | res->dump(); |
kvn@508 | 510 | #ifdef ASSERT |
kvn@508 | 511 | if (disq_node != NULL) { |
kvn@508 | 512 | tty->print(" >>>> "); |
kvn@508 | 513 | disq_node->dump(); |
kvn@508 | 514 | } |
kvn@508 | 515 | #endif /*ASSERT*/ |
kvn@508 | 516 | } |
kvn@508 | 517 | } |
kvn@508 | 518 | #endif |
kvn@508 | 519 | return can_eliminate; |
kvn@508 | 520 | } |
kvn@508 | 521 | |
kvn@508 | 522 | // Do scalar replacement. |
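// Sketch of the transformation performed below: for each safepoint that
// refers to the allocated object, append a SafePointScalarObjectNode plus
// one debug edge per field (values recovered by value_from_mem()), then
// rewrite the debug info to reference the SafePointScalarObjectNode instead
// of the object. If any field value cannot be found, the safepoints already
// processed are rolled back and the allocation is left in place.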
kvn@508 | 523 | bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <SafePointNode *>& safepoints) { |
kvn@508 | 524 | GrowableArray <SafePointNode *> safepoints_done; |
kvn@508 | 525 | |
kvn@508 | 526 | ciKlass* klass = NULL; |
kvn@508 | 527 | ciInstanceKlass* iklass = NULL; |
kvn@508 | 528 | int nfields = 0; |
kvn@508 | 529 | int array_base; |
kvn@508 | 530 | int element_size; |
kvn@508 | 531 | BasicType basic_elem_type; |
kvn@508 | 532 | ciType* elem_type; |
kvn@508 | 533 | |
kvn@508 | 534 | Node* res = alloc->result_cast(); |
kvn@508 | 535 | const TypeOopPtr* res_type = NULL; |
kvn@508 | 536 | if (res != NULL) { // Could be NULL when there are no users |
kvn@508 | 537 | res_type = _igvn.type(res)->isa_oopptr(); |
kvn@508 | 538 | } |
kvn@508 | 539 | |
kvn@508 | 540 | if (res != NULL) { |
kvn@508 | 541 | klass = res_type->klass(); |
kvn@508 | 542 | if (res_type->isa_instptr()) { |
kvn@508 | 543 | // find the fields of the class which will be needed for safepoint debug information |
kvn@508 | 544 | assert(klass->is_instance_klass(), "must be an instance klass."); |
kvn@508 | 545 | iklass = klass->as_instance_klass(); |
kvn@508 | 546 | nfields = iklass->nof_nonstatic_fields(); |
kvn@508 | 547 | } else { |
kvn@508 | 548 | // find the array's elements which will be needed for safepoint debug information |
kvn@508 | 549 | nfields = alloc->in(AllocateNode::ALength)->find_int_con(-1); |
kvn@508 | 550 | assert(klass->is_array_klass() && nfields >= 0, "must be an array klass."); |
kvn@508 | 551 | elem_type = klass->as_array_klass()->element_type(); |
kvn@508 | 552 | basic_elem_type = elem_type->basic_type(); |
kvn@508 | 553 | array_base = arrayOopDesc::base_offset_in_bytes(basic_elem_type); |
kvn@508 | 554 | element_size = type2aelembytes(basic_elem_type); |
kvn@508 | 555 | } |
kvn@508 | 556 | } |
kvn@508 | 557 | // |
kvn@508 | 558 | // Process the safepoint uses |
kvn@508 | 559 | // |
kvn@508 | 560 | while (safepoints.length() > 0) { |
kvn@508 | 561 | SafePointNode* sfpt = safepoints.pop(); |
kvn@508 | 562 | Node* mem = sfpt->memory(); |
kvn@508 | 563 | uint first_ind = sfpt->req(); |
kvn@508 | 564 | SafePointScalarObjectNode* sobj = new (C, 1) SafePointScalarObjectNode(res_type, |
kvn@508 | 565 | #ifdef ASSERT |
kvn@508 | 566 | alloc, |
kvn@508 | 567 | #endif |
kvn@508 | 568 | first_ind, nfields); |
kvn@508 | 569 | sobj->init_req(0, sfpt->in(TypeFunc::Control)); |
kvn@508 | 570 | transform_later(sobj); |
kvn@508 | 571 | |
kvn@508 | 572 | // Scan object's fields adding an input to the safepoint for each field. |
kvn@508 | 573 | for (int j = 0; j < nfields; j++) { |
kvn@508 | 574 | int offset; |
kvn@508 | 575 | ciField* field = NULL; |
kvn@508 | 576 | if (iklass != NULL) { |
kvn@508 | 577 | field = iklass->nonstatic_field_at(j); |
kvn@508 | 578 | offset = field->offset(); |
kvn@508 | 579 | elem_type = field->type(); |
kvn@508 | 580 | basic_elem_type = field->layout_type(); |
kvn@508 | 581 | } else { |
kvn@508 | 582 | offset = array_base + j * element_size; |
kvn@508 | 583 | } |
kvn@508 | 584 | |
kvn@508 | 585 | const Type *field_type; |
kvn@508 | 586 | // The following code is adapted from Parse::do_get_xxx(). |
kvn@559 | 587 | if (basic_elem_type == T_OBJECT || basic_elem_type == T_ARRAY) { |
kvn@508 | 588 | if (!elem_type->is_loaded()) { |
kvn@508 | 589 | field_type = TypeInstPtr::BOTTOM; |
kvn@508 | 590 | } else if (field != NULL && field->is_constant()) { |
kvn@508 | 591 | // This can happen if the constant oop is non-perm. |
kvn@508 | 592 | ciObject* con = field->constant_value().as_object(); |
kvn@508 | 593 | // Do not "join" in the previous type; it doesn't add value, |
kvn@508 | 594 | // and may yield a vacuous result if the field is of interface type. |
kvn@508 | 595 | field_type = TypeOopPtr::make_from_constant(con)->isa_oopptr(); |
kvn@508 | 596 | assert(field_type != NULL, "field singleton type must be consistent"); |
kvn@508 | 597 | } else { |
kvn@508 | 598 | field_type = TypeOopPtr::make_from_klass(elem_type->as_klass()); |
kvn@508 | 599 | } |
kvn@559 | 600 | if (UseCompressedOops) { |
kvn@559 | 601 | field_type = field_type->is_oopptr()->make_narrowoop(); |
kvn@559 | 602 | basic_elem_type = T_NARROWOOP; |
kvn@559 | 603 | } |
kvn@508 | 604 | } else { |
kvn@508 | 605 | field_type = Type::get_const_basic_type(basic_elem_type); |
kvn@508 | 606 | } |
kvn@508 | 607 | |
kvn@508 | 608 | const TypeOopPtr *field_addr_type = res_type->add_offset(offset)->isa_oopptr(); |
kvn@508 | 609 | |
kvn@508 | 610 | Node *field_val = value_from_mem(mem, basic_elem_type, field_type, field_addr_type, alloc); |
kvn@508 | 611 | if (field_val == NULL) { |
kvn@508 | 612 | // We weren't able to find a value for this field, so |
kvn@508 | 613 | // give up on eliminating this allocation. |
kvn@508 | 614 | alloc->_is_scalar_replaceable = false; // don't try again |
kvn@508 | 615 | // remove any extra entries we added to the safepoint |
kvn@508 | 616 | uint last = sfpt->req() - 1; |
kvn@508 | 617 | for (int k = 0; k < j; k++) { |
kvn@508 | 618 | sfpt->del_req(last--); |
kvn@508 | 619 | } |
kvn@508 | 620 | // rollback processed safepoints |
kvn@508 | 621 | while (safepoints_done.length() > 0) { |
kvn@508 | 622 | SafePointNode* sfpt_done = safepoints_done.pop(); |
kvn@508 | 623 | // remove any extra entries we added to the safepoint |
kvn@508 | 624 | last = sfpt_done->req() - 1; |
kvn@508 | 625 | for (int k = 0; k < nfields; k++) { |
kvn@508 | 626 | sfpt_done->del_req(last--); |
kvn@508 | 627 | } |
kvn@508 | 628 | JVMState *jvms = sfpt_done->jvms(); |
kvn@508 | 629 | jvms->set_endoff(sfpt_done->req()); |
kvn@508 | 630 | // Now make a pass over the debug information replacing any references |
kvn@508 | 631 | // to SafePointScalarObjectNode with the allocated object. |
kvn@508 | 632 | int start = jvms->debug_start(); |
kvn@508 | 633 | int end = jvms->debug_end(); |
kvn@508 | 634 | for (int i = start; i < end; i++) { |
kvn@508 | 635 | if (sfpt_done->in(i)->is_SafePointScalarObject()) { |
kvn@508 | 636 | SafePointScalarObjectNode* scobj = sfpt_done->in(i)->as_SafePointScalarObject(); |
kvn@508 | 637 | if (scobj->first_index() == sfpt_done->req() && |
kvn@508 | 638 | scobj->n_fields() == (uint)nfields) { |
kvn@508 | 639 | assert(scobj->alloc() == alloc, "sanity"); |
kvn@508 | 640 | sfpt_done->set_req(i, res); |
kvn@508 | 641 | } |
kvn@508 | 642 | } |
kvn@508 | 643 | } |
kvn@508 | 644 | } |
kvn@508 | 645 | #ifndef PRODUCT |
kvn@508 | 646 | if (PrintEliminateAllocations) { |
kvn@508 | 647 | if (field != NULL) { |
kvn@508 | 648 | tty->print("=== At SafePoint node %d can't find value of Field: ", |
kvn@508 | 649 | sfpt->_idx); |
kvn@508 | 650 | field->print(); |
kvn@508 | 651 | int field_idx = C->get_alias_index(field_addr_type); |
kvn@508 | 652 | tty->print(" (alias_idx=%d)", field_idx); |
kvn@508 | 653 | } else { // Array's element |
kvn@508 | 654 | tty->print("=== At SafePoint node %d can't find value of array element [%d]", |
kvn@508 | 655 | sfpt->_idx, j); |
kvn@508 | 656 | } |
kvn@508 | 657 | tty->print(", which prevents elimination of: "); |
kvn@508 | 658 | if (res == NULL) |
kvn@508 | 659 | alloc->dump(); |
kvn@508 | 660 | else |
kvn@508 | 661 | res->dump(); |
kvn@508 | 662 | } |
kvn@508 | 663 | #endif |
kvn@508 | 664 | return false; |
kvn@508 | 665 | } |
kvn@559 | 666 | if (UseCompressedOops && field_type->isa_narrowoop()) { |
kvn@559 | 667 | // Enable "DecodeN(EncodeP(Allocate)) --> Allocate" transformation |
kvn@559 | 668 | // to be able to scalar-replace the allocation. |
kvn@559 | 669 | _igvn.set_delay_transform(false); |
kvn@559 | 670 | field_val = DecodeNNode::decode(&_igvn, field_val); |
kvn@559 | 671 | _igvn.set_delay_transform(true); |
kvn@559 | 672 | } |
kvn@508 | 673 | sfpt->add_req(field_val); |
kvn@508 | 674 | } |
kvn@508 | 675 | JVMState *jvms = sfpt->jvms(); |
kvn@508 | 676 | jvms->set_endoff(sfpt->req()); |
kvn@508 | 677 | // Now make a pass over the debug information replacing any references |
kvn@508 | 678 | // to the allocated object with "sobj" |
kvn@508 | 679 | int start = jvms->debug_start(); |
kvn@508 | 680 | int end = jvms->debug_end(); |
kvn@508 | 681 | for (int i = start; i < end; i++) { |
kvn@508 | 682 | if (sfpt->in(i) == res) { |
kvn@508 | 683 | sfpt->set_req(i, sobj); |
kvn@508 | 684 | } |
kvn@508 | 685 | } |
kvn@508 | 686 | safepoints_done.append_if_missing(sfpt); // keep it for rollback |
kvn@508 | 687 | } |
kvn@508 | 688 | return true; |
kvn@508 | 689 | } |
kvn@508 | 690 | |
kvn@508 | 691 | // Process users of eliminated allocation. |
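// At this point all remaining uses are dead: field stores are replaced by
// their memory inputs, card marks are eliminated, the Initialize node's
// control and memory projections are rewired around the allocation, and the
// allocation's own projections are replaced by its inputs (or by top() for
// the catch-all paths).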
kvn@508 | 692 | void PhaseMacroExpand::process_users_of_allocation(AllocateNode *alloc) { |
kvn@508 | 693 | Node* res = alloc->result_cast(); |
kvn@508 | 694 | if (res != NULL) { |
kvn@508 | 695 | for (DUIterator_Last jmin, j = res->last_outs(jmin); j >= jmin; ) { |
kvn@508 | 696 | Node *use = res->last_out(j); |
kvn@508 | 697 | uint oc1 = res->outcnt(); |
kvn@508 | 698 | |
kvn@508 | 699 | if (use->is_AddP()) { |
kvn@508 | 700 | for (DUIterator_Last kmin, k = use->last_outs(kmin); k >= kmin; ) { |
kvn@508 | 701 | Node *n = use->last_out(k); |
kvn@508 | 702 | uint oc2 = use->outcnt(); |
kvn@508 | 703 | if (n->is_Store()) { |
kvn@508 | 704 | _igvn.replace_node(n, n->in(MemNode::Memory)); |
kvn@508 | 705 | } else { |
kvn@508 | 706 | assert( n->Opcode() == Op_CastP2X, "CastP2X required"); |
kvn@508 | 707 | eliminate_card_mark(n); |
kvn@508 | 708 | } |
kvn@508 | 709 | k -= (oc2 - use->outcnt()); |
kvn@508 | 710 | } |
kvn@508 | 711 | } else { |
kvn@508 | 712 | assert( !use->is_SafePoint(), "safepoint uses must already have been eliminated"); |
kvn@508 | 713 | assert( use->Opcode() == Op_CastP2X, "CastP2X required"); |
kvn@508 | 714 | eliminate_card_mark(use); |
kvn@508 | 715 | } |
kvn@508 | 716 | j -= (oc1 - res->outcnt()); |
kvn@508 | 717 | } |
kvn@508 | 718 | assert(res->outcnt() == 0, "all uses of allocated objects must be deleted"); |
kvn@508 | 719 | _igvn.remove_dead_node(res); |
kvn@508 | 720 | } |
kvn@508 | 721 | |
kvn@508 | 722 | // |
kvn@508 | 723 | // Process other users of allocation's projections |
kvn@508 | 724 | // |
kvn@508 | 725 | if (_resproj != NULL && _resproj->outcnt() != 0) { |
kvn@508 | 726 | for (DUIterator_Last jmin, j = _resproj->last_outs(jmin); j >= jmin; ) { |
kvn@508 | 727 | Node *use = _resproj->last_out(j); |
kvn@508 | 728 | uint oc1 = _resproj->outcnt(); |
kvn@508 | 729 | if (use->is_Initialize()) { |
kvn@508 | 730 | // Eliminate Initialize node. |
kvn@508 | 731 | InitializeNode *init = use->as_Initialize(); |
kvn@508 | 732 | assert(init->outcnt() <= 2, "only a control and memory projection expected"); |
kvn@508 | 733 | Node *ctrl_proj = init->proj_out(TypeFunc::Control); |
kvn@508 | 734 | if (ctrl_proj != NULL) { |
kvn@508 | 735 | assert(init->in(TypeFunc::Control) == _fallthroughcatchproj, "allocation control projection"); |
kvn@508 | 736 | _igvn.replace_node(ctrl_proj, _fallthroughcatchproj); |
kvn@508 | 737 | } |
kvn@508 | 738 | Node *mem_proj = init->proj_out(TypeFunc::Memory); |
kvn@508 | 739 | if (mem_proj != NULL) { |
kvn@508 | 740 | Node *mem = init->in(TypeFunc::Memory); |
kvn@508 | 741 | #ifdef ASSERT |
kvn@508 | 742 | if (mem->is_MergeMem()) { |
kvn@508 | 743 | assert(mem->in(TypeFunc::Memory) == _memproj_fallthrough, "allocation memory projection"); |
kvn@508 | 744 | } else { |
kvn@508 | 745 | assert(mem == _memproj_fallthrough, "allocation memory projection"); |
kvn@508 | 746 | } |
kvn@508 | 747 | #endif |
kvn@508 | 748 | _igvn.replace_node(mem_proj, mem); |
kvn@508 | 749 | } |
kvn@508 | 750 | } else if (use->is_AddP()) { |
kvn@508 | 751 | // raw memory addresses used only by the initialization |
kvn@508 | 752 | _igvn.hash_delete(use); |
kvn@508 | 753 | _igvn.subsume_node(use, C->top()); |
kvn@508 | 754 | } else { |
kvn@508 | 755 | assert(false, "only Initialize or AddP expected"); |
kvn@508 | 756 | } |
kvn@508 | 757 | j -= (oc1 - _resproj->outcnt()); |
kvn@508 | 758 | } |
kvn@508 | 759 | } |
kvn@508 | 760 | if (_fallthroughcatchproj != NULL) { |
kvn@508 | 761 | _igvn.replace_node(_fallthroughcatchproj, alloc->in(TypeFunc::Control)); |
kvn@508 | 762 | } |
kvn@508 | 763 | if (_memproj_fallthrough != NULL) { |
kvn@508 | 764 | _igvn.replace_node(_memproj_fallthrough, alloc->in(TypeFunc::Memory)); |
kvn@508 | 765 | } |
kvn@508 | 766 | if (_memproj_catchall != NULL) { |
kvn@508 | 767 | _igvn.replace_node(_memproj_catchall, C->top()); |
kvn@508 | 768 | } |
kvn@508 | 769 | if (_ioproj_fallthrough != NULL) { |
kvn@508 | 770 | _igvn.replace_node(_ioproj_fallthrough, alloc->in(TypeFunc::I_O)); |
kvn@508 | 771 | } |
kvn@508 | 772 | if (_ioproj_catchall != NULL) { |
kvn@508 | 773 | _igvn.replace_node(_ioproj_catchall, C->top()); |
kvn@508 | 774 | } |
kvn@508 | 775 | if (_catchallcatchproj != NULL) { |
kvn@508 | 776 | _igvn.replace_node(_catchallcatchproj, C->top()); |
kvn@508 | 777 | } |
kvn@508 | 778 | } |
kvn@508 | 779 | |
kvn@508 | 780 | bool PhaseMacroExpand::eliminate_allocate_node(AllocateNode *alloc) { |
kvn@508 | 781 | |
kvn@508 | 782 | if (!EliminateAllocations || !alloc->_is_scalar_replaceable) { |
kvn@508 | 783 | return false; |
kvn@508 | 784 | } |
kvn@508 | 785 | |
kvn@508 | 786 | extract_call_projections(alloc); |
kvn@508 | 787 | |
kvn@508 | 788 | GrowableArray <SafePointNode *> safepoints; |
kvn@508 | 789 | if (!can_eliminate_allocation(alloc, safepoints)) { |
kvn@508 | 790 | return false; |
kvn@508 | 791 | } |
kvn@508 | 792 | |
kvn@508 | 793 | if (!scalar_replacement(alloc, safepoints)) { |
kvn@508 | 794 | return false; |
kvn@508 | 795 | } |
kvn@508 | 796 | |
kvn@508 | 797 | process_users_of_allocation(alloc); |
kvn@508 | 798 | |
kvn@508 | 799 | #ifndef PRODUCT |
kvn@508 | 800 | if (PrintEliminateAllocations) { |
kvn@508 | 801 | if (alloc->is_AllocateArray()) |
kvn@508 | 802 | tty->print_cr("++++ Eliminated: %d AllocateArray", alloc->_idx); |
kvn@508 | 803 | else |
kvn@508 | 804 | tty->print_cr("++++ Eliminated: %d Allocate", alloc->_idx); |
kvn@508 | 805 | } |
kvn@508 | 806 | #endif |
kvn@508 | 807 | |
kvn@508 | 808 | return true; |
kvn@508 | 809 | } |
kvn@508 | 810 | |
duke@435 | 811 | |
duke@435 | 812 | //---------------------------set_eden_pointers------------------------- |
duke@435 | 813 | void PhaseMacroExpand::set_eden_pointers(Node* &eden_top_adr, Node* &eden_end_adr) { |
duke@435 | 814 | if (UseTLAB) { // Private allocation: load from TLS |
duke@435 | 815 | Node* thread = transform_later(new (C, 1) ThreadLocalNode()); |
duke@435 | 816 | int tlab_top_offset = in_bytes(JavaThread::tlab_top_offset()); |
duke@435 | 817 | int tlab_end_offset = in_bytes(JavaThread::tlab_end_offset()); |
duke@435 | 818 | eden_top_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_top_offset); |
duke@435 | 819 | eden_end_adr = basic_plus_adr(top()/*not oop*/, thread, tlab_end_offset); |
duke@435 | 820 | } else { // Shared allocation: load from globals |
duke@435 | 821 | CollectedHeap* ch = Universe::heap(); |
duke@435 | 822 | address top_adr = (address)ch->top_addr(); |
duke@435 | 823 | address end_adr = (address)ch->end_addr(); |
duke@435 | 824 | eden_top_adr = makecon(TypeRawPtr::make(top_adr)); |
duke@435 | 825 | eden_end_adr = basic_plus_adr(eden_top_adr, end_adr - top_adr); |
duke@435 | 826 | } |
duke@435 | 827 | } |
duke@435 | 828 | |
duke@435 | 829 | |
duke@435 | 830 | Node* PhaseMacroExpand::make_load(Node* ctl, Node* mem, Node* base, int offset, const Type* value_type, BasicType bt) { |
duke@435 | 831 | Node* adr = basic_plus_adr(base, offset); |
duke@435 | 832 | const TypePtr* adr_type = TypeRawPtr::BOTTOM; |
coleenp@548 | 833 | Node* value = LoadNode::make(_igvn, ctl, mem, adr, adr_type, value_type, bt); |
duke@435 | 834 | transform_later(value); |
duke@435 | 835 | return value; |
duke@435 | 836 | } |
duke@435 | 837 | |
duke@435 | 838 | |
duke@435 | 839 | Node* PhaseMacroExpand::make_store(Node* ctl, Node* mem, Node* base, int offset, Node* value, BasicType bt) { |
duke@435 | 840 | Node* adr = basic_plus_adr(base, offset); |
coleenp@548 | 841 | mem = StoreNode::make(_igvn, ctl, mem, adr, NULL, value, bt); |
duke@435 | 842 | transform_later(mem); |
duke@435 | 843 | return mem; |
duke@435 | 844 | } |
duke@435 | 845 | |
duke@435 | 846 | //============================================================================= |
duke@435 | 847 | // |
duke@435 | 848 | // A L L O C A T I O N |
duke@435 | 849 | // |
duke@435 | 850 | // Allocation attempts to be fast in the case of frequent small objects. |
duke@435 | 851 | // It breaks down like this: |
duke@435 | 852 | // |
duke@435 | 853 | // 1) Size in doublewords is computed. This is a constant for objects and |
duke@435 | 854 | // variable for most arrays. Doubleword units are used to avoid size |
duke@435 | 855 | // overflow of huge doubleword arrays. We need doublewords in the end for |
duke@435 | 856 | // rounding. |
duke@435 | 857 | // |
duke@435 | 858 | // 2) Size is checked for being 'too large'. Too-large allocations will go |
duke@435 | 859 | // the slow path into the VM. The slow path can throw any required |
duke@435 | 860 | // exceptions, and does all the special checks for very large arrays. The |
duke@435 | 861 | // size test can constant-fold away for objects. For objects with |
duke@435 | 862 | // finalizers it constant-folds the other way: you always go slow with |
duke@435 | 863 | // finalizers. |
duke@435 | 864 | // |
duke@435 | 865 | // 3) If NOT using TLABs, this is the contended loop-back point. |
duke@435 | 866 | // Load-Locked the heap top. If using TLABs normal-load the heap top. |
duke@435 | 867 | // |
duke@435 | 868 | // 4) Check that heap top + size*8 < max. If we fail, go the slow route. |
duke@435 | 869 | // NOTE: "top+size*8" cannot wrap the 4Gig line! Here's why: for largish |
duke@435 | 870 | // "size*8" we always enter the VM, where "largish" is a constant picked small |
duke@435 | 871 | // enough that there's always space between the eden max and 4Gig (old space is |
duke@435 | 872 | // there so it's quite large) and large enough that the cost of entering the VM |
duke@435 | 873 | // is dwarfed by the cost to initialize the space. |
duke@435 | 874 | // |
duke@435 | 875 | // 5) If NOT using TLABs, Store-Conditional the adjusted heap top back |
duke@435 | 876 | // down. If contended, repeat at step 3. If using TLABs normal-store |
duke@435 | 877 | // adjusted heap top back down; there is no contention. |
duke@435 | 878 | // |
duke@435 | 879 | // 6) If !ZeroTLAB then Bulk-clear the object/array. Fill in klass & mark |
duke@435 | 880 | // fields. |
duke@435 | 881 | // |
duke@435 | 882 | // 7) Merge with the slow-path; cast the raw memory pointer to the correct |
duke@435 | 883 | // oop flavor. |
duke@435 | 884 | // |
duke@435 | 885 | //============================================================================= |
duke@435 | 886 | // FastAllocateSizeLimit value is in DOUBLEWORDS. |
duke@435 | 887 | // Allocations bigger than this always go the slow route. |
duke@435 | 888 | // This value must be small enough that allocation attempts that need to |
duke@435 | 889 | // trigger exceptions go the slow route. Also, it must be small enough so |
duke@435 | 890 | // that heap_top + size_in_bytes does not wrap around the 4Gig limit. |
duke@435 | 891 | //============================================================================= |
duke@435 | 892 | // %%% Here is an old comment from parseHelper.cpp; is it outdated? |
duke@435 | 893 | // The allocator will coalesce int->oop copies away. See comment in |
duke@435 | 894 | // coalesce.cpp about how this works. It depends critically on the exact |
duke@435 | 895 | // code shape produced here, so if you are changing this code shape |
duke@435 | 896 | // make sure the GC info for the heap-top is correct in and around the |
duke@435 | 897 | // slow-path call. |
duke@435 | 898 | // |
duke@435 | 899 | |
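// In rough pseudocode (names here are illustrative), the TLAB fast path
// built by expand_allocate_common() below is:
//
//   if (initial_slow_test) goto slow_path;     // step 2: too big / finalizer
//   top     = load(tlab_top_adr);              // step 3: plain load with TLAB
//   new_top = top + size_in_bytes;
//   if (new_top >= load(tlab_end_adr))         // step 4: need GC?
//     goto slow_path;
//   store(tlab_top_adr, new_top);              // step 5: no contention
//   ... clear body unless ZeroTLAB, set mark and klass ...   // step 6
//   result = (oop) top;                        // step 7
//
// Without TLABs, the load is Load-Locked, the store is Store-Conditional,
// and a failed store loops back to step 3.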
duke@435 | 900 | void PhaseMacroExpand::expand_allocate_common( |
duke@435 | 901 | AllocateNode* alloc, // allocation node to be expanded |
duke@435 | 902 | Node* length, // array length for an array allocation |
duke@435 | 903 | const TypeFunc* slow_call_type, // Type of slow call |
duke@435 | 904 | address slow_call_address // Address of slow call |
duke@435 | 905 | ) |
duke@435 | 906 | { |
duke@435 | 907 | |
duke@435 | 908 | Node* ctrl = alloc->in(TypeFunc::Control); |
duke@435 | 909 | Node* mem = alloc->in(TypeFunc::Memory); |
duke@435 | 910 | Node* i_o = alloc->in(TypeFunc::I_O); |
duke@435 | 911 | Node* size_in_bytes = alloc->in(AllocateNode::AllocSize); |
duke@435 | 912 | Node* klass_node = alloc->in(AllocateNode::KlassNode); |
duke@435 | 913 | Node* initial_slow_test = alloc->in(AllocateNode::InitialTest); |
duke@435 | 914 | |
kvn@508 | 915 | // With escape analysis, the entire memory state was needed to be able to |
kvn@508 | 916 | // eliminate the allocation. Since this allocation cannot be eliminated, |
kvn@508 | 917 | // optimize its memory input to the raw slice. |
kvn@508 | 918 | if (mem->is_MergeMem()) { |
kvn@508 | 919 | mem = mem->as_MergeMem()->memory_at(Compile::AliasIdxRaw); |
kvn@508 | 920 | } |
kvn@508 | 921 | |
duke@435 | 922 | assert(ctrl != NULL, "must have control"); |
duke@435 | 923 | // We need a Region and corresponding Phi's to merge the slow-path and fast-path results. |
duke@435 | 924 | // They will not be used if "always_slow" is set. |
duke@435 | 925 | enum { slow_result_path = 1, fast_result_path = 2 }; |
duke@435 | 926 | Node *result_region; |
duke@435 | 927 | Node *result_phi_rawmem; |
duke@435 | 928 | Node *result_phi_rawoop; |
duke@435 | 929 | Node *result_phi_i_o; |
duke@435 | 930 | |
duke@435 | 931 | // The initial slow comparison is a size check; the comparison |
duke@435 | 932 | // we want to do is a BoolTest::gt. |
duke@435 | 933 | bool always_slow = false; |
duke@435 | 934 | int tv = _igvn.find_int_con(initial_slow_test, -1); |
duke@435 | 935 | if (tv >= 0) { |
duke@435 | 936 | always_slow = (tv == 1); |
duke@435 | 937 | initial_slow_test = NULL; |
duke@435 | 938 | } else { |
duke@435 | 939 | initial_slow_test = BoolNode::make_predicate(initial_slow_test, &_igvn); |
duke@435 | 940 | } |
duke@435 | 941 | |
ysr@777 | 942 | if (DTraceAllocProbes || |
ysr@777 | 943 | !UseTLAB && (!Universe::heap()->supports_inline_contig_alloc() || |
ysr@777 | 944 | (UseConcMarkSweepGC && CMSIncrementalMode))) { |
duke@435 | 945 | // Force slow-path allocation |
duke@435 | 946 | always_slow = true; |
duke@435 | 947 | initial_slow_test = NULL; |
duke@435 | 948 | } |
duke@435 | 949 | |
ysr@777 | 950 | |
duke@435 | 951 | enum { too_big_or_final_path = 1, need_gc_path = 2 }; |
duke@435 | 952 | Node *slow_region = NULL; |
duke@435 | 953 | Node *toobig_false = ctrl; |
duke@435 | 954 | |
duke@435 | 955 | assert (initial_slow_test == NULL || !always_slow, "arguments must be consistent"); |
duke@435 | 956 | // generate the initial test if necessary |
duke@435 | 957 | if (initial_slow_test != NULL ) { |
duke@435 | 958 | slow_region = new (C, 3) RegionNode(3); |
duke@435 | 959 | |
duke@435 | 960 | // Now make the initial failure test. Usually a too-big test but |
duke@435 | 961 | // might be a TRUE for finalizers or a fancy class check for |
duke@435 | 962 | // newInstance0. |
duke@435 | 963 | IfNode *toobig_iff = new (C, 2) IfNode(ctrl, initial_slow_test, PROB_MIN, COUNT_UNKNOWN); |
duke@435 | 964 | transform_later(toobig_iff); |
duke@435 | 965 | // Plug the failing-too-big test into the slow-path region |
duke@435 | 966 | Node *toobig_true = new (C, 1) IfTrueNode( toobig_iff ); |
duke@435 | 967 | transform_later(toobig_true); |
duke@435 | 968 | slow_region ->init_req( too_big_or_final_path, toobig_true ); |
duke@435 | 969 | toobig_false = new (C, 1) IfFalseNode( toobig_iff ); |
duke@435 | 970 | transform_later(toobig_false); |
duke@435 | 971 | } else { // No initial test, just fall into next case |
duke@435 | 972 | toobig_false = ctrl; |
duke@435 | 973 | debug_only(slow_region = NodeSentinel); |
duke@435 | 974 | } |
duke@435 | 975 | |
duke@435 | 976 | Node *slow_mem = mem; // save the current memory state for slow path |
duke@435 | 977 | // generate the fast allocation code unless we know that the initial test will always go slow |
duke@435 | 978 | if (!always_slow) { |
ysr@777 | 979 | Node* eden_top_adr; |
ysr@777 | 980 | Node* eden_end_adr; |
ysr@777 | 981 | |
ysr@777 | 982 | set_eden_pointers(eden_top_adr, eden_end_adr); |
ysr@777 | 983 | |
ysr@777 | 984 | // Load Eden::end. Loop invariant and hoisted. |
ysr@777 | 985 | // |
ysr@777 | 986 | // Note: We set the control input on "eden_end" and "old_eden_top" when using |
ysr@777 | 987 | // a TLAB to work around a bug where these values were being moved across |
ysr@777 | 988 | // a safepoint. These are not oops, so they cannot be included in the oop |
ysr@777 | 989 | // map, but they can be changed by a GC. The proper way to fix this would |
ysr@777 | 990 | // be to set the raw memory state when generating a SafepointNode. However |
ysr@777 | 991 | // this will require extensive changes to the loop optimization in order to |
ysr@777 | 992 | // prevent a degradation of the optimization. |
ysr@777 | 993 | // See comment in memnode.hpp, around line 227 in class LoadPNode. |
ysr@777 | 994 | Node *eden_end = make_load(ctrl, mem, eden_end_adr, 0, TypeRawPtr::BOTTOM, T_ADDRESS); |
ysr@777 | 995 | |
duke@435 | 996 | // allocate the Region and Phi nodes for the result |
duke@435 | 997 | result_region = new (C, 3) RegionNode(3); |
duke@435 | 998 | result_phi_rawmem = new (C, 3) PhiNode( result_region, Type::MEMORY, TypeRawPtr::BOTTOM ); |
duke@435 | 999 | result_phi_rawoop = new (C, 3) PhiNode( result_region, TypeRawPtr::BOTTOM ); |
duke@435 | 1000 | result_phi_i_o = new (C, 3) PhiNode( result_region, Type::ABIO ); // I/O is used for Prefetch |
duke@435 | 1001 | |
duke@435 | 1002 | // We need a Region for the loop-back contended case. |
duke@435 | 1003 | enum { fall_in_path = 1, contended_loopback_path = 2 }; |
duke@435 | 1004 | Node *contended_region; |
duke@435 | 1005 | Node *contended_phi_rawmem; |
duke@435 | 1006 | if( UseTLAB ) { |
duke@435 | 1007 | contended_region = toobig_false; |
duke@435 | 1008 | contended_phi_rawmem = mem; |
duke@435 | 1009 | } else { |
duke@435 | 1010 | contended_region = new (C, 3) RegionNode(3); |
duke@435 | 1011 | contended_phi_rawmem = new (C, 3) PhiNode( contended_region, Type::MEMORY, TypeRawPtr::BOTTOM); |
duke@435 | 1012 | // Now handle the passing-too-big test. We fall into the contended |
duke@435 | 1013 | // loop-back merge point. |
duke@435 | 1014 | contended_region ->init_req( fall_in_path, toobig_false ); |
duke@435 | 1015 | contended_phi_rawmem->init_req( fall_in_path, mem ); |
duke@435 | 1016 | transform_later(contended_region); |
duke@435 | 1017 | transform_later(contended_phi_rawmem); |
duke@435 | 1018 | } |
duke@435 | 1019 | |
duke@435 | 1020 | // Load(-locked) the heap top. |
duke@435 | 1021 | // See note above concerning the control input when using a TLAB |
duke@435 | 1022 | Node *old_eden_top = UseTLAB |
duke@435 | 1023 | ? new (C, 3) LoadPNode ( ctrl, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ) |
duke@435 | 1024 | : new (C, 3) LoadPLockedNode( contended_region, contended_phi_rawmem, eden_top_adr ); |
duke@435 | 1025 | |
duke@435 | 1026 | transform_later(old_eden_top); |
duke@435 | 1027 | // Add to heap top to get a new heap top |
duke@435 | 1028 | Node *new_eden_top = new (C, 4) AddPNode( top(), old_eden_top, size_in_bytes ); |
duke@435 | 1029 | transform_later(new_eden_top); |
duke@435 | 1030 | // Check for needing a GC; compare against heap end |
duke@435 | 1031 | Node *needgc_cmp = new (C, 3) CmpPNode( new_eden_top, eden_end ); |
duke@435 | 1032 | transform_later(needgc_cmp); |
duke@435 | 1033 | Node *needgc_bol = new (C, 2) BoolNode( needgc_cmp, BoolTest::ge ); |
duke@435 | 1034 | transform_later(needgc_bol); |
duke@435 | 1035 | IfNode *needgc_iff = new (C, 2) IfNode(contended_region, needgc_bol, PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN ); |
duke@435 | 1036 | transform_later(needgc_iff); |
duke@435 | 1037 | |
duke@435 | 1038 | // Plug the failing-heap-space-need-gc test into the slow-path region |
duke@435 | 1039 | Node *needgc_true = new (C, 1) IfTrueNode( needgc_iff ); |
duke@435 | 1040 | transform_later(needgc_true); |
duke@435 | 1041 | if( initial_slow_test ) { |
duke@435 | 1042 | slow_region ->init_req( need_gc_path, needgc_true ); |
duke@435 | 1043 | // This completes all paths into the slow merge point |
duke@435 | 1044 | transform_later(slow_region); |
duke@435 | 1045 | } else { // No initial slow path needed! |
duke@435 | 1046 | // Just fall from the need-GC path straight into the VM call. |
duke@435 | 1047 | slow_region = needgc_true; |
duke@435 | 1048 | } |
duke@435 | 1049 | // No need for a GC. Setup for the Store-Conditional |
duke@435 | 1050 | Node *needgc_false = new (C, 1) IfFalseNode( needgc_iff ); |
duke@435 | 1051 | transform_later(needgc_false); |
duke@435 | 1052 | |
duke@435 | 1053 | // Grab regular I/O before optional prefetch may change it. |
duke@435 | 1054 | // Slow-path does no I/O so just set it to the original I/O. |
duke@435 | 1055 | result_phi_i_o->init_req( slow_result_path, i_o ); |
duke@435 | 1056 | |
duke@435 | 1057 | i_o = prefetch_allocation(i_o, needgc_false, contended_phi_rawmem, |
duke@435 | 1058 | old_eden_top, new_eden_top, length); |
duke@435 | 1059 | |
duke@435 | 1060 | // Store (-conditional) the modified eden top back down. |
duke@435 | 1061 | // StorePConditional produces flags for a test PLUS a modified raw |
duke@435 | 1062 | // memory state. |
duke@435 | 1063 | Node *store_eden_top; |
duke@435 | 1064 | Node *fast_oop_ctrl; |
duke@435 | 1065 | if( UseTLAB ) { |
duke@435 | 1066 | store_eden_top = new (C, 4) StorePNode( needgc_false, contended_phi_rawmem, eden_top_adr, TypeRawPtr::BOTTOM, new_eden_top ); |
duke@435 | 1067 | transform_later(store_eden_top); |
duke@435 | 1068 | fast_oop_ctrl = needgc_false; // No contention, so this is the fast path |
duke@435 | 1069 | } else { |
duke@435 | 1070 | store_eden_top = new (C, 5) StorePConditionalNode( needgc_false, contended_phi_rawmem, eden_top_adr, new_eden_top, old_eden_top ); |
duke@435 | 1071 | transform_later(store_eden_top); |
duke@435 | 1072 | Node *contention_check = new (C, 2) BoolNode( store_eden_top, BoolTest::ne ); |
duke@435 | 1073 | transform_later(contention_check); |
duke@435 | 1074 | store_eden_top = new (C, 1) SCMemProjNode(store_eden_top); |
duke@435 | 1075 | transform_later(store_eden_top); |
duke@435 | 1076 | |
duke@435 | 1077 | // If not using TLABs, check to see if there was contention. |
duke@435 | 1078 | IfNode *contention_iff = new (C, 2) IfNode ( needgc_false, contention_check, PROB_MIN, COUNT_UNKNOWN ); |
duke@435 | 1079 | transform_later(contention_iff); |
duke@435 | 1080 | Node *contention_true = new (C, 1) IfTrueNode( contention_iff ); |
duke@435 | 1081 | transform_later(contention_true); |
duke@435 | 1082 | // If contention, loopback and try again. |
duke@435 | 1083 | contended_region->init_req( contended_loopback_path, contention_true ); |
duke@435 | 1084 | contended_phi_rawmem->init_req( contended_loopback_path, store_eden_top ); |
duke@435 | 1085 | |
duke@435 | 1086 | // Fast-path succeeded with no contention! |
duke@435 | 1087 | Node *contention_false = new (C, 1) IfFalseNode( contention_iff ); |
duke@435 | 1088 | transform_later(contention_false); |
duke@435 | 1089 | fast_oop_ctrl = contention_false; |
duke@435 | 1090 | } |
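// The two store strategies above, in pseudo-code (a sketch):
//
//   TLAB (thread-private top, no contention possible):
//     *eden_top_adr = new_eden_top;                  // plain StoreP
//
//   Shared eden (other threads may race on the top pointer):
//     if (!CAS(eden_top_adr, old_eden_top, new_eden_top))
//       goto contended_region;                       // loop back, retry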
duke@435 | 1091 | |
duke@435 | 1092 | // Rename successful fast-path variables to make meaning more obvious |
duke@435 | 1093 | Node* fast_oop = old_eden_top; |
duke@435 | 1094 | Node* fast_oop_rawmem = store_eden_top; |
duke@435 | 1095 | fast_oop_rawmem = initialize_object(alloc, |
duke@435 | 1096 | fast_oop_ctrl, fast_oop_rawmem, fast_oop, |
duke@435 | 1097 | klass_node, length, size_in_bytes); |
duke@435 | 1098 | |
duke@435 | 1099 | if (ExtendedDTraceProbes) { |
duke@435 | 1100 | // Slow-path call |
duke@435 | 1101 | int size = TypeFunc::Parms + 2; |
duke@435 | 1102 | CallLeafNode *call = new (C, size) CallLeafNode(OptoRuntime::dtrace_object_alloc_Type(), |
duke@435 | 1103 | CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc_base), |
duke@435 | 1104 | "dtrace_object_alloc", |
duke@435 | 1105 | TypeRawPtr::BOTTOM); |
duke@435 | 1106 | |
duke@435 | 1107 | // Get base of thread-local storage area |
duke@435 | 1108 | Node* thread = new (C, 1) ThreadLocalNode(); |
duke@435 | 1109 | transform_later(thread); |
duke@435 | 1110 | |
duke@435 | 1111 | call->init_req(TypeFunc::Parms+0, thread); |
duke@435 | 1112 | call->init_req(TypeFunc::Parms+1, fast_oop); |
duke@435 | 1113 | call->init_req( TypeFunc::Control, fast_oop_ctrl ); |
duke@435 | 1114 | call->init_req( TypeFunc::I_O , top() ); // does no i/o
duke@435 | 1115 | call->init_req( TypeFunc::Memory , fast_oop_rawmem ); |
duke@435 | 1116 | call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) ); |
duke@435 | 1117 | call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) ); |
duke@435 | 1118 | transform_later(call); |
duke@435 | 1119 | fast_oop_ctrl = new (C, 1) ProjNode(call,TypeFunc::Control); |
duke@435 | 1120 | transform_later(fast_oop_ctrl); |
duke@435 | 1121 | fast_oop_rawmem = new (C, 1) ProjNode(call,TypeFunc::Memory); |
duke@435 | 1122 | transform_later(fast_oop_rawmem); |
duke@435 | 1123 | } |
duke@435 | 1124 | |
duke@435 | 1125 | // Plug in the successful fast-path into the result merge point |
duke@435 | 1126 | result_region ->init_req( fast_result_path, fast_oop_ctrl ); |
duke@435 | 1127 | result_phi_rawoop->init_req( fast_result_path, fast_oop ); |
duke@435 | 1128 | result_phi_i_o ->init_req( fast_result_path, i_o ); |
duke@435 | 1129 | result_phi_rawmem->init_req( fast_result_path, fast_oop_rawmem ); |
duke@435 | 1130 | } else { |
duke@435 | 1131 | slow_region = ctrl; |
duke@435 | 1132 | } |
duke@435 | 1133 | |
duke@435 | 1134 | // Generate slow-path call |
duke@435 | 1135 | CallNode *call = new (C, slow_call_type->domain()->cnt()) |
duke@435 | 1136 | CallStaticJavaNode(slow_call_type, slow_call_address, |
duke@435 | 1137 | OptoRuntime::stub_name(slow_call_address), |
duke@435 | 1138 | alloc->jvms()->bci(), |
duke@435 | 1139 | TypePtr::BOTTOM); |
duke@435 | 1140 | call->init_req( TypeFunc::Control, slow_region ); |
duke@435 | 1141 | call->init_req( TypeFunc::I_O , top() ); // does no i/o
duke@435 | 1142 | call->init_req( TypeFunc::Memory , slow_mem ); // may gc ptrs |
duke@435 | 1143 | call->init_req( TypeFunc::ReturnAdr, alloc->in(TypeFunc::ReturnAdr) ); |
duke@435 | 1144 | call->init_req( TypeFunc::FramePtr, alloc->in(TypeFunc::FramePtr) ); |
duke@435 | 1145 | |
duke@435 | 1146 | call->init_req(TypeFunc::Parms+0, klass_node); |
duke@435 | 1147 | if (length != NULL) { |
duke@435 | 1148 | call->init_req(TypeFunc::Parms+1, length); |
duke@435 | 1149 | } |
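// The Java-level arguments of the slow call are the klass and, for arrays
// only, the length; roughly:
//
//   instance:  new_instance(klass)
//   array:     new_array(klass, length)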
duke@435 | 1150 | |
duke@435 | 1151 | // Copy debug information and adjust JVMState information, then replace |
duke@435 | 1152 | // allocate node with the call |
duke@435 | 1153 | copy_call_debug_info((CallNode *) alloc, call); |
duke@435 | 1154 | if (!always_slow) { |
duke@435 | 1155 | call->set_cnt(PROB_UNLIKELY_MAG(4)); // Same effect as RC_UNCOMMON. |
duke@435 | 1156 | } |
duke@435 | 1157 | _igvn.hash_delete(alloc); |
duke@435 | 1158 | _igvn.subsume_node(alloc, call); |
duke@435 | 1159 | transform_later(call); |
duke@435 | 1160 | |
duke@435 | 1161 | // Identify the output projections from the allocate node and |
duke@435 | 1162 | // adjust any references to them. |
duke@435 | 1163 | // The control and io projections look like: |
duke@435 | 1164 | // |
duke@435 | 1165 | // v---Proj(ctrl) <-----+ v---CatchProj(ctrl) |
duke@435 | 1166 | // Allocate Catch |
duke@435 | 1167 | // ^---Proj(io) <-------+ ^---CatchProj(io) |
duke@435 | 1168 | // |
duke@435 | 1169 | // We are interested in the CatchProj nodes. |
duke@435 | 1170 | // |
duke@435 | 1171 | extract_call_projections(call); |
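// extract_call_projections() caches the call's projections in fields
// (_fallthroughcatchproj, _memproj_fallthrough, _memproj_catchall,
// _ioproj_fallthrough, _ioproj_catchall, _resproj) which are rewired below.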
duke@435 | 1172 | |
duke@435 | 1173 | // An allocate node has separate memory projections for the uses on the control and i_o paths |
duke@435 | 1174 | // Replace uses of the control memory projection with result_phi_rawmem (unless we are only generating a slow call) |
duke@435 | 1175 | if (!always_slow && _memproj_fallthrough != NULL) { |
duke@435 | 1176 | for (DUIterator_Fast imax, i = _memproj_fallthrough->fast_outs(imax); i < imax; i++) { |
duke@435 | 1177 | Node *use = _memproj_fallthrough->fast_out(i); |
duke@435 | 1178 | _igvn.hash_delete(use); |
duke@435 | 1179 | imax -= replace_input(use, _memproj_fallthrough, result_phi_rawmem); |
duke@435 | 1180 | _igvn._worklist.push(use); |
duke@435 | 1181 | // back up iterator |
duke@435 | 1182 | --i; |
duke@435 | 1183 | } |
duke@435 | 1184 | } |
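// (Here and in the analogous loops below, "imax -= replace_input(...)"
// plus "--i" compensates for the def-use edges that replace_input() just
// deleted, so the DUIterator_Fast walk neither skips nor overruns uses.)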
duke@435 | 1185 | // Now change uses of _memproj_catchall to use _memproj_fallthrough and delete _memproj_catchall so |
duke@435 | 1186 | // we end up with a call that has only 1 memory projection |
duke@435 | 1187 | if (_memproj_catchall != NULL ) { |
duke@435 | 1188 | if (_memproj_fallthrough == NULL) { |
duke@435 | 1189 | _memproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::Memory); |
duke@435 | 1190 | transform_later(_memproj_fallthrough); |
duke@435 | 1191 | } |
duke@435 | 1192 | for (DUIterator_Fast imax, i = _memproj_catchall->fast_outs(imax); i < imax; i++) { |
duke@435 | 1193 | Node *use = _memproj_catchall->fast_out(i); |
duke@435 | 1194 | _igvn.hash_delete(use); |
duke@435 | 1195 | imax -= replace_input(use, _memproj_catchall, _memproj_fallthrough); |
duke@435 | 1196 | _igvn._worklist.push(use); |
duke@435 | 1197 | // back up iterator |
duke@435 | 1198 | --i; |
duke@435 | 1199 | } |
duke@435 | 1200 | } |
duke@435 | 1201 | |
duke@435 | 1202 | mem = result_phi_rawmem; |
duke@435 | 1203 | |
duke@435 | 1204 | // An allocate node has separate i_o projections for the uses on the control and i_o paths |
duke@435 | 1205 | // Replace uses of the control i_o projection with result_phi_i_o (unless we are only generating a slow call) |
duke@435 | 1206 | if (_ioproj_fallthrough == NULL) { |
duke@435 | 1207 | _ioproj_fallthrough = new (C, 1) ProjNode(call, TypeFunc::I_O); |
duke@435 | 1208 | transform_later(_ioproj_fallthrough); |
duke@435 | 1209 | } else if (!always_slow) { |
duke@435 | 1210 | for (DUIterator_Fast imax, i = _ioproj_fallthrough->fast_outs(imax); i < imax; i++) { |
duke@435 | 1211 | Node *use = _ioproj_fallthrough->fast_out(i); |
duke@435 | 1212 | |
duke@435 | 1213 | _igvn.hash_delete(use); |
duke@435 | 1214 | imax -= replace_input(use, _ioproj_fallthrough, result_phi_i_o); |
duke@435 | 1215 | _igvn._worklist.push(use); |
duke@435 | 1216 | // back up iterator |
duke@435 | 1217 | --i; |
duke@435 | 1218 | } |
duke@435 | 1219 | } |
duke@435 | 1220 | // Now change uses of _ioproj_catchall to use _ioproj_fallthrough and delete _ioproj_catchall so |
duke@435 | 1221 | // we end up with a call that has only 1 i_o projection
duke@435 | 1222 | if (_ioproj_catchall != NULL ) { |
duke@435 | 1223 | for (DUIterator_Fast imax, i = _ioproj_catchall->fast_outs(imax); i < imax; i++) { |
duke@435 | 1224 | Node *use = _ioproj_catchall->fast_out(i); |
duke@435 | 1225 | _igvn.hash_delete(use); |
duke@435 | 1226 | imax -= replace_input(use, _ioproj_catchall, _ioproj_fallthrough); |
duke@435 | 1227 | _igvn._worklist.push(use); |
duke@435 | 1228 | // back up iterator |
duke@435 | 1229 | --i; |
duke@435 | 1230 | } |
duke@435 | 1231 | } |
duke@435 | 1232 | |
duke@435 | 1233 | // if we generated only a slow call, we are done |
duke@435 | 1234 | if (always_slow) |
duke@435 | 1235 | return; |
duke@435 | 1236 |
duke@435 | 1238 | if (_fallthroughcatchproj != NULL) { |
duke@435 | 1239 | ctrl = _fallthroughcatchproj->clone(); |
duke@435 | 1240 | transform_later(ctrl); |
duke@435 | 1241 | _igvn.hash_delete(_fallthroughcatchproj); |
duke@435 | 1242 | _igvn.subsume_node(_fallthroughcatchproj, result_region); |
duke@435 | 1243 | } else { |
duke@435 | 1244 | ctrl = top(); |
duke@435 | 1245 | } |
duke@435 | 1246 | Node *slow_result; |
duke@435 | 1247 | if (_resproj == NULL) { |
duke@435 | 1248 | // no uses of the allocation result |
duke@435 | 1249 | slow_result = top(); |
duke@435 | 1250 | } else { |
duke@435 | 1251 | slow_result = _resproj->clone(); |
duke@435 | 1252 | transform_later(slow_result); |
duke@435 | 1253 | _igvn.hash_delete(_resproj); |
duke@435 | 1254 | _igvn.subsume_node(_resproj, result_phi_rawoop); |
duke@435 | 1255 | } |
duke@435 | 1256 | |
duke@435 | 1257 | // Plug slow-path into result merge point |
duke@435 | 1258 | result_region ->init_req( slow_result_path, ctrl ); |
duke@435 | 1259 | result_phi_rawoop->init_req( slow_result_path, slow_result); |
duke@435 | 1260 | result_phi_rawmem->init_req( slow_result_path, _memproj_fallthrough ); |
duke@435 | 1261 | transform_later(result_region); |
duke@435 | 1262 | transform_later(result_phi_rawoop); |
duke@435 | 1263 | transform_later(result_phi_rawmem); |
duke@435 | 1264 | transform_later(result_phi_i_o); |
duke@435 | 1265 | // This completes all paths into the result merge point |
duke@435 | 1266 | } |
duke@435 | 1267 | |
duke@435 | 1268 | |
duke@435 | 1269 | // Helper for PhaseMacroExpand::expand_allocate_common. |
duke@435 | 1270 | // Initializes the newly-allocated storage. |
duke@435 | 1271 | Node* |
duke@435 | 1272 | PhaseMacroExpand::initialize_object(AllocateNode* alloc, |
duke@435 | 1273 | Node* control, Node* rawmem, Node* object, |
duke@435 | 1274 | Node* klass_node, Node* length, |
duke@435 | 1275 | Node* size_in_bytes) { |
duke@435 | 1276 | InitializeNode* init = alloc->initialization(); |
duke@435 | 1277 | // Store the klass & mark bits |
duke@435 | 1278 | Node* mark_node = NULL; |
duke@435 | 1279 | // For now only enable fast locking for non-array types |
duke@435 | 1280 | if (UseBiasedLocking && (length == NULL)) { |
duke@435 | 1281 | mark_node = make_load(NULL, rawmem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeRawPtr::BOTTOM, T_ADDRESS); |
duke@435 | 1282 | } else { |
duke@435 | 1283 | mark_node = makecon(TypeRawPtr::make((address)markOopDesc::prototype())); |
duke@435 | 1284 | } |
duke@435 | 1285 | rawmem = make_store(control, rawmem, object, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); |
coleenp@548 | 1286 | |
duke@435 | 1287 | rawmem = make_store(control, rawmem, object, oopDesc::klass_offset_in_bytes(), klass_node, T_OBJECT); |
duke@435 | 1288 | int header_size = alloc->minimum_header_size(); // conservatively small |
duke@435 | 1289 | |
duke@435 | 1290 | // Array length |
duke@435 | 1291 | if (length != NULL) { // Arrays need length field |
duke@435 | 1292 | rawmem = make_store(control, rawmem, object, arrayOopDesc::length_offset_in_bytes(), length, T_INT); |
duke@435 | 1293 | // conservatively small header size: |
coleenp@548 | 1294 | header_size = arrayOopDesc::base_offset_in_bytes(T_BYTE); |
duke@435 | 1295 | ciKlass* k = _igvn.type(klass_node)->is_klassptr()->klass(); |
duke@435 | 1296 | if (k->is_array_klass()) // we know the exact header size in most cases: |
duke@435 | 1297 | header_size = Klass::layout_helper_header_size(k->layout_helper()); |
duke@435 | 1298 | } |
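// After the stores above the raw header is complete; the layout is roughly:
//
//   [ mark word | klass | length (arrays only) | body ... ]
//
// with the body starting at header_size bytes.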
duke@435 | 1299 | |
duke@435 | 1300 | // Clear the object body, if necessary. |
duke@435 | 1301 | if (init == NULL) { |
duke@435 | 1302 | // The init has somehow disappeared; be cautious and clear everything. |
duke@435 | 1303 | // |
duke@435 | 1304 | // This can happen if a node is allocated but an uncommon trap occurs |
duke@435 | 1305 | // immediately. In this case, the Initialize gets associated with the |
duke@435 | 1306 | // trap, and may be placed in a different (outer) loop, if the Allocate |
duke@435 | 1307 | // is in a loop. If the inner loop then gets unrolled (this is rare), there
duke@435 | 1308 | // can be two Allocates to one Initialize. The answer in all these
duke@435 | 1309 | // edge cases is safety first. It is always safe to clear immediately |
duke@435 | 1310 | // within an Allocate, and then (maybe or maybe not) clear some more later. |
duke@435 | 1311 | if (!ZeroTLAB) |
duke@435 | 1312 | rawmem = ClearArrayNode::clear_memory(control, rawmem, object, |
duke@435 | 1313 | header_size, size_in_bytes, |
duke@435 | 1314 | &_igvn); |
duke@435 | 1315 | } else { |
duke@435 | 1316 | if (!init->is_complete()) { |
duke@435 | 1317 | // Try to win by zeroing only what the init does not store. |
duke@435 | 1318 | // We can also try to do some peephole optimizations, |
duke@435 | 1319 | // such as combining some adjacent subword stores. |
duke@435 | 1320 | rawmem = init->complete_stores(control, rawmem, object, |
duke@435 | 1321 | header_size, size_in_bytes, &_igvn); |
duke@435 | 1322 | } |
duke@435 | 1323 | // We have no more use for this link, since the AllocateNode goes away: |
duke@435 | 1324 | init->set_req(InitializeNode::RawAddress, top()); |
duke@435 | 1325 | // (If we keep the link, it just confuses the register allocator, |
duke@435 | 1326 | // who thinks he sees a real use of the address by the membar.) |
duke@435 | 1327 | } |
duke@435 | 1328 | |
duke@435 | 1329 | return rawmem; |
duke@435 | 1330 | } |
duke@435 | 1331 | |
duke@435 | 1332 | // Generate prefetch instructions for next allocations. |
duke@435 | 1333 | Node* PhaseMacroExpand::prefetch_allocation(Node* i_o, Node*& needgc_false, |
duke@435 | 1334 | Node*& contended_phi_rawmem, |
duke@435 | 1335 | Node* old_eden_top, Node* new_eden_top, |
duke@435 | 1336 | Node* length) { |
duke@435 | 1337 | if( UseTLAB && AllocatePrefetchStyle == 2 ) { |
duke@435 | 1338 | // Generate prefetch allocation with watermark check. |
duke@435 | 1339 | // When an allocation hits the watermark, we will prefetch starting
duke@435 | 1340 | // at a "distance" away from the watermark.
duke@435 | 1341 | enum { fall_in_path = 1, pf_path = 2 }; |
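// Sketch of the watermark scheme, where tlab_pf_top holds this thread's
// next prefetch watermark:
//
//   if (new_eden_top >= pf_top) {                  // crossed the watermark
//     new_wm = pf_top + AllocatePrefetchDistance;  // advance the watermark
//     for (i = 0; i < AllocatePrefetchDistance / AllocatePrefetchStepSize; i++)
//       prefetch(new_wm + i * AllocatePrefetchStepSize);
//     pf_top = new_wm;
//   }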
duke@435 | 1342 | |
duke@435 | 1343 | Node *pf_region = new (C, 3) RegionNode(3); |
duke@435 | 1344 | Node *pf_phi_rawmem = new (C, 3) PhiNode( pf_region, Type::MEMORY, |
duke@435 | 1345 | TypeRawPtr::BOTTOM ); |
duke@435 | 1346 | // I/O is used for Prefetch |
duke@435 | 1347 | Node *pf_phi_abio = new (C, 3) PhiNode( pf_region, Type::ABIO ); |
duke@435 | 1348 | |
duke@435 | 1349 | Node *thread = new (C, 1) ThreadLocalNode(); |
duke@435 | 1350 | transform_later(thread); |
duke@435 | 1351 | |
duke@435 | 1352 | Node *eden_pf_adr = new (C, 4) AddPNode( top()/*not oop*/, thread, |
duke@435 | 1353 | _igvn.MakeConX(in_bytes(JavaThread::tlab_pf_top_offset())) ); |
duke@435 | 1354 | transform_later(eden_pf_adr); |
duke@435 | 1355 | |
duke@435 | 1356 | Node *old_pf_wm = new (C, 3) LoadPNode( needgc_false, |
duke@435 | 1357 | contended_phi_rawmem, eden_pf_adr, |
duke@435 | 1358 | TypeRawPtr::BOTTOM, TypeRawPtr::BOTTOM ); |
duke@435 | 1359 | transform_later(old_pf_wm); |
duke@435 | 1360 | |
duke@435 | 1361 | // check against new_eden_top |
duke@435 | 1362 | Node *need_pf_cmp = new (C, 3) CmpPNode( new_eden_top, old_pf_wm ); |
duke@435 | 1363 | transform_later(need_pf_cmp); |
duke@435 | 1364 | Node *need_pf_bol = new (C, 2) BoolNode( need_pf_cmp, BoolTest::ge ); |
duke@435 | 1365 | transform_later(need_pf_bol); |
duke@435 | 1366 | IfNode *need_pf_iff = new (C, 2) IfNode( needgc_false, need_pf_bol, |
duke@435 | 1367 | PROB_UNLIKELY_MAG(4), COUNT_UNKNOWN ); |
duke@435 | 1368 | transform_later(need_pf_iff); |
duke@435 | 1369 | |
duke@435 | 1370 | // true node, add prefetch distance
duke@435 | 1371 | Node *need_pf_true = new (C, 1) IfTrueNode( need_pf_iff ); |
duke@435 | 1372 | transform_later(need_pf_true); |
duke@435 | 1373 | |
duke@435 | 1374 | Node *need_pf_false = new (C, 1) IfFalseNode( need_pf_iff ); |
duke@435 | 1375 | transform_later(need_pf_false); |
duke@435 | 1376 | |
duke@435 | 1377 | Node *new_pf_wmt = new (C, 4) AddPNode( top(), old_pf_wm, |
duke@435 | 1378 | _igvn.MakeConX(AllocatePrefetchDistance) ); |
duke@435 | 1379 | transform_later(new_pf_wmt ); |
duke@435 | 1380 | new_pf_wmt->set_req(0, need_pf_true); |
duke@435 | 1381 | |
duke@435 | 1382 | Node *store_new_wmt = new (C, 4) StorePNode( need_pf_true, |
duke@435 | 1383 | contended_phi_rawmem, eden_pf_adr, |
duke@435 | 1384 | TypeRawPtr::BOTTOM, new_pf_wmt ); |
duke@435 | 1385 | transform_later(store_new_wmt); |
duke@435 | 1386 | |
duke@435 | 1387 | // adding prefetches |
duke@435 | 1388 | pf_phi_abio->init_req( fall_in_path, i_o ); |
duke@435 | 1389 | |
duke@435 | 1390 | Node *prefetch_adr; |
duke@435 | 1391 | Node *prefetch; |
duke@435 | 1392 | uint lines = AllocatePrefetchDistance / AllocatePrefetchStepSize; |
duke@435 | 1393 | uint step_size = AllocatePrefetchStepSize; |
duke@435 | 1394 | uint distance = 0; |
duke@435 | 1395 | |
duke@435 | 1396 | for ( uint i = 0; i < lines; i++ ) { |
duke@435 | 1397 | prefetch_adr = new (C, 4) AddPNode( old_pf_wm, new_pf_wmt, |
duke@435 | 1398 | _igvn.MakeConX(distance) ); |
duke@435 | 1399 | transform_later(prefetch_adr); |
duke@435 | 1400 | prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr ); |
duke@435 | 1401 | transform_later(prefetch); |
duke@435 | 1402 | distance += step_size; |
duke@435 | 1403 | i_o = prefetch; |
duke@435 | 1404 | } |
duke@435 | 1405 | pf_phi_abio->set_req( pf_path, i_o ); |
duke@435 | 1406 | |
duke@435 | 1407 | pf_region->init_req( fall_in_path, need_pf_false ); |
duke@435 | 1408 | pf_region->init_req( pf_path, need_pf_true ); |
duke@435 | 1409 | |
duke@435 | 1410 | pf_phi_rawmem->init_req( fall_in_path, contended_phi_rawmem ); |
duke@435 | 1411 | pf_phi_rawmem->init_req( pf_path, store_new_wmt ); |
duke@435 | 1412 | |
duke@435 | 1413 | transform_later(pf_region); |
duke@435 | 1414 | transform_later(pf_phi_rawmem); |
duke@435 | 1415 | transform_later(pf_phi_abio); |
duke@435 | 1416 | |
duke@435 | 1417 | needgc_false = pf_region; |
duke@435 | 1418 | contended_phi_rawmem = pf_phi_rawmem; |
duke@435 | 1419 | i_o = pf_phi_abio; |
duke@435 | 1420 | } else if( AllocatePrefetchStyle > 0 ) { |
duke@435 | 1421 | // Insert a prefetch for each allocation only on the fast-path |
duke@435 | 1422 | Node *prefetch_adr; |
duke@435 | 1423 | Node *prefetch; |
duke@435 | 1424 | // Generate several prefetch instructions only for arrays. |
duke@435 | 1425 | uint lines = (length != NULL) ? AllocatePrefetchLines : 1; |
duke@435 | 1426 | uint step_size = AllocatePrefetchStepSize; |
duke@435 | 1427 | uint distance = AllocatePrefetchDistance; |
duke@435 | 1428 | for ( uint i = 0; i < lines; i++ ) { |
duke@435 | 1429 | prefetch_adr = new (C, 4) AddPNode( old_eden_top, new_eden_top, |
duke@435 | 1430 | _igvn.MakeConX(distance) ); |
duke@435 | 1431 | transform_later(prefetch_adr); |
duke@435 | 1432 | prefetch = new (C, 3) PrefetchWriteNode( i_o, prefetch_adr ); |
duke@435 | 1433 | // Do not let it float too high, since if eden_top == eden_end, |
duke@435 | 1434 | // both might be null. |
duke@435 | 1435 | if( i == 0 ) { // Set control for first prefetch, next follows it |
duke@435 | 1436 | prefetch->init_req(0, needgc_false); |
duke@435 | 1437 | } |
duke@435 | 1438 | transform_later(prefetch); |
duke@435 | 1439 | distance += step_size; |
duke@435 | 1440 | i_o = prefetch; |
duke@435 | 1441 | } |
duke@435 | 1442 | } |
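// Style 1 sketch: with no watermark, simply prefetch the cache lines just
// past the freshly bumped top (new_eden_top + AllocatePrefetchDistance,
// stepping by AllocatePrefetchStepSize), warming memory for the next
// allocation rather than this one.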
duke@435 | 1443 | return i_o; |
duke@435 | 1444 | } |
duke@435 | 1445 | |
duke@435 | 1446 | |
duke@435 | 1447 | void PhaseMacroExpand::expand_allocate(AllocateNode *alloc) { |
duke@435 | 1448 | expand_allocate_common(alloc, NULL, |
duke@435 | 1449 | OptoRuntime::new_instance_Type(), |
duke@435 | 1450 | OptoRuntime::new_instance_Java()); |
duke@435 | 1451 | } |
duke@435 | 1452 | |
duke@435 | 1453 | void PhaseMacroExpand::expand_allocate_array(AllocateArrayNode *alloc) { |
duke@435 | 1454 | Node* length = alloc->in(AllocateNode::ALength); |
duke@435 | 1455 | expand_allocate_common(alloc, length, |
duke@435 | 1456 | OptoRuntime::new_array_Type(), |
duke@435 | 1457 | OptoRuntime::new_array_Java()); |
duke@435 | 1458 | } |
duke@435 | 1459 | |
duke@435 | 1460 | |
duke@435 | 1461 | // Once we have determined that this lock/unlock can be eliminated, we simply
duke@435 | 1462 | // eliminate the node without expanding it. |
duke@435 | 1463 | // |
duke@435 | 1464 | // Note: The membars associated with the lock/unlock are currently not
duke@435 | 1465 | // eliminated. This should be investigated as a future enhancement. |
duke@435 | 1466 | // |
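// A typical candidate (a sketch): a lock on an object that escape analysis
// has proven to be thread-local, e.g.
//
//   Object o = new Object();    // o does not escape the thread
//   synchronized (o) { ... }    // Lock/Unlock marked is_eliminated()
//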
kvn@501 | 1467 | bool PhaseMacroExpand::eliminate_locking_node(AbstractLockNode *alock) { |
kvn@501 | 1468 | |
kvn@501 | 1469 | if (!alock->is_eliminated()) { |
kvn@501 | 1470 | return false; |
kvn@501 | 1471 | } |
kvn@501 | 1472 | // Mark the box lock as eliminated if all corresponding locks are eliminated
kvn@501 | 1473 | // to construct correct debug info. |
kvn@501 | 1474 | BoxLockNode* box = alock->box_node()->as_BoxLock(); |
kvn@501 | 1475 | if (!box->is_eliminated()) { |
kvn@501 | 1476 | bool eliminate = true; |
kvn@501 | 1477 | for (DUIterator_Fast imax, i = box->fast_outs(imax); i < imax; i++) { |
kvn@501 | 1478 | Node *lck = box->fast_out(i); |
kvn@501 | 1479 | if (lck->is_Lock() && !lck->as_AbstractLock()->is_eliminated()) { |
kvn@501 | 1480 | eliminate = false; |
kvn@501 | 1481 | break; |
kvn@501 | 1482 | } |
kvn@501 | 1483 | } |
kvn@501 | 1484 | if (eliminate) |
kvn@501 | 1485 | box->set_eliminated(); |
kvn@501 | 1486 | } |
kvn@501 | 1487 | |
kvn@501 | 1488 | #ifndef PRODUCT |
kvn@501 | 1489 | if (PrintEliminateLocks) { |
kvn@501 | 1490 | if (alock->is_Lock()) { |
kvn@501 | 1491 | tty->print_cr("++++ Eliminating: %d Lock", alock->_idx); |
kvn@501 | 1492 | } else { |
kvn@501 | 1493 | tty->print_cr("++++ Eliminating: %d Unlock", alock->_idx); |
kvn@501 | 1494 | } |
kvn@501 | 1495 | } |
kvn@501 | 1496 | #endif |
kvn@501 | 1497 | |
kvn@501 | 1498 | Node* mem = alock->in(TypeFunc::Memory); |
kvn@501 | 1499 | Node* ctrl = alock->in(TypeFunc::Control); |
kvn@501 | 1500 | |
kvn@501 | 1501 | extract_call_projections(alock); |
kvn@501 | 1502 | // There are 2 projections from the lock. The lock node will |
kvn@501 | 1503 | // be deleted when its last use is subsumed below. |
kvn@501 | 1504 | assert(alock->outcnt() == 2 && |
kvn@501 | 1505 | _fallthroughproj != NULL && |
kvn@501 | 1506 | _memproj_fallthrough != NULL, |
kvn@501 | 1507 | "Unexpected projections from Lock/Unlock"); |
kvn@501 | 1508 | |
kvn@501 | 1509 | Node* fallthroughproj = _fallthroughproj; |
kvn@501 | 1510 | Node* memproj_fallthrough = _memproj_fallthrough; |
duke@435 | 1511 | |
duke@435 | 1512 | // The memory projection from a lock/unlock is RawMem |
duke@435 | 1513 | // The input to a Lock is merged memory, so extract its RawMem input |
duke@435 | 1514 | // (unless the MergeMem has been optimized away.) |
duke@435 | 1515 | if (alock->is_Lock()) { |
kvn@501 | 1516 | // Search for the MemBarAcquire node and delete it too.
kvn@501 | 1517 | MemBarNode* membar = fallthroughproj->unique_ctrl_out()->as_MemBar(); |
kvn@501 | 1518 | assert(membar != NULL && membar->Opcode() == Op_MemBarAcquire, ""); |
kvn@501 | 1519 | Node* ctrlproj = membar->proj_out(TypeFunc::Control); |
kvn@501 | 1520 | Node* memproj = membar->proj_out(TypeFunc::Memory); |
kvn@501 | 1521 | _igvn.hash_delete(ctrlproj); |
kvn@501 | 1522 | _igvn.subsume_node(ctrlproj, fallthroughproj); |
kvn@501 | 1523 | _igvn.hash_delete(memproj); |
kvn@501 | 1524 | _igvn.subsume_node(memproj, memproj_fallthrough); |
duke@435 | 1525 | } |
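// The splice above removes the MemBarAcquire from both the control and
// memory chains, roughly:
//
//   before:  Lock -> Proj -> MemBarAcquire -> Proj -> users
//   after:   Lock -> Proj ---------------------------> users
//
// (the Lock itself is subsumed at the end of this method).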
duke@435 | 1526 | |
kvn@501 | 1527 | // Search for the MemBarRelease node and delete it too.
kvn@501 | 1528 | if (alock->is_Unlock() && ctrl != NULL && ctrl->is_Proj() && |
kvn@501 | 1529 | ctrl->in(0)->is_MemBar()) { |
kvn@501 | 1530 | MemBarNode* membar = ctrl->in(0)->as_MemBar(); |
kvn@501 | 1531 | assert(membar->Opcode() == Op_MemBarRelease && |
kvn@501 | 1532 | mem->is_Proj() && membar == mem->in(0), ""); |
kvn@501 | 1533 | _igvn.hash_delete(fallthroughproj); |
kvn@501 | 1534 | _igvn.subsume_node(fallthroughproj, ctrl); |
kvn@501 | 1535 | _igvn.hash_delete(memproj_fallthrough); |
kvn@501 | 1536 | _igvn.subsume_node(memproj_fallthrough, mem); |
kvn@501 | 1537 | fallthroughproj = ctrl; |
kvn@501 | 1538 | memproj_fallthrough = mem; |
kvn@501 | 1539 | ctrl = membar->in(TypeFunc::Control); |
kvn@501 | 1540 | mem = membar->in(TypeFunc::Memory); |
kvn@501 | 1541 | } |
kvn@501 | 1542 | |
kvn@501 | 1543 | _igvn.hash_delete(fallthroughproj); |
kvn@501 | 1544 | _igvn.subsume_node(fallthroughproj, ctrl); |
kvn@501 | 1545 | _igvn.hash_delete(memproj_fallthrough); |
kvn@501 | 1546 | _igvn.subsume_node(memproj_fallthrough, mem); |
kvn@501 | 1547 | return true; |
duke@435 | 1548 | } |
duke@435 | 1549 | |
duke@435 | 1550 | |
duke@435 | 1551 | //------------------------------expand_lock_node---------------------- |
duke@435 | 1552 | void PhaseMacroExpand::expand_lock_node(LockNode *lock) { |
duke@435 | 1553 | |
duke@435 | 1554 | Node* ctrl = lock->in(TypeFunc::Control); |
duke@435 | 1555 | Node* mem = lock->in(TypeFunc::Memory); |
duke@435 | 1556 | Node* obj = lock->obj_node(); |
duke@435 | 1557 | Node* box = lock->box_node(); |
kvn@501 | 1558 | Node* flock = lock->fastlock_node(); |
duke@435 | 1559 | |
duke@435 | 1560 | // Make the merge point |
duke@435 | 1561 | Node *region = new (C, 3) RegionNode(3); |
duke@435 | 1562 | |
duke@435 | 1563 | Node *bol = transform_later(new (C, 2) BoolNode(flock,BoolTest::ne)); |
duke@435 | 1564 | Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); |
duke@435 | 1565 | // Optimize test; set region slot 2 |
duke@435 | 1566 | Node *slow_path = opt_iff(region,iff); |
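// Schematically, the expanded locking code is:
//
//   if (FastLock(obj, box) != 0)                 // inlined fast path failed
//     complete_monitor_locking_Java(obj, box);   // VM slow path
//   // region and mem_phi below merge the two outcomes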
duke@435 | 1567 | |
duke@435 | 1568 | // Make slow path call |
duke@435 | 1569 | CallNode *call = make_slow_call( (CallNode *) lock, OptoRuntime::complete_monitor_enter_Type(), OptoRuntime::complete_monitor_locking_Java(), NULL, slow_path, obj, box ); |
duke@435 | 1570 | |
duke@435 | 1571 | extract_call_projections(call); |
duke@435 | 1572 | |
duke@435 | 1573 | // Slow path can only throw asynchronous exceptions, which are always |
duke@435 | 1574 | // de-opted. So the compiler thinks the slow-call can never throw an |
duke@435 | 1575 | // exception. If it DOES throw an exception we would need the debug |
duke@435 | 1576 | // info removed first (since if it throws there is no monitor). |
duke@435 | 1577 | assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL && |
duke@435 | 1578 | _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Lock"); |
duke@435 | 1579 | |
duke@435 | 1580 | // Capture slow path |
duke@435 | 1581 | // disconnect fall-through projection from call and create a new one |
duke@435 | 1582 | // hook up users of fall-through projection to region |
duke@435 | 1583 | Node *slow_ctrl = _fallthroughproj->clone(); |
duke@435 | 1584 | transform_later(slow_ctrl); |
duke@435 | 1585 | _igvn.hash_delete(_fallthroughproj); |
duke@435 | 1586 | _fallthroughproj->disconnect_inputs(NULL); |
duke@435 | 1587 | region->init_req(1, slow_ctrl); |
duke@435 | 1588 | // region inputs are now complete |
duke@435 | 1589 | transform_later(region); |
duke@435 | 1590 | _igvn.subsume_node(_fallthroughproj, region); |
duke@435 | 1591 | |
duke@435 | 1592 | // create a Phi for the memory state |
duke@435 | 1593 | Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
duke@435 | 1594 | Node *memproj = transform_later( new (C, 1) ProjNode(call, TypeFunc::Memory) ); |
duke@435 | 1595 | mem_phi->init_req(1, memproj ); |
duke@435 | 1596 | mem_phi->init_req(2, mem); |
duke@435 | 1597 | transform_later(mem_phi); |
duke@435 | 1598 | _igvn.hash_delete(_memproj_fallthrough); |
duke@435 | 1599 | _igvn.subsume_node(_memproj_fallthrough, mem_phi); |
duke@435 | 1600 |
duke@435 | 1602 | } |
duke@435 | 1603 | |
duke@435 | 1604 | //------------------------------expand_unlock_node---------------------- |
duke@435 | 1605 | void PhaseMacroExpand::expand_unlock_node(UnlockNode *unlock) { |
duke@435 | 1606 | |
kvn@501 | 1607 | Node* ctrl = unlock->in(TypeFunc::Control); |
duke@435 | 1608 | Node* mem = unlock->in(TypeFunc::Memory); |
duke@435 | 1609 | Node* obj = unlock->obj_node(); |
duke@435 | 1610 | Node* box = unlock->box_node(); |
duke@435 | 1611 | |
duke@435 | 1612 | // No need for a null check on unlock |
duke@435 | 1613 | |
duke@435 | 1614 | // Make the merge point |
duke@435 | 1615 | RegionNode *region = new (C, 3) RegionNode(3); |
duke@435 | 1616 | |
duke@435 | 1617 | FastUnlockNode *funlock = new (C, 3) FastUnlockNode( ctrl, obj, box ); |
duke@435 | 1618 | funlock = transform_later( funlock )->as_FastUnlock(); |
duke@435 | 1619 | Node *bol = transform_later(new (C, 2) BoolNode(funlock,BoolTest::ne)); |
duke@435 | 1620 | Node *iff = new (C, 2) IfNode( ctrl, bol, PROB_MIN, COUNT_UNKNOWN ); |
duke@435 | 1621 | // Optimize test; set region slot 2 |
duke@435 | 1622 | Node *slow_path = opt_iff(region,iff); |
duke@435 | 1623 | |
duke@435 | 1624 | CallNode *call = make_slow_call( (CallNode *) unlock, OptoRuntime::complete_monitor_exit_Type(), CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C), "complete_monitor_unlocking_C", slow_path, obj, box ); |
duke@435 | 1625 | |
duke@435 | 1626 | extract_call_projections(call); |
duke@435 | 1627 | |
duke@435 | 1628 | assert ( _ioproj_fallthrough == NULL && _ioproj_catchall == NULL && |
duke@435 | 1629 | _memproj_catchall == NULL && _catchallcatchproj == NULL, "Unexpected projection from Unlock");
duke@435 | 1630 | |
duke@435 | 1631 | // No exceptions for unlocking |
duke@435 | 1632 | // Capture slow path |
duke@435 | 1633 | // disconnect fall-through projection from call and create a new one |
duke@435 | 1634 | // hook up users of fall-through projection to region |
duke@435 | 1635 | Node *slow_ctrl = _fallthroughproj->clone(); |
duke@435 | 1636 | transform_later(slow_ctrl); |
duke@435 | 1637 | _igvn.hash_delete(_fallthroughproj); |
duke@435 | 1638 | _fallthroughproj->disconnect_inputs(NULL); |
duke@435 | 1639 | region->init_req(1, slow_ctrl); |
duke@435 | 1640 | // region inputs are now complete |
duke@435 | 1641 | transform_later(region); |
duke@435 | 1642 | _igvn.subsume_node(_fallthroughproj, region); |
duke@435 | 1643 | |
duke@435 | 1644 | // create a Phi for the memory state |
duke@435 | 1645 | Node *mem_phi = new (C, 3) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM); |
duke@435 | 1646 | Node *memproj = transform_later( new(C, 1) ProjNode(call, TypeFunc::Memory) ); |
duke@435 | 1647 | mem_phi->init_req(1, memproj ); |
duke@435 | 1648 | mem_phi->init_req(2, mem); |
duke@435 | 1649 | transform_later(mem_phi); |
duke@435 | 1650 | _igvn.hash_delete(_memproj_fallthrough); |
duke@435 | 1651 | _igvn.subsume_node(_memproj_fallthrough, mem_phi); |
duke@435 | 1652 |
duke@435 | 1654 | } |
duke@435 | 1655 | |
duke@435 | 1656 | //------------------------------expand_macro_nodes---------------------- |
duke@435 | 1657 | // Returns true if a failure occurred. |
duke@435 | 1658 | bool PhaseMacroExpand::expand_macro_nodes() { |
duke@435 | 1659 | if (C->macro_count() == 0) |
duke@435 | 1660 | return false; |
kvn@508 | 1661 | // attempt to eliminate allocation and locking nodes
kvn@508 | 1662 | bool progress = true; |
kvn@508 | 1663 | while (progress) { |
kvn@508 | 1664 | progress = false; |
kvn@508 | 1665 | for (int i = C->macro_count(); i > 0; i--) { |
kvn@508 | 1666 | Node * n = C->macro_node(i-1); |
kvn@508 | 1667 | bool success = false; |
kvn@508 | 1668 | debug_only(int old_macro_count = C->macro_count();); |
kvn@508 | 1669 | switch (n->class_id()) { |
kvn@508 | 1670 | case Node::Class_Allocate: |
kvn@508 | 1671 | case Node::Class_AllocateArray: |
kvn@508 | 1672 | success = eliminate_allocate_node(n->as_Allocate()); |
kvn@508 | 1673 | break; |
kvn@508 | 1674 | case Node::Class_Lock: |
kvn@508 | 1675 | case Node::Class_Unlock: |
kvn@508 | 1676 | success = eliminate_locking_node(n->as_AbstractLock()); |
kvn@508 | 1677 | break; |
kvn@508 | 1678 | default: |
kvn@508 | 1679 | assert(false, "unknown node type in macro list"); |
kvn@508 | 1680 | } |
kvn@508 | 1681 | assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count"); |
kvn@508 | 1682 | progress = progress || success; |
kvn@508 | 1683 | } |
kvn@508 | 1684 | } |
kvn@508 | 1685 | // Make sure expansion will not cause node limit to be exceeded. |
kvn@508 | 1686 | // Worst case is a macro node gets expanded into about 50 nodes. |
kvn@508 | 1687 | // Allow 50% more for optimization. |
duke@435 | 1688 | if (C->check_node_count(C->macro_count() * 75, "out of nodes before macro expansion" ) ) |
duke@435 | 1689 | return true; |
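// Worked example of the budget check: with 100 macro nodes remaining, the
// check reserves 100 * 75 = 7500 nodes of headroom (50 worst-case nodes
// per macro node plus 50% slack = 75 each).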
kvn@508 | 1690 | |
duke@435 | 1691 | // expand "macro" nodes |
duke@435 | 1692 | // nodes are removed from the macro list as they are processed |
duke@435 | 1693 | while (C->macro_count() > 0) { |
kvn@508 | 1694 | int macro_count = C->macro_count(); |
kvn@508 | 1695 | Node * n = C->macro_node(macro_count-1); |
duke@435 | 1696 | assert(n->is_macro(), "only macro nodes expected here"); |
duke@435 | 1697 | if (_igvn.type(n) == Type::TOP || n->in(0)->is_top() ) { |
duke@435 | 1698 | // node is unreachable, so don't try to expand it |
duke@435 | 1699 | C->remove_macro_node(n); |
duke@435 | 1700 | continue; |
duke@435 | 1701 | } |
duke@435 | 1702 | switch (n->class_id()) { |
duke@435 | 1703 | case Node::Class_Allocate: |
duke@435 | 1704 | expand_allocate(n->as_Allocate()); |
duke@435 | 1705 | break; |
duke@435 | 1706 | case Node::Class_AllocateArray: |
duke@435 | 1707 | expand_allocate_array(n->as_AllocateArray()); |
duke@435 | 1708 | break; |
duke@435 | 1709 | case Node::Class_Lock: |
duke@435 | 1710 | expand_lock_node(n->as_Lock()); |
duke@435 | 1711 | break; |
duke@435 | 1712 | case Node::Class_Unlock: |
duke@435 | 1713 | expand_unlock_node(n->as_Unlock()); |
duke@435 | 1714 | break; |
duke@435 | 1715 | default: |
duke@435 | 1716 | assert(false, "unknown node type in macro list"); |
duke@435 | 1717 | } |
kvn@508 | 1718 | assert(C->macro_count() < macro_count, "must have deleted a node from macro list"); |
duke@435 | 1719 | if (C->failing()) return true; |
duke@435 | 1720 | } |
coleenp@548 | 1721 | |
coleenp@548 | 1722 | _igvn.set_delay_transform(false); |
duke@435 | 1723 | _igvn.optimize(); |
duke@435 | 1724 | return false; |
duke@435 | 1725 | } |