Mon, 25 May 2020 14:24:27 +0800
8244407: JVM crashes after transformation in C2 IdealLoopTree::split_fall_in
Reviewed-by: thartmann, kvn, andrew
Contributed-by: zhouyong44@huawei.com
/*
 * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/c2compiler.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/machnode.hpp"
#include "opto/runtime.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

// Optimization - Graph Style

// Check whether val is a not-null-decoded compressed oop, i.e. one whose
// access will hit the base of the heap if it represents NULL.
static bool accesses_heap_base_zone(Node *val) {
  if (Universe::narrow_oop_base() != NULL) { // Implies UseCompressedOops.
    if (val && val->is_Mach()) {
      if (val->as_Mach()->ideal_Opcode() == Op_DecodeN) {
        // This assumes all Decodes with TypePtr::NotNull are matched to nodes that
        // decode NULL to point to the heap base (Decode_NN).
        if (val->bottom_type()->is_oopptr()->ptr() == TypePtr::NotNull) {
          return true;
        }
      }
      // Must recognize load operation with Decode matched in memory operand.
      // We should not reach here except for PPC/AIX, as os::zero_page_read_protected()
      // returns true everywhere else. On PPC, no such memory operands
      // exist, therefore we did not yet implement a check for such operands.
      NOT_AIX(Unimplemented());
    }
  }
  return false;
}

static bool needs_explicit_null_check_for_read(Node *val) {
  // On some OSes (AIX) the page at address 0 is only write protected.
  // If so, only Store operations will trap.
  if (os::zero_page_read_protected()) {
    return false;  // Implicit null check will work.
  }
  // Also a read accessing the base of a heap-based compressed heap will trap.
  if (accesses_heap_base_zone(val) &&                    // Hits the base zone page.
      Universe::narrow_oop_use_implicit_null_checks()) { // Base zone page is protected.
    return false;
  }

  return true;
}

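// A minimal sketch of the two read cases above (hypothetical schematic IR,
// for illustration only):
//
//   val = DecodeN narrow_ptr        // NotNull decode: NULL maps to heap base
//   x   = LoadI [val + #12]         // read lands in the heap-base zone page
//
// On an OS where the zero page is write protected only (AIX), such a load
// does not fault for a NULL oop unless the heap-base page is also protected,
// which is exactly what narrow_oop_use_implicit_null_checks() vouches for.
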
//------------------------------implicit_null_check----------------------------
// Detect implicit-null-check opportunities.  Basically, find NULL checks
// with suitable memory ops nearby.  Use the memory op to do the NULL check.
// I can generate a memory op if there is not one nearby.
// The proj is the control projection for the not-null case.
// The val is the pointer being checked for nullness or
// decodeHeapOop_not_null node if it did not fold into address.
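//
// A before/after sketch of the transformation (hypothetical source and
// schematic IR, for illustration only):
//
//   if (p == NULL) uncommon_trap();  // explicit test ...
//   x = p->f;                        // ... guarding this load in a later block
//
// becomes a single faulting load that ends the test block:
//
//   x = LoadI [p + #offset_of_f]     // hoisted load; a MachNullCheck ends the
//                                    // block and a SIGSEGV on p == NULL is
//                                    // dispatched to the former null path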
void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons) {
  // Assume if null check needed for 0 offset then always needed
  // Intel solaris doesn't support any null checks yet and no
  // mechanism exists (yet) to set the switches at an os_cpu level
  if( !ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(0)) return;

  // Make sure the ptr-is-null path appears to be uncommon!
  float f = block->end()->as_MachIf()->_prob;
  if( proj->Opcode() == Op_IfTrue ) f = 1.0f - f;
  if( f > PROB_UNLIKELY_MAG(4) ) return;

  uint bidx = 0;              // Capture index of value into memop
  bool was_store;             // Memory op is a store op

  // Get the successor block for if the test ptr is non-null
  Block* not_null_block;  // this one goes with the proj
  Block* null_block;
  if (block->get_node(block->number_of_nodes()-1) == proj) {
    null_block     = block->_succs[0];
    not_null_block = block->_succs[1];
  } else {
    assert(block->get_node(block->number_of_nodes()-2) == proj, "proj is one or the other");
    not_null_block = block->_succs[0];
    null_block     = block->_succs[1];
  }
  while (null_block->is_Empty() == Block::empty_with_goto) {
    null_block = null_block->_succs[0];
  }

  // Search the exception block for an uncommon trap.
  // (See Parse::do_if and Parse::do_ifnull for the reason
  // we need an uncommon trap.  Briefly, we need a way to
  // detect failure of this optimization, as in 6366351.)
  {
    bool found_trap = false;
    for (uint i1 = 0; i1 < null_block->number_of_nodes(); i1++) {
      Node* nn = null_block->get_node(i1);
      if (nn->is_MachCall() &&
          nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) {
        const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type();
        if (trtype->isa_int() && trtype->is_int()->is_con()) {
          jint tr_con = trtype->is_int()->get_con();
          Deoptimization::DeoptReason reason = Deoptimization::trap_request_reason(tr_con);
          Deoptimization::DeoptAction action = Deoptimization::trap_request_action(tr_con);
          assert((int)reason < (int)BitsPerInt, "recode bit map");
          if (is_set_nth_bit(allowed_reasons, (int) reason)
              && action != Deoptimization::Action_none) {
            // This uncommon trap is sure to recompile, eventually.
            // When that happens, C->too_many_traps will prevent
            // this transformation from happening again.
            found_trap = true;
          }
        }
        break;
      }
    }
    if (!found_trap) {
      // We did not find an uncommon trap.
      return;
    }
  }

  // Check for decodeHeapOop_not_null node which did not fold into address
  bool is_decoden = ((intptr_t)val) & 1;
  val = (Node*)(((intptr_t)val) & ~1);

  assert(!is_decoden || ((val->in(0) == NULL) && val->is_Mach() &&
         (val->as_Mach()->ideal_Opcode() == Op_DecodeN)), "sanity");

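  // Note on the tagging just decoded above: the caller marks "val is a
  // decodeHeapOop_not_null that did not fold into the address" by setting
  // the low bit of the Node pointer, e.g. (hypothetical call site)
  //   implicit_null_check(block, proj, (Node*)((intptr_t)decoden | 1), reasons);
  // Node pointers are word aligned, so the low bit is free to use as a tag.
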
  // Search the successor block for a load or store whose base value is also
  // the tested value.  There may be several.
  Node_List *out = new Node_List(Thread::current()->resource_area());
  MachNode *best = NULL;        // Best found so far
  for (DUIterator i = val->outs(); val->has_out(i); i++) {
    Node *m = val->out(i);
    if( !m->is_Mach() ) continue;
    MachNode *mach = m->as_Mach();
    was_store = false;
    int iop = mach->ideal_Opcode();
    switch( iop ) {
    case Op_LoadB:
    case Op_LoadUB:
    case Op_LoadUS:
    case Op_LoadD:
    case Op_LoadF:
    case Op_LoadI:
    case Op_LoadL:
    case Op_LoadP:
    case Op_LoadN:
    case Op_LoadS:
    case Op_LoadKlass:
    case Op_LoadNKlass:
    case Op_LoadRange:
    case Op_LoadD_unaligned:
    case Op_LoadL_unaligned:
      assert(mach->in(2) == val, "should be address");
      break;
    case Op_StoreB:
    case Op_StoreC:
    case Op_StoreCM:
    case Op_StoreD:
    case Op_StoreF:
    case Op_StoreI:
    case Op_StoreL:
    case Op_StoreP:
    case Op_StoreN:
    case Op_StoreNKlass:
      was_store = true;         // Memory op is a store op
      // Stores will have their address in slot 2 (memory in slot 1).
      // If the value being null-checked is in another slot, it means we
      // are storing the checked value, which does NOT check the value!
      if( mach->in(2) != val ) continue;
      break;                    // Found a memory op?
    case Op_StrComp:
    case Op_StrEquals:
    case Op_StrIndexOf:
    case Op_AryEq:
    case Op_EncodeISOArray:
      // Not a legit memory op for implicit null check regardless of
      // embedded loads
      continue;
    default:                    // Also check for embedded loads
      if( !mach->needs_anti_dependence_check() )
        continue;               // Not a memory op; skip it
      if( must_clone[iop] ) {
        // Do not move nodes which produce flags because
        // RA will try to clone it to place near branch and
        // it will cause recompilation, see clone_node().
        continue;
      }
      {
        // Check that value is used in memory address in
        // instructions with embedded load (CmpP val1,(val2+off)).
        Node* base;
        Node* index;
        const MachOper* oper = mach->memory_inputs(base, index);
        if (oper == NULL || oper == (MachOper*)-1) {
          continue;             // Not a memory op; skip it
        }
        if (val == base ||
            (val == index && val->bottom_type()->isa_narrowoop())) {
          break;                // Found it
        } else {
          continue;             // Skip it
        }
      }
      break;
    }

    // On some OSes (AIX) the page at address 0 is only write protected.
    // If so, only Store operations will trap.
    // But a read accessing the base of a heap-based compressed heap will trap.
    if (!was_store && needs_explicit_null_check_for_read(val)) {
      continue;
    }

    // Check that node's control edge is not-null block's head or dominates it,
    // otherwise we can't hoist it because there are other control dependencies.
    Node* ctrl = mach->in(0);
    if (ctrl != NULL && !(ctrl == not_null_block->head() ||
        get_block_for_node(ctrl)->dominates(not_null_block))) {
      continue;
    }

    // check if the offset is not too high for implicit exception
    {
      intptr_t offset = 0;
      const TypePtr *adr_type = NULL;  // Do not need this return value here
      const Node* base = mach->get_base_and_disp(offset, adr_type);
      if (base == NULL || base == NodeSentinel) {
        // Narrow oop address doesn't have base, only index
        if( val->bottom_type()->isa_narrowoop() &&
            MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if offset is beyond page size
        // cannot reason about it; is probably not implicit null exception
      } else {
        const TypePtr* tptr;
        if (UseCompressedOops && (Universe::narrow_oop_shift() == 0 ||
                                  Universe::narrow_klass_shift() == 0)) {
          // 32-bits narrow oop can be the base of address expressions
          tptr = base->get_ptr_type();
        } else {
          // only regular oops are expected here
          tptr = base->bottom_type()->is_ptr();
        }
        // Give up if offset is not a compile-time constant
        if( offset == Type::OffsetBot || tptr->_offset == Type::OffsetBot )
          continue;
        offset += tptr->_offset; // correct if base is offset
        if( MacroAssembler::needs_explicit_null_check(offset) )
          continue;             // Give up if reference is beyond page size
      }
    }
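
    // Worked example of the offset arithmetic above (illustrative numbers):
    // for a memop matching [val + 24] whose base type carries _offset 0, the
    // faulting offset is 24, well inside the protected page, so the implicit
    // check stands; a displacement like 0x100000 exceeds the page and
    // needs_explicit_null_check(offset) rejects it.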

    // Check ctrl input to see if the null-check dominates the memory op
    Block *cb = get_block_for_node(mach);
    cb = cb->_idom;             // Always hoist at least 1 block
    if( !was_store ) {          // Stores can be hoisted only one block
      while( cb->_dom_depth > (block->_dom_depth + 1))
        cb = cb->_idom;         // Hoist loads as far as we want
      // The non-null-block should dominate the memory op, too. Live
      // range spilling will insert a spill in the non-null-block if it
      // needs to spill the memory op for an implicit null check.
      if (cb->_dom_depth == (block->_dom_depth + 1)) {
        if (cb != not_null_block) continue;
        cb = cb->_idom;
      }
    }
    if( cb != block ) continue;

    // Found a memory user; see if it can be hoisted to check-block
    uint vidx = 0;              // Capture index of value into memop
    uint j;
    for( j = mach->req()-1; j > 0; j-- ) {
      if( mach->in(j) == val ) {
        vidx = j;
        // Ignore DecodeN val which could be hoisted to where needed.
        if( is_decoden ) continue;
      }
      // Block of memory-op input
      Block *inb = get_block_for_node(mach->in(j));
      Block *b = block;          // Start from null check
      while( b != inb && b->_dom_depth > inb->_dom_depth )
        b = b->_idom;           // search upwards for input
      // See if input dominates null check
      if( b != inb )
        break;
    }
    if( j > 0 )
      continue;
    Block *mb = get_block_for_node(mach);
    // Hoisting stores requires more checks for the anti-dependence case.
    // Give up hoisting if we have to move the store past any load.
    if( was_store ) {
      Block *b = mb;            // Start searching here for a local load
      // mach use (faulting) trying to hoist
      // n might be blocker to hoisting
      while( b != block ) {
        uint k;
        for( k = 1; k < b->number_of_nodes(); k++ ) {
          Node *n = b->get_node(k);
          if( n->needs_anti_dependence_check() &&
              n->in(LoadNode::Memory) == mach->in(StoreNode::Memory) )
            break;              // Found anti-dependent load
        }
        if( k < b->number_of_nodes() )
          break;                // Found anti-dependent load
        // Make sure control does not do a merge (would have to check all paths)
        if( b->num_preds() != 2 ) break;
        b = get_block_for_node(b->pred(1)); // Move up to predecessor block
      }
      if( b != block ) continue;
    }

    // Make sure this memory op is not already being used for a NullCheck
    Node *e = mb->end();
    if( e->is_MachNullCheck() && e->in(1) == mach )
      continue;                 // Already being used as a NULL check

    // Found a candidate!  Pick one with least dom depth - the highest
    // in the dom tree should be closest to the null check.
    if (best == NULL || get_block_for_node(mach)->_dom_depth < get_block_for_node(best)->_dom_depth) {
      best = mach;
      bidx = vidx;
    }
  }
  // No candidate!
  if (best == NULL) {
    return;
  }

  // ---- Found an implicit null check
  extern int implicit_null_checks;
  implicit_null_checks++;

  if( is_decoden ) {
    // Check if we need to hoist decodeHeapOop_not_null first.
    Block *valb = get_block_for_node(val);
    if( block != valb && block->_dom_depth < valb->_dom_depth ) {
      // Hoist it up to the end of the test block.
      valb->find_remove(val);
      block->add_inst(val);
      map_node_to_block(val, block);
      // DecodeN on x86 may kill flags. Check for flag-killing projections
      // that also need to be hoisted.
      for (DUIterator_Fast jmax, j = val->fast_outs(jmax); j < jmax; j++) {
        Node* n = val->fast_out(j);
        if( n->is_MachProj() ) {
          get_block_for_node(n)->find_remove(n);
          block->add_inst(n);
          map_node_to_block(n, block);
        }
      }
    }
  }
  // Hoist the memory candidate up to the end of the test block.
  Block *old_block = get_block_for_node(best);
  old_block->find_remove(best);
  block->add_inst(best);
  map_node_to_block(best, block);

  // Move the control dependence if it is pinned to not-null block.
  // Don't change it in other cases: NULL or dominating control.
  if (best->in(0) == not_null_block->head()) {
    // Set it to control edge of null check.
    best->set_req(0, proj->in(0)->in(0));
  }

  // Check for flag-killing projections that also need to be hoisted
  // Should be DU safe because no edge updates.
  for (DUIterator_Fast jmax, j = best->fast_outs(jmax); j < jmax; j++) {
    Node* n = best->fast_out(j);
    if( n->is_MachProj() ) {
      get_block_for_node(n)->find_remove(n);
      block->add_inst(n);
      map_node_to_block(n, block);
    }
  }

  // proj==Op_True --> ne test; proj==Op_False --> eq test.
  // One of two graph shapes got matched:
  //   (IfTrue  (If (Bool NE (CmpP ptr NULL))))
  //   (IfFalse (If (Bool EQ (CmpP ptr NULL))))
  // NULL checks are always branch-if-eq.  If we see an IfTrue projection
  // then we are replacing a 'ne' test with an 'eq' NULL check test.
  // We need to flip the projections to keep the same semantics.
  if( proj->Opcode() == Op_IfTrue ) {
    // Swap order of projections in basic block to swap branch targets
    Node *tmp1 = block->get_node(block->end_idx()+1);
    Node *tmp2 = block->get_node(block->end_idx()+2);
    block->map_node(tmp2, block->end_idx()+1);
    block->map_node(tmp1, block->end_idx()+2);
    Node *tmp = new (C) Node(C->top()); // Use not NULL input
    tmp1->replace_by(tmp);
    tmp2->replace_by(tmp1);
    tmp->replace_by(tmp2);
    tmp->destruct();
  }
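  // The three replace_by() calls above rotate the use lists through the
  // scratch node: tmp1's users move to tmp, tmp2's users to tmp1, and tmp's
  // (originally tmp1's) users to tmp2 - a swap of the two projections'
  // users that never lets the two lists alias mid-update.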

  // Remove the existing null check; use a new implicit null check instead.
  // Since schedule-local needs precise def-use info, we need to correct
  // it as well.
  Node *old_tst = proj->in(0);
  MachNode *nul_chk = new (C) MachNullCheckNode(old_tst->in(0),best,bidx);
  block->map_node(nul_chk, block->end_idx());
  map_node_to_block(nul_chk, block);
  // Redirect users of old_test to nul_chk
  for (DUIterator_Last i2min, i2 = old_tst->last_outs(i2min); i2 >= i2min; --i2)
    old_tst->last_out(i2)->set_req(0, nul_chk);
  // Clean-up any dead code
  for (uint i3 = 0; i3 < old_tst->req(); i3++) {
    Node* in = old_tst->in(i3);
    old_tst->set_req(i3, NULL);
    if (in->outcnt() == 0) {
      // Remove dead input node
      in->disconnect_inputs(NULL, C);
      block->find_remove(in);
    }
  }

  latency_from_uses(nul_chk);
  latency_from_uses(best);

  // insert anti-dependences to defs in this block
  if (! best->needs_anti_dependence_check()) {
    for (uint k = 1; k < block->number_of_nodes(); k++) {
      Node *n = block->get_node(k);
      if (n->needs_anti_dependence_check() &&
          n->in(LoadNode::Memory) == best->in(StoreNode::Memory)) {
        // Found anti-dependent load
        insert_anti_dependences(block, n);
      }
    }
  }
}


//------------------------------select-----------------------------------------
// Select a nice fellow from the worklist to schedule next. If there is only
// one choice, then use it. Projections take top priority for correctness
// reasons - if I see a projection, then it is next.  There are a number of
// other special cases, for instructions that consume condition codes, et al.
// These are chosen immediately. Some instructions are required to immediately
// precede the last instruction in the block, and these are taken last. Of the
// remaining cases (most), choose the instruction with the greatest latency
// (that is, the largest number of pseudo-cycles remaining until the end of
// the routine). If there is a tie, choose the instruction with the most inputs.
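//
// Ranking sketch (summarizing the code below): projections, constant Top,
// CreateEx and CheckCastPP win outright; otherwise n_choice 3 (an input sets
// condition codes) beats 2 (default) beats 1 (MachTemp, or a flag producer
// whose branch successor still has other pending work); ties fall to latency,
// then to input count, then to worklist order (or to a random pick under
// -XX:+StressLCM).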
Node* PhaseCFG::select(Block* block, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot) {

  // If only a single entry on the stack, use it
  uint cnt = worklist.size();
  if (cnt == 1) {
    Node *n = worklist[0];
    worklist.map(0,worklist.pop());
    return n;
  }

  uint choice  = 0; // Bigger is most important
  uint latency = 0; // Bigger is scheduled first
  uint score   = 0; // Bigger is better
  int idx = -1;     // Index in worklist
  int cand_cnt = 0; // Candidate count

  for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
    // Order in worklist is used to break ties.
    // See caller for how this is used to delay scheduling
    // of induction variable increments to after the other
    // uses of the phi are scheduled.
    Node *n = worklist[i];      // Get Node on worklist

    int iop = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : 0;
    if( n->is_Proj() ||         // Projections always win
        n->Opcode()== Op_Con || // So does constant 'Top'
        iop == Op_CreateEx ||   // Create-exception must start block
        iop == Op_CheckCastPP
        ) {
      worklist.map(i,worklist.pop());
      return n;
    }

    // Final call in a block must be adjacent to 'catch'
    Node *e = block->end();
    if( e->is_Catch() && e->in(0)->in(0) == n )
      continue;

    // Memory op for an implicit null check has to be at the end of the block
    if( e->is_MachNullCheck() && e->in(1) == n )
      continue;

    // Schedule IV increment last.
    if (e->is_Mach() && e->as_Mach()->ideal_Opcode() == Op_CountedLoopEnd &&
        e->in(1)->in(1) == n && n->is_iteratively_computed())
      continue;

    uint n_choice = 2;

    // See if this instruction is consumed by a branch. If so, then (as the
    // branch is the last instruction in the basic block) force it to the
    // end of the basic block
    if ( must_clone[iop] ) {
      // See if any use is a branch
      bool found_machif = false;

      for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
        Node* use = n->fast_out(j);

        // The use is a conditional branch, make them adjacent
        if (use->is_MachIf() && get_block_for_node(use) == block) {
          found_machif = true;
          break;
        }

        // More than this instruction pending for successor to be ready,
        // don't choose this if other opportunities are ready
        if (ready_cnt.at(use->_idx) > 1)
          n_choice = 1;
      }

      // loop terminated, prefer not to use this instruction
      if (found_machif)
        continue;
    }

    // See if this has a predecessor that is "must_clone", i.e. sets the
    // condition code. If so, choose this first
    for (uint j = 0; j < n->req() ; j++) {
      Node *inn = n->in(j);
      if (inn) {
        if (inn->is_Mach() && must_clone[inn->as_Mach()->ideal_Opcode()] ) {
          n_choice = 3;
          break;
        }
      }
    }

    // MachTemps should be scheduled last so they are near their uses
    if (n->is_MachTemp()) {
      n_choice = 1;
    }

    uint n_latency = get_latency_for_node(n);
    uint n_score   = n->req();   // Many inputs get high score to break ties

    // Keep best latency found
    cand_cnt++;
    if (choice < n_choice ||
        (choice == n_choice &&
         ((StressLCM && Compile::randomized_select(cand_cnt)) ||
          (!StressLCM &&
           (latency < n_latency ||
            (latency == n_latency &&
             (score < n_score))))))) {
      choice  = n_choice;
      latency = n_latency;
      score   = n_score;
      idx     = i;               // Also keep index in worklist
    }
  } // End of for all ready nodes in worklist

  assert(idx >= 0, "index should be set");
  Node *n = worklist[(uint)idx];      // Get the winner

  worklist.map((uint)idx, worklist.pop());     // Compress worklist
  return n;
}


//------------------------------set_next_call----------------------------------
void PhaseCFG::set_next_call(Block* block, Node* n, VectorSet& next_call) {
  if( next_call.test_set(n->_idx) ) return;
  for( uint i=0; i<n->len(); i++ ) {
    Node *m = n->in(i);
    if( !m ) continue;   // must see all nodes in block that precede call
    if (get_block_for_node(m) == block) {
      set_next_call(block, m, next_call);
    }
  }
}

//------------------------------needed_for_next_call---------------------------
// Set the flag 'next_call' for each Node that is needed for the next call to
// be scheduled.  This flag lets me bias scheduling so Nodes needed for the
// next subroutine call get priority - basically it moves things NOT needed
// for the next call till after the call.  This prevents me from trying to
// carry lots of stuff live across a call.
void PhaseCFG::needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call) {
  // Find the next control-defining Node in this block
  Node* call = NULL;
  for (DUIterator_Fast imax, i = this_call->fast_outs(imax); i < imax; i++) {
    Node* m = this_call->fast_out(i);
    if (get_block_for_node(m) == block && // Local-block user
        m != this_call &&                 // Not self-start node
        m->is_MachCall()) {
      call = m;
      break;
    }
  }
  if (call == NULL) return;    // No next call (e.g., block end is near)
  // Set next-call for all inputs to this call
  set_next_call(block, call, next_call);
}

//------------------------------add_call_kills-------------------------------------
// helper function that adds caller save registers to MachProjNode
static void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe) {
  // Fill in the kill mask for the call
  for( OptoReg::Name r = OptoReg::Name(0); r < _last_Mach_Reg; r=OptoReg::add(r,1) ) {
    if( !regs.Member(r) ) {     // Not already defined by the call
      // Save-on-call register?
      if ((save_policy[r] == 'C') ||
          (save_policy[r] == 'A') ||
          ((save_policy[r] == 'E') && exclude_soe)) {
        proj->_rout.Insert(r);
      }
    }
  }
}
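
// For reference (a sketch of the ADLC encoding, not defined in this file):
// save_policy[] comes from the .ad register definitions, one char per
// register - 'C' save-on-call (caller save), 'E' save-on-entry (callee
// save), 'A' always save, 'N' no save. The loop above therefore kills every
// 'C' and 'A' register, plus the 'E' registers when exclude_soe is set.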


//------------------------------sched_call-------------------------------------
uint PhaseCFG::sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call) {
  RegMask regs;

  // Schedule all the users of the call right now.  All the users are
  // projection Nodes, so they must be scheduled next to the call.
  // Collect all the defined registers.
  for (DUIterator_Fast imax, i = mcall->fast_outs(imax); i < imax; i++) {
    Node* n = mcall->fast_out(i);
    assert( n->is_MachProj(), "" );
    int n_cnt = ready_cnt.at(n->_idx)-1;
    ready_cnt.at_put(n->_idx, n_cnt);
    assert( n_cnt == 0, "" );
    // Schedule next to call
    block->map_node(n, node_cnt++);
    // Collect defined registers
    regs.OR(n->out_RegMask());
    // Check for scheduling the next control-definer
    if( n->bottom_type() == Type::CONTROL )
      // Warm up next pile of heuristic bits
      needed_for_next_call(block, n, next_call);

    // Children of projections are now all ready
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j); // Get user
      if(get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }

  }

  // Act as if the call defines the Frame Pointer.
  // Certainly the FP is alive and well after the call.
  regs.Insert(_matcher.c_frame_pointer());

  // Set all registers killed and not already defined by the call.
  uint r_cnt = mcall->tf()->range()->cnt();
  int op = mcall->ideal_Opcode();
  MachProjNode *proj = new (C) MachProjNode( mcall, r_cnt+1, RegMask::Empty, MachProjNode::fat_proj );
  map_node_to_block(proj, block);
  block->insert_node(proj, node_cnt++);

  // Select the right register save policy.
  const char *save_policy = NULL;
  switch (op) {
    case Op_CallRuntime:
    case Op_CallLeaf:
    case Op_CallLeafNoFP:
      // Calling C code so use C calling convention
      save_policy = _matcher._c_reg_save_policy;
      break;

    case Op_CallStaticJava:
    case Op_CallDynamicJava:
      // Calling Java code so use Java calling convention
      save_policy = _matcher._register_save_policy;
      break;

    default:
      ShouldNotReachHere();
  }

  // When using CallRuntime mark SOE registers as killed by the call
  // so values that could show up in the RegisterMap aren't live in a
  // callee saved register since the RegisterMap wouldn't know where to
  // find them.  CallLeaf and CallLeafNoFP are ok because they can't
  // have debug info on them.  Strictly speaking this only needs to be
  // done for oops since idealreg2debugmask takes care of debug info
  // references but there is no way to handle oops differently than other
  // pointers as far as the kill mask goes.
  bool exclude_soe = op == Op_CallRuntime;

  // If the call is a MethodHandle invoke, we need to exclude the
  // register which is used to save the SP value over MH invokes from
  // the mask.  Otherwise this register could be used for
  // deoptimization information.
  if (op == Op_CallStaticJava) {
    MachCallStaticJavaNode* mcallstaticjava = (MachCallStaticJavaNode*) mcall;
    if (mcallstaticjava->_method_handle_invoke)
      proj->_rout.OR(Matcher::method_handle_invoke_SP_save_mask());
  }

  add_call_kills(proj, regs, save_policy, exclude_soe);

  return node_cnt;
}


//------------------------------schedule_local---------------------------------
// Topological sort within a block.  Someday become a real scheduler.
bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call) {
  // Already "sorted" are the block start Node (as the first entry), and
  // the block-ending Node and any trailing control projections.  We leave
  // these alone.  PhiNodes and ParmNodes are made to follow the block start
  // Node.  Everything else gets topo-sorted.

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("# --- schedule_local B%d, before: ---", block->_pre_order);
    for (uint i = 0;i < block->number_of_nodes(); i++) {
      tty->print("# ");
      block->get_node(i)->fast_dump();
    }
    tty->print_cr("#");
  }
#endif

  // RootNode is already sorted
  if (block->number_of_nodes() == 1) {
    return true;
  }

  // Move PhiNodes and ParmNodes from 1 to cnt up to the start
  uint node_cnt = block->end_idx();
  uint phi_cnt = 1;
  uint i;
  for( i = 1; i<node_cnt; i++ ) { // Scan for Phi
    Node *n = block->get_node(i);
    if( n->is_Phi() ||          // Found a PhiNode or ParmNode
        (n->is_Proj() && n->in(0) == block->head()) ) {
      // Move guy at 'phi_cnt' to the end; makes a hole at phi_cnt
      block->map_node(block->get_node(phi_cnt), i);
      block->map_node(n, phi_cnt++); // swap Phi/Parm up front
    } else {                    // All others
      // Count block-local inputs to 'n'
      uint cnt = n->len();      // Input count
      uint local = 0;
      for( uint j=0; j<cnt; j++ ) {
        Node *m = n->in(j);
        if( m && get_block_for_node(m) == block && !m->is_top() )
          local++;              // One more block-local input
      }
      ready_cnt.at_put(n->_idx, local); // Count em up

#ifdef ASSERT
      if( UseConcMarkSweepGC || UseG1GC ) {
        if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
          // Check the precedence edges
          for (uint prec = n->req(); prec < n->len(); prec++) {
            Node* oop_store = n->in(prec);
            if (oop_store != NULL) {
              assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
            }
          }
        }
      }
#endif

      // A few node types require changing a required edge to a precedence edge
      // before allocation.
      if( n->is_Mach() && n->req() > TypeFunc::Parms &&
          (n->as_Mach()->ideal_Opcode() == Op_MemBarAcquire ||
           n->as_Mach()->ideal_Opcode() == Op_MemBarVolatile) ) {
        // MemBarAcquire could be created without Precedent edge.
        // del_req() replaces the specified edge with the last input edge
        // and then removes the last edge. If the specified edge > number of
        // edges the last edge will be moved outside of the input edges array
        // and the edge will be lost. This is why this code should be
        // executed only when Precedent (== TypeFunc::Parms) edge is present.
        Node *x = n->in(TypeFunc::Parms);
        if (x != NULL && get_block_for_node(x) == block && n->find_prec_edge(x) != -1) {
          // Old edge to node within same block will get removed, but no precedence
          // edge will get added because it already exists. Update ready count.
          int cnt = ready_cnt.at(n->_idx);
          assert(cnt > 1, err_msg("MemBar node %d must not get ready here", n->_idx));
          ready_cnt.at_put(n->_idx, cnt-1);
        }
        n->del_req(TypeFunc::Parms);
        n->add_prec(x);
      }
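      // Sketch of the edge surgery just above: for a MemBarAcquire whose
      // inputs are { ctrl, mem, ..., Parms: x }, del_req(Parms) drops x from
      // the required inputs and add_prec(x) re-attaches it as a precedence
      // edge, so local scheduling still orders x before the barrier while
      // the register allocator no longer sees it as a data input.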
    }
  }
  for(uint i2=i; i2< block->number_of_nodes(); i2++ ) // Trailing guys get zapped count
    ready_cnt.at_put(block->get_node(i2)->_idx, 0);

  // All the prescheduled guys do not hold back internal nodes
  uint i3;
  for(i3 = 0; i3<phi_cnt; i3++ ) {  // For all pre-scheduled
    Node *n = block->get_node(i3);       // Get pre-scheduled
    for (DUIterator_Fast jmax, j = n->fast_outs(jmax); j < jmax; j++) {
      Node* m = n->fast_out(j);
      if (get_block_for_node(m) == block) { // Local-block user
        int m_cnt = ready_cnt.at(m->_idx)-1;
        ready_cnt.at_put(m->_idx, m_cnt);   // Fix ready count
      }
    }
  }

  Node_List delay;
  // Make a worklist
  Node_List worklist;
  for(uint i4=i3; i4<node_cnt; i4++ ) {    // Put ready guys on worklist
    Node *m = block->get_node(i4);
    if( !ready_cnt.at(m->_idx) ) {   // Zero ready count?
      if (m->is_iteratively_computed()) {
        // Push induction variable increments last to allow other uses
        // of the phi to be scheduled first. The select() method breaks
        // ties in scheduling by worklist order.
        delay.push(m);
      } else if (m->is_Mach() && m->as_Mach()->ideal_Opcode() == Op_CreateEx) {
        // Force the CreateEx to the top of the list so it's processed
        // first and ends up at the start of the block.
        worklist.insert(0, m);
      } else {
        worklist.push(m);         // Then on to worklist!
      }
    }
  }
  while (delay.size()) {
    Node* d = delay.pop();
    worklist.push(d);
  }

  // Warm up the 'next_call' heuristic bits
  needed_for_next_call(block, block->head(), next_call);

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    for (uint j=0; j< block->number_of_nodes(); j++) {
      Node *n = block->get_node(j);
      int idx = n->_idx;
      tty->print("#   ready cnt:%3d  ", ready_cnt.at(idx));
      tty->print("latency:%3d  ", get_latency_for_node(n));
      tty->print("%4d: %s\n", idx, n->Name());
    }
  }
#endif

  uint max_idx = (uint)ready_cnt.length();
  // Pull from worklist and schedule
  while( worklist.size() ) {    // Worklist is not ready

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#   ready list:");
      for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
        Node *n = worklist[i];      // Get Node on worklist
        tty->print(" %d", n->_idx);
      }
      tty->cr();
    }
#endif

    // Select and pop a ready guy from worklist
    Node* n = select(block, worklist, ready_cnt, next_call, phi_cnt);
    block->map_node(n, phi_cnt++);    // Schedule him next

#ifndef PRODUCT
    if (trace_opto_pipelining()) {
      tty->print("#    select %d: %s", n->_idx, n->Name());
      tty->print(", latency:%d", get_latency_for_node(n));
      n->dump();
      if (Verbose) {
        tty->print("#   ready list:");
        for( uint i=0; i<worklist.size(); i++ ) { // Inspect entire worklist
          Node *n = worklist[i];      // Get Node on worklist
          tty->print(" %d", n->_idx);
        }
        tty->cr();
      }
    }

#endif
    if( n->is_MachCall() ) {
      MachCallNode *mcall = n->as_MachCall();
      phi_cnt = sched_call(block, phi_cnt, worklist, ready_cnt, mcall, next_call);
      continue;
    }

    if (n->is_Mach() && n->as_Mach()->has_call()) {
      RegMask regs;
      regs.Insert(_matcher.c_frame_pointer());
      regs.OR(n->out_RegMask());

      MachProjNode *proj = new (C) MachProjNode( n, 1, RegMask::Empty, MachProjNode::fat_proj );
      map_node_to_block(proj, block);
      block->insert_node(proj, phi_cnt++);

      add_call_kills(proj, regs, _matcher._c_reg_save_policy, false);
    }

    // Children are now all ready
    for (DUIterator_Fast i5max, i5 = n->fast_outs(i5max); i5 < i5max; i5++) {
      Node* m = n->fast_out(i5); // Get user
      if (get_block_for_node(m) != block) {
        continue;
      }
      if( m->is_Phi() ) continue;
      if (m->_idx >= max_idx) { // new node, skip it
        assert(m->is_MachProj() && n->is_Mach() && n->as_Mach()->has_call(), "unexpected node types");
        continue;
      }
      int m_cnt = ready_cnt.at(m->_idx)-1;
      ready_cnt.at_put(m->_idx, m_cnt);
      if( m_cnt == 0 )
        worklist.push(m);
    }
  }

  if( phi_cnt != block->end_idx() ) {
    // did not schedule all.  Retry, Bailout, or Die
    if (C->subsume_loads() == true && !C->failing()) {
      // Retry with subsume_loads == false
      // If this is the first failure, the sentinel string will "stick"
      // to the Compile object, and the C2Compiler will see it and retry.
      C->record_failure(C2Compiler::retry_no_subsuming_loads());
    }
    // assert( phi_cnt == end_idx(), "did not schedule all" );
    return false;
  }

#ifndef PRODUCT
  if (trace_opto_pipelining()) {
    tty->print_cr("#");
    tty->print_cr("# after schedule_local");
    for (uint i = 0;i < block->number_of_nodes();i++) {
      tty->print("# ");
      block->get_node(i)->fast_dump();
    }
    tty->cr();
  }
#endif


  return true;
}

//--------------------------catch_cleanup_fix_all_inputs-----------------------
static void catch_cleanup_fix_all_inputs(Node *use, Node *old_def, Node *new_def) {
  for (uint l = 0; l < use->len(); l++) {
    if (use->in(l) == old_def) {
      if (l < use->req()) {
        use->set_req(l, new_def);
      } else {
        use->rm_prec(l);
        use->add_prec(new_def);
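        // rm_prec() swaps the former last precedence edge into slot l, so
        // step back and re-examine this slot on the next iteration.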
        l--;
      }
    }
  }
}

//------------------------------catch_cleanup_find_cloned_def------------------
Node* PhaseCFG::catch_cleanup_find_cloned_def(Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  assert( use_blk != def_blk, "Inter-block cleanup only");

  // The use is some block below the Catch.  Find and return the clone of the def
  // that dominates the use. If there is no clone in a dominating block, then
  // create a phi for the def in a dominating block.

  // Find which successor block dominates this use.  The successor
  // blocks must all be single-entry (from the Catch only; I will have
  // split blocks to make this so), hence they all dominate.
  while( use_blk->_dom_depth > def_blk->_dom_depth+1 )
    use_blk = use_blk->_idom;

  // Find the successor
  Node *fixup = NULL;

  uint j;
  for( j = 0; j < def_blk->_num_succs; j++ )
    if( use_blk == def_blk->_succs[j] )
      break;

  if( j == def_blk->_num_succs ) {
    // Block at same level in dom-tree is not a successor.  It needs a
    // PhiNode, the PhiNode uses from the def and IT's uses need fixup.
    Node_Array inputs = new Node_List(Thread::current()->resource_area());
    for(uint k = 1; k < use_blk->num_preds(); k++) {
      Block* block = get_block_for_node(use_blk->pred(k));
      inputs.map(k, catch_cleanup_find_cloned_def(block, def, def_blk, n_clone_idx));
    }

    // Check to see if the use_blk already has an identical phi inserted.
    // If it exists, it will be at the first position since all uses of a
    // def are processed together.
    Node *phi = use_blk->get_node(1);
    if( phi->is_Phi() ) {
      fixup = phi;
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        if (phi->in(k) != inputs[k]) {
          // Not a match
          fixup = NULL;
          break;
        }
      }
    }

    // If an existing PhiNode was not found, make a new one.
    if (fixup == NULL) {
      Node *new_phi = PhiNode::make(use_blk->head(), def);
      use_blk->insert_node(new_phi, 1);
      map_node_to_block(new_phi, use_blk);
      for (uint k = 1; k < use_blk->num_preds(); k++) {
        new_phi->set_req(k, inputs[k]);
      }
      fixup = new_phi;
    }

  } else {
    // Found the use just below the Catch.  Make it use the clone.
    fixup = use_blk->get_node(n_clone_idx);
  }

  return fixup;
}

//--------------------------catch_cleanup_intra_block--------------------------
// Fix all input edges in use that reference "def".  The use is in the same
// block as the def and both have been cloned in each successor block.
static void catch_cleanup_intra_block(Node *use, Node *def, Block *blk, int beg, int n_clone_idx) {

  // Both the use and def have been cloned. For each successor block,
  // get the clone of the use, and make its input the clone of the def
  // found in that block.

  uint use_idx = blk->find_node(use);
  uint offset_idx = use_idx - beg;
  for( uint k = 0; k < blk->_num_succs; k++ ) {
    // Get clone in each successor block
    Block *sb = blk->_succs[k];
    Node *clone = sb->get_node(offset_idx+1);
    assert( clone->Opcode() == use->Opcode(), "" );

    // Make use-clone reference the def-clone
    catch_cleanup_fix_all_inputs(clone, def, sb->get_node(n_clone_idx));
  }
}

//------------------------------catch_cleanup_inter_block---------------------
// Fix all input edges in use that reference "def".  The use is in a different
// block than the def.
void PhaseCFG::catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx) {
  if( !use_blk ) return;  // Can happen if the use is a precedence edge

  Node *new_def = catch_cleanup_find_cloned_def(use_blk, def, def_blk, n_clone_idx);
  catch_cleanup_fix_all_inputs(use, def, new_def);
}

//------------------------------call_catch_cleanup-----------------------------
// If we inserted any instructions between a Call and his CatchNode,
// clone the instructions on all paths below the Catch.
void PhaseCFG::call_catch_cleanup(Block* block) {

  // End of region to clone
  uint end = block->end_idx();
  if( !block->get_node(end)->is_Catch() ) return;
  // Start of region to clone
  uint beg = end;
  while(!block->get_node(beg-1)->is_MachProj() ||
        !block->get_node(beg-1)->in(0)->is_MachCall() ) {
    beg--;
    assert(beg > 0,"Catch cleanup walking beyond block boundary");
  }
  // Range of inserted instructions is [beg, end)
  if( beg == end ) return;
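
  // Block layout sketch at this point (indices into this block's node list):
  //   [0]        the block head
  //   ...
  //   [beg-1]    the MachProj of the MachCall
  //   [beg,end)  instructions inserted between the call and the Catch,
  //              about to be cloned into every successor block
  //   [end]      the Catch itself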

  // Clone along all Catch output paths.  Clone area between the 'beg' and
  // 'end' indices.
  for( uint i = 0; i < block->_num_succs; i++ ) {
    Block *sb = block->_succs[i];
    // Clone the entire area; ignoring the edge fixup for now.
    for( uint j = end; j > beg; j-- ) {
      Node *clone = block->get_node(j-1)->clone();
      sb->insert_node(clone, 1);
      map_node_to_block(clone, sb);
      if (clone->needs_anti_dependence_check()) {
        insert_anti_dependences(sb, clone);
      }
    }
  }


  // Fixup edges.  Check the def-use info per cloned Node
  for(uint i2 = beg; i2 < end; i2++ ) {
    uint n_clone_idx = i2-beg+1; // Index of clone of n in each successor block
    Node *n = block->get_node(i2);        // Node that got cloned
    // Need DU safe iterator because of edge manipulation in calls.
    Unique_Node_List *out = new Unique_Node_List(Thread::current()->resource_area());
    for (DUIterator_Fast j1max, j1 = n->fast_outs(j1max); j1 < j1max; j1++) {
      out->push(n->fast_out(j1));
    }
    uint max = out->size();
    for (uint j = 0; j < max; j++) {// For all users
      Node *use = out->pop();
      Block *buse = get_block_for_node(use);
      if( use->is_Phi() ) {
        for( uint k = 1; k < use->req(); k++ )
          if( use->in(k) == n ) {
            Block* b = get_block_for_node(buse->pred(k));
            Node *fixup = catch_cleanup_find_cloned_def(b, n, block, n_clone_idx);
            use->set_req(k, fixup);
          }
      } else {
        if (block == buse) {
          catch_cleanup_intra_block(use, n, block, beg, n_clone_idx);
        } else {
          catch_cleanup_inter_block(use, buse, n, block, n_clone_idx);
        }
      }
    } // End for all users

  } // End of for all Nodes in cloned area

  // Remove the now-dead cloned ops
  for(uint i3 = beg; i3 < end; i3++ ) {
    block->get_node(beg)->disconnect_inputs(NULL, C);
    block->remove_node(beg);
  }

  // If the successor blocks have a CreateEx node, move it back to the top
  for(uint i4 = 0; i4 < block->_num_succs; i4++ ) {
    Block *sb = block->_succs[i4];
    uint new_cnt = end - beg;
    // Remove any newly created, but dead, nodes.
    for( uint j = new_cnt; j > 0; j-- ) {
      Node *n = sb->get_node(j);
      if (n->outcnt() == 0 &&
          (!n->is_Proj() || n->as_Proj()->in(0)->outcnt() == 1) ){
        n->disconnect_inputs(NULL, C);
        sb->remove_node(j);
        new_cnt--;
      }
    }
    // If any newly created nodes remain, move the CreateEx node to the top
    if (new_cnt > 0) {
      Node *cex = sb->get_node(1+new_cnt);
      if( cex->is_Mach() && cex->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
        sb->remove_node(1+new_cnt);
        sb->insert_node(cex, 1);
      }
    }
  }
}