Sun, 01 Sep 2013 19:21:05 +0200
8023988: Move local scheduling of nodes to the CFG creation and code motion phase (PhaseCFG)
Summary: Moved local scheduling code from class Block to class PhaseCFG
Reviewed-by: kvn, roland
duke@435 | 1 | /* |
mikael@4153 | 2 | * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
stefank@2314 | 26 | #include "libadt/vectset.hpp" |
stefank@2314 | 27 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 28 | #include "opto/block.hpp" |
stefank@2314 | 29 | #include "opto/c2compiler.hpp" |
stefank@2314 | 30 | #include "opto/callnode.hpp" |
stefank@2314 | 31 | #include "opto/cfgnode.hpp" |
stefank@2314 | 32 | #include "opto/machnode.hpp" |
stefank@2314 | 33 | #include "opto/opcodes.hpp" |
stefank@2314 | 34 | #include "opto/phaseX.hpp" |
stefank@2314 | 35 | #include "opto/rootnode.hpp" |
stefank@2314 | 36 | #include "opto/runtime.hpp" |
stefank@2314 | 37 | #include "runtime/deoptimization.hpp" |
stefank@2314 | 38 | #ifdef TARGET_ARCH_MODEL_x86_32 |
stefank@2314 | 39 | # include "adfiles/ad_x86_32.hpp" |
stefank@2314 | 40 | #endif |
stefank@2314 | 41 | #ifdef TARGET_ARCH_MODEL_x86_64 |
stefank@2314 | 42 | # include "adfiles/ad_x86_64.hpp" |
stefank@2314 | 43 | #endif |
stefank@2314 | 44 | #ifdef TARGET_ARCH_MODEL_sparc |
stefank@2314 | 45 | # include "adfiles/ad_sparc.hpp" |
stefank@2314 | 46 | #endif |
stefank@2314 | 47 | #ifdef TARGET_ARCH_MODEL_zero |
stefank@2314 | 48 | # include "adfiles/ad_zero.hpp" |
stefank@2314 | 49 | #endif |
bobv@2508 | 50 | #ifdef TARGET_ARCH_MODEL_arm |
bobv@2508 | 51 | # include "adfiles/ad_arm.hpp" |
bobv@2508 | 52 | #endif |
bobv@2508 | 53 | #ifdef TARGET_ARCH_MODEL_ppc |
bobv@2508 | 54 | # include "adfiles/ad_ppc.hpp" |
bobv@2508 | 55 | #endif |
stefank@2314 | 56 | |
duke@435 | 57 | // Portions of code courtesy of Clifford Click |
duke@435 | 58 | |
duke@435 | 59 | // Optimization - Graph Style |
duke@435 | 60 | |
kvn@987 | 61 | // To avoid float value underflow |
kvn@987 | 62 | #define MIN_BLOCK_FREQUENCY 1.e-35f |
kvn@987 | 63 | |
duke@435 | 64 | //----------------------------schedule_node_into_block------------------------- |
duke@435 | 65 | // Insert node n into block b. Look for projections of n and make sure they |
duke@435 | 66 | // are in b also. |
duke@435 | 67 | void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) { |
duke@435 | 68 | // Set basic block of n and add n to b. |
adlertz@5509 | 69 | map_node_to_block(n, b); |
duke@435 | 70 | b->add_inst(n); |
duke@435 | 71 | |
duke@435 | 72 | // After Matching, nearly any old Node may have projections trailing it. |
duke@435 | 73 | // These are usually machine-dependent flags. In any case, they might |
duke@435 | 74 | // float to another block below this one. Move them up. |
duke@435 | 75 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
duke@435 | 76 | Node* use = n->fast_out(i); |
duke@435 | 77 | if (use->is_Proj()) { |
adlertz@5509 | 78 | Block* buse = get_block_for_node(use); |
duke@435 | 79 | if (buse != b) { // In wrong block? |
adlertz@5509 | 80 | if (buse != NULL) { |
duke@435 | 81 | buse->find_remove(use); // Remove from wrong block |
adlertz@5509 | 82 | } |
adlertz@5509 | 83 | map_node_to_block(use, b); |
duke@435 | 84 | b->add_inst(use); |
duke@435 | 85 | } |
duke@435 | 86 | } |
duke@435 | 87 | } |
duke@435 | 88 | } |
duke@435 | 89 | |
kvn@1036 | 90 | //----------------------------replace_block_proj_ctrl------------------------- |
kvn@1036 | 91 | // Nodes that have is_block_proj() nodes as their control need to use |
kvn@1036 | 92 | // the appropriate Region for their actual block as their control since |
kvn@1036 | 93 | // the projection will be in a predecessor block. |
kvn@1036 | 94 | void PhaseCFG::replace_block_proj_ctrl( Node *n ) { |
kvn@1036 | 95 | const Node *in0 = n->in(0); |
kvn@1036 | 96 | assert(in0 != NULL, "Only control-dependent"); |
kvn@1036 | 97 | const Node *p = in0->is_block_proj(); |
kvn@1036 | 98 | if (p != NULL && p != n) { // Control from a block projection? |
kvn@3311 | 99 | assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here"); |
kvn@1036 | 100 | // Find trailing Region |
adlertz@5509 | 101 | Block *pb = get_block_for_node(in0); // Block-projection already has basic block |
kvn@1036 | 102 | uint j = 0; |
kvn@1036 | 103 | if (pb->_num_succs != 1) { // More than 1 successor? |
kvn@1036 | 104 | // Search for successor |
adlertz@5635 | 105 | uint max = pb->number_of_nodes(); |
kvn@1036 | 106 | assert( max > 1, "" ); |
kvn@1036 | 107 | uint start = max - pb->_num_succs; |
kvn@1036 | 108 | // Find which output path belongs to projection |
kvn@1036 | 109 | for (j = start; j < max; j++) { |
adlertz@5635 | 110 | if( pb->get_node(j) == in0 ) |
kvn@1036 | 111 | break; |
kvn@1036 | 112 | } |
kvn@1036 | 113 | assert( j < max, "must find" ); |
kvn@1036 | 114 | // Change control to match head of successor basic block |
kvn@1036 | 115 | j -= start; |
kvn@1036 | 116 | } |
kvn@1036 | 117 | n->set_req(0, pb->_succs[j]->head()); |
kvn@1036 | 118 | } |
kvn@1036 | 119 | } |
kvn@1036 | 120 | |
duke@435 | 121 | |
duke@435 | 122 | //------------------------------schedule_pinned_nodes-------------------------- |
duke@435 | 123 | // Set the basic block for Nodes pinned into blocks |
adlertz@5539 | 124 | void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) { |
duke@435 | 125 | // Allocate node stack of size C->unique()+8 to avoid frequent realloc |
adlertz@5539 | 126 | GrowableArray <Node *> spstack(C->unique() + 8); |
duke@435 | 127 | spstack.push(_root); |
adlertz@5539 | 128 | while (spstack.is_nonempty()) { |
adlertz@5539 | 129 | Node* node = spstack.pop(); |
adlertz@5539 | 130 | if (!visited.test_set(node->_idx)) { // Test node and flag it as visited |
adlertz@5539 | 131 | if (node->pinned() && !has_block(node)) { // Pinned? Nail it down! |
adlertz@5539 | 132 | assert(node->in(0), "pinned Node must have Control"); |
kvn@1036 | 133 | // Before setting block replace block_proj control edge |
adlertz@5539 | 134 | replace_block_proj_ctrl(node); |
adlertz@5539 | 135 | Node* input = node->in(0); |
adlertz@5509 | 136 | while (!input->is_block_start()) { |
duke@435 | 137 | input = input->in(0); |
adlertz@5509 | 138 | } |
adlertz@5539 | 139 | Block* block = get_block_for_node(input); // Basic block of controlling input |
adlertz@5539 | 140 | schedule_node_into_block(node, block); |
duke@435 | 141 | } |
adlertz@5539 | 142 | |
adlertz@5539 | 143 | // process all inputs that are non-NULL |
adlertz@5539 | 144 | for (int i = node->req() - 1; i >= 0; --i) { |
adlertz@5539 | 145 | if (node->in(i) != NULL) { |
adlertz@5539 | 146 | spstack.push(node->in(i)); |
adlertz@5539 | 147 | } |
duke@435 | 148 | } |
duke@435 | 149 | } |
duke@435 | 150 | } |
duke@435 | 151 | } |
duke@435 | 152 | |
duke@435 | 153 | #ifdef ASSERT |
duke@435 | 154 | // Assert that new input b2 is dominated by all previous inputs. |
duke@435 | 155 | // Check this by seeing that it is dominated by b1, the deepest |
duke@435 | 156 | // input observed until b2. |
adlertz@5509 | 157 | static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) { |
duke@435 | 158 | if (b1 == NULL) return; |
duke@435 | 159 | assert(b1->_dom_depth < b2->_dom_depth, "sanity"); |
duke@435 | 160 | Block* tmp = b2; |
duke@435 | 161 | while (tmp != b1 && tmp != NULL) { |
duke@435 | 162 | tmp = tmp->_idom; |
duke@435 | 163 | } |
duke@435 | 164 | if (tmp != b1) { |
duke@435 | 165 | // Detected an unschedulable graph. Print some nice stuff and die. |
duke@435 | 166 | tty->print_cr("!!! Unschedulable graph !!!"); |
duke@435 | 167 | for (uint j=0; j<n->len(); j++) { // For all inputs |
duke@435 | 168 | Node* inn = n->in(j); // Get input |
duke@435 | 169 | if (inn == NULL) continue; // Ignore NULL, missing inputs |
adlertz@5509 | 170 | Block* inb = cfg->get_block_for_node(inn); |
duke@435 | 171 | tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order, |
duke@435 | 172 | inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth); |
duke@435 | 173 | inn->dump(); |
duke@435 | 174 | } |
duke@435 | 175 | tty->print("Failing node: "); |
duke@435 | 176 | n->dump(); |
duke@435 | 177 | assert(false, "unschedulable graph"); |
duke@435 | 178 | } |
duke@435 | 179 | } |
duke@435 | 180 | #endif |
duke@435 | 181 | |
adlertz@5509 | 182 | static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) { |
duke@435 | 183 | // Find the last input dominated by all other inputs. |
duke@435 | 184 | Block* deepb = NULL; // Deepest block so far |
duke@435 | 185 | int deepb_dom_depth = 0; |
duke@435 | 186 | for (uint k = 0; k < n->len(); k++) { // For all inputs |
duke@435 | 187 | Node* inn = n->in(k); // Get input |
duke@435 | 188 | if (inn == NULL) continue; // Ignore NULL, missing inputs |
adlertz@5509 | 189 | Block* inb = cfg->get_block_for_node(inn); |
duke@435 | 190 | assert(inb != NULL, "must already have scheduled this input"); |
duke@435 | 191 | if (deepb_dom_depth < (int) inb->_dom_depth) { |
duke@435 | 192 | // The new inb must be dominated by the previous deepb. |
duke@435 | 193 | // The various inputs must be linearly ordered in the dom |
duke@435 | 194 | // tree, or else there will not be a unique deepest block. |
adlertz@5509 | 195 | DEBUG_ONLY(assert_dom(deepb, inb, n, cfg)); |
duke@435 | 196 | deepb = inb; // Save deepest block |
duke@435 | 197 | deepb_dom_depth = deepb->_dom_depth; |
duke@435 | 198 | } |
duke@435 | 199 | } |
duke@435 | 200 | assert(deepb != NULL, "must be at least one input to n"); |
duke@435 | 201 | return deepb; |
duke@435 | 202 | } |
duke@435 | 203 | |
duke@435 | 204 | |
duke@435 | 205 | //------------------------------schedule_early--------------------------------- |
duke@435 | 206 | // Find the earliest Block any instruction can be placed in. Some instructions |
duke@435 | 207 | // are pinned into Blocks. Unpinned instructions can appear in the last block in |
duke@435 | 208 | // which all their inputs occur. |
duke@435 | 209 | bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) { |
duke@435 | 210 | // Allocate stack with enough space to avoid frequent realloc |
adlertz@5539 | 211 | Node_Stack nstack(roots.Size() + 8); |
adlertz@5539 | 212 | // _root will be processed among C->top() inputs |
duke@435 | 213 | roots.push(C->top()); |
duke@435 | 214 | visited.set(C->top()->_idx); |
duke@435 | 215 | |
duke@435 | 216 | while (roots.size() != 0) { |
duke@435 | 217 | // Use local variables nstack_top_n & nstack_top_i to cache values |
duke@435 | 218 | // on stack's top. |
adlertz@5539 | 219 | Node* parent_node = roots.pop(); |
adlertz@5539 | 220 | uint input_index = 0; |
adlertz@5539 | 221 | |
duke@435 | 222 | while (true) { |
adlertz@5539 | 223 | if (input_index == 0) { |
kvn@1036 | 224 | // Fixup some control. Constants without control get attached |
kvn@1036 | 225 | // to root and nodes that use is_block_proj() nodes should be attached |
kvn@1036 | 226 | // to the region that starts their block. |
adlertz@5539 | 227 | const Node* control_input = parent_node->in(0); |
adlertz@5539 | 228 | if (control_input != NULL) { |
adlertz@5539 | 229 | replace_block_proj_ctrl(parent_node); |
adlertz@5539 | 230 | } else { |
adlertz@5539 | 231 | // Is a constant with NO inputs? |
adlertz@5539 | 232 | if (parent_node->req() == 1) { |
adlertz@5539 | 233 | parent_node->set_req(0, _root); |
duke@435 | 234 | } |
duke@435 | 235 | } |
duke@435 | 236 | } |
duke@435 | 237 | |
duke@435 | 238 | // First, visit all inputs and force them to get a block. If an |
duke@435 | 239 | // input is already in a block we quit following inputs (to avoid |
duke@435 | 240 | // cycles). Instead we put that Node on a worklist to be handled |
duke@435 | 241 | // later (since ITS inputs may not have a block yet). |
adlertz@5539 | 242 | |
adlertz@5539 | 243 | // Assume all n's inputs will be processed |
adlertz@5539 | 244 | bool done = true; |
adlertz@5539 | 245 | |
adlertz@5539 | 246 | while (input_index < parent_node->len()) { |
adlertz@5539 | 247 | Node* in = parent_node->in(input_index++); |
adlertz@5539 | 248 | if (in == NULL) { |
adlertz@5539 | 249 | continue; |
adlertz@5539 | 250 | } |
adlertz@5539 | 251 | |
duke@435 | 252 | int is_visited = visited.test_set(in->_idx); |
adlertz@5539 | 253 | if (!has_block(in)) { |
duke@435 | 254 | if (is_visited) { |
duke@435 | 255 | return false; |
duke@435 | 256 | } |
adlertz@5539 | 257 | // Save parent node and next input's index. |
adlertz@5539 | 258 | nstack.push(parent_node, input_index); |
adlertz@5539 | 259 | // Process current input now. |
adlertz@5539 | 260 | parent_node = in; |
adlertz@5539 | 261 | input_index = 0; |
adlertz@5539 | 262 | // Not all n's inputs processed. |
adlertz@5539 | 263 | done = false; |
adlertz@5539 | 264 | break; |
adlertz@5539 | 265 | } else if (!is_visited) { |
adlertz@5539 | 266 | // Visit this guy later, using worklist |
adlertz@5539 | 267 | roots.push(in); |
duke@435 | 268 | } |
duke@435 | 269 | } |
adlertz@5539 | 270 | |
duke@435 | 271 | if (done) { |
duke@435 | 272 | // All of n's inputs have been processed, complete post-processing. |
duke@435 | 273 | |
duke@435 | 274 | // Some instructions are pinned into a block. These include Region, |
duke@435 | 275 | // Phi, Start, Return, and other control-dependent instructions and |
duke@435 | 276 | // any projections which depend on them. |
adlertz@5539 | 277 | if (!parent_node->pinned()) { |
duke@435 | 278 | // Set earliest legal block. |
adlertz@5539 | 279 | Block* earliest_block = find_deepest_input(parent_node, this); |
adlertz@5539 | 280 | map_node_to_block(parent_node, earliest_block); |
kvn@1036 | 281 | } else { |
adlertz@5539 | 282 | assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge"); |
duke@435 | 283 | } |
duke@435 | 284 | |
duke@435 | 285 | if (nstack.is_empty()) { |
duke@435 | 286 | // Finished all nodes on stack. |
duke@435 | 287 | // Process next node on the worklist 'roots'. |
duke@435 | 288 | break; |
duke@435 | 289 | } |
duke@435 | 290 | // Get saved parent node and next input's index. |
adlertz@5539 | 291 | parent_node = nstack.node(); |
adlertz@5539 | 292 | input_index = nstack.index(); |
duke@435 | 293 | nstack.pop(); |
adlertz@5539 | 294 | } |
adlertz@5539 | 295 | } |
adlertz@5539 | 296 | } |
duke@435 | 297 | return true; |
duke@435 | 298 | } |
duke@435 | 299 | |
duke@435 | 300 | //------------------------------dom_lca---------------------------------------- |
duke@435 | 301 | // Find least common ancestor in dominator tree |
duke@435 | 302 | // LCA is a current notion of LCA, to be raised above 'this'. |
duke@435 | 303 | // As a convenient boundary condition, return 'this' if LCA is NULL. |
duke@435 | 304 | // Find the LCA of those two nodes. |
duke@435 | 305 | Block* Block::dom_lca(Block* LCA) { |
duke@435 | 306 | if (LCA == NULL || LCA == this) return this; |
duke@435 | 307 | |
duke@435 | 308 | Block* anc = this; |
duke@435 | 309 | while (anc->_dom_depth > LCA->_dom_depth) |
duke@435 | 310 | anc = anc->_idom; // Walk up till anc is as high as LCA |
duke@435 | 311 | |
duke@435 | 312 | while (LCA->_dom_depth > anc->_dom_depth) |
duke@435 | 313 | LCA = LCA->_idom; // Walk up till LCA is as high as anc |
duke@435 | 314 | |
duke@435 | 315 | while (LCA != anc) { // Walk both up till they are the same |
duke@435 | 316 | LCA = LCA->_idom; |
duke@435 | 317 | anc = anc->_idom; |
duke@435 | 318 | } |
duke@435 | 319 | |
duke@435 | 320 | return LCA; |
duke@435 | 321 | } |
duke@435 | 322 | |
duke@435 | 323 | //--------------------------raise_LCA_above_use-------------------------------- |
duke@435 | 324 | // We are placing a definition, and have been given a def->use edge. |
duke@435 | 325 | // The definition must dominate the use, so move the LCA upward in the |
duke@435 | 326 | // dominator tree to dominate the use. If the use is a phi, adjust |
duke@435 | 327 | // the LCA only with the phi input paths which actually use this def. |
adlertz@5509 | 328 | static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) { |
adlertz@5509 | 329 | Block* buse = cfg->get_block_for_node(use); |
duke@435 | 330 | if (buse == NULL) return LCA; // Unused killing Projs have no use block |
duke@435 | 331 | if (!use->is_Phi()) return buse->dom_lca(LCA); |
duke@435 | 332 | uint pmax = use->req(); // Number of Phi inputs |
duke@435 | 333 | // Why does not this loop just break after finding the matching input to |
duke@435 | 334 | // the Phi? Well...it's like this. I do not have true def-use/use-def |
duke@435 | 335 | // chains. Means I cannot distinguish, from the def-use direction, which |
duke@435 | 336 | // of many use-defs lead from the same use to the same def. That is, this |
duke@435 | 337 | // Phi might have several uses of the same def. Each use appears in a |
duke@435 | 338 | // different predecessor block. But when I enter here, I cannot distinguish |
duke@435 | 339 | // which use-def edge I should find the predecessor block for. So I find |
duke@435 | 340 | // them all. Means I do a little extra work if a Phi uses the same value |
duke@435 | 341 | // more than once. |
duke@435 | 342 | for (uint j=1; j<pmax; j++) { // For all inputs |
duke@435 | 343 | if (use->in(j) == def) { // Found matching input? |
adlertz@5509 | 344 | Block* pred = cfg->get_block_for_node(buse->pred(j)); |
duke@435 | 345 | LCA = pred->dom_lca(LCA); |
duke@435 | 346 | } |
duke@435 | 347 | } |
duke@435 | 348 | return LCA; |
duke@435 | 349 | } |
duke@435 | 350 | |
duke@435 | 351 | //----------------------------raise_LCA_above_marks---------------------------- |
duke@435 | 352 | // Return a new LCA that dominates LCA and any of its marked predecessors. |
duke@435 | 353 | // Search all my parents up to 'early' (exclusive), looking for predecessors |
duke@435 | 354 | // which are marked with the given index. Return the LCA (in the dom tree) |
duke@435 | 355 | // of all marked blocks. If there are none marked, return the original |
duke@435 | 356 | // LCA. |
adlertz@5509 | 357 | static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) { |
duke@435 | 358 | Block_List worklist; |
duke@435 | 359 | worklist.push(LCA); |
duke@435 | 360 | while (worklist.size() > 0) { |
duke@435 | 361 | Block* mid = worklist.pop(); |
duke@435 | 362 | if (mid == early) continue; // stop searching here |
duke@435 | 363 | |
duke@435 | 364 | // Test and set the visited bit. |
duke@435 | 365 | if (mid->raise_LCA_visited() == mark) continue; // already visited |
duke@435 | 366 | |
duke@435 | 367 | // Don't process the current LCA, otherwise the search may terminate early |
duke@435 | 368 | if (mid != LCA && mid->raise_LCA_mark() == mark) { |
duke@435 | 369 | // Raise the LCA. |
duke@435 | 370 | LCA = mid->dom_lca(LCA); |
duke@435 | 371 | if (LCA == early) break; // stop searching everywhere |
duke@435 | 372 | assert(early->dominates(LCA), "early is high enough"); |
duke@435 | 373 | // Resume searching at that point, skipping intermediate levels. |
duke@435 | 374 | worklist.push(LCA); |
kvn@650 | 375 | if (LCA == mid) |
kvn@650 | 376 | continue; // Don't mark as visited to avoid early termination. |
duke@435 | 377 | } else { |
duke@435 | 378 | // Keep searching through this block's predecessors. |
duke@435 | 379 | for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) { |
adlertz@5509 | 380 | Block* mid_parent = cfg->get_block_for_node(mid->pred(j)); |
duke@435 | 381 | worklist.push(mid_parent); |
duke@435 | 382 | } |
duke@435 | 383 | } |
kvn@650 | 384 | mid->set_raise_LCA_visited(mark); |
duke@435 | 385 | } |
duke@435 | 386 | return LCA; |
duke@435 | 387 | } |
duke@435 | 388 | |
duke@435 | 389 | //--------------------------memory_early_block-------------------------------- |
duke@435 | 390 | // This is a variation of find_deepest_input, the heart of schedule_early. |
duke@435 | 391 | // Find the "early" block for a load, if we considered only memory and |
duke@435 | 392 | // address inputs, that is, if other data inputs were ignored. |
duke@435 | 393 | // |
duke@435 | 394 | // Because a subset of edges are considered, the resulting block will |
duke@435 | 395 | // be earlier (at a shallower dom_depth) than the true schedule_early |
duke@435 | 396 | // point of the node. We compute this earlier block as a more permissive |
duke@435 | 397 | // site for anti-dependency insertion, but only if subsume_loads is enabled. |
adlertz@5509 | 398 | static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) { |
duke@435 | 399 | Node* base; |
duke@435 | 400 | Node* index; |
duke@435 | 401 | Node* store = load->in(MemNode::Memory); |
duke@435 | 402 | load->as_Mach()->memory_inputs(base, index); |
duke@435 | 403 | |
duke@435 | 404 | assert(base != NodeSentinel && index != NodeSentinel, |
duke@435 | 405 | "unexpected base/index inputs"); |
duke@435 | 406 | |
duke@435 | 407 | Node* mem_inputs[4]; |
duke@435 | 408 | int mem_inputs_length = 0; |
duke@435 | 409 | if (base != NULL) mem_inputs[mem_inputs_length++] = base; |
duke@435 | 410 | if (index != NULL) mem_inputs[mem_inputs_length++] = index; |
duke@435 | 411 | if (store != NULL) mem_inputs[mem_inputs_length++] = store; |
duke@435 | 412 | |
duke@435 | 413 | // In the comparison below, add one to account for the control input, |
duke@435 | 414 | // which may be null, but always takes up a spot in the in array. |
duke@435 | 415 | if (mem_inputs_length + 1 < (int) load->req()) { |
duke@435 | 416 | // This "load" has more inputs than just the memory, base and index inputs. |
duke@435 | 417 | // For purposes of checking anti-dependences, we need to start |
duke@435 | 418 | // from the early block of only the address portion of the instruction, |
duke@435 | 419 | // and ignore other blocks that may have factored into the wider |
duke@435 | 420 | // schedule_early calculation. |
duke@435 | 421 | if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0); |
duke@435 | 422 | |
duke@435 | 423 | Block* deepb = NULL; // Deepest block so far |
duke@435 | 424 | int deepb_dom_depth = 0; |
duke@435 | 425 | for (int i = 0; i < mem_inputs_length; i++) { |
adlertz@5509 | 426 | Block* inb = cfg->get_block_for_node(mem_inputs[i]); |
duke@435 | 427 | if (deepb_dom_depth < (int) inb->_dom_depth) { |
duke@435 | 428 | // The new inb must be dominated by the previous deepb. |
duke@435 | 429 | // The various inputs must be linearly ordered in the dom |
duke@435 | 430 | // tree, or else there will not be a unique deepest block. |
adlertz@5509 | 431 | DEBUG_ONLY(assert_dom(deepb, inb, load, cfg)); |
duke@435 | 432 | deepb = inb; // Save deepest block |
duke@435 | 433 | deepb_dom_depth = deepb->_dom_depth; |
duke@435 | 434 | } |
duke@435 | 435 | } |
duke@435 | 436 | early = deepb; |
duke@435 | 437 | } |
duke@435 | 438 | |
duke@435 | 439 | return early; |
duke@435 | 440 | } |
duke@435 | 441 | |
duke@435 | 442 | //--------------------------insert_anti_dependences--------------------------- |
duke@435 | 443 | // A load may need to witness memory that nearby stores can overwrite. |
duke@435 | 444 | // For each nearby store, either insert an "anti-dependence" edge |
duke@435 | 445 | // from the load to the store, or else move LCA upward to force the |
duke@435 | 446 | // load to (eventually) be scheduled in a block above the store. |
duke@435 | 447 | // |
duke@435 | 448 | // Do not add edges to stores on distinct control-flow paths; |
duke@435 | 449 | // only add edges to stores which might interfere. |
duke@435 | 450 | // |
duke@435 | 451 | // Return the (updated) LCA. There will not be any possibly interfering |
duke@435 | 452 | // store between the load's "early block" and the updated LCA. |
duke@435 | 453 | // Any stores in the updated LCA will have new precedence edges |
duke@435 | 454 | // back to the load. The caller is expected to schedule the load |
duke@435 | 455 | // in the LCA, in which case the precedence edges will make LCM |
duke@435 | 456 | // preserve anti-dependences. The caller may also hoist the load |
duke@435 | 457 | // above the LCA, if it is not the early block. |
duke@435 | 458 | Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) { |
duke@435 | 459 | assert(load->needs_anti_dependence_check(), "must be a load of some sort"); |
duke@435 | 460 | assert(LCA != NULL, ""); |
duke@435 | 461 | DEBUG_ONLY(Block* LCA_orig = LCA); |
duke@435 | 462 | |
duke@435 | 463 | // Compute the alias index. Loads and stores with different alias indices |
duke@435 | 464 | // do not need anti-dependence edges. |
duke@435 | 465 | uint load_alias_idx = C->get_alias_index(load->adr_type()); |
duke@435 | 466 | #ifdef ASSERT |
duke@435 | 467 | if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 && |
duke@435 | 468 | (PrintOpto || VerifyAliases || |
duke@435 | 469 | PrintMiscellaneous && (WizardMode || Verbose))) { |
duke@435 | 470 | // Load nodes should not consume all of memory. |
duke@435 | 471 | // Reporting a bottom type indicates a bug in adlc. |
duke@435 | 472 | // If some particular type of node validly consumes all of memory, |
duke@435 | 473 | // sharpen the preceding "if" to exclude it, so we can catch bugs here. |
duke@435 | 474 | tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory."); |
duke@435 | 475 | load->dump(2); |
duke@435 | 476 | if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, ""); |
duke@435 | 477 | } |
duke@435 | 478 | #endif |
duke@435 | 479 | assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp), |
duke@435 | 480 | "String compare is the only known 'load' that does not conflict with any stores"); |
cfang@1116 | 481 | assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals), |
cfang@1116 | 482 | "String equals is a 'load' that does not conflict with any stores"); |
cfang@1116 | 483 | assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf), |
cfang@1116 | 484 | "String indexOf is a 'load' that does not conflict with any stores"); |
cfang@1116 | 485 | assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq), |
cfang@1116 | 486 | "Arrays equals is a 'load' that does not conflict with any stores"); |
duke@435 | 487 | |
duke@435 | 488 | if (!C->alias_type(load_alias_idx)->is_rewritable()) { |
duke@435 | 489 | // It is impossible to spoil this load by putting stores before it, |
duke@435 | 490 | // because we know that the stores will never update the value |
duke@435 | 491 | // which 'load' must witness. |
duke@435 | 492 | return LCA; |
duke@435 | 493 | } |
duke@435 | 494 | |
duke@435 | 495 | node_idx_t load_index = load->_idx; |
duke@435 | 496 | |
duke@435 | 497 | // Note the earliest legal placement of 'load', as determined by |
duke@435 | 498 | // by the unique point in the dom tree where all memory effects |
duke@435 | 499 | // and other inputs are first available. (Computed by schedule_early.) |
duke@435 | 500 | // For normal loads, 'early' is the shallowest place (dom graph wise) |
duke@435 | 501 | // to look for anti-deps between this load and any store. |
adlertz@5509 | 502 | Block* early = get_block_for_node(load); |
duke@435 | 503 | |
duke@435 | 504 | // If we are subsuming loads, compute an "early" block that only considers |
duke@435 | 505 | // memory or address inputs. This block may be different than the |
duke@435 | 506 | // schedule_early block in that it could be at an even shallower depth in the |
duke@435 | 507 | // dominator tree, and allow for a broader discovery of anti-dependences. |
duke@435 | 508 | if (C->subsume_loads()) { |
adlertz@5509 | 509 | early = memory_early_block(load, early, this); |
duke@435 | 510 | } |
duke@435 | 511 | |
duke@435 | 512 | ResourceArea *area = Thread::current()->resource_area(); |
duke@435 | 513 | Node_List worklist_mem(area); // prior memory state to store |
duke@435 | 514 | Node_List worklist_store(area); // possible-def to explore |
kvn@466 | 515 | Node_List worklist_visited(area); // visited mergemem nodes |
duke@435 | 516 | Node_List non_early_stores(area); // all relevant stores outside of early |
duke@435 | 517 | bool must_raise_LCA = false; |
duke@435 | 518 | |
duke@435 | 519 | #ifdef TRACK_PHI_INPUTS |
duke@435 | 520 | // %%% This extra checking fails because MergeMem nodes are not GVNed. |
duke@435 | 521 | // Provide "phi_inputs" to check if every input to a PhiNode is from the |
duke@435 | 522 | // original memory state. This indicates a PhiNode which should not |
duke@435 | 523 | // prevent the load from sinking. For such a block, set_raise_LCA_mark |
duke@435 | 524 | // may be overly conservative. |
duke@435 | 525 | // Mechanism: count inputs seen for each Phi encountered in worklist_store. |
duke@435 | 526 | DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0)); |
duke@435 | 527 | #endif |
duke@435 | 528 | |
duke@435 | 529 | // 'load' uses some memory state; look for users of the same state. |
duke@435 | 530 | // Recurse through MergeMem nodes to the stores that use them. |
duke@435 | 531 | |
duke@435 | 532 | // Each of these stores is a possible definition of memory |
duke@435 | 533 | // that 'load' needs to use. We need to force 'load' |
duke@435 | 534 | // to occur before each such store. When the store is in |
duke@435 | 535 | // the same block as 'load', we insert an anti-dependence |
duke@435 | 536 | // edge load->store. |
duke@435 | 537 | |
duke@435 | 538 | // The relevant stores "nearby" the load consist of a tree rooted |
duke@435 | 539 | // at initial_mem, with internal nodes of type MergeMem. |
duke@435 | 540 | // Therefore, the branches visited by the worklist are of this form: |
duke@435 | 541 | // initial_mem -> (MergeMem ->)* store |
duke@435 | 542 | // The anti-dependence constraints apply only to the fringe of this tree. |
duke@435 | 543 | |
duke@435 | 544 | Node* initial_mem = load->in(MemNode::Memory); |
duke@435 | 545 | worklist_store.push(initial_mem); |
kvn@466 | 546 | worklist_visited.push(initial_mem); |
duke@435 | 547 | worklist_mem.push(NULL); |
duke@435 | 548 | while (worklist_store.size() > 0) { |
duke@435 | 549 | // Examine a nearby store to see if it might interfere with our load. |
duke@435 | 550 | Node* mem = worklist_mem.pop(); |
duke@435 | 551 | Node* store = worklist_store.pop(); |
duke@435 | 552 | uint op = store->Opcode(); |
duke@435 | 553 | |
duke@435 | 554 | // MergeMems do not directly have anti-deps. |
duke@435 | 555 | // Treat them as internal nodes in a forward tree of memory states, |
duke@435 | 556 | // the leaves of which are each a 'possible-def'. |
duke@435 | 557 | if (store == initial_mem // root (exclusive) of tree we are searching |
duke@435 | 558 | || op == Op_MergeMem // internal node of tree we are searching |
duke@435 | 559 | ) { |
duke@435 | 560 | mem = store; // It's not a possibly interfering store. |
kvn@466 | 561 | if (store == initial_mem) |
kvn@466 | 562 | initial_mem = NULL; // only process initial memory once |
kvn@466 | 563 | |
duke@435 | 564 | for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
duke@435 | 565 | store = mem->fast_out(i); |
duke@435 | 566 | if (store->is_MergeMem()) { |
duke@435 | 567 | // Be sure we don't get into combinatorial problems. |
duke@435 | 568 | // (Allow phis to be repeated; they can merge two relevant states.) |
kvn@466 | 569 | uint j = worklist_visited.size(); |
kvn@466 | 570 | for (; j > 0; j--) { |
kvn@466 | 571 | if (worklist_visited.at(j-1) == store) break; |
duke@435 | 572 | } |
kvn@466 | 573 | if (j > 0) continue; // already on work list; do not repeat |
kvn@466 | 574 | worklist_visited.push(store); |
duke@435 | 575 | } |
duke@435 | 576 | worklist_mem.push(mem); |
duke@435 | 577 | worklist_store.push(store); |
duke@435 | 578 | } |
duke@435 | 579 | continue; |
duke@435 | 580 | } |
duke@435 | 581 | |
duke@435 | 582 | if (op == Op_MachProj || op == Op_Catch) continue; |
duke@435 | 583 | if (store->needs_anti_dependence_check()) continue; // not really a store |
duke@435 | 584 | |
duke@435 | 585 | // Compute the alias index. Loads and stores with different alias |
duke@435 | 586 | // indices do not need anti-dependence edges. Wide MemBar's are |
duke@435 | 587 | // anti-dependent on everything (except immutable memories). |
duke@435 | 588 | const TypePtr* adr_type = store->adr_type(); |
duke@435 | 589 | if (!C->can_alias(adr_type, load_alias_idx)) continue; |
duke@435 | 590 | |
duke@435 | 591 | // Most slow-path runtime calls do NOT modify Java memory, but |
duke@435 | 592 | // they can block and so write Raw memory. |
duke@435 | 593 | if (store->is_Mach()) { |
duke@435 | 594 | MachNode* mstore = store->as_Mach(); |
duke@435 | 595 | if (load_alias_idx != Compile::AliasIdxRaw) { |
duke@435 | 596 | // Check for call into the runtime using the Java calling |
duke@435 | 597 | // convention (and from there into a wrapper); it has no |
duke@435 | 598 | // _method. Can't do this optimization for Native calls because |
duke@435 | 599 | // they CAN write to Java memory. |
duke@435 | 600 | if (mstore->ideal_Opcode() == Op_CallStaticJava) { |
duke@435 | 601 | assert(mstore->is_MachSafePoint(), ""); |
duke@435 | 602 | MachSafePointNode* ms = (MachSafePointNode*) mstore; |
duke@435 | 603 | assert(ms->is_MachCallJava(), ""); |
duke@435 | 604 | MachCallJavaNode* mcj = (MachCallJavaNode*) ms; |
duke@435 | 605 | if (mcj->_method == NULL) { |
duke@435 | 606 | // These runtime calls do not write to Java visible memory |
duke@435 | 607 | // (other than Raw) and so do not require anti-dependence edges. |
duke@435 | 608 | continue; |
duke@435 | 609 | } |
duke@435 | 610 | } |
duke@435 | 611 | // Same for SafePoints: they read/write Raw but only read otherwise. |
duke@435 | 612 | // This is basically a workaround for SafePoints only defining control |
duke@435 | 613 | // instead of control + memory. |
duke@435 | 614 | if (mstore->ideal_Opcode() == Op_SafePoint) |
duke@435 | 615 | continue; |
duke@435 | 616 | } else { |
duke@435 | 617 | // Some raw memory, such as the load of "top" at an allocation, |
duke@435 | 618 | // can be control dependent on the previous safepoint. See |
duke@435 | 619 | // comments in GraphKit::allocate_heap() about control input. |
duke@435 | 620 | // Inserting an anti-dep between such a safepoint and a use |
duke@435 | 621 | // creates a cycle, and will cause a subsequent failure in |
duke@435 | 622 | // local scheduling. (BugId 4919904) |
duke@435 | 623 | // (%%% How can a control input be a safepoint and not a projection??) |
duke@435 | 624 | if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore) |
duke@435 | 625 | continue; |
duke@435 | 626 | } |
duke@435 | 627 | } |
duke@435 | 628 | |
duke@435 | 629 | // Identify a block that the current load must be above, |
duke@435 | 630 | // or else observe that 'store' is all the way up in the |
duke@435 | 631 | // earliest legal block for 'load'. In the latter case, |
duke@435 | 632 | // immediately insert an anti-dependence edge. |
adlertz@5509 | 633 | Block* store_block = get_block_for_node(store); |
duke@435 | 634 | assert(store_block != NULL, "unused killing projections skipped above"); |
duke@435 | 635 | |
duke@435 | 636 | if (store->is_Phi()) { |
duke@435 | 637 | // 'load' uses memory which is one (or more) of the Phi's inputs. |
duke@435 | 638 | // It must be scheduled not before the Phi, but rather before |
duke@435 | 639 | // each of the relevant Phi inputs. |
duke@435 | 640 | // |
duke@435 | 641 | // Instead of finding the LCA of all inputs to a Phi that match 'mem', |
duke@435 | 642 | // we mark each corresponding predecessor block and do a combined |
duke@435 | 643 | // hoisting operation later (raise_LCA_above_marks). |
duke@435 | 644 | // |
duke@435 | 645 | // Do not assert(store_block != early, "Phi merging memory after access") |
duke@435 | 646 | // PhiNode may be at start of block 'early' with backedge to 'early' |
duke@435 | 647 | DEBUG_ONLY(bool found_match = false); |
duke@435 | 648 | for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) { |
duke@435 | 649 | if (store->in(j) == mem) { // Found matching input? |
duke@435 | 650 | DEBUG_ONLY(found_match = true); |
adlertz@5509 | 651 | Block* pred_block = get_block_for_node(store_block->pred(j)); |
duke@435 | 652 | if (pred_block != early) { |
duke@435 | 653 | // If any predecessor of the Phi matches the load's "early block", |
duke@435 | 654 | // we do not need a precedence edge between the Phi and 'load' |
twisti@1040 | 655 | // since the load will be forced into a block preceding the Phi. |
duke@435 | 656 | pred_block->set_raise_LCA_mark(load_index); |
duke@435 | 657 | assert(!LCA_orig->dominates(pred_block) || |
duke@435 | 658 | early->dominates(pred_block), "early is high enough"); |
duke@435 | 659 | must_raise_LCA = true; |
kvn@1223 | 660 | } else { |
kvn@1223 | 661 | // anti-dependent upon PHI pinned below 'early', no edge needed |
kvn@1223 | 662 | LCA = early; // but can not schedule below 'early' |
duke@435 | 663 | } |
duke@435 | 664 | } |
duke@435 | 665 | } |
duke@435 | 666 | assert(found_match, "no worklist bug"); |
duke@435 | 667 | #ifdef TRACK_PHI_INPUTS |
duke@435 | 668 | #ifdef ASSERT |
duke@435 | 669 | // This assert asks about correct handling of PhiNodes, which may not |
duke@435 | 670 | // have all input edges directly from 'mem'. See BugId 4621264 |
duke@435 | 671 | int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1; |
duke@435 | 672 | // Increment by exactly one even if there are multiple copies of 'mem' |
duke@435 | 673 | // coming into the phi, because we will run this block several times |
duke@435 | 674 | // if there are several copies of 'mem'. (That's how DU iterators work.) |
duke@435 | 675 | phi_inputs.at_put(store->_idx, num_mem_inputs); |
duke@435 | 676 | assert(PhiNode::Input + num_mem_inputs < store->req(), |
duke@435 | 677 | "Expect at least one phi input will not be from original memory state"); |
duke@435 | 678 | #endif //ASSERT |
duke@435 | 679 | #endif //TRACK_PHI_INPUTS |
duke@435 | 680 | } else if (store_block != early) { |
duke@435 | 681 | // 'store' is between the current LCA and earliest possible block. |
duke@435 | 682 | // Label its block, and decide later on how to raise the LCA |
duke@435 | 683 | // to include the effect on LCA of this store. |
duke@435 | 684 | // If this store's block gets chosen as the raised LCA, we |
duke@435 | 685 | // will find him on the non_early_stores list and stick him |
duke@435 | 686 | // with a precedence edge. |
duke@435 | 687 | // (But, don't bother if LCA is already raised all the way.) |
duke@435 | 688 | if (LCA != early) { |
duke@435 | 689 | store_block->set_raise_LCA_mark(load_index); |
duke@435 | 690 | must_raise_LCA = true; |
duke@435 | 691 | non_early_stores.push(store); |
duke@435 | 692 | } |
duke@435 | 693 | } else { |
duke@435 | 694 | // Found a possibly-interfering store in the load's 'early' block. |
duke@435 | 695 | // This means 'load' cannot sink at all in the dominator tree. |
duke@435 | 696 | // Add an anti-dep edge, and squeeze 'load' into the highest block. |
duke@435 | 697 | assert(store != load->in(0), "dependence cycle found"); |
duke@435 | 698 | if (verify) { |
duke@435 | 699 | assert(store->find_edge(load) != -1, "missing precedence edge"); |
duke@435 | 700 | } else { |
duke@435 | 701 | store->add_prec(load); |
duke@435 | 702 | } |
duke@435 | 703 | LCA = early; |
duke@435 | 704 | // This turns off the process of gathering non_early_stores. |
duke@435 | 705 | } |
duke@435 | 706 | } |
duke@435 | 707 | // (Worklist is now empty; all nearby stores have been visited.) |
duke@435 | 708 | |
duke@435 | 709 | // Finished if 'load' must be scheduled in its 'early' block. |
duke@435 | 710 | // If we found any stores there, they have already been given |
duke@435 | 711 | // precedence edges. |
duke@435 | 712 | if (LCA == early) return LCA; |
duke@435 | 713 | |
duke@435 | 714 | // We get here only if there are no possibly-interfering stores |
duke@435 | 715 | // in the load's 'early' block. Move LCA up above all predecessors |
duke@435 | 716 | // which contain stores we have noted. |
duke@435 | 717 | // |
duke@435 | 718 | // The raised LCA block can be a home to such interfering stores, |
duke@435 | 719 | // but its predecessors must not contain any such stores. |
duke@435 | 720 | // |
duke@435 | 721 | // The raised LCA will be a lower bound for placing the load, |
duke@435 | 722 | // preventing the load from sinking past any block containing |
duke@435 | 723 | // a store that may invalidate the memory state required by 'load'. |
duke@435 | 724 | if (must_raise_LCA) |
adlertz@5509 | 725 | LCA = raise_LCA_above_marks(LCA, load->_idx, early, this); |
duke@435 | 726 | if (LCA == early) return LCA; |
duke@435 | 727 | |
duke@435 | 728 | // Insert anti-dependence edges from 'load' to each store |
duke@435 | 729 | // in the non-early LCA block. |
duke@435 | 730 | // Mine the non_early_stores list for such stores. |
duke@435 | 731 | if (LCA->raise_LCA_mark() == load_index) { |
duke@435 | 732 | while (non_early_stores.size() > 0) { |
duke@435 | 733 | Node* store = non_early_stores.pop(); |
adlertz@5509 | 734 | Block* store_block = get_block_for_node(store); |
duke@435 | 735 | if (store_block == LCA) { |
duke@435 | 736 | // add anti_dependence from store to load in its own block |
duke@435 | 737 | assert(store != load->in(0), "dependence cycle found"); |
duke@435 | 738 | if (verify) { |
duke@435 | 739 | assert(store->find_edge(load) != -1, "missing precedence edge"); |
duke@435 | 740 | } else { |
duke@435 | 741 | store->add_prec(load); |
duke@435 | 742 | } |
duke@435 | 743 | } else { |
duke@435 | 744 | assert(store_block->raise_LCA_mark() == load_index, "block was marked"); |
duke@435 | 745 | // Any other stores we found must be either inside the new LCA |
duke@435 | 746 | // or else outside the original LCA. In the latter case, they |
duke@435 | 747 | // did not interfere with any use of 'load'. |
duke@435 | 748 | assert(LCA->dominates(store_block) |
duke@435 | 749 | || !LCA_orig->dominates(store_block), "no stray stores"); |
duke@435 | 750 | } |
duke@435 | 751 | } |
duke@435 | 752 | } |
duke@435 | 753 | |
duke@435 | 754 | // Return the highest block containing stores; any stores |
duke@435 | 755 | // within that block have been given anti-dependence edges. |
duke@435 | 756 | return LCA; |
duke@435 | 757 | } |
duke@435 | 758 | |
duke@435 | 759 | // This class is used to iterate backwards over the nodes in the graph. |
duke@435 | 760 | |
duke@435 | 761 | class Node_Backward_Iterator { |
duke@435 | 762 | |
duke@435 | 763 | private: |
duke@435 | 764 | Node_Backward_Iterator(); |
duke@435 | 765 | |
duke@435 | 766 | public: |
duke@435 | 767 | // Constructor for the iterator |
adlertz@5509 | 768 | Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg); |
duke@435 | 769 | |
duke@435 | 770 | // Postincrement operator to iterate over the nodes |
duke@435 | 771 | Node *next(); |
duke@435 | 772 | |
duke@435 | 773 | private: |
duke@435 | 774 | VectorSet &_visited; |
duke@435 | 775 | Node_List &_stack; |
adlertz@5509 | 776 | PhaseCFG &_cfg; |
duke@435 | 777 | }; |
duke@435 | 778 | |
duke@435 | 779 | // Constructor for the Node_Backward_Iterator |
adlertz@5509 | 780 | Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg) |
adlertz@5509 | 781 | : _visited(visited), _stack(stack), _cfg(cfg) { |
duke@435 | 782 | // The stack should contain exactly the root |
duke@435 | 783 | stack.clear(); |
duke@435 | 784 | stack.push(root); |
duke@435 | 785 | |
duke@435 | 786 | // Clear the visited bits |
duke@435 | 787 | visited.Clear(); |
duke@435 | 788 | } |
duke@435 | 789 | |
duke@435 | 790 | // Iterator for the Node_Backward_Iterator |
duke@435 | 791 | Node *Node_Backward_Iterator::next() { |
duke@435 | 792 | |
duke@435 | 793 | // If the _stack is empty, then just return NULL: finished. |
duke@435 | 794 | if ( !_stack.size() ) |
duke@435 | 795 | return NULL; |
duke@435 | 796 | |
duke@435 | 797 | // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been |
duke@435 | 798 | // made stateless, so I do not need to record the index 'i' on my _stack. |
duke@435 | 799 | // Instead I visit all users each time, scanning for unvisited users. |
duke@435 | 800 | // I visit unvisited not-anti-dependence users first, then anti-dependent |
duke@435 | 801 | // children next. |
duke@435 | 802 | Node *self = _stack.pop(); |
duke@435 | 803 | |
duke@435 | 804 | // I cycle here when I am entering a deeper level of recursion. |
duke@435 | 805 | // The key variable 'self' was set prior to jumping here. |
duke@435 | 806 | while( 1 ) { |
duke@435 | 807 | |
duke@435 | 808 | _visited.set(self->_idx); |
duke@435 | 809 | |
duke@435 | 810 | // Now schedule all uses as late as possible. |
adlertz@5509 | 811 | const Node* src = self->is_Proj() ? self->in(0) : self; |
adlertz@5509 | 812 | uint src_rpo = _cfg.get_block_for_node(src)->_rpo; |
duke@435 | 813 | |
duke@435 | 814 | // Schedule all nodes in a post-order visit |
duke@435 | 815 | Node *unvisited = NULL; // Unvisited anti-dependent Node, if any |
duke@435 | 816 | |
duke@435 | 817 | // Scan for unvisited nodes |
duke@435 | 818 | for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { |
duke@435 | 819 | // For all uses, schedule late |
duke@435 | 820 | Node* n = self->fast_out(i); // Use |
duke@435 | 821 | |
duke@435 | 822 | // Skip already visited children |
duke@435 | 823 | if ( _visited.test(n->_idx) ) |
duke@435 | 824 | continue; |
duke@435 | 825 | |
duke@435 | 826 | // do not traverse backward control edges |
duke@435 | 827 | Node *use = n->is_Proj() ? n->in(0) : n; |
adlertz@5509 | 828 | uint use_rpo = _cfg.get_block_for_node(use)->_rpo; |
duke@435 | 829 | |
duke@435 | 830 | if ( use_rpo < src_rpo ) |
duke@435 | 831 | continue; |
duke@435 | 832 | |
duke@435 | 833 | // Phi nodes always precede uses in a basic block |
duke@435 | 834 | if ( use_rpo == src_rpo && use->is_Phi() ) |
duke@435 | 835 | continue; |
duke@435 | 836 | |
duke@435 | 837 | unvisited = n; // Found unvisited |
duke@435 | 838 | |
duke@435 | 839 | // Check for possible-anti-dependent |
duke@435 | 840 | if( !n->needs_anti_dependence_check() ) |
duke@435 | 841 | break; // Not visited, not anti-dep; schedule it NOW |
duke@435 | 842 | } |
duke@435 | 843 | |
duke@435 | 844 | // Did I find an unvisited not-anti-dependent Node? |
duke@435 | 845 | if ( !unvisited ) |
duke@435 | 846 | break; // All done with children; post-visit 'self' |
duke@435 | 847 | |
duke@435 | 848 | // Visit the unvisited Node. Contains the obvious push to |
duke@435 | 849 | // indicate I'm entering a deeper level of recursion. I push the |
duke@435 | 850 | // old state onto the _stack and set a new state and loop (recurse). |
duke@435 | 851 | _stack.push(self); |
duke@435 | 852 | self = unvisited; |
duke@435 | 853 | } // End recursion loop |
duke@435 | 854 | |
duke@435 | 855 | return self; |
duke@435 | 856 | } |
duke@435 | 857 | |
duke@435 | 858 | //------------------------------ComputeLatenciesBackwards---------------------- |
duke@435 | 859 | // Compute the latency of all the instructions. |
adlertz@5539 | 860 | void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) { |
duke@435 | 861 | #ifndef PRODUCT |
duke@435 | 862 | if (trace_opto_pipelining()) |
duke@435 | 863 | tty->print("\n#---- ComputeLatenciesBackwards ----\n"); |
duke@435 | 864 | #endif |
duke@435 | 865 | |
adlertz@5509 | 866 | Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); |
duke@435 | 867 | Node *n; |
duke@435 | 868 | |
duke@435 | 869 | // Walk over all the nodes from last to first |
duke@435 | 870 | while (n = iter.next()) { |
duke@435 | 871 | // Set the latency for the definitions of this instruction |
duke@435 | 872 | partial_latency_of_defs(n); |
duke@435 | 873 | } |
duke@435 | 874 | } // end ComputeLatenciesBackwards |
duke@435 | 875 | |
duke@435 | 876 | //------------------------------partial_latency_of_defs------------------------ |
duke@435 | 877 | // Compute the latency impact of this node on all defs. This computes |
duke@435 | 878 | // a number that increases as we approach the beginning of the routine. |
duke@435 | 879 | void PhaseCFG::partial_latency_of_defs(Node *n) { |
duke@435 | 880 | // Set the latency for this instruction |
duke@435 | 881 | #ifndef PRODUCT |
duke@435 | 882 | if (trace_opto_pipelining()) { |
adlertz@5539 | 883 | tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); |
duke@435 | 884 | dump(); |
duke@435 | 885 | } |
duke@435 | 886 | #endif |
duke@435 | 887 | |
adlertz@5539 | 888 | if (n->is_Proj()) { |
duke@435 | 889 | n = n->in(0); |
adlertz@5539 | 890 | } |
duke@435 | 891 | |
adlertz@5539 | 892 | if (n->is_Root()) { |
duke@435 | 893 | return; |
adlertz@5539 | 894 | } |
duke@435 | 895 | |
duke@435 | 896 | uint nlen = n->len(); |
adlertz@5539 | 897 | uint use_latency = get_latency_for_node(n); |
adlertz@5509 | 898 | uint use_pre_order = get_block_for_node(n)->_pre_order; |
duke@435 | 899 | |
adlertz@5539 | 900 | for (uint j = 0; j < nlen; j++) { |
duke@435 | 901 | Node *def = n->in(j); |
duke@435 | 902 | |
adlertz@5539 | 903 | if (!def || def == n) { |
duke@435 | 904 | continue; |
adlertz@5539 | 905 | } |
duke@435 | 906 | |
duke@435 | 907 | // Walk backwards thru projections |
adlertz@5539 | 908 | if (def->is_Proj()) { |
duke@435 | 909 | def = def->in(0); |
adlertz@5539 | 910 | } |
duke@435 | 911 | |
duke@435 | 912 | #ifndef PRODUCT |
duke@435 | 913 | if (trace_opto_pipelining()) { |
duke@435 | 914 | tty->print("# in(%2d): ", j); |
duke@435 | 915 | def->dump(); |
duke@435 | 916 | } |
duke@435 | 917 | #endif |
duke@435 | 918 | |
duke@435 | 919 | // If the defining block is not known, assume it is ok |
adlertz@5509 | 920 | Block *def_block = get_block_for_node(def); |
duke@435 | 921 | uint def_pre_order = def_block ? def_block->_pre_order : 0; |
duke@435 | 922 | |
adlertz@5539 | 923 | if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) { |
duke@435 | 924 | continue; |
adlertz@5539 | 925 | } |
duke@435 | 926 | |
duke@435 | 927 | uint delta_latency = n->latency(j); |
duke@435 | 928 | uint current_latency = delta_latency + use_latency; |
duke@435 | 929 | |
adlertz@5539 | 930 | if (get_latency_for_node(def) < current_latency) { |
adlertz@5539 | 931 | set_latency_for_node(def, current_latency); |
duke@435 | 932 | } |
duke@435 | 933 | |
duke@435 | 934 | #ifndef PRODUCT |
duke@435 | 935 | if (trace_opto_pipelining()) { |
adlertz@5539 | 936 | tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def)); |
duke@435 | 937 | } |
duke@435 | 938 | #endif |
duke@435 | 939 | } |
duke@435 | 940 | } |
duke@435 | 941 | |
duke@435 | 942 | //------------------------------latency_from_use------------------------------- |
duke@435 | 943 | // Compute the latency of a specific use |
duke@435 | 944 | int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) { |
duke@435 | 945 | // If self-reference, return no latency |
adlertz@5509 | 946 | if (use == n || use->is_Root()) { |
duke@435 | 947 | return 0; |
adlertz@5509 | 948 | } |
duke@435 | 949 | |
adlertz@5509 | 950 | uint def_pre_order = get_block_for_node(def)->_pre_order; |
duke@435 | 951 | uint latency = 0; |
duke@435 | 952 | |
duke@435 | 953 | // If the use is not a projection, then it is simple... |
duke@435 | 954 | if (!use->is_Proj()) { |
duke@435 | 955 | #ifndef PRODUCT |
duke@435 | 956 | if (trace_opto_pipelining()) { |
duke@435 | 957 | tty->print("# out(): "); |
duke@435 | 958 | use->dump(); |
duke@435 | 959 | } |
duke@435 | 960 | #endif |
duke@435 | 961 | |
adlertz@5509 | 962 | uint use_pre_order = get_block_for_node(use)->_pre_order; |
duke@435 | 963 | |
duke@435 | 964 | if (use_pre_order < def_pre_order) |
duke@435 | 965 | return 0; |
duke@435 | 966 | |
duke@435 | 967 | if (use_pre_order == def_pre_order && use->is_Phi()) |
duke@435 | 968 | return 0; |
duke@435 | 969 | |
duke@435 | 970 | uint nlen = use->len(); |
adlertz@5539 | 971 | uint nl = get_latency_for_node(use); |
duke@435 | 972 | |
duke@435 | 973 | for ( uint j=0; j<nlen; j++ ) { |
duke@435 | 974 | if (use->in(j) == n) { |
duke@435 | 975 | // Change this if we want local latencies |
duke@435 | 976 | uint ul = use->latency(j); |
duke@435 | 977 | uint l = ul + nl; |
duke@435 | 978 | if (latency < l) latency = l; |
duke@435 | 979 | #ifndef PRODUCT |
duke@435 | 980 | if (trace_opto_pipelining()) { |
duke@435 | 981 | tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d", |
duke@435 | 982 | nl, j, ul, l, latency); |
duke@435 | 983 | } |
duke@435 | 984 | #endif |
duke@435 | 985 | } |
duke@435 | 986 | } |
duke@435 | 987 | } else { |
duke@435 | 988 | // This is a projection, just grab the latency of the use(s) |
duke@435 | 989 | for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) { |
duke@435 | 990 | uint l = latency_from_use(use, def, use->fast_out(j)); |
duke@435 | 991 | if (latency < l) latency = l; |
duke@435 | 992 | } |
duke@435 | 993 | } |
duke@435 | 994 | |
duke@435 | 995 | return latency; |
duke@435 | 996 | } |
duke@435 | 997 | |
duke@435 | 998 | //------------------------------latency_from_uses------------------------------ |
duke@435 | 999 | // Compute the latency of this instruction relative to all of its uses. |
duke@435 | 1000 | // This computes a number that increases as we approach the beginning of the |
duke@435 | 1001 | // routine. |
duke@435 | 1002 | void PhaseCFG::latency_from_uses(Node *n) { |
duke@435 | 1003 | // Set the latency for this instruction |
duke@435 | 1004 | #ifndef PRODUCT |
duke@435 | 1005 | if (trace_opto_pipelining()) { |
adlertz@5539 | 1006 | tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n)); |
duke@435 | 1007 | dump(); |
duke@435 | 1008 | } |
duke@435 | 1009 | #endif |
duke@435 | 1010 | uint latency=0; |
duke@435 | 1011 | const Node *def = n->is_Proj() ? n->in(0): n; |
duke@435 | 1012 | |
duke@435 | 1013 | for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { |
duke@435 | 1014 | uint l = latency_from_use(n, def, n->fast_out(i)); |
duke@435 | 1015 | |
duke@435 | 1016 | if (latency < l) latency = l; |
duke@435 | 1017 | } |
duke@435 | 1018 | |
adlertz@5539 | 1019 | set_latency_for_node(n, latency); |
duke@435 | 1020 | } |
duke@435 | 1021 | |
duke@435 | 1022 | //------------------------------hoist_to_cheaper_block------------------------- |
duke@435 | 1023 | // Pick a block for node self, between early and LCA, that is a cheaper |
duke@435 | 1024 | // alternative to LCA. |
duke@435 | 1025 | Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { |
duke@435 | 1026 | const double delta = 1+PROB_UNLIKELY_MAG(4); |
duke@435 | 1027 | Block* least = LCA; |
duke@435 | 1028 | double least_freq = least->_freq; |
adlertz@5539 | 1029 | uint target = get_latency_for_node(self); |
adlertz@5635 | 1030 | uint start_latency = get_latency_for_node(LCA->head()); |
adlertz@5635 | 1031 | uint end_latency = get_latency_for_node(LCA->get_node(LCA->end_idx())); |
duke@435 | 1032 | bool in_latency = (target <= start_latency); |
adlertz@5509 | 1033 | const Block* root_block = get_block_for_node(_root); |
duke@435 | 1034 | |
duke@435 | 1035 | // Turn off latency scheduling if scheduling is just plain off |
duke@435 | 1036 | if (!C->do_scheduling()) |
duke@435 | 1037 | in_latency = true; |
duke@435 | 1038 | |
duke@435 | 1039 | // Do not hoist (to cover latency) instructions which target a |
duke@435 | 1040 | // single register. Hoisting stretches the live range of the |
duke@435 | 1041 | // single register and may force spilling. |
duke@435 | 1042 | MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; |
duke@435 | 1043 | if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty()) |
duke@435 | 1044 | in_latency = true; |
duke@435 | 1045 | |
duke@435 | 1046 | #ifndef PRODUCT |
duke@435 | 1047 | if (trace_opto_pipelining()) { |
adlertz@5539 | 1048 | tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self)); |
duke@435 | 1049 | self->dump(); |
duke@435 | 1050 | tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", |
duke@435 | 1051 | LCA->_pre_order, |
adlertz@5635 | 1052 | LCA->head()->_idx, |
duke@435 | 1053 | start_latency, |
adlertz@5635 | 1054 | LCA->get_node(LCA->end_idx())->_idx, |
duke@435 | 1055 | end_latency, |
duke@435 | 1056 | least_freq); |
duke@435 | 1057 | } |
duke@435 | 1058 | #endif |
duke@435 | 1059 | |
shade@4691 | 1060 | int cand_cnt = 0; // number of candidates tried |
shade@4691 | 1061 | |
duke@435 | 1062 | // Walk up the dominator tree from LCA (Lowest common ancestor) to |
duke@435 | 1063 | // the earliest legal location. Capture the least execution frequency. |
duke@435 | 1064 | while (LCA != early) { |
duke@435 | 1065 | LCA = LCA->_idom; // Follow up the dominator tree |
duke@435 | 1066 | |
duke@435 | 1067 | if (LCA == NULL) { |
duke@435 | 1068 | // Bailout without retry |
duke@435 | 1069 | C->record_method_not_compilable("late schedule failed: LCA == NULL"); |
duke@435 | 1070 | return least; |
duke@435 | 1071 | } |
duke@435 | 1072 | |
duke@435 | 1073 | // Don't hoist machine instructions to the root basic block |
duke@435 | 1074 | if (mach && LCA == root_block) |
duke@435 | 1075 | break; |
duke@435 | 1076 | |
adlertz@5635 | 1077 | uint start_lat = get_latency_for_node(LCA->head()); |
duke@435 | 1078 | uint end_idx = LCA->end_idx(); |
adlertz@5635 | 1079 | uint end_lat = get_latency_for_node(LCA->get_node(end_idx)); |
duke@435 | 1080 | double LCA_freq = LCA->_freq; |
duke@435 | 1081 | #ifndef PRODUCT |
duke@435 | 1082 | if (trace_opto_pipelining()) { |
duke@435 | 1083 | tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g", |
adlertz@5635 | 1084 | LCA->_pre_order, LCA->head()->_idx, start_lat, end_idx, end_lat, LCA_freq); |
duke@435 | 1085 | } |
duke@435 | 1086 | #endif |
shade@4691 | 1087 | cand_cnt++; |
duke@435 | 1088 | if (LCA_freq < least_freq || // Better Frequency |
shade@4691 | 1089 | (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode |
shade@4691 | 1090 | (!StressGCM && // Otherwise, choose with latency |
shade@4691 | 1091 | !in_latency && // No block containing latency |
duke@435 | 1092 | LCA_freq < least_freq * delta && // No worse frequency |
duke@435 | 1093 | target >= end_lat && // within latency range |
duke@435 | 1094 | !self->is_iteratively_computed() ) // But don't hoist IV increments |
duke@435 | 1095 | // because they may end up above other uses of their phi forcing |
duke@435 | 1096 | // their result register to be different from their input. |
duke@435 | 1097 | ) { |
duke@435 | 1098 | least = LCA; // Found cheaper block |
duke@435 | 1099 | least_freq = LCA_freq; |
duke@435 | 1100 | start_latency = start_lat; |
duke@435 | 1101 | end_latency = end_lat; |
duke@435 | 1102 | if (target <= start_lat) |
duke@435 | 1103 | in_latency = true; |
duke@435 | 1104 | } |
duke@435 | 1105 | } |
duke@435 | 1106 | |
duke@435 | 1107 | #ifndef PRODUCT |
duke@435 | 1108 | if (trace_opto_pipelining()) { |
duke@435 | 1109 | tty->print_cr("# Choose block B%d with start latency=%d and freq=%g", |
duke@435 | 1110 | least->_pre_order, start_latency, least_freq); |
duke@435 | 1111 | } |
duke@435 | 1112 | #endif |
duke@435 | 1113 | |
duke@435 | 1114 | // See if the latency needs to be updated |
duke@435 | 1115 | if (target < end_latency) { |
duke@435 | 1116 | #ifndef PRODUCT |
duke@435 | 1117 | if (trace_opto_pipelining()) { |
duke@435 | 1118 | tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency); |
duke@435 | 1119 | } |
duke@435 | 1120 | #endif |
adlertz@5539 | 1121 | set_latency_for_node(self, end_latency); |
duke@435 | 1122 | partial_latency_of_defs(self); |
duke@435 | 1123 | } |
duke@435 | 1124 | |
duke@435 | 1125 | return least; |
duke@435 | 1126 | } |
duke@435 | 1127 | |
duke@435 | 1128 | |
duke@435 | 1129 | //------------------------------schedule_late----------------------------------- |
duke@435 | 1130 | // Now schedule all nodes as LATE as possible. This is the LCA in the
duke@435 | 1131 | // dominator tree of all USES of a value. Pick the block with the least |
duke@435 | 1132 | // loop nesting depth that is lowest in the dominator tree. |
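// For each node, the late placement is the least common ancestor, in the
// dominator tree, of the blocks of all its uses; hoist_to_cheaper_block()
// may then move it up to a less frequently executed block.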
duke@435 | 1133 | extern const char must_clone[]; |
duke@435 | 1134 | void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) { |
duke@435 | 1135 | #ifndef PRODUCT |
duke@435 | 1136 | if (trace_opto_pipelining()) |
duke@435 | 1137 | tty->print("\n#---- schedule_late ----\n"); |
duke@435 | 1138 | #endif |
duke@435 | 1139 | |
adlertz@5509 | 1140 | Node_Backward_Iterator iter((Node *)_root, visited, stack, *this); |
duke@435 | 1141 | Node *self; |
duke@435 | 1142 | |
duke@435 | 1143 | // Walk over all the nodes from last to first |
duke@435 | 1144 |   while ((self = iter.next()) != NULL) {
adlertz@5509 | 1145 | Block* early = get_block_for_node(self); // Earliest legal placement |
duke@435 | 1146 | |
duke@435 | 1147 | if (self->is_top()) { |
duke@435 | 1148 | // Top node goes in bb #2 with other constants. |
duke@435 | 1149 | // It must be special-cased, because it has no out edges. |
duke@435 | 1150 | early->add_inst(self); |
duke@435 | 1151 | continue; |
duke@435 | 1152 | } |
duke@435 | 1153 | |
duke@435 | 1154 | // No uses, just terminate |
duke@435 | 1155 | if (self->outcnt() == 0) { |
kvn@3040 | 1156 | assert(self->is_MachProj(), "sanity"); |
duke@435 | 1157 | continue; // Must be a dead machine projection |
duke@435 | 1158 | } |
duke@435 | 1159 | |
duke@435 | 1160 | // If node is pinned in the block, then no scheduling can be done. |
duke@435 | 1161 | if( self->pinned() ) // Pinned in block? |
duke@435 | 1162 | continue; |
duke@435 | 1163 | |
duke@435 | 1164 | MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL; |
duke@435 | 1165 | if (mach) { |
duke@435 | 1166 | switch (mach->ideal_Opcode()) { |
duke@435 | 1167 | case Op_CreateEx: |
duke@435 | 1168 | // Don't move exception creation |
duke@435 | 1169 | early->add_inst(self); |
duke@435 | 1170 | continue; |
duke@435 | 1171 | break; |
duke@435 | 1172 | case Op_CheckCastPP: |
duke@435 | 1173 | // Don't move CheckCastPP nodes away from their input, if the input |
duke@435 | 1174 | // is a rawptr (5071820). |
duke@435 | 1175 | Node *def = self->in(1); |
duke@435 | 1176 | if (def != NULL && def->bottom_type()->base() == Type::RawPtr) { |
duke@435 | 1177 | early->add_inst(self); |
kvn@1268 | 1178 | #ifdef ASSERT |
kvn@1268 | 1179 | _raw_oops.push(def); |
kvn@1268 | 1180 | #endif |
duke@435 | 1181 | continue; |
duke@435 | 1182 | } |
duke@435 | 1183 | break; |
duke@435 | 1184 | } |
duke@435 | 1185 | } |
duke@435 | 1186 | |
duke@435 | 1187 | // Gather LCA of all uses |
duke@435 | 1188 | Block *LCA = NULL; |
duke@435 | 1189 | { |
duke@435 | 1190 | for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) { |
duke@435 | 1191 | // For all uses, find LCA |
duke@435 | 1192 | Node* use = self->fast_out(i); |
adlertz@5509 | 1193 | LCA = raise_LCA_above_use(LCA, use, self, this); |
duke@435 | 1194 | } |
duke@435 | 1195 | } // (Hide defs of imax, i from rest of block.) |
duke@435 | 1196 | |
duke@435 | 1197 | // Place temps in the block of their use. This isn't a |
duke@435 | 1198 | // requirement for correctness but it reduces useless |
duke@435 | 1199 | // interference between temps and other nodes. |
duke@435 | 1200 | if (mach != NULL && mach->is_MachTemp()) { |
adlertz@5509 | 1201 | map_node_to_block(self, LCA); |
duke@435 | 1202 | LCA->add_inst(self); |
duke@435 | 1203 | continue; |
duke@435 | 1204 | } |
duke@435 | 1205 | |
duke@435 | 1206 | // Check if 'self' could be anti-dependent on memory |
duke@435 | 1207 | if (self->needs_anti_dependence_check()) { |
duke@435 | 1208 | // Hoist LCA above possible-defs and insert anti-dependences to |
duke@435 | 1209 | // defs in new LCA block. |
duke@435 | 1210 | LCA = insert_anti_dependences(LCA, self); |
duke@435 | 1211 | } |
duke@435 | 1212 | |
duke@435 | 1213 | if (early->_dom_depth > LCA->_dom_depth) { |
duke@435 | 1214 | // Somehow the LCA has moved above the earliest legal point. |
duke@435 | 1215 | // (One way this can happen is via memory_early_block.) |
duke@435 | 1216 | if (C->subsume_loads() == true && !C->failing()) { |
duke@435 | 1217 | // Retry with subsume_loads == false |
duke@435 | 1218 | // If this is the first failure, the sentinel string will "stick" |
duke@435 | 1219 | // to the Compile object, and the C2Compiler will see it and retry. |
duke@435 | 1220 | C->record_failure(C2Compiler::retry_no_subsuming_loads()); |
duke@435 | 1221 | } else { |
duke@435 | 1222 | // Bailout without retry when (early->_dom_depth > LCA->_dom_depth) |
duke@435 | 1223 | C->record_method_not_compilable("late schedule failed: incorrect graph"); |
duke@435 | 1224 | } |
duke@435 | 1225 | return; |
duke@435 | 1226 | } |
duke@435 | 1227 | |
duke@435 | 1228 | // If there is no opportunity to hoist, then we're done. |
shade@4691 | 1229 | // In stress mode, try to hoist even the single operations. |
shade@4691 | 1230 | bool try_to_hoist = StressGCM || (LCA != early); |
duke@435 | 1231 | |
duke@435 | 1232 | // Must clone guys stay next to use; no hoisting allowed. |
duke@435 | 1233 | // Also cannot hoist guys that alter memory or are otherwise not |
duke@435 | 1234 | // allocatable (hoisting can make a value live longer, leading to |
duke@435 | 1235 | // anti and output dependency problems which are normally resolved |
duke@435 | 1236 | // by the register allocator giving everyone a different register). |
duke@435 | 1237 | if (mach != NULL && must_clone[mach->ideal_Opcode()]) |
duke@435 | 1238 | try_to_hoist = false; |
duke@435 | 1239 | |
duke@435 | 1240 | Block* late = NULL; |
duke@435 | 1241 | if (try_to_hoist) { |
duke@435 | 1242 | // Now find the block with the least execution frequency. |
duke@435 | 1243 | // Start at the latest schedule and work up to the earliest schedule |
duke@435 | 1244 | // in the dominator tree. Thus the Node will dominate all its uses. |
duke@435 | 1245 | late = hoist_to_cheaper_block(LCA, early, self); |
duke@435 | 1246 | } else { |
duke@435 | 1247 | // Just use the LCA of the uses. |
duke@435 | 1248 | late = LCA; |
duke@435 | 1249 | } |
duke@435 | 1250 | |
duke@435 | 1251 | // Put the node into target block |
duke@435 | 1252 | schedule_node_into_block(self, late); |
duke@435 | 1253 | |
duke@435 | 1254 | #ifdef ASSERT |
duke@435 | 1255 | if (self->needs_anti_dependence_check()) { |
duke@435 | 1256 |       // Since precedence edges are only inserted when we're sure they
duke@435 | 1257 |       // are needed, make sure that after placement in a block we don't
duke@435 | 1258 |       // need any new precedence edges.
duke@435 | 1259 | verify_anti_dependences(late, self); |
duke@435 | 1260 | } |
duke@435 | 1261 | #endif |
duke@435 | 1262 | } // Loop until all nodes have been visited |
duke@435 | 1263 | |
duke@435 | 1264 | } // end ScheduleLate |
duke@435 | 1265 | |
duke@435 | 1266 | //------------------------------GlobalCodeMotion------------------------------- |
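// Overall flow: schedule pinned nodes, find the earliest legal block for every
// node (schedule_early), compute latencies, schedule nodes as late as possible
// (schedule_late), detect implicit null checks, then schedule locally within
// each block (schedule_local) and clone instructions below Catch nodes where
// required (call_catch_cleanup).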
adlertz@5539 | 1267 | void PhaseCFG::global_code_motion() { |
duke@435 | 1268 | ResourceMark rm; |
duke@435 | 1269 | |
duke@435 | 1270 | #ifndef PRODUCT |
duke@435 | 1271 | if (trace_opto_pipelining()) { |
duke@435 | 1272 | tty->print("\n---- Start GlobalCodeMotion ----\n"); |
duke@435 | 1273 | } |
duke@435 | 1274 | #endif |
duke@435 | 1275 | |
adlertz@5509 | 1276 | // Initialize the node to block mapping for things on the proj_list |
adlertz@5539 | 1277 | for (uint i = 0; i < _matcher.number_of_projections(); i++) { |
adlertz@5539 | 1278 | unmap_node_from_block(_matcher.get_projection(i)); |
adlertz@5509 | 1279 | } |
duke@435 | 1280 | |
duke@435 | 1281 | // Set the basic block for Nodes pinned into blocks |
adlertz@5539 | 1282 | Arena* arena = Thread::current()->resource_area(); |
adlertz@5539 | 1283 | VectorSet visited(arena); |
adlertz@5539 | 1284 | schedule_pinned_nodes(visited); |
duke@435 | 1285 | |
duke@435 | 1286 | // Find the earliest Block any instruction can be placed in. Some |
duke@435 | 1287 | // instructions are pinned into Blocks. Unpinned instructions can |
duke@435 | 1288 |   // appear in the last block in which all their inputs occur.
duke@435 | 1289 | visited.Clear(); |
adlertz@5539 | 1290 | Node_List stack(arena); |
adlertz@5539 | 1291 | // Pre-grow the list |
adlertz@5539 | 1292 | stack.map((C->unique() >> 1) + 16, NULL); |
duke@435 | 1293 | if (!schedule_early(visited, stack)) { |
duke@435 | 1294 | // Bailout without retry |
duke@435 | 1295 | C->record_method_not_compilable("early schedule failed"); |
duke@435 | 1296 | return; |
duke@435 | 1297 | } |
duke@435 | 1298 | |
duke@435 | 1299 | // Build Def-Use edges. |
duke@435 | 1300 | // Compute the latency information (via backwards walk) for all the |
duke@435 | 1301 | // instructions in the graph |
kvn@2040 | 1302 | _node_latency = new GrowableArray<uint>(); // resource_area allocation |
duke@435 | 1303 | |
adlertz@5539 | 1304 | if (C->do_scheduling()) { |
adlertz@5539 | 1305 | compute_latencies_backwards(visited, stack); |
adlertz@5539 | 1306 | } |
duke@435 | 1307 | |
duke@435 | 1308 |   // Now schedule all nodes as LATE as possible. This is the LCA in the
duke@435 | 1309 | // dominator tree of all USES of a value. Pick the block with the least |
duke@435 | 1310 | // loop nesting depth that is lowest in the dominator tree. |
duke@435 | 1311 | // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() ) |
duke@435 | 1312 | schedule_late(visited, stack); |
adlertz@5539 | 1313 | if (C->failing()) { |
duke@435 | 1314 | // schedule_late fails only when graph is incorrect. |
duke@435 | 1315 | assert(!VerifyGraphEdges, "verification should have failed"); |
duke@435 | 1316 | return; |
duke@435 | 1317 | } |
duke@435 | 1318 | |
duke@435 | 1319 | #ifndef PRODUCT |
duke@435 | 1320 | if (trace_opto_pipelining()) { |
duke@435 | 1321 | tty->print("\n---- Detect implicit null checks ----\n"); |
duke@435 | 1322 | } |
duke@435 | 1323 | #endif |
duke@435 | 1324 | |
duke@435 | 1325 | // Detect implicit-null-check opportunities. Basically, find NULL checks |
duke@435 | 1326 | // with suitable memory ops nearby. Use the memory op to do the NULL check. |
duke@435 | 1327 | // I can generate a memory op if there is not one nearby. |
duke@435 | 1328 | if (C->is_method_compilation()) { |
duke@435 | 1329 | // Don't do it for natives, adapters, or runtime stubs |
duke@435 | 1330 | int allowed_reasons = 0; |
duke@435 | 1331 | // ...and don't do it when there have been too many traps, globally. |
duke@435 | 1332 | for (int reason = (int)Deoptimization::Reason_none+1; |
duke@435 | 1333 | reason < Compile::trapHistLength; reason++) { |
duke@435 | 1334 | assert(reason < BitsPerInt, "recode bit map"); |
duke@435 | 1335 | if (!C->too_many_traps((Deoptimization::DeoptReason) reason)) |
duke@435 | 1336 | allowed_reasons |= nth_bit(reason); |
duke@435 | 1337 | } |
duke@435 | 1338 | // By reversing the loop direction we get a very minor gain on mpegaudio. |
duke@435 | 1339 | // Feel free to revert to a forward loop for clarity. |
duke@435 | 1340 |     // for (int i = 0; i < (int)_matcher._null_check_tests.size(); i += 2) {
adlertz@5539 | 1341 | for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) { |
adlertz@5539 | 1342 | Node* proj = _matcher._null_check_tests[i]; |
adlertz@5539 | 1343 | Node* val = _matcher._null_check_tests[i + 1]; |
adlertz@5539 | 1344 | Block* block = get_block_for_node(proj); |
adlertz@5639 | 1345 | implicit_null_check(block, proj, val, allowed_reasons); |
duke@435 | 1346 | // The implicit_null_check will only perform the transformation |
duke@435 | 1347 | // if the null branch is truly uncommon, *and* it leads to an |
duke@435 | 1348 | // uncommon trap. Combined with the too_many_traps guards |
duke@435 | 1349 | // above, this prevents SEGV storms reported in 6366351, |
duke@435 | 1350 | // by recompiling offending methods without this optimization. |
duke@435 | 1351 | } |
duke@435 | 1352 | } |
duke@435 | 1353 | |
duke@435 | 1354 | #ifndef PRODUCT |
duke@435 | 1355 | if (trace_opto_pipelining()) { |
duke@435 | 1356 | tty->print("\n---- Start Local Scheduling ----\n"); |
duke@435 | 1357 | } |
duke@435 | 1358 | #endif |
duke@435 | 1359 | |
duke@435 | 1360 | // Schedule locally. Right now a simple topological sort. |
duke@435 | 1361 | // Later, do a real latency aware scheduler. |
adlertz@5539 | 1362 | GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1); |
duke@435 | 1363 | visited.Clear(); |
adlertz@5539 | 1364 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1365 | Block* block = get_block(i); |
adlertz@5639 | 1366 | if (!schedule_local(block, ready_cnt, visited)) { |
duke@435 | 1367 | if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) { |
duke@435 | 1368 | C->record_method_not_compilable("local schedule failed"); |
duke@435 | 1369 | } |
duke@435 | 1370 | return; |
duke@435 | 1371 | } |
duke@435 | 1372 | } |
duke@435 | 1373 | |
duke@435 | 1374 |   // If we inserted any instructions between a Call and its CatchNode,
duke@435 | 1375 | // clone the instructions on all paths below the Catch. |
adlertz@5539 | 1376 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1377 | Block* block = get_block(i); |
adlertz@5639 | 1378 | call_catch_cleanup(block); |
adlertz@5509 | 1379 | } |
duke@435 | 1380 | |
duke@435 | 1381 | #ifndef PRODUCT |
duke@435 | 1382 | if (trace_opto_pipelining()) { |
duke@435 | 1383 | tty->print("\n---- After GlobalCodeMotion ----\n"); |
adlertz@5539 | 1384 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1385 | Block* block = get_block(i); |
adlertz@5539 | 1386 | block->dump(); |
duke@435 | 1387 | } |
duke@435 | 1388 | } |
duke@435 | 1389 | #endif |
kvn@2040 | 1390 | // Dead. |
kvn@2040 | 1391 | _node_latency = (GrowableArray<uint> *)0xdeadbeef; |
duke@435 | 1392 | } |
duke@435 | 1393 | |
adlertz@5539 | 1394 | bool PhaseCFG::do_global_code_motion() { |
adlertz@5539 | 1395 | |
adlertz@5539 | 1396 | build_dominator_tree(); |
adlertz@5539 | 1397 | if (C->failing()) { |
adlertz@5539 | 1398 | return false; |
adlertz@5539 | 1399 | } |
adlertz@5539 | 1400 | |
adlertz@5539 | 1401 | NOT_PRODUCT( C->verify_graph_edges(); ) |
adlertz@5539 | 1402 | |
adlertz@5539 | 1403 | estimate_block_frequency(); |
adlertz@5539 | 1404 | |
adlertz@5539 | 1405 | global_code_motion(); |
adlertz@5539 | 1406 | |
adlertz@5539 | 1407 | if (C->failing()) { |
adlertz@5539 | 1408 | return false; |
adlertz@5539 | 1409 | } |
adlertz@5539 | 1410 | |
adlertz@5539 | 1411 | return true; |
adlertz@5539 | 1412 | } |
duke@435 | 1413 | |
duke@435 | 1414 | //------------------------------Estimate_Block_Frequency----------------------- |
duke@435 | 1415 | // Estimate block frequencies based on IfNode probabilities. |
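// The loop tree is built first; block frequencies are then computed relative
// to each loop head and finally scaled by estimated trip counts so they are
// relative to the method entry.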
adlertz@5539 | 1416 | void PhaseCFG::estimate_block_frequency() { |
rasbold@853 | 1417 | |
rasbold@853 | 1418 | // Force conditional branches leading to uncommon traps to be unlikely, |
rasbold@853 | 1419 | // not because we get to the uncommon_trap with less relative frequency, |
rasbold@853 | 1420 | // but because an uncommon_trap typically causes a deopt, so we only get |
rasbold@853 | 1421 | // there once. |
rasbold@853 | 1422 | if (C->do_freq_based_layout()) { |
rasbold@853 | 1423 | Block_List worklist; |
adlertz@5539 | 1424 | Block* root_blk = get_block(0); |
rasbold@853 | 1425 | for (uint i = 1; i < root_blk->num_preds(); i++) { |
adlertz@5509 | 1426 | Block *pb = get_block_for_node(root_blk->pred(i)); |
rasbold@853 | 1427 | if (pb->has_uncommon_code()) { |
rasbold@853 | 1428 | worklist.push(pb); |
rasbold@853 | 1429 | } |
rasbold@853 | 1430 | } |
rasbold@853 | 1431 | while (worklist.size() > 0) { |
rasbold@853 | 1432 | Block* uct = worklist.pop(); |
adlertz@5539 | 1433 | if (uct == get_root_block()) { |
adlertz@5539 | 1434 | continue; |
adlertz@5539 | 1435 | } |
rasbold@853 | 1436 | for (uint i = 1; i < uct->num_preds(); i++) { |
adlertz@5509 | 1437 | Block *pb = get_block_for_node(uct->pred(i)); |
rasbold@853 | 1438 | if (pb->_num_succs == 1) { |
rasbold@853 | 1439 | worklist.push(pb); |
rasbold@853 | 1440 | } else if (pb->num_fall_throughs() == 2) { |
rasbold@853 | 1441 | pb->update_uncommon_branch(uct); |
rasbold@853 | 1442 | } |
rasbold@853 | 1443 | } |
rasbold@853 | 1444 | } |
rasbold@853 | 1445 | } |
duke@435 | 1446 | |
duke@435 | 1447 | // Create the loop tree and calculate loop depth. |
duke@435 | 1448 | _root_loop = create_loop_tree(); |
duke@435 | 1449 | _root_loop->compute_loop_depth(0); |
duke@435 | 1450 | |
duke@435 | 1451 | // Compute block frequency of each block, relative to a single loop entry. |
duke@435 | 1452 | _root_loop->compute_freq(); |
duke@435 | 1453 | |
duke@435 | 1454 | // Adjust all frequencies to be relative to a single method entry |
rasbold@853 | 1455 | _root_loop->_freq = 1.0; |
duke@435 | 1456 | _root_loop->scale_freq(); |
duke@435 | 1457 | |
kvn@1108 | 1458 |   // Save the outermost loop frequency for the LRG frequency threshold
adlertz@5539 | 1459 | _outer_loop_frequency = _root_loop->outer_loop_freq(); |
kvn@1108 | 1460 | |
duke@435 | 1461 | // force paths ending at uncommon traps to be infrequent |
rasbold@853 | 1462 | if (!C->do_freq_based_layout()) { |
rasbold@853 | 1463 | Block_List worklist; |
adlertz@5539 | 1464 | Block* root_blk = get_block(0); |
rasbold@853 | 1465 | for (uint i = 1; i < root_blk->num_preds(); i++) { |
adlertz@5509 | 1466 | Block *pb = get_block_for_node(root_blk->pred(i)); |
rasbold@853 | 1467 | if (pb->has_uncommon_code()) { |
rasbold@853 | 1468 | worklist.push(pb); |
rasbold@853 | 1469 | } |
duke@435 | 1470 | } |
rasbold@853 | 1471 | while (worklist.size() > 0) { |
rasbold@853 | 1472 | Block* uct = worklist.pop(); |
rasbold@853 | 1473 | uct->_freq = PROB_MIN; |
rasbold@853 | 1474 | for (uint i = 1; i < uct->num_preds(); i++) { |
adlertz@5509 | 1475 | Block *pb = get_block_for_node(uct->pred(i)); |
rasbold@853 | 1476 | if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) { |
rasbold@853 | 1477 | worklist.push(pb); |
rasbold@853 | 1478 | } |
duke@435 | 1479 | } |
duke@435 | 1480 | } |
duke@435 | 1481 | } |
duke@435 | 1482 | |
kvn@987 | 1483 | #ifdef ASSERT |
adlertz@5539 | 1484 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1485 | Block* b = get_block(i); |
twisti@1040 | 1486 | assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); |
kvn@987 | 1487 | } |
kvn@987 | 1488 | #endif |
kvn@987 | 1489 | |
duke@435 | 1490 | #ifndef PRODUCT |
duke@435 | 1491 | if (PrintCFGBlockFreq) { |
duke@435 | 1492 | tty->print_cr("CFG Block Frequencies"); |
duke@435 | 1493 | _root_loop->dump_tree(); |
duke@435 | 1494 | if (Verbose) { |
duke@435 | 1495 | tty->print_cr("PhaseCFG dump"); |
duke@435 | 1496 | dump(); |
duke@435 | 1497 | tty->print_cr("Node dump"); |
duke@435 | 1498 | _root->dump(99999); |
duke@435 | 1499 | } |
duke@435 | 1500 | } |
duke@435 | 1501 | #endif |
duke@435 | 1502 | } |
duke@435 | 1503 | |
duke@435 | 1504 | //----------------------------create_loop_tree-------------------------------- |
duke@435 | 1505 | // Create a loop tree from the CFG |
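// Loops are identified by their LoopNode heads; members are gathered by
// walking backwards from the back-edge predecessor, and each inner loop
// becomes a child of its enclosing loop (or of the method's pseudo loop).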
duke@435 | 1506 | CFGLoop* PhaseCFG::create_loop_tree() { |
duke@435 | 1507 | |
duke@435 | 1508 | #ifdef ASSERT |
adlertz@5539 | 1509 | assert(get_block(0) == get_root_block(), "first block should be root block"); |
adlertz@5539 | 1510 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1511 | Block* block = get_block(i); |
duke@435 | 1512 |     // Check that the _loop fields are clear...we could clear them if not.
adlertz@5539 | 1513 | assert(block->_loop == NULL, "clear _loop expected"); |
duke@435 | 1514 | // Sanity check that the RPO numbering is reflected in the _blocks array. |
duke@435 | 1515 | // It doesn't have to be for the loop tree to be built, but if it is not, |
duke@435 | 1516 |     // then the blocks have been reordered since dom graph building...which
duke@435 | 1517 |     // calls the RPO numbering into question.
adlertz@5539 | 1518 | assert(block->_rpo == i, "unexpected reverse post order number"); |
duke@435 | 1519 | } |
duke@435 | 1520 | #endif |
duke@435 | 1521 | |
duke@435 | 1522 | int idct = 0; |
duke@435 | 1523 | CFGLoop* root_loop = new CFGLoop(idct++); |
duke@435 | 1524 | |
duke@435 | 1525 | Block_List worklist; |
duke@435 | 1526 | |
duke@435 | 1527 | // Assign blocks to loops |
adlertz@5539 | 1528 | for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block |
adlertz@5539 | 1529 | Block* block = get_block(i); |
duke@435 | 1530 | |
adlertz@5539 | 1531 | if (block->head()->is_Loop()) { |
adlertz@5539 | 1532 | Block* loop_head = block; |
duke@435 | 1533 | assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); |
duke@435 | 1534 | Node* tail_n = loop_head->pred(LoopNode::LoopBackControl); |
adlertz@5509 | 1535 | Block* tail = get_block_for_node(tail_n); |
duke@435 | 1536 | |
duke@435 | 1537 | // Defensively filter out Loop nodes for non-single-entry loops. |
duke@435 | 1538 | // For all reasonable loops, the head occurs before the tail in RPO. |
duke@435 | 1539 | if (i <= tail->_rpo) { |
duke@435 | 1540 | |
duke@435 | 1541 | // The tail and (recursive) predecessors of the tail |
duke@435 | 1542 | // are made members of a new loop. |
duke@435 | 1543 | |
duke@435 | 1544 | assert(worklist.size() == 0, "nonempty worklist"); |
duke@435 | 1545 | CFGLoop* nloop = new CFGLoop(idct++); |
duke@435 | 1546 | assert(loop_head->_loop == NULL, "just checking"); |
duke@435 | 1547 | loop_head->_loop = nloop; |
duke@435 | 1548 | // Add to nloop so push_pred() will skip over inner loops |
duke@435 | 1549 | nloop->add_member(loop_head); |
adlertz@5509 | 1550 | nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this); |
duke@435 | 1551 | |
duke@435 | 1552 | while (worklist.size() > 0) { |
duke@435 | 1553 | Block* member = worklist.pop(); |
duke@435 | 1554 | if (member != loop_head) { |
duke@435 | 1555 | for (uint j = 1; j < member->num_preds(); j++) { |
adlertz@5509 | 1556 | nloop->push_pred(member, j, worklist, this); |
duke@435 | 1557 | } |
duke@435 | 1558 | } |
duke@435 | 1559 | } |
duke@435 | 1560 | } |
duke@435 | 1561 | } |
duke@435 | 1562 | } |
duke@435 | 1563 | |
duke@435 | 1564 | // Create a member list for each loop consisting |
duke@435 | 1565 | // of both blocks and (immediate child) loops. |
adlertz@5539 | 1566 | for (uint i = 0; i < number_of_blocks(); i++) { |
adlertz@5539 | 1567 | Block* block = get_block(i); |
adlertz@5539 | 1568 | CFGLoop* lp = block->_loop; |
duke@435 | 1569 | if (lp == NULL) { |
duke@435 | 1570 | // Not assigned to a loop. Add it to the method's pseudo loop. |
adlertz@5539 | 1571 | block->_loop = root_loop; |
duke@435 | 1572 | lp = root_loop; |
duke@435 | 1573 | } |
adlertz@5539 | 1574 | if (lp == root_loop || block != lp->head()) { // loop heads are already members |
adlertz@5539 | 1575 | lp->add_member(block); |
duke@435 | 1576 | } |
duke@435 | 1577 | if (lp != root_loop) { |
duke@435 | 1578 | if (lp->parent() == NULL) { |
duke@435 | 1579 | // Not a nested loop. Make it a child of the method's pseudo loop. |
duke@435 | 1580 | root_loop->add_nested_loop(lp); |
duke@435 | 1581 | } |
adlertz@5539 | 1582 | if (block == lp->head()) { |
duke@435 | 1583 | // Add nested loop to member list of parent loop. |
duke@435 | 1584 | lp->parent()->add_member(lp); |
duke@435 | 1585 | } |
duke@435 | 1586 | } |
duke@435 | 1587 | } |
duke@435 | 1588 | |
duke@435 | 1589 | return root_loop; |
duke@435 | 1590 | } |
duke@435 | 1591 | |
duke@435 | 1592 | //------------------------------push_pred-------------------------------------- |
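// Add the i-th predecessor of blk to this loop: blocks not yet assigned to a
// loop are claimed and pushed on the worklist, while predecessors that already
// belong to an inner loop are entered through that loop's head instead.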
adlertz@5509 | 1593 | void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) { |
duke@435 | 1594 | Node* pred_n = blk->pred(i); |
adlertz@5509 | 1595 | Block* pred = cfg->get_block_for_node(pred_n); |
duke@435 | 1596 | CFGLoop *pred_loop = pred->_loop; |
duke@435 | 1597 | if (pred_loop == NULL) { |
duke@435 | 1598 | // Filter out blocks for non-single-entry loops. |
duke@435 | 1599 | // For all reasonable loops, the head occurs before the tail in RPO. |
duke@435 | 1600 | if (pred->_rpo > head()->_rpo) { |
duke@435 | 1601 | pred->_loop = this; |
duke@435 | 1602 | worklist.push(pred); |
duke@435 | 1603 | } |
duke@435 | 1604 | } else if (pred_loop != this) { |
duke@435 | 1605 | // Nested loop. |
duke@435 | 1606 | while (pred_loop->_parent != NULL && pred_loop->_parent != this) { |
duke@435 | 1607 | pred_loop = pred_loop->_parent; |
duke@435 | 1608 | } |
duke@435 | 1609 | // Make pred's loop be a child |
duke@435 | 1610 | if (pred_loop->_parent == NULL) { |
duke@435 | 1611 | add_nested_loop(pred_loop); |
duke@435 | 1612 | // Continue with loop entry predecessor. |
duke@435 | 1613 | Block* pred_head = pred_loop->head(); |
duke@435 | 1614 | assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors"); |
duke@435 | 1615 | assert(pred_head != head(), "loop head in only one loop"); |
adlertz@5509 | 1616 | push_pred(pred_head, LoopNode::EntryControl, worklist, cfg); |
duke@435 | 1617 | } else { |
duke@435 | 1618 | assert(pred_loop->_parent == this && _parent == NULL, "just checking"); |
duke@435 | 1619 | } |
duke@435 | 1620 | } |
duke@435 | 1621 | } |
duke@435 | 1622 | |
duke@435 | 1623 | //------------------------------add_nested_loop-------------------------------- |
duke@435 | 1624 | // Make cl a child of the current loop in the loop tree. |
duke@435 | 1625 | void CFGLoop::add_nested_loop(CFGLoop* cl) { |
duke@435 | 1626 | assert(_parent == NULL, "no parent yet"); |
duke@435 | 1627 | assert(cl != this, "not my own parent"); |
duke@435 | 1628 | cl->_parent = this; |
duke@435 | 1629 | CFGLoop* ch = _child; |
duke@435 | 1630 | if (ch == NULL) { |
duke@435 | 1631 | _child = cl; |
duke@435 | 1632 | } else { |
duke@435 | 1633 | while (ch->_sibling != NULL) { ch = ch->_sibling; } |
duke@435 | 1634 | ch->_sibling = cl; |
duke@435 | 1635 | } |
duke@435 | 1636 | } |
duke@435 | 1637 | |
duke@435 | 1638 | //------------------------------compute_loop_depth----------------------------- |
duke@435 | 1639 | // Store the loop depth in each CFGLoop object. |
duke@435 | 1640 | // Recursively walk the children to do the same for them. |
duke@435 | 1641 | void CFGLoop::compute_loop_depth(int depth) { |
duke@435 | 1642 | _depth = depth; |
duke@435 | 1643 | CFGLoop* ch = _child; |
duke@435 | 1644 | while (ch != NULL) { |
duke@435 | 1645 | ch->compute_loop_depth(depth + 1); |
duke@435 | 1646 | ch = ch->_sibling; |
duke@435 | 1647 | } |
duke@435 | 1648 | } |
duke@435 | 1649 | |
duke@435 | 1650 | //------------------------------compute_freq----------------------------------- |
duke@435 | 1651 | // Compute the frequency of each block and loop, relative to a single entry |
duke@435 | 1652 | // into the dominating loop head. |
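// For example, two exits with per-iteration probabilities 0.02 and 0.08 are
// normalized to 0.2 and 0.8, and their 0.1 sum is saved as _exit_prob for the
// trip count estimate.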
duke@435 | 1653 | void CFGLoop::compute_freq() { |
duke@435 | 1654 | // Bottom up traversal of loop tree (visit inner loops first.) |
duke@435 | 1655 | // Set loop head frequency to 1.0, then transitively |
duke@435 | 1656 | // compute frequency for all successors in the loop, |
duke@435 | 1657 | // as well as for each exit edge. Inner loops are |
duke@435 | 1658 | // treated as single blocks with loop exit targets |
duke@435 | 1659 | // as the successor blocks. |
duke@435 | 1660 | |
duke@435 | 1661 | // Nested loops first |
duke@435 | 1662 | CFGLoop* ch = _child; |
duke@435 | 1663 | while (ch != NULL) { |
duke@435 | 1664 | ch->compute_freq(); |
duke@435 | 1665 | ch = ch->_sibling; |
duke@435 | 1666 | } |
duke@435 | 1667 | assert (_members.length() > 0, "no empty loops"); |
duke@435 | 1668 | Block* hd = head(); |
duke@435 | 1669 | hd->_freq = 1.0f; |
duke@435 | 1670 | for (int i = 0; i < _members.length(); i++) { |
duke@435 | 1671 | CFGElement* s = _members.at(i); |
duke@435 | 1672 | float freq = s->_freq; |
duke@435 | 1673 | if (s->is_block()) { |
duke@435 | 1674 | Block* b = s->as_Block(); |
duke@435 | 1675 | for (uint j = 0; j < b->_num_succs; j++) { |
duke@435 | 1676 | Block* sb = b->_succs[j]; |
duke@435 | 1677 | update_succ_freq(sb, freq * b->succ_prob(j)); |
duke@435 | 1678 | } |
duke@435 | 1679 | } else { |
duke@435 | 1680 | CFGLoop* lp = s->as_CFGLoop(); |
duke@435 | 1681 | assert(lp->_parent == this, "immediate child"); |
duke@435 | 1682 | for (int k = 0; k < lp->_exits.length(); k++) { |
duke@435 | 1683 | Block* eb = lp->_exits.at(k).get_target(); |
duke@435 | 1684 | float prob = lp->_exits.at(k).get_prob(); |
duke@435 | 1685 | update_succ_freq(eb, freq * prob); |
duke@435 | 1686 | } |
duke@435 | 1687 | } |
duke@435 | 1688 | } |
duke@435 | 1689 | |
duke@435 | 1690 | // For all loops other than the outer, "method" loop, |
duke@435 | 1691 | // sum and normalize the exit probability. The "method" loop |
duke@435 | 1692 | // should keep the initial exit probability of 1, so that |
duke@435 | 1693 | // inner blocks do not get erroneously scaled. |
duke@435 | 1694 | if (_depth != 0) { |
duke@435 | 1695 | // Total the exit probabilities for this loop. |
duke@435 | 1696 | float exits_sum = 0.0f; |
duke@435 | 1697 | for (int i = 0; i < _exits.length(); i++) { |
duke@435 | 1698 | exits_sum += _exits.at(i).get_prob(); |
duke@435 | 1699 | } |
duke@435 | 1700 | |
duke@435 | 1701 | // Normalize the exit probabilities. Until now, the |
duke@435 | 1702 | // probabilities estimate the possibility of exit per |
duke@435 | 1703 | // a single loop iteration; afterward, they estimate |
duke@435 | 1704 | // the probability of exit per loop entry. |
duke@435 | 1705 | for (int i = 0; i < _exits.length(); i++) { |
duke@435 | 1706 | Block* et = _exits.at(i).get_target(); |
rasbold@853 | 1707 | float new_prob = 0.0f; |
rasbold@853 | 1708 | if (_exits.at(i).get_prob() > 0.0f) { |
rasbold@853 | 1709 | new_prob = _exits.at(i).get_prob() / exits_sum; |
rasbold@853 | 1710 | } |
duke@435 | 1711 | BlockProbPair bpp(et, new_prob); |
duke@435 | 1712 | _exits.at_put(i, bpp); |
duke@435 | 1713 | } |
duke@435 | 1714 | |
rasbold@853 | 1715 | // Save the total, but guard against unreasonable probability, |
duke@435 | 1716 | // as the value is used to estimate the loop trip count. |
duke@435 | 1717 | // An infinite trip count would blur relative block |
duke@435 | 1718 | // frequencies. |
duke@435 | 1719 |     if (exits_sum > 1.0f) exits_sum = 1.0f;
duke@435 | 1720 | if (exits_sum < PROB_MIN) exits_sum = PROB_MIN; |
duke@435 | 1721 | _exit_prob = exits_sum; |
duke@435 | 1722 | } |
duke@435 | 1723 | } |
duke@435 | 1724 | |
duke@435 | 1725 | //------------------------------succ_prob------------------------------------- |
duke@435 | 1726 | // Determine the probability of reaching successor 'i' from the receiver block. |
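// The result depends on the node ending the block: If and CountedLoopEnd use
// the MachIf probability, Jump splits the frequency evenly among successors,
// Catch strongly favors the fall-through projection, and nodes that end
// control flow (Return, Halt, etc.) pass no frequency on.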
duke@435 | 1727 | float Block::succ_prob(uint i) { |
duke@435 | 1728 | int eidx = end_idx(); |
adlertz@5635 | 1729 | Node *n = get_node(eidx); // Get ending Node |
rasbold@743 | 1730 | |
rasbold@743 | 1731 | int op = n->Opcode(); |
rasbold@743 | 1732 | if (n->is_Mach()) { |
rasbold@743 | 1733 | if (n->is_MachNullCheck()) { |
rasbold@743 | 1734 | // Can only reach here if called after lcm. The original Op_If is gone, |
rasbold@743 | 1735 | // so we attempt to infer the probability from one or both of the |
rasbold@743 | 1736 | // successor blocks. |
rasbold@743 | 1737 | assert(_num_succs == 2, "expecting 2 successors of a null check"); |
rasbold@743 | 1738 | // If either successor has only one predecessor, then the |
twisti@1040 | 1739 | // probability estimate can be derived using the |
rasbold@743 | 1740 | // relative frequency of the successor and this block. |
rasbold@743 | 1741 | if (_succs[i]->num_preds() == 2) { |
rasbold@743 | 1742 | return _succs[i]->_freq / _freq; |
rasbold@743 | 1743 | } else if (_succs[1-i]->num_preds() == 2) { |
rasbold@743 | 1744 | return 1 - (_succs[1-i]->_freq / _freq); |
rasbold@743 | 1745 | } else { |
rasbold@743 | 1746 | // Estimate using both successor frequencies |
rasbold@743 | 1747 | float freq = _succs[i]->_freq; |
rasbold@743 | 1748 | return freq / (freq + _succs[1-i]->_freq); |
rasbold@743 | 1749 | } |
rasbold@743 | 1750 | } |
rasbold@743 | 1751 | op = n->as_Mach()->ideal_Opcode(); |
rasbold@743 | 1752 | } |
rasbold@743 | 1753 | |
duke@435 | 1754 | |
duke@435 | 1755 | // Switch on branch type |
duke@435 | 1756 | switch( op ) { |
duke@435 | 1757 | case Op_CountedLoopEnd: |
duke@435 | 1758 | case Op_If: { |
duke@435 | 1759 | assert (i < 2, "just checking"); |
duke@435 | 1760 | // Conditionals pass on only part of their frequency |
duke@435 | 1761 | float prob = n->as_MachIf()->_prob; |
duke@435 | 1762 | assert(prob >= 0.0 && prob <= 1.0, "out of range probability"); |
duke@435 | 1763 | // If succ[i] is the FALSE branch, invert path info |
adlertz@5635 | 1764 | if( get_node(i + eidx + 1)->Opcode() == Op_IfFalse ) { |
duke@435 | 1765 | return 1.0f - prob; // not taken |
duke@435 | 1766 | } else { |
duke@435 | 1767 | return prob; // taken |
duke@435 | 1768 | } |
duke@435 | 1769 | } |
duke@435 | 1770 | |
duke@435 | 1771 | case Op_Jump: |
duke@435 | 1772 | // Divide the frequency between all successors evenly |
duke@435 | 1773 | return 1.0f/_num_succs; |
duke@435 | 1774 | |
duke@435 | 1775 | case Op_Catch: { |
adlertz@5635 | 1776 | const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); |
duke@435 | 1777 | if (ci->_con == CatchProjNode::fall_through_index) { |
duke@435 | 1778 | // Fall-thru path gets the lion's share. |
duke@435 | 1779 | return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs; |
duke@435 | 1780 | } else { |
duke@435 | 1781 | // Presume exceptional paths are equally unlikely |
duke@435 | 1782 | return PROB_UNLIKELY_MAG(5); |
duke@435 | 1783 | } |
duke@435 | 1784 | } |
duke@435 | 1785 | |
duke@435 | 1786 | case Op_Root: |
duke@435 | 1787 | case Op_Goto: |
duke@435 | 1788 | // Pass frequency straight thru to target |
duke@435 | 1789 | return 1.0f; |
duke@435 | 1790 | |
duke@435 | 1791 | case Op_NeverBranch: |
duke@435 | 1792 | return 0.0f; |
duke@435 | 1793 | |
duke@435 | 1794 | case Op_TailCall: |
duke@435 | 1795 | case Op_TailJump: |
duke@435 | 1796 | case Op_Return: |
duke@435 | 1797 | case Op_Halt: |
duke@435 | 1798 | case Op_Rethrow: |
duke@435 | 1799 | // Do not push out freq to root block |
duke@435 | 1800 | return 0.0f; |
duke@435 | 1801 | |
duke@435 | 1802 | default: |
duke@435 | 1803 | ShouldNotReachHere(); |
duke@435 | 1804 | } |
duke@435 | 1805 | |
duke@435 | 1806 | return 0.0f; |
duke@435 | 1807 | } |
duke@435 | 1808 | |
rasbold@853 | 1809 | //------------------------------num_fall_throughs----------------------------- |
rasbold@853 | 1810 | // Return the number of fall-through candidates for a block |
rasbold@853 | 1811 | int Block::num_fall_throughs() { |
rasbold@853 | 1812 | int eidx = end_idx(); |
adlertz@5635 | 1813 | Node *n = get_node(eidx); // Get ending Node |
rasbold@853 | 1814 | |
rasbold@853 | 1815 | int op = n->Opcode(); |
rasbold@853 | 1816 | if (n->is_Mach()) { |
rasbold@853 | 1817 | if (n->is_MachNullCheck()) { |
rasbold@853 | 1818 |       // In theory, either side can fall through; for simplicity's sake,
rasbold@853 | 1819 |       // assume only the false branch can for now.
rasbold@853 | 1820 | return 1; |
rasbold@853 | 1821 | } |
rasbold@853 | 1822 | op = n->as_Mach()->ideal_Opcode(); |
rasbold@853 | 1823 | } |
rasbold@853 | 1824 | |
rasbold@853 | 1825 | // Switch on branch type |
rasbold@853 | 1826 | switch( op ) { |
rasbold@853 | 1827 | case Op_CountedLoopEnd: |
rasbold@853 | 1828 | case Op_If: |
rasbold@853 | 1829 | return 2; |
rasbold@853 | 1830 | |
rasbold@853 | 1831 | case Op_Root: |
rasbold@853 | 1832 | case Op_Goto: |
rasbold@853 | 1833 | return 1; |
rasbold@853 | 1834 | |
rasbold@853 | 1835 | case Op_Catch: { |
rasbold@853 | 1836 | for (uint i = 0; i < _num_succs; i++) { |
adlertz@5635 | 1837 | const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); |
rasbold@853 | 1838 | if (ci->_con == CatchProjNode::fall_through_index) { |
rasbold@853 | 1839 | return 1; |
rasbold@853 | 1840 | } |
rasbold@853 | 1841 | } |
rasbold@853 | 1842 | return 0; |
rasbold@853 | 1843 | } |
rasbold@853 | 1844 | |
rasbold@853 | 1845 | case Op_Jump: |
rasbold@853 | 1846 | case Op_NeverBranch: |
rasbold@853 | 1847 | case Op_TailCall: |
rasbold@853 | 1848 | case Op_TailJump: |
rasbold@853 | 1849 | case Op_Return: |
rasbold@853 | 1850 | case Op_Halt: |
rasbold@853 | 1851 | case Op_Rethrow: |
rasbold@853 | 1852 | return 0; |
rasbold@853 | 1853 | |
rasbold@853 | 1854 | default: |
rasbold@853 | 1855 | ShouldNotReachHere(); |
rasbold@853 | 1856 | } |
rasbold@853 | 1857 | |
rasbold@853 | 1858 | return 0; |
rasbold@853 | 1859 | } |
rasbold@853 | 1860 | |
rasbold@853 | 1861 | //------------------------------succ_fall_through----------------------------- |
rasbold@853 | 1862 | // Return true if a specific successor could be fall-through target. |
rasbold@853 | 1863 | bool Block::succ_fall_through(uint i) { |
rasbold@853 | 1864 | int eidx = end_idx(); |
adlertz@5635 | 1865 | Node *n = get_node(eidx); // Get ending Node |
rasbold@853 | 1866 | |
rasbold@853 | 1867 | int op = n->Opcode(); |
rasbold@853 | 1868 | if (n->is_Mach()) { |
rasbold@853 | 1869 | if (n->is_MachNullCheck()) { |
rasbold@853 | 1870 |       // In theory, either side can fall through; for simplicity's sake,
rasbold@853 | 1871 |       // assume only the false branch can for now.
adlertz@5635 | 1872 | return get_node(i + eidx + 1)->Opcode() == Op_IfFalse; |
rasbold@853 | 1873 | } |
rasbold@853 | 1874 | op = n->as_Mach()->ideal_Opcode(); |
rasbold@853 | 1875 | } |
rasbold@853 | 1876 | |
rasbold@853 | 1877 | // Switch on branch type |
rasbold@853 | 1878 | switch( op ) { |
rasbold@853 | 1879 | case Op_CountedLoopEnd: |
rasbold@853 | 1880 | case Op_If: |
rasbold@853 | 1881 | case Op_Root: |
rasbold@853 | 1882 | case Op_Goto: |
rasbold@853 | 1883 | return true; |
rasbold@853 | 1884 | |
rasbold@853 | 1885 | case Op_Catch: { |
adlertz@5635 | 1886 | const CatchProjNode *ci = get_node(i + eidx + 1)->as_CatchProj(); |
rasbold@853 | 1887 | return ci->_con == CatchProjNode::fall_through_index; |
rasbold@853 | 1888 | } |
rasbold@853 | 1889 | |
rasbold@853 | 1890 | case Op_Jump: |
rasbold@853 | 1891 | case Op_NeverBranch: |
rasbold@853 | 1892 | case Op_TailCall: |
rasbold@853 | 1893 | case Op_TailJump: |
rasbold@853 | 1894 | case Op_Return: |
rasbold@853 | 1895 | case Op_Halt: |
rasbold@853 | 1896 | case Op_Rethrow: |
rasbold@853 | 1897 | return false; |
rasbold@853 | 1898 | |
rasbold@853 | 1899 | default: |
rasbold@853 | 1900 | ShouldNotReachHere(); |
rasbold@853 | 1901 | } |
rasbold@853 | 1902 | |
rasbold@853 | 1903 | return false; |
rasbold@853 | 1904 | } |
rasbold@853 | 1905 | |
rasbold@853 | 1906 | //------------------------------update_uncommon_branch------------------------ |
rasbold@853 | 1907 | // Update the probability of a two-branch to be uncommon |
rasbold@853 | 1908 | void Block::update_uncommon_branch(Block* ub) { |
rasbold@853 | 1909 | int eidx = end_idx(); |
adlertz@5635 | 1910 | Node *n = get_node(eidx); // Get ending Node |
rasbold@853 | 1911 | |
rasbold@853 | 1912 | int op = n->as_Mach()->ideal_Opcode(); |
rasbold@853 | 1913 | |
rasbold@853 | 1914 |   assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
rasbold@853 | 1915 | assert(num_fall_throughs() == 2, "must be a two way branch block"); |
rasbold@853 | 1916 | |
rasbold@853 | 1917 | // Which successor is ub? |
rasbold@853 | 1918 | uint s; |
rasbold@853 | 1919 | for (s = 0; s <_num_succs; s++) { |
rasbold@853 | 1920 | if (_succs[s] == ub) break; |
rasbold@853 | 1921 | } |
rasbold@853 | 1922 | assert(s < 2, "uncommon successor must be found"); |
rasbold@853 | 1923 | |
rasbold@853 | 1924 |   // If ub is the true path, make the probability small; else
rasbold@853 | 1925 |   // ub is the false path, and make the probability large.
adlertz@5635 | 1926 | bool invert = (get_node(s + eidx + 1)->Opcode() == Op_IfFalse); |
rasbold@853 | 1927 | |
rasbold@853 | 1928 | // Get existing probability |
rasbold@853 | 1929 | float p = n->as_MachIf()->_prob; |
rasbold@853 | 1930 | |
rasbold@853 | 1931 | if (invert) p = 1.0 - p; |
rasbold@853 | 1932 | if (p > PROB_MIN) { |
rasbold@853 | 1933 | p = PROB_MIN; |
rasbold@853 | 1934 | } |
rasbold@853 | 1935 | if (invert) p = 1.0 - p; |
rasbold@853 | 1936 | |
rasbold@853 | 1937 | n->as_MachIf()->_prob = p; |
rasbold@853 | 1938 | } |
rasbold@853 | 1939 | |
duke@435 | 1940 | //------------------------------update_succ_freq------------------------------- |
twisti@1040 | 1941 | // Update the appropriate frequency associated with block 'b', a successor of |
duke@435 | 1942 | // a block in this loop. |
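// Three cases: a successor within this loop accumulates plain block frequency,
// a successor outside the loop nest is recorded as an exit edge, and a
// successor inside a nested loop adds to that loop's entry frequency.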
duke@435 | 1943 | void CFGLoop::update_succ_freq(Block* b, float freq) { |
duke@435 | 1944 | if (b->_loop == this) { |
duke@435 | 1945 | if (b == head()) { |
duke@435 | 1946 | // back branch within the loop |
duke@435 | 1947 |       // Do nothing now; the loop-carried frequency will be
duke@435 | 1948 |       // adjusted later in scale_freq().
duke@435 | 1949 | } else { |
duke@435 | 1950 | // simple branch within the loop |
duke@435 | 1951 | b->_freq += freq; |
duke@435 | 1952 | } |
duke@435 | 1953 | } else if (!in_loop_nest(b)) { |
duke@435 | 1954 | // branch is exit from this loop |
duke@435 | 1955 | BlockProbPair bpp(b, freq); |
duke@435 | 1956 | _exits.append(bpp); |
duke@435 | 1957 | } else { |
duke@435 | 1958 | // branch into nested loop |
duke@435 | 1959 | CFGLoop* ch = b->_loop; |
duke@435 | 1960 | ch->_freq += freq; |
duke@435 | 1961 | } |
duke@435 | 1962 | } |
duke@435 | 1963 | |
duke@435 | 1964 | //------------------------------in_loop_nest----------------------------------- |
duke@435 | 1965 | // Determine if block b is in the receiver's loop nest. |
duke@435 | 1966 | bool CFGLoop::in_loop_nest(Block* b) { |
duke@435 | 1967 | int depth = _depth; |
duke@435 | 1968 | CFGLoop* b_loop = b->_loop; |
duke@435 | 1969 | int b_depth = b_loop->_depth; |
duke@435 | 1970 | if (depth == b_depth) { |
duke@435 | 1971 | return true; |
duke@435 | 1972 | } |
duke@435 | 1973 | while (b_depth > depth) { |
duke@435 | 1974 | b_loop = b_loop->_parent; |
duke@435 | 1975 | b_depth = b_loop->_depth; |
duke@435 | 1976 | } |
duke@435 | 1977 | return b_loop == this; |
duke@435 | 1978 | } |
duke@435 | 1979 | |
duke@435 | 1980 | //------------------------------scale_freq------------------------------------- |
duke@435 | 1981 | // Scale frequency of loops and blocks by trip counts from outer loops |
duke@435 | 1982 | // Do a top down traversal of loop tree (visit outer loops first.) |
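// A block's final frequency is its loop-relative frequency multiplied by the
// trip counts of all enclosing loops, clamped below at MIN_BLOCK_FREQUENCY so
// the register allocator always sees a meaningful value.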
duke@435 | 1983 | void CFGLoop::scale_freq() { |
duke@435 | 1984 | float loop_freq = _freq * trip_count(); |
kvn@1108 | 1985 | _freq = loop_freq; |
duke@435 | 1986 | for (int i = 0; i < _members.length(); i++) { |
duke@435 | 1987 | CFGElement* s = _members.at(i); |
kvn@987 | 1988 | float block_freq = s->_freq * loop_freq; |
kvn@1056 | 1989 | if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY) |
kvn@1056 | 1990 | block_freq = MIN_BLOCK_FREQUENCY; |
kvn@987 | 1991 | s->_freq = block_freq; |
duke@435 | 1992 | } |
duke@435 | 1993 | CFGLoop* ch = _child; |
duke@435 | 1994 | while (ch != NULL) { |
duke@435 | 1995 | ch->scale_freq(); |
duke@435 | 1996 | ch = ch->_sibling; |
duke@435 | 1997 | } |
duke@435 | 1998 | } |
duke@435 | 1999 | |
kvn@1108 | 2000 | // Frequency of outer loop |
kvn@1108 | 2001 | float CFGLoop::outer_loop_freq() const { |
kvn@1108 | 2002 | if (_child != NULL) { |
kvn@1108 | 2003 | return _child->_freq; |
kvn@1108 | 2004 | } |
kvn@1108 | 2005 | return _freq; |
kvn@1108 | 2006 | } |
kvn@1108 | 2007 | |
duke@435 | 2008 | #ifndef PRODUCT |
duke@435 | 2009 | //------------------------------dump_tree-------------------------------------- |
duke@435 | 2010 | void CFGLoop::dump_tree() const { |
duke@435 | 2011 | dump(); |
duke@435 | 2012 | if (_child != NULL) _child->dump_tree(); |
duke@435 | 2013 | if (_sibling != NULL) _sibling->dump_tree(); |
duke@435 | 2014 | } |
duke@435 | 2015 | |
duke@435 | 2016 | //------------------------------dump------------------------------------------- |
duke@435 | 2017 | void CFGLoop::dump() const { |
duke@435 | 2018 | for (int i = 0; i < _depth; i++) tty->print(" "); |
duke@435 | 2019 | tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n", |
duke@435 | 2020 | _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq); |
duke@435 | 2021 | for (int i = 0; i < _depth; i++) tty->print(" "); |
duke@435 | 2022 |   tty->print("   members:");
duke@435 | 2023 | int k = 0; |
duke@435 | 2024 | for (int i = 0; i < _members.length(); i++) { |
duke@435 | 2025 | if (k++ >= 6) { |
duke@435 | 2026 | tty->print("\n "); |
duke@435 | 2027 | for (int j = 0; j < _depth+1; j++) tty->print(" "); |
duke@435 | 2028 | k = 0; |
duke@435 | 2029 | } |
duke@435 | 2030 | CFGElement *s = _members.at(i); |
duke@435 | 2031 | if (s->is_block()) { |
duke@435 | 2032 | Block *b = s->as_Block(); |
duke@435 | 2033 | tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq); |
duke@435 | 2034 | } else { |
duke@435 | 2035 | CFGLoop* lp = s->as_CFGLoop(); |
duke@435 | 2036 | tty->print(" L%d(%6.3f)", lp->_id, lp->_freq); |
duke@435 | 2037 | } |
duke@435 | 2038 | } |
duke@435 | 2039 | tty->print("\n"); |
duke@435 | 2040 | for (int i = 0; i < _depth; i++) tty->print(" "); |
duke@435 | 2041 | tty->print(" exits: "); |
duke@435 | 2042 | k = 0; |
duke@435 | 2043 | for (int i = 0; i < _exits.length(); i++) { |
duke@435 | 2044 | if (k++ >= 7) { |
duke@435 | 2045 | tty->print("\n "); |
duke@435 | 2046 | for (int j = 0; j < _depth+1; j++) tty->print(" "); |
duke@435 | 2047 | k = 0; |
duke@435 | 2048 | } |
duke@435 | 2049 | Block *blk = _exits.at(i).get_target(); |
duke@435 | 2050 | float prob = _exits.at(i).get_prob(); |
duke@435 | 2051 | tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100)); |
duke@435 | 2052 | } |
duke@435 | 2053 | tty->print("\n"); |
duke@435 | 2054 | } |
duke@435 | 2055 | #endif |