1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/block.hpp"
29 #include "opto/c2compiler.hpp"
30 #include "opto/callnode.hpp"
31 #include "opto/cfgnode.hpp"
32 #include "opto/machnode.hpp"
33 #include "opto/opcodes.hpp"
34 #include "opto/phaseX.hpp"
35 #include "opto/rootnode.hpp"
36 #include "opto/runtime.hpp"
37 #include "runtime/deoptimization.hpp"
38 #ifdef TARGET_ARCH_MODEL_x86_32
39 # include "adfiles/ad_x86_32.hpp"
40 #endif
41 #ifdef TARGET_ARCH_MODEL_x86_64
42 # include "adfiles/ad_x86_64.hpp"
43 #endif
44 #ifdef TARGET_ARCH_MODEL_sparc
45 # include "adfiles/ad_sparc.hpp"
46 #endif
47 #ifdef TARGET_ARCH_MODEL_zero
48 # include "adfiles/ad_zero.hpp"
49 #endif
50 #ifdef TARGET_ARCH_MODEL_arm
51 # include "adfiles/ad_arm.hpp"
52 #endif
53 #ifdef TARGET_ARCH_MODEL_ppc_32
54 # include "adfiles/ad_ppc_32.hpp"
55 #endif
56 #ifdef TARGET_ARCH_MODEL_ppc_64
57 # include "adfiles/ad_ppc_64.hpp"
58 #endif
61 // Portions of code courtesy of Clifford Click
63 // Optimization - Graph Style
65 // To avoid float value underflow
66 #define MIN_BLOCK_FREQUENCY 1.e-35f
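// MIN_BLOCK_FREQUENCY is also the lower bound checked at the end of
// estimate_block_frequency(): the register allocator requires every block to
// carry a meaningful (non-zero) frequency.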
68 //----------------------------schedule_node_into_block-------------------------
69 // Insert node n into block b. Look for projections of n and make sure they
70 // are in b also.
71 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
72 // Set basic block of n, Add n to b,
73 map_node_to_block(n, b);
74 b->add_inst(n);
76 // After Matching, nearly any old Node may have projections trailing it.
77 // These are usually machine-dependent flags. In any case, they might
78 // float to another block below this one. Move them up.
79 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
80 Node* use = n->fast_out(i);
81 if (use->is_Proj()) {
82 Block* buse = get_block_for_node(use);
83 if (buse != b) { // In wrong block?
84 if (buse != NULL) {
85 buse->find_remove(use); // Remove from wrong block
86 }
87 map_node_to_block(use, b);
88 b->add_inst(use);
89 }
90 }
91 }
92 }
94 //----------------------------replace_block_proj_ctrl-------------------------
95 // Nodes that have is_block_proj() nodes as their control need to use
96 // the appropriate Region for their actual block as their control since
97 // the projection will be in a predecessor block.
98 void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
99 const Node *in0 = n->in(0);
100 assert(in0 != NULL, "Only control-dependent");
101 const Node *p = in0->is_block_proj();
102 if (p != NULL && p != n) { // Control from a block projection?
103 assert(!n->pinned() || n->is_MachConstantBase(), "only pinned MachConstantBase node is expected here");
104 // Find trailing Region
105 Block *pb = get_block_for_node(in0); // Block-projection already has basic block
106 uint j = 0;
107 if (pb->_num_succs != 1) { // More than 1 successor?
108 // Search for successor
109 uint max = pb->_nodes.size();
110 assert( max > 1, "" );
111 uint start = max - pb->_num_succs;
112 // Find which output path belongs to projection
113 for (j = start; j < max; j++) {
114 if( pb->_nodes[j] == in0 )
115 break;
116 }
117 assert( j < max, "must find" );
118 // Change control to match head of successor basic block
119 j -= start;
120 }
121 n->set_req(0, pb->_succs[j]->head());
122 }
123 }
126 //------------------------------schedule_pinned_nodes--------------------------
127 // Set the basic block for Nodes pinned into blocks
128 void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
129 // Allocate node stack of size C->unique()+8 to avoid frequent realloc
130 GrowableArray <Node *> spstack(C->unique() + 8);
131 spstack.push(_root);
132 while (spstack.is_nonempty()) {
133 Node* node = spstack.pop();
134 if (!visited.test_set(node->_idx)) { // Test node and flag it as visited
135 if (node->pinned() && !has_block(node)) { // Pinned? Nail it down!
136 assert(node->in(0), "pinned Node must have Control");
137 // Before setting block replace block_proj control edge
138 replace_block_proj_ctrl(node);
139 Node* input = node->in(0);
140 while (!input->is_block_start()) {
141 input = input->in(0);
142 }
143 Block* block = get_block_for_node(input); // Basic block of controlling input
144 schedule_node_into_block(node, block);
145 }
147 // process all inputs that are non NULL
148 for (int i = node->req() - 1; i >= 0; --i) {
149 if (node->in(i) != NULL) {
150 spstack.push(node->in(i));
151 }
152 }
153 }
154 }
155 }
157 #ifdef ASSERT
158 // Assert that new input b2 is dominated by all previous inputs.
159 // Check this by seeing that it is dominated by b1, the deepest
160 // input observed until b2.
161 static void assert_dom(Block* b1, Block* b2, Node* n, const PhaseCFG* cfg) {
162 if (b1 == NULL) return;
163 assert(b1->_dom_depth < b2->_dom_depth, "sanity");
164 Block* tmp = b2;
165 while (tmp != b1 && tmp != NULL) {
166 tmp = tmp->_idom;
167 }
168 if (tmp != b1) {
169 // Detected an unschedulable graph. Print some nice stuff and die.
170 tty->print_cr("!!! Unschedulable graph !!!");
171 for (uint j=0; j<n->len(); j++) { // For all inputs
172 Node* inn = n->in(j); // Get input
173 if (inn == NULL) continue; // Ignore NULL, missing inputs
174 Block* inb = cfg->get_block_for_node(inn);
175 tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
176 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
177 inn->dump();
178 }
179 tty->print("Failing node: ");
180 n->dump();
181 assert(false, "unschedulable graph");
182 }
183 }
184 #endif
186 static Block* find_deepest_input(Node* n, const PhaseCFG* cfg) {
187 // Find the last input dominated by all other inputs.
188 Block* deepb = NULL; // Deepest block so far
189 int deepb_dom_depth = 0;
190 for (uint k = 0; k < n->len(); k++) { // For all inputs
191 Node* inn = n->in(k); // Get input
192 if (inn == NULL) continue; // Ignore NULL, missing inputs
193 Block* inb = cfg->get_block_for_node(inn);
194 assert(inb != NULL, "must already have scheduled this input");
195 if (deepb_dom_depth < (int) inb->_dom_depth) {
196 // The new inb must be dominated by the previous deepb.
197 // The various inputs must be linearly ordered in the dom
198 // tree, or else there will not be a unique deepest block.
199 DEBUG_ONLY(assert_dom(deepb, inb, n, cfg));
200 deepb = inb; // Save deepest block
201 deepb_dom_depth = deepb->_dom_depth;
202 }
203 }
204 assert(deepb != NULL, "must be at least one input to n");
205 return deepb;
206 }
209 //------------------------------schedule_early---------------------------------
210 // Find the earliest Block any instruction can be placed in. Some instructions
211 // are pinned into Blocks. Unpinned instructions can appear in the last block in
212 // which all their inputs occur.
213 bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
214 // Allocate stack with enough space to avoid frequent realloc
215 Node_Stack nstack(roots.Size() + 8);
216 // _root will be processed among C->top() inputs
217 roots.push(C->top());
218 visited.set(C->top()->_idx);
220 while (roots.size() != 0) {
221 // Use local variables parent_node and input_index to cache the values
222 // at the top of the node stack.
223 Node* parent_node = roots.pop();
224 uint input_index = 0;
226 while (true) {
227 if (input_index == 0) {
228 // Fixup some control. Constants without control get attached
229 // to root, and nodes that use is_block_proj() nodes should be attached
230 // to the region that starts their block.
231 const Node* control_input = parent_node->in(0);
232 if (control_input != NULL) {
233 replace_block_proj_ctrl(parent_node);
234 } else {
235 // Is a constant with NO inputs?
236 if (parent_node->req() == 1) {
237 parent_node->set_req(0, _root);
238 }
239 }
240 }
242 // First, visit all inputs and force them to get a block. If an
243 // input is already in a block we quit following inputs (to avoid
244 // cycles). Instead we put that Node on a worklist to be handled
245 // later (since ITS inputs may not have a block yet).
247 // Assume all n's inputs will be processed
248 bool done = true;
250 while (input_index < parent_node->len()) {
251 Node* in = parent_node->in(input_index++);
252 if (in == NULL) {
253 continue;
254 }
256 int is_visited = visited.test_set(in->_idx);
257 if (!has_block(in)) {
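// A node that is already marked visited but still has no block is still being
// processed on the current stack: the inputs form a cycle, so the graph cannot
// be scheduled. Return failure and let the caller bail out of the compile.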
258 if (is_visited) {
259 return false;
260 }
261 // Save parent node and next input's index.
262 nstack.push(parent_node, input_index);
263 // Process current input now.
264 parent_node = in;
265 input_index = 0;
266 // Not all n's inputs processed.
267 done = false;
268 break;
269 } else if (!is_visited) {
270 // Visit this guy later, using worklist
271 roots.push(in);
272 }
273 }
275 if (done) {
276 // All of n's inputs have been processed, complete post-processing.
278 // Some instructions are pinned into a block. These include Region,
279 // Phi, Start, Return, and other control-dependent instructions and
280 // any projections which depend on them.
281 if (!parent_node->pinned()) {
282 // Set earliest legal block.
283 Block* earliest_block = find_deepest_input(parent_node, this);
284 map_node_to_block(parent_node, earliest_block);
285 } else {
286 assert(get_block_for_node(parent_node) == get_block_for_node(parent_node->in(0)), "Pinned Node should be at the same block as its control edge");
287 }
289 if (nstack.is_empty()) {
290 // Finished all nodes on stack.
291 // Process next node on the worklist 'roots'.
292 break;
293 }
294 // Get saved parent node and next input's index.
295 parent_node = nstack.node();
296 input_index = nstack.index();
297 nstack.pop();
298 }
299 }
300 }
301 return true;
302 }
304 //------------------------------dom_lca----------------------------------------
305 // Find least common ancestor in dominator tree
306 // LCA is a current notion of LCA, to be raised above 'this'.
307 // As a convenient boundary condition, return 'this' if LCA is NULL.
308 // Find the LCA of those two nodes.
309 Block* Block::dom_lca(Block* LCA) {
310 if (LCA == NULL || LCA == this) return this;
312 Block* anc = this;
313 while (anc->_dom_depth > LCA->_dom_depth)
314 anc = anc->_idom; // Walk up till anc is as high as LCA
316 while (LCA->_dom_depth > anc->_dom_depth)
317 LCA = LCA->_idom; // Walk up till LCA is as high as anc
319 while (LCA != anc) { // Walk both up till they are the same
320 LCA = LCA->_idom;
321 anc = anc->_idom;
322 }
324 return LCA;
325 }
327 //--------------------------raise_LCA_above_use--------------------------------
328 // We are placing a definition, and have been given a def->use edge.
329 // The definition must dominate the use, so move the LCA upward in the
330 // dominator tree to dominate the use. If the use is a phi, adjust
331 // the LCA only with the phi input paths which actually use this def.
332 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, const PhaseCFG* cfg) {
333 Block* buse = cfg->get_block_for_node(use);
334 if (buse == NULL) return LCA; // Unused killing Projs have no use block
335 if (!use->is_Phi()) return buse->dom_lca(LCA);
336 uint pmax = use->req(); // Number of Phi inputs
337 // Why doesn't this loop just break after finding the matching input to
338 // the Phi? Well...it's like this. I do not have true def-use/use-def
339 // chains. Means I cannot distinguish, from the def-use direction, which
340 // of many use-defs lead from the same use to the same def. That is, this
341 // Phi might have several uses of the same def. Each use appears in a
342 // different predecessor block. But when I enter here, I cannot distinguish
343 // which use-def edge I should find the predecessor block for. So I find
344 // them all. Means I do a little extra work if a Phi uses the same value
345 // more than once.
346 for (uint j=1; j<pmax; j++) { // For all inputs
347 if (use->in(j) == def) { // Found matching input?
348 Block* pred = cfg->get_block_for_node(buse->pred(j));
349 LCA = pred->dom_lca(LCA);
350 }
351 }
352 return LCA;
353 }
355 //----------------------------raise_LCA_above_marks----------------------------
356 // Return a new LCA that dominates LCA and any of its marked predecessors.
357 // Search all my parents up to 'early' (exclusive), looking for predecessors
358 // which are marked with the given index. Return the LCA (in the dom tree)
359 // of all marked blocks. If there are none marked, return the original
360 // LCA.
361 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark, Block* early, const PhaseCFG* cfg) {
362 Block_List worklist;
363 worklist.push(LCA);
364 while (worklist.size() > 0) {
365 Block* mid = worklist.pop();
366 if (mid == early) continue; // stop searching here
368 // Test and set the visited bit.
369 if (mid->raise_LCA_visited() == mark) continue; // already visited
371 // Don't process the current LCA, otherwise the search may terminate early
372 if (mid != LCA && mid->raise_LCA_mark() == mark) {
373 // Raise the LCA.
374 LCA = mid->dom_lca(LCA);
375 if (LCA == early) break; // stop searching everywhere
376 assert(early->dominates(LCA), "early is high enough");
377 // Resume searching at that point, skipping intermediate levels.
378 worklist.push(LCA);
379 if (LCA == mid)
380 continue; // Don't mark as visited to avoid early termination.
381 } else {
382 // Keep searching through this block's predecessors.
383 for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
384 Block* mid_parent = cfg->get_block_for_node(mid->pred(j));
385 worklist.push(mid_parent);
386 }
387 }
388 mid->set_raise_LCA_visited(mark);
389 }
390 return LCA;
391 }
393 //--------------------------memory_early_block--------------------------------
394 // This is a variation of find_deepest_input, the heart of schedule_early.
395 // Find the "early" block for a load, if we considered only memory and
396 // address inputs, that is, if other data inputs were ignored.
397 //
398 // Because a subset of edges are considered, the resulting block will
399 // be earlier (at a shallower dom_depth) than the true schedule_early
400 // point of the node. We compute this earlier block as a more permissive
401 // site for anti-dependency insertion, but only if subsume_loads is enabled.
402 static Block* memory_early_block(Node* load, Block* early, const PhaseCFG* cfg) {
403 Node* base;
404 Node* index;
405 Node* store = load->in(MemNode::Memory);
406 load->as_Mach()->memory_inputs(base, index);
408 assert(base != NodeSentinel && index != NodeSentinel,
409 "unexpected base/index inputs");
411 Node* mem_inputs[4];
412 int mem_inputs_length = 0;
413 if (base != NULL) mem_inputs[mem_inputs_length++] = base;
414 if (index != NULL) mem_inputs[mem_inputs_length++] = index;
415 if (store != NULL) mem_inputs[mem_inputs_length++] = store;
417 // In the comparison below, add one to account for the control input,
418 // which may be null, but always takes up a spot in the in array.
419 if (mem_inputs_length + 1 < (int) load->req()) {
420 // This "load" has more inputs than just the memory, base and index inputs.
421 // For purposes of checking anti-dependences, we need to start
422 // from the early block of only the address portion of the instruction,
423 // and ignore other blocks that may have factored into the wider
424 // schedule_early calculation.
425 if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
427 Block* deepb = NULL; // Deepest block so far
428 int deepb_dom_depth = 0;
429 for (int i = 0; i < mem_inputs_length; i++) {
430 Block* inb = cfg->get_block_for_node(mem_inputs[i]);
431 if (deepb_dom_depth < (int) inb->_dom_depth) {
432 // The new inb must be dominated by the previous deepb.
433 // The various inputs must be linearly ordered in the dom
434 // tree, or else there will not be a unique deepest block.
435 DEBUG_ONLY(assert_dom(deepb, inb, load, cfg));
436 deepb = inb; // Save deepest block
437 deepb_dom_depth = deepb->_dom_depth;
438 }
439 }
440 early = deepb;
441 }
443 return early;
444 }
446 //--------------------------insert_anti_dependences---------------------------
447 // A load may need to witness memory that nearby stores can overwrite.
448 // For each nearby store, either insert an "anti-dependence" edge
449 // from the load to the store, or else move LCA upward to force the
450 // load to (eventually) be scheduled in a block above the store.
451 //
452 // Do not add edges to stores on distinct control-flow paths;
453 // only add edges to stores which might interfere.
454 //
455 // Return the (updated) LCA. There will not be any possibly interfering
456 // store between the load's "early block" and the updated LCA.
457 // Any stores in the updated LCA will have new precedence edges
458 // back to the load. The caller is expected to schedule the load
459 // in the LCA, in which case the precedence edges will make LCM
460 // preserve anti-dependences. The caller may also hoist the load
461 // above the LCA, if it is not the early block.
462 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
463 assert(load->needs_anti_dependence_check(), "must be a load of some sort");
464 assert(LCA != NULL, "");
465 DEBUG_ONLY(Block* LCA_orig = LCA);
467 // Compute the alias index. Loads and stores with different alias indices
468 // do not need anti-dependence edges.
469 uint load_alias_idx = C->get_alias_index(load->adr_type());
470 #ifdef ASSERT
471 if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
472 (PrintOpto || VerifyAliases ||
473 PrintMiscellaneous && (WizardMode || Verbose))) {
474 // Load nodes should not consume all of memory.
475 // Reporting a bottom type indicates a bug in adlc.
476 // If some particular type of node validly consumes all of memory,
477 // sharpen the preceding "if" to exclude it, so we can catch bugs here.
478 tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
479 load->dump(2);
480 if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
481 }
482 #endif
483 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
484 "String compare is the only known 'load' that does not conflict with any stores");
485 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrEquals),
486 "String equals is a 'load' that does not conflict with any stores");
487 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrIndexOf),
488 "String indexOf is a 'load' that does not conflict with any stores");
489 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_AryEq),
490 "Arrays equals is a 'load' that does not conflict with any stores");
492 if (!C->alias_type(load_alias_idx)->is_rewritable()) {
493 // It is impossible to spoil this load by putting stores before it,
494 // because we know that the stores will never update the value
495 // which 'load' must witness.
496 return LCA;
497 }
499 node_idx_t load_index = load->_idx;
501 // Note the earliest legal placement of 'load', as determined by
502 // the unique point in the dom tree where all memory effects
503 // and other inputs are first available. (Computed by schedule_early.)
504 // For normal loads, 'early' is the shallowest place (dom graph wise)
505 // to look for anti-deps between this load and any store.
506 Block* early = get_block_for_node(load);
508 // If we are subsuming loads, compute an "early" block that only considers
509 // memory or address inputs. This block may be different than the
510 // schedule_early block in that it could be at an even shallower depth in the
511 // dominator tree, and allow for a broader discovery of anti-dependences.
512 if (C->subsume_loads()) {
513 early = memory_early_block(load, early, this);
514 }
516 ResourceArea *area = Thread::current()->resource_area();
517 Node_List worklist_mem(area); // prior memory state to store
518 Node_List worklist_store(area); // possible-def to explore
519 Node_List worklist_visited(area); // visited mergemem nodes
520 Node_List non_early_stores(area); // all relevant stores outside of early
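// must_raise_LCA is set whenever a possibly interfering store is found in a
// block other than 'early'; in that case the LCA is raised above all marked
// blocks once the worklist is drained (see raise_LCA_above_marks below).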
521 bool must_raise_LCA = false;
523 #ifdef TRACK_PHI_INPUTS
524 // %%% This extra checking fails because MergeMem nodes are not GVNed.
525 // Provide "phi_inputs" to check if every input to a PhiNode is from the
526 // original memory state. This indicates a PhiNode which should not
527 // prevent the load from sinking. For such a block, set_raise_LCA_mark
528 // may be overly conservative.
529 // Mechanism: count inputs seen for each Phi encountered in worklist_store.
530 DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
531 #endif
533 // 'load' uses some memory state; look for users of the same state.
534 // Recurse through MergeMem nodes to the stores that use them.
536 // Each of these stores is a possible definition of memory
537 // that 'load' needs to use. We need to force 'load'
538 // to occur before each such store. When the store is in
539 // the same block as 'load', we insert an anti-dependence
540 // edge load->store.
542 // The relevant stores "nearby" the load consist of a tree rooted
543 // at initial_mem, with internal nodes of type MergeMem.
544 // Therefore, the branches visited by the worklist are of this form:
545 // initial_mem -> (MergeMem ->)* store
546 // The anti-dependence constraints apply only to the fringe of this tree.
548 Node* initial_mem = load->in(MemNode::Memory);
549 worklist_store.push(initial_mem);
550 worklist_visited.push(initial_mem);
551 worklist_mem.push(NULL);
552 while (worklist_store.size() > 0) {
553 // Examine a nearby store to see if it might interfere with our load.
554 Node* mem = worklist_mem.pop();
555 Node* store = worklist_store.pop();
556 uint op = store->Opcode();
558 // MergeMems do not directly have anti-deps.
559 // Treat them as internal nodes in a forward tree of memory states,
560 // the leaves of which are each a 'possible-def'.
561 if (store == initial_mem // root (exclusive) of tree we are searching
562 || op == Op_MergeMem // internal node of tree we are searching
563 ) {
564 mem = store; // It's not a possibly interfering store.
565 if (store == initial_mem)
566 initial_mem = NULL; // only process initial memory once
568 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
569 store = mem->fast_out(i);
570 if (store->is_MergeMem()) {
571 // Be sure we don't get into combinatorial problems.
572 // (Allow phis to be repeated; they can merge two relevant states.)
573 uint j = worklist_visited.size();
574 for (; j > 0; j--) {
575 if (worklist_visited.at(j-1) == store) break;
576 }
577 if (j > 0) continue; // already on work list; do not repeat
578 worklist_visited.push(store);
579 }
580 worklist_mem.push(mem);
581 worklist_store.push(store);
582 }
583 continue;
584 }
586 if (op == Op_MachProj || op == Op_Catch) continue;
587 if (store->needs_anti_dependence_check()) continue; // not really a store
589 // Compute the alias index. Loads and stores with different alias
590 // indices do not need anti-dependence edges. Wide MemBar's are
591 // anti-dependent on everything (except immutable memories).
592 const TypePtr* adr_type = store->adr_type();
593 if (!C->can_alias(adr_type, load_alias_idx)) continue;
595 // Most slow-path runtime calls do NOT modify Java memory, but
596 // they can block and so write Raw memory.
597 if (store->is_Mach()) {
598 MachNode* mstore = store->as_Mach();
599 if (load_alias_idx != Compile::AliasIdxRaw) {
600 // Check for call into the runtime using the Java calling
601 // convention (and from there into a wrapper); it has no
602 // _method. Can't do this optimization for Native calls because
603 // they CAN write to Java memory.
604 if (mstore->ideal_Opcode() == Op_CallStaticJava) {
605 assert(mstore->is_MachSafePoint(), "");
606 MachSafePointNode* ms = (MachSafePointNode*) mstore;
607 assert(ms->is_MachCallJava(), "");
608 MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
609 if (mcj->_method == NULL) {
610 // These runtime calls do not write to Java visible memory
611 // (other than Raw) and so do not require anti-dependence edges.
612 continue;
613 }
614 }
615 // Same for SafePoints: they read/write Raw but only read otherwise.
616 // This is basically a workaround for SafePoints only defining control
617 // instead of control + memory.
618 if (mstore->ideal_Opcode() == Op_SafePoint)
619 continue;
620 } else {
621 // Some raw memory, such as the load of "top" at an allocation,
622 // can be control dependent on the previous safepoint. See
623 // comments in GraphKit::allocate_heap() about control input.
624 // Inserting an anti-dep between such a safepoint and a use
625 // creates a cycle, and will cause a subsequent failure in
626 // local scheduling. (BugId 4919904)
627 // (%%% How can a control input be a safepoint and not a projection??)
628 if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
629 continue;
630 }
631 }
633 // Identify a block that the current load must be above,
634 // or else observe that 'store' is all the way up in the
635 // earliest legal block for 'load'. In the latter case,
636 // immediately insert an anti-dependence edge.
637 Block* store_block = get_block_for_node(store);
638 assert(store_block != NULL, "unused killing projections skipped above");
640 if (store->is_Phi()) {
641 // 'load' uses memory which is one (or more) of the Phi's inputs.
642 // It must be scheduled not before the Phi, but rather before
643 // each of the relevant Phi inputs.
644 //
645 // Instead of finding the LCA of all inputs to a Phi that match 'mem',
646 // we mark each corresponding predecessor block and do a combined
647 // hoisting operation later (raise_LCA_above_marks).
648 //
649 // Do not assert(store_block != early, "Phi merging memory after access")
650 // PhiNode may be at start of block 'early' with backedge to 'early'
651 DEBUG_ONLY(bool found_match = false);
652 for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
653 if (store->in(j) == mem) { // Found matching input?
654 DEBUG_ONLY(found_match = true);
655 Block* pred_block = get_block_for_node(store_block->pred(j));
656 if (pred_block != early) {
657 // If any predecessor of the Phi matches the load's "early block",
658 // we do not need a precedence edge between the Phi and 'load'
659 // since the load will be forced into a block preceding the Phi.
660 pred_block->set_raise_LCA_mark(load_index);
661 assert(!LCA_orig->dominates(pred_block) ||
662 early->dominates(pred_block), "early is high enough");
663 must_raise_LCA = true;
664 } else {
665 // anti-dependent upon PHI pinned below 'early', no edge needed
666 LCA = early; // but can not schedule below 'early'
667 }
668 }
669 }
670 assert(found_match, "no worklist bug");
671 #ifdef TRACK_PHI_INPUTS
672 #ifdef ASSERT
673 // This assert asks about correct handling of PhiNodes, which may not
674 // have all input edges directly from 'mem'. See BugId 4621264
675 int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
676 // Increment by exactly one even if there are multiple copies of 'mem'
677 // coming into the phi, because we will run this block several times
678 // if there are several copies of 'mem'. (That's how DU iterators work.)
679 phi_inputs.at_put(store->_idx, num_mem_inputs);
680 assert(PhiNode::Input + num_mem_inputs < store->req(),
681 "Expect at least one phi input will not be from original memory state");
682 #endif //ASSERT
683 #endif //TRACK_PHI_INPUTS
684 } else if (store_block != early) {
685 // 'store' is between the current LCA and earliest possible block.
686 // Label its block, and decide later on how to raise the LCA
687 // to include the effect on LCA of this store.
688 // If this store's block gets chosen as the raised LCA, we
689 // will find him on the non_early_stores list and stick him
690 // with a precedence edge.
691 // (But, don't bother if LCA is already raised all the way.)
692 if (LCA != early) {
693 store_block->set_raise_LCA_mark(load_index);
694 must_raise_LCA = true;
695 non_early_stores.push(store);
696 }
697 } else {
698 // Found a possibly-interfering store in the load's 'early' block.
699 // This means 'load' cannot sink at all in the dominator tree.
700 // Add an anti-dep edge, and squeeze 'load' into the highest block.
701 assert(store != load->in(0), "dependence cycle found");
702 if (verify) {
703 assert(store->find_edge(load) != -1, "missing precedence edge");
704 } else {
705 store->add_prec(load);
706 }
707 LCA = early;
708 // This turns off the process of gathering non_early_stores.
709 }
710 }
711 // (Worklist is now empty; all nearby stores have been visited.)
713 // Finished if 'load' must be scheduled in its 'early' block.
714 // If we found any stores there, they have already been given
715 // precedence edges.
716 if (LCA == early) return LCA;
718 // We get here only if there are no possibly-interfering stores
719 // in the load's 'early' block. Move LCA up above all predecessors
720 // which contain stores we have noted.
721 //
722 // The raised LCA block can be a home to such interfering stores,
723 // but its predecessors must not contain any such stores.
724 //
725 // The raised LCA will be a lower bound for placing the load,
726 // preventing the load from sinking past any block containing
727 // a store that may invalidate the memory state required by 'load'.
728 if (must_raise_LCA)
729 LCA = raise_LCA_above_marks(LCA, load->_idx, early, this);
730 if (LCA == early) return LCA;
732 // Insert anti-dependence edges from 'load' to each store
733 // in the non-early LCA block.
734 // Mine the non_early_stores list for such stores.
735 if (LCA->raise_LCA_mark() == load_index) {
736 while (non_early_stores.size() > 0) {
737 Node* store = non_early_stores.pop();
738 Block* store_block = get_block_for_node(store);
739 if (store_block == LCA) {
740 // add anti_dependence from store to load in its own block
741 assert(store != load->in(0), "dependence cycle found");
742 if (verify) {
743 assert(store->find_edge(load) != -1, "missing precedence edge");
744 } else {
745 store->add_prec(load);
746 }
747 } else {
748 assert(store_block->raise_LCA_mark() == load_index, "block was marked");
749 // Any other stores we found must be either inside the new LCA
750 // or else outside the original LCA. In the latter case, they
751 // did not interfere with any use of 'load'.
752 assert(LCA->dominates(store_block)
753 || !LCA_orig->dominates(store_block), "no stray stores");
754 }
755 }
756 }
758 // Return the highest block containing stores; any stores
759 // within that block have been given anti-dependence edges.
760 return LCA;
761 }
763 // This class is used to iterate backwards over the nodes in the graph.
765 class Node_Backward_Iterator {
767 private:
768 Node_Backward_Iterator();
770 public:
771 // Constructor for the iterator
772 Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg);
774 // Return the next node in the backward traversal (NULL when done)
775 Node *next();
777 private:
778 VectorSet &_visited;
779 Node_List &_stack;
780 PhaseCFG &_cfg;
781 };
783 // Constructor for the Node_Backward_Iterator
784 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, PhaseCFG &cfg)
785 : _visited(visited), _stack(stack), _cfg(cfg) {
786 // The stack should contain exactly the root
787 stack.clear();
788 stack.push(root);
790 // Clear the visited bits
791 visited.Clear();
792 }
794 // Return the next node for the Node_Backward_Iterator
795 Node *Node_Backward_Iterator::next() {
797 // If the _stack is empty, then just return NULL: finished.
798 if ( !_stack.size() )
799 return NULL;
801 // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been
802 // made stateless, so I do not need to record the index 'i' on my _stack.
803 // Instead I visit all users each time, scanning for unvisited users.
804 // I visit unvisited not-anti-dependence users first, then anti-dependent
805 // children next.
806 Node *self = _stack.pop();
808 // I cycle here when I am entering a deeper level of recursion.
809 // The key variable 'self' was set prior to jumping here.
810 while( 1 ) {
812 _visited.set(self->_idx);
814 // Now schedule all uses as late as possible.
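// Projections are kept in the same block as the node they project from
// (see schedule_node_into_block), so compare RPO numbers of the defining nodes.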
815 const Node* src = self->is_Proj() ? self->in(0) : self;
816 uint src_rpo = _cfg.get_block_for_node(src)->_rpo;
818 // Schedule all nodes in a post-order visit
819 Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
821 // Scan for unvisited nodes
822 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
823 // For all uses, schedule late
824 Node* n = self->fast_out(i); // Use
826 // Skip already visited children
827 if ( _visited.test(n->_idx) )
828 continue;
830 // do not traverse backward control edges
831 Node *use = n->is_Proj() ? n->in(0) : n;
832 uint use_rpo = _cfg.get_block_for_node(use)->_rpo;
834 if ( use_rpo < src_rpo )
835 continue;
837 // Phi nodes always precede uses in a basic block
838 if ( use_rpo == src_rpo && use->is_Phi() )
839 continue;
841 unvisited = n; // Found unvisited
843 // Check for possible-anti-dependent
844 if( !n->needs_anti_dependence_check() )
845 break; // Not visited, not anti-dep; schedule it NOW
846 }
848 // Did I find an unvisited not-anti-dependent Node?
849 if ( !unvisited )
850 break; // All done with children; post-visit 'self'
852 // Visit the unvisited Node. Contains the obvious push to
853 // indicate I'm entering a deeper level of recursion. I push the
854 // old state onto the _stack and set a new state and loop (recurse).
855 _stack.push(self);
856 self = unvisited;
857 } // End recursion loop
859 return self;
860 }
862 //------------------------------ComputeLatenciesBackwards----------------------
863 // Compute the latency of all the instructions.
864 void PhaseCFG::compute_latencies_backwards(VectorSet &visited, Node_List &stack) {
865 #ifndef PRODUCT
866 if (trace_opto_pipelining())
867 tty->print("\n#---- ComputeLatenciesBackwards ----\n");
868 #endif
870 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
871 Node *n;
873 // Walk over all the nodes from last to first
874 while ((n = iter.next()) != NULL) {
875 // Set the latency for the definitions of this instruction
876 partial_latency_of_defs(n);
877 }
878 } // end ComputeLatenciesBackwards
880 //------------------------------partial_latency_of_defs------------------------
881 // Compute the latency impact of this node on all defs. This computes
882 // a number that increases as we approach the beginning of the routine.
883 void PhaseCFG::partial_latency_of_defs(Node *n) {
884 // Set the latency for this instruction
885 #ifndef PRODUCT
886 if (trace_opto_pipelining()) {
887 tty->print("# latency_to_inputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
888 dump();
889 }
890 #endif
892 if (n->is_Proj()) {
893 n = n->in(0);
894 }
896 if (n->is_Root()) {
897 return;
898 }
900 uint nlen = n->len();
901 uint use_latency = get_latency_for_node(n);
902 uint use_pre_order = get_block_for_node(n)->_pre_order;
904 for (uint j = 0; j < nlen; j++) {
905 Node *def = n->in(j);
907 if (!def || def == n) {
908 continue;
909 }
911 // Walk backwards thru projections
912 if (def->is_Proj()) {
913 def = def->in(0);
914 }
916 #ifndef PRODUCT
917 if (trace_opto_pipelining()) {
918 tty->print("# in(%2d): ", j);
919 def->dump();
920 }
921 #endif
923 // If the defining block is not known, assume it is ok
924 Block *def_block = get_block_for_node(def);
925 uint def_pre_order = def_block ? def_block->_pre_order : 0;
927 if ((use_pre_order < def_pre_order) || (use_pre_order == def_pre_order && n->is_Phi())) {
928 continue;
929 }
931 uint delta_latency = n->latency(j);
932 uint current_latency = delta_latency + use_latency;
934 if (get_latency_for_node(def) < current_latency) {
935 set_latency_for_node(def, current_latency);
936 }
938 #ifndef PRODUCT
939 if (trace_opto_pipelining()) {
940 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d", use_latency, j, delta_latency, current_latency, def->_idx, get_latency_for_node(def));
941 }
942 #endif
943 }
944 }
946 //------------------------------latency_from_use-------------------------------
947 // Compute the latency of a specific use
948 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
949 // If self-reference, return no latency
950 if (use == n || use->is_Root()) {
951 return 0;
952 }
954 uint def_pre_order = get_block_for_node(def)->_pre_order;
955 uint latency = 0;
957 // If the use is not a projection, then it is simple...
958 if (!use->is_Proj()) {
959 #ifndef PRODUCT
960 if (trace_opto_pipelining()) {
961 tty->print("# out(): ");
962 use->dump();
963 }
964 #endif
966 uint use_pre_order = get_block_for_node(use)->_pre_order;
968 if (use_pre_order < def_pre_order)
969 return 0;
971 if (use_pre_order == def_pre_order && use->is_Phi())
972 return 0;
974 uint nlen = use->len();
975 uint nl = get_latency_for_node(use);
977 for ( uint j=0; j<nlen; j++ ) {
978 if (use->in(j) == n) {
979 // Change this if we want local latencies
980 uint ul = use->latency(j);
981 uint l = ul + nl;
982 if (latency < l) latency = l;
983 #ifndef PRODUCT
984 if (trace_opto_pipelining()) {
985 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
986 nl, j, ul, l, latency);
987 }
988 #endif
989 }
990 }
991 } else {
992 // This is a projection, just grab the latency of the use(s)
993 for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
994 uint l = latency_from_use(use, def, use->fast_out(j));
995 if (latency < l) latency = l;
996 }
997 }
999 return latency;
1000 }
1002 //------------------------------latency_from_uses------------------------------
1003 // Compute the latency of this instruction relative to all of its uses.
1004 // This computes a number that increases as we approach the beginning of the
1005 // routine.
1006 void PhaseCFG::latency_from_uses(Node *n) {
1007 // Set the latency for this instruction
1008 #ifndef PRODUCT
1009 if (trace_opto_pipelining()) {
1010 tty->print("# latency_from_outputs: node_latency[%d] = %d for node", n->_idx, get_latency_for_node(n));
1011 dump();
1012 }
1013 #endif
1014 uint latency=0;
1015 const Node *def = n->is_Proj() ? n->in(0): n;
1017 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
1018 uint l = latency_from_use(n, def, n->fast_out(i));
1020 if (latency < l) latency = l;
1021 }
1023 set_latency_for_node(n, latency);
1024 }
1026 //------------------------------hoist_to_cheaper_block-------------------------
1027 // Pick a block for node self, between early and LCA, that is a cheaper
1028 // alternative to LCA.
1029 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
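// delta is a small tolerance factor: when hoisting to cover latency, a
// candidate block may be at most this factor more frequent than the best
// block found so far (see the 'LCA_freq < least_freq * delta' test below).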
1030 const double delta = 1+PROB_UNLIKELY_MAG(4);
1031 Block* least = LCA;
1032 double least_freq = least->_freq;
1033 uint target = get_latency_for_node(self);
1034 uint start_latency = get_latency_for_node(LCA->_nodes[0]);
1035 uint end_latency = get_latency_for_node(LCA->_nodes[LCA->end_idx()]);
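// in_latency is true when the node's latency target is already satisfied at
// the start of the current best block; when true, latency-driven hoisting is
// suppressed.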
1036 bool in_latency = (target <= start_latency);
1037 const Block* root_block = get_block_for_node(_root);
1039 // Turn off latency scheduling if scheduling is just plain off
1040 if (!C->do_scheduling())
1041 in_latency = true;
1043 // Do not hoist (to cover latency) instructions which target a
1044 // single register. Hoisting stretches the live range of the
1045 // single register and may force spilling.
1046 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1047 if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
1048 in_latency = true;
1050 #ifndef PRODUCT
1051 if (trace_opto_pipelining()) {
1052 tty->print("# Find cheaper block for latency %d: ", get_latency_for_node(self));
1053 self->dump();
1054 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1055 LCA->_pre_order,
1056 LCA->_nodes[0]->_idx,
1057 start_latency,
1058 LCA->_nodes[LCA->end_idx()]->_idx,
1059 end_latency,
1060 least_freq);
1061 }
1062 #endif
1064 int cand_cnt = 0; // number of candidates tried
1066 // Walk up the dominator tree from LCA (Lowest common ancestor) to
1067 // the earliest legal location. Capture the least execution frequency.
1068 while (LCA != early) {
1069 LCA = LCA->_idom; // Follow up the dominator tree
1071 if (LCA == NULL) {
1072 // Bailout without retry
1073 C->record_method_not_compilable("late schedule failed: LCA == NULL");
1074 return least;
1075 }
1077 // Don't hoist machine instructions to the root basic block
1078 if (mach && LCA == root_block)
1079 break;
1081 uint start_lat = get_latency_for_node(LCA->_nodes[0]);
1082 uint end_idx = LCA->end_idx();
1083 uint end_lat = get_latency_for_node(LCA->_nodes[end_idx]);
1084 double LCA_freq = LCA->_freq;
1085 #ifndef PRODUCT
1086 if (trace_opto_pipelining()) {
1087 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1088 LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
1089 }
1090 #endif
1091 cand_cnt++;
1092 if (LCA_freq < least_freq || // Better Frequency
1093 (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
1094 (!StressGCM && // Otherwise, choose with latency
1095 !in_latency && // No block containing latency
1096 LCA_freq < least_freq * delta && // No worse frequency
1097 target >= end_lat && // within latency range
1098 !self->is_iteratively_computed() ) // But don't hoist IV increments
1099 // because they may end up above other uses of their phi forcing
1100 // their result register to be different from their input.
1101 ) {
1102 least = LCA; // Found cheaper block
1103 least_freq = LCA_freq;
1104 start_latency = start_lat;
1105 end_latency = end_lat;
1106 if (target <= start_lat)
1107 in_latency = true;
1108 }
1109 }
1111 #ifndef PRODUCT
1112 if (trace_opto_pipelining()) {
1113 tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
1114 least->_pre_order, start_latency, least_freq);
1115 }
1116 #endif
1118 // See if the latency needs to be updated
1119 if (target < end_latency) {
1120 #ifndef PRODUCT
1121 if (trace_opto_pipelining()) {
1122 tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1123 }
1124 #endif
1125 set_latency_for_node(self, end_latency);
1126 partial_latency_of_defs(self);
1127 }
1129 return least;
1130 }
1133 //------------------------------schedule_late-----------------------------------
1134 // Now schedule all codes as LATE as possible. This is the LCA in the
1135 // dominator tree of all USES of a value. Pick the block with the least
1136 // loop nesting depth that is lowest in the dominator tree.
1137 extern const char must_clone[];
1138 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
1139 #ifndef PRODUCT
1140 if (trace_opto_pipelining())
1141 tty->print("\n#---- schedule_late ----\n");
1142 #endif
1144 Node_Backward_Iterator iter((Node *)_root, visited, stack, *this);
1145 Node *self;
1147 // Walk over all the nodes from last to first
1148 while ((self = iter.next()) != NULL) {
1149 Block* early = get_block_for_node(self); // Earliest legal placement
1151 if (self->is_top()) {
1152 // Top node goes in bb #2 with other constants.
1153 // It must be special-cased, because it has no out edges.
1154 early->add_inst(self);
1155 continue;
1156 }
1158 // No uses, just terminate
1159 if (self->outcnt() == 0) {
1160 assert(self->is_MachProj(), "sanity");
1161 continue; // Must be a dead machine projection
1162 }
1164 // If node is pinned in the block, then no scheduling can be done.
1165 if( self->pinned() ) // Pinned in block?
1166 continue;
1168 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1169 if (mach) {
1170 switch (mach->ideal_Opcode()) {
1171 case Op_CreateEx:
1172 // Don't move exception creation
1173 early->add_inst(self);
1174 continue;
1175 break;
1176 case Op_CheckCastPP:
1177 // Don't move CheckCastPP nodes away from their input, if the input
1178 // is a rawptr (5071820).
1179 Node *def = self->in(1);
1180 if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1181 early->add_inst(self);
1182 #ifdef ASSERT
1183 _raw_oops.push(def);
1184 #endif
1185 continue;
1186 }
1187 break;
1188 }
1189 }
1191 // Gather LCA of all uses
1192 Block *LCA = NULL;
1193 {
1194 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1195 // For all uses, find LCA
1196 Node* use = self->fast_out(i);
1197 LCA = raise_LCA_above_use(LCA, use, self, this);
1198 }
1199 } // (Hide defs of imax, i from rest of block.)
1201 // Place temps in the block of their use. This isn't a
1202 // requirement for correctness but it reduces useless
1203 // interference between temps and other nodes.
1204 if (mach != NULL && mach->is_MachTemp()) {
1205 map_node_to_block(self, LCA);
1206 LCA->add_inst(self);
1207 continue;
1208 }
1210 // Check if 'self' could be anti-dependent on memory
1211 if (self->needs_anti_dependence_check()) {
1212 // Hoist LCA above possible-defs and insert anti-dependences to
1213 // defs in new LCA block.
1214 LCA = insert_anti_dependences(LCA, self);
1215 }
1217 if (early->_dom_depth > LCA->_dom_depth) {
1218 // Somehow the LCA has moved above the earliest legal point.
1219 // (One way this can happen is via memory_early_block.)
1220 if (C->subsume_loads() == true && !C->failing()) {
1221 // Retry with subsume_loads == false
1222 // If this is the first failure, the sentinel string will "stick"
1223 // to the Compile object, and the C2Compiler will see it and retry.
1224 C->record_failure(C2Compiler::retry_no_subsuming_loads());
1225 } else {
1226 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1227 C->record_method_not_compilable("late schedule failed: incorrect graph");
1228 }
1229 return;
1230 }
1232 // If there is no opportunity to hoist, then we're done.
1233 // In stress mode, try to hoist even the single operations.
1234 bool try_to_hoist = StressGCM || (LCA != early);
1236 // Must clone guys stay next to use; no hoisting allowed.
1237 // Also cannot hoist guys that alter memory or are otherwise not
1238 // allocatable (hoisting can make a value live longer, leading to
1239 // anti and output dependency problems which are normally resolved
1240 // by the register allocator giving everyone a different register).
1241 if (mach != NULL && must_clone[mach->ideal_Opcode()])
1242 try_to_hoist = false;
1244 Block* late = NULL;
1245 if (try_to_hoist) {
1246 // Now find the block with the least execution frequency.
1247 // Start at the latest schedule and work up to the earliest schedule
1248 // in the dominator tree. Thus the Node will dominate all its uses.
1249 late = hoist_to_cheaper_block(LCA, early, self);
1250 } else {
1251 // Just use the LCA of the uses.
1252 late = LCA;
1253 }
1255 // Put the node into target block
1256 schedule_node_into_block(self, late);
1258 #ifdef ASSERT
1259 if (self->needs_anti_dependence_check()) {
1260 // since precedence edges are only inserted when we're sure they
1261 // are needed make sure that after placement in a block we don't
1262 // need any new precedence edges.
1263 verify_anti_dependences(late, self);
1264 }
1265 #endif
1266 } // Loop until all nodes have been visited
1268 } // end ScheduleLate
1270 //------------------------------GlobalCodeMotion-------------------------------
1271 void PhaseCFG::global_code_motion() {
1272 ResourceMark rm;
1274 #ifndef PRODUCT
1275 if (trace_opto_pipelining()) {
1276 tty->print("\n---- Start GlobalCodeMotion ----\n");
1277 }
1278 #endif
1280 // Initialize the node to block mapping for things on the proj_list
1281 for (uint i = 0; i < _matcher.number_of_projections(); i++) {
1282 unmap_node_from_block(_matcher.get_projection(i));
1283 }
1285 // Set the basic block for Nodes pinned into blocks
1286 Arena* arena = Thread::current()->resource_area();
1287 VectorSet visited(arena);
1288 schedule_pinned_nodes(visited);
1290 // Find the earliest Block any instruction can be placed in. Some
1291 // instructions are pinned into Blocks. Unpinned instructions can
1292 // appear in last block in which all their inputs occur.
1293 visited.Clear();
1294 Node_List stack(arena);
1295 // Pre-grow the list
1296 stack.map((C->unique() >> 1) + 16, NULL);
1297 if (!schedule_early(visited, stack)) {
1298 // Bailout without retry
1299 C->record_method_not_compilable("early schedule failed");
1300 return;
1301 }
1303 // Build Def-Use edges.
1304 // Compute the latency information (via backwards walk) for all the
1305 // instructions in the graph
1306 _node_latency = new GrowableArray<uint>(); // resource_area allocation
1308 if (C->do_scheduling()) {
1309 compute_latencies_backwards(visited, stack);
1310 }
1312 // Now schedule all codes as LATE as possible. This is the LCA in the
1313 // dominator tree of all USES of a value. Pick the block with the least
1314 // loop nesting depth that is lowest in the dominator tree.
1315 // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1316 schedule_late(visited, stack);
1317 if (C->failing()) {
1318 // schedule_late fails only when graph is incorrect.
1319 assert(!VerifyGraphEdges, "verification should have failed");
1320 return;
1321 }
1323 #ifndef PRODUCT
1324 if (trace_opto_pipelining()) {
1325 tty->print("\n---- Detect implicit null checks ----\n");
1326 }
1327 #endif
1329 // Detect implicit-null-check opportunities. Basically, find NULL checks
1330 // with suitable memory ops nearby. Use the memory op to do the NULL check.
1331 // I can generate a memory op if there is not one nearby.
1332 if (C->is_method_compilation()) {
1333 // Don't do it for natives, adapters, or runtime stubs
1334 int allowed_reasons = 0;
1335 // ...and don't do it when there have been too many traps, globally.
1336 for (int reason = (int)Deoptimization::Reason_none+1;
1337 reason < Compile::trapHistLength; reason++) {
1338 assert(reason < BitsPerInt, "recode bit map");
1339 if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
1340 allowed_reasons |= nth_bit(reason);
1341 }
1342 // By reversing the loop direction we get a very minor gain on mpegaudio.
1343 // Feel free to revert to a forward loop for clarity.
1344 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
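// _null_check_tests holds (proj, value) pairs, hence the step of 2.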
1345 for (int i = _matcher._null_check_tests.size() - 2; i >= 0; i -= 2) {
1346 Node* proj = _matcher._null_check_tests[i];
1347 Node* val = _matcher._null_check_tests[i + 1];
1348 Block* block = get_block_for_node(proj);
1349 block->implicit_null_check(this, proj, val, allowed_reasons);
1350 // The implicit_null_check will only perform the transformation
1351 // if the null branch is truly uncommon, *and* it leads to an
1352 // uncommon trap. Combined with the too_many_traps guards
1353 // above, this prevents SEGV storms reported in 6366351,
1354 // by recompiling offending methods without this optimization.
1355 }
1356 }
1358 #ifndef PRODUCT
1359 if (trace_opto_pipelining()) {
1360 tty->print("\n---- Start Local Scheduling ----\n");
1361 }
1362 #endif
1364 // Schedule locally. Right now a simple topological sort.
1365 // Later, do a real latency aware scheduler.
1366 GrowableArray<int> ready_cnt(C->unique(), C->unique(), -1);
1367 visited.Clear();
1368 for (uint i = 0; i < number_of_blocks(); i++) {
1369 Block* block = get_block(i);
1370 if (!block->schedule_local(this, _matcher, ready_cnt, visited)) {
1371 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1372 C->record_method_not_compilable("local schedule failed");
1373 }
1374 return;
1375 }
1376 }
1378 // If we inserted any instructions between a Call and its CatchNode,
1379 // clone the instructions on all paths below the Catch.
1380 for (uint i = 0; i < number_of_blocks(); i++) {
1381 Block* block = get_block(i);
1382 block->call_catch_cleanup(this, C);
1383 }
1385 #ifndef PRODUCT
1386 if (trace_opto_pipelining()) {
1387 tty->print("\n---- After GlobalCodeMotion ----\n");
1388 for (uint i = 0; i < number_of_blocks(); i++) {
1389 Block* block = get_block(i);
1390 block->dump();
1391 }
1392 }
1393 #endif
1394 // Dead: poison the pointer so any later use of _node_latency fails fast.
1395 _node_latency = (GrowableArray<uint> *)0xdeadbeef;
1396 }
1398 bool PhaseCFG::do_global_code_motion() {
1400 build_dominator_tree();
1401 if (C->failing()) {
1402 return false;
1403 }
1405 NOT_PRODUCT( C->verify_graph_edges(); )
1407 estimate_block_frequency();
1409 global_code_motion();
1411 if (C->failing()) {
1412 return false;
1413 }
1415 return true;
1416 }
1418 //------------------------------Estimate_Block_Frequency-----------------------
1419 // Estimate block frequencies based on IfNode probabilities.
1420 void PhaseCFG::estimate_block_frequency() {
1422 // Force conditional branches leading to uncommon traps to be unlikely,
1423 // not because we get to the uncommon_trap with less relative frequency,
1424 // but because an uncommon_trap typically causes a deopt, so we only get
1425 // there once.
1426 if (C->do_freq_based_layout()) {
1427 Block_List worklist;
1428 Block* root_blk = get_block(0);
1429 for (uint i = 1; i < root_blk->num_preds(); i++) {
1430 Block *pb = get_block_for_node(root_blk->pred(i));
1431 if (pb->has_uncommon_code()) {
1432 worklist.push(pb);
1433 }
1434 }
1435 while (worklist.size() > 0) {
1436 Block* uct = worklist.pop();
1437 if (uct == get_root_block()) {
1438 continue;
1439 }
1440 for (uint i = 1; i < uct->num_preds(); i++) {
1441 Block *pb = get_block_for_node(uct->pred(i));
1442 if (pb->_num_succs == 1) {
1443 worklist.push(pb);
1444 } else if (pb->num_fall_throughs() == 2) {
1445 pb->update_uncommon_branch(uct);
1446 }
1447 }
1448 }
1449 }
1451 // Create the loop tree and calculate loop depth.
1452 _root_loop = create_loop_tree();
1453 _root_loop->compute_loop_depth(0);
1455 // Compute block frequency of each block, relative to a single loop entry.
1456 _root_loop->compute_freq();
1458 // Adjust all frequencies to be relative to a single method entry
1459 _root_loop->_freq = 1.0;
1460 _root_loop->scale_freq();
1462 // Save outmost loop frequency for LRG frequency threshold
1463 _outer_loop_frequency = _root_loop->outer_loop_freq();
1465 // force paths ending at uncommon traps to be infrequent
1466 if (!C->do_freq_based_layout()) {
1467 Block_List worklist;
1468 Block* root_blk = get_block(0);
1469 for (uint i = 1; i < root_blk->num_preds(); i++) {
1470 Block *pb = get_block_for_node(root_blk->pred(i));
1471 if (pb->has_uncommon_code()) {
1472 worklist.push(pb);
1473 }
1474 }
1475 while (worklist.size() > 0) {
1476 Block* uct = worklist.pop();
1477 uct->_freq = PROB_MIN;
1478 for (uint i = 1; i < uct->num_preds(); i++) {
1479 Block *pb = get_block_for_node(uct->pred(i));
1480 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1481 worklist.push(pb);
1482 }
1483 }
1484 }
1485 }
1487 #ifdef ASSERT
1488 for (uint i = 0; i < number_of_blocks(); i++) {
1489 Block* b = get_block(i);
1490 assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency");
1491 }
1492 #endif
1494 #ifndef PRODUCT
1495 if (PrintCFGBlockFreq) {
1496 tty->print_cr("CFG Block Frequencies");
1497 _root_loop->dump_tree();
1498 if (Verbose) {
1499 tty->print_cr("PhaseCFG dump");
1500 dump();
1501 tty->print_cr("Node dump");
1502 _root->dump(99999);
1503 }
1504 }
1505 #endif
1506 }
1508 //----------------------------create_loop_tree--------------------------------
1509 // Create a loop tree from the CFG
1510 CFGLoop* PhaseCFG::create_loop_tree() {
1512 #ifdef ASSERT
1513 assert(get_block(0) == get_root_block(), "first block should be root block");
1514 for (uint i = 0; i < number_of_blocks(); i++) {
1515 Block* block = get_block(i);
1516 // Check that the _loop fields are clear...we could clear them if not.
1517 assert(block->_loop == NULL, "clear _loop expected");
1518 // Sanity check that the RPO numbering is reflected in the _blocks array.
1519 // It doesn't have to be for the loop tree to be built, but if it is not,
1520 // then the blocks have been reordered since dom graph building...which
1521 // may call the RPO numbering into question.
1522 assert(block->_rpo == i, "unexpected reverse post order number");
1523 }
1524 #endif
1526 int idct = 0;
1527 CFGLoop* root_loop = new CFGLoop(idct++);
1529 Block_List worklist;
1531 // Assign blocks to loops
1532 for(uint i = number_of_blocks() - 1; i > 0; i-- ) { // skip Root block
1533 Block* block = get_block(i);
1535 if (block->head()->is_Loop()) {
1536 Block* loop_head = block;
1537 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1538 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1539 Block* tail = get_block_for_node(tail_n);
1541 // Defensively filter out Loop nodes for non-single-entry loops.
1542 // For all reasonable loops, the head occurs before the tail in RPO.
1543 if (i <= tail->_rpo) {
1545 // The tail and (recursive) predecessors of the tail
1546 // are made members of a new loop.
1548 assert(worklist.size() == 0, "nonempty worklist");
1549 CFGLoop* nloop = new CFGLoop(idct++);
1550 assert(loop_head->_loop == NULL, "just checking");
1551 loop_head->_loop = nloop;
1552 // Add to nloop so push_pred() will skip over inner loops
1553 nloop->add_member(loop_head);
1554 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, this);
1556 while (worklist.size() > 0) {
1557 Block* member = worklist.pop();
1558 if (member != loop_head) {
1559 for (uint j = 1; j < member->num_preds(); j++) {
1560 nloop->push_pred(member, j, worklist, this);
1561 }
1562 }
1563 }
1564 }
1565 }
1566 }
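// Illustrative example of the walk above: for a loop headed by block B4 whose
// back edge comes from B9, push_pred() first adds B9 and the worklist loop
// then pulls in B9's (recursive) predecessors whose RPO numbers lie after B4,
// so the loop body joins the new CFGLoop before the member lists are built
// below.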
1568 // Create a member list for each loop consisting
1569 // of both blocks and (immediate child) loops.
1570 for (uint i = 0; i < number_of_blocks(); i++) {
1571 Block* block = get_block(i);
1572 CFGLoop* lp = block->_loop;
1573 if (lp == NULL) {
1574 // Not assigned to a loop. Add it to the method's pseudo loop.
1575 block->_loop = root_loop;
1576 lp = root_loop;
1577 }
1578 if (lp == root_loop || block != lp->head()) { // loop heads are already members
1579 lp->add_member(block);
1580 }
1581 if (lp != root_loop) {
1582 if (lp->parent() == NULL) {
1583 // Not a nested loop. Make it a child of the method's pseudo loop.
1584 root_loop->add_nested_loop(lp);
1585 }
1586 if (block == lp->head()) {
1587 // Add nested loop to member list of parent loop.
1588 lp->parent()->add_member(lp);
1589 }
1590 }
1591 }
1593 return root_loop;
1594 }
1596 //------------------------------push_pred--------------------------------------
1597 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg) {
1598 Node* pred_n = blk->pred(i);
1599 Block* pred = cfg->get_block_for_node(pred_n);
1600 CFGLoop *pred_loop = pred->_loop;
1601 if (pred_loop == NULL) {
1602 // Filter out blocks for non-single-entry loops.
1603 // For all reasonable loops, the head occurs before the tail in RPO.
1604 if (pred->_rpo > head()->_rpo) {
1605 pred->_loop = this;
1606 worklist.push(pred);
1607 }
1608 } else if (pred_loop != this) {
1609 // Nested loop.
1610 while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1611 pred_loop = pred_loop->_parent;
1612 }
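// At this point pred_loop is the outermost enclosing loop of pred that either
// has no parent yet or whose parent is already this loop.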
1613 // Make pred's loop be a child
1614 if (pred_loop->_parent == NULL) {
1615 add_nested_loop(pred_loop);
1616 // Continue with loop entry predecessor.
1617 Block* pred_head = pred_loop->head();
1618 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1619 assert(pred_head != head(), "loop head in only one loop");
1620 push_pred(pred_head, LoopNode::EntryControl, worklist, cfg);
1621 } else {
1622 assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1623 }
1624 }
1625 }
1627 //------------------------------add_nested_loop--------------------------------
1628 // Make cl a child of the current loop in the loop tree.
1629 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1630 assert(_parent == NULL, "no parent yet");
1631 assert(cl != this, "not my own parent");
1632 cl->_parent = this;
1633 CFGLoop* ch = _child;
1634 if (ch == NULL) {
1635 _child = cl;
1636 } else {
1637 while (ch->_sibling != NULL) { ch = ch->_sibling; }
1638 ch->_sibling = cl;
1639 }
1640 }
1642 //------------------------------compute_loop_depth-----------------------------
1643 // Store the loop depth in each CFGLoop object.
1644 // Recursively walk the children to do the same for them.
1645 void CFGLoop::compute_loop_depth(int depth) {
1646 _depth = depth;
1647 CFGLoop* ch = _child;
1648 while (ch != NULL) {
1649 ch->compute_loop_depth(depth + 1);
1650 ch = ch->_sibling;
1651 }
1652 }
1654 //------------------------------compute_freq-----------------------------------
1655 // Compute the frequency of each block and loop, relative to a single entry
1656 // into the dominating loop head.
1657 void CFGLoop::compute_freq() {
1658 // Bottom up traversal of loop tree (visit inner loops first.)
1659 // Set loop head frequency to 1.0, then transitively
1660 // compute frequency for all successors in the loop,
1661 // as well as for each exit edge. Inner loops are
1662 // treated as single blocks with loop exit targets
1663 // as the successor blocks.
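// Sketch with illustrative numbers: if an inner loop's exits (already
// normalized per loop entry, since children are visited first) are 0.25 to
// block E1 and 0.75 to block E2, the member walk below treats the inner loop
// as a single element and feeds freq * 0.25 to E1 and freq * 0.75 to E2
// through update_succ_freq().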
1665 // Nested loops first
1666 CFGLoop* ch = _child;
1667 while (ch != NULL) {
1668 ch->compute_freq();
1669 ch = ch->_sibling;
1670 }
1671 assert (_members.length() > 0, "no empty loops");
1672 Block* hd = head();
1673 hd->_freq = 1.0f;
1674 for (int i = 0; i < _members.length(); i++) {
1675 CFGElement* s = _members.at(i);
1676 float freq = s->_freq;
1677 if (s->is_block()) {
1678 Block* b = s->as_Block();
1679 for (uint j = 0; j < b->_num_succs; j++) {
1680 Block* sb = b->_succs[j];
1681 update_succ_freq(sb, freq * b->succ_prob(j));
1682 }
1683 } else {
1684 CFGLoop* lp = s->as_CFGLoop();
1685 assert(lp->_parent == this, "immediate child");
1686 for (int k = 0; k < lp->_exits.length(); k++) {
1687 Block* eb = lp->_exits.at(k).get_target();
1688 float prob = lp->_exits.at(k).get_prob();
1689 update_succ_freq(eb, freq * prob);
1690 }
1691 }
1692 }
1694 // For all loops other than the outer, "method" loop,
1695 // sum and normalize the exit probability. The "method" loop
1696 // should keep the initial exit probability of 1, so that
1697 // inner blocks do not get erroneously scaled.
1698 if (_depth != 0) {
1699 // Total the exit probabilities for this loop.
1700 float exits_sum = 0.0f;
1701 for (int i = 0; i < _exits.length(); i++) {
1702 exits_sum += _exits.at(i).get_prob();
1703 }
1705 // Normalize the exit probabilities. Until now, each
1706 // probability estimated the chance of exiting on a
1707 // single loop iteration; afterward, it estimates the
1708 // probability of exit per loop entry.
1709 for (int i = 0; i < _exits.length(); i++) {
1710 Block* et = _exits.at(i).get_target();
1711 float new_prob = 0.0f;
1712 if (_exits.at(i).get_prob() > 0.0f) {
1713 new_prob = _exits.at(i).get_prob() / exits_sum;
1714 }
1715 BlockProbPair bpp(et, new_prob);
1716 _exits.at_put(i, bpp);
1717 }
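// Worked example with illustrative numbers: per-iteration exit probabilities
// of 0.05 and 0.15 give exits_sum == 0.2, so the per-entry exit probabilities
// become 0.25 and 0.75; the 0.2 saved below as _exit_prob corresponds to an
// estimated trip count of about 5, assuming trip_count() is derived as the
// reciprocal of _exit_prob.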
1719 // Save the total, but guard against unreasonable probability,
1720 // as the value is used to estimate the loop trip count.
1721 // An infinite trip count would blur relative block
1722 // frequencies.
1723 if (exits_sum > 1.0f) exits_sum = 1.0f;
1724 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1725 _exit_prob = exits_sum;
1726 }
1727 }
1729 //------------------------------succ_prob-------------------------------------
1730 // Determine the probability of reaching successor 'i' from the receiver block.
1731 float Block::succ_prob(uint i) {
1732 int eidx = end_idx();
1733 Node *n = _nodes[eidx]; // Get ending Node
1735 int op = n->Opcode();
1736 if (n->is_Mach()) {
1737 if (n->is_MachNullCheck()) {
1738 // Can only reach here if called after lcm. The original Op_If is gone,
1739 // so we attempt to infer the probability from one or both of the
1740 // successor blocks.
1741 assert(_num_succs == 2, "expecting 2 successors of a null check");
1742 // If either successor has only one predecessor, then the
1743 // probability estimate can be derived using the
1744 // relative frequency of the successor and this block.
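// Illustrative numbers: with this block's _freq == 4.0 and a
// single-predecessor successor whose _freq == 3.0, the inferred probability
// of taking that successor is 3.0 / 4.0 == 0.75.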
1745 if (_succs[i]->num_preds() == 2) {
1746 return _succs[i]->_freq / _freq;
1747 } else if (_succs[1-i]->num_preds() == 2) {
1748 return 1 - (_succs[1-i]->_freq / _freq);
1749 } else {
1750 // Estimate using both successor frequencies
1751 float freq = _succs[i]->_freq;
1752 return freq / (freq + _succs[1-i]->_freq);
1753 }
1754 }
1755 op = n->as_Mach()->ideal_Opcode();
1756 }
1759 // Switch on branch type
1760 switch( op ) {
1761 case Op_CountedLoopEnd:
1762 case Op_If: {
1763 assert (i < 2, "just checking");
1764 // Conditionals pass on only part of their frequency
1765 float prob = n->as_MachIf()->_prob;
1766 assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1767 // If succ[i] is the FALSE branch, invert path info
1768 if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
1769 return 1.0f - prob; // not taken
1770 } else {
1771 return prob; // taken
1772 }
1773 }
1775 case Op_Jump:
1776 // Divide the frequency between all successors evenly
1777 return 1.0f/_num_succs;
1779 case Op_Catch: {
1780 const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1781 if (ci->_con == CatchProjNode::fall_through_index) {
1782 // Fall-thru path gets the lion's share.
1783 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1784 } else {
1785 // Presume exceptional paths are equally unlikely
1786 return PROB_UNLIKELY_MAG(5);
1787 }
1788 }
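// Illustrative numbers, assuming PROB_UNLIKELY_MAG(5) is on the order of
// 1e-5: a Catch with three successors gives the fall-through path roughly
// 1 - 3e-5 and each exceptional path roughly 1e-5.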
1790 case Op_Root:
1791 case Op_Goto:
1792 // Pass frequency straight thru to target
1793 return 1.0f;
1795 case Op_NeverBranch:
1796 return 0.0f;
1798 case Op_TailCall:
1799 case Op_TailJump:
1800 case Op_Return:
1801 case Op_Halt:
1802 case Op_Rethrow:
1803 // Do not push out freq to root block
1804 return 0.0f;
1806 default:
1807 ShouldNotReachHere();
1808 }
1810 return 0.0f;
1811 }
1813 //------------------------------num_fall_throughs-----------------------------
1814 // Return the number of fall-through candidates for a block
1815 int Block::num_fall_throughs() {
1816 int eidx = end_idx();
1817 Node *n = _nodes[eidx]; // Get ending Node
1819 int op = n->Opcode();
1820 if (n->is_Mach()) {
1821 if (n->is_MachNullCheck()) {
1822 // In theory, either side can fall through; for simplicity's sake,
1823 // assume only the false branch can for now.
1824 return 1;
1825 }
1826 op = n->as_Mach()->ideal_Opcode();
1827 }
1829 // Switch on branch type
1830 switch( op ) {
1831 case Op_CountedLoopEnd:
1832 case Op_If:
1833 return 2;
1835 case Op_Root:
1836 case Op_Goto:
1837 return 1;
1839 case Op_Catch: {
1840 for (uint i = 0; i < _num_succs; i++) {
1841 const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1842 if (ci->_con == CatchProjNode::fall_through_index) {
1843 return 1;
1844 }
1845 }
1846 return 0;
1847 }
1849 case Op_Jump:
1850 case Op_NeverBranch:
1851 case Op_TailCall:
1852 case Op_TailJump:
1853 case Op_Return:
1854 case Op_Halt:
1855 case Op_Rethrow:
1856 return 0;
1858 default:
1859 ShouldNotReachHere();
1860 }
1862 return 0;
1863 }
1865 //------------------------------succ_fall_through-----------------------------
1866 // Return true if a specific successor could be fall-through target.
1867 bool Block::succ_fall_through(uint i) {
1868 int eidx = end_idx();
1869 Node *n = _nodes[eidx]; // Get ending Node
1871 int op = n->Opcode();
1872 if (n->is_Mach()) {
1873 if (n->is_MachNullCheck()) {
1874 // In theory, either side can fall through; for simplicity's sake,
1875 // assume only the false branch can for now.
1876 return _nodes[i + eidx + 1]->Opcode() == Op_IfFalse;
1877 }
1878 op = n->as_Mach()->ideal_Opcode();
1879 }
1881 // Switch on branch type
1882 switch( op ) {
1883 case Op_CountedLoopEnd:
1884 case Op_If:
1885 case Op_Root:
1886 case Op_Goto:
1887 return true;
1889 case Op_Catch: {
1890 const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1891 return ci->_con == CatchProjNode::fall_through_index;
1892 }
1894 case Op_Jump:
1895 case Op_NeverBranch:
1896 case Op_TailCall:
1897 case Op_TailJump:
1898 case Op_Return:
1899 case Op_Halt:
1900 case Op_Rethrow:
1901 return false;
1903 default:
1904 ShouldNotReachHere();
1905 }
1907 return false;
1908 }
1910 //------------------------------update_uncommon_branch------------------------
1911 // Update the probability of a two-branch to be uncommon
1912 void Block::update_uncommon_branch(Block* ub) {
1913 int eidx = end_idx();
1914 Node *n = _nodes[eidx]; // Get ending Node
1916 int op = n->as_Mach()->ideal_Opcode();
1918 assert(op == Op_CountedLoopEnd || op == Op_If, "must be an If");
1919 assert(num_fall_throughs() == 2, "must be a two-way branch block");
1921 // Which successor is ub?
1922 uint s;
1923 for (s = 0; s <_num_succs; s++) {
1924 if (_succs[s] == ub) break;
1925 }
1926 assert(s < 2, "uncommon successor must be found");
1928 // If ub is the true path, make the probability small; else
1929 // ub is the false path, so make the probability large
1930 bool invert = (_nodes[s + eidx + 1]->Opcode() == Op_IfFalse);
1932 // Get existing probability
1933 float p = n->as_MachIf()->_prob;
1935 if (invert) p = 1.0 - p;
1936 if (p > PROB_MIN) {
1937 p = PROB_MIN;
1938 }
1939 if (invert) p = 1.0 - p;
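// After the (possible) double inversion above, the probability of actually
// reaching ub has been clamped to at most PROB_MIN, regardless of whether ub
// sits on the true or the false projection.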
1941 n->as_MachIf()->_prob = p;
1942 }
1944 //------------------------------update_succ_freq-------------------------------
1945 // Update the appropriate frequency associated with block 'b', a successor of
1946 // a block in this loop.
1947 void CFGLoop::update_succ_freq(Block* b, float freq) {
1948 if (b->_loop == this) {
1949 if (b == head()) {
1950 // back branch within the loop
1951 // Do nothing now; the loop-carried frequency will be
1952 // adjusted later in scale_freq().
1953 } else {
1954 // simple branch within the loop
1955 b->_freq += freq;
1956 }
1957 } else if (!in_loop_nest(b)) {
1958 // branch is exit from this loop
1959 BlockProbPair bpp(b, freq);
1960 _exits.append(bpp);
1961 } else {
1962 // branch into nested loop
1963 CFGLoop* ch = b->_loop;
1964 ch->_freq += freq;
1965 }
1966 }
1968 //------------------------------in_loop_nest-----------------------------------
1969 // Determine if block b is in the receiver's loop nest.
1970 bool CFGLoop::in_loop_nest(Block* b) {
1971 int depth = _depth;
1972 CFGLoop* b_loop = b->_loop;
1973 int b_depth = b_loop->_depth;
1974 if (depth == b_depth) {
1975 return true;
1976 }
1977 while (b_depth > depth) {
1978 b_loop = b_loop->_parent;
1979 b_depth = b_loop->_depth;
1980 }
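// b_loop has now been walked up to the receiver's depth; b is inside the
// receiver's nest exactly when that ancestor is the receiver itself.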
1981 return b_loop == this;
1982 }
1984 //------------------------------scale_freq-------------------------------------
1985 // Scale frequency of loops and blocks by trip counts from outer loops
1986 // Do a top down traversal of loop tree (visit outer loops first.)
1987 void CFGLoop::scale_freq() {
1988 float loop_freq = _freq * trip_count();
1989 _freq = loop_freq;
1990 for (int i = 0; i < _members.length(); i++) {
1991 CFGElement* s = _members.at(i);
1992 float block_freq = s->_freq * loop_freq;
1993 if (g_isnan(block_freq) || block_freq < MIN_BLOCK_FREQUENCY)
1994 block_freq = MIN_BLOCK_FREQUENCY;
1995 s->_freq = block_freq;
1996 }
1997 CFGLoop* ch = _child;
1998 while (ch != NULL) {
1999 ch->scale_freq();
2000 ch = ch->_sibling;
2001 }
2002 }
2004 // Frequency of outer loop
2005 float CFGLoop::outer_loop_freq() const {
2006 if (_child != NULL) {
2007 return _child->_freq;
2008 }
2009 return _freq;
2010 }
2012 #ifndef PRODUCT
2013 //------------------------------dump_tree--------------------------------------
2014 void CFGLoop::dump_tree() const {
2015 dump();
2016 if (_child != NULL) _child->dump_tree();
2017 if (_sibling != NULL) _sibling->dump_tree();
2018 }
2020 //------------------------------dump-------------------------------------------
2021 void CFGLoop::dump() const {
2022 for (int i = 0; i < _depth; i++) tty->print(" ");
2023 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
2024 _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
2025 for (int i = 0; i < _depth; i++) tty->print(" ");
2026 tty->print(" members:");
2027 int k = 0;
2028 for (int i = 0; i < _members.length(); i++) {
2029 if (k++ >= 6) {
2030 tty->print("\n ");
2031 for (int j = 0; j < _depth+1; j++) tty->print(" ");
2032 k = 0;
2033 }
2034 CFGElement *s = _members.at(i);
2035 if (s->is_block()) {
2036 Block *b = s->as_Block();
2037 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
2038 } else {
2039 CFGLoop* lp = s->as_CFGLoop();
2040 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
2041 }
2042 }
2043 tty->print("\n");
2044 for (int i = 0; i < _depth; i++) tty->print(" ");
2045 tty->print(" exits: ");
2046 k = 0;
2047 for (int i = 0; i < _exits.length(); i++) {
2048 if (k++ >= 7) {
2049 tty->print("\n ");
2050 for (int j = 0; j < _depth+1; j++) tty->print(" ");
2051 k = 0;
2052 }
2053 Block *blk = _exits.at(i).get_target();
2054 float prob = _exits.at(i).get_prob();
2055 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
2056 }
2057 tty->print("\n");
2058 }
2059 #endif