Tue, 02 Sep 2008 15:03:05 -0700
6738933: assert with base pointers must match with compressed oops enabled
Reviewed-by: kvn, rasbold
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 // Portions of code courtesy of Clifford Click
27 // Optimization - Graph Style
29 #include "incls/_precompiled.incl"
30 #include "incls/_gcm.cpp.incl"
32 //----------------------------schedule_node_into_block-------------------------
33 // Insert node n into block b. Look for projections of n and make sure they
34 // are in b also.
35 void PhaseCFG::schedule_node_into_block( Node *n, Block *b ) {
36 // Set the basic block of n and add n to b.
37 _bbs.map(n->_idx, b);
38 b->add_inst(n);
40 // After Matching, nearly any old Node may have projections trailing it.
41 // These are usually machine-dependent flags. In any case, they might
42 // float to another block below this one. Move them up.
43 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
44 Node* use = n->fast_out(i);
45 if (use->is_Proj()) {
46 Block* buse = _bbs[use->_idx];
47 if (buse != b) { // In wrong block?
48 if (buse != NULL)
49 buse->find_remove(use); // Remove from wrong block
50 _bbs.map(use->_idx, b); // Re-insert in this block
51 b->add_inst(use);
52 }
53 }
54 }
55 }
58 //------------------------------schedule_pinned_nodes--------------------------
59 // Set the basic block for Nodes pinned into blocks
60 void PhaseCFG::schedule_pinned_nodes( VectorSet &visited ) {
61 // Allocate node stack of size C->unique()+8 to avoid frequent realloc
62 GrowableArray <Node *> spstack(C->unique()+8);
63 spstack.push(_root);
64 while ( spstack.is_nonempty() ) {
65 Node *n = spstack.pop();
66 if( !visited.test_set(n->_idx) ) { // Test node and flag it as visited
67 if( n->pinned() && !_bbs.lookup(n->_idx) ) { // Pinned? Nail it down!
68 Node *input = n->in(0);
69 assert( input, "pinned Node must have Control" );
70 while( !input->is_block_start() )
71 input = input->in(0);
72 Block *b = _bbs[input->_idx]; // Basic block of controlling input
73 schedule_node_into_block(n, b);
74 }
75 for( int i = n->req() - 1; i >= 0; --i ) { // For all inputs
76 if( n->in(i) != NULL )
77 spstack.push(n->in(i));
78 }
79 }
80 }
81 }
83 #ifdef ASSERT
84 // Assert that new input b2 is dominated by all previous inputs.
85 // Check this by seeing that it is dominated by b1, the deepest
86 // input observed before b2.
87 static void assert_dom(Block* b1, Block* b2, Node* n, Block_Array &bbs) {
88 if (b1 == NULL) return;
89 assert(b1->_dom_depth < b2->_dom_depth, "sanity");
90 Block* tmp = b2;
91 while (tmp != b1 && tmp != NULL) {
92 tmp = tmp->_idom;
93 }
94 if (tmp != b1) {
95 // Detected an unschedulable graph. Print some nice stuff and die.
96 tty->print_cr("!!! Unschedulable graph !!!");
97 for (uint j=0; j<n->len(); j++) { // For all inputs
98 Node* inn = n->in(j); // Get input
99 if (inn == NULL) continue; // Ignore NULL, missing inputs
100 Block* inb = bbs[inn->_idx];
101 tty->print("B%d idom=B%d depth=%2d ",inb->_pre_order,
102 inb->_idom ? inb->_idom->_pre_order : 0, inb->_dom_depth);
103 inn->dump();
104 }
105 tty->print("Failing node: ");
106 n->dump();
107 assert(false, "unschedulable graph");
108 }
109 }
110 #endif
112 static Block* find_deepest_input(Node* n, Block_Array &bbs) {
113 // Find the last input dominated by all other inputs.
114 Block* deepb = NULL; // Deepest block so far
115 int deepb_dom_depth = 0;
116 for (uint k = 0; k < n->len(); k++) { // For all inputs
117 Node* inn = n->in(k); // Get input
118 if (inn == NULL) continue; // Ignore NULL, missing inputs
119 Block* inb = bbs[inn->_idx];
120 assert(inb != NULL, "must already have scheduled this input");
121 if (deepb_dom_depth < (int) inb->_dom_depth) {
122 // The new inb must be dominated by the previous deepb.
123 // The various inputs must be linearly ordered in the dom
124 // tree, or else there will not be a unique deepest block.
125 DEBUG_ONLY(assert_dom(deepb, inb, n, bbs));
126 deepb = inb; // Save deepest block
127 deepb_dom_depth = deepb->_dom_depth;
128 }
129 }
130 assert(deepb != NULL, "must be at least one input to n");
131 return deepb;
132 }
135 //------------------------------schedule_early---------------------------------
136 // Find the earliest Block any instruction can be placed in. Some instructions
137 // are pinned into Blocks. Unpinned instructions can appear in the last block in
138 // which all their inputs occur.
139 bool PhaseCFG::schedule_early(VectorSet &visited, Node_List &roots) {
140 // Allocate stack with enough space to avoid frequent realloc
141 Node_Stack nstack(roots.Size() + 8); // (unique >> 1) + 24 from Java2D stats
142 // roots.push(_root); _root will be processed among C->top() inputs
143 roots.push(C->top());
144 visited.set(C->top()->_idx);
146 while (roots.size() != 0) {
147 // Use local variables nstack_top_n & nstack_top_i to cache values
148 // on stack's top.
149 Node *nstack_top_n = roots.pop();
150 uint nstack_top_i = 0;
151 //while_nstack_nonempty:
152 while (true) {
153 // Get parent node and next input's index from stack's top.
154 Node *n = nstack_top_n;
155 uint i = nstack_top_i;
157 if (i == 0) {
158 // Special control input processing.
159 // While I am here, go ahead and look for Nodes which are taking control
160 // from an is_block_proj Node. After I inserted RegionNodes to make proper
161 // blocks, the control at an is_block_proj more properly comes from the
162 // Region being controlled by the block_proj Node.
163 const Node *in0 = n->in(0);
164 if (in0 != NULL) { // Control-dependent?
165 const Node *p = in0->is_block_proj();
166 if (p != NULL && p != n) { // Control from a block projection?
167 // Find trailing Region
168 Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
169 uint j = 0;
170 if (pb->_num_succs != 1) { // More than 1 successor?
171 // Search for successor
172 uint max = pb->_nodes.size();
173 assert( max > 1, "" );
174 uint start = max - pb->_num_succs;
175 // Find which output path belongs to projection
176 for (j = start; j < max; j++) {
177 if( pb->_nodes[j] == in0 )
178 break;
179 }
180 assert( j < max, "must find" );
181 // Change control to match head of successor basic block
182 j -= start;
183 }
184 n->set_req(0, pb->_succs[j]->head());
185 }
186 } else { // n->in(0) == NULL
187 if (n->req() == 1) { // This guy is a constant with NO inputs?
188 n->set_req(0, _root);
189 }
190 }
191 }
193 // First, visit all inputs and force them to get a block. If an
194 // input is already in a block we quit following inputs (to avoid
195 // cycles). Instead we put that Node on a worklist to be handled
196 // later (since ITS inputs may not have a block yet).
197 bool done = true; // Assume all n's inputs will be processed
198 while (i < n->len()) { // For all inputs
199 Node *in = n->in(i); // Get input
200 ++i;
201 if (in == NULL) continue; // Ignore NULL, missing inputs
202 int is_visited = visited.test_set(in->_idx);
203 if (!_bbs.lookup(in->_idx)) { // Missing block selection?
204 if (is_visited) {
205 // assert( !visited.test(in->_idx), "did not schedule early" );
206 return false;
207 }
208 nstack.push(n, i); // Save parent node and next input's index.
209 nstack_top_n = in; // Process current input now.
210 nstack_top_i = 0;
211 done = false; // Not all n's inputs processed.
212 break; // continue while_nstack_nonempty;
213 } else if (!is_visited) { // Input not yet visited?
214 roots.push(in); // Visit this guy later, using worklist
215 }
216 }
217 if (done) {
218 // All of n's inputs have been processed, complete post-processing.
220 // Some instructions are pinned into a block. These include Region,
221 // Phi, Start, Return, and other control-dependent instructions and
222 // any projections which depend on them.
223 if (!n->pinned()) {
224 // Set earliest legal block.
225 _bbs.map(n->_idx, find_deepest_input(n, _bbs));
226 }
228 if (nstack.is_empty()) {
229 // Finished all nodes on stack.
230 // Process next node on the worklist 'roots'.
231 break;
232 }
233 // Get saved parent node and next input's index.
234 nstack_top_n = nstack.node();
235 nstack_top_i = nstack.index();
236 nstack.pop();
237 } // if (done)
238 } // while (true)
239 } // while (roots.size() != 0)
240 return true;
241 }
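// Illustrative sketch (added commentary, not part of the original change): the
// explicit (parent node, next-input-index) stack idiom used by schedule_early
// above, shown on a hypothetical SketchNode type with a plain input array and
// assuming an acyclic input graph. The stack top is cached in locals, just as
// nstack_top_n / nstack_top_i are cached above.
#if 0  // sketch only; not compiled
#include <vector>
#include <utility>

struct SketchNode {
  std::vector<SketchNode*> _in; // inputs (entries may be NULL)
  bool _placed;                 // stands in for "already has a block"
};

static void sketch_schedule_early(SketchNode* root) {
  std::vector<std::pair<SketchNode*, size_t> > stack; // (parent, next input index)
  SketchNode* n = root;
  size_t i = 0;
  while (true) {
    while (i < n->_in.size()) {
      SketchNode* in = n->_in[i++];
      if (in == NULL || in->_placed) continue; // missing or already done
      stack.push_back(std::make_pair(n, i));   // save the parent's state
      n = in;                                  // "recurse" into the input
      i = 0;
    }
    n->_placed = true;                         // post-order: all inputs done
    if (stack.empty()) return;
    n = stack.back().first;                    // resume the saved parent
    i = stack.back().second;
    stack.pop_back();
  }
}
#endif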
243 //------------------------------dom_lca----------------------------------------
244 // Find least common ancestor in dominator tree
245 // LCA is a current notion of LCA, to be raised above 'this'.
246 // As a convenient boundary condition, return 'this' if LCA is NULL.
247 // Find the LCA of those two nodes.
248 Block* Block::dom_lca(Block* LCA) {
249 if (LCA == NULL || LCA == this) return this;
251 Block* anc = this;
252 while (anc->_dom_depth > LCA->_dom_depth)
253 anc = anc->_idom; // Walk up till anc is as high as LCA
255 while (LCA->_dom_depth > anc->_dom_depth)
256 LCA = LCA->_idom; // Walk up till LCA is as high as anc
258 while (LCA != anc) { // Walk both up till they are the same
259 LCA = LCA->_idom;
260 anc = anc->_idom;
261 }
263 return LCA;
264 }
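// Illustrative sketch (added commentary, not part of the original change): the
// same two-pointer LCA walk on a hypothetical minimal dominator-tree node.
// Both pointers are first raised to equal depth, then advanced in lock-step
// until they meet.
#if 0  // sketch only; not compiled
struct SketchDomNode {
  SketchDomNode* _idom;      // immediate dominator (NULL at the root)
  unsigned       _dom_depth; // depth in the dominator tree
};

static SketchDomNode* sketch_dom_lca(SketchDomNode* a, SketchDomNode* b) {
  if (b == NULL || b == a) return a;
  while (a->_dom_depth > b->_dom_depth) a = a->_idom; // raise the deeper one
  while (b->_dom_depth > a->_dom_depth) b = b->_idom;
  while (a != b) { a = a->_idom; b = b->_idom; }      // walk up together
  return a;
}
#endif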
266 //--------------------------raise_LCA_above_use--------------------------------
267 // We are placing a definition, and have been given a def->use edge.
268 // The definition must dominate the use, so move the LCA upward in the
269 // dominator tree to dominate the use. If the use is a phi, adjust
270 // the LCA only with the phi input paths which actually use this def.
271 static Block* raise_LCA_above_use(Block* LCA, Node* use, Node* def, Block_Array &bbs) {
272 Block* buse = bbs[use->_idx];
273 if (buse == NULL) return LCA; // Unused killing Projs have no use block
274 if (!use->is_Phi()) return buse->dom_lca(LCA);
275 uint pmax = use->req(); // Number of Phi inputs
276 // Why doesn't this loop just break after finding the matching input to
277 // the Phi? Well...it's like this. I do not have true def-use/use-def
278 // chains. Means I cannot distinguish, from the def-use direction, which
279 // of many use-defs lead from the same use to the same def. That is, this
280 // Phi might have several uses of the same def. Each use appears in a
281 // different predecessor block. But when I enter here, I cannot distinguish
282 // which use-def edge I should find the predecessor block for. So I find
283 // them all. Means I do a little extra work if a Phi uses the same value
284 // more than once.
285 for (uint j=1; j<pmax; j++) { // For all inputs
286 if (use->in(j) == def) { // Found matching input?
287 Block* pred = bbs[buse->pred(j)->_idx];
288 LCA = pred->dom_lca(LCA);
289 }
290 }
291 return LCA;
292 }
294 //----------------------------raise_LCA_above_marks----------------------------
295 // Return a new LCA that dominates LCA and any of its marked predecessors.
296 // Search all my parents up to 'early' (exclusive), looking for predecessors
297 // which are marked with the given index. Return the LCA (in the dom tree)
298 // of all marked blocks. If there are none marked, return the original
299 // LCA.
300 static Block* raise_LCA_above_marks(Block* LCA, node_idx_t mark,
301 Block* early, Block_Array &bbs) {
302 Block_List worklist;
303 worklist.push(LCA);
304 while (worklist.size() > 0) {
305 Block* mid = worklist.pop();
306 if (mid == early) continue; // stop searching here
308 // Test and set the visited bit.
309 if (mid->raise_LCA_visited() == mark) continue; // already visited
311 // Don't process the current LCA, otherwise the search may terminate early
312 if (mid != LCA && mid->raise_LCA_mark() == mark) {
313 // Raise the LCA.
314 LCA = mid->dom_lca(LCA);
315 if (LCA == early) break; // stop searching everywhere
316 assert(early->dominates(LCA), "early is high enough");
317 // Resume searching at that point, skipping intermediate levels.
318 worklist.push(LCA);
319 if (LCA == mid)
320 continue; // Don't mark as visited to avoid early termination.
321 } else {
322 // Keep searching through this block's predecessors.
323 for (uint j = 1, jmax = mid->num_preds(); j < jmax; j++) {
324 Block* mid_parent = bbs[ mid->pred(j)->_idx ];
325 worklist.push(mid_parent);
326 }
327 }
328 mid->set_raise_LCA_visited(mark);
329 }
330 return LCA;
331 }
333 //--------------------------memory_early_block--------------------------------
334 // This is a variation of find_deepest_input, the heart of schedule_early.
335 // Find the "early" block for a load, if we considered only memory and
336 // address inputs, that is, if other data inputs were ignored.
337 //
338 // Because a subset of edges are considered, the resulting block will
339 // be earlier (at a shallower dom_depth) than the true schedule_early
340 // point of the node. We compute this earlier block as a more permissive
341 // site for anti-dependency insertion, but only if subsume_loads is enabled.
342 static Block* memory_early_block(Node* load, Block* early, Block_Array &bbs) {
343 Node* base;
344 Node* index;
345 Node* store = load->in(MemNode::Memory);
346 load->as_Mach()->memory_inputs(base, index);
348 assert(base != NodeSentinel && index != NodeSentinel,
349 "unexpected base/index inputs");
351 Node* mem_inputs[4];
352 int mem_inputs_length = 0;
353 if (base != NULL) mem_inputs[mem_inputs_length++] = base;
354 if (index != NULL) mem_inputs[mem_inputs_length++] = index;
355 if (store != NULL) mem_inputs[mem_inputs_length++] = store;
357 // In the comparison below, add one to account for the control input,
358 // which may be null, but always takes up a spot in the in array.
359 if (mem_inputs_length + 1 < (int) load->req()) {
360 // This "load" has more inputs than just the memory, base and index inputs.
361 // For purposes of checking anti-dependences, we need to start
362 // from the early block of only the address portion of the instruction,
363 // and ignore other blocks that may have factored into the wider
364 // schedule_early calculation.
365 if (load->in(0) != NULL) mem_inputs[mem_inputs_length++] = load->in(0);
367 Block* deepb = NULL; // Deepest block so far
368 int deepb_dom_depth = 0;
369 for (int i = 0; i < mem_inputs_length; i++) {
370 Block* inb = bbs[mem_inputs[i]->_idx];
371 if (deepb_dom_depth < (int) inb->_dom_depth) {
372 // The new inb must be dominated by the previous deepb.
373 // The various inputs must be linearly ordered in the dom
374 // tree, or else there will not be a unique deepest block.
375 DEBUG_ONLY(assert_dom(deepb, inb, load, bbs));
376 deepb = inb; // Save deepest block
377 deepb_dom_depth = deepb->_dom_depth;
378 }
379 }
380 early = deepb;
381 }
383 return early;
384 }
386 //--------------------------insert_anti_dependences---------------------------
387 // A load may need to witness memory that nearby stores can overwrite.
388 // For each nearby store, either insert an "anti-dependence" edge
389 // from the load to the store, or else move LCA upward to force the
390 // load to (eventually) be scheduled in a block above the store.
391 //
392 // Do not add edges to stores on distinct control-flow paths;
393 // only add edges to stores which might interfere.
394 //
395 // Return the (updated) LCA. There will not be any possibly interfering
396 // store between the load's "early block" and the updated LCA.
397 // Any stores in the updated LCA will have new precedence edges
398 // back to the load. The caller is expected to schedule the load
399 // in the LCA, in which case the precedence edges will make LCM
400 // preserve anti-dependences. The caller may also hoist the load
401 // above the LCA, if it is not the early block.
402 Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
403 assert(load->needs_anti_dependence_check(), "must be a load of some sort");
404 assert(LCA != NULL, "");
405 DEBUG_ONLY(Block* LCA_orig = LCA);
407 // Compute the alias index. Loads and stores with different alias indices
408 // do not need anti-dependence edges.
409 uint load_alias_idx = C->get_alias_index(load->adr_type());
410 #ifdef ASSERT
411 if (load_alias_idx == Compile::AliasIdxBot && C->AliasLevel() > 0 &&
412 (PrintOpto || VerifyAliases ||
413 PrintMiscellaneous && (WizardMode || Verbose))) {
414 // Load nodes should not consume all of memory.
415 // Reporting a bottom type indicates a bug in adlc.
416 // If some particular type of node validly consumes all of memory,
417 // sharpen the preceding "if" to exclude it, so we can catch bugs here.
418 tty->print_cr("*** Possible Anti-Dependence Bug: Load consumes all of memory.");
419 load->dump(2);
420 if (VerifyAliases) assert(load_alias_idx != Compile::AliasIdxBot, "");
421 }
422 #endif
423 assert(load_alias_idx || (load->is_Mach() && load->as_Mach()->ideal_Opcode() == Op_StrComp),
424 "String compare is only known 'load' that does not conflict with any stores");
426 if (!C->alias_type(load_alias_idx)->is_rewritable()) {
427 // It is impossible to spoil this load by putting stores before it,
428 // because we know that the stores will never update the value
429 // which 'load' must witness.
430 return LCA;
431 }
433 node_idx_t load_index = load->_idx;
435 // Note the earliest legal placement of 'load', as determined by
436 // the unique point in the dom tree where all memory effects
437 // and other inputs are first available. (Computed by schedule_early.)
438 // For normal loads, 'early' is the shallowest place (dom graph wise)
439 // to look for anti-deps between this load and any store.
440 Block* early = _bbs[load_index];
442 // If we are subsuming loads, compute an "early" block that only considers
443 // memory or address inputs. This block may be different than the
444 // schedule_early block in that it could be at an even shallower depth in the
445 // dominator tree, and allow for a broader discovery of anti-dependences.
446 if (C->subsume_loads()) {
447 early = memory_early_block(load, early, _bbs);
448 }
450 ResourceArea *area = Thread::current()->resource_area();
451 Node_List worklist_mem(area); // prior memory state to store
452 Node_List worklist_store(area); // possible-def to explore
453 Node_List worklist_visited(area); // visited mergemem nodes
454 Node_List non_early_stores(area); // all relevant stores outside of early
455 bool must_raise_LCA = false;
457 #ifdef TRACK_PHI_INPUTS
458 // %%% This extra checking fails because MergeMem nodes are not GVNed.
459 // Provide "phi_inputs" to check if every input to a PhiNode is from the
460 // original memory state. This indicates a PhiNode which should not
461 // prevent the load from sinking. For such a block, set_raise_LCA_mark
462 // may be overly conservative.
463 // Mechanism: count inputs seen for each Phi encountered in worklist_store.
464 DEBUG_ONLY(GrowableArray<uint> phi_inputs(area, C->unique(),0,0));
465 #endif
467 // 'load' uses some memory state; look for users of the same state.
468 // Recurse through MergeMem nodes to the stores that use them.
470 // Each of these stores is a possible definition of memory
471 // that 'load' needs to use. We need to force 'load'
472 // to occur before each such store. When the store is in
473 // the same block as 'load', we insert an anti-dependence
474 // edge load->store.
476 // The relevant stores "nearby" the load consist of a tree rooted
477 // at initial_mem, with internal nodes of type MergeMem.
478 // Therefore, the branches visited by the worklist are of this form:
479 // initial_mem -> (MergeMem ->)* store
480 // The anti-dependence constraints apply only to the fringe of this tree.
482 Node* initial_mem = load->in(MemNode::Memory);
483 worklist_store.push(initial_mem);
484 worklist_visited.push(initial_mem);
485 worklist_mem.push(NULL);
486 while (worklist_store.size() > 0) {
487 // Examine a nearby store to see if it might interfere with our load.
488 Node* mem = worklist_mem.pop();
489 Node* store = worklist_store.pop();
490 uint op = store->Opcode();
492 // MergeMems do not directly have anti-deps.
493 // Treat them as internal nodes in a forward tree of memory states,
494 // the leaves of which are each a 'possible-def'.
495 if (store == initial_mem // root (exclusive) of tree we are searching
496 || op == Op_MergeMem // internal node of tree we are searching
497 ) {
498 mem = store; // It's not a possibly interfering store.
499 if (store == initial_mem)
500 initial_mem = NULL; // only process initial memory once
502 for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
503 store = mem->fast_out(i);
504 if (store->is_MergeMem()) {
505 // Be sure we don't get into combinatorial problems.
506 // (Allow phis to be repeated; they can merge two relevant states.)
507 uint j = worklist_visited.size();
508 for (; j > 0; j--) {
509 if (worklist_visited.at(j-1) == store) break;
510 }
511 if (j > 0) continue; // already on work list; do not repeat
512 worklist_visited.push(store);
513 }
514 worklist_mem.push(mem);
515 worklist_store.push(store);
516 }
517 continue;
518 }
520 if (op == Op_MachProj || op == Op_Catch) continue;
521 if (store->needs_anti_dependence_check()) continue; // not really a store
523 // Compute the alias index. Loads and stores with different alias
524 // indices do not need anti-dependence edges. Wide MemBar's are
525 // anti-dependent on everything (except immutable memories).
526 const TypePtr* adr_type = store->adr_type();
527 if (!C->can_alias(adr_type, load_alias_idx)) continue;
529 // Most slow-path runtime calls do NOT modify Java memory, but
530 // they can block and so write Raw memory.
531 if (store->is_Mach()) {
532 MachNode* mstore = store->as_Mach();
533 if (load_alias_idx != Compile::AliasIdxRaw) {
534 // Check for call into the runtime using the Java calling
535 // convention (and from there into a wrapper); it has no
536 // _method. Can't do this optimization for Native calls because
537 // they CAN write to Java memory.
538 if (mstore->ideal_Opcode() == Op_CallStaticJava) {
539 assert(mstore->is_MachSafePoint(), "");
540 MachSafePointNode* ms = (MachSafePointNode*) mstore;
541 assert(ms->is_MachCallJava(), "");
542 MachCallJavaNode* mcj = (MachCallJavaNode*) ms;
543 if (mcj->_method == NULL) {
544 // These runtime calls do not write to Java visible memory
545 // (other than Raw) and so do not require anti-dependence edges.
546 continue;
547 }
548 }
549 // Same for SafePoints: they read/write Raw but only read otherwise.
550 // This is basically a workaround for SafePoints only defining control
551 // instead of control + memory.
552 if (mstore->ideal_Opcode() == Op_SafePoint)
553 continue;
554 } else {
555 // Some raw memory, such as the load of "top" at an allocation,
556 // can be control dependent on the previous safepoint. See
557 // comments in GraphKit::allocate_heap() about control input.
558 // Inserting an anti-dep between such a safepoint and a use
559 // creates a cycle, and will cause a subsequent failure in
560 // local scheduling. (BugId 4919904)
561 // (%%% How can a control input be a safepoint and not a projection??)
562 if (mstore->ideal_Opcode() == Op_SafePoint && load->in(0) == mstore)
563 continue;
564 }
565 }
567 // Identify a block that the current load must be above,
568 // or else observe that 'store' is all the way up in the
569 // earliest legal block for 'load'. In the latter case,
570 // immediately insert an anti-dependence edge.
571 Block* store_block = _bbs[store->_idx];
572 assert(store_block != NULL, "unused killing projections skipped above");
574 if (store->is_Phi()) {
575 // 'load' uses memory which is one (or more) of the Phi's inputs.
576 // It must be scheduled not before the Phi, but rather before
577 // each of the relevant Phi inputs.
578 //
579 // Instead of finding the LCA of all inputs to a Phi that match 'mem',
580 // we mark each corresponding predecessor block and do a combined
581 // hoisting operation later (raise_LCA_above_marks).
582 //
583 // Do not assert(store_block != early, "Phi merging memory after access")
584 // PhiNode may be at start of block 'early' with backedge to 'early'
585 DEBUG_ONLY(bool found_match = false);
586 for (uint j = PhiNode::Input, jmax = store->req(); j < jmax; j++) {
587 if (store->in(j) == mem) { // Found matching input?
588 DEBUG_ONLY(found_match = true);
589 Block* pred_block = _bbs[store_block->pred(j)->_idx];
590 if (pred_block != early) {
591 // If any predecessor of the Phi matches the load's "early block",
592 // we do not need a precedence edge between the Phi and 'load'
593 // since the load will be forced into a block preceding the Phi.
594 pred_block->set_raise_LCA_mark(load_index);
595 assert(!LCA_orig->dominates(pred_block) ||
596 early->dominates(pred_block), "early is high enough");
597 must_raise_LCA = true;
598 }
599 }
600 }
601 assert(found_match, "no worklist bug");
602 #ifdef TRACK_PHI_INPUTS
603 #ifdef ASSERT
604 // This assert asks about correct handling of PhiNodes, which may not
605 // have all input edges directly from 'mem'. See BugId 4621264
606 int num_mem_inputs = phi_inputs.at_grow(store->_idx,0) + 1;
607 // Increment by exactly one even if there are multiple copies of 'mem'
608 // coming into the phi, because we will run this block several times
609 // if there are several copies of 'mem'. (That's how DU iterators work.)
610 phi_inputs.at_put(store->_idx, num_mem_inputs);
611 assert(PhiNode::Input + num_mem_inputs < store->req(),
612 "Expect at least one phi input will not be from original memory state");
613 #endif //ASSERT
614 #endif //TRACK_PHI_INPUTS
615 } else if (store_block != early) {
616 // 'store' is between the current LCA and earliest possible block.
617 // Label its block, and decide later on how to raise the LCA
618 // to include the effect on LCA of this store.
619 // If this store's block gets chosen as the raised LCA, we
620 // will find him on the non_early_stores list and stick him
621 // with a precedence edge.
622 // (But, don't bother if LCA is already raised all the way.)
623 if (LCA != early) {
624 store_block->set_raise_LCA_mark(load_index);
625 must_raise_LCA = true;
626 non_early_stores.push(store);
627 }
628 } else {
629 // Found a possibly-interfering store in the load's 'early' block.
630 // This means 'load' cannot sink at all in the dominator tree.
631 // Add an anti-dep edge, and squeeze 'load' into the highest block.
632 assert(store != load->in(0), "dependence cycle found");
633 if (verify) {
634 assert(store->find_edge(load) != -1, "missing precedence edge");
635 } else {
636 store->add_prec(load);
637 }
638 LCA = early;
639 // This turns off the process of gathering non_early_stores.
640 }
641 }
642 // (Worklist is now empty; all nearby stores have been visited.)
644 // Finished if 'load' must be scheduled in its 'early' block.
645 // If we found any stores there, they have already been given
646 // precedence edges.
647 if (LCA == early) return LCA;
649 // We get here only if there are no possibly-interfering stores
650 // in the load's 'early' block. Move LCA up above all predecessors
651 // which contain stores we have noted.
652 //
653 // The raised LCA block can be a home to such interfering stores,
654 // but its predecessors must not contain any such stores.
655 //
656 // The raised LCA will be a lower bound for placing the load,
657 // preventing the load from sinking past any block containing
658 // a store that may invalidate the memory state required by 'load'.
659 if (must_raise_LCA)
660 LCA = raise_LCA_above_marks(LCA, load->_idx, early, _bbs);
661 if (LCA == early) return LCA;
663 // Insert anti-dependence edges from 'load' to each store
664 // in the non-early LCA block.
665 // Mine the non_early_stores list for such stores.
666 if (LCA->raise_LCA_mark() == load_index) {
667 while (non_early_stores.size() > 0) {
668 Node* store = non_early_stores.pop();
669 Block* store_block = _bbs[store->_idx];
670 if (store_block == LCA) {
671 // add anti_dependence from store to load in its own block
672 assert(store != load->in(0), "dependence cycle found");
673 if (verify) {
674 assert(store->find_edge(load) != -1, "missing precedence edge");
675 } else {
676 store->add_prec(load);
677 }
678 } else {
679 assert(store_block->raise_LCA_mark() == load_index, "block was marked");
680 // Any other stores we found must be either inside the new LCA
681 // or else outside the original LCA. In the latter case, they
682 // did not interfere with any use of 'load'.
683 assert(LCA->dominates(store_block)
684 || !LCA_orig->dominates(store_block), "no stray stores");
685 }
686 }
687 }
689 // Return the highest block containing stores; any stores
690 // within that block have been given anti-dependence edges.
691 return LCA;
692 }
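// Illustrative sketch (added commentary, not part of the original change): the
// shape of the memory-state walk performed above. Only the leaves of the
// initial_mem -> (MergeMem ->)* store tree are possible interfering defs; the
// real code walks this tree iteratively with a visited list, while this
// hypothetical recursive version ignores sharing and cycles.
#if 0  // sketch only; not compiled
#include <vector>

struct SketchMemNode {
  bool _is_merge;                     // internal MergeMem-like node?
  std::vector<SketchMemNode*> _outs;  // users of this memory state
};

// Collect the fringe (possible interfering stores) reachable from 'mem'.
static void sketch_collect_possible_defs(SketchMemNode* mem,
                                         std::vector<SketchMemNode*>& defs) {
  for (size_t i = 0; i < mem->_outs.size(); i++) {
    SketchMemNode* use = mem->_outs[i];
    if (use->_is_merge) {
      sketch_collect_possible_defs(use, defs); // internal node: keep walking
    } else {
      defs.push_back(use);                     // leaf: a possible def
    }
  }
}
#endif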
694 // This class is used to iterate backwards over the nodes in the graph.
696 class Node_Backward_Iterator {
698 private:
699 Node_Backward_Iterator();
701 public:
702 // Constructor for the iterator
703 Node_Backward_Iterator(Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs);
705 // Return the next node in the backward walk, or NULL when done
706 Node *next();
708 private:
709 VectorSet &_visited;
710 Node_List &_stack;
711 Block_Array &_bbs;
712 };
714 // Constructor for the Node_Backward_Iterator
715 Node_Backward_Iterator::Node_Backward_Iterator( Node *root, VectorSet &visited, Node_List &stack, Block_Array &bbs )
716 : _visited(visited), _stack(stack), _bbs(bbs) {
717 // The stack should contain exactly the root
718 stack.clear();
719 stack.push(root);
721 // Clear the visited bits
722 visited.Clear();
723 }
725 // Iterator for the Node_Backward_Iterator
726 Node *Node_Backward_Iterator::next() {
728 // If the _stack is empty, then just return NULL: finished.
729 if ( !_stack.size() )
730 return NULL;
732 // '_stack' is emulating a real _stack. The 'visit-all-users' loop has been
733 // made stateless, so I do not need to record the index 'i' on my _stack.
734 // Instead I visit all users each time, scanning for unvisited users.
735 // I visit unvisited not-anti-dependence users first, then anti-dependent
736 // children next.
737 Node *self = _stack.pop();
739 // I cycle here when I am entering a deeper level of recursion.
740 // The key variable 'self' was set prior to jumping here.
741 while( 1 ) {
743 _visited.set(self->_idx);
745 // Now schedule all uses as late as possible.
746 uint src = self->is_Proj() ? self->in(0)->_idx : self->_idx;
747 uint src_rpo = _bbs[src]->_rpo;
749 // Schedule all nodes in a post-order visit
750 Node *unvisited = NULL; // Unvisited anti-dependent Node, if any
752 // Scan for unvisited nodes
753 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
754 // For all uses, schedule late
755 Node* n = self->fast_out(i); // Use
757 // Skip already visited children
758 if ( _visited.test(n->_idx) )
759 continue;
761 // do not traverse backward control edges
762 Node *use = n->is_Proj() ? n->in(0) : n;
763 uint use_rpo = _bbs[use->_idx]->_rpo;
765 if ( use_rpo < src_rpo )
766 continue;
768 // Phi nodes always precede uses in a basic block
769 if ( use_rpo == src_rpo && use->is_Phi() )
770 continue;
772 unvisited = n; // Found unvisited
774 // Check for possible-anti-dependent
775 if( !n->needs_anti_dependence_check() )
776 break; // Not visited, not anti-dep; schedule it NOW
777 }
779 // Did I find an unvisited not-anti-dependent Node?
780 if ( !unvisited )
781 break; // All done with children; post-visit 'self'
783 // Visit the unvisited Node. Contains the obvious push to
784 // indicate I'm entering a deeper level of recursion. I push the
785 // old state onto the _stack and set a new state and loop (recurse).
786 _stack.push(self);
787 self = unvisited;
788 } // End recursion loop
790 return self;
791 }
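// Illustrative sketch (added commentary, not part of the original change): the
// use-filtering rule applied in the scan above, pulled out as a hypothetical
// predicate. A use is skipped if it lies behind the def in reverse post order
// (a backward control edge) or if it is a Phi in the same block, since Phis
// conceptually precede all other uses in their block.
#if 0  // sketch only; not compiled
static bool sketch_should_visit_use(unsigned def_rpo, unsigned use_rpo,
                                    bool use_is_phi) {
  if (use_rpo < def_rpo)                return false; // backward control edge
  if (use_rpo == def_rpo && use_is_phi) return false; // Phi precedes its block's uses
  return true;
}
#endif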
793 //------------------------------ComputeLatenciesBackwards----------------------
794 // Compute the latency of all the instructions.
795 void PhaseCFG::ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack) {
796 #ifndef PRODUCT
797 if (trace_opto_pipelining())
798 tty->print("\n#---- ComputeLatenciesBackwards ----\n");
799 #endif
801 Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
802 Node *n;
804 // Walk over all the nodes from last to first
805 while (n = iter.next()) {
806 // Set the latency for the definitions of this instruction
807 partial_latency_of_defs(n);
808 }
809 } // end ComputeLatenciesBackwards
811 //------------------------------partial_latency_of_defs------------------------
812 // Compute the latency impact of this node on all defs. This computes
813 // a number that increases as we approach the beginning of the routine.
814 void PhaseCFG::partial_latency_of_defs(Node *n) {
815 // Set the latency for this instruction
816 #ifndef PRODUCT
817 if (trace_opto_pipelining()) {
818 tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
819 n->_idx, _node_latency.at_grow(n->_idx));
820 dump();
821 }
822 #endif
824 if (n->is_Proj())
825 n = n->in(0);
827 if (n->is_Root())
828 return;
830 uint nlen = n->len();
831 uint use_latency = _node_latency.at_grow(n->_idx);
832 uint use_pre_order = _bbs[n->_idx]->_pre_order;
834 for ( uint j=0; j<nlen; j++ ) {
835 Node *def = n->in(j);
837 if (!def || def == n)
838 continue;
840 // Walk backwards thru projections
841 if (def->is_Proj())
842 def = def->in(0);
844 #ifndef PRODUCT
845 if (trace_opto_pipelining()) {
846 tty->print("# in(%2d): ", j);
847 def->dump();
848 }
849 #endif
851 // If the defining block is not known, assume it is ok
852 Block *def_block = _bbs[def->_idx];
853 uint def_pre_order = def_block ? def_block->_pre_order : 0;
855 if ( (use_pre_order < def_pre_order) ||
856 (use_pre_order == def_pre_order && n->is_Phi()) )
857 continue;
859 uint delta_latency = n->latency(j);
860 uint current_latency = delta_latency + use_latency;
862 if (_node_latency.at_grow(def->_idx) < current_latency) {
863 _node_latency.at_put_grow(def->_idx, current_latency);
864 }
866 #ifndef PRODUCT
867 if (trace_opto_pipelining()) {
868 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
869 use_latency, j, delta_latency, current_latency, def->_idx,
870 _node_latency.at_grow(def->_idx));
871 }
872 #endif
873 }
874 }
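// Illustrative sketch (added commentary, not part of the original change): the
// recurrence the latency passes compute. Walking backwards from uses to defs,
// each def's latency is raised to at least (use latency + edge latency):
//   latency[def] = max(latency[def], latency[use] + edge_latency(use, j))
#if 0  // sketch only; not compiled
static void sketch_update_def_latency(unsigned* latency,   // per-node latency table
                                      unsigned  def_idx,
                                      unsigned  use_idx,
                                      unsigned  edge_latency) {
  unsigned candidate = latency[use_idx] + edge_latency;
  if (latency[def_idx] < candidate) {
    latency[def_idx] = candidate;
  }
}
#endif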
876 //------------------------------latency_from_use-------------------------------
877 // Compute the latency of a specific use
878 int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
879 // If self-reference, return no latency
880 if (use == n || use->is_Root())
881 return 0;
883 uint def_pre_order = _bbs[def->_idx]->_pre_order;
884 uint latency = 0;
886 // If the use is not a projection, then it is simple...
887 if (!use->is_Proj()) {
888 #ifndef PRODUCT
889 if (trace_opto_pipelining()) {
890 tty->print("# out(): ");
891 use->dump();
892 }
893 #endif
895 uint use_pre_order = _bbs[use->_idx]->_pre_order;
897 if (use_pre_order < def_pre_order)
898 return 0;
900 if (use_pre_order == def_pre_order && use->is_Phi())
901 return 0;
903 uint nlen = use->len();
904 uint nl = _node_latency.at_grow(use->_idx);
906 for ( uint j=0; j<nlen; j++ ) {
907 if (use->in(j) == n) {
908 // Change this if we want local latencies
909 uint ul = use->latency(j);
910 uint l = ul + nl;
911 if (latency < l) latency = l;
912 #ifndef PRODUCT
913 if (trace_opto_pipelining()) {
914 tty->print_cr("# %d + edge_latency(%d) == %d -> %d, latency = %d",
915 nl, j, ul, l, latency);
916 }
917 #endif
918 }
919 }
920 } else {
921 // This is a projection, just grab the latency of the use(s)
922 for (DUIterator_Fast jmax, j = use->fast_outs(jmax); j < jmax; j++) {
923 uint l = latency_from_use(use, def, use->fast_out(j));
924 if (latency < l) latency = l;
925 }
926 }
928 return latency;
929 }
931 //------------------------------latency_from_uses------------------------------
932 // Compute the latency of this instruction relative to all of its uses.
933 // This computes a number that increases as we approach the beginning of the
934 // routine.
935 void PhaseCFG::latency_from_uses(Node *n) {
936 // Set the latency for this instruction
937 #ifndef PRODUCT
938 if (trace_opto_pipelining()) {
939 tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
940 n->_idx, _node_latency.at_grow(n->_idx));
941 dump();
942 }
943 #endif
944 uint latency=0;
945 const Node *def = n->is_Proj() ? n->in(0): n;
947 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
948 uint l = latency_from_use(n, def, n->fast_out(i));
950 if (latency < l) latency = l;
951 }
953 _node_latency.at_put_grow(n->_idx, latency);
954 }
956 //------------------------------hoist_to_cheaper_block-------------------------
957 // Pick a block for node self, between early and LCA, that is a cheaper
958 // alternative to LCA.
959 Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
960 const double delta = 1+PROB_UNLIKELY_MAG(4);
961 Block* least = LCA;
962 double least_freq = least->_freq;
963 uint target = _node_latency.at_grow(self->_idx);
964 uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
965 uint end_latency = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
966 bool in_latency = (target <= start_latency);
967 const Block* root_block = _bbs[_root->_idx];
969 // Turn off latency scheduling if scheduling is just plain off
970 if (!C->do_scheduling())
971 in_latency = true;
973 // Do not hoist (to cover latency) instructions which target a
974 // single register. Hoisting stretches the live range of the
975 // single register and may force spilling.
976 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
977 if (mach && mach->out_RegMask().is_bound1() && mach->out_RegMask().is_NotEmpty())
978 in_latency = true;
980 #ifndef PRODUCT
981 if (trace_opto_pipelining()) {
982 tty->print("# Find cheaper block for latency %d: ",
983 _node_latency.at_grow(self->_idx));
984 self->dump();
985 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
986 LCA->_pre_order,
987 LCA->_nodes[0]->_idx,
988 start_latency,
989 LCA->_nodes[LCA->end_idx()]->_idx,
990 end_latency,
991 least_freq);
992 }
993 #endif
995 // Walk up the dominator tree from LCA (Lowest common ancestor) to
996 // the earliest legal location. Capture the least execution frequency.
997 while (LCA != early) {
998 LCA = LCA->_idom; // Follow up the dominator tree
1000 if (LCA == NULL) {
1001 // Bailout without retry
1002 C->record_method_not_compilable("late schedule failed: LCA == NULL");
1003 return least;
1004 }
1006 // Don't hoist machine instructions to the root basic block
1007 if (mach && LCA == root_block)
1008 break;
1010 uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
1011 uint end_idx = LCA->end_idx();
1012 uint end_lat = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
1013 double LCA_freq = LCA->_freq;
1014 #ifndef PRODUCT
1015 if (trace_opto_pipelining()) {
1016 tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
1017 LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
1018 }
1019 #endif
1020 if (LCA_freq < least_freq || // Better Frequency
1021 ( !in_latency && // No block containing latency
1022 LCA_freq < least_freq * delta && // No worse frequency
1023 target >= end_lat && // within latency range
1024 !self->is_iteratively_computed() ) // But don't hoist IV increments
1025 // because they may end up above other uses of their phi forcing
1026 // their result register to be different from their input.
1027 ) {
1028 least = LCA; // Found cheaper block
1029 least_freq = LCA_freq;
1030 start_latency = start_lat;
1031 end_latency = end_lat;
1032 if (target <= start_lat)
1033 in_latency = true;
1034 }
1035 }
1037 #ifndef PRODUCT
1038 if (trace_opto_pipelining()) {
1039 tty->print_cr("# Choose block B%d with start latency=%d and freq=%g",
1040 least->_pre_order, start_latency, least_freq);
1041 }
1042 #endif
1044 // See if the latency needs to be updated
1045 if (target < end_latency) {
1046 #ifndef PRODUCT
1047 if (trace_opto_pipelining()) {
1048 tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
1049 }
1050 #endif
1051 _node_latency.at_put_grow(self->_idx, end_latency);
1052 partial_latency_of_defs(self);
1053 }
1055 return least;
1056 }
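// Illustrative sketch (added commentary, not part of the original change): the
// acceptance test applied while walking from the LCA up toward 'early',
// simplified (it omits the is_iteratively_computed() exception). A candidate
// block wins if it is strictly less frequent, or nearly as frequent (within
// 'delta') while still meeting the latency target.
#if 0  // sketch only; not compiled
static bool sketch_prefer_block(double   cand_freq, double best_freq, double delta,
                                bool     already_in_latency,
                                unsigned target_latency, unsigned cand_end_latency) {
  if (cand_freq < best_freq) return true;        // strictly cheaper
  return !already_in_latency &&                  // latency not yet hidden
         cand_freq < best_freq * delta &&        // not much more expensive
         target_latency >= cand_end_latency;     // still meets the latency goal
}
#endif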
1059 //------------------------------schedule_late-----------------------------------
1060 // Now schedule all codes as LATE as possible. This is the LCA in the
1061 // dominator tree of all USES of a value. Pick the block with the least
1062 // loop nesting depth that is lowest in the dominator tree.
1063 extern const char must_clone[];
1064 void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
1065 #ifndef PRODUCT
1066 if (trace_opto_pipelining())
1067 tty->print("\n#---- schedule_late ----\n");
1068 #endif
1070 Node_Backward_Iterator iter((Node *)_root, visited, stack, _bbs);
1071 Node *self;
1073 // Walk over all the nodes from last to first
1074 while (self = iter.next()) {
1075 Block* early = _bbs[self->_idx]; // Earliest legal placement
1077 if (self->is_top()) {
1078 // Top node goes in bb #2 with other constants.
1079 // It must be special-cased, because it has no out edges.
1080 early->add_inst(self);
1081 continue;
1082 }
1084 // No uses, just terminate
1085 if (self->outcnt() == 0) {
1086 assert(self->Opcode() == Op_MachProj, "sanity");
1087 continue; // Must be a dead machine projection
1088 }
1090 // If node is pinned in the block, then no scheduling can be done.
1091 if( self->pinned() ) // Pinned in block?
1092 continue;
1094 MachNode* mach = self->is_Mach() ? self->as_Mach() : NULL;
1095 if (mach) {
1096 switch (mach->ideal_Opcode()) {
1097 case Op_CreateEx:
1098 // Don't move exception creation
1099 early->add_inst(self);
1100 continue;
1101 break;
1102 case Op_CheckCastPP:
1103 // Don't move CheckCastPP nodes away from their input, if the input
1104 // is a rawptr (5071820).
1105 Node *def = self->in(1);
1106 if (def != NULL && def->bottom_type()->base() == Type::RawPtr) {
1107 early->add_inst(self);
1108 continue;
1109 }
1110 break;
1111 }
1112 }
1114 // Gather LCA of all uses
1115 Block *LCA = NULL;
1116 {
1117 for (DUIterator_Fast imax, i = self->fast_outs(imax); i < imax; i++) {
1118 // For all uses, find LCA
1119 Node* use = self->fast_out(i);
1120 LCA = raise_LCA_above_use(LCA, use, self, _bbs);
1121 }
1122 } // (Hide defs of imax, i from rest of block.)
1124 // Place temps in the block of their use. This isn't a
1125 // requirement for correctness but it reduces useless
1126 // interference between temps and other nodes.
1127 if (mach != NULL && mach->is_MachTemp()) {
1128 _bbs.map(self->_idx, LCA);
1129 LCA->add_inst(self);
1130 continue;
1131 }
1133 // Check if 'self' could be anti-dependent on memory
1134 if (self->needs_anti_dependence_check()) {
1135 // Hoist LCA above possible-defs and insert anti-dependences to
1136 // defs in new LCA block.
1137 LCA = insert_anti_dependences(LCA, self);
1138 }
1140 if (early->_dom_depth > LCA->_dom_depth) {
1141 // Somehow the LCA has moved above the earliest legal point.
1142 // (One way this can happen is via memory_early_block.)
1143 if (C->subsume_loads() == true && !C->failing()) {
1144 // Retry with subsume_loads == false
1145 // If this is the first failure, the sentinel string will "stick"
1146 // to the Compile object, and the C2Compiler will see it and retry.
1147 C->record_failure(C2Compiler::retry_no_subsuming_loads());
1148 } else {
1149 // Bailout without retry when (early->_dom_depth > LCA->_dom_depth)
1150 C->record_method_not_compilable("late schedule failed: incorrect graph");
1151 }
1152 return;
1153 }
1155 // If there is no opportunity to hoist, then we're done.
1156 bool try_to_hoist = (LCA != early);
1158 // Must clone guys stay next to use; no hoisting allowed.
1159 // Also cannot hoist guys that alter memory or are otherwise not
1160 // allocatable (hoisting can make a value live longer, leading to
1161 // anti and output dependency problems which are normally resolved
1162 // by the register allocator giving everyone a different register).
1163 if (mach != NULL && must_clone[mach->ideal_Opcode()])
1164 try_to_hoist = false;
1166 Block* late = NULL;
1167 if (try_to_hoist) {
1168 // Now find the block with the least execution frequency.
1169 // Start at the latest schedule and work up to the earliest schedule
1170 // in the dominator tree. Thus the Node will dominate all its uses.
1171 late = hoist_to_cheaper_block(LCA, early, self);
1172 } else {
1173 // Just use the LCA of the uses.
1174 late = LCA;
1175 }
1177 // Put the node into target block
1178 schedule_node_into_block(self, late);
1180 #ifdef ASSERT
1181 if (self->needs_anti_dependence_check()) {
1182 // since precedence edges are only inserted when we're sure they
1183 // are needed make sure that after placement in a block we don't
1184 // need any new precedence edges.
1185 verify_anti_dependences(late, self);
1186 }
1187 #endif
1188 } // Loop until all nodes have been visited
1190 } // end ScheduleLate
1192 //------------------------------GlobalCodeMotion-------------------------------
1193 void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_list ) {
1194 ResourceMark rm;
1196 #ifndef PRODUCT
1197 if (trace_opto_pipelining()) {
1198 tty->print("\n---- Start GlobalCodeMotion ----\n");
1199 }
1200 #endif
1202 // Initialize the bbs.map for things on the proj_list
1203 uint i;
1204 for( i=0; i < proj_list.size(); i++ )
1205 _bbs.map(proj_list[i]->_idx, NULL);
1207 // Set the basic block for Nodes pinned into blocks
1208 Arena *a = Thread::current()->resource_area();
1209 VectorSet visited(a);
1210 schedule_pinned_nodes( visited );
1212 // Find the earliest Block any instruction can be placed in. Some
1213 // instructions are pinned into Blocks. Unpinned instructions can
1214 // appear in last block in which all their inputs occur.
1215 visited.Clear();
1216 Node_List stack(a);
1217 stack.map( (unique >> 1) + 16, NULL); // Pre-grow the list
1218 if (!schedule_early(visited, stack)) {
1219 // Bailout without retry
1220 C->record_method_not_compilable("early schedule failed");
1221 return;
1222 }
1224 // Build Def-Use edges.
1225 proj_list.push(_root); // Add real root as another root
1226 proj_list.pop();
1228 // Compute the latency information (via backwards walk) for all the
1229 // instructions in the graph
1230 GrowableArray<uint> node_latency;
1231 _node_latency = node_latency;
1233 if( C->do_scheduling() )
1234 ComputeLatenciesBackwards(visited, stack);
1236 // Now schedule all codes as LATE as possible. This is the LCA in the
1237 // dominator tree of all USES of a value. Pick the block with the least
1238 // loop nesting depth that is lowest in the dominator tree.
1239 // ( visited.Clear() called in schedule_late()->Node_Backward_Iterator() )
1240 schedule_late(visited, stack);
1241 if( C->failing() ) {
1242 // schedule_late fails only when graph is incorrect.
1243 assert(!VerifyGraphEdges, "verification should have failed");
1244 return;
1245 }
1247 unique = C->unique();
1249 #ifndef PRODUCT
1250 if (trace_opto_pipelining()) {
1251 tty->print("\n---- Detect implicit null checks ----\n");
1252 }
1253 #endif
1255 // Detect implicit-null-check opportunities. Basically, find NULL checks
1256 // with suitable memory ops nearby. Use the memory op to do the NULL check.
1257 // I can generate a memory op if there is not one nearby.
1258 if (C->is_method_compilation()) {
1259 // Don't do it for natives, adapters, or runtime stubs
1260 int allowed_reasons = 0;
1261 // ...and don't do it when there have been too many traps, globally.
1262 for (int reason = (int)Deoptimization::Reason_none+1;
1263 reason < Compile::trapHistLength; reason++) {
1264 assert(reason < BitsPerInt, "recode bit map");
1265 if (!C->too_many_traps((Deoptimization::DeoptReason) reason))
1266 allowed_reasons |= nth_bit(reason);
1267 }
1268 // By reversing the loop direction we get a very minor gain on mpegaudio.
1269 // Feel free to revert to a forward loop for clarity.
1270 // for( int i=0; i < (int)matcher._null_check_tests.size(); i+=2 ) {
1271 for( int i= matcher._null_check_tests.size()-2; i>=0; i-=2 ) {
1272 Node *proj = matcher._null_check_tests[i ];
1273 Node *val = matcher._null_check_tests[i+1];
1274 _bbs[proj->_idx]->implicit_null_check(this, proj, val, allowed_reasons);
1275 // The implicit_null_check will only perform the transformation
1276 // if the null branch is truly uncommon, *and* it leads to an
1277 // uncommon trap. Combined with the too_many_traps guards
1278 // above, this prevents SEGV storms reported in 6366351,
1279 // by recompiling offending methods without this optimization.
1280 }
1281 }
1283 #ifndef PRODUCT
1284 if (trace_opto_pipelining()) {
1285 tty->print("\n---- Start Local Scheduling ----\n");
1286 }
1287 #endif
1289 // Schedule locally. Right now a simple topological sort.
1290 // Later, do a real latency aware scheduler.
1291 int *ready_cnt = NEW_RESOURCE_ARRAY(int,C->unique());
1292 memset( ready_cnt, -1, C->unique() * sizeof(int) );
1293 visited.Clear();
1294 for (i = 0; i < _num_blocks; i++) {
1295 if (!_blocks[i]->schedule_local(this, matcher, ready_cnt, visited)) {
1296 if (!C->failure_reason_is(C2Compiler::retry_no_subsuming_loads())) {
1297 C->record_method_not_compilable("local schedule failed");
1298 }
1299 return;
1300 }
1301 }
1303 // If we inserted any instructions between a Call and its CatchNode,
1304 // clone the instructions on all paths below the Catch.
1305 for( i=0; i < _num_blocks; i++ )
1306 _blocks[i]->call_catch_cleanup(_bbs);
1308 #ifndef PRODUCT
1309 if (trace_opto_pipelining()) {
1310 tty->print("\n---- After GlobalCodeMotion ----\n");
1311 for (uint i = 0; i < _num_blocks; i++) {
1312 _blocks[i]->dump();
1313 }
1314 }
1315 #endif
1316 }
1319 //------------------------------Estimate_Block_Frequency-----------------------
1320 // Estimate block frequencies based on IfNode probabilities.
1321 void PhaseCFG::Estimate_Block_Frequency() {
1322 int cnts = C->method() ? C->method()->interpreter_invocation_count() : 1;
1323 // Most of our algorithms will die horribly if frequency can become
1324 // negative, so make sure cnts is a sane value.
1325 if( cnts <= 0 ) cnts = 1;
1326 float f = (float)cnts/(float)FreqCountInvocations;
1328 // Create the loop tree and calculate loop depth.
1329 _root_loop = create_loop_tree();
1330 _root_loop->compute_loop_depth(0);
1332 // Compute block frequency of each block, relative to a single loop entry.
1333 _root_loop->compute_freq();
1335 // Adjust all frequencies to be relative to a single method entry
1336 _root_loop->_freq = f * 1.0;
1337 _root_loop->scale_freq();
1339 // force paths ending at uncommon traps to be infrequent
1340 Block_List worklist;
1341 Block* root_blk = _blocks[0];
1342 for (uint i = 0; i < root_blk->num_preds(); i++) {
1343 Block *pb = _bbs[root_blk->pred(i)->_idx];
1344 if (pb->has_uncommon_code()) {
1345 worklist.push(pb);
1346 }
1347 }
1348 while (worklist.size() > 0) {
1349 Block* uct = worklist.pop();
1350 uct->_freq = PROB_MIN;
1351 for (uint i = 0; i < uct->num_preds(); i++) {
1352 Block *pb = _bbs[uct->pred(i)->_idx];
1353 if (pb->_num_succs == 1 && pb->_freq > PROB_MIN) {
1354 worklist.push(pb);
1355 }
1356 }
1357 }
1359 #ifndef PRODUCT
1360 if (PrintCFGBlockFreq) {
1361 tty->print_cr("CFG Block Frequencies");
1362 _root_loop->dump_tree();
1363 if (Verbose) {
1364 tty->print_cr("PhaseCFG dump");
1365 dump();
1366 tty->print_cr("Node dump");
1367 _root->dump(99999);
1368 }
1369 }
1370 #endif
1371 }
1373 //----------------------------create_loop_tree--------------------------------
1374 // Create a loop tree from the CFG
1375 CFGLoop* PhaseCFG::create_loop_tree() {
1377 #ifdef ASSERT
1378 assert( _blocks[0] == _broot, "" );
1379 for (uint i = 0; i < _num_blocks; i++ ) {
1380 Block *b = _blocks[i];
1381 // Check that the _loop fields are clear...we could clear them if not.
1382 assert(b->_loop == NULL, "clear _loop expected");
1383 // Sanity check that the RPO numbering is reflected in the _blocks array.
1384 // It doesn't have to be for the loop tree to be built, but if it is not,
1385 // then the blocks have been reordered since dom graph building...which
1386 // would cast doubt on the RPO numbering.
1387 assert(b->_rpo == i, "unexpected reverse post order number");
1388 }
1389 #endif
1391 int idct = 0;
1392 CFGLoop* root_loop = new CFGLoop(idct++);
1394 Block_List worklist;
1396 // Assign blocks to loops
1397 for(uint i = _num_blocks - 1; i > 0; i-- ) { // skip Root block
1398 Block *b = _blocks[i];
1400 if (b->head()->is_Loop()) {
1401 Block* loop_head = b;
1402 assert(loop_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1403 Node* tail_n = loop_head->pred(LoopNode::LoopBackControl);
1404 Block* tail = _bbs[tail_n->_idx];
1406 // Defensively filter out Loop nodes for non-single-entry loops.
1407 // For all reasonable loops, the head occurs before the tail in RPO.
1408 if (i <= tail->_rpo) {
1410 // The tail and (recursive) predecessors of the tail
1411 // are made members of a new loop.
1413 assert(worklist.size() == 0, "nonempty worklist");
1414 CFGLoop* nloop = new CFGLoop(idct++);
1415 assert(loop_head->_loop == NULL, "just checking");
1416 loop_head->_loop = nloop;
1417 // Add to nloop so push_pred() will skip over inner loops
1418 nloop->add_member(loop_head);
1419 nloop->push_pred(loop_head, LoopNode::LoopBackControl, worklist, _bbs);
1421 while (worklist.size() > 0) {
1422 Block* member = worklist.pop();
1423 if (member != loop_head) {
1424 for (uint j = 1; j < member->num_preds(); j++) {
1425 nloop->push_pred(member, j, worklist, _bbs);
1426 }
1427 }
1428 }
1429 }
1430 }
1431 }
1433 // Create a member list for each loop consisting
1434 // of both blocks and (immediate child) loops.
1435 for (uint i = 0; i < _num_blocks; i++) {
1436 Block *b = _blocks[i];
1437 CFGLoop* lp = b->_loop;
1438 if (lp == NULL) {
1439 // Not assigned to a loop. Add it to the method's pseudo loop.
1440 b->_loop = root_loop;
1441 lp = root_loop;
1442 }
1443 if (lp == root_loop || b != lp->head()) { // loop heads are already members
1444 lp->add_member(b);
1445 }
1446 if (lp != root_loop) {
1447 if (lp->parent() == NULL) {
1448 // Not a nested loop. Make it a child of the method's pseudo loop.
1449 root_loop->add_nested_loop(lp);
1450 }
1451 if (b == lp->head()) {
1452 // Add nested loop to member list of parent loop.
1453 lp->parent()->add_member(lp);
1454 }
1455 }
1456 }
1458 return root_loop;
1459 }
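// Illustrative sketch (added commentary, not part of the original change): the
// membership rule used above, ignoring loop nesting. Starting from the
// back-edge tail, blocks are pulled into the loop by walking predecessors until
// the head is reached; predecessors at or before the head in RPO are filtered
// out, which defensively excludes non-single-entry loops.
#if 0  // sketch only; not compiled
#include <vector>

struct SketchBlock {
  unsigned _rpo;                      // reverse-post-order number
  int      _loop_id;                  // -1 means "not yet in any loop"
  std::vector<SketchBlock*> _preds;
};

static void sketch_collect_loop(SketchBlock* head, SketchBlock* tail, int loop_id) {
  std::vector<SketchBlock*> worklist;
  head->_loop_id = loop_id;           // the head is a member but is not expanded
  tail->_loop_id = loop_id;
  worklist.push_back(tail);
  while (!worklist.empty()) {
    SketchBlock* b = worklist.back(); worklist.pop_back();
    for (size_t i = 0; i < b->_preds.size(); i++) {
      SketchBlock* p = b->_preds[i];
      if (p->_loop_id == loop_id) continue;  // already a member (or the head)
      if (p->_rpo <= head->_rpo)  continue;  // outside: at or before the head in RPO
      p->_loop_id = loop_id;
      worklist.push_back(p);
    }
  }
}
#endif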
1461 //------------------------------push_pred--------------------------------------
1462 void CFGLoop::push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk) {
1463 Node* pred_n = blk->pred(i);
1464 Block* pred = node_to_blk[pred_n->_idx];
1465 CFGLoop *pred_loop = pred->_loop;
1466 if (pred_loop == NULL) {
1467 // Filter out blocks for non-single-entry loops.
1468 // For all reasonable loops, the head occurs before the tail in RPO.
1469 if (pred->_rpo > head()->_rpo) {
1470 pred->_loop = this;
1471 worklist.push(pred);
1472 }
1473 } else if (pred_loop != this) {
1474 // Nested loop.
1475 while (pred_loop->_parent != NULL && pred_loop->_parent != this) {
1476 pred_loop = pred_loop->_parent;
1477 }
1478 // Make pred's loop be a child
1479 if (pred_loop->_parent == NULL) {
1480 add_nested_loop(pred_loop);
1481 // Continue with loop entry predecessor.
1482 Block* pred_head = pred_loop->head();
1483 assert(pred_head->num_preds() - 1 == 2, "loop must have 2 predecessors");
1484 assert(pred_head != head(), "loop head in only one loop");
1485 push_pred(pred_head, LoopNode::EntryControl, worklist, node_to_blk);
1486 } else {
1487 assert(pred_loop->_parent == this && _parent == NULL, "just checking");
1488 }
1489 }
1490 }
1492 //------------------------------add_nested_loop--------------------------------
1493 // Make cl a child of the current loop in the loop tree.
1494 void CFGLoop::add_nested_loop(CFGLoop* cl) {
1495 assert(_parent == NULL, "no parent yet");
1496 assert(cl != this, "not my own parent");
1497 cl->_parent = this;
1498 CFGLoop* ch = _child;
1499 if (ch == NULL) {
1500 _child = cl;
1501 } else {
1502 while (ch->_sibling != NULL) { ch = ch->_sibling; }
1503 ch->_sibling = cl;
1504 }
1505 }
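// Added note: children are kept on a singly linked sibling chain, so after
// parent->add_nested_loop(a) followed by parent->add_nested_loop(b)
// (illustrative names), parent->_child == a and a->_sibling == b.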
1507 //------------------------------compute_loop_depth-----------------------------
1508 // Store the loop depth in each CFGLoop object.
1509 // Recursively walk the children to do the same for them.
1510 void CFGLoop::compute_loop_depth(int depth) {
1511 _depth = depth;
1512 CFGLoop* ch = _child;
1513 while (ch != NULL) {
1514 ch->compute_loop_depth(depth + 1);
1515 ch = ch->_sibling;
1516 }
1517 }
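// Added example: assuming the caller invokes this on the method's pseudo loop
// with depth 0 (the call site is outside this excerpt), the pseudo loop gets
// _depth 0, its immediate loops _depth 1, loops nested inside those _depth 2,
// and so on.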
1519 //------------------------------compute_freq-----------------------------------
1520 // Compute the frequency of each block and loop, relative to a single entry
1521 // into the dominating loop head.
1522 void CFGLoop::compute_freq() {
1523 // Bottom up traversal of loop tree (visit inner loops first.)
1524 // Set loop head frequency to 1.0, then transitively
1525 // compute frequency for all successors in the loop,
1526 // as well as for each exit edge. Inner loops are
1527 // treated as single blocks with loop exit targets
1528 // as the successor blocks.
1530 // Nested loops first
1531 CFGLoop* ch = _child;
1532 while (ch != NULL) {
1533 ch->compute_freq();
1534 ch = ch->_sibling;
1535 }
1536 assert (_members.length() > 0, "no empty loops");
1537 Block* hd = head();
1538 hd->_freq = 1.0f;
1539 for (int i = 0; i < _members.length(); i++) {
1540 CFGElement* s = _members.at(i);
1541 float freq = s->_freq;
1542 if (s->is_block()) {
1543 Block* b = s->as_Block();
1544 for (uint j = 0; j < b->_num_succs; j++) {
1545 Block* sb = b->_succs[j];
1546 update_succ_freq(sb, freq * b->succ_prob(j));
1547 }
1548 } else {
1549 CFGLoop* lp = s->as_CFGLoop();
1550 assert(lp->_parent == this, "immediate child");
1551 for (int k = 0; k < lp->_exits.length(); k++) {
1552 Block* eb = lp->_exits.at(k).get_target();
1553 float prob = lp->_exits.at(k).get_prob();
1554 update_succ_freq(eb, freq * prob);
1555 }
1556 }
1557 }
1559 #if 0
1560 // Raise frequency of the loop backedge block, in an effort
1561 // to keep it empty. Skip the method level "loop".
1562 if (_parent != NULL) {
1563 CFGElement* s = _members.at(_members.length() - 1);
1564 if (s->is_block()) {
1565 Block* bk = s->as_Block();
1566 if (bk->_num_succs == 1 && bk->_succs[0] == hd) {
1567 // almost any value >= 1.0f works
1568 // FIXME: raw constant
1569 bk->_freq = 1.05f;
1570 }
1571 }
1572 }
1573 #endif
1575 // For all loops other than the outer, "method" loop,
1576 // sum and normalize the exit probability. The "method" loop
1577 // should keep the initial exit probability of 1, so that
1578 // inner blocks do not get erroneously scaled.
1579 if (_depth != 0) {
1580 // Total the exit probabilities for this loop.
1581 float exits_sum = 0.0f;
1582 for (int i = 0; i < _exits.length(); i++) {
1583 exits_sum += _exits.at(i).get_prob();
1584 }
1586 // Normalize the exit probabilities. Until now, the
1587 // probabilities estimate the likelihood of exit on
1588 // a single loop iteration; afterward, they estimate
1589 // the probability of exit per loop entry.
1590 for (int i = 0; i < _exits.length(); i++) {
1591 Block* et = _exits.at(i).get_target();
1592 float new_prob = _exits.at(i).get_prob() / exits_sum;
1593 BlockProbPair bpp(et, new_prob);
1594 _exits.at_put(i, bpp);
1595 }
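// Added worked example (illustrative numbers): a loop with two exit edges of
// per-iteration probability 0.01 and 0.03 has exits_sum == 0.04; the exits
// are rescaled to 0.25 and 0.75 (probability of leaving through each exit per
// loop entry), and the saved exits_sum implies a trip count of about 25.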
1597 // Save the total, but guard against unreasonable probability,
1598 // as the value is used to estimate the loop trip count.
1599 // An infinite trip count would blur relative block
1600 // frequencies.
1601 if (exits_sum > 1.0f) exits_sum = 1.0f;
1602 if (exits_sum < PROB_MIN) exits_sum = PROB_MIN;
1603 _exit_prob = exits_sum;
1604 }
1605 }
1607 //------------------------------succ_prob-------------------------------------
1608 // Determine the probability of reaching successor 'i' from the receiver block.
1609 float Block::succ_prob(uint i) {
1610 int eidx = end_idx();
1611 Node *n = _nodes[eidx]; // Get ending Node
1613 int op = n->Opcode();
1614 if (n->is_Mach()) {
1615 if (n->is_MachNullCheck()) {
1616 // Can only reach here if called after lcm. The original Op_If is gone,
1617 // so we attempt to infer the probability from one or both of the
1618 // successor blocks.
1619 assert(_num_succs == 2, "expecting 2 successors of a null check");
1620 // If either successor has only one predecessor, then the
1621 // probability estimate can be derived using the
1622 // relative frequency of the successor and this block.
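// Added example (illustrative numbers): if this block has _freq 1.0 and
// successor i has this block as its only predecessor with _freq 0.9998, the
// inferred probability of reaching successor i is 0.9998.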
1623 if (_succs[i]->num_preds() == 2) {
1624 return _succs[i]->_freq / _freq;
1625 } else if (_succs[1-i]->num_preds() == 2) {
1626 return 1.0f - (_succs[1-i]->_freq / _freq);
1627 } else {
1628 // Estimate using both successor frequencies
1629 float freq = _succs[i]->_freq;
1630 return freq / (freq + _succs[1-i]->_freq);
1631 }
1632 }
1633 op = n->as_Mach()->ideal_Opcode();
1634 }
1637 // Switch on branch type
1638 switch( op ) {
1639 case Op_CountedLoopEnd:
1640 case Op_If: {
1641 assert (i < 2, "just checking");
1642 // Conditionals pass on only part of their frequency
1643 float prob = n->as_MachIf()->_prob;
1644 assert(prob >= 0.0 && prob <= 1.0, "out of range probability");
1645 // If succ[i] is the FALSE branch, invert path info
1646 if( _nodes[i + eidx + 1]->Opcode() == Op_IfFalse ) {
1647 return 1.0f - prob; // not taken
1648 } else {
1649 return prob; // taken
1650 }
1651 }
1653 case Op_Jump:
1654 // Divide the frequency between all successors evenly
1655 return 1.0f/_num_succs;
1657 case Op_Catch: {
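// Added note: the fall-through path keeps nearly all of the frequency; each
// exceptional path gets PROB_UNLIKELY_MAG(5), assumed here to be on the
// order of 1e-5.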
1658 const CatchProjNode *ci = _nodes[i + eidx + 1]->as_CatchProj();
1659 if (ci->_con == CatchProjNode::fall_through_index) {
1660 // Fall-thru path gets the lion's share.
1661 return 1.0f - PROB_UNLIKELY_MAG(5)*_num_succs;
1662 } else {
1663 // Presume exceptional paths are equally unlikely
1664 return PROB_UNLIKELY_MAG(5);
1665 }
1666 }
1668 case Op_Root:
1669 case Op_Goto:
1670 // Pass frequency straight thru to target
1671 return 1.0f;
1673 case Op_NeverBranch:
1674 return 0.0f;
1676 case Op_TailCall:
1677 case Op_TailJump:
1678 case Op_Return:
1679 case Op_Halt:
1680 case Op_Rethrow:
1681 // Do not push out freq to root block
1682 return 0.0f;
1684 default:
1685 ShouldNotReachHere();
1686 }
1688 return 0.0f;
1689 }
1691 //------------------------------update_succ_freq-------------------------------
1692 // Update the appropriate frequency associated with block 'b', a successor of
1693 // a block in this loop.
1694 void CFGLoop::update_succ_freq(Block* b, float freq) {
1695 if (b->_loop == this) {
1696 if (b == head()) {
1697 // back branch within the loop
1698 // Do nothing now; the loop-carried frequency will be
1699 // adjusted later in scale_freq().
1700 } else {
1701 // simple branch within the loop
1702 b->_freq += freq;
1703 }
1704 } else if (!in_loop_nest(b)) {
1705 // branch is exit from this loop
1706 BlockProbPair bpp(b, freq);
1707 _exits.append(bpp);
1708 } else {
1709 // branch into nested loop
1710 CFGLoop* ch = b->_loop;
1711 ch->_freq += freq;
1712 }
1713 }
1715 //------------------------------in_loop_nest-----------------------------------
1716 // Determine if block b is in the receiver's loop nest.
1717 bool CFGLoop::in_loop_nest(Block* b) {
1718 int depth = _depth;
1719 CFGLoop* b_loop = b->_loop;
1720 int b_depth = b_loop->_depth;
1721 if (depth == b_depth) {
1722 return true;
1723 }
1724 while (b_depth > depth) {
1725 b_loop = b_loop->_parent;
1726 b_depth = b_loop->_depth;
1727 }
1728 return b_loop == this;
1729 }
1731 //------------------------------scale_freq-------------------------------------
1732 // Scale frequency of loops and blocks by trip counts from outer loops
1733 // Do a top down traversal of loop tree (visit outer loops first.)
1734 void CFGLoop::scale_freq() {
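// Added example (illustrative numbers): if this loop runs with frequency 2
// relative to the method entry (_freq == 2) and is estimated to iterate about
// 10 times (trip_count() == 10), loop_freq is 20 and a member block with
// per-iteration frequency 0.5 ends up with a scaled _freq of 10.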
1735 float loop_freq = _freq * trip_count();
1736 for (int i = 0; i < _members.length(); i++) {
1737 CFGElement* s = _members.at(i);
1738 s->_freq *= loop_freq;
1739 }
1740 CFGLoop* ch = _child;
1741 while (ch != NULL) {
1742 ch->scale_freq();
1743 ch = ch->_sibling;
1744 }
1745 }
1747 #ifndef PRODUCT
1748 //------------------------------dump_tree--------------------------------------
1749 void CFGLoop::dump_tree() const {
1750 dump();
1751 if (_child != NULL) _child->dump_tree();
1752 if (_sibling != NULL) _sibling->dump_tree();
1753 }
1755 //------------------------------dump-------------------------------------------
1756 void CFGLoop::dump() const {
1757 for (int i = 0; i < _depth; i++) tty->print(" ");
1758 tty->print("%s: %d trip_count: %6.0f freq: %6.0f\n",
1759 _depth == 0 ? "Method" : "Loop", _id, trip_count(), _freq);
1760 for (int i = 0; i < _depth; i++) tty->print(" ");
1761 tty->print(" members:");
1762 int k = 0;
1763 for (int i = 0; i < _members.length(); i++) {
1764 if (k++ >= 6) {
1765 tty->print("\n ");
1766 for (int j = 0; j < _depth+1; j++) tty->print(" ");
1767 k = 0;
1768 }
1769 CFGElement *s = _members.at(i);
1770 if (s->is_block()) {
1771 Block *b = s->as_Block();
1772 tty->print(" B%d(%6.3f)", b->_pre_order, b->_freq);
1773 } else {
1774 CFGLoop* lp = s->as_CFGLoop();
1775 tty->print(" L%d(%6.3f)", lp->_id, lp->_freq);
1776 }
1777 }
1778 tty->print("\n");
1779 for (int i = 0; i < _depth; i++) tty->print(" ");
1780 tty->print(" exits: ");
1781 k = 0;
1782 for (int i = 0; i < _exits.length(); i++) {
1783 if (k++ >= 7) {
1784 tty->print("\n ");
1785 for (int j = 0; j < _depth+1; j++) tty->print(" ");
1786 k = 0;
1787 }
1788 Block *blk = _exits.at(i).get_target();
1789 float prob = _exits.at(i).get_prob();
1790 tty->print(" ->%d@%d%%", blk->_pre_order, (int)(prob*100));
1791 }
1792 tty->print("\n");
1793 }
1794 #endif