Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/block.hpp"
29 #include "opto/cfgnode.hpp"
30 #include "opto/chaitin.hpp"
31 #include "opto/loopnode.hpp"
32 #include "opto/machnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/rootnode.hpp"
36 #include "utilities/copy.hpp"
38 // Optimization - Graph Style
41 //-----------------------------------------------------------------------------
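// Grow the node-index to Block* mapping so index i is valid: start with one
// arena slot, double the capacity until it covers i, and zero-fill the newly
// exposed tail so unmapped indices read back as NULL.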
42 void Block_Array::grow( uint i ) {
43 assert(i >= Max(), "must be an overflow");
44 debug_only(_limit = i+1);
45 if( i < _size ) return;
46 if( !_size ) {
47 _size = 1;
48 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
49 _blocks[0] = NULL;
50 }
51 uint old = _size;
52 while( i >= _size ) _size <<= 1; // Double to fit
53 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
54 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
55 }
57 //=============================================================================
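// Remove the block at index i by sliding the tail of the list down one slot
// and then popping the (now duplicate) last entry.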
58 void Block_List::remove(uint i) {
59 assert(i < _cnt, "index out of bounds");
60 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
61 pop(); // shrink list by one block
62 }
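// Insert b at index i: push() grows the list by one, the tail is shifted up
// one slot, and b is written into the hole at i.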
64 void Block_List::insert(uint i, Block *b) {
65 push(b); // grow list by one block
66 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
67 _blocks[i] = b;
68 }
70 #ifndef PRODUCT
71 void Block_List::print() {
72 for (uint i=0; i < size(); i++) {
73 tty->print("B%d ", _blocks[i]->_pre_order);
74 }
75 tty->print("size = %d\n", size());
76 }
77 #endif
79 //=============================================================================
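// Alignment required when emitting this block: method entry alignment for the
// Root block, interior entry alignment for the Start block, the computed loop
// alignment for loop heads, and 1 (no alignment) otherwise.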
81 uint Block::code_alignment() {
82 // Check for Root block
83 if( _pre_order == 0 ) return CodeEntryAlignment;
84 // Check for Start block
85 if( _pre_order == 1 ) return InteriorEntryAlignment;
86 // Check for loop alignment
87 if (has_loop_alignment()) return loop_alignment();
89 return 1; // no particular alignment
90 }
92 uint Block::compute_loop_alignment() {
93 Node *h = head();
94 if( h->is_Loop() && h->as_Loop()->is_inner_loop() ) {
95 // Pre- and post-loops have low trip count so do not bother with
96 // NOPs for align loop head. The constants are hidden from tuning
97 // but only because my "divide by 4" heuristic surely gets nearly
98 // all possible gain (a "do not align at all" heuristic has a
99 // chance of getting a really tiny gain).
100 if( h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
101 h->as_CountedLoop()->is_post_loop()) )
102 return (OptoLoopAlignment > 4) ? (OptoLoopAlignment>>2) : 1;
103 // Loops with low backedge frequency should not be aligned.
104 Node *n = h->in(LoopNode::LoopBackControl)->in(0);
105 if( n->is_MachIf() && n->as_MachIf()->_prob < 0.01 ) {
106 return 1; // Loop does not loop, more often than not!
107 }
108 return OptoLoopAlignment; // Otherwise align loop head
109 }
111 return 1; // no particular alignment
112 }
114 //-----------------------------------------------------------------------------
115 // Compute the size of the first 'inst_cnt' instructions in this block.
116 // Return the number of instructions left to compute if the block has
117 // fewer than 'inst_cnt' instructions. Stop and return 0 if sum_size
118 // exceeds OptoLoopAlignment.
119 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
120 PhaseRegAlloc* ra) {
121 uint last_inst = _nodes.size();
122 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
123 uint inst_size = _nodes[j]->size(ra);
124 if( inst_size > 0 ) {
125 inst_cnt--;
126 uint sz = sum_size + inst_size;
127 if( sz <= (uint)OptoLoopAlignment ) {
128 // Compute size of instructions which fit into fetch buffer only
129 // since all inst_cnt instructions will not fit even if we align them.
130 sum_size = sz;
131 } else {
132 return 0;
133 }
134 }
135 }
136 return inst_cnt;
137 }
139 //-----------------------------------------------------------------------------
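// Linear search for node n in this block's node list; not finding it is a
// fatal error (ShouldNotReachHere).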
140 uint Block::find_node( const Node *n ) const {
141 for( uint i = 0; i < _nodes.size(); i++ ) {
142 if( _nodes[i] == n )
143 return i;
144 }
145 ShouldNotReachHere();
146 return 0;
147 }
149 // Find and remove n from block list
150 void Block::find_remove( const Node *n ) {
151 _nodes.remove(find_node(n));
152 }
154 //------------------------------is_Empty---------------------------------------
155 // Return empty status of a block. Empty blocks contain only the head, other
156 // ideal nodes, and an optional trailing goto.
157 int Block::is_Empty() const {
159 // Root or start block is not considered empty
160 if (head()->is_Root() || head()->is_Start()) {
161 return not_empty;
162 }
164 int success_result = completely_empty;
165 int end_idx = _nodes.size()-1;
167 // Check for ending goto
168 if ((end_idx > 0) && (_nodes[end_idx]->is_Goto())) {
169 success_result = empty_with_goto;
170 end_idx--;
171 }
173 // Unreachable blocks are considered empty
174 if (num_preds() <= 1) {
175 return success_result;
176 }
178 // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
179 // turn directly into code, because only MachNodes have non-trivial
180 // emit() functions.
181 while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
182 end_idx--;
183 }
185 // No room for any interesting instructions?
186 if (end_idx == 0) {
187 return success_result;
188 }
190 return not_empty;
191 }
193 //------------------------------has_uncommon_code------------------------------
194 // Return true if the block's code implies that it is likely to be
195 // executed infrequently. Check to see if the block ends in a Halt or
196 // a low probability call.
197 bool Block::has_uncommon_code() const {
198 Node* en = end();
200 if (en->is_Goto())
201 en = en->in(0);
202 if (en->is_Catch())
203 en = en->in(0);
204 if (en->is_Proj() && en->in(0)->is_MachCall()) {
205 MachCallNode* call = en->in(0)->as_MachCall();
206 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
207 // This is true for slow-path stubs like new_{instance,array},
208 // slow_arraycopy, complete_monitor_locking, uncommon_trap.
209 // The magic number corresponds to the probability of an uncommon_trap,
210 // even though it is a count not a probability.
211 return true;
212 }
213 }
215 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
216 return op == Op_Halt;
217 }
219 //------------------------------is_uncommon------------------------------------
220 // True if block is low enough frequency or guarded by a test which
221 // mostly does not go here.
222 bool Block::is_uncommon( Block_Array &bbs ) const {
223 // Initial blocks must never be moved, so are never uncommon.
224 if (head()->is_Root() || head()->is_Start()) return false;
226 // Check for way-low freq
227 if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
229 // Look for code shape indicating uncommon_trap or slow path
230 if (has_uncommon_code()) return true;
232 const float epsilon = 0.05f;
233 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
234 uint uncommon_preds = 0;
235 uint freq_preds = 0;
236 uint uncommon_for_freq_preds = 0;
238 for( uint i=1; i<num_preds(); i++ ) {
239 Block* guard = bbs[pred(i)->_idx];
240 // Check to see if this block follows its guard 1 time out of 10000
241 // or less.
242 //
243 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
244 // we intend to be "uncommon", such as slow-path TLE allocation,
245 // predicted call failure, and uncommon trap triggers.
246 //
247 // Use an epsilon value of 5% to allow for variability in frequency
248 // predictions and floating point calculations. The net effect is
249 // that guard_factor is set to 9500.
250 //
251 // Ignore low-frequency blocks.
252 // The next check is (guard->_freq < 1.e-5 * 9500.).
253 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
254 uncommon_preds++;
255 } else {
256 freq_preds++;
257 if( _freq < guard->_freq * guard_factor ) {
258 uncommon_for_freq_preds++;
259 }
260 }
261 }
262 if( num_preds() > 1 &&
263 // The block is uncommon if all preds are uncommon or
264 (uncommon_preds == (num_preds()-1) ||
265 // it is uncommon for all frequent preds.
266 uncommon_for_freq_preds == freq_preds) ) {
267 return true;
268 }
269 return false;
270 }
272 //------------------------------dump-------------------------------------------
273 #ifndef PRODUCT
274 void Block::dump_bidx(const Block* orig) const {
275 if (_pre_order) tty->print("B%d",_pre_order);
276 else tty->print("N%d", head()->_idx);
278 if (Verbose && orig != this) {
279 // Dump the original block's idx
280 tty->print(" (");
281 orig->dump_bidx(orig);
282 tty->print(")");
283 }
284 }
286 void Block::dump_pred(const Block_Array *bbs, Block* orig) const {
287 if (is_connector()) {
288 for (uint i=1; i<num_preds(); i++) {
289 Block *p = ((*bbs)[pred(i)->_idx]);
290 p->dump_pred(bbs, orig);
291 }
292 } else {
293 dump_bidx(orig);
294 tty->print(" ");
295 }
296 }
298 void Block::dump_head( const Block_Array *bbs ) const {
299 // Print the basic block
300 dump_bidx(this);
301 tty->print(": #\t");
303 // Print the incoming CFG edges and the outgoing CFG edges
304 for( uint i=0; i<_num_succs; i++ ) {
305 non_connector_successor(i)->dump_bidx(_succs[i]);
306 tty->print(" ");
307 }
308 tty->print("<- ");
309 if( head()->is_block_start() ) {
310 for (uint i=1; i<num_preds(); i++) {
311 Node *s = pred(i);
312 if (bbs) {
313 Block *p = (*bbs)[s->_idx];
314 p->dump_pred(bbs, p);
315 } else {
316 while (!s->is_block_start())
317 s = s->in(0);
318 tty->print("N%d ", s->_idx );
319 }
320 }
321 } else
322 tty->print("BLOCK HEAD IS JUNK ");
324 // Print loop, if any
325 const Block *bhead = this; // Head of self-loop
326 Node *bh = bhead->head();
327 if( bbs && bh->is_Loop() && !head()->is_Root() ) {
328 LoopNode *loop = bh->as_Loop();
329 const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
330 while (bx->is_connector()) {
331 bx = (*bbs)[bx->pred(1)->_idx];
332 }
333 tty->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
334 // Dump any loop-specific bits, especially for CountedLoops.
335 loop->dump_spec(tty);
336 } else if (has_loop_alignment()) {
337 tty->print(" top-of-loop");
338 }
339 tty->print(" Freq: %g",_freq);
340 if( Verbose || WizardMode ) {
341 tty->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
342 tty->print(" RegPressure: %d",_reg_pressure);
343 tty->print(" IHRP Index: %d",_ihrp_index);
344 tty->print(" FRegPressure: %d",_freg_pressure);
345 tty->print(" FHRP Index: %d",_fhrp_index);
346 }
347 tty->print_cr("");
348 }
350 void Block::dump() const { dump(0); }
352 void Block::dump( const Block_Array *bbs ) const {
353 dump_head(bbs);
354 uint cnt = _nodes.size();
355 for( uint i=0; i<cnt; i++ )
356 _nodes[i]->dump();
357 tty->print("\n");
358 }
359 #endif
361 //=============================================================================
362 //------------------------------PhaseCFG---------------------------------------
363 PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
364 Phase(CFG),
365 _bbs(a),
366 _root(r),
367 _node_latency(NULL)
368 #ifndef PRODUCT
369 , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
370 #endif
371 #ifdef ASSERT
372 , _raw_oops(a)
373 #endif
374 {
375 ResourceMark rm;
376 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
377 // then Match it into a machine-specific Node. Then clone the machine
378 // Node on demand.
379 Node *x = new (C, 1) GotoNode(NULL);
380 x->init_req(0, x);
381 _goto = m.match_tree(x);
382 assert(_goto != NULL, "");
383 _goto->set_req(0,_goto);
385 // Build the CFG in Reverse Post Order
386 _num_blocks = build_cfg();
387 _broot = _bbs[_root->_idx];
388 }
390 //------------------------------build_cfg--------------------------------------
391 // Build a proper looking CFG. Make every block begin with either a StartNode
392 // or a RegionNode. Make every block end with either a Goto, If or Return.
393 // The RootNode both starts and ends its own block. Do this with a recursive
394 // backwards walk over the control edges.
395 uint PhaseCFG::build_cfg() {
396 Arena *a = Thread::current()->resource_area();
397 VectorSet visited(a);
399 // Allocate stack with enough space to avoid frequent realloc
400 Node_Stack nstack(a, C->unique() >> 1);
401 nstack.push(_root, 0);
402 uint sum = 0; // Counter for blocks
404 while (nstack.is_nonempty()) {
405 // node and in's index from stack's top
406 // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
407 // only nodes which point to the start of basic block (see below).
408 Node *np = nstack.node();
409 // idx > 0, except for the first node (_root) pushed on stack
410 // at the beginning when idx == 0.
411 // We will use the condition (idx == 0) later to end the build.
412 uint idx = nstack.index();
413 Node *proj = np->in(idx);
414 const Node *x = proj->is_block_proj();
415 // Does the block end with a proper block-ending Node? One of Return,
416 // If or Goto? (This check should be done for visited nodes also).
417 if (x == NULL) { // Does not end right...
418 Node *g = _goto->clone(); // Force it to end in a Goto
419 g->set_req(0, proj);
420 np->set_req(idx, g);
421 x = proj = g;
422 }
423 if (!visited.test_set(x->_idx)) { // Visit this block once
424 // Skip any control-pinned middle'in stuff
425 Node *p = proj;
426 do {
427 proj = p; // Update pointer to last Control
428 p = p->in(0); // Move control forward
429 } while( !p->is_block_proj() &&
430 !p->is_block_start() );
431 // Make the block begin with one of Region or StartNode.
432 if( !p->is_block_start() ) {
433 RegionNode *r = new (C, 2) RegionNode( 2 );
434 r->init_req(1, p); // Insert RegionNode in the way
435 proj->set_req(0, r); // Insert RegionNode in the way
436 p = r;
437 }
438 // 'p' now points to the start of this basic block
440 // Put self in array of basic blocks
441 Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
442 _bbs.map(p->_idx,bb);
443 _bbs.map(x->_idx,bb);
444 if( x != p ) // Only for root is x == p
445 bb->_nodes.push((Node*)x);
447 // Now handle predecessors
448 ++sum; // Count 1 for self block
449 uint cnt = bb->num_preds();
450 for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
451 Node *prevproj = p->in(i); // Get prior input
452 assert( !prevproj->is_Con(), "dead input not removed" );
453 // Check to see if p->in(i) is a "control-dependent" CFG edge -
454 // i.e., it splits at the source (via an IF or SWITCH) and merges
455 // at the destination (via a many-input Region).
456 // This breaks critical edges. The RegionNode to start the block
457 // will be added when <p,i> is pulled off the node stack
458 if ( cnt > 2 ) { // Merging many things?
459 assert( prevproj== bb->pred(i),"");
460 if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
461 // Force a block on the control-dependent edge
462 Node *g = _goto->clone(); // Force it to end in a Goto
463 g->set_req(0,prevproj);
464 p->set_req(i,g);
465 }
466 }
467 nstack.push(p, i); // 'p' is RegionNode or StartNode
468 }
469 } else { // Post-processing visited nodes
470 nstack.pop(); // remove node from stack
471 // Check if it is the first node pushed on the stack at the beginning.
472 if (idx == 0) break; // end of the build
473 // Find predecessor basic block
474 Block *pb = _bbs[x->_idx];
475 // Insert into nodes array, if not already there
476 if( !_bbs.lookup(proj->_idx) ) {
477 assert( x != proj, "" );
478 // Map basic block of projection
479 _bbs.map(proj->_idx,pb);
480 pb->_nodes.push(proj);
481 }
482 // Insert self as a child of my predecessor block
483 pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
484 assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
485 "too many control users, not a CFG?" );
486 }
487 }
488 // Return number of basic blocks for all children and self
489 return sum;
490 }
492 //------------------------------insert_goto_at---------------------------------
493 // Inserts a goto & corresponding basic block between
494 // block[block_no] and its succ_no'th successor block
495 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
496 // get block with block_no
497 assert(block_no < _num_blocks, "illegal block number");
498 Block* in = _blocks[block_no];
499 // get successor block succ_no
500 assert(succ_no < in->_num_succs, "illegal successor number");
501 Block* out = in->_succs[succ_no];
502 // Compute frequency of the new block. Do this before inserting
503 // new block in case succ_prob() needs to infer the probability from
504 // surrounding blocks.
505 float freq = in->_freq * in->succ_prob(succ_no);
506 // get ProjNode corresponding to the succ_no'th successor of the in block
507 ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
508 // create region for basic block
509 RegionNode* region = new (C, 2) RegionNode(2);
510 region->init_req(1, proj);
511 // setup corresponding basic block
512 Block* block = new (_bbs._arena) Block(_bbs._arena, region);
513 _bbs.map(region->_idx, block);
514 C->regalloc()->set_bad(region->_idx);
515 // add a goto node
516 Node* gto = _goto->clone(); // get a new goto node
517 gto->set_req(0, region);
518 // add it to the basic block
519 block->_nodes.push(gto);
520 _bbs.map(gto->_idx, block);
521 C->regalloc()->set_bad(gto->_idx);
522 // hook up successor block
523 block->_succs.map(block->_num_succs++, out);
524 // remap successor's predecessors if necessary
525 for (uint i = 1; i < out->num_preds(); i++) {
526 if (out->pred(i) == proj) out->head()->set_req(i, gto);
527 }
528 // remap predecessor's successor to new block
529 in->_succs.map(succ_no, block);
530 // Set the frequency of the new block
531 block->_freq = freq;
532 // add new basic block to basic block list
533 _blocks.insert(block_no + 1, block);
534 _num_blocks++;
535 }
537 //------------------------------no_flip_branch---------------------------------
538 // Does this block end in a multiway branch that cannot have the default case
539 // flipped for another case?
540 static bool no_flip_branch( Block *b ) {
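// The last _num_succs nodes in a block are its outgoing projections, so the
// candidate branch node sits just in front of them.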
541 int branch_idx = b->_nodes.size() - b->_num_succs-1;
542 if( branch_idx < 1 ) return false;
543 Node *bra = b->_nodes[branch_idx];
544 if( bra->is_Catch() )
545 return true;
546 if( bra->is_Mach() ) {
547 if( bra->is_MachNullCheck() )
548 return true;
549 int iop = bra->as_Mach()->ideal_Opcode();
550 if( iop == Op_FastLock || iop == Op_FastUnlock )
551 return true;
552 }
553 return false;
554 }
556 //------------------------------convert_NeverBranch_to_Goto--------------------
557 // Check for NeverBranch at block end. This needs to become a GOTO to the
558 // true target. NeverBranch nodes are treated as a conditional branch that always
559 // goes the same direction for most of the optimizer and are used to give a
560 // fake exit path to infinite loops. At this late stage they need to turn
561 // into Goto's so that when you enter the infinite loop you indeed hang.
562 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
563 // Find true target
564 int end_idx = b->end_idx();
565 int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
566 Block *succ = b->_succs[idx];
567 Node* gto = _goto->clone(); // get a new goto node
568 gto->set_req(0, b->head());
569 Node *bp = b->_nodes[end_idx];
570 b->_nodes.map(end_idx,gto); // Slam over NeverBranch
571 _bbs.map(gto->_idx, b);
572 C->regalloc()->set_bad(gto->_idx);
573 b->_nodes.pop(); // Yank projections
574 b->_nodes.pop(); // Yank projections
575 b->_succs.map(0,succ); // Map only successor
576 b->_num_succs = 1;
577 // remap successor's predecessors if necessary
578 uint j;
579 for( j = 1; j < succ->num_preds(); j++)
580 if( succ->pred(j)->in(0) == bp )
581 succ->head()->set_req(j, gto);
582 // Kill alternate exit path
583 Block *dead = b->_succs[1-idx];
584 for( j = 1; j < dead->num_preds(); j++)
585 if( dead->pred(j)->in(0) == bp )
586 break;
587 // Scan through block, yanking dead path from
588 // all regions and phis.
589 dead->head()->del_req(j);
590 for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
591 dead->_nodes[k]->del_req(j);
592 }
594 //------------------------------move_to_next-----------------------------------
595 // Helper function to move block bx to the slot following b_index. Return
596 // true if the move is successful, otherwise false
597 bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
598 if (bx == NULL) return false;
600 // Return false if bx is already scheduled.
601 uint bx_index = bx->_pre_order;
602 if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
603 return false;
604 }
606 // Find the current index of block bx on the block list
607 bx_index = b_index + 1;
608 while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
609 assert(_blocks[bx_index] == bx, "block not found");
611 // If the previous block conditionally falls into bx, return false,
612 // because moving bx will create an extra jump.
613 for(uint k = 1; k < bx->num_preds(); k++ ) {
614 Block* pred = _bbs[bx->pred(k)->_idx];
615 if (pred == _blocks[bx_index-1]) {
616 if (pred->_num_succs != 1) {
617 return false;
618 }
619 }
620 }
622 // Reinsert bx just past block 'b'
623 _blocks.remove(bx_index);
624 _blocks.insert(b_index + 1, bx);
625 return true;
626 }
628 //------------------------------move_to_end------------------------------------
629 // Move empty and uncommon blocks to the end.
630 void PhaseCFG::move_to_end(Block *b, uint i) {
631 int e = b->is_Empty();
632 if (e != Block::not_empty) {
633 if (e == Block::empty_with_goto) {
634 // Remove the goto, but leave the block.
635 b->_nodes.pop();
636 }
637 // Mark this block as a connector block, which will cause it to be
638 // ignored in certain functions such as non_connector_successor().
639 b->set_connector();
640 }
641 // Move the empty block to the end, and don't recheck.
642 _blocks.remove(i);
643 _blocks.push(b);
644 }
646 //---------------------------set_loop_alignment--------------------------------
647 // Set loop alignment for every block
648 void PhaseCFG::set_loop_alignment() {
649 uint last = _num_blocks;
650 assert( _blocks[0] == _broot, "" );
652 for (uint i = 1; i < last; i++ ) {
653 Block *b = _blocks[i];
654 if (b->head()->is_Loop()) {
655 b->set_loop_alignment(b);
656 }
657 }
658 }
660 //-----------------------------remove_empty------------------------------------
661 // Turn empty basic blocks into "connector" blocks. Move uncommon blocks
662 // to the end.
663 void PhaseCFG::remove_empty() {
664 // Move uncommon blocks to the end
665 uint last = _num_blocks;
666 assert( _blocks[0] == _broot, "" );
668 for (uint i = 1; i < last; i++) {
669 Block *b = _blocks[i];
670 if (b->is_connector()) break;
672 // Check for NeverBranch at block end. This needs to become a GOTO to the
673 // true target. NeverBranch nodes are treated as a conditional branch that
674 // always goes the same direction for most of the optimizer and are used
675 // to give a fake exit path to infinite loops. At this late stage they
676 // need to turn into Goto's so that when you enter the infinite loop you
677 // indeed hang.
678 if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
679 convert_NeverBranch_to_Goto(b);
681 // Look for uncommon blocks and move to end.
682 if (!C->do_freq_based_layout()) {
683 if( b->is_uncommon(_bbs) ) {
684 move_to_end(b, i);
685 last--; // No longer check for being uncommon!
686 if( no_flip_branch(b) ) { // Fall-thru case must follow?
687 b = _blocks[i]; // Find the fall-thru block
688 move_to_end(b, i);
689 last--;
690 }
691 i--; // backup block counter post-increment
692 }
693 }
694 }
696 // Move empty blocks to the end
697 last = _num_blocks;
698 for (uint i = 1; i < last; i++) {
699 Block *b = _blocks[i];
700 if (b->is_Empty() != Block::not_empty) {
701 move_to_end(b, i);
702 last--;
703 i--;
704 }
705 } // End of for all blocks
706 }
708 //-----------------------------fixup_flow--------------------------------------
709 // Fix up the final control flow for basic blocks.
710 void PhaseCFG::fixup_flow() {
711 // Fixup final control flow for the blocks. Remove jump-to-next
712 // block. If neither arm of an IF follows the conditional branch, we
713 // have to add a second jump after the conditional. We place the
714 // TRUE branch target in succs[0] for both GOTOs and IFs.
715 for (uint i=0; i < _num_blocks; i++) {
716 Block *b = _blocks[i];
717 b->_pre_order = i; // turn pre-order into block-index
719 // Connector blocks need no further processing.
720 if (b->is_connector()) {
721 assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
722 "All connector blocks should sink to the end");
723 continue;
724 }
725 assert(b->is_Empty() != Block::completely_empty,
726 "Empty blocks should be connectors");
728 Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
729 Block *bs0 = b->non_connector_successor(0);
731 // Check for multi-way branches where I cannot negate the test to
732 // exchange the true and false targets.
733 if( no_flip_branch( b ) ) {
734 // Find fall through case - if must fall into its target
735 int branch_idx = b->_nodes.size() - b->_num_succs;
736 for (uint j2 = 0; j2 < b->_num_succs; j2++) {
737 const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
738 if (p->_con == 0) {
739 // successor j2 is fall through case
740 if (b->non_connector_successor(j2) != bnext) {
741 // but it is not the next block => insert a goto
742 insert_goto_at(i, j2);
743 }
744 // Put taken branch in slot 0
745 if( j2 == 0 && b->_num_succs == 2) {
746 // Flip targets in succs map
747 Block *tbs0 = b->_succs[0];
748 Block *tbs1 = b->_succs[1];
749 b->_succs.map( 0, tbs1 );
750 b->_succs.map( 1, tbs0 );
751 }
752 break;
753 }
754 }
755 // Remove all CatchProjs
756 for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();
758 } else if (b->_num_succs == 1) {
759 // Block ends in a Goto?
760 if (bnext == bs0) {
761 // We fall into next block; remove the Goto
762 b->_nodes.pop();
763 }
765 } else if( b->_num_succs == 2 ) { // Block ends in a If?
766 // Get opcode of 1st projection (matches _succs[0])
767 // Note: Since this basic block has 2 exits, the last 2 nodes must
768 // be projections (in any order), the 3rd last node must be
769 // the IfNode (we have excluded other 2-way exits such as
770 // CatchNodes already).
771 MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
772 ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
773 ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
775 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
776 assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
777 assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");
779 Block *bs1 = b->non_connector_successor(1);
781 // Check for neither successor block following the current
782 // block ending in a conditional. If so, move one of the
783 // successors after the current one, provided that the
784 // successor was previously unscheduled, but moveable
785 // (i.e., all paths to it involve a branch).
786 if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
787 // Choose the more common successor based on the probability
788 // of the conditional branch.
789 Block *bx = bs0;
790 Block *by = bs1;
792 // _prob is the probability of taking the true path. Make
793 // p the probability of taking successor #1.
794 float p = iff->as_MachIf()->_prob;
795 if( proj0->Opcode() == Op_IfTrue ) {
796 p = 1.0 - p;
797 }
799 // Prefer successor #1 if p > 0.5
800 if (p > PROB_FAIR) {
801 bx = bs1;
802 by = bs0;
803 }
805 // Attempt the more common successor first
806 if (move_to_next(bx, i)) {
807 bnext = bx;
808 } else if (move_to_next(by, i)) {
809 bnext = by;
810 }
811 }
813 // Check for conditional branching the wrong way. Negate
814 // conditional, if needed, so it falls into the following block
815 // and branches to the not-following block.
817 // Check for the next block being in succs[0]. We are going to branch
818 // to succs[0], so we want the fall-thru case as the next block in
819 // succs[1].
820 if (bnext == bs0) {
821 // Fall-thru case in succs[0], so flip targets in succs map
822 Block *tbs0 = b->_succs[0];
823 Block *tbs1 = b->_succs[1];
824 b->_succs.map( 0, tbs1 );
825 b->_succs.map( 1, tbs0 );
826 // Flip projection for each target
827 { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }
829 } else if( bnext != bs1 ) {
830 // Need a double-branch
831 // The existing conditional branch need not change.
832 // Add an unconditional branch to the false target.
833 // Alas, it must appear in its own block and adding a
834 // block this late in the game is complicated. Sigh.
835 insert_goto_at(i, 1);
836 }
838 // Make sure we TRUE branch to the target
839 if( proj0->Opcode() == Op_IfFalse ) {
840 iff->negate();
841 }
843 b->_nodes.pop(); // Remove IfFalse & IfTrue projections
844 b->_nodes.pop();
846 } else {
847 // Multi-exit block, e.g. a switch statement
848 // But we don't need to do anything here
849 }
850 } // End of for all blocks
851 }
854 //------------------------------dump-------------------------------------------
855 #ifndef PRODUCT
856 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
857 const Node *x = end->is_block_proj();
858 assert( x, "not a CFG" );
860 // Do not visit this block again
861 if( visited.test_set(x->_idx) ) return;
863 // Skip through this block
864 const Node *p = x;
865 do {
866 p = p->in(0); // Move control forward
867 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
868 } while( !p->is_block_start() );
870 // Recursively visit
871 for( uint i=1; i<p->req(); i++ )
872 _dump_cfg(p->in(i),visited);
874 // Dump the block
875 _bbs[p->_idx]->dump(&_bbs);
876 }
878 void PhaseCFG::dump( ) const {
879 tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
880 if( _blocks.size() ) { // Did we do basic-block layout?
881 for( uint i=0; i<_num_blocks; i++ )
882 _blocks[i]->dump(&_bbs);
883 } else { // Else do it with a DFS
884 VectorSet visited(_bbs._arena);
885 _dump_cfg(_root,visited);
886 }
887 }
889 void PhaseCFG::dump_headers() {
890 for( uint i = 0; i < _num_blocks; i++ ) {
891 if( _blocks[i] == NULL ) continue;
892 _blocks[i]->dump_head(&_bbs);
893 }
894 }
896 void PhaseCFG::verify( ) const {
897 #ifdef ASSERT
898 // Verify sane CFG
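// Checks: every node is mapped to its enclosing block, CreateEx comes right
// after the block's Phis, in-block definitions precede their uses (modulo
// Phis on loop heads and monitor uses), and each block ends in a proper
// block projection.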
899 for( uint i = 0; i < _num_blocks; i++ ) {
900 Block *b = _blocks[i];
901 uint cnt = b->_nodes.size();
902 uint j;
903 for( j = 0; j < cnt; j++ ) {
904 Node *n = b->_nodes[j];
905 assert( _bbs[n->_idx] == b, "" );
906 if( j >= 1 && n->is_Mach() &&
907 n->as_Mach()->ideal_Opcode() == Op_CreateEx ) {
908 assert( j == 1 || b->_nodes[j-1]->is_Phi(),
909 "CreateEx must be first instruction in block" );
910 }
911 for( uint k = 0; k < n->req(); k++ ) {
912 Node *def = n->in(k);
913 if( def && def != n ) {
914 assert( _bbs[def->_idx] || def->is_Con(),
915 "must have block; constants for debug info ok" );
916 // Verify that instructions in the block are in the correct order.
917 // Uses must follow their definitions if they are in the same block.
918 // Mostly done to check that MachSpillCopy nodes are placed correctly
919 // when CreateEx node is moved in build_ifg_physical().
920 if( _bbs[def->_idx] == b &&
921 !(b->head()->is_Loop() && n->is_Phi()) &&
922 // See (+++) comment in reg_split.cpp
923 !(n->jvms() != NULL && n->jvms()->is_monitor_use(k)) ) {
924 bool is_loop = false;
925 if (n->is_Phi()) {
926 for( uint l = 1; l < def->req(); l++ ) {
927 if (n == def->in(l)) {
928 is_loop = true;
929 break; // Some kind of loop
930 }
931 }
932 }
933 assert( is_loop || b->find_node(def) < j, "uses must follow definitions" );
934 }
935 if( def->is_SafePointScalarObject() ) {
936 assert(_bbs[def->_idx] == b, "SafePointScalarObject Node should be at the same block as its SafePoint node");
937 assert(_bbs[def->_idx] == _bbs[def->in(0)->_idx], "SafePointScalarObject Node should be at the same block as its control edge");
938 }
939 }
940 }
941 }
943 j = b->end_idx();
944 Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
945 assert( bp, "last instruction must be a block proj" );
946 assert( bp == b->_nodes[j], "wrong number of successors for this block" );
947 if( bp->is_Catch() ) {
948 while( b->_nodes[--j]->Opcode() == Op_MachProj ) ;
949 assert( b->_nodes[j]->is_Call(), "CatchProj must follow call" );
950 }
951 else if( bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If ) {
952 assert( b->_num_succs == 2, "Conditional branch must have two targets");
953 }
954 }
955 #endif
956 }
957 #endif
959 //=============================================================================
960 //------------------------------UnionFind--------------------------------------
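// Union-Find over block/trace ids, backed by a resource-area array that maps
// each id to its set representative. Union always points the larger id at the
// smaller one (see the "always union smaller" asserts), so Find walks strictly
// downward.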
961 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
962 Copy::zero_to_bytes( _indices, sizeof(uint)*max );
963 }
965 void UnionFind::extend( uint from_idx, uint to_idx ) {
966 _nesting.check();
967 if( from_idx >= _max ) {
968 uint size = 16;
969 while( size <= from_idx ) size <<=1;
970 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
971 _max = size;
972 }
973 while( _cnt <= from_idx ) _indices[_cnt++] = 0;
974 _indices[from_idx] = to_idx;
975 }
977 void UnionFind::reset( uint max ) {
978 assert( max <= max_uint, "Must fit within uint" );
979 // Force the Union-Find mapping to be at least this large
980 extend(max,0);
981 // Initialize to be the ID mapping.
982 for( uint i=0; i<max; i++ ) map(i,i);
983 }
985 //------------------------------Find_compress----------------------------------
986 // Straight out of Tarjan's union-find algorithm
987 uint UnionFind::Find_compress( uint idx ) {
988 uint cur = idx;
989 uint next = lookup(cur);
990 while( next != cur ) { // Scan chain of equivalences
991 assert( next < cur, "always union smaller" );
992 cur = next; // until find a fixed-point
993 next = lookup(cur);
994 }
995 // Core of union-find algorithm: update chain of
996 // equivalences to be equal to the root.
997 while( idx != next ) {
998 uint tmp = lookup(idx);
999 map(idx, next);
1000 idx = tmp;
1001 }
1002 return idx;
1003 }
1005 //------------------------------Find_const-------------------------------------
1006 // Like Find above, but no path compress, so bad asymptotic behavior
1007 uint UnionFind::Find_const( uint idx ) const {
1008 if( idx == 0 ) return idx; // Ignore the zero idx
1009 // Off the end? This can happen during debugging dumps
1010 // when data structures have not finished being updated.
1011 if( idx >= _max ) return idx;
1012 uint next = lookup(idx);
1013 while( next != idx ) { // Scan chain of equivalences
1014 idx = next; // until find a fixed-point
1015 next = lookup(idx);
1016 }
1017 return next;
1018 }
1020 //------------------------------Union------------------------------------------
1021 // union 2 sets together.
1022 void UnionFind::Union( uint idx1, uint idx2 ) {
1023 uint src = Find(idx1);
1024 uint dst = Find(idx2);
1025 assert( src, "" );
1026 assert( dst, "" );
1027 assert( src < _max, "oob" );
1028 assert( dst < _max, "oob" );
1029 assert( src < dst, "always union smaller" );
1030 map(dst,src);
1031 }
1033 #ifndef PRODUCT
1034 static void edge_dump(GrowableArray<CFGEdge *> *edges) {
1035 tty->print_cr("---- Edges ----");
1036 for (int i = 0; i < edges->length(); i++) {
1037 CFGEdge *e = edges->at(i);
1038 if (e != NULL) {
1039 edges->at(i)->dump();
1040 }
1041 }
1042 }
1044 static void trace_dump(Trace *traces[], int count) {
1045 tty->print_cr("---- Traces ----");
1046 for (int i = 0; i < count; i++) {
1047 Trace *tr = traces[i];
1048 if (tr != NULL) {
1049 tr->dump();
1050 }
1051 }
1052 }
1054 void Trace::dump( ) const {
1055 tty->print_cr("Trace (freq %f)", first_block()->_freq);
1056 for (Block *b = first_block(); b != NULL; b = next(b)) {
1057 tty->print(" B%d", b->_pre_order);
1058 if (b->head()->is_Loop()) {
1059 tty->print(" (L%d)", b->compute_loop_alignment());
1060 }
1061 if (b->has_loop_alignment()) {
1062 tty->print(" (T%d)", b->code_alignment());
1063 }
1064 }
1065 tty->cr();
1066 }
1068 void CFGEdge::dump( ) const {
1069 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
1070 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
1071 switch(state()) {
1072 case connected:
1073 tty->print("connected");
1074 break;
1075 case open:
1076 tty->print("open");
1077 break;
1078 case interior:
1079 tty->print("interior");
1080 break;
1081 }
1082 if (infrequent()) {
1083 tty->print(" infrequent");
1084 }
1085 tty->cr();
1086 }
1087 #endif
1089 //=============================================================================
1091 //------------------------------edge_order-------------------------------------
1092 // Comparison function for edges
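// Higher-frequency edges sort first; ties are broken in favor of the edge
// spanning the larger RPO distance.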
1093 static int edge_order(CFGEdge **e0, CFGEdge **e1) {
1094 float freq0 = (*e0)->freq();
1095 float freq1 = (*e1)->freq();
1096 if (freq0 != freq1) {
1097 return freq0 > freq1 ? -1 : 1;
1098 }
1100 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
1101 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;
1103 return dist1 - dist0;
1104 }
1106 //------------------------------trace_frequency_order--------------------------
1107 // Comparison function for traces
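// The trace made of connector blocks sorts last; otherwise more frequently
// executed traces come first, with ties broken by the RPO of each trace's
// first block.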
1108 static int trace_frequency_order(const void *p0, const void *p1) {
1109 Trace *tr0 = *(Trace **) p0;
1110 Trace *tr1 = *(Trace **) p1;
1111 Block *b0 = tr0->first_block();
1112 Block *b1 = tr1->first_block();
1114 // The trace of connector blocks goes at the end;
1115 // we only expect one such trace
1116 if (b0->is_connector() != b1->is_connector()) {
1117 return b1->is_connector() ? -1 : 1;
1118 }
1120 // Pull more frequently executed blocks to the beginning
1121 float freq0 = b0->_freq;
1122 float freq1 = b1->_freq;
1123 if (freq0 != freq1) {
1124 return freq0 > freq1 ? -1 : 1;
1125 }
1127 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;
1129 return diff;
1130 }
1132 //------------------------------find_edges-------------------------------------
1133 // Find edges of interest, i.e., those which can fall through. Presumes that
1134 // edges which don't fall through are of low frequency and can be generally
1135 // ignored. Initialize the list of traces.
1136 void PhaseBlockLayout::find_edges()
1137 {
1138 // Walk the blocks, creating edges and Traces
1139 uint i;
1140 Trace *tr = NULL;
1141 for (i = 0; i < _cfg._num_blocks; i++) {
1142 Block *b = _cfg._blocks[i];
1143 tr = new Trace(b, next, prev);
1144 traces[tr->id()] = tr;
1146 // All connector blocks should be at the end of the list
1147 if (b->is_connector()) break;
1149 // If this block and the next one have a one-to-one successor
1150 // predecessor relationship, simply append the next block
1151 int nfallthru = b->num_fall_throughs();
1152 while (nfallthru == 1 &&
1153 b->succ_fall_through(0)) {
1154 Block *n = b->_succs[0];
1156 // Skip over single-entry connector blocks, we don't want to
1157 // add them to the trace.
1158 while (n->is_connector() && n->num_preds() == 1) {
1159 n = n->_succs[0];
1160 }
1162 // We see a merge point, so stop search for the next block
1163 if (n->num_preds() != 1) break;
1165 i++;
1166 assert(n == _cfg._blocks[i], "expecting next block");
1167 tr->append(n);
1168 uf->map(n->_pre_order, tr->id());
1169 traces[n->_pre_order] = NULL;
1170 nfallthru = b->num_fall_throughs();
1171 b = n;
1172 }
1174 if (nfallthru > 0) {
1175 // Create a CFGEdge for each outgoing
1176 // edge that could be a fall-through.
1177 for (uint j = 0; j < b->_num_succs; j++ ) {
1178 if (b->succ_fall_through(j)) {
1179 Block *target = b->non_connector_successor(j);
1180 float freq = b->_freq * b->succ_prob(j);
1181 int from_pct = (int) ((100 * freq) / b->_freq);
1182 int to_pct = (int) ((100 * freq) / target->_freq);
1183 edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
1184 }
1185 }
1186 }
1187 }
1189 // Group connector blocks into one trace
1190 for (i++; i < _cfg._num_blocks; i++) {
1191 Block *b = _cfg._blocks[i];
1192 assert(b->is_connector(), "connector blocks at the end");
1193 tr->append(b);
1194 uf->map(b->_pre_order, tr->id());
1195 traces[b->_pre_order] = NULL;
1196 }
1197 }
1199 //------------------------------union_traces----------------------------------
1200 // Union two traces together in uf, and null out the trace in the list
1201 void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
1202 {
1203 uint old_id = old_trace->id();
1204 uint updated_id = updated_trace->id();
1206 uint lo_id = updated_id;
1207 uint hi_id = old_id;
1209 // If from is greater than to, swap values to meet
1210 // UnionFind guarantee.
1211 if (updated_id > old_id) {
1212 lo_id = old_id;
1213 hi_id = updated_id;
1215 // Fix up the trace ids
1216 traces[lo_id] = traces[updated_id];
1217 updated_trace->set_id(lo_id);
1218 }
1220 // Union the lower with the higher and remove the pointer
1221 // to the higher.
1222 uf->Union(lo_id, hi_id);
1223 traces[hi_id] = NULL;
1224 }
1226 //------------------------------grow_traces-------------------------------------
1227 // Append traces together via the most frequently executed edges
1228 void PhaseBlockLayout::grow_traces()
1229 {
1230 // Order the edges, and drive the growth of Traces via the most
1231 // frequently executed edges.
1232 edges->sort(edge_order);
1233 for (int i = 0; i < edges->length(); i++) {
1234 CFGEdge *e = edges->at(i);
1236 if (e->state() != CFGEdge::open) continue;
1238 Block *src_block = e->from();
1239 Block *targ_block = e->to();
1241 // Don't grow traces along backedges?
1242 if (!BlockLayoutRotateLoops) {
1243 if (targ_block->_rpo <= src_block->_rpo) {
1244 targ_block->set_loop_alignment(targ_block);
1245 continue;
1246 }
1247 }
1249 Trace *src_trace = trace(src_block);
1250 Trace *targ_trace = trace(targ_block);
1252 // If the edge in question can join two traces at their ends,
1253 // append one trace to the other.
1254 if (src_trace->last_block() == src_block) {
1255 if (src_trace == targ_trace) {
1256 e->set_state(CFGEdge::interior);
1257 if (targ_trace->backedge(e)) {
1258 // Reset i to catch any newly eligible edge
1259 // (Or we could remember the first "open" edge, and reset there)
1260 i = 0;
1261 }
1262 } else if (targ_trace->first_block() == targ_block) {
1263 e->set_state(CFGEdge::connected);
1264 src_trace->append(targ_trace);
1265 union_traces(src_trace, targ_trace);
1266 }
1267 }
1268 }
1269 }
1271 //------------------------------merge_traces-----------------------------------
1272 // Embed one trace into another, if the fork or join points are sufficiently
1273 // balanced.
1274 void PhaseBlockLayout::merge_traces(bool fall_thru_only)
1275 {
1276 // Walk the edge list another time, looking at unprocessed edges.
1277 // Fold in diamonds
1278 for (int i = 0; i < edges->length(); i++) {
1279 CFGEdge *e = edges->at(i);
1281 if (e->state() != CFGEdge::open) continue;
1282 if (fall_thru_only) {
1283 if (e->infrequent()) continue;
1284 }
1286 Block *src_block = e->from();
1287 Trace *src_trace = trace(src_block);
1288 bool src_at_tail = src_trace->last_block() == src_block;
1290 Block *targ_block = e->to();
1291 Trace *targ_trace = trace(targ_block);
1292 bool targ_at_start = targ_trace->first_block() == targ_block;
1294 if (src_trace == targ_trace) {
1295 // This may be a loop, but we can't do much about it.
1296 e->set_state(CFGEdge::interior);
1297 continue;
1298 }
1300 if (fall_thru_only) {
1301 // If the edge links the middle of two traces, we can't do anything.
1302 // Mark the edge and continue.
1303 if (!src_at_tail && !targ_at_start) {
1304 continue;
1305 }
1307 // Don't grow traces along backedges?
1308 if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
1309 continue;
1310 }
1312 // If both ends of the edge are available, why didn't we handle it earlier?
1313 assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");
1315 if (targ_at_start) {
1316 // Insert the "targ" trace in the "src" trace if the insertion point
1317 // is a two way branch.
1318 // Better profitability check possible, but may not be worth it.
1319 // Someday, see if this "fork" has an associated "join";
1320 // then make a policy on merging this trace at the fork or join.
1321 // For example, other things being equal, it may be better to place this
1322 // trace at the join point if the "src" trace ends in a two-way, but
1323 // the insertion point is one-way.
1324 assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
1325 e->set_state(CFGEdge::connected);
1326 src_trace->insert_after(src_block, targ_trace);
1327 union_traces(src_trace, targ_trace);
1328 } else if (src_at_tail) {
1329 if (src_trace != trace(_cfg._broot)) {
1330 e->set_state(CFGEdge::connected);
1331 targ_trace->insert_before(targ_block, src_trace);
1332 union_traces(targ_trace, src_trace);
1333 }
1334 }
1335 } else if (e->state() == CFGEdge::open) {
1336 // Append traces, even without a fall-thru connection.
1337 // But leave root entry at the beginning of the block list.
1338 if (targ_trace != trace(_cfg._broot)) {
1339 e->set_state(CFGEdge::connected);
1340 src_trace->append(targ_trace);
1341 union_traces(src_trace, targ_trace);
1342 }
1343 }
1344 }
1345 }
1347 //----------------------------reorder_traces-----------------------------------
1348 // Order the sequence of the traces in some desirable way, and fixup the
1349 // jumps at the end of each block.
1350 void PhaseBlockLayout::reorder_traces(int count)
1351 {
1352 ResourceArea *area = Thread::current()->resource_area();
1353 Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
1354 Block_List worklist;
1355 int new_count = 0;
1357 // Compact the traces.
1358 for (int i = 0; i < count; i++) {
1359 Trace *tr = traces[i];
1360 if (tr != NULL) {
1361 new_traces[new_count++] = tr;
1362 }
1363 }
1365 // The entry block should be first on the new trace list.
1366 Trace *tr = trace(_cfg._broot);
1367 assert(tr == new_traces[0], "entry trace misplaced");
1369 // Sort the new trace list by frequency
1370 qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
1372 // Patch up the successor blocks
1373 _cfg._blocks.reset();
1374 _cfg._num_blocks = 0;
1375 for (int i = 0; i < new_count; i++) {
1376 Trace *tr = new_traces[i];
1377 if (tr != NULL) {
1378 tr->fixup_blocks(_cfg);
1379 }
1380 }
1381 }
1383 //------------------------------PhaseBlockLayout-------------------------------
1384 // Order basic blocks based on frequency
1385 PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
1386 Phase(BlockLayout),
1387 _cfg(cfg)
1388 {
1389 ResourceMark rm;
1390 ResourceArea *area = Thread::current()->resource_area();
1392 // List of traces
1393 int size = _cfg._num_blocks + 1;
1394 traces = NEW_ARENA_ARRAY(area, Trace *, size);
1395 memset(traces, 0, size*sizeof(Trace*));
1396 next = NEW_ARENA_ARRAY(area, Block *, size);
1397 memset(next, 0, size*sizeof(Block *));
1398 prev = NEW_ARENA_ARRAY(area, Block *, size);
1399 memset(prev , 0, size*sizeof(Block *));
1401 // List of edges
1402 edges = new GrowableArray<CFGEdge*>;
1404 // Mapping block index --> block_trace
1405 uf = new UnionFind(size);
1406 uf->reset(size);
1408 // Find edges and create traces.
1409 find_edges();
1411 // Grow traces at their ends via most frequent edges.
1412 grow_traces();
1414 // Merge one trace into another, but only at fall-through points.
1415 // This may make diamonds and other related shapes in a trace.
1416 merge_traces(true);
1418 // Run merge again, allowing two traces to be catenated, even if
1419 // one does not fall through into the other. This places loosely
1420 // related traces near each other.
1421 merge_traces(false);
1423 // Re-order all the remaining traces by frequency
1424 reorder_traces(size);
1426 assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
1427 }
1430 //------------------------------backedge---------------------------------------
1431 // Edge e completes a loop in a trace. If the target block is head of the
1432 // loop, rotate the loop block so that the loop ends in a conditional branch.
1433 bool Trace::backedge(CFGEdge *e) {
1434 bool loop_rotated = false;
1435 Block *src_block = e->from();
1436 Block *targ_block = e->to();
1438 assert(last_block() == src_block, "loop discovery at back branch");
1439 if (first_block() == targ_block) {
1440 if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
1441 // Find the last block in the trace that has a conditional
1442 // branch.
1443 Block *b;
1444 for (b = last_block(); b != NULL; b = prev(b)) {
1445 if (b->num_fall_throughs() == 2) {
1446 break;
1447 }
1448 }
1450 if (b != last_block() && b != NULL) {
1451 loop_rotated = true;
1453 // Rotate the loop by doing two-part linked-list surgery.
1454 append(first_block());
1455 break_loop_after(b);
1456 }
1457 }
1459 // Backbranch to the top of a trace
1460 // Scroll forward through the trace from the targ_block. If we find
1461 // a loop head before another loop top, use the loop head alignment.
1462 for (Block *b = targ_block; b != NULL; b = next(b)) {
1463 if (b->has_loop_alignment()) {
1464 break;
1465 }
1466 if (b->head()->is_Loop()) {
1467 targ_block = b;
1468 break;
1469 }
1470 }
1472 first_block()->set_loop_alignment(targ_block);
1474 } else {
1475 // Backbranch into the middle of a trace
1476 targ_block->set_loop_alignment(targ_block);
1477 }
1479 return loop_rotated;
1480 }
1482 //------------------------------fixup_blocks-----------------------------------
1483 // push blocks onto the CFG list
1484 // ensure that blocks have the correct two-way branch sense
1485 void Trace::fixup_blocks(PhaseCFG &cfg) {
1486 Block *last = last_block();
1487 for (Block *b = first_block(); b != NULL; b = next(b)) {
1488 cfg._blocks.push(b);
1489 cfg._num_blocks++;
1490 if (!b->is_connector()) {
1491 int nfallthru = b->num_fall_throughs();
1492 if (b != last) {
1493 if (nfallthru == 2) {
1494 // Ensure that the sense of the branch is correct
1495 Block *bnext = next(b);
1496 Block *bs0 = b->non_connector_successor(0);
1498 MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
1499 ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
1500 ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
1502 if (bnext == bs0) {
1503 // Fall-thru case in succs[0], should be in succs[1]
1505 // Flip targets in _succs map
1506 Block *tbs0 = b->_succs[0];
1507 Block *tbs1 = b->_succs[1];
1508 b->_succs.map( 0, tbs1 );
1509 b->_succs.map( 1, tbs0 );
1511 // Flip projections to match targets
1512 b->_nodes.map(b->_nodes.size()-2, proj1);
1513 b->_nodes.map(b->_nodes.size()-1, proj0);
1514 }
1515 }
1516 }
1517 }
1518 }
1519 }