1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/block.hpp"
29 #include "opto/cfgnode.hpp"
30 #include "opto/chaitin.hpp"
31 #include "opto/loopnode.hpp"
32 #include "opto/machnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/rootnode.hpp"
36 #include "utilities/copy.hpp"
38 // Optimization - Graph Style
41 //-----------------------------------------------------------------------------
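// Grow the block mapping so that index i is addressable. The backing array is
// doubled until it fits and the newly exposed slots are zero-filled, so lookups
// of indices that have not been mapped yet yield NULL.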
42 void Block_Array::grow( uint i ) {
43 assert(i >= Max(), "must be an overflow");
44 debug_only(_limit = i+1);
45 if( i < _size ) return;
46 if( !_size ) {
47 _size = 1;
48 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
49 _blocks[0] = NULL;
50 }
51 uint old = _size;
52 while( i >= _size ) _size <<= 1; // Double to fit
53 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
54 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
55 }
57 //=============================================================================
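// Remove the block at index i by sliding the tail of the list down one slot
// and dropping the last entry.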
58 void Block_List::remove(uint i) {
59 assert(i < _cnt, "index out of bounds");
60 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
61 pop(); // shrink list by one block
62 }
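// Insert block b at index i: grow the list by one, shift the tail up one slot,
// then store b in the vacated slot.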
64 void Block_List::insert(uint i, Block *b) {
65 push(b); // grow list by one block
66 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
67 _blocks[i] = b;
68 }
70 #ifndef PRODUCT
71 void Block_List::print() {
72 for (uint i=0; i < size(); i++) {
73 tty->print("B%d ", _blocks[i]->_pre_order);
74 }
75 tty->print("size = %d\n", size());
76 }
77 #endif
79 //=============================================================================
81 uint Block::code_alignment() {
82 // Check for Root block
83 if (_pre_order == 0) return CodeEntryAlignment;
84 // Check for Start block
85 if (_pre_order == 1) return InteriorEntryAlignment;
86 // Check for loop alignment
87 if (has_loop_alignment()) return loop_alignment();
89 return relocInfo::addr_unit(); // no particular alignment
90 }
92 uint Block::compute_loop_alignment() {
93 Node *h = head();
94 int unit_sz = relocInfo::addr_unit();
95 if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
96 // Pre- and post-loops have low trip count so do not bother with
97 // NOPs for align loop head. The constants are hidden from tuning
98 // but only because my "divide by 4" heuristic surely gets nearly
99 // all possible gain (a "do not align at all" heuristic has a
100 // chance of getting a really tiny gain).
101 if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
102 h->as_CountedLoop()->is_post_loop())) {
103 return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
104 }
105 // Loops with low backedge frequency should not be aligned.
106 Node *n = h->in(LoopNode::LoopBackControl)->in(0);
107 if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
108 return unit_sz; // Loop does not loop, more often than not!
109 }
110 return OptoLoopAlignment; // Otherwise align loop head
111 }
113 return unit_sz; // no particular alignment
114 }
116 //-----------------------------------------------------------------------------
117 // Compute the size of first 'inst_cnt' instructions in this block.
118 // Return the number of instructions left to compute if the block has
119 // less than 'inst_cnt' instructions. Stop, and return 0 if sum_size
120 // exceeds OptoLoopAlignment.
121 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
122 PhaseRegAlloc* ra) {
123 uint last_inst = _nodes.size();
124 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
125 uint inst_size = _nodes[j]->size(ra);
126 if( inst_size > 0 ) {
127 inst_cnt--;
128 uint sz = sum_size + inst_size;
129 if( sz <= (uint)OptoLoopAlignment ) {
130 // Compute size of instructions which fit into fetch buffer only
131 // since all inst_cnt instructions will not fit even if we align them.
132 sum_size = sz;
133 } else {
134 return 0;
135 }
136 }
137 }
138 return inst_cnt;
139 }
141 //-----------------------------------------------------------------------------
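// Linear search for node n in this block's node list; n is expected to be
// present (ShouldNotReachHere otherwise).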
142 uint Block::find_node( const Node *n ) const {
143 for( uint i = 0; i < _nodes.size(); i++ ) {
144 if( _nodes[i] == n )
145 return i;
146 }
147 ShouldNotReachHere();
148 return 0;
149 }
151 // Find and remove n from block list
152 void Block::find_remove( const Node *n ) {
153 _nodes.remove(find_node(n));
154 }
156 //------------------------------is_Empty---------------------------------------
157 // Return empty status of a block. Empty blocks contain only the head, other
158 // ideal nodes, and an optional trailing goto.
159 int Block::is_Empty() const {
161 // Root or start block is not considered empty
162 if (head()->is_Root() || head()->is_Start()) {
163 return not_empty;
164 }
166 int success_result = completely_empty;
167 int end_idx = _nodes.size()-1;
169 // Check for ending goto
170 if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
171 success_result = empty_with_goto;
172 end_idx--;
173 }
175 // Unreachable blocks are considered empty
176 if (num_preds() <= 1) {
177 return success_result;
178 }
180 // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
181 // turn directly into code, because only MachNodes have non-trivial
182 // emit() functions.
183 while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
184 end_idx--;
185 }
187 // No room for any interesting instructions?
188 if (end_idx == 0) {
189 return success_result;
190 }
192 return not_empty;
193 }
195 //------------------------------has_uncommon_code------------------------------
196 // Return true if the block's code implies that it is likely to be
197 // executed infrequently. Check to see if the block ends in a Halt or
198 // a low probability call.
199 bool Block::has_uncommon_code() const {
200 Node* en = end();
202 if (en->is_MachGoto())
203 en = en->in(0);
204 if (en->is_Catch())
205 en = en->in(0);
206 if (en->is_MachProj() && en->in(0)->is_MachCall()) {
207 MachCallNode* call = en->in(0)->as_MachCall();
208 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
209 // This is true for slow-path stubs like new_{instance,array},
210 // slow_arraycopy, complete_monitor_locking, uncommon_trap.
211 // The magic number corresponds to the probability of an uncommon_trap,
212 // even though it is a count not a probability.
213 return true;
214 }
215 }
217 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
218 return op == Op_Halt;
219 }
221 //------------------------------is_uncommon------------------------------------
222 // True if block is low enough frequency or guarded by a test which
223 // mostly does not go here.
224 bool Block::is_uncommon( Block_Array &bbs ) const {
225 // Initial blocks must never be moved, so are never uncommon.
226 if (head()->is_Root() || head()->is_Start()) return false;
228 // Check for way-low freq
229 if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
231 // Look for code shape indicating uncommon_trap or slow path
232 if (has_uncommon_code()) return true;
234 const float epsilon = 0.05f;
235 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
236 uint uncommon_preds = 0;
237 uint freq_preds = 0;
238 uint uncommon_for_freq_preds = 0;
240 for( uint i=1; i<num_preds(); i++ ) {
241 Block* guard = bbs[pred(i)->_idx];
242 // Check to see if this block follows its guard 1 time out of 10000
243 // or less.
244 //
245 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
246 // we intend to be "uncommon", such as slow-path TLE allocation,
247 // predicted call failure, and uncommon trap triggers.
248 //
249 // Use an epsilon value of 5% to allow for variability in frequency
250 // predictions and floating point calculations. The net effect is
251 // that guard_factor is set to 9500.
252 //
253 // Ignore low-frequency blocks.
254 // The next check is (guard->_freq < 1.e-5 * 9500.).
255 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
256 uncommon_preds++;
257 } else {
258 freq_preds++;
259 if( _freq < guard->_freq * guard_factor ) {
260 uncommon_for_freq_preds++;
261 }
262 }
263 }
264 if( num_preds() > 1 &&
265 // The block is uncommon if all preds are uncommon or
266 (uncommon_preds == (num_preds()-1) ||
267 // it is uncommon for all frequent preds.
268 uncommon_for_freq_preds == freq_preds) ) {
269 return true;
270 }
271 return false;
272 }
274 //------------------------------dump-------------------------------------------
275 #ifndef PRODUCT
276 void Block::dump_bidx(const Block* orig, outputStream* st) const {
277 if (_pre_order) st->print("B%d",_pre_order);
278 else st->print("N%d", head()->_idx);
280 if (Verbose && orig != this) {
281 // Dump the original block's idx
282 st->print(" (");
283 orig->dump_bidx(orig, st);
284 st->print(")");
285 }
286 }
288 void Block::dump_pred(const Block_Array *bbs, Block* orig, outputStream* st) const {
289 if (is_connector()) {
290 for (uint i=1; i<num_preds(); i++) {
291 Block *p = ((*bbs)[pred(i)->_idx]);
292 p->dump_pred(bbs, orig, st);
293 }
294 } else {
295 dump_bidx(orig, st);
296 st->print(" ");
297 }
298 }
300 void Block::dump_head( const Block_Array *bbs, outputStream* st ) const {
301 // Print the basic block
302 dump_bidx(this, st);
303 st->print(": #\t");
305 // Print the incoming CFG edges and the outgoing CFG edges
306 for( uint i=0; i<_num_succs; i++ ) {
307 non_connector_successor(i)->dump_bidx(_succs[i], st);
308 st->print(" ");
309 }
310 st->print("<- ");
311 if( head()->is_block_start() ) {
312 for (uint i=1; i<num_preds(); i++) {
313 Node *s = pred(i);
314 if (bbs) {
315 Block *p = (*bbs)[s->_idx];
316 p->dump_pred(bbs, p, st);
317 } else {
318 while (!s->is_block_start())
319 s = s->in(0);
320 st->print("N%d ", s->_idx );
321 }
322 }
323 } else
324 st->print("BLOCK HEAD IS JUNK ");
326 // Print loop, if any
327 const Block *bhead = this; // Head of self-loop
328 Node *bh = bhead->head();
329 if( bbs && bh->is_Loop() && !head()->is_Root() ) {
330 LoopNode *loop = bh->as_Loop();
331 const Block *bx = (*bbs)[loop->in(LoopNode::LoopBackControl)->_idx];
332 while (bx->is_connector()) {
333 bx = (*bbs)[bx->pred(1)->_idx];
334 }
335 st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
336 // Dump any loop-specific bits, especially for CountedLoops.
337 loop->dump_spec(st);
338 } else if (has_loop_alignment()) {
339 st->print(" top-of-loop");
340 }
341 st->print(" Freq: %g",_freq);
342 if( Verbose || WizardMode ) {
343 st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
344 st->print(" RegPressure: %d",_reg_pressure);
345 st->print(" IHRP Index: %d",_ihrp_index);
346 st->print(" FRegPressure: %d",_freg_pressure);
347 st->print(" FHRP Index: %d",_fhrp_index);
348 }
349 st->print_cr("");
350 }
352 void Block::dump() const { dump(NULL); }
354 void Block::dump( const Block_Array *bbs ) const {
355 dump_head(bbs);
356 uint cnt = _nodes.size();
357 for( uint i=0; i<cnt; i++ )
358 _nodes[i]->dump();
359 tty->print("\n");
360 }
361 #endif
363 //=============================================================================
364 //------------------------------PhaseCFG---------------------------------------
365 PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
366 Phase(CFG),
367 _bbs(a),
368 _root(r),
369 _node_latency(NULL)
370 #ifndef PRODUCT
371 , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
372 #endif
373 #ifdef ASSERT
374 , _raw_oops(a)
375 #endif
376 {
377 ResourceMark rm;
378 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
379 // then Match it into a machine-specific Node. Then clone the machine
380 // Node on demand.
381 Node *x = new (C) GotoNode(NULL);
382 x->init_req(0, x);
383 _goto = m.match_tree(x);
384 assert(_goto != NULL, "");
385 _goto->set_req(0,_goto);
387 // Build the CFG in Reverse Post Order
388 _num_blocks = build_cfg();
389 _broot = _bbs[_root->_idx];
390 }
392 //------------------------------build_cfg--------------------------------------
393 // Build a proper looking CFG. Make every block begin with either a StartNode
394 // or a RegionNode. Make every block end with either a Goto, If or Return.
395 // The RootNode both starts and ends its own block. Do this with a recursive
396 // backwards walk over the control edges.
397 uint PhaseCFG::build_cfg() {
398 Arena *a = Thread::current()->resource_area();
399 VectorSet visited(a);
401 // Allocate stack with enough space to avoid frequent realloc
402 Node_Stack nstack(a, C->unique() >> 1);
403 nstack.push(_root, 0);
404 uint sum = 0; // Counter for blocks
406 while (nstack.is_nonempty()) {
407 // node and in's index from stack's top
408 // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
409 // only nodes which point to the start of basic block (see below).
410 Node *np = nstack.node();
411 // idx > 0, except for the first node (_root) pushed on stack
412 // at the beginning when idx == 0.
413 // We will use the condition (idx == 0) later to end the build.
414 uint idx = nstack.index();
415 Node *proj = np->in(idx);
416 const Node *x = proj->is_block_proj();
417 // Does the block end with a proper block-ending Node? One of Return,
418 // If or Goto? (This check should be done for visited nodes also).
419 if (x == NULL) { // Does not end right...
420 Node *g = _goto->clone(); // Force it to end in a Goto
421 g->set_req(0, proj);
422 np->set_req(idx, g);
423 x = proj = g;
424 }
425 if (!visited.test_set(x->_idx)) { // Visit this block once
426 // Skip any control-pinned middle'in stuff
427 Node *p = proj;
428 do {
429 proj = p; // Update pointer to last Control
430 p = p->in(0); // Move control forward
431 } while( !p->is_block_proj() &&
432 !p->is_block_start() );
433 // Make the block begin with one of Region or StartNode.
434 if( !p->is_block_start() ) {
435 RegionNode *r = new (C) RegionNode( 2 );
436 r->init_req(1, p); // Insert RegionNode in the way
437 proj->set_req(0, r); // Insert RegionNode in the way
438 p = r;
439 }
440 // 'p' now points to the start of this basic block
442 // Put self in array of basic blocks
443 Block *bb = new (_bbs._arena) Block(_bbs._arena,p);
444 _bbs.map(p->_idx,bb);
445 _bbs.map(x->_idx,bb);
446 if( x != p ) { // Only for root is x == p
447 bb->_nodes.push((Node*)x);
448 }
449 // Now handle predecessors
450 ++sum; // Count 1 for self block
451 uint cnt = bb->num_preds();
452 for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
453 Node *prevproj = p->in(i); // Get prior input
454 assert( !prevproj->is_Con(), "dead input not removed" );
455 // Check to see if p->in(i) is a "control-dependent" CFG edge -
456 // i.e., it splits at the source (via an IF or SWITCH) and merges
457 // at the destination (via a many-input Region).
458 // This breaks critical edges. The RegionNode to start the block
459 // will be added when <p,i> is pulled off the node stack
460 if ( cnt > 2 ) { // Merging many things?
461 assert( prevproj== bb->pred(i),"");
462 if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
463 // Force a block on the control-dependent edge
464 Node *g = _goto->clone(); // Force it to end in a Goto
465 g->set_req(0,prevproj);
466 p->set_req(i,g);
467 }
468 }
469 nstack.push(p, i); // 'p' is RegionNode or StartNode
470 }
471 } else { // Post-processing visited nodes
472 nstack.pop(); // remove node from stack
473 // Check if it is the first node pushed on the stack at the beginning.
474 if (idx == 0) break; // end of the build
475 // Find predecessor basic block
476 Block *pb = _bbs[x->_idx];
477 // Insert into nodes array, if not already there
478 if( !_bbs.lookup(proj->_idx) ) {
479 assert( x != proj, "" );
480 // Map basic block of projection
481 _bbs.map(proj->_idx,pb);
482 pb->_nodes.push(proj);
483 }
484 // Insert self as a child of my predecessor block
485 pb->_succs.map(pb->_num_succs++, _bbs[np->_idx]);
486 assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
487 "too many control users, not a CFG?" );
488 }
489 }
490 // Return number of basic blocks for all children and self
491 return sum;
492 }
494 //------------------------------insert_goto_at---------------------------------
495 // Inserts a goto & corresponding basic block between
496 // block[block_no] and its succ_no'th successor block
497 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
498 // get block with block_no
499 assert(block_no < _num_blocks, "illegal block number");
500 Block* in = _blocks[block_no];
501 // get successor block succ_no
502 assert(succ_no < in->_num_succs, "illegal successor number");
503 Block* out = in->_succs[succ_no];
504 // Compute frequency of the new block. Do this before inserting
505 // new block in case succ_prob() needs to infer the probability from
506 // surrounding blocks.
507 float freq = in->_freq * in->succ_prob(succ_no);
508 // get ProjNode corresponding to the succ_no'th successor of the in block
509 ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
510 // create region for basic block
511 RegionNode* region = new (C) RegionNode(2);
512 region->init_req(1, proj);
513 // setup corresponding basic block
514 Block* block = new (_bbs._arena) Block(_bbs._arena, region);
515 _bbs.map(region->_idx, block);
516 C->regalloc()->set_bad(region->_idx);
517 // add a goto node
518 Node* gto = _goto->clone(); // get a new goto node
519 gto->set_req(0, region);
520 // add it to the basic block
521 block->_nodes.push(gto);
522 _bbs.map(gto->_idx, block);
523 C->regalloc()->set_bad(gto->_idx);
524 // hook up successor block
525 block->_succs.map(block->_num_succs++, out);
526 // remap successor's predecessors if necessary
527 for (uint i = 1; i < out->num_preds(); i++) {
528 if (out->pred(i) == proj) out->head()->set_req(i, gto);
529 }
530 // remap predecessor's successor to new block
531 in->_succs.map(succ_no, block);
532 // Set the frequency of the new block
533 block->_freq = freq;
534 // add new basic block to basic block list
535 _blocks.insert(block_no + 1, block);
536 _num_blocks++;
537 }
539 //------------------------------no_flip_branch---------------------------------
540 // Does this block end in a multiway branch that cannot have the default case
541 // flipped for another case?
542 static bool no_flip_branch( Block *b ) {
543 int branch_idx = b->_nodes.size() - b->_num_succs-1;
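// branch_idx indexes the node just before this block's successor projections,
// i.e. the branch itself.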
544 if( branch_idx < 1 ) return false;
545 Node *bra = b->_nodes[branch_idx];
546 if( bra->is_Catch() )
547 return true;
548 if( bra->is_Mach() ) {
549 if( bra->is_MachNullCheck() )
550 return true;
551 int iop = bra->as_Mach()->ideal_Opcode();
552 if( iop == Op_FastLock || iop == Op_FastUnlock )
553 return true;
554 }
555 return false;
556 }
558 //------------------------------convert_NeverBranch_to_Goto--------------------
559 // Check for NeverBranch at block end. This needs to become a GOTO to the
560 // true target. NeverBranch are treated as a conditional branch that always
561 // goes the same direction for most of the optimizer and are used to give a
562 // fake exit path to infinite loops. At this late stage they need to turn
563 // into Goto's so that when you enter the infinite loop you indeed hang.
564 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
565 // Find true target
566 int end_idx = b->end_idx();
567 int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
568 Block *succ = b->_succs[idx];
569 Node* gto = _goto->clone(); // get a new goto node
570 gto->set_req(0, b->head());
571 Node *bp = b->_nodes[end_idx];
572 b->_nodes.map(end_idx,gto); // Slam over NeverBranch
573 _bbs.map(gto->_idx, b);
574 C->regalloc()->set_bad(gto->_idx);
575 b->_nodes.pop(); // Yank projections
576 b->_nodes.pop(); // Yank projections
577 b->_succs.map(0,succ); // Map only successor
578 b->_num_succs = 1;
579 // remap successor's predecessors if necessary
580 uint j;
581 for( j = 1; j < succ->num_preds(); j++)
582 if( succ->pred(j)->in(0) == bp )
583 succ->head()->set_req(j, gto);
584 // Kill alternate exit path
585 Block *dead = b->_succs[1-idx];
586 for( j = 1; j < dead->num_preds(); j++)
587 if( dead->pred(j)->in(0) == bp )
588 break;
589 // Scan through block, yanking dead path from
590 // all regions and phis.
591 dead->head()->del_req(j);
592 for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
593 dead->_nodes[k]->del_req(j);
594 }
596 //------------------------------move_to_next-----------------------------------
597 // Helper function to move block bx to the slot following b_index. Return
598 // true if the move is successful, otherwise false
599 bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
600 if (bx == NULL) return false;
602 // Return false if bx is already scheduled.
603 uint bx_index = bx->_pre_order;
604 if ((bx_index <= b_index) && (_blocks[bx_index] == bx)) {
605 return false;
606 }
608 // Find the current index of block bx on the block list
609 bx_index = b_index + 1;
610 while( bx_index < _num_blocks && _blocks[bx_index] != bx ) bx_index++;
611 assert(_blocks[bx_index] == bx, "block not found");
613 // If the previous block conditionally falls into bx, return false,
614 // because moving bx will create an extra jump.
615 for(uint k = 1; k < bx->num_preds(); k++ ) {
616 Block* pred = _bbs[bx->pred(k)->_idx];
617 if (pred == _blocks[bx_index-1]) {
618 if (pred->_num_succs != 1) {
619 return false;
620 }
621 }
622 }
624 // Reinsert bx just past block 'b'
625 _blocks.remove(bx_index);
626 _blocks.insert(b_index + 1, bx);
627 return true;
628 }
630 //------------------------------move_to_end------------------------------------
631 // Move empty and uncommon blocks to the end.
632 void PhaseCFG::move_to_end(Block *b, uint i) {
633 int e = b->is_Empty();
634 if (e != Block::not_empty) {
635 if (e == Block::empty_with_goto) {
636 // Remove the goto, but leave the block.
637 b->_nodes.pop();
638 }
639 // Mark this block as a connector block, which will cause it to be
640 // ignored in certain functions such as non_connector_successor().
641 b->set_connector();
642 }
643 // Move the empty block to the end, and don't recheck.
644 _blocks.remove(i);
645 _blocks.push(b);
646 }
648 //---------------------------set_loop_alignment--------------------------------
649 // Set loop alignment for every block
650 void PhaseCFG::set_loop_alignment() {
651 uint last = _num_blocks;
652 assert( _blocks[0] == _broot, "" );
654 for (uint i = 1; i < last; i++ ) {
655 Block *b = _blocks[i];
656 if (b->head()->is_Loop()) {
657 b->set_loop_alignment(b);
658 }
659 }
660 }
662 //-----------------------------remove_empty------------------------------------
663 // Make empty basic blocks into "connector" blocks. Move uncommon blocks
664 // to the end.
665 void PhaseCFG::remove_empty() {
666 // Move uncommon blocks to the end
667 uint last = _num_blocks;
668 assert( _blocks[0] == _broot, "" );
670 for (uint i = 1; i < last; i++) {
671 Block *b = _blocks[i];
672 if (b->is_connector()) break;
674 // Check for NeverBranch at block end. This needs to become a GOTO to the
675 // true target. NeverBranch are treated as a conditional branch that
676 // always goes the same direction for most of the optimizer and are used
677 // to give a fake exit path to infinite loops. At this late stage they
678 // need to turn into Goto's so that when you enter the infinite loop you
679 // indeed hang.
680 if( b->_nodes[b->end_idx()]->Opcode() == Op_NeverBranch )
681 convert_NeverBranch_to_Goto(b);
683 // Look for uncommon blocks and move to end.
684 if (!C->do_freq_based_layout()) {
685 if( b->is_uncommon(_bbs) ) {
686 move_to_end(b, i);
687 last--; // No longer check for being uncommon!
688 if( no_flip_branch(b) ) { // Fall-thru case must follow?
689 b = _blocks[i]; // Find the fall-thru block
690 move_to_end(b, i);
691 last--;
692 }
693 i--; // backup block counter post-increment
694 }
695 }
696 }
698 // Move empty blocks to the end
699 last = _num_blocks;
700 for (uint i = 1; i < last; i++) {
701 Block *b = _blocks[i];
702 if (b->is_Empty() != Block::not_empty) {
703 move_to_end(b, i);
704 last--;
705 i--;
706 }
707 } // End of for all blocks
708 }
710 //-----------------------------fixup_flow--------------------------------------
711 // Fix up the final control flow for basic blocks.
712 void PhaseCFG::fixup_flow() {
713 // Fixup final control flow for the blocks. Remove jump-to-next
714 // block. If neither arm of a IF follows the conditional branch, we
715 // have to add a second jump after the conditional. We place the
716 // TRUE branch target in succs[0] for both GOTOs and IFs.
717 for (uint i=0; i < _num_blocks; i++) {
718 Block *b = _blocks[i];
719 b->_pre_order = i; // turn pre-order into block-index
721 // Connector blocks need no further processing.
722 if (b->is_connector()) {
723 assert((i+1) == _num_blocks || _blocks[i+1]->is_connector(),
724 "All connector blocks should sink to the end");
725 continue;
726 }
727 assert(b->is_Empty() != Block::completely_empty,
728 "Empty blocks should be connectors");
730 Block *bnext = (i < _num_blocks-1) ? _blocks[i+1] : NULL;
731 Block *bs0 = b->non_connector_successor(0);
733 // Check for multi-way branches where I cannot negate the test to
734 // exchange the true and false targets.
735 if( no_flip_branch( b ) ) {
736 // Find fall through case - it must fall into its target
737 int branch_idx = b->_nodes.size() - b->_num_succs;
738 for (uint j2 = 0; j2 < b->_num_succs; j2++) {
739 const ProjNode* p = b->_nodes[branch_idx + j2]->as_Proj();
740 if (p->_con == 0) {
741 // successor j2 is fall through case
742 if (b->non_connector_successor(j2) != bnext) {
743 // but it is not the next block => insert a goto
744 insert_goto_at(i, j2);
745 }
746 // Put taken branch in slot 0
747 if( j2 == 0 && b->_num_succs == 2) {
748 // Flip targets in succs map
749 Block *tbs0 = b->_succs[0];
750 Block *tbs1 = b->_succs[1];
751 b->_succs.map( 0, tbs1 );
752 b->_succs.map( 1, tbs0 );
753 }
754 break;
755 }
756 }
757 // Remove all CatchProjs
758 for (uint j1 = 0; j1 < b->_num_succs; j1++) b->_nodes.pop();
760 } else if (b->_num_succs == 1) {
761 // Block ends in a Goto?
762 if (bnext == bs0) {
763 // We fall into next block; remove the Goto
764 b->_nodes.pop();
765 }
767 } else if( b->_num_succs == 2 ) { // Block ends in a If?
768 // Get opcode of 1st projection (matches _succs[0])
769 // Note: Since this basic block has 2 exits, the last 2 nodes must
770 // be projections (in any order), the 3rd last node must be
771 // the IfNode (we have excluded other 2-way exits such as
772 // CatchNodes already).
773 MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
774 ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
775 ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
777 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
778 assert(proj0->raw_out(0) == b->_succs[0]->head(), "Mismatch successor 0");
779 assert(proj1->raw_out(0) == b->_succs[1]->head(), "Mismatch successor 1");
781 Block *bs1 = b->non_connector_successor(1);
783 // Check for neither successor block following the current
784 // block ending in a conditional. If so, move one of the
785 // successors after the current one, provided that the
786 // successor was previously unscheduled, but moveable
787 // (i.e., all paths to it involve a branch).
788 if( !C->do_freq_based_layout() && bnext != bs0 && bnext != bs1 ) {
789 // Choose the more common successor based on the probability
790 // of the conditional branch.
791 Block *bx = bs0;
792 Block *by = bs1;
794 // _prob is the probability of taking the true path. Make
795 // p the probability of taking successor #1.
796 float p = iff->as_MachIf()->_prob;
797 if( proj0->Opcode() == Op_IfTrue ) {
798 p = 1.0 - p;
799 }
801 // Prefer successor #1 if p > 0.5
802 if (p > PROB_FAIR) {
803 bx = bs1;
804 by = bs0;
805 }
807 // Attempt the more common successor first
808 if (move_to_next(bx, i)) {
809 bnext = bx;
810 } else if (move_to_next(by, i)) {
811 bnext = by;
812 }
813 }
815 // Check for conditional branching the wrong way. Negate
816 // conditional, if needed, so it falls into the following block
817 // and branches to the not-following block.
819 // Check for the next block being in succs[0]. We are going to branch
820 // to succs[0], so we want the fall-thru case as the next block in
821 // succs[1].
822 if (bnext == bs0) {
823 // Fall-thru case in succs[0], so flip targets in succs map
824 Block *tbs0 = b->_succs[0];
825 Block *tbs1 = b->_succs[1];
826 b->_succs.map( 0, tbs1 );
827 b->_succs.map( 1, tbs0 );
828 // Flip projection for each target
829 { ProjNode *tmp = proj0; proj0 = proj1; proj1 = tmp; }
831 } else if( bnext != bs1 ) {
832 // Need a double-branch
833 // The existing conditional branch need not change.
834 // Add an unconditional branch to the false target.
835 // Alas, it must appear in its own block and adding a
836 // block this late in the game is complicated. Sigh.
837 insert_goto_at(i, 1);
838 }
840 // Make sure we TRUE branch to the target
841 if( proj0->Opcode() == Op_IfFalse ) {
842 iff->as_MachIf()->negate();
843 }
845 b->_nodes.pop(); // Remove IfFalse & IfTrue projections
846 b->_nodes.pop();
848 } else {
849 // Multi-exit block, e.g. a switch statement
850 // But we don't need to do anything here
851 }
852 } // End of for all blocks
853 }
856 //------------------------------dump-------------------------------------------
857 #ifndef PRODUCT
858 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
859 const Node *x = end->is_block_proj();
860 assert( x, "not a CFG" );
862 // Do not visit this block again
863 if( visited.test_set(x->_idx) ) return;
865 // Skip through this block
866 const Node *p = x;
867 do {
868 p = p->in(0); // Move control forward
869 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
870 } while( !p->is_block_start() );
872 // Recursively visit
873 for( uint i=1; i<p->req(); i++ )
874 _dump_cfg(p->in(i),visited);
876 // Dump the block
877 _bbs[p->_idx]->dump(&_bbs);
878 }
880 void PhaseCFG::dump( ) const {
881 tty->print("\n--- CFG --- %d BBs\n",_num_blocks);
882 if( _blocks.size() ) { // Did we do basic-block layout?
883 for( uint i=0; i<_num_blocks; i++ )
884 _blocks[i]->dump(&_bbs);
885 } else { // Else do it with a DFS
886 VectorSet visited(_bbs._arena);
887 _dump_cfg(_root,visited);
888 }
889 }
891 void PhaseCFG::dump_headers() {
892 for( uint i = 0; i < _num_blocks; i++ ) {
893 if( _blocks[i] == NULL ) continue;
894 _blocks[i]->dump_head(&_bbs);
895 }
896 }
898 void PhaseCFG::verify( ) const {
899 #ifdef ASSERT
900 // Verify sane CFG
901 for (uint i = 0; i < _num_blocks; i++) {
902 Block *b = _blocks[i];
903 uint cnt = b->_nodes.size();
904 uint j;
905 for (j = 0; j < cnt; j++) {
906 Node *n = b->_nodes[j];
907 assert( _bbs[n->_idx] == b, "" );
908 if (j >= 1 && n->is_Mach() &&
909 n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
910 assert(j == 1 || b->_nodes[j-1]->is_Phi(),
911 "CreateEx must be first instruction in block");
912 }
913 for (uint k = 0; k < n->req(); k++) {
914 Node *def = n->in(k);
915 if (def && def != n) {
916 assert(_bbs[def->_idx] || def->is_Con(),
917 "must have block; constants for debug info ok");
918 // Verify that instructions in the block are in the correct order:
919 // uses must follow their definitions if they are in the same block.
920 // Mostly done to check that MachSpillCopy nodes are placed correctly
921 // when CreateEx node is moved in build_ifg_physical().
922 if (_bbs[def->_idx] == b &&
923 !(b->head()->is_Loop() && n->is_Phi()) &&
924 // See (+++) comment in reg_split.cpp
925 !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
926 bool is_loop = false;
927 if (n->is_Phi()) {
928 for (uint l = 1; l < def->req(); l++) {
929 if (n == def->in(l)) {
930 is_loop = true;
931 break; // Some kind of loop
932 }
933 }
934 }
935 assert(is_loop || b->find_node(def) < j, "uses must follow definitions");
936 }
937 }
938 }
939 }
941 j = b->end_idx();
942 Node *bp = (Node*)b->_nodes[b->_nodes.size()-1]->is_block_proj();
943 assert( bp, "last instruction must be a block proj" );
944 assert( bp == b->_nodes[j], "wrong number of successors for this block" );
945 if (bp->is_Catch()) {
946 while (b->_nodes[--j]->is_MachProj()) ;
947 assert(b->_nodes[j]->is_MachCall(), "CatchProj must follow call");
948 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
949 assert(b->_num_succs == 2, "Conditional branch must have two targets");
950 }
951 }
952 #endif
953 }
954 #endif
956 //=============================================================================
957 //------------------------------UnionFind--------------------------------------
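// Union-Find over block/trace indices. Index 0 is treated specially (Find_const
// ignores it and Union asserts non-zero arguments), and Union always maps the
// larger index to the smaller, so a set's representative is its smallest member.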
958 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
959 Copy::zero_to_bytes( _indices, sizeof(uint)*max );
960 }
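// Record a mapping from from_idx to to_idx, growing the backing array
// (doubling from a minimum of 16 entries) and zero-filling any slots between
// the old count and from_idx first.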
962 void UnionFind::extend( uint from_idx, uint to_idx ) {
963 _nesting.check();
964 if( from_idx >= _max ) {
965 uint size = 16;
966 while( size <= from_idx ) size <<=1;
967 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
968 _max = size;
969 }
970 while( _cnt <= from_idx ) _indices[_cnt++] = 0;
971 _indices[from_idx] = to_idx;
972 }
974 void UnionFind::reset( uint max ) {
975 assert( max <= max_uint, "Must fit within uint" );
976 // Force the Union-Find mapping to be at least this large
977 extend(max,0);
978 // Initialize to be the ID mapping.
979 for( uint i=0; i<max; i++ ) map(i,i);
980 }
982 //------------------------------Find_compress----------------------------------
983 // Straight out of Tarjan's union-find algorithm
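// First follow the chain to its fixed point (the set representative), then walk
// the chain again, pointing every visited entry directly at the representative
// (path compression).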
984 uint UnionFind::Find_compress( uint idx ) {
985 uint cur = idx;
986 uint next = lookup(cur);
987 while( next != cur ) { // Scan chain of equivalences
988 assert( next < cur, "always union smaller" );
989 cur = next; // until find a fixed-point
990 next = lookup(cur);
991 }
992 // Core of union-find algorithm: update chain of
993 // equivalences to be equal to the root.
994 while( idx != next ) {
995 uint tmp = lookup(idx);
996 map(idx, next);
997 idx = tmp;
998 }
999 return idx;
1000 }
1002 //------------------------------Find_const-------------------------------------
1003 // Like Find above, but no path compress, so bad asymptotic behavior
1004 uint UnionFind::Find_const( uint idx ) const {
1005 if( idx == 0 ) return idx; // Ignore the zero idx
1006 // Off the end? This can happen during debugging dumps
1007 // when data structures have not finished being updated.
1008 if( idx >= _max ) return idx;
1009 uint next = lookup(idx);
1010 while( next != idx ) { // Scan chain of equivalences
1011 idx = next; // until find a fixed-point
1012 next = lookup(idx);
1013 }
1014 return next;
1015 }
1017 //------------------------------Union------------------------------------------
1018 // union 2 sets together.
1019 void UnionFind::Union( uint idx1, uint idx2 ) {
1020 uint src = Find(idx1);
1021 uint dst = Find(idx2);
1022 assert( src, "" );
1023 assert( dst, "" );
1024 assert( src < _max, "oob" );
1025 assert( dst < _max, "oob" );
1026 assert( src < dst, "always union smaller" );
1027 map(dst,src);
1028 }
1030 #ifndef PRODUCT
1031 void Trace::dump( ) const {
1032 tty->print_cr("Trace (freq %f)", first_block()->_freq);
1033 for (Block *b = first_block(); b != NULL; b = next(b)) {
1034 tty->print(" B%d", b->_pre_order);
1035 if (b->head()->is_Loop()) {
1036 tty->print(" (L%d)", b->compute_loop_alignment());
1037 }
1038 if (b->has_loop_alignment()) {
1039 tty->print(" (T%d)", b->code_alignment());
1040 }
1041 }
1042 tty->cr();
1043 }
1045 void CFGEdge::dump( ) const {
1046 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
1047 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
1048 switch(state()) {
1049 case connected:
1050 tty->print("connected");
1051 break;
1052 case open:
1053 tty->print("open");
1054 break;
1055 case interior:
1056 tty->print("interior");
1057 break;
1058 }
1059 if (infrequent()) {
1060 tty->print(" infrequent");
1061 }
1062 tty->cr();
1063 }
1064 #endif
1066 //=============================================================================
1068 //------------------------------edge_order-------------------------------------
1069 // Comparison function for edges
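// Sort higher-frequency edges first; on ties, prefer the edge that spans the
// larger forward distance in reverse post-order.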
1070 static int edge_order(CFGEdge **e0, CFGEdge **e1) {
1071 float freq0 = (*e0)->freq();
1072 float freq1 = (*e1)->freq();
1073 if (freq0 != freq1) {
1074 return freq0 > freq1 ? -1 : 1;
1075 }
1077 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
1078 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;
1080 return dist1 - dist0;
1081 }
1083 //------------------------------trace_frequency_order--------------------------
1084 // Comparison function for edges
1085 extern "C" int trace_frequency_order(const void *p0, const void *p1) {
1086 Trace *tr0 = *(Trace **) p0;
1087 Trace *tr1 = *(Trace **) p1;
1088 Block *b0 = tr0->first_block();
1089 Block *b1 = tr1->first_block();
1091 // The trace of connector blocks goes at the end;
1092 // we only expect one such trace
1093 if (b0->is_connector() != b1->is_connector()) {
1094 return b1->is_connector() ? -1 : 1;
1095 }
1097 // Pull more frequently executed blocks to the beginning
1098 float freq0 = b0->_freq;
1099 float freq1 = b1->_freq;
1100 if (freq0 != freq1) {
1101 return freq0 > freq1 ? -1 : 1;
1102 }
1104 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;
1106 return diff;
1107 }
1109 //------------------------------find_edges-------------------------------------
1110 // Find edges of interest, i.e., those which can fall through. Presumes that
1111 // edges which don't fall through are of low frequency and can be generally
1112 // ignored. Initialize the list of traces.
1113 void PhaseBlockLayout::find_edges()
1114 {
1115 // Walk the blocks, creating edges and Traces
1116 uint i;
1117 Trace *tr = NULL;
1118 for (i = 0; i < _cfg._num_blocks; i++) {
1119 Block *b = _cfg._blocks[i];
1120 tr = new Trace(b, next, prev);
1121 traces[tr->id()] = tr;
1123 // All connector blocks should be at the end of the list
1124 if (b->is_connector()) break;
1126 // If this block and the next one have a one-to-one successor
1127 // predecessor relationship, simply append the next block
1128 int nfallthru = b->num_fall_throughs();
1129 while (nfallthru == 1 &&
1130 b->succ_fall_through(0)) {
1131 Block *n = b->_succs[0];
1133 // Skip over single-entry connector blocks, we don't want to
1134 // add them to the trace.
1135 while (n->is_connector() && n->num_preds() == 1) {
1136 n = n->_succs[0];
1137 }
1139 // We see a merge point, so stop search for the next block
1140 if (n->num_preds() != 1) break;
1142 i++;
1143 assert(n == _cfg._blocks[i], "expecting next block");
1144 tr->append(n);
1145 uf->map(n->_pre_order, tr->id());
1146 traces[n->_pre_order] = NULL;
1147 nfallthru = b->num_fall_throughs();
1148 b = n;
1149 }
1151 if (nfallthru > 0) {
1152 // Create a CFGEdge for each outgoing
1153 // edge that could be a fall-through.
1154 for (uint j = 0; j < b->_num_succs; j++ ) {
1155 if (b->succ_fall_through(j)) {
1156 Block *target = b->non_connector_successor(j);
1157 float freq = b->_freq * b->succ_prob(j);
1158 int from_pct = (int) ((100 * freq) / b->_freq);
1159 int to_pct = (int) ((100 * freq) / target->_freq);
1160 edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
1161 }
1162 }
1163 }
1164 }
1166 // Group connector blocks into one trace
1167 for (i++; i < _cfg._num_blocks; i++) {
1168 Block *b = _cfg._blocks[i];
1169 assert(b->is_connector(), "connector blocks at the end");
1170 tr->append(b);
1171 uf->map(b->_pre_order, tr->id());
1172 traces[b->_pre_order] = NULL;
1173 }
1174 }
1176 //------------------------------union_traces----------------------------------
1177 // Union two traces together in uf, and null out the trace in the list
1178 void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace)
1179 {
1180 uint old_id = old_trace->id();
1181 uint updated_id = updated_trace->id();
1183 uint lo_id = updated_id;
1184 uint hi_id = old_id;
1186 // If from is greater than to, swap values to meet
1187 // UnionFind guarantee.
1188 if (updated_id > old_id) {
1189 lo_id = old_id;
1190 hi_id = updated_id;
1192 // Fix up the trace ids
1193 traces[lo_id] = traces[updated_id];
1194 updated_trace->set_id(lo_id);
1195 }
1197 // Union the lower with the higher and remove the pointer
1198 // to the higher.
1199 uf->Union(lo_id, hi_id);
1200 traces[hi_id] = NULL;
1201 }
1203 //------------------------------grow_traces-------------------------------------
1204 // Append traces together via the most frequently executed edges
1205 void PhaseBlockLayout::grow_traces()
1206 {
1207 // Order the edges, and drive the growth of Traces via the most
1208 // frequently executed edges.
1209 edges->sort(edge_order);
1210 for (int i = 0; i < edges->length(); i++) {
1211 CFGEdge *e = edges->at(i);
1213 if (e->state() != CFGEdge::open) continue;
1215 Block *src_block = e->from();
1216 Block *targ_block = e->to();
1218 // Don't grow traces along backedges?
1219 if (!BlockLayoutRotateLoops) {
1220 if (targ_block->_rpo <= src_block->_rpo) {
1221 targ_block->set_loop_alignment(targ_block);
1222 continue;
1223 }
1224 }
1226 Trace *src_trace = trace(src_block);
1227 Trace *targ_trace = trace(targ_block);
1229 // If the edge in question can join two traces at their ends,
1230 // append one trace to the other.
1231 if (src_trace->last_block() == src_block) {
1232 if (src_trace == targ_trace) {
1233 e->set_state(CFGEdge::interior);
1234 if (targ_trace->backedge(e)) {
1235 // Reset i to catch any newly eligible edge
1236 // (Or we could remember the first "open" edge, and reset there)
1237 i = 0;
1238 }
1239 } else if (targ_trace->first_block() == targ_block) {
1240 e->set_state(CFGEdge::connected);
1241 src_trace->append(targ_trace);
1242 union_traces(src_trace, targ_trace);
1243 }
1244 }
1245 }
1246 }
1248 //------------------------------merge_traces-----------------------------------
1249 // Embed one trace into another, if the fork or join points are sufficiently
1250 // balanced.
1251 void PhaseBlockLayout::merge_traces(bool fall_thru_only)
1252 {
1253 // Walk the edge list another time, looking at unprocessed edges.
1254 // Fold in diamonds
1255 for (int i = 0; i < edges->length(); i++) {
1256 CFGEdge *e = edges->at(i);
1258 if (e->state() != CFGEdge::open) continue;
1259 if (fall_thru_only) {
1260 if (e->infrequent()) continue;
1261 }
1263 Block *src_block = e->from();
1264 Trace *src_trace = trace(src_block);
1265 bool src_at_tail = src_trace->last_block() == src_block;
1267 Block *targ_block = e->to();
1268 Trace *targ_trace = trace(targ_block);
1269 bool targ_at_start = targ_trace->first_block() == targ_block;
1271 if (src_trace == targ_trace) {
1272 // This may be a loop, but we can't do much about it.
1273 e->set_state(CFGEdge::interior);
1274 continue;
1275 }
1277 if (fall_thru_only) {
1278 // If the edge links the middle of two traces, we can't do anything.
1279 // Mark the edge and continue.
1280 if (!src_at_tail && !targ_at_start) {
1281 continue;
1282 }
1284 // Don't grow traces along backedges?
1285 if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
1286 continue;
1287 }
1289 // If both ends of the edge are available, why didn't we handle it earlier?
1290 assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");
1292 if (targ_at_start) {
1293 // Insert the "targ" trace in the "src" trace if the insertion point
1294 // is a two way branch.
1295 // Better profitability check possible, but may not be worth it.
1296 // Someday, see if this "fork" has an associated "join";
1297 // then make a policy on merging this trace at the fork or join.
1298 // For example, other things being equal, it may be better to place this
1299 // trace at the join point if the "src" trace ends in a two-way, but
1300 // the insertion point is one-way.
1301 assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
1302 e->set_state(CFGEdge::connected);
1303 src_trace->insert_after(src_block, targ_trace);
1304 union_traces(src_trace, targ_trace);
1305 } else if (src_at_tail) {
1306 if (src_trace != trace(_cfg._broot)) {
1307 e->set_state(CFGEdge::connected);
1308 targ_trace->insert_before(targ_block, src_trace);
1309 union_traces(targ_trace, src_trace);
1310 }
1311 }
1312 } else if (e->state() == CFGEdge::open) {
1313 // Append traces, even without a fall-thru connection.
1314 // But leave root entry at the beginning of the block list.
1315 if (targ_trace != trace(_cfg._broot)) {
1316 e->set_state(CFGEdge::connected);
1317 src_trace->append(targ_trace);
1318 union_traces(src_trace, targ_trace);
1319 }
1320 }
1321 }
1322 }
1324 //----------------------------reorder_traces-----------------------------------
1325 // Order the sequence of the traces in some desirable way, and fixup the
1326 // jumps at the end of each block.
1327 void PhaseBlockLayout::reorder_traces(int count)
1328 {
1329 ResourceArea *area = Thread::current()->resource_area();
1330 Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
1331 Block_List worklist;
1332 int new_count = 0;
1334 // Compact the traces.
1335 for (int i = 0; i < count; i++) {
1336 Trace *tr = traces[i];
1337 if (tr != NULL) {
1338 new_traces[new_count++] = tr;
1339 }
1340 }
1342 // The entry block should be first on the new trace list.
1343 Trace *tr = trace(_cfg._broot);
1344 assert(tr == new_traces[0], "entry trace misplaced");
1346 // Sort the new trace list by frequency
1347 qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
1349 // Patch up the successor blocks
1350 _cfg._blocks.reset();
1351 _cfg._num_blocks = 0;
1352 for (int i = 0; i < new_count; i++) {
1353 Trace *tr = new_traces[i];
1354 if (tr != NULL) {
1355 tr->fixup_blocks(_cfg);
1356 }
1357 }
1358 }
1360 //------------------------------PhaseBlockLayout-------------------------------
1361 // Order basic blocks based on frequency
1362 PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg) :
1363 Phase(BlockLayout),
1364 _cfg(cfg)
1365 {
1366 ResourceMark rm;
1367 ResourceArea *area = Thread::current()->resource_area();
1369 // List of traces
1370 int size = _cfg._num_blocks + 1;
1371 traces = NEW_ARENA_ARRAY(area, Trace *, size);
1372 memset(traces, 0, size*sizeof(Trace*));
1373 next = NEW_ARENA_ARRAY(area, Block *, size);
1374 memset(next, 0, size*sizeof(Block *));
1375 prev = NEW_ARENA_ARRAY(area, Block *, size);
1376 memset(prev , 0, size*sizeof(Block *));
1378 // List of edges
1379 edges = new GrowableArray<CFGEdge*>;
1381 // Mapping block index --> block_trace
1382 uf = new UnionFind(size);
1383 uf->reset(size);
1385 // Find edges and create traces.
1386 find_edges();
1388 // Grow traces at their ends via most frequent edges.
1389 grow_traces();
1391 // Merge one trace into another, but only at fall-through points.
1392 // This may make diamonds and other related shapes in a trace.
1393 merge_traces(true);
1395 // Run merge again, allowing two traces to be catenated, even if
1396 // one does not fall through into the other. This appends loosely
1397 // related traces to be near each other.
1398 merge_traces(false);
1400 // Re-order all the remaining traces by frequency
1401 reorder_traces(size);
1403 assert(_cfg._num_blocks >= (uint) (size - 1), "number of blocks can not shrink");
1404 }
1407 //------------------------------backedge---------------------------------------
1408 // Edge e completes a loop in a trace. If the target block is head of the
1409 // loop, rotate the loop block so that the loop ends in a conditional branch.
1410 bool Trace::backedge(CFGEdge *e) {
1411 bool loop_rotated = false;
1412 Block *src_block = e->from();
1413 Block *targ_block = e->to();
1415 assert(last_block() == src_block, "loop discovery at back branch");
1416 if (first_block() == targ_block) {
1417 if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
1418 // Find the last block in the trace that has a conditional
1419 // branch.
1420 Block *b;
1421 for (b = last_block(); b != NULL; b = prev(b)) {
1422 if (b->num_fall_throughs() == 2) {
1423 break;
1424 }
1425 }
1427 if (b != last_block() && b != NULL) {
1428 loop_rotated = true;
1430 // Rotate the loop by doing two-part linked-list surgery.
1431 append(first_block());
1432 break_loop_after(b);
1433 }
1434 }
1436 // Backbranch to the top of a trace
1437 // Scroll forward through the trace from the targ_block. If we find
1438 // a loop head before another loop top, use the loop head alignment.
1439 for (Block *b = targ_block; b != NULL; b = next(b)) {
1440 if (b->has_loop_alignment()) {
1441 break;
1442 }
1443 if (b->head()->is_Loop()) {
1444 targ_block = b;
1445 break;
1446 }
1447 }
1449 first_block()->set_loop_alignment(targ_block);
1451 } else {
1452 // Backbranch into the middle of a trace
1453 targ_block->set_loop_alignment(targ_block);
1454 }
1456 return loop_rotated;
1457 }
1459 //------------------------------fixup_blocks-----------------------------------
1460 // push blocks onto the CFG list
1461 // ensure that blocks have the correct two-way branch sense
1462 void Trace::fixup_blocks(PhaseCFG &cfg) {
1463 Block *last = last_block();
1464 for (Block *b = first_block(); b != NULL; b = next(b)) {
1465 cfg._blocks.push(b);
1466 cfg._num_blocks++;
1467 if (!b->is_connector()) {
1468 int nfallthru = b->num_fall_throughs();
1469 if (b != last) {
1470 if (nfallthru == 2) {
1471 // Ensure that the sense of the branch is correct
1472 Block *bnext = next(b);
1473 Block *bs0 = b->non_connector_successor(0);
1475 MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
1476 ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
1477 ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
1479 if (bnext == bs0) {
1480 // Fall-thru case in succs[0], should be in succs[1]
1482 // Flip targets in _succs map
1483 Block *tbs0 = b->_succs[0];
1484 Block *tbs1 = b->_succs[1];
1485 b->_succs.map( 0, tbs1 );
1486 b->_succs.map( 1, tbs0 );
1488 // Flip projections to match targets
1489 b->_nodes.map(b->_nodes.size()-2, proj1);
1490 b->_nodes.map(b->_nodes.size()-1, proj0);
1491 }
1492 }
1493 }
1494 }
1495 }
1496 }