Wed, 18 Sep 2013 14:34:56 -0700
8024342: PPC64 (part 111): Support for C calling conventions that require 64-bit ints.
Summary: Some platforms, such as PPC and s390x/zArch, require that 32-bit ints be passed as 64-bit values to C functions. This change adds support for adapting the signature and for issuing proper casts in C2-compiled stubs. The functions are used in generate_native_wrapper(). The signature used by the compiler is adapted as in PhaseIdealLoop::intrinsify_fill().
Reviewed-by: kvn
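Illustrative sketch only (hypothetical names and types, not the helpers this change actually adds): widening the 32-bit int slots of a native call signature so they travel as 64-bit values, after which the code generator would emit sign-extending casts for exactly those slots, might look roughly like this:

#include <vector>

// Hypothetical stand-in for HotSpot's BasicType; only the values used here.
enum BasicType { T_INT, T_LONG, T_FLOAT, T_DOUBLE, T_OBJECT };

// Return an adapted signature in which every 32-bit int slot is widened to a
// 64-bit long, leaving all other argument kinds unchanged.
static std::vector<BasicType> widen_int_args(const std::vector<BasicType>& sig) {
  std::vector<BasicType> adapted;
  adapted.reserve(sig.size());
  for (BasicType bt : sig) {
    adapted.push_back(bt == T_INT ? T_LONG : bt);  // widen ints, keep the rest
  }
  return adapted;
}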
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "libadt/vectset.hpp"
27 #include "memory/allocation.inline.hpp"
28 #include "opto/block.hpp"
29 #include "opto/cfgnode.hpp"
30 #include "opto/chaitin.hpp"
31 #include "opto/loopnode.hpp"
32 #include "opto/machnode.hpp"
33 #include "opto/matcher.hpp"
34 #include "opto/opcodes.hpp"
35 #include "opto/rootnode.hpp"
36 #include "utilities/copy.hpp"
38 void Block_Array::grow( uint i ) {
39 assert(i >= Max(), "must be an overflow");
40 debug_only(_limit = i+1);
41 if( i < _size ) return;
42 if( !_size ) {
43 _size = 1;
44 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
45 _blocks[0] = NULL;
46 }
47 uint old = _size;
48 while( i >= _size ) _size <<= 1; // Double to fit
49 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
50 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
51 }
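// Remove the block at index i, sliding the following entries down by one slot.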
53 void Block_List::remove(uint i) {
54 assert(i < _cnt, "index out of bounds");
55 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
56 pop(); // shrink list by one block
57 }
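// Insert block b at index i, sliding the existing entries from i onward up by one slot.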
59 void Block_List::insert(uint i, Block *b) {
60 push(b); // grow list by one block
61 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
62 _blocks[i] = b;
63 }
65 #ifndef PRODUCT
66 void Block_List::print() {
67 for (uint i=0; i < size(); i++) {
68 tty->print("B%d ", _blocks[i]->_pre_order);
69 }
70 tty->print("size = %d\n", size());
71 }
72 #endif
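// Return the code alignment required at the start of this block: CodeEntryAlignment
// for the root block, InteriorEntryAlignment for the start block, the loop alignment
// for aligned loop heads, and no particular alignment otherwise.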
74 uint Block::code_alignment() {
75 // Check for Root block
76 if (_pre_order == 0) return CodeEntryAlignment;
77 // Check for Start block
78 if (_pre_order == 1) return InteriorEntryAlignment;
79 // Check for loop alignment
80 if (has_loop_alignment()) return loop_alignment();
82 return relocInfo::addr_unit(); // no particular alignment
83 }
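// Compute the alignment to request for a loop head. Only inner loops get the full
// OptoLoopAlignment; pre- and post-loops and loops with a rarely taken backedge get
// little or no extra alignment.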
85 uint Block::compute_loop_alignment() {
86 Node *h = head();
87 int unit_sz = relocInfo::addr_unit();
88 if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
89 // Pre- and post-loops have a low trip count so do not bother with
90 // NOPs to align the loop head. The constants are hidden from tuning
91 // but only because my "divide by 4" heuristic surely gets nearly
92 // all possible gain (a "do not align at all" heuristic has a
93 // chance of getting a really tiny gain).
94 if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
95 h->as_CountedLoop()->is_post_loop())) {
96 return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
97 }
98 // Loops with low backedge frequency should not be aligned.
99 Node *n = h->in(LoopNode::LoopBackControl)->in(0);
100 if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
101 return unit_sz; // Loop does not loop, more often than not!
102 }
103 return OptoLoopAlignment; // Otherwise align loop head
104 }
106 return unit_sz; // no particular alignment
107 }
109 // Compute the size of the first 'inst_cnt' instructions in this block.
110 // Return the number of instructions left to compute if the block has
111 // fewer than 'inst_cnt' instructions. Stop, and return 0 if sum_size
112 // exceeds OptoLoopAlignment.
113 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
114 PhaseRegAlloc* ra) {
115 uint last_inst = _nodes.size();
116 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
117 uint inst_size = _nodes[j]->size(ra);
118 if( inst_size > 0 ) {
119 inst_cnt--;
120 uint sz = sum_size + inst_size;
121 if( sz <= (uint)OptoLoopAlignment ) {
122 // Compute size of instructions which fit into fetch buffer only
123 // since all inst_cnt instructions will not fit even if we align them.
124 sum_size = sz;
125 } else {
126 return 0;
127 }
128 }
129 }
130 return inst_cnt;
131 }
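// Return the index of node n in this block's node list; n must be present.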
133 uint Block::find_node( const Node *n ) const {
134 for( uint i = 0; i < _nodes.size(); i++ ) {
135 if( _nodes[i] == n )
136 return i;
137 }
138 ShouldNotReachHere();
139 return 0;
140 }
142 // Find and remove n from block list
143 void Block::find_remove( const Node *n ) {
144 _nodes.remove(find_node(n));
145 }
147 // Return empty status of a block. Empty blocks contain only the head, other
148 // ideal nodes, and an optional trailing goto.
149 int Block::is_Empty() const {
151 // Root or start block is not considered empty
152 if (head()->is_Root() || head()->is_Start()) {
153 return not_empty;
154 }
156 int success_result = completely_empty;
157 int end_idx = _nodes.size()-1;
159 // Check for ending goto
160 if ((end_idx > 0) && (_nodes[end_idx]->is_MachGoto())) {
161 success_result = empty_with_goto;
162 end_idx--;
163 }
165 // Unreachable blocks are considered empty
166 if (num_preds() <= 1) {
167 return success_result;
168 }
170 // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
171 // turn directly into code, because only MachNodes have non-trivial
172 // emit() functions.
173 while ((end_idx > 0) && !_nodes[end_idx]->is_Mach()) {
174 end_idx--;
175 }
177 // No room for any interesting instructions?
178 if (end_idx == 0) {
179 return success_result;
180 }
182 return not_empty;
183 }
185 // Return true if the block's code implies that it is likely to be
186 // executed infrequently. Check to see if the block ends in a Halt or
187 // a low probability call.
188 bool Block::has_uncommon_code() const {
189 Node* en = end();
191 if (en->is_MachGoto())
192 en = en->in(0);
193 if (en->is_Catch())
194 en = en->in(0);
195 if (en->is_MachProj() && en->in(0)->is_MachCall()) {
196 MachCallNode* call = en->in(0)->as_MachCall();
197 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
198 // This is true for slow-path stubs like new_{instance,array},
199 // slow_arraycopy, complete_monitor_locking, uncommon_trap.
200 // The magic number corresponds to the probability of an uncommon_trap,
201 // even though it is a count not a probability.
202 return true;
203 }
204 }
206 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
207 return op == Op_Halt;
208 }
210 // True if block is low enough frequency or guarded by a test which
211 // mostly does not go here.
212 bool Block::is_uncommon(PhaseCFG* cfg) const {
213 // Initial blocks must never be moved, so are never uncommon.
214 if (head()->is_Root() || head()->is_Start()) return false;
216 // Check for way-low freq
217 if( _freq < BLOCK_FREQUENCY(0.00001f) ) return true;
219 // Look for code shape indicating uncommon_trap or slow path
220 if (has_uncommon_code()) return true;
222 const float epsilon = 0.05f;
223 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
224 uint uncommon_preds = 0;
225 uint freq_preds = 0;
226 uint uncommon_for_freq_preds = 0;
228 for( uint i=1; i<num_preds(); i++ ) {
229 Block* guard = cfg->get_block_for_node(pred(i));
230 // Check to see if this block follows its guard 1 time out of 10000
231 // or less.
232 //
233 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
234 // we intend to be "uncommon", such as slow-path TLE allocation,
235 // predicted call failure, and uncommon trap triggers.
236 //
237 // Use an epsilon value of 5% to allow for variability in frequency
238 // predictions and floating point calculations. The net effect is
239 // that guard_factor is set to 9500.
240 //
241 // Ignore low-frequency blocks.
242 // The next check is (guard->_freq < 1.e-5 * 9500.).
243 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
244 uncommon_preds++;
245 } else {
246 freq_preds++;
247 if( _freq < guard->_freq * guard_factor ) {
248 uncommon_for_freq_preds++;
249 }
250 }
251 }
252 if( num_preds() > 1 &&
253 // The block is uncommon if all preds are uncommon or
254 (uncommon_preds == (num_preds()-1) ||
255 // it is uncommon for all frequent preds.
256 uncommon_for_freq_preds == freq_preds) ) {
257 return true;
258 }
259 return false;
260 }
262 #ifndef PRODUCT
263 void Block::dump_bidx(const Block* orig, outputStream* st) const {
264 if (_pre_order) st->print("B%d",_pre_order);
265 else st->print("N%d", head()->_idx);
267 if (Verbose && orig != this) {
268 // Dump the original block's idx
269 st->print(" (");
270 orig->dump_bidx(orig, st);
271 st->print(")");
272 }
273 }
275 void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
276 if (is_connector()) {
277 for (uint i=1; i<num_preds(); i++) {
278 Block *p = cfg->get_block_for_node(pred(i));
279 p->dump_pred(cfg, orig, st);
280 }
281 } else {
282 dump_bidx(orig, st);
283 st->print(" ");
284 }
285 }
287 void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
288 // Print the basic block
289 dump_bidx(this, st);
290 st->print(": #\t");
292 // Print the incoming CFG edges and the outgoing CFG edges
293 for( uint i=0; i<_num_succs; i++ ) {
294 non_connector_successor(i)->dump_bidx(_succs[i], st);
295 st->print(" ");
296 }
297 st->print("<- ");
298 if( head()->is_block_start() ) {
299 for (uint i=1; i<num_preds(); i++) {
300 Node *s = pred(i);
301 if (cfg != NULL) {
302 Block *p = cfg->get_block_for_node(s);
303 p->dump_pred(cfg, p, st);
304 } else {
305 while (!s->is_block_start())
306 s = s->in(0);
307 st->print("N%d ", s->_idx );
308 }
309 }
310 } else {
311 st->print("BLOCK HEAD IS JUNK ");
312 }
314 // Print loop, if any
315 const Block *bhead = this; // Head of self-loop
316 Node *bh = bhead->head();
318 if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
319 LoopNode *loop = bh->as_Loop();
320 const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
321 while (bx->is_connector()) {
322 bx = cfg->get_block_for_node(bx->pred(1));
323 }
324 st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
325 // Dump any loop-specific bits, especially for CountedLoops.
326 loop->dump_spec(st);
327 } else if (has_loop_alignment()) {
328 st->print(" top-of-loop");
329 }
330 st->print(" Freq: %g",_freq);
331 if( Verbose || WizardMode ) {
332 st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
333 st->print(" RegPressure: %d",_reg_pressure);
334 st->print(" IHRP Index: %d",_ihrp_index);
335 st->print(" FRegPressure: %d",_freg_pressure);
336 st->print(" FHRP Index: %d",_fhrp_index);
337 }
338 st->print_cr("");
339 }
341 void Block::dump() const {
342 dump(NULL);
343 }
345 void Block::dump(const PhaseCFG* cfg) const {
346 dump_head(cfg);
347 for (uint i=0; i< _nodes.size(); i++) {
348 _nodes[i]->dump();
349 }
350 tty->print("\n");
351 }
352 #endif
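// Construct the CFG phase: record the block arena, root and matcher, build a shared
// machine-specific Goto node that can be cloned on demand, and create the basic
// blocks in reverse post order.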
354 PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
355 : Phase(CFG)
356 , _block_arena(arena)
357 , _root(root)
358 , _matcher(matcher)
359 , _node_to_block_mapping(arena)
360 , _node_latency(NULL)
361 #ifndef PRODUCT
362 , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
363 #endif
364 #ifdef ASSERT
365 , _raw_oops(arena)
366 #endif
367 {
368 ResourceMark rm;
369 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
370 // then Match it into a machine-specific Node. Then clone the machine
371 // Node on demand.
372 Node *x = new (C) GotoNode(NULL);
373 x->init_req(0, x);
374 _goto = matcher.match_tree(x);
375 assert(_goto != NULL, "");
376 _goto->set_req(0,_goto);
378 // Build the CFG in Reverse Post Order
379 _number_of_blocks = build_cfg();
380 _root_block = get_block_for_node(_root);
381 }
383 // Build a proper looking CFG. Make every block begin with either a StartNode
384 // or a RegionNode. Make every block end with either a Goto, If or Return.
385 // The RootNode both starts and ends its own block. Do this with a recursive
386 // backwards walk over the control edges.
387 uint PhaseCFG::build_cfg() {
388 Arena *a = Thread::current()->resource_area();
389 VectorSet visited(a);
391 // Allocate stack with enough space to avoid frequent realloc
392 Node_Stack nstack(a, C->unique() >> 1);
393 nstack.push(_root, 0);
394 uint sum = 0; // Counter for blocks
396 while (nstack.is_nonempty()) {
397 // node and in's index from stack's top
398 // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
399 // only nodes which point to the start of a basic block (see below).
400 Node *np = nstack.node();
401 // idx > 0, except for the first node (_root) pushed on stack
402 // at the beginning when idx == 0.
403 // We will use the condition (idx == 0) later to end the build.
404 uint idx = nstack.index();
405 Node *proj = np->in(idx);
406 const Node *x = proj->is_block_proj();
407 // Does the block end with a proper block-ending Node? One of Return,
408 // If or Goto? (This check should be done for visited nodes also).
409 if (x == NULL) { // Does not end right...
410 Node *g = _goto->clone(); // Force it to end in a Goto
411 g->set_req(0, proj);
412 np->set_req(idx, g);
413 x = proj = g;
414 }
415 if (!visited.test_set(x->_idx)) { // Visit this block once
416 // Skip any control-pinned stuff in the middle of the block
417 Node *p = proj;
418 do {
419 proj = p; // Update pointer to last Control
420 p = p->in(0); // Move control forward
421 } while( !p->is_block_proj() &&
422 !p->is_block_start() );
423 // Make the block begin with one of Region or StartNode.
424 if( !p->is_block_start() ) {
425 RegionNode *r = new (C) RegionNode( 2 );
426 r->init_req(1, p); // Insert RegionNode in the way
427 proj->set_req(0, r); // Insert RegionNode in the way
428 p = r;
429 }
430 // 'p' now points to the start of this basic block
432 // Put self in array of basic blocks
433 Block *bb = new (_block_arena) Block(_block_arena, p);
434 map_node_to_block(p, bb);
435 map_node_to_block(x, bb);
436 if( x != p ) { // Only for root is x == p
437 bb->_nodes.push((Node*)x);
438 }
439 // Now handle predecessors
440 ++sum; // Count 1 for self block
441 uint cnt = bb->num_preds();
442 for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
443 Node *prevproj = p->in(i); // Get prior input
444 assert( !prevproj->is_Con(), "dead input not removed" );
445 // Check to see if p->in(i) is a "control-dependent" CFG edge -
446 // i.e., it splits at the source (via an IF or SWITCH) and merges
447 // at the destination (via a many-input Region).
448 // This breaks critical edges. The RegionNode to start the block
449 // will be added when <p,i> is pulled off the node stack
450 if ( cnt > 2 ) { // Merging many things?
451 assert( prevproj== bb->pred(i),"");
452 if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
453 // Force a block on the control-dependent edge
454 Node *g = _goto->clone(); // Force it to end in a Goto
455 g->set_req(0,prevproj);
456 p->set_req(i,g);
457 }
458 }
459 nstack.push(p, i); // 'p' is RegionNode or StartNode
460 }
461 } else { // Post-processing visited nodes
462 nstack.pop(); // remove node from stack
463 // Check if it is the first node pushed on the stack at the beginning.
464 if (idx == 0) break; // end of the build
465 // Find predecessor basic block
466 Block *pb = get_block_for_node(x);
467 // Insert into nodes array, if not already there
468 if (!has_block(proj)) {
469 assert( x != proj, "" );
470 // Map basic block of projection
471 map_node_to_block(proj, pb);
472 pb->_nodes.push(proj);
473 }
474 // Insert self as a child of my predecessor block
475 pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
476 assert( pb->_nodes[ pb->_nodes.size() - pb->_num_succs ]->is_block_proj(),
477 "too many control users, not a CFG?" );
478 }
479 }
480 // Return number of basic blocks for all children and self
481 return sum;
482 }
484 // Inserts a goto & corresponding basic block between
485 // block[block_no] and its succ_no'th successor block
486 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
487 // get block with block_no
488 assert(block_no < number_of_blocks(), "illegal block number");
489 Block* in = get_block(block_no);
490 // get successor block succ_no
491 assert(succ_no < in->_num_succs, "illegal successor number");
492 Block* out = in->_succs[succ_no];
493 // Compute frequency of the new block. Do this before inserting
494 // new block in case succ_prob() needs to infer the probability from
495 // surrounding blocks.
496 float freq = in->_freq * in->succ_prob(succ_no);
497 // get ProjNode corresponding to the succ_no'th successor of the in block
498 ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
499 // create region for basic block
500 RegionNode* region = new (C) RegionNode(2);
501 region->init_req(1, proj);
502 // setup corresponding basic block
503 Block* block = new (_block_arena) Block(_block_arena, region);
504 map_node_to_block(region, block);
505 C->regalloc()->set_bad(region->_idx);
506 // add a goto node
507 Node* gto = _goto->clone(); // get a new goto node
508 gto->set_req(0, region);
509 // add it to the basic block
510 block->_nodes.push(gto);
511 map_node_to_block(gto, block);
512 C->regalloc()->set_bad(gto->_idx);
513 // hook up successor block
514 block->_succs.map(block->_num_succs++, out);
515 // remap successor's predecessors if necessary
516 for (uint i = 1; i < out->num_preds(); i++) {
517 if (out->pred(i) == proj) out->head()->set_req(i, gto);
518 }
519 // remap predecessor's successor to new block
520 in->_succs.map(succ_no, block);
521 // Set the frequency of the new block
522 block->_freq = freq;
523 // add new basic block to basic block list
524 add_block_at(block_no + 1, block);
525 }
527 // Does this block end in a multiway branch that cannot have the default case
528 // flipped for another case?
529 static bool no_flip_branch( Block *b ) {
530 int branch_idx = b->_nodes.size() - b->_num_succs-1;
531 if( branch_idx < 1 ) return false;
532 Node *bra = b->_nodes[branch_idx];
533 if( bra->is_Catch() )
534 return true;
535 if( bra->is_Mach() ) {
536 if( bra->is_MachNullCheck() )
537 return true;
538 int iop = bra->as_Mach()->ideal_Opcode();
539 if( iop == Op_FastLock || iop == Op_FastUnlock )
540 return true;
541 }
542 return false;
543 }
545 // Check for NeverBranch at block end. This needs to become a GOTO to the
546 // true target. NeverBranch nodes are treated as conditional branches that always
547 // go the same direction for most of the optimizer and are used to give a
548 // fake exit path to infinite loops. At this late stage they need to turn
549 // into Goto's so that when you enter the infinite loop you indeed hang.
550 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
551 // Find true target
552 int end_idx = b->end_idx();
553 int idx = b->_nodes[end_idx+1]->as_Proj()->_con;
554 Block *succ = b->_succs[idx];
555 Node* gto = _goto->clone(); // get a new goto node
556 gto->set_req(0, b->head());
557 Node *bp = b->_nodes[end_idx];
558 b->_nodes.map(end_idx,gto); // Slam over NeverBranch
559 map_node_to_block(gto, b);
560 C->regalloc()->set_bad(gto->_idx);
561 b->_nodes.pop(); // Yank projections
562 b->_nodes.pop(); // Yank projections
563 b->_succs.map(0,succ); // Map only successor
564 b->_num_succs = 1;
565 // remap successor's predecessors if necessary
566 uint j;
567 for( j = 1; j < succ->num_preds(); j++)
568 if( succ->pred(j)->in(0) == bp )
569 succ->head()->set_req(j, gto);
570 // Kill alternate exit path
571 Block *dead = b->_succs[1-idx];
572 for( j = 1; j < dead->num_preds(); j++)
573 if( dead->pred(j)->in(0) == bp )
574 break;
575 // Scan through block, yanking dead path from
576 // all regions and phis.
577 dead->head()->del_req(j);
578 for( int k = 1; dead->_nodes[k]->is_Phi(); k++ )
579 dead->_nodes[k]->del_req(j);
580 }
582 // Helper function to move block bx to the slot following b_index. Return
583 // true if the move is successful, otherwise false
584 bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
585 if (bx == NULL) return false;
587 // Return false if bx is already scheduled.
588 uint bx_index = bx->_pre_order;
589 if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
590 return false;
591 }
593 // Find the current index of block bx on the block list
594 bx_index = b_index + 1;
595 while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
596 bx_index++;
597 }
598 assert(get_block(bx_index) == bx, "block not found");
600 // If the previous block conditionally falls into bx, return false,
601 // because moving bx will create an extra jump.
602 for(uint k = 1; k < bx->num_preds(); k++ ) {
603 Block* pred = get_block_for_node(bx->pred(k));
604 if (pred == get_block(bx_index - 1)) {
605 if (pred->_num_succs != 1) {
606 return false;
607 }
608 }
609 }
611 // Reinsert bx just past block 'b'
612 _blocks.remove(bx_index);
613 _blocks.insert(b_index + 1, bx);
614 return true;
615 }
617 // Move empty and uncommon blocks to the end.
618 void PhaseCFG::move_to_end(Block *b, uint i) {
619 int e = b->is_Empty();
620 if (e != Block::not_empty) {
621 if (e == Block::empty_with_goto) {
622 // Remove the goto, but leave the block.
623 b->_nodes.pop();
624 }
625 // Mark this block as a connector block, which will cause it to be
626 // ignored in certain functions such as non_connector_successor().
627 b->set_connector();
628 }
629 // Move the empty block to the end, and don't recheck.
630 _blocks.remove(i);
631 _blocks.push(b);
632 }
634 // Set loop alignment for every block
635 void PhaseCFG::set_loop_alignment() {
636 uint last = number_of_blocks();
637 assert(get_block(0) == get_root_block(), "");
639 for (uint i = 1; i < last; i++) {
640 Block* block = get_block(i);
641 if (block->head()->is_Loop()) {
642 block->set_loop_alignment(block);
643 }
644 }
645 }
647 // Make empty basic blocks into "connector" blocks. Move uncommon blocks
648 // to the end.
649 void PhaseCFG::remove_empty_blocks() {
650 // Move uncommon blocks to the end
651 uint last = number_of_blocks();
652 assert(get_block(0) == get_root_block(), "");
654 for (uint i = 1; i < last; i++) {
655 Block* block = get_block(i);
656 if (block->is_connector()) {
657 break;
658 }
660 // Check for NeverBranch at block end. This needs to become a GOTO to the
661 // true target. NeverBranch nodes are treated as conditional branches that
662 // always go the same direction for most of the optimizer and are used
663 // to give a fake exit path to infinite loops. At this late stage they
664 // need to turn into Goto's so that when you enter the infinite loop you
665 // indeed hang.
666 if (block->_nodes[block->end_idx()]->Opcode() == Op_NeverBranch) {
667 convert_NeverBranch_to_Goto(block);
668 }
670 // Look for uncommon blocks and move to end.
671 if (!C->do_freq_based_layout()) {
672 if (block->is_uncommon(this)) {
673 move_to_end(block, i);
674 last--; // No longer check for being uncommon!
675 if (no_flip_branch(block)) { // Fall-thru case must follow?
676 // Find the fall-thru block
677 block = get_block(i);
678 move_to_end(block, i);
679 last--;
680 }
681 // backup block counter post-increment
682 i--;
683 }
684 }
685 }
687 // Move empty blocks to the end
688 last = number_of_blocks();
689 for (uint i = 1; i < last; i++) {
690 Block* block = get_block(i);
691 if (block->is_Empty() != Block::not_empty) {
692 move_to_end(block, i);
693 last--;
694 i--;
695 }
696 } // End of for all blocks
697 }
699 // Fix up the final control flow for basic blocks.
700 void PhaseCFG::fixup_flow() {
701 // Fixup final control flow for the blocks. Remove jump-to-next
702 // block. If neither arm of an IF follows the conditional branch, we
703 // have to add a second jump after the conditional. We place the
704 // TRUE branch target in succs[0] for both GOTOs and IFs.
705 for (uint i = 0; i < number_of_blocks(); i++) {
706 Block* block = get_block(i);
707 block->_pre_order = i; // turn pre-order into block-index
709 // Connector blocks need no further processing.
710 if (block->is_connector()) {
711 assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
712 continue;
713 }
714 assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");
716 Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
717 Block* bs0 = block->non_connector_successor(0);
719 // Check for multi-way branches where I cannot negate the test to
720 // exchange the true and false targets.
721 if (no_flip_branch(block)) {
722 // Find fall through case - if must fall into its target
723 int branch_idx = block->_nodes.size() - block->_num_succs;
724 for (uint j2 = 0; j2 < block->_num_succs; j2++) {
725 const ProjNode* p = block->_nodes[branch_idx + j2]->as_Proj();
726 if (p->_con == 0) {
727 // successor j2 is fall through case
728 if (block->non_connector_successor(j2) != bnext) {
729 // but it is not the next block => insert a goto
730 insert_goto_at(i, j2);
731 }
732 // Put taken branch in slot 0
733 if (j2 == 0 && block->_num_succs == 2) {
734 // Flip targets in succs map
735 Block *tbs0 = block->_succs[0];
736 Block *tbs1 = block->_succs[1];
737 block->_succs.map(0, tbs1);
738 block->_succs.map(1, tbs0);
739 }
740 break;
741 }
742 }
744 // Remove all CatchProjs
745 for (uint j = 0; j < block->_num_succs; j++) {
746 block->_nodes.pop();
747 }
749 } else if (block->_num_succs == 1) {
750 // Block ends in a Goto?
751 if (bnext == bs0) {
752 // We fall into next block; remove the Goto
753 block->_nodes.pop();
754 }
756 } else if (block->_num_succs == 2) { // Block ends in an If?
757 // Get opcode of 1st projection (matches _succs[0])
758 // Note: Since this basic block has 2 exits, the last 2 nodes must
759 // be projections (in any order), the 3rd last node must be
760 // the IfNode (we have excluded other 2-way exits such as
761 // CatchNodes already).
762 MachNode* iff = block->_nodes[block->_nodes.size() - 3]->as_Mach();
763 ProjNode* proj0 = block->_nodes[block->_nodes.size() - 2]->as_Proj();
764 ProjNode* proj1 = block->_nodes[block->_nodes.size() - 1]->as_Proj();
766 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
767 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
768 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");
770 Block* bs1 = block->non_connector_successor(1);
772 // Check for neither successor block following the current
773 // block ending in a conditional. If so, move one of the
774 // successors after the current one, provided that the
775 // successor was previously unscheduled, but moveable
776 // (i.e., all paths to it involve a branch).
777 if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
778 // Choose the more common successor based on the probability
779 // of the conditional branch.
780 Block* bx = bs0;
781 Block* by = bs1;
783 // _prob is the probability of taking the true path. Make
784 // p the probability of taking successor #1.
785 float p = iff->as_MachIf()->_prob;
786 if (proj0->Opcode() == Op_IfTrue) {
787 p = 1.0 - p;
788 }
790 // Prefer successor #1 if p > 0.5
791 if (p > PROB_FAIR) {
792 bx = bs1;
793 by = bs0;
794 }
796 // Attempt the more common successor first
797 if (move_to_next(bx, i)) {
798 bnext = bx;
799 } else if (move_to_next(by, i)) {
800 bnext = by;
801 }
802 }
804 // Check for conditional branching the wrong way. Negate
805 // conditional, if needed, so it falls into the following block
806 // and branches to the not-following block.
808 // Check for the next block being in succs[0]. We are going to branch
809 // to succs[0], so we want the fall-thru case as the next block in
810 // succs[1].
811 if (bnext == bs0) {
812 // Fall-thru case in succs[0], so flip targets in succs map
813 Block* tbs0 = block->_succs[0];
814 Block* tbs1 = block->_succs[1];
815 block->_succs.map(0, tbs1);
816 block->_succs.map(1, tbs0);
817 // Flip projection for each target
818 ProjNode* tmp = proj0;
819 proj0 = proj1;
820 proj1 = tmp;
822 } else if(bnext != bs1) {
823 // Need a double-branch
824 // The existing conditional branch need not change.
825 // Add an unconditional branch to the false target.
826 // Alas, it must appear in its own block and adding a
827 // block this late in the game is complicated. Sigh.
828 insert_goto_at(i, 1);
829 }
831 // Make sure we TRUE branch to the target
832 if (proj0->Opcode() == Op_IfFalse) {
833 iff->as_MachIf()->negate();
834 }
836 block->_nodes.pop(); // Remove IfFalse & IfTrue projections
837 block->_nodes.pop();
839 } else {
840 // Multi-exit block, e.g. a switch statement
841 // But we don't need to do anything here
842 }
843 } // End of for all blocks
844 }
847 #ifndef PRODUCT
848 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
849 const Node *x = end->is_block_proj();
850 assert( x, "not a CFG" );
852 // Do not visit this block again
853 if( visited.test_set(x->_idx) ) return;
855 // Skip through this block
856 const Node *p = x;
857 do {
858 p = p->in(0); // Move control forward
859 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
860 } while( !p->is_block_start() );
862 // Recursively visit
863 for (uint i = 1; i < p->req(); i++) {
864 _dump_cfg(p->in(i), visited);
865 }
867 // Dump the block
868 get_block_for_node(p)->dump(this);
869 }
871 void PhaseCFG::dump( ) const {
872 tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
873 if (_blocks.size()) { // Did we do basic-block layout?
874 for (uint i = 0; i < number_of_blocks(); i++) {
875 const Block* block = get_block(i);
876 block->dump(this);
877 }
878 } else { // Else do it with a DFS
879 VectorSet visited(_block_arena);
880 _dump_cfg(_root,visited);
881 }
882 }
884 void PhaseCFG::dump_headers() {
885 for (uint i = 0; i < number_of_blocks(); i++) {
886 Block* block = get_block(i);
887 if (block != NULL) {
888 block->dump_head(this);
889 }
890 }
891 }
893 void PhaseCFG::verify() const {
894 #ifdef ASSERT
895 // Verify sane CFG
896 for (uint i = 0; i < number_of_blocks(); i++) {
897 Block* block = get_block(i);
898 uint cnt = block->_nodes.size();
899 uint j;
900 for (j = 0; j < cnt; j++) {
901 Node *n = block->_nodes[j];
902 assert(get_block_for_node(n) == block, "");
903 if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
904 assert(j == 1 || block->_nodes[j-1]->is_Phi(), "CreateEx must be first instruction in block");
905 }
906 for (uint k = 0; k < n->req(); k++) {
907 Node *def = n->in(k);
908 if (def && def != n) {
909 assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
910 // Verify that instructions in the block are in the correct order.
911 // Uses must follow their definitions if they are in the same block.
912 // Mostly done to check that MachSpillCopy nodes are placed correctly
913 // when CreateEx node is moved in build_ifg_physical().
914 if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
915 // See (+++) comment in reg_split.cpp
916 !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
917 bool is_loop = false;
918 if (n->is_Phi()) {
919 for (uint l = 1; l < def->req(); l++) {
920 if (n == def->in(l)) {
921 is_loop = true;
922 break; // Some kind of loop
923 }
924 }
925 }
926 assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
927 }
928 }
929 }
930 }
932 j = block->end_idx();
933 Node* bp = (Node*)block->_nodes[block->_nodes.size() - 1]->is_block_proj();
934 assert(bp, "last instruction must be a block proj");
935 assert(bp == block->_nodes[j], "wrong number of successors for this block");
936 if (bp->is_Catch()) {
937 while (block->_nodes[--j]->is_MachProj()) {
938 ;
939 }
940 assert(block->_nodes[j]->is_MachCall(), "CatchProj must follow call");
941 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
942 assert(block->_num_succs == 2, "Conditional branch must have two targets");
943 }
944 }
945 #endif
946 }
947 #endif
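// Construct a union-find structure over 'max' elements, with every entry initially
// mapped to 0.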
949 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
950 Copy::zero_to_bytes( _indices, sizeof(uint)*max );
951 }
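// Grow the mapping, if necessary, so that 'from_idx' is a valid index, then record
// that it maps to 'to_idx'.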
953 void UnionFind::extend( uint from_idx, uint to_idx ) {
954 _nesting.check();
955 if( from_idx >= _max ) {
956 uint size = 16;
957 while( size <= from_idx ) size <<=1;
958 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
959 _max = size;
960 }
961 while( _cnt <= from_idx ) _indices[_cnt++] = 0;
962 _indices[from_idx] = to_idx;
963 }
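// Reset the structure to the identity mapping over at least 'max' elements.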
965 void UnionFind::reset( uint max ) {
966 assert( max <= max_uint, "Must fit within uint" );
967 // Force the Union-Find mapping to be at least this large
968 extend(max,0);
969 // Initialize to be the ID mapping.
970 for( uint i=0; i<max; i++ ) map(i,i);
971 }
973 // Straight out of Tarjan's union-find algorithm
974 uint UnionFind::Find_compress( uint idx ) {
975 uint cur = idx;
976 uint next = lookup(cur);
977 while( next != cur ) { // Scan chain of equivalences
978 assert( next < cur, "always union smaller" );
979 cur = next; // until find a fixed-point
980 next = lookup(cur);
981 }
982 // Core of union-find algorithm: update chain of
983 // equivalences to be equal to the root.
984 while( idx != next ) {
985 uint tmp = lookup(idx);
986 map(idx, next);
987 idx = tmp;
988 }
989 return idx;
990 }
992 // Like Find above, but with no path compression, so bad asymptotic behavior
993 uint UnionFind::Find_const( uint idx ) const {
994 if( idx == 0 ) return idx; // Ignore the zero idx
995 // Off the end? This can happen during debugging dumps
996 // when data structures have not finished being updated.
997 if( idx >= _max ) return idx;
998 uint next = lookup(idx);
999 while( next != idx ) { // Scan chain of equivalences
1000 idx = next; // until find a fixed-point
1001 next = lookup(idx);
1002 }
1003 return next;
1004 }
1006 // union 2 sets together.
1007 void UnionFind::Union( uint idx1, uint idx2 ) {
1008 uint src = Find(idx1);
1009 uint dst = Find(idx2);
1010 assert( src, "" );
1011 assert( dst, "" );
1012 assert( src < _max, "oob" );
1013 assert( dst < _max, "oob" );
1014 assert( src < dst, "always union smaller" );
1015 map(dst,src);
1016 }
1018 #ifndef PRODUCT
1019 void Trace::dump( ) const {
1020 tty->print_cr("Trace (freq %f)", first_block()->_freq);
1021 for (Block *b = first_block(); b != NULL; b = next(b)) {
1022 tty->print(" B%d", b->_pre_order);
1023 if (b->head()->is_Loop()) {
1024 tty->print(" (L%d)", b->compute_loop_alignment());
1025 }
1026 if (b->has_loop_alignment()) {
1027 tty->print(" (T%d)", b->code_alignment());
1028 }
1029 }
1030 tty->cr();
1031 }
1033 void CFGEdge::dump( ) const {
1034 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
1035 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
1036 switch(state()) {
1037 case connected:
1038 tty->print("connected");
1039 break;
1040 case open:
1041 tty->print("open");
1042 break;
1043 case interior:
1044 tty->print("interior");
1045 break;
1046 }
1047 if (infrequent()) {
1048 tty->print(" infrequent");
1049 }
1050 tty->cr();
1051 }
1052 #endif
1054 // Comparison function for edges
1055 static int edge_order(CFGEdge **e0, CFGEdge **e1) {
1056 float freq0 = (*e0)->freq();
1057 float freq1 = (*e1)->freq();
1058 if (freq0 != freq1) {
1059 return freq0 > freq1 ? -1 : 1;
1060 }
1062 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
1063 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;
1065 return dist1 - dist0;
1066 }
1068 // Comparison function for traces
1069 extern "C" int trace_frequency_order(const void *p0, const void *p1) {
1070 Trace *tr0 = *(Trace **) p0;
1071 Trace *tr1 = *(Trace **) p1;
1072 Block *b0 = tr0->first_block();
1073 Block *b1 = tr1->first_block();
1075 // The trace of connector blocks goes at the end;
1076 // we only expect one such trace
1077 if (b0->is_connector() != b1->is_connector()) {
1078 return b1->is_connector() ? -1 : 1;
1079 }
1081 // Pull more frequently executed blocks to the beginning
1082 float freq0 = b0->_freq;
1083 float freq1 = b1->_freq;
1084 if (freq0 != freq1) {
1085 return freq0 > freq1 ? -1 : 1;
1086 }
1088 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;
1090 return diff;
1091 }
1093 // Find edges of interest, i.e., those which can fall through. Presumes that
1094 // edges which don't fall through are of low frequency and can be generally
1095 // ignored. Initialize the list of traces.
1096 void PhaseBlockLayout::find_edges() {
1097 // Walk the blocks, creating edges and Traces
1098 uint i;
1099 Trace *tr = NULL;
1100 for (i = 0; i < _cfg.number_of_blocks(); i++) {
1101 Block* b = _cfg.get_block(i);
1102 tr = new Trace(b, next, prev);
1103 traces[tr->id()] = tr;
1105 // All connector blocks should be at the end of the list
1106 if (b->is_connector()) break;
1108 // If this block and the next one have a one-to-one successor/
1109 // predecessor relationship, simply append the next block
1110 int nfallthru = b->num_fall_throughs();
1111 while (nfallthru == 1 &&
1112 b->succ_fall_through(0)) {
1113 Block *n = b->_succs[0];
1115 // Skip over single-entry connector blocks, we don't want to
1116 // add them to the trace.
1117 while (n->is_connector() && n->num_preds() == 1) {
1118 n = n->_succs[0];
1119 }
1121 // We see a merge point, so stop search for the next block
1122 if (n->num_preds() != 1) break;
1124 i++;
1125 assert(n == _cfg.get_block(i), "expecting next block");
1126 tr->append(n);
1127 uf->map(n->_pre_order, tr->id());
1128 traces[n->_pre_order] = NULL;
1129 nfallthru = b->num_fall_throughs();
1130 b = n;
1131 }
1133 if (nfallthru > 0) {
1134 // Create a CFGEdge for each outgoing
1135 // edge that could be a fall-through.
1136 for (uint j = 0; j < b->_num_succs; j++ ) {
1137 if (b->succ_fall_through(j)) {
1138 Block *target = b->non_connector_successor(j);
1139 float freq = b->_freq * b->succ_prob(j);
1140 int from_pct = (int) ((100 * freq) / b->_freq);
1141 int to_pct = (int) ((100 * freq) / target->_freq);
1142 edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
1143 }
1144 }
1145 }
1146 }
1148 // Group connector blocks into one trace
1149 for (i++; i < _cfg.number_of_blocks(); i++) {
1150 Block *b = _cfg.get_block(i);
1151 assert(b->is_connector(), "connector blocks at the end");
1152 tr->append(b);
1153 uf->map(b->_pre_order, tr->id());
1154 traces[b->_pre_order] = NULL;
1155 }
1156 }
1158 // Union two traces together in uf, and null out the trace in the list
1159 void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
1160 uint old_id = old_trace->id();
1161 uint updated_id = updated_trace->id();
1163 uint lo_id = updated_id;
1164 uint hi_id = old_id;
1166 // If from is greater than to, swap values to meet
1167 // UnionFind guarantee.
1168 if (updated_id > old_id) {
1169 lo_id = old_id;
1170 hi_id = updated_id;
1172 // Fix up the trace ids
1173 traces[lo_id] = traces[updated_id];
1174 updated_trace->set_id(lo_id);
1175 }
1177 // Union the lower with the higher and remove the pointer
1178 // to the higher.
1179 uf->Union(lo_id, hi_id);
1180 traces[hi_id] = NULL;
1181 }
1183 // Append traces together via the most frequently executed edges
1184 void PhaseBlockLayout::grow_traces() {
1185 // Order the edges, and drive the growth of Traces via the most
1186 // frequently executed edges.
1187 edges->sort(edge_order);
1188 for (int i = 0; i < edges->length(); i++) {
1189 CFGEdge *e = edges->at(i);
1191 if (e->state() != CFGEdge::open) continue;
1193 Block *src_block = e->from();
1194 Block *targ_block = e->to();
1196 // Don't grow traces along backedges?
1197 if (!BlockLayoutRotateLoops) {
1198 if (targ_block->_rpo <= src_block->_rpo) {
1199 targ_block->set_loop_alignment(targ_block);
1200 continue;
1201 }
1202 }
1204 Trace *src_trace = trace(src_block);
1205 Trace *targ_trace = trace(targ_block);
1207 // If the edge in question can join two traces at their ends,
1208 // append one trace to the other.
1209 if (src_trace->last_block() == src_block) {
1210 if (src_trace == targ_trace) {
1211 e->set_state(CFGEdge::interior);
1212 if (targ_trace->backedge(e)) {
1213 // Reset i to catch any newly eligible edge
1214 // (Or we could remember the first "open" edge, and reset there)
1215 i = 0;
1216 }
1217 } else if (targ_trace->first_block() == targ_block) {
1218 e->set_state(CFGEdge::connected);
1219 src_trace->append(targ_trace);
1220 union_traces(src_trace, targ_trace);
1221 }
1222 }
1223 }
1224 }
1226 // Embed one trace into another, if the fork or join points are sufficiently
1227 // balanced.
1228 void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
1229 // Walk the edge list another time, looking at unprocessed edges.
1230 // Fold in diamonds
1231 for (int i = 0; i < edges->length(); i++) {
1232 CFGEdge *e = edges->at(i);
1234 if (e->state() != CFGEdge::open) continue;
1235 if (fall_thru_only) {
1236 if (e->infrequent()) continue;
1237 }
1239 Block *src_block = e->from();
1240 Trace *src_trace = trace(src_block);
1241 bool src_at_tail = src_trace->last_block() == src_block;
1243 Block *targ_block = e->to();
1244 Trace *targ_trace = trace(targ_block);
1245 bool targ_at_start = targ_trace->first_block() == targ_block;
1247 if (src_trace == targ_trace) {
1248 // This may be a loop, but we can't do much about it.
1249 e->set_state(CFGEdge::interior);
1250 continue;
1251 }
1253 if (fall_thru_only) {
1254 // If the edge links the middle of two traces, we can't do anything.
1255 // Mark the edge and continue.
1256 if (!src_at_tail && !targ_at_start) {
1257 continue;
1258 }
1260 // Don't grow traces along backedges?
1261 if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
1262 continue;
1263 }
1265 // If both ends of the edge are available, why didn't we handle it earlier?
1266 assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");
1268 if (targ_at_start) {
1269 // Insert the "targ" trace in the "src" trace if the insertion point
1270 // is a two way branch.
1271 // Better profitability check possible, but may not be worth it.
1272 // Someday, see if this "fork" has an associated "join";
1273 // then make a policy on merging this trace at the fork or join.
1274 // For example, other things being equal, it may be better to place this
1275 // trace at the join point if the "src" trace ends in a two-way, but
1276 // the insertion point is one-way.
1277 assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
1278 e->set_state(CFGEdge::connected);
1279 src_trace->insert_after(src_block, targ_trace);
1280 union_traces(src_trace, targ_trace);
1281 } else if (src_at_tail) {
1282 if (src_trace != trace(_cfg.get_root_block())) {
1283 e->set_state(CFGEdge::connected);
1284 targ_trace->insert_before(targ_block, src_trace);
1285 union_traces(targ_trace, src_trace);
1286 }
1287 }
1288 } else if (e->state() == CFGEdge::open) {
1289 // Append traces, even without a fall-thru connection.
1290 // But leave root entry at the beginning of the block list.
1291 if (targ_trace != trace(_cfg.get_root_block())) {
1292 e->set_state(CFGEdge::connected);
1293 src_trace->append(targ_trace);
1294 union_traces(src_trace, targ_trace);
1295 }
1296 }
1297 }
1298 }
1300 // Order the sequence of the traces in some desirable way, and fixup the
1301 // jumps at the end of each block.
1302 void PhaseBlockLayout::reorder_traces(int count) {
1303 ResourceArea *area = Thread::current()->resource_area();
1304 Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
1305 Block_List worklist;
1306 int new_count = 0;
1308 // Compact the traces.
1309 for (int i = 0; i < count; i++) {
1310 Trace *tr = traces[i];
1311 if (tr != NULL) {
1312 new_traces[new_count++] = tr;
1313 }
1314 }
1316 // The entry block should be first on the new trace list.
1317 Trace *tr = trace(_cfg.get_root_block());
1318 assert(tr == new_traces[0], "entry trace misplaced");
1320 // Sort the new trace list by frequency
1321 qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
1323 // Patch up the successor blocks
1324 _cfg.clear_blocks();
1325 for (int i = 0; i < new_count; i++) {
1326 Trace *tr = new_traces[i];
1327 if (tr != NULL) {
1328 tr->fixup_blocks(_cfg);
1329 }
1330 }
1331 }
1333 // Order basic blocks based on frequency
1334 PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
1335 : Phase(BlockLayout)
1336 , _cfg(cfg) {
1337 ResourceMark rm;
1338 ResourceArea *area = Thread::current()->resource_area();
1340 // List of traces
1341 int size = _cfg.number_of_blocks() + 1;
1342 traces = NEW_ARENA_ARRAY(area, Trace *, size);
1343 memset(traces, 0, size*sizeof(Trace*));
1344 next = NEW_ARENA_ARRAY(area, Block *, size);
1345 memset(next, 0, size*sizeof(Block *));
1346 prev = NEW_ARENA_ARRAY(area, Block *, size);
1347 memset(prev , 0, size*sizeof(Block *));
1349 // List of edges
1350 edges = new GrowableArray<CFGEdge*>;
1352 // Mapping block index --> block_trace
1353 uf = new UnionFind(size);
1354 uf->reset(size);
1356 // Find edges and create traces.
1357 find_edges();
1359 // Grow traces at their ends via most frequent edges.
1360 grow_traces();
1362 // Merge one trace into another, but only at fall-through points.
1363 // This may make diamonds and other related shapes in a trace.
1364 merge_traces(true);
1366 // Run merge again, allowing two traces to be catenated, even if
1367 // one does not fall through into the other. This places loosely
1368 // related traces near each other.
1369 merge_traces(false);
1371 // Re-order all the remaining traces by frequency
1372 reorder_traces(size);
1374 assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
1375 }
1378 // Edge e completes a loop in a trace. If the target block is head of the
1379 // loop, rotate the loop block so that the loop ends in a conditional branch.
1380 bool Trace::backedge(CFGEdge *e) {
1381 bool loop_rotated = false;
1382 Block *src_block = e->from();
1383 Block *targ_block = e->to();
1385 assert(last_block() == src_block, "loop discovery at back branch");
1386 if (first_block() == targ_block) {
1387 if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
1388 // Find the last block in the trace that has a conditional
1389 // branch.
1390 Block *b;
1391 for (b = last_block(); b != NULL; b = prev(b)) {
1392 if (b->num_fall_throughs() == 2) {
1393 break;
1394 }
1395 }
1397 if (b != last_block() && b != NULL) {
1398 loop_rotated = true;
1400 // Rotate the loop by doing two-part linked-list surgery.
1401 append(first_block());
1402 break_loop_after(b);
1403 }
1404 }
1406 // Backbranch to the top of a trace
1407 // Scroll forward through the trace from the targ_block. If we find
1408 // a loop head before another loop top, use the loop head alignment.
1409 for (Block *b = targ_block; b != NULL; b = next(b)) {
1410 if (b->has_loop_alignment()) {
1411 break;
1412 }
1413 if (b->head()->is_Loop()) {
1414 targ_block = b;
1415 break;
1416 }
1417 }
1419 first_block()->set_loop_alignment(targ_block);
1421 } else {
1422 // Backbranch into the middle of a trace
1423 targ_block->set_loop_alignment(targ_block);
1424 }
1426 return loop_rotated;
1427 }
1429 // push blocks onto the CFG list
1430 // ensure that blocks have the correct two-way branch sense
1431 void Trace::fixup_blocks(PhaseCFG &cfg) {
1432 Block *last = last_block();
1433 for (Block *b = first_block(); b != NULL; b = next(b)) {
1434 cfg.add_block(b);
1435 if (!b->is_connector()) {
1436 int nfallthru = b->num_fall_throughs();
1437 if (b != last) {
1438 if (nfallthru == 2) {
1439 // Ensure that the sense of the branch is correct
1440 Block *bnext = next(b);
1441 Block *bs0 = b->non_connector_successor(0);
1443 MachNode *iff = b->_nodes[b->_nodes.size()-3]->as_Mach();
1444 ProjNode *proj0 = b->_nodes[b->_nodes.size()-2]->as_Proj();
1445 ProjNode *proj1 = b->_nodes[b->_nodes.size()-1]->as_Proj();
1447 if (bnext == bs0) {
1448 // Fall-thru case in succs[0], should be in succs[1]
1450 // Flip targets in _succs map
1451 Block *tbs0 = b->_succs[0];
1452 Block *tbs1 = b->_succs[1];
1453 b->_succs.map( 0, tbs1 );
1454 b->_succs.map( 1, tbs0 );
1456 // Flip projections to match targets
1457 b->_nodes.map(b->_nodes.size()-2, proj1);
1458 b->_nodes.map(b->_nodes.size()-1, proj0);
1459 }
1460 }
1461 }
1462 }
1463 }
1464 }