src/share/vm/opto/block.cpp

changeset: 6500:4345c6a92f35
author:    goetz
date:      Mon, 06 Jan 2014 11:02:21 +0100
parent:    6490:41b780b43b74
child:     6503:a9becfeecd1b

8031188: Fix for 8029015: PPC64 (part 216): opto: trap based null and range checks
Summary: Swap the Projs in the block list so that the new block is added behind the proper node.
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "libadt/vectset.hpp"
#include "memory/allocation.inline.hpp"
#include "opto/block.hpp"
#include "opto/cfgnode.hpp"
#include "opto/chaitin.hpp"
#include "opto/loopnode.hpp"
#include "opto/machnode.hpp"
#include "opto/matcher.hpp"
#include "opto/opcodes.hpp"
#include "opto/rootnode.hpp"
#include "utilities/copy.hpp"

void Block_Array::grow( uint i ) {
  assert(i >= Max(), "must be an overflow");
  debug_only(_limit = i+1);
  if( i < _size )  return;
  if( !_size ) {
    _size = 1;
    _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
    _blocks[0] = NULL;
  }
  uint old = _size;
  while( i >= _size ) _size <<= 1; // Double to fit
  _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
  Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
}
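
// Worked example (illustrative numbers, not from the source): with a current
// capacity of 4, grow(5) doubles _size to 8 via the shift loop, reallocates
// the backing store in the arena, and zeroes the new slots _blocks[4..7] so
// that unmapped indices read back as NULL.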

void Block_List::remove(uint i) {
  assert(i < _cnt, "index out of bounds");
  Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
  pop(); // shrink list by one block
}

void Block_List::insert(uint i, Block *b) {
  push(b); // grow list by one block
  Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
  _blocks[i] = b;
}

#ifndef PRODUCT
void Block_List::print() {
  for (uint i=0; i < size(); i++) {
    tty->print("B%d ", _blocks[i]->_pre_order);
  }
  tty->print("size = %d\n", size());
}
#endif

uint Block::code_alignment() {
  // Check for Root block
  if (_pre_order == 0) return CodeEntryAlignment;
  // Check for Start block
  if (_pre_order == 1) return InteriorEntryAlignment;
  // Check for loop alignment
  if (has_loop_alignment()) return loop_alignment();

  return relocInfo::addr_unit(); // no particular alignment
}

uint Block::compute_loop_alignment() {
  Node *h = head();
  int unit_sz = relocInfo::addr_unit();
  if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
    // Pre- and post-loops have low trip count so do not bother with
    // NOPs to align the loop head. The constants are hidden from tuning
    // but only because my "divide by 4" heuristic surely gets nearly
    // all possible gain (a "do not align at all" heuristic has a
    // chance of getting a really tiny gain).
    if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
                                h->as_CountedLoop()->is_post_loop())) {
      return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
    }
    // Loops with low backedge frequency should not be aligned.
    Node *n = h->in(LoopNode::LoopBackControl)->in(0);
    if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
      return unit_sz; // Loop does not loop, more often than not!
    }
    return OptoLoopAlignment; // Otherwise align loop head
  }

  return unit_sz; // no particular alignment
}
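
// Worked example (illustrative numbers): with OptoLoopAlignment == 16 on a
// byte-addressed target (unit_sz == 1), an inner pre- or post-loop head gets
// 16 >> 2 == 4 byte alignment, a loop whose backedge is taken less than 1% of
// the time gets no alignment at all, and any other inner loop head gets the
// full 16 bytes.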

// Compute the size of first 'inst_cnt' instructions in this block.
// Return the number of instructions left to compute if the block has
// fewer than 'inst_cnt' instructions. Stop, and return 0 if sum_size
// exceeds OptoLoopAlignment.
uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
                                    PhaseRegAlloc* ra) {
  uint last_inst = number_of_nodes();
  for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
    uint inst_size = get_node(j)->size(ra);
    if( inst_size > 0 ) {
      inst_cnt--;
      uint sz = sum_size + inst_size;
      if( sz <= (uint)OptoLoopAlignment ) {
        // Compute size of instructions which fit into fetch buffer only
        // since all inst_cnt instructions will not fit even if we align them.
        sum_size = sz;
      } else {
        return 0;
      }
    }
  }
  return inst_cnt;
}
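
// Worked example (illustrative sizes): asking for inst_cnt == 4 in a block
// whose first instructions encode to 4, 8 and 8 bytes with
// OptoLoopAlignment == 16 accumulates sum_size = 4, then 12, then hits
// 20 > 16 on the third instruction and returns 0. A two-instruction block of
// 4-byte instructions would instead return 2, the count still left to
// measure in the following block.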

uint Block::find_node( const Node *n ) const {
  for( uint i = 0; i < number_of_nodes(); i++ ) {
    if( get_node(i) == n )
      return i;
  }
  ShouldNotReachHere();
  return 0;
}

// Find and remove n from block list
void Block::find_remove( const Node *n ) {
  remove_node(find_node(n));
}

bool Block::contains(const Node *n) const {
  return _nodes.contains(n);
}

// Return empty status of a block. Empty blocks contain only the head, other
// ideal nodes, and an optional trailing goto.
int Block::is_Empty() const {

  // Root or start block is not considered empty
  if (head()->is_Root() || head()->is_Start()) {
    return not_empty;
  }

  int success_result = completely_empty;
  int end_idx = number_of_nodes() - 1;

  // Check for ending goto
  if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
    success_result = empty_with_goto;
    end_idx--;
  }

  // Unreachable blocks are considered empty
  if (num_preds() <= 1) {
    return success_result;
  }

  // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
  // turn directly into code, because only MachNodes have non-trivial
  // emit() functions.
  while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
    end_idx--;
  }

  // No room for any interesting instructions?
  if (end_idx == 0) {
    return success_result;
  }

  return not_empty;
}
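
// Example (illustrative): a block holding only its Region head, a Phi (an
// ideal node with a trivial emit) and a trailing MachGoto classifies as
// empty_with_goto; drop the goto and it is completely_empty; add any other
// MachNode and it is not_empty.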

// Return true if the block's code implies that it is likely to be
// executed infrequently. Check to see if the block ends in a Halt or
// a low probability call.
bool Block::has_uncommon_code() const {
  Node* en = end();

  if (en->is_MachGoto())
    en = en->in(0);
  if (en->is_Catch())
    en = en->in(0);
  if (en->is_MachProj() && en->in(0)->is_MachCall()) {
    MachCallNode* call = en->in(0)->as_MachCall();
    if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
      // This is true for slow-path stubs like new_{instance,array},
      // slow_arraycopy, complete_monitor_locking, uncommon_trap.
      // The magic number corresponds to the probability of an uncommon_trap,
      // even though it is a count not a probability.
      return true;
    }
  }

  int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
  return op == Op_Halt;
}

// True if block is low enough frequency or guarded by a test which
// mostly does not go here.
bool PhaseCFG::is_uncommon(const Block* block) {
  // Initial blocks must never be moved, so are never uncommon.
  if (block->head()->is_Root() || block->head()->is_Start())  return false;

  // Check for way-low freq
  if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;

  // Look for code shape indicating uncommon_trap or slow path
  if (block->has_uncommon_code()) return true;

  const float epsilon = 0.05f;
  const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
  uint uncommon_preds = 0;
  uint freq_preds = 0;
  uint uncommon_for_freq_preds = 0;

  for( uint i=1; i< block->num_preds(); i++ ) {
    Block* guard = get_block_for_node(block->pred(i));
    // Check to see if this block follows its guard 1 time out of 10000
    // or less.
    //
    // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
    // we intend to be "uncommon", such as slow-path TLE allocation,
    // predicted call failure, and uncommon trap triggers.
    //
    // Use an epsilon value of 5% to allow for variability in frequency
    // predictions and floating point calculations. The net effect is
    // that guard_factor is set to 9500.
    //
    // Ignore low-frequency blocks.
    // The next check is (guard->_freq < 1.e-5 * 9500.).
    if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
      uncommon_preds++;
    } else {
      freq_preds++;
      if(block->_freq < guard->_freq * guard_factor ) {
        uncommon_for_freq_preds++;
      }
    }
  }
  if( block->num_preds() > 1 &&
      // The block is uncommon if all preds are uncommon or
      (uncommon_preds == (block->num_preds()-1) ||
      // it is uncommon for all frequent preds.
       uncommon_for_freq_preds == freq_preds) ) {
    return true;
  }
  return false;
}
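
// Worked numbers (derived from the constants above): PROB_UNLIKELY_MAG(4) is
// 1e-4, so guard_factor == 1e-4 / 0.95 ~= 1.053e-4 == 1/9500. A block is then
// "uncommon for" a frequent guard when its frequency is below roughly 1/9500
// of the guard's, which is the "guard_factor is set to 9500" reading in the
// comment above.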

#ifndef PRODUCT
void Block::dump_bidx(const Block* orig, outputStream* st) const {
  if (_pre_order) st->print("B%d",_pre_order);
  else st->print("N%d", head()->_idx);

  if (Verbose && orig != this) {
    // Dump the original block's idx
    st->print(" (");
    orig->dump_bidx(orig, st);
    st->print(")");
  }
}

void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
  if (is_connector()) {
    for (uint i=1; i<num_preds(); i++) {
      Block *p = cfg->get_block_for_node(pred(i));
      p->dump_pred(cfg, orig, st);
    }
  } else {
    dump_bidx(orig, st);
    st->print(" ");
  }
}

void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
  // Print the basic block
  dump_bidx(this, st);
  st->print(": #\t");

  // Print the incoming CFG edges and the outgoing CFG edges
  for( uint i=0; i<_num_succs; i++ ) {
    non_connector_successor(i)->dump_bidx(_succs[i], st);
    st->print(" ");
  }
  st->print("<- ");
  if( head()->is_block_start() ) {
    for (uint i=1; i<num_preds(); i++) {
      Node *s = pred(i);
      if (cfg != NULL) {
        Block *p = cfg->get_block_for_node(s);
        p->dump_pred(cfg, p, st);
      } else {
        while (!s->is_block_start())
          s = s->in(0);
        st->print("N%d ", s->_idx );
      }
    }
  } else {
    st->print("BLOCK HEAD IS JUNK ");
  }

  // Print loop, if any
  const Block *bhead = this;    // Head of self-loop
  Node *bh = bhead->head();

  if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
    LoopNode *loop = bh->as_Loop();
    const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
    while (bx->is_connector()) {
      bx = cfg->get_block_for_node(bx->pred(1));
    }
    st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
    // Dump any loop-specific bits, especially for CountedLoops.
    loop->dump_spec(st);
  } else if (has_loop_alignment()) {
    st->print(" top-of-loop");
  }
  st->print(" Freq: %g",_freq);
  if( Verbose || WizardMode ) {
    st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
    st->print(" RegPressure: %d",_reg_pressure);
    st->print(" IHRP Index: %d",_ihrp_index);
    st->print(" FRegPressure: %d",_freg_pressure);
    st->print(" FHRP Index: %d",_fhrp_index);
  }
  st->print_cr("");
}

void Block::dump() const {
  dump(NULL);
}

void Block::dump(const PhaseCFG* cfg) const {
  dump_head(cfg);
  for (uint i=0; i< number_of_nodes(); i++) {
    get_node(i)->dump();
  }
  tty->print("\n");
}
#endif

PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
: Phase(CFG)
, _block_arena(arena)
, _root(root)
, _matcher(matcher)
, _node_to_block_mapping(arena)
, _node_latency(NULL)
#ifndef PRODUCT
, _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
#endif
#ifdef ASSERT
, _raw_oops(arena)
#endif
{
  ResourceMark rm;
  // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
  // then Match it into a machine-specific Node. Then clone the machine
  // Node on demand.
  Node *x = new (C) GotoNode(NULL);
  x->init_req(0, x);
  _goto = matcher.match_tree(x);
  assert(_goto != NULL, "");
  _goto->set_req(0,_goto);

  // Build the CFG in Reverse Post Order
  _number_of_blocks = build_cfg();
  _root_block = get_block_for_node(_root);
}

// Build a proper looking CFG. Make every block begin with either a StartNode
// or a RegionNode. Make every block end with either a Goto, If or Return.
// The RootNode both starts and ends its own block. Do this with a recursive
// backwards walk over the control edges.
uint PhaseCFG::build_cfg() {
  Arena *a = Thread::current()->resource_area();
  VectorSet visited(a);

  // Allocate stack with enough space to avoid frequent realloc
  Node_Stack nstack(a, C->unique() >> 1);
  nstack.push(_root, 0);
  uint sum = 0;                 // Counter for blocks

  while (nstack.is_nonempty()) {
    // node and in's index from stack's top
    // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
    // only nodes which point to the start of basic block (see below).
    Node *np = nstack.node();
    // idx > 0, except for the first node (_root) pushed on stack
    // at the beginning when idx == 0.
    // We will use the condition (idx == 0) later to end the build.
    uint idx = nstack.index();
    Node *proj = np->in(idx);
    const Node *x = proj->is_block_proj();
    // Does the block end with a proper block-ending Node? One of Return,
    // If or Goto? (This check should be done for visited nodes also).
    if (x == NULL) {                    // Does not end right...
      Node *g = _goto->clone();         // Force it to end in a Goto
      g->set_req(0, proj);
      np->set_req(idx, g);
      x = proj = g;
    }
    if (!visited.test_set(x->_idx)) { // Visit this block once
      // Skip any control-pinned middle'in stuff
      Node *p = proj;
      do {
        proj = p;                   // Update pointer to last Control
        p = p->in(0);               // Move control forward
      } while( !p->is_block_proj() &&
               !p->is_block_start() );
      // Make the block begin with one of Region or StartNode.
      if( !p->is_block_start() ) {
        RegionNode *r = new (C) RegionNode( 2 );
        r->init_req(1, p);          // Insert RegionNode in the way
        proj->set_req(0, r);        // Insert RegionNode in the way
        p = r;
      }
      // 'p' now points to the start of this basic block

      // Put self in array of basic blocks
      Block *bb = new (_block_arena) Block(_block_arena, p);
      map_node_to_block(p, bb);
      map_node_to_block(x, bb);
      if( x != p ) {                // Only for root is x == p
        bb->push_node((Node*)x);
      }
      // Now handle predecessors
      ++sum;                        // Count 1 for self block
      uint cnt = bb->num_preds();
      for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
        Node *prevproj = p->in(i);  // Get prior input
        assert( !prevproj->is_Con(), "dead input not removed" );
        // Check to see if p->in(i) is a "control-dependent" CFG edge -
        // i.e., it splits at the source (via an IF or SWITCH) and merges
        // at the destination (via a many-input Region).
        // This breaks critical edges. The RegionNode to start the block
        // will be added when <p,i> is pulled off the node stack
        if ( cnt > 2 ) {            // Merging many things?
          assert( prevproj== bb->pred(i),"");
          if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
            // Force a block on the control-dependent edge
            Node *g = _goto->clone();       // Force it to end in a Goto
            g->set_req(0,prevproj);
            p->set_req(i,g);
          }
        }
        nstack.push(p, i);          // 'p' is RegionNode or StartNode
      }
    } else { // Post-processing visited nodes
      nstack.pop();                 // remove node from stack
      // Check if it is the first node pushed on stack at the beginning.
      if (idx == 0) break;          // end of the build
      // Find predecessor basic block
      Block *pb = get_block_for_node(x);
      // Insert into nodes array, if not already there
      if (!has_block(proj)) {
        assert( x != proj, "" );
        // Map basic block of projection
        map_node_to_block(proj, pb);
        pb->push_node(proj);
      }
      // Insert self as a child of my predecessor block
      pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
      assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
              "too many control users, not a CFG?" );
    }
  }
  // Return number of basic blocks for all children and self
  return sum;
}
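
// Example (illustrative): for an If whose true projection feeds a 3-input
// Region, the edge both splits (at the If) and merges (at the Region), so the
// walk above clones _goto onto that input; when <Region,i> is later popped,
// the Goto gets its own single-predecessor block and the critical edge is
// gone.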

// Inserts a goto & corresponding basic block between
// block[block_no] and its succ_no'th successor block
void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
  // get block with block_no
  assert(block_no < number_of_blocks(), "illegal block number");
  Block* in  = get_block(block_no);
  // get successor block succ_no
  assert(succ_no < in->_num_succs, "illegal successor number");
  Block* out = in->_succs[succ_no];
  // Compute frequency of the new block. Do this before inserting
  // new block in case succ_prob() needs to infer the probability from
  // surrounding blocks.
  float freq = in->_freq * in->succ_prob(succ_no);
  // get ProjNode corresponding to the succ_no'th successor of the in block
  ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
  // create region for basic block
  RegionNode* region = new (C) RegionNode(2);
  region->init_req(1, proj);
  // setup corresponding basic block
  Block* block = new (_block_arena) Block(_block_arena, region);
  map_node_to_block(region, block);
  C->regalloc()->set_bad(region->_idx);
  // add a goto node
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, region);
  // add it to the basic block
  block->push_node(gto);
  map_node_to_block(gto, block);
  C->regalloc()->set_bad(gto->_idx);
  // hook up successor block
  block->_succs.map(block->_num_succs++, out);
  // remap successor's predecessors if necessary
  for (uint i = 1; i < out->num_preds(); i++) {
    if (out->pred(i) == proj) out->head()->set_req(i, gto);
  }
  // remap predecessor's successor to new block
  in->_succs.map(succ_no, block);
  // Set the frequency of the new block
  block->_freq = freq;
  // add new basic block to basic block list
  add_block_at(block_no + 1, block);
}
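
// Example (illustrative values): if B3 has _freq == 10 and branches to B7
// with probability 0.3, the inserted block gets _freq == 3, replaces B7 as
// B3's succ_no'th successor, and is placed at list position block_no + 1 so
// it can fall through from B3 and jump on to B7.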

// Does this block end in a multiway branch that cannot have the default case
// flipped for another case?
static bool no_flip_branch(Block *b) {
  int branch_idx = b->number_of_nodes() - b->_num_succs-1;
  if (branch_idx < 1) {
    return false;
  }
  Node *branch = b->get_node(branch_idx);
  if (branch->is_Catch()) {
    return true;
  }
  if (branch->is_Mach()) {
    if (branch->is_MachNullCheck()) {
      return true;
    }
    int iop = branch->as_Mach()->ideal_Opcode();
    if (iop == Op_FastLock || iop == Op_FastUnlock) {
      return true;
    }
    // Don't flip if branch has an implicit check.
    if (branch->as_Mach()->is_TrapBasedCheckNode()) {
      return true;
    }
  }
  return false;
}

// Check for NeverBranch at block end. This needs to become a GOTO to the
// true target. NeverBranch are treated as a conditional branch that always
// goes the same direction for most of the optimizer and are used to give a
// fake exit path to infinite loops. At this late stage they need to turn
// into Goto's so that when you enter the infinite loop you indeed hang.
void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
  // Find true target
  int end_idx = b->end_idx();
  int idx = b->get_node(end_idx+1)->as_Proj()->_con;
  Block *succ = b->_succs[idx];
  Node* gto = _goto->clone(); // get a new goto node
  gto->set_req(0, b->head());
  Node *bp = b->get_node(end_idx);
  b->map_node(gto, end_idx); // Slam over NeverBranch
  map_node_to_block(gto, b);
  C->regalloc()->set_bad(gto->_idx);
  b->pop_node();              // Yank projections
  b->pop_node();              // Yank projections
  b->_succs.map(0,succ);      // Map only successor
  b->_num_succs = 1;
  // remap successor's predecessors if necessary
  uint j;
  for( j = 1; j < succ->num_preds(); j++)
    if( succ->pred(j)->in(0) == bp )
      succ->head()->set_req(j, gto);
  // Kill alternate exit path
  Block *dead = b->_succs[1-idx];
  for( j = 1; j < dead->num_preds(); j++)
    if( dead->pred(j)->in(0) == bp )
      break;
  // Scan through block, yanking dead path from
  // all regions and phis.
  dead->head()->del_req(j);
  for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
    dead->get_node(k)->del_req(j);
}

// Helper function to move block bx to the slot following b_index. Return
// true if the move is successful, otherwise false
bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
  if (bx == NULL) return false;

  // Return false if bx is already scheduled.
  uint bx_index = bx->_pre_order;
  if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
    return false;
  }

  // Find the current index of block bx on the block list
  bx_index = b_index + 1;
  while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
    bx_index++;
  }
  assert(get_block(bx_index) == bx, "block not found");

  // If the previous block conditionally falls into bx, return false,
  // because moving bx will create an extra jump.
  for(uint k = 1; k < bx->num_preds(); k++ ) {
    Block* pred = get_block_for_node(bx->pred(k));
    if (pred == get_block(bx_index - 1)) {
      if (pred->_num_succs != 1) {
        return false;
      }
    }
  }

  // Reinsert bx just past block 'b'
  _blocks.remove(bx_index);
  _blocks.insert(b_index + 1, bx);
  return true;
}

// Move empty and uncommon blocks to the end.
void PhaseCFG::move_to_end(Block *b, uint i) {
  int e = b->is_Empty();
  if (e != Block::not_empty) {
    if (e == Block::empty_with_goto) {
      // Remove the goto, but leave the block.
      b->pop_node();
    }
    // Mark this block as a connector block, which will cause it to be
    // ignored in certain functions such as non_connector_successor().
    b->set_connector();
  }
  // Move the empty block to the end, and don't recheck.
  _blocks.remove(i);
  _blocks.push(b);
}

// Set loop alignment for every block
void PhaseCFG::set_loop_alignment() {
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->head()->is_Loop()) {
      block->set_loop_alignment(block);
    }
  }
}

// Make empty basic blocks "connector" blocks; move uncommon blocks
// to the end.
void PhaseCFG::remove_empty_blocks() {
  // Move uncommon blocks to the end
  uint last = number_of_blocks();
  assert(get_block(0) == get_root_block(), "");

  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_connector()) {
      break;
    }

    // Check for NeverBranch at block end. This needs to become a GOTO to the
    // true target. NeverBranch are treated as a conditional branch that
    // always goes the same direction for most of the optimizer and are used
    // to give a fake exit path to infinite loops. At this late stage they
    // need to turn into Goto's so that when you enter the infinite loop you
    // indeed hang.
    if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
      convert_NeverBranch_to_Goto(block);
    }

    // Look for uncommon blocks and move to end.
    if (!C->do_freq_based_layout()) {
      if (is_uncommon(block)) {
        move_to_end(block, i);
        last--; // No longer check for being uncommon!
        if (no_flip_branch(block)) { // Fall-thru case must follow?
          // Find the fall-thru block
          block = get_block(i);
          move_to_end(block, i);
          last--;
        }
        // backup block counter post-increment
        i--;
      }
    }
  }

  // Move empty blocks to the end
  last = number_of_blocks();
  for (uint i = 1; i < last; i++) {
    Block* block = get_block(i);
    if (block->is_Empty() != Block::not_empty) {
      move_to_end(block, i);
      last--;
      i--;
    }
  } // End of for all blocks
}

Block *PhaseCFG::fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext) {
  // Trap based checks must fall through to the successor with
  // PROB_ALWAYS.
  // They should be an If with 2 successors.
  assert(branch->is_MachIf(),   "must be If");
  assert(block->_num_succs == 2, "must have 2 successors");

  // Get the If node and the projection for the first successor.
  MachIfNode *iff   = block->get_node(block->number_of_nodes()-3)->as_MachIf();
  ProjNode   *proj0 = block->get_node(block->number_of_nodes()-2)->as_Proj();
  ProjNode   *proj1 = block->get_node(block->number_of_nodes()-1)->as_Proj();
  ProjNode   *projt = (proj0->Opcode() == Op_IfTrue)  ? proj0 : proj1;
  ProjNode   *projf = (proj0->Opcode() == Op_IfFalse) ? proj0 : proj1;

  // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
  assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
  assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

  ProjNode *proj_always;
  ProjNode *proj_never;
  // We must negate the branch if the implicit check doesn't follow
  // the branch's TRUE path. Then, the new TRUE branch target will
  // be the old FALSE branch target.
  if (iff->_prob <= 2*PROB_NEVER) {   // There are small rounding errors.
    proj_never  = projt;
    proj_always = projf;
  } else {
    // We must negate the branch if the trap doesn't follow the
    // branch's TRUE path. Then, the new TRUE branch target will
    // be the old FALSE branch target.
    proj_never  = projf;
    proj_always = projt;
    iff->negate();
  }
  assert(iff->_prob <= 2*PROB_NEVER, "Trap based checks are expected to trap never!");
  // Map the successors properly
  block->_succs.map(0, get_block_for_node(proj_never ->raw_out(0)));   // The target of the trap.
  block->_succs.map(1, get_block_for_node(proj_always->raw_out(0)));   // The fall through target.

  if (block->get_node(block->number_of_nodes() - block->_num_succs + 1) != proj_always) {
    block->map_node(proj_never,  block->number_of_nodes() - block->_num_succs + 0);
    block->map_node(proj_always, block->number_of_nodes() - block->_num_succs + 1);
  }

  // Place the fall through block after this block.
  Block *bs1 = block->non_connector_successor(1);
  if (bs1 != bnext && move_to_next(bs1, block_pos)) {
    bnext = bs1;
  }
  // If the fall through block still is not the next block, insert a goto.
  if (bs1 != bnext) {
    insert_goto_at(block_pos, 1);
  }
  return bnext;
}
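
// Why the Proj swap above matters (illustrative reading of this changeset's
// fix): the two projections must appear in the node list in the same order as
// the remapped successors, so proj_never lines up with succs[0] and
// proj_always with succs[1]; a later insert_goto_at() indexes the Proj by
// successor number and would otherwise splice the new block behind the wrong
// projection.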

// Fix up the final control flow for basic blocks.
void PhaseCFG::fixup_flow() {
  // Fixup final control flow for the blocks. Remove jump-to-next
  // block. If neither arm of an IF follows the conditional branch, we
  // have to add a second jump after the conditional. We place the
  // TRUE branch target in succs[0] for both GOTOs and IFs.
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block* block = get_block(i);
    block->_pre_order = i;      // turn pre-order into block-index

    // Connector blocks need no further processing.
    if (block->is_connector()) {
      assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
      continue;
    }
    assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");

    Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
    Block* bs0 = block->non_connector_successor(0);

    // Check for multi-way branches where I cannot negate the test to
    // exchange the true and false targets.
    if (no_flip_branch(block)) {
      // Find fall through case - the If must fall into its target.
      // Get the index of the branch's first successor.
      int branch_idx = block->number_of_nodes() - block->_num_succs;

      // The branch is 1 before the branch's first successor.
      Node *branch = block->get_node(branch_idx-1);

      // Handle no-flip branches which have implicit checks and which require
      // special block ordering and individual semantics of the 'fall through
      // case'.
      if ((TrapBasedNullChecks || TrapBasedRangeChecks) &&
          branch->is_Mach() && branch->as_Mach()->is_TrapBasedCheckNode()) {
        bnext = fixup_trap_based_check(branch, block, i, bnext);
      } else {
        // Else, default handling for no-flip branches
        for (uint j2 = 0; j2 < block->_num_succs; j2++) {
          const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
          if (p->_con == 0) {
            // successor j2 is fall through case
            if (block->non_connector_successor(j2) != bnext) {
              // but it is not the next block => insert a goto
              insert_goto_at(i, j2);
            }
            // Put taken branch in slot 0
            if (j2 == 0 && block->_num_succs == 2) {
              // Flip targets in succs map
              Block *tbs0 = block->_succs[0];
              Block *tbs1 = block->_succs[1];
              block->_succs.map(0, tbs1);
              block->_succs.map(1, tbs0);
            }
            break;
          }
        }
      }

      // Remove all CatchProjs
      for (uint j = 0; j < block->_num_succs; j++) {
        block->pop_node();
      }

    } else if (block->_num_succs == 1) {
      // Block ends in a Goto?
      if (bnext == bs0) {
        // We fall into next block; remove the Goto
        block->pop_node();
      }

    } else if(block->_num_succs == 2) { // Block ends in an If?
      // Get opcode of 1st projection (matches _succs[0])
      // Note: Since this basic block has 2 exits, the last 2 nodes must
      //       be projections (in any order), the 3rd last node must be
      //       the IfNode (we have excluded other 2-way exits such as
      //       CatchNodes already).
      MachNode* iff   = block->get_node(block->number_of_nodes() - 3)->as_Mach();
      ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
      ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();

      // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
      assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
      assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");

      Block* bs1 = block->non_connector_successor(1);

      // Check for neither successor block following the current
      // block ending in a conditional. If so, move one of the
      // successors after the current one, provided that the
      // successor was previously unscheduled, but moveable
      // (i.e., all paths to it involve a branch).
      if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
        // Choose the more common successor based on the probability
        // of the conditional branch.
        Block* bx = bs0;
        Block* by = bs1;

        // _prob is the probability of taking the true path. Make
        // p the probability of taking successor #1.
        float p = iff->as_MachIf()->_prob;
        if (proj0->Opcode() == Op_IfTrue) {
          p = 1.0 - p;
        }

        // Prefer successor #1 if p > 0.5
        if (p > PROB_FAIR) {
          bx = bs1;
          by = bs0;
        }

        // Attempt the more common successor first
        if (move_to_next(bx, i)) {
          bnext = bx;
        } else if (move_to_next(by, i)) {
          bnext = by;
        }
      }

      // Check for conditional branching the wrong way. Negate
      // conditional, if needed, so it falls into the following block
      // and branches to the not-following block.

      // Check for the next block being in succs[0]. We are going to branch
      // to succs[0], so we want the fall-thru case as the next block in
      // succs[1].
      if (bnext == bs0) {
        // Fall-thru case in succs[0], so flip targets in succs map
        Block* tbs0 = block->_succs[0];
        Block* tbs1 = block->_succs[1];
        block->_succs.map(0, tbs1);
        block->_succs.map(1, tbs0);
        // Flip projection for each target
        ProjNode* tmp = proj0;
        proj0 = proj1;
        proj1 = tmp;

      } else if(bnext != bs1) {
        // Need a double-branch
        // The existing conditional branch need not change.
        // Add an unconditional branch to the false target.
        // Alas, it must appear in its own block and adding a
        // block this late in the game is complicated. Sigh.
        insert_goto_at(i, 1);
      }

      // Make sure we TRUE branch to the target
      if (proj0->Opcode() == Op_IfFalse) {
        iff->as_MachIf()->negate();
      }

      block->pop_node();          // Remove IfFalse & IfTrue projections
      block->pop_node();

    } else {
      // Multi-exit block, e.g. a switch statement
      // But we don't need to do anything here
    }
  } // End of for all blocks
}


// postalloc_expand: Expand nodes after register allocation.
//
// postalloc_expand has to be called after register allocation, just
// before output (i.e. scheduling). It only gets called if
// Matcher::require_postalloc_expand is true.
//
// Background:
//
// Nodes that are expanded (one compound node requiring several
// assembler instructions to be implemented split into two or more
// non-compound nodes) after register allocation are not as nice as
// the ones expanded before register allocation - they don't
// participate in optimizations such as global code motion. But after
// register allocation we can expand nodes that use registers which
// are not spillable or registers that are not allocated, because the
// old compound node is simply replaced (in its location in the basic
// block) by a new subgraph which does not contain compound nodes any
// more. The scheduler called during output can later on process these
// non-compound nodes.
//
// Implementation:
//
// Nodes requiring postalloc expand are specified in the ad file by using
// a postalloc_expand statement instead of ins_encode. A postalloc_expand
// contains a single call to an encoding, as does an ins_encode
// statement. Instead of an emit() function a postalloc_expand() function
// is generated that doesn't emit assembler but creates a new
// subgraph. The code below calls this postalloc_expand function for each
// node with the appropriate attribute. This function returns the new
// nodes generated in an array passed in the call. The old node,
// potential MachTemps before and potential Projs after it then get
// disconnected and replaced by the new nodes. The instruction
// generating the result has to be the last one in the array. In
// general it is assumed that Projs after the node expanded are
// kills. These kills are not required any more after expanding as
// there are now explicitly visible def-use chains and the Projs are
// removed. This does not hold for calls: They do not only have
// kill-Projs but also Projs defining values. Therefore Projs after
// the node expanded are removed for all but for calls. If a node is
// to be reused, it must be added to the nodes list returned, and it
// will be added again.
//
// Implementing the postalloc_expand function for a node in an enc_class
// is rather tedious. It requires knowledge about many node details, as
// the nodes and the subgraph must be hand crafted. To simplify this,
// adlc generates some utility variables into the postalloc_expand function,
// e.g., holding the operands as specified by the postalloc_expand encoding
// specification, e.g.:
//   * unsigned idx_<par_name>  holding the index of the node in the ins
//   * Node *n_<par_name>       holding the node loaded from the ins
//   * MachOpnd *op_<par_name>  holding the corresponding operand
//
// The ordering of operands cannot be determined by looking at a
// rule. Especially if a match rule matches several different trees,
// several nodes are generated from one instruct specification with
// different operand orderings. In this case the adlc generated
// variables are the only way to access the ins and operands
// deterministically.
//
// If assigning a register to a node that contains an oop, don't
// forget to call ra_->set_oop() for the node.
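//
// Hedged sketch (hypothetical instruct and encoding names; only the shape
// follows the description above, not any specific ad file): a rule replaces
// its ins_encode with a single encoding call,
//
//   instruct loadConL_Ex(iRegLdst dst, immL src) %{
//     match(Set dst src);
//     postalloc_expand( postalloc_expand_load_long(dst, src) );
//   %}
//
// and the generated postalloc_expand() body can then use the adlc-provided
// idx_src, n_src and op_src variables to pick up its input deterministically
// and push the replacement subgraph onto the nodes array, with the node
// defining the result last.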
void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
  GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
  GrowableArray <Node *> remove(32);
  GrowableArray <Node *> succs(32);
  unsigned int max_idx = C->unique();   // Remember to distinguish new from old nodes.
  DEBUG_ONLY(bool foundNode = false);

  // for all blocks
  for (uint i = 0; i < number_of_blocks(); i++) {
    Block *b = _blocks[i];
    // For all instructions in the current block.
    for (uint j = 0; j < b->number_of_nodes(); j++) {
      Node *n = b->get_node(j);
      if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
#ifdef ASSERT
        if (TracePostallocExpand) {
          if (!foundNode) {
            foundNode = true;
            tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
                       C->method() ? C->method()->name()->as_utf8() : C->stub_name());
          }
          tty->print("  postalloc expanding "); n->dump();
          if (Verbose) {
            tty->print("    with ins:\n");
            for (uint k = 0; k < n->len(); ++k) {
              if (n->in(k)) { tty->print("        "); n->in(k)->dump(); }
            }
          }
        }
#endif
        new_nodes.clear();
        // Collect nodes that have to be removed from the block later on.
        uint req = n->req();
        remove.clear();
        for (uint k = 0; k < req; ++k) {
          if (n->in(k) && n->in(k)->is_MachTemp()) {
            remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
            n->in(k)->del_req(0);
            j--;
          }
        }

        // Check whether we can allocate enough nodes. We set a fixed limit
        // for the size of postalloc expands with this.
        uint unique_limit = C->unique() + 40;
        if (unique_limit >= _ra->node_regs_max_index()) {
          Compile::current()->record_failure("out of nodes in postalloc expand");
          return;
        }

        // Emit (i.e. generate new nodes).
        n->as_Mach()->postalloc_expand(&new_nodes, _ra);

        assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");

        // Disconnect the inputs of the old node.
        //
        // We reuse MachSpillCopy nodes. If we need to expand them, there
        // are many, so reusing pays off. If reused, the node already
        // has the new ins. n must be the last node on new_nodes list.
        if (!n->is_MachSpillCopy()) {
          for (int k = req - 1; k >= 0; --k) {
            n->del_req(k);
          }
        }

#ifdef ASSERT
        // Check that all nodes have proper operands.
        for (int k = 0; k < new_nodes.length(); ++k) {
          if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
          MachNode *m = new_nodes.at(k)->as_Mach();
          for (unsigned int l = 0; l < m->num_opnds(); ++l) {
            if (MachOper::notAnOper(m->_opnds[l])) {
              outputStream *os = tty;
              os->print("Node %s ", m->Name());
              os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
              assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
            }
          }
        }
#endif

        // Collect succs of old node in remove (for projections) and in succs
        // (for all other nodes); do _not_ collect projections in remove (but
        // in succs) in case the node is a call. We need the projections for
        // calls as they are associated with registers (i.e. they are defs).
        succs.clear();
        for (DUIterator k = n->outs(); n->has_out(k); k++) {
          if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
            remove.push(n->out(k));
          } else {
            succs.push(n->out(k));
          }
        }
        // Replace old node n as input of its succs by last of the new nodes.
        for (int k = 0; k < succs.length(); ++k) {
          Node *succ = succs.at(k);
          for (uint l = 0; l < succ->req(); ++l) {
            if (succ->in(l) == n) {
              succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
          for (uint l = succ->req(); l < succ->len(); ++l) {
            if (succ->in(l) == n) {
              succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
            }
          }
        }

        // Index of old node in block.
        uint index = b->find_node(n);
        // Insert new nodes into block and map them in nodes->blocks array
        // and remember last node in n2.
        Node *n2 = NULL;
        for (int k = 0; k < new_nodes.length(); ++k) {
          n2 = new_nodes.at(k);
          b->insert_node(n2, ++index);
          map_node_to_block(n2, b);
        }

        // Add old node n to remove and remove them all from block.
        remove.push(n);
        j--;
#ifdef ASSERT
        if (TracePostallocExpand && Verbose) {
          tty->print("    removing:\n");
          for (int k = 0; k < remove.length(); ++k) {
            tty->print("        "); remove.at(k)->dump();
          }
          tty->print("    inserting:\n");
          for (int k = 0; k < new_nodes.length(); ++k) {
            tty->print("        "); new_nodes.at(k)->dump();
          }
        }
#endif
        for (int k = 0; k < remove.length(); ++k) {
          if (b->contains(remove.at(k))) {
            b->find_remove(remove.at(k));
          } else {
            assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
          }
        }
        // If anything has been inserted (n2 != NULL), continue after last node inserted.
        // This does not always work. Some postalloc expands don't insert any nodes, if they
        // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
        j = n2 ? b->find_node(n2) : j;
      }
    }
  }

#ifdef ASSERT
  if (foundNode) {
    tty->print("FINISHED %d %s\n", C->compile_id(),
               C->method() ? C->method()->name()->as_utf8() : C->stub_name());
    tty->flush();
  }
#endif
}
goetz@6478 1149
goetz@6478 1150
goetz@6478 1151 //------------------------------dump-------------------------------------------
duke@435 1152 #ifndef PRODUCT
duke@435 1153 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
duke@435 1154 const Node *x = end->is_block_proj();
duke@435 1155 assert( x, "not a CFG" );
duke@435 1156
duke@435 1157 // Do not visit this block again
duke@435 1158 if( visited.test_set(x->_idx) ) return;
duke@435 1159
duke@435 1160 // Skip through this block
duke@435 1161 const Node *p = x;
duke@435 1162 do {
duke@435 1163 p = p->in(0); // Move control forward
duke@435 1164 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
duke@435 1165 } while( !p->is_block_start() );
duke@435 1166
duke@435 1167 // Recursively visit
adlertz@5509 1168 for (uint i = 1; i < p->req(); i++) {
adlertz@5509 1169 _dump_cfg(p->in(i), visited);
adlertz@5509 1170 }
duke@435 1171
duke@435 1172 // Dump the block
adlertz@5509 1173 get_block_for_node(p)->dump(this);
duke@435 1174 }
duke@435 1175
duke@435 1176 void PhaseCFG::dump( ) const {
adlertz@5539 1177 tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
adlertz@5509 1178 if (_blocks.size()) { // Did we do basic-block layout?
adlertz@5539 1179 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1180 const Block* block = get_block(i);
adlertz@5539 1181 block->dump(this);
adlertz@5509 1182 }
duke@435 1183 } else { // Else do it with a DFS
adlertz@5509 1184 VectorSet visited(_block_arena);
duke@435 1185 _dump_cfg(_root,visited);
duke@435 1186 }
duke@435 1187 }
duke@435 1188
duke@435 1189 void PhaseCFG::dump_headers() {
adlertz@5539 1190 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1191 Block* block = get_block(i);
adlertz@5539 1192 if (block != NULL) {
adlertz@5539 1193 block->dump_head(this);
adlertz@5509 1194 }
duke@435 1195 }
duke@435 1196 }
duke@435 1197
adlertz@5539 1198 void PhaseCFG::verify() const {
kvn@1001 1199 #ifdef ASSERT
duke@435 1200 // Verify sane CFG
adlertz@5539 1201 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1202 Block* block = get_block(i);
adlertz@5635 1203 uint cnt = block->number_of_nodes();
duke@435 1204 uint j;
kvn@3311 1205 for (j = 0; j < cnt; j++) {
adlertz@5635 1206 Node *n = block->get_node(j);
adlertz@5539 1207 assert(get_block_for_node(n) == block, "");
adlertz@5539 1208 if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
adlertz@5635 1209 assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
duke@435 1210 }
kvn@3311 1211 for (uint k = 0; k < n->req(); k++) {
kvn@1001 1212 Node *def = n->in(k);
kvn@3311 1213 if (def && def != n) {
adlertz@5509 1214 assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
kvn@1001 1215 // Verify that the instructions in the block are in the correct order:
kvn@1001 1216 // uses must follow their definition if they are in the same block.
kvn@1001 1217 // Mostly done to check that MachSpillCopy nodes are placed correctly
kvn@1001 1218 // when CreateEx node is moved in build_ifg_physical().
adlertz@5539 1219 if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
kvn@1001 1220 // See (+++) comment in reg_split.cpp
kvn@3311 1221 !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
kvn@1328 1222 bool is_loop = false;
kvn@1328 1223 if (n->is_Phi()) {
kvn@3311 1224 for (uint l = 1; l < def->req(); l++) {
kvn@1328 1225 if (n == def->in(l)) {
kvn@1328 1226 is_loop = true;
kvn@1328 1227 break; // Some kind of loop
kvn@1328 1228 }
kvn@1328 1229 }
kvn@1328 1230 }
adlertz@5539 1231 assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
kvn@1036 1232 }
duke@435 1233 }
duke@435 1234 }
duke@435 1235 }
duke@435 1236
adlertz@5539 1237 j = block->end_idx();
adlertz@5635 1238 Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
adlertz@5539 1239 assert(bp, "last instruction must be a block proj");
adlertz@5635 1240 assert(bp == block->get_node(j), "wrong number of successors for this block");
kvn@3311 1241 if (bp->is_Catch()) {
adlertz@5635 1242 while (block->get_node(--j)->is_MachProj()) {
adlertz@5539 1243 ;
adlertz@5539 1244 }
adlertz@5635 1245 assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
kvn@3311 1246 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
adlertz@5539 1247 assert(block->_num_succs == 2, "Conditional branch must have two targets");
duke@435 1248 }
duke@435 1249 }
kvn@1001 1250 #endif
duke@435 1251 }
duke@435 1252 #endif
duke@435 1253
duke@435 1254 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
duke@435 1255 Copy::zero_to_bytes( _indices, sizeof(uint)*max );
duke@435 1256 }
duke@435 1257
duke@435 1258 void UnionFind::extend( uint from_idx, uint to_idx ) {
duke@435 1259 _nesting.check();
duke@435 1260 if( from_idx >= _max ) {
duke@435 1261 uint size = 16;
duke@435 1262 while( size <= from_idx ) size <<=1;
duke@435 1263 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
duke@435 1264 _max = size;
duke@435 1265 }
duke@435 1266 while( _cnt <= from_idx ) _indices[_cnt++] = 0;
duke@435 1267 _indices[from_idx] = to_idx;
duke@435 1268 }
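// Growth sketch (illustrative numbers): with _max == 16, extend(20, 5)
// doubles the capacity to 32, zero-fills entries _cnt through 20, and then
// records _indices[20] = 5; slots above from_idx are left for later calls
// to initialize.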
duke@435 1269
duke@435 1270 void UnionFind::reset( uint max ) {
duke@435 1271 assert( max <= max_uint, "Must fit within uint" );
duke@435 1272 // Force the Union-Find mapping to be at least this large
duke@435 1273 extend(max,0);
duke@435 1274 // Initialize to be the ID mapping.
rasbold@853 1275 for( uint i=0; i<max; i++ ) map(i,i);
duke@435 1276 }
duke@435 1277
duke@435 1278 // Straight out of Tarjan's union-find algorithm
duke@435 1279 uint UnionFind::Find_compress( uint idx ) {
duke@435 1280 uint cur = idx;
duke@435 1281 uint next = lookup(cur);
duke@435 1282 while( next != cur ) { // Scan chain of equivalences
duke@435 1283 assert( next < cur, "always union smaller" );
duke@435 1284 cur = next; // until find a fixed-point
duke@435 1285 next = lookup(cur);
duke@435 1286 }
duke@435 1287 // Core of union-find algorithm: update chain of
duke@435 1288 // equivalences to be equal to the root.
duke@435 1289 while( idx != next ) {
duke@435 1290 uint tmp = lookup(idx);
duke@435 1291 map(idx, next);
duke@435 1292 idx = tmp;
duke@435 1293 }
duke@435 1294 return idx;
duke@435 1295 }
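// Worked example (hypothetical indices): given the chain 7 -> 4 -> 2 -> 2,
// Find_compress(7) first walks to the root 2, then remaps both 7 and 4
// directly to 2, so subsequent lookups of either index reach the root in
// a single step.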
duke@435 1296
duke@435 1297 // Like Find_compress above, but with no path compression, so it has bad asymptotic behavior.
duke@435 1298 uint UnionFind::Find_const( uint idx ) const {
duke@435 1299 if( idx == 0 ) return idx; // Ignore the zero idx
duke@435 1300 // Off the end? This can happen during debugging dumps
duke@435 1301 // when data structures have not finished being updated.
duke@435 1302 if( idx >= _max ) return idx;
duke@435 1303 uint next = lookup(idx);
duke@435 1304 while( next != idx ) { // Scan chain of equivalences
duke@435 1305 idx = next; // until find a fixed-point
duke@435 1306 next = lookup(idx);
duke@435 1307 }
duke@435 1308 return next;
duke@435 1309 }
duke@435 1310
duke@435 1311 // Union two sets together.
duke@435 1312 void UnionFind::Union( uint idx1, uint idx2 ) {
duke@435 1313 uint src = Find(idx1);
duke@435 1314 uint dst = Find(idx2);
duke@435 1315 assert( src, "" );
duke@435 1316 assert( dst, "" );
duke@435 1317 assert( src < _max, "oob" );
duke@435 1318 assert( dst < _max, "oob" );
duke@435 1319 assert( src < dst, "always union smaller" );
duke@435 1320 map(dst,src);
duke@435 1321 }
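// Minimal usage sketch (illustrative only; nothing in this file calls it
// this way):
//
//   ResourceMark rm;
//   UnionFind uf(4);
//   uf.reset(4);              // identity mapping: i -> i
//   uf.Union(1, 3);           // roots 1 and 3; 3 now maps to 1
//   assert(uf.Find(1) == uf.Find(3), "1 and 3 are in the same set");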
rasbold@853 1322
rasbold@853 1323 #ifndef PRODUCT
rasbold@853 1324 void Trace::dump( ) const {
rasbold@853 1325 tty->print_cr("Trace (freq %f)", first_block()->_freq);
rasbold@853 1326 for (Block *b = first_block(); b != NULL; b = next(b)) {
rasbold@853 1327 tty->print(" B%d", b->_pre_order);
rasbold@853 1328 if (b->head()->is_Loop()) {
rasbold@853 1329 tty->print(" (L%d)", b->compute_loop_alignment());
rasbold@853 1330 }
rasbold@853 1331 if (b->has_loop_alignment()) {
rasbold@853 1332 tty->print(" (T%d)", b->code_alignment());
rasbold@853 1333 }
rasbold@853 1334 }
rasbold@853 1335 tty->cr();
rasbold@853 1336 }
rasbold@853 1337
rasbold@853 1338 void CFGEdge::dump( ) const {
rasbold@853 1339 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
rasbold@853 1340 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
rasbold@853 1341 switch(state()) {
rasbold@853 1342 case connected:
rasbold@853 1343 tty->print("connected");
rasbold@853 1344 break;
rasbold@853 1345 case open:
rasbold@853 1346 tty->print("open");
rasbold@853 1347 break;
rasbold@853 1348 case interior:
rasbold@853 1349 tty->print("interior");
rasbold@853 1350 break;
rasbold@853 1351 }
rasbold@853 1352 if (infrequent()) {
rasbold@853 1353 tty->print(" infrequent");
rasbold@853 1354 }
rasbold@853 1355 tty->cr();
rasbold@853 1356 }
rasbold@853 1357 #endif
rasbold@853 1358
rasbold@853 1359 // Comparison function for edges
rasbold@853 1360 static int edge_order(CFGEdge **e0, CFGEdge **e1) {
rasbold@853 1361 float freq0 = (*e0)->freq();
rasbold@853 1362 float freq1 = (*e1)->freq();
rasbold@853 1363 if (freq0 != freq1) {
rasbold@853 1364 return freq0 > freq1 ? -1 : 1;
rasbold@853 1365 }
rasbold@853 1366
rasbold@853 1367 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
rasbold@853 1368 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;
rasbold@853 1369
rasbold@853 1370 return dist1 - dist0;
rasbold@853 1371 }
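// Ordering example (illustrative values): higher frequency sorts first; at
// equal frequency the edge spanning the greater RPO distance wins, so for
// e0: B2 -> B9 (dist 7) and e1: B3 -> B5 (dist 2) at the same frequency,
// edge_order returns 2 - 7 = -5 and e0 precedes e1.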
rasbold@853 1372
rasbold@853 1373 // Comparison function for traces
kvn@3128 1374 extern "C" int trace_frequency_order(const void *p0, const void *p1) {
rasbold@853 1375 Trace *tr0 = *(Trace **) p0;
rasbold@853 1376 Trace *tr1 = *(Trace **) p1;
rasbold@853 1377 Block *b0 = tr0->first_block();
rasbold@853 1378 Block *b1 = tr1->first_block();
rasbold@853 1379
rasbold@853 1380 // The trace of connector blocks goes at the end;
rasbold@853 1381 // we only expect one such trace
rasbold@853 1382 if (b0->is_connector() != b1->is_connector()) {
rasbold@853 1383 return b1->is_connector() ? -1 : 1;
rasbold@853 1384 }
rasbold@853 1385
rasbold@853 1386 // Pull more frequently executed blocks to the beginning
rasbold@853 1387 float freq0 = b0->_freq;
rasbold@853 1388 float freq1 = b1->_freq;
rasbold@853 1389 if (freq0 != freq1) {
rasbold@853 1390 return freq0 > freq1 ? -1 : 1;
rasbold@853 1391 }
rasbold@853 1392
rasbold@853 1393 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;
rasbold@853 1394
rasbold@853 1395 return diff;
rasbold@853 1396 }
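// Ordering example (illustrative): the connector trace always sorts after
// the real traces; among the rest, a trace whose first block has freq 10.0
// precedes one at freq 2.0, and ties fall back to ascending RPO of the
// first blocks for a deterministic order.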
rasbold@853 1397
rasbold@853 1398 // Find edges of interest, i.e., those which can fall through. Presumes that
rasbold@853 1399 // edges which don't fall through are of low frequency and can generally be
rasbold@853 1400 // ignored. Initialize the list of traces.
adlertz@5539 1401 void PhaseBlockLayout::find_edges() {
rasbold@853 1402 // Walk the blocks, creating edges and Traces
rasbold@853 1403 uint i;
rasbold@853 1404 Trace *tr = NULL;
adlertz@5539 1405 for (i = 0; i < _cfg.number_of_blocks(); i++) {
adlertz@5539 1406 Block* b = _cfg.get_block(i);
rasbold@853 1407 tr = new Trace(b, next, prev);
rasbold@853 1408 traces[tr->id()] = tr;
rasbold@853 1409
rasbold@853 1410 // All connector blocks should be at the end of the list
rasbold@853 1411 if (b->is_connector()) break;
rasbold@853 1412
rasbold@853 1413 // If this block and the next one have a one-to-one successor/
rasbold@853 1414 // predecessor relationship, simply append the next block
rasbold@853 1415 int nfallthru = b->num_fall_throughs();
rasbold@853 1416 while (nfallthru == 1 &&
rasbold@853 1417 b->succ_fall_through(0)) {
rasbold@853 1418 Block *n = b->_succs[0];
rasbold@853 1419
rasbold@853 1420 // Skip over single-entry connector blocks, we don't want to
rasbold@853 1421 // add them to the trace.
rasbold@853 1422 while (n->is_connector() && n->num_preds() == 1) {
rasbold@853 1423 n = n->_succs[0];
rasbold@853 1424 }
rasbold@853 1425
rasbold@853 1426 // We see a merge point, so stop search for the next block
rasbold@853 1427 if (n->num_preds() != 1) break;
rasbold@853 1428
rasbold@853 1429 i++;
adlertz@5539 1430 assert(n == _cfg.get_block(i), "expecting next block");
rasbold@853 1431 tr->append(n);
rasbold@853 1432 uf->map(n->_pre_order, tr->id());
rasbold@853 1433 traces[n->_pre_order] = NULL;
rasbold@853 1434 nfallthru = b->num_fall_throughs();
rasbold@853 1435 b = n;
rasbold@853 1436 }
rasbold@853 1437
rasbold@853 1438 if (nfallthru > 0) {
rasbold@853 1439 // Create a CFGEdge for each outgoing
rasbold@853 1440 // edge that could be a fall-through.
rasbold@853 1441 for (uint j = 0; j < b->_num_succs; j++ ) {
rasbold@853 1442 if (b->succ_fall_through(j)) {
rasbold@853 1443 Block *target = b->non_connector_successor(j);
rasbold@853 1444 float freq = b->_freq * b->succ_prob(j);
rasbold@853 1445 int from_pct = (int) ((100 * freq) / b->_freq);
rasbold@853 1446 int to_pct = (int) ((100 * freq) / target->_freq);
rasbold@853 1447 edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
rasbold@853 1448 }
rasbold@853 1449 }
rasbold@853 1450 }
rasbold@853 1451 }
rasbold@853 1452
rasbold@853 1453 // Group connector blocks into one trace
adlertz@5539 1454 for (i++; i < _cfg.number_of_blocks(); i++) {
adlertz@5539 1455 Block *b = _cfg.get_block(i);
rasbold@853 1456 assert(b->is_connector(), "connector blocks at the end");
rasbold@853 1457 tr->append(b);
rasbold@853 1458 uf->map(b->_pre_order, tr->id());
rasbold@853 1459 traces[b->_pre_order] = NULL;
rasbold@853 1460 }
rasbold@853 1461 }
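// Shape sketch (hypothetical CFG): a straight-line chain B1 -> B2 -> B3,
// where each link is a sole successor/predecessor pair, collapses into a
// single Trace {B1, B2, B3}; a two-way branch B1 -> {B2, B4} instead ends
// the trace at B1 and records a CFGEdge per fall-through candidate for
// grow_traces() to connect later.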
rasbold@853 1462
rasbold@853 1463 // Union two traces together in uf, and null out the trace in the list
adlertz@5539 1464 void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
rasbold@853 1465 uint old_id = old_trace->id();
rasbold@853 1466 uint updated_id = updated_trace->id();
rasbold@853 1467
rasbold@853 1468 uint lo_id = updated_id;
rasbold@853 1469 uint hi_id = old_id;
rasbold@853 1470
rasbold@853 1471 // If updated_id is greater than old_id, swap the values to meet the
rasbold@853 1472 // UnionFind guarantee that the lower id is the representative.
rasbold@853 1473 if (updated_id > old_id) {
rasbold@853 1474 lo_id = old_id;
rasbold@853 1475 hi_id = updated_id;
rasbold@853 1476
rasbold@853 1477 // Fix up the trace ids
rasbold@853 1478 traces[lo_id] = traces[updated_id];
rasbold@853 1479 updated_trace->set_id(lo_id);
rasbold@853 1480 }
rasbold@853 1481
rasbold@853 1482 // Union the lower with the higher and remove the pointer
rasbold@853 1483 // to the higher.
rasbold@853 1484 uf->Union(lo_id, hi_id);
rasbold@853 1485 traces[hi_id] = NULL;
rasbold@853 1486 }
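// Example (hypothetical ids): union_traces(t, u) with t->id() == 7 and
// u->id() == 3 re-files t under the lower id 3, calls uf->Union(3, 7),
// and clears traces[7], so the merged trace is subsequently found via id 3.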
rasbold@853 1487
rasbold@853 1488 // Append traces together via the most frequently executed edges
adlertz@5539 1489 void PhaseBlockLayout::grow_traces() {
rasbold@853 1490 // Order the edges, and drive the growth of Traces via the most
rasbold@853 1491 // frequently executed edges.
rasbold@853 1492 edges->sort(edge_order);
rasbold@853 1493 for (int i = 0; i < edges->length(); i++) {
rasbold@853 1494 CFGEdge *e = edges->at(i);
rasbold@853 1495
rasbold@853 1496 if (e->state() != CFGEdge::open) continue;
rasbold@853 1497
rasbold@853 1498 Block *src_block = e->from();
rasbold@853 1499 Block *targ_block = e->to();
rasbold@853 1500
rasbold@853 1501 // Don't grow traces along backedges unless BlockLayoutRotateLoops is set.
rasbold@853 1502 if (!BlockLayoutRotateLoops) {
rasbold@853 1503 if (targ_block->_rpo <= src_block->_rpo) {
rasbold@853 1504 targ_block->set_loop_alignment(targ_block);
rasbold@853 1505 continue;
rasbold@853 1506 }
rasbold@853 1507 }
rasbold@853 1508
rasbold@853 1509 Trace *src_trace = trace(src_block);
rasbold@853 1510 Trace *targ_trace = trace(targ_block);
rasbold@853 1511
rasbold@853 1512 // If the edge in question can join two traces at their ends,
rasbold@853 1513 // append one trace to the other.
rasbold@853 1514 if (src_trace->last_block() == src_block) {
rasbold@853 1515 if (src_trace == targ_trace) {
rasbold@853 1516 e->set_state(CFGEdge::interior);
rasbold@853 1517 if (targ_trace->backedge(e)) {
rasbold@853 1518 // Reset i to catch any newly eligible edge
rasbold@853 1519 // (Or we could remember the first "open" edge, and reset there)
rasbold@853 1520 i = 0;
rasbold@853 1521 }
rasbold@853 1522 } else if (targ_trace->first_block() == targ_block) {
rasbold@853 1523 e->set_state(CFGEdge::connected);
rasbold@853 1524 src_trace->append(targ_trace);
rasbold@853 1525 union_traces(src_trace, targ_trace);
rasbold@853 1526 }
rasbold@853 1527 }
rasbold@853 1528 }
rasbold@853 1529 }
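// Growth sketch (hypothetical): given traces {B1} and {B2, B3} and a hot
// open edge B1 -> B2, src_trace ends at B1 and targ_trace starts at B2,
// so the edge becomes "connected" and the traces join into {B1, B2, B3}.
// An edge that closes a cycle within one trace is marked "interior" and
// may trigger loop rotation via Trace::backedge().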
rasbold@853 1530
rasbold@853 1531 // Embed one trace into another, if the fork or join points are sufficiently
rasbold@853 1532 // balanced.
adlertz@5539 1533 void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
rasbold@853 1534 // Walk the edge list another time, looking at unprocessed edges.
rasbold@853 1535 // Fold in diamonds
rasbold@853 1536 for (int i = 0; i < edges->length(); i++) {
rasbold@853 1537 CFGEdge *e = edges->at(i);
rasbold@853 1538
rasbold@853 1539 if (e->state() != CFGEdge::open) continue;
rasbold@853 1540 if (fall_thru_only) {
rasbold@853 1541 if (e->infrequent()) continue;
rasbold@853 1542 }
rasbold@853 1543
rasbold@853 1544 Block *src_block = e->from();
rasbold@853 1545 Trace *src_trace = trace(src_block);
rasbold@853 1546 bool src_at_tail = src_trace->last_block() == src_block;
rasbold@853 1547
rasbold@853 1548 Block *targ_block = e->to();
rasbold@853 1549 Trace *targ_trace = trace(targ_block);
rasbold@853 1550 bool targ_at_start = targ_trace->first_block() == targ_block;
rasbold@853 1551
rasbold@853 1552 if (src_trace == targ_trace) {
rasbold@853 1553 // This may be a loop, but we can't do much about it.
rasbold@853 1554 e->set_state(CFGEdge::interior);
rasbold@853 1555 continue;
rasbold@853 1556 }
rasbold@853 1557
rasbold@853 1558 if (fall_thru_only) {
rasbold@853 1559 // If the edge links the middle of two traces, we can't do anything
rasbold@853 1560 // with it now; leave the edge open and continue.
rasbold@853 1561 if (!src_at_tail && !targ_at_start) {
rasbold@853 1562 continue;
rasbold@853 1563 }
rasbold@853 1564
rasbold@853 1565 // Don't grow traces along backedges unless BlockLayoutRotateLoops is set.
rasbold@853 1566 if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
rasbold@853 1567 continue;
rasbold@853 1568 }
rasbold@853 1569
rasbold@853 1570 // If both ends of the edge are available, why didn't we handle it earlier?
rasbold@853 1571 assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");
rasbold@853 1572
rasbold@853 1573 if (targ_at_start) {
rasbold@853 1574 // Insert the "targ" trace in the "src" trace if the insertion point
rasbold@853 1575 // is a two way branch.
rasbold@853 1576 // Better profitability check possible, but may not be worth it.
rasbold@853 1577 // Someday, see if this "fork" has an associated "join";
rasbold@853 1578 // then make a policy on merging this trace at the fork or join.
rasbold@853 1579 // For example, other things being equal, it may be better to place this
rasbold@853 1580 // trace at the join point if the "src" trace ends in a two-way, but
rasbold@853 1581 // the insertion point is one-way.
rasbold@853 1582 assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
rasbold@853 1583 e->set_state(CFGEdge::connected);
rasbold@853 1584 src_trace->insert_after(src_block, targ_trace);
rasbold@853 1585 union_traces(src_trace, targ_trace);
rasbold@853 1586 } else if (src_at_tail) {
adlertz@5539 1587 if (src_trace != trace(_cfg.get_root_block())) {
rasbold@853 1588 e->set_state(CFGEdge::connected);
rasbold@853 1589 targ_trace->insert_before(targ_block, src_trace);
rasbold@853 1590 union_traces(targ_trace, src_trace);
rasbold@853 1591 }
rasbold@853 1592 }
rasbold@853 1593 } else if (e->state() == CFGEdge::open) {
rasbold@853 1594 // Append traces, even without a fall-thru connection.
twisti@1040 1595 // But leave root entry at the beginning of the block list.
adlertz@5539 1596 if (targ_trace != trace(_cfg.get_root_block())) {
rasbold@853 1597 e->set_state(CFGEdge::connected);
rasbold@853 1598 src_trace->append(targ_trace);
rasbold@853 1599 union_traces(src_trace, targ_trace);
rasbold@853 1600 }
rasbold@853 1601 }
rasbold@853 1602 }
rasbold@853 1603 }
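// Diamond sketch (hypothetical): for a fork B1 -> {B2, B3} joining at B4,
// the fall_thru_only pass may insert the trace {B3} into {B1, B2, B4}
// right after the two-way branch at B1, yielding {B1, B3, B2, B4}; the
// second pass (fall_thru_only == false) then appends any leftover traces
// even without a fall-through connection.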
rasbold@853 1604
rasbold@853 1605 // Order the sequence of the traces in some desirable way, and fix up the
rasbold@853 1606 // jumps at the end of each block.
adlertz@5539 1607 void PhaseBlockLayout::reorder_traces(int count) {
rasbold@853 1608 ResourceArea *area = Thread::current()->resource_area();
rasbold@853 1609 Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
rasbold@853 1610 Block_List worklist;
rasbold@853 1611 int new_count = 0;
rasbold@853 1612
rasbold@853 1613 // Compact the traces.
rasbold@853 1614 for (int i = 0; i < count; i++) {
rasbold@853 1615 Trace *tr = traces[i];
rasbold@853 1616 if (tr != NULL) {
rasbold@853 1617 new_traces[new_count++] = tr;
rasbold@853 1618 }
rasbold@853 1619 }
rasbold@853 1620
rasbold@853 1621 // The entry block should be first on the new trace list.
adlertz@5539 1622 Trace *tr = trace(_cfg.get_root_block());
rasbold@853 1623 assert(tr == new_traces[0], "entry trace misplaced");
rasbold@853 1624
rasbold@853 1625 // Sort the new trace list by frequency
rasbold@853 1626 qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
rasbold@853 1627
rasbold@853 1628 // Patch up the successor blocks
adlertz@5539 1629 _cfg.clear_blocks();
rasbold@853 1630 for (int i = 0; i < new_count; i++) {
rasbold@853 1631 Trace *tr = new_traces[i];
rasbold@853 1632 if (tr != NULL) {
rasbold@853 1633 tr->fixup_blocks(_cfg);
rasbold@853 1634 }
rasbold@853 1635 }
rasbold@853 1636 }
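// Compaction example (illustrative): with traces == { t0, NULL, t2, NULL,
// t4 }, the loop above yields { t0, t2, t4 }; t0 (the entry trace) stays
// first, t2 and t4 are sorted by first-block frequency, and each trace's
// blocks are then re-appended to the CFG's block list.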
rasbold@853 1637
rasbold@853 1638 // Order basic blocks based on frequency
adlertz@5539 1639 PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
adlertz@5539 1640 : Phase(BlockLayout)
adlertz@5539 1641 , _cfg(cfg) {
rasbold@853 1642 ResourceMark rm;
rasbold@853 1643 ResourceArea *area = Thread::current()->resource_area();
rasbold@853 1644
rasbold@853 1645 // List of traces
adlertz@5539 1646 int size = _cfg.number_of_blocks() + 1;
rasbold@853 1647 traces = NEW_ARENA_ARRAY(area, Trace *, size);
rasbold@853 1648 memset(traces, 0, size*sizeof(Trace*));
rasbold@853 1649 next = NEW_ARENA_ARRAY(area, Block *, size);
rasbold@853 1650 memset(next, 0, size*sizeof(Block *));
rasbold@853 1651 prev = NEW_ARENA_ARRAY(area, Block *, size);
rasbold@853 1652 memset(prev , 0, size*sizeof(Block *));
rasbold@853 1653
rasbold@853 1654 // List of edges
rasbold@853 1655 edges = new GrowableArray<CFGEdge*>;
rasbold@853 1656
rasbold@853 1657 // Mapping block index --> block_trace
rasbold@853 1658 uf = new UnionFind(size);
rasbold@853 1659 uf->reset(size);
rasbold@853 1660
rasbold@853 1661 // Find edges and create traces.
rasbold@853 1662 find_edges();
rasbold@853 1663
rasbold@853 1664 // Grow traces at their ends via most frequent edges.
rasbold@853 1665 grow_traces();
rasbold@853 1666
rasbold@853 1667 // Merge one trace into another, but only at fall-through points.
rasbold@853 1668 // This may make diamonds and other related shapes in a trace.
rasbold@853 1669 merge_traces(true);
rasbold@853 1670
rasbold@853 1671 // Run merge again, allowing two traces to be catenated, even if
rasbold@853 1672 // one does not fall through into the other. This places loosely
rasbold@853 1673 // related traces near each other.
rasbold@853 1674 merge_traces(false);
rasbold@853 1675
rasbold@853 1676 // Re-order all the remaining traces by frequency
rasbold@853 1677 reorder_traces(size);
rasbold@853 1678
adlertz@5539 1679 assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
rasbold@853 1680 }
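// Usage sketch: constructing the phase runs the whole pipeline, e.g.
//
//   PhaseBlockLayout layout(cfg);  // find, grow, merge, reorder traces
//
// after which the CFG's block list reflects the trace-based ordering.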
rasbold@853 1681
rasbold@853 1682
rasbold@853 1683 // Edge e completes a loop in a trace. If the target block is the head of the
rasbold@853 1684 // loop, rotate the loop so that it ends in a conditional branch.
rasbold@853 1685 bool Trace::backedge(CFGEdge *e) {
rasbold@853 1686 bool loop_rotated = false;
rasbold@853 1687 Block *src_block = e->from();
rasbold@853 1688 Block *targ_block = e->to();
rasbold@853 1689
rasbold@853 1690 assert(last_block() == src_block, "loop discovery at back branch");
rasbold@853 1691 if (first_block() == targ_block) {
rasbold@853 1692 if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
rasbold@853 1693 // Find the last block in the trace that has a conditional
rasbold@853 1694 // branch.
rasbold@853 1695 Block *b;
rasbold@853 1696 for (b = last_block(); b != NULL; b = prev(b)) {
rasbold@853 1697 if (b->num_fall_throughs() == 2) {
rasbold@853 1698 break;
rasbold@853 1699 }
rasbold@853 1700 }
rasbold@853 1701
rasbold@853 1702 if (b != last_block() && b != NULL) {
rasbold@853 1703 loop_rotated = true;
rasbold@853 1704
rasbold@853 1705 // Rotate the loop by doing two-part linked-list surgery.
rasbold@853 1706 append(first_block());
rasbold@853 1707 break_loop_after(b);
rasbold@853 1708 }
rasbold@853 1709 }
rasbold@853 1710
rasbold@853 1711 // Backbranch to the top of a trace.
twisti@1040 1712 // Scroll forward through the trace from targ_block. If we find
rasbold@853 1713 // a loop head before another loop top, use the loop head alignment.
rasbold@853 1714 for (Block *b = targ_block; b != NULL; b = next(b)) {
rasbold@853 1715 if (b->has_loop_alignment()) {
rasbold@853 1716 break;
rasbold@853 1717 }
rasbold@853 1718 if (b->head()->is_Loop()) {
rasbold@853 1719 targ_block = b;
rasbold@853 1720 break;
rasbold@853 1721 }
rasbold@853 1722 }
rasbold@853 1723
rasbold@853 1724 first_block()->set_loop_alignment(targ_block);
rasbold@853 1725
rasbold@853 1726 } else {
rasbold@853 1727 // Backbranch into the middle of a trace
rasbold@853 1728 targ_block->set_loop_alignment(targ_block);
rasbold@853 1729 }
rasbold@853 1730
rasbold@853 1731 return loop_rotated;
rasbold@853 1732 }
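// Rotation sketch (hypothetical trace): for {H, A, B} with backedge
// B -> H, where B ends in an unconditional jump and A holds the last
// two-way branch, the linked-list surgery re-orders the blocks so the
// trace ends at A, letting the loop close on A's conditional branch
// instead of B's unconditional jump back to H.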
rasbold@853 1733
rasbold@853 1734 // Push blocks onto the CFG list and
rasbold@853 1735 // ensure that blocks have the correct two-way branch sense.
rasbold@853 1736 void Trace::fixup_blocks(PhaseCFG &cfg) {
rasbold@853 1737 Block *last = last_block();
rasbold@853 1738 for (Block *b = first_block(); b != NULL; b = next(b)) {
adlertz@5539 1739 cfg.add_block(b);
rasbold@853 1740 if (!b->is_connector()) {
rasbold@853 1741 int nfallthru = b->num_fall_throughs();
rasbold@853 1742 if (b != last) {
rasbold@853 1743 if (nfallthru == 2) {
rasbold@853 1744 // Ensure that the sense of the branch is correct
rasbold@853 1745 Block *bnext = next(b);
rasbold@853 1746 Block *bs0 = b->non_connector_successor(0);
rasbold@853 1747
adlertz@5635 1748 MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
adlertz@5635 1749 ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
adlertz@5635 1750 ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
rasbold@853 1751
rasbold@853 1752 if (bnext == bs0) {
rasbold@853 1753 // Fall-thru case in succs[0], should be in succs[1]
rasbold@853 1754
rasbold@853 1755 // Flip targets in _succs map
rasbold@853 1756 Block *tbs0 = b->_succs[0];
rasbold@853 1757 Block *tbs1 = b->_succs[1];
rasbold@853 1758 b->_succs.map( 0, tbs1 );
rasbold@853 1759 b->_succs.map( 1, tbs0 );
rasbold@853 1760
rasbold@853 1761 // Flip projections to match targets
adlertz@5635 1762 b->map_node(proj1, b->number_of_nodes() - 2);
adlertz@5635 1763 b->map_node(proj0, b->number_of_nodes() - 1);
rasbold@853 1764 }
rasbold@853 1765 }
rasbold@853 1766 }
rasbold@853 1767 }
rasbold@853 1768 }
rasbold@853 1769 }
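// Branch-sense sketch (hypothetical block): if b ends in
// [..., iff, proj0, proj1] and the next block in the layout is b's
// successor 0, the fall-through sits on the taken arm, so the code above
// swaps _succs[0]/_succs[1] and flips the two projections so that the
// fall-through case lands in succs[1], matching the new successor order.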
