src/share/vm/opto/block.cpp

changeset 6485:da862781b584 (Merge)
author:      kvn
date:        Thu, 21 Nov 2013 12:30:35 -0800
parent:      6478:044b28168e20
child:       6490:41b780b43b74
permissions: -rw-r--r--
duke@435 1 /*
mikael@4153 2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "libadt/vectset.hpp"
stefank@2314 27 #include "memory/allocation.inline.hpp"
stefank@2314 28 #include "opto/block.hpp"
stefank@2314 29 #include "opto/cfgnode.hpp"
stefank@2314 30 #include "opto/chaitin.hpp"
stefank@2314 31 #include "opto/loopnode.hpp"
stefank@2314 32 #include "opto/machnode.hpp"
stefank@2314 33 #include "opto/matcher.hpp"
stefank@2314 34 #include "opto/opcodes.hpp"
stefank@2314 35 #include "opto/rootnode.hpp"
stefank@2314 36 #include "utilities/copy.hpp"
stefank@2314 37
duke@435 38 void Block_Array::grow( uint i ) {
duke@435 39 assert(i >= Max(), "must be an overflow");
duke@435 40 debug_only(_limit = i+1);
duke@435 41 if( i < _size ) return;
duke@435 42 if( !_size ) {
duke@435 43 _size = 1;
duke@435 44 _blocks = (Block**)_arena->Amalloc( _size * sizeof(Block*) );
duke@435 45 _blocks[0] = NULL;
duke@435 46 }
duke@435 47 uint old = _size;
duke@435 48 while( i >= _size ) _size <<= 1; // Double to fit
duke@435 49 _blocks = (Block**)_arena->Arealloc( _blocks, old*sizeof(Block*),_size*sizeof(Block*));
duke@435 50 Copy::zero_to_bytes( &_blocks[old], (_size-old)*sizeof(Block*) );
duke@435 51 }
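// Illustrative example of the doubling growth: with _size == 4, a call to
// grow(9) doubles _size to 8 and then 16, reallocates the array, and zeroes
// slots [4, 16), so _blocks[9] is afterwards a valid NULL entry.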
duke@435 52
duke@435 53 void Block_List::remove(uint i) {
duke@435 54 assert(i < _cnt, "index out of bounds");
duke@435 55 Copy::conjoint_words_to_lower((HeapWord*)&_blocks[i+1], (HeapWord*)&_blocks[i], ((_cnt-i-1)*sizeof(Block*)));
duke@435 56 pop(); // shrink list by one block
duke@435 57 }
duke@435 58
duke@435 59 void Block_List::insert(uint i, Block *b) {
duke@435 60 push(b); // grow list by one block
duke@435 61 Copy::conjoint_words_to_higher((HeapWord*)&_blocks[i], (HeapWord*)&_blocks[i+1], ((_cnt-i-1)*sizeof(Block*)));
duke@435 62 _blocks[i] = b;
duke@435 63 }
duke@435 64
rasbold@853 65 #ifndef PRODUCT
rasbold@853 66 void Block_List::print() {
rasbold@853 67 for (uint i=0; i < size(); i++) {
rasbold@853 68 tty->print("B%d ", _blocks[i]->_pre_order);
rasbold@853 69 }
rasbold@853 70 tty->print("size = %d\n", size());
rasbold@853 71 }
rasbold@853 72 #endif
duke@435 73
duke@435 74 uint Block::code_alignment() {
duke@435 75 // Check for Root block
kvn@3049 76 if (_pre_order == 0) return CodeEntryAlignment;
duke@435 77 // Check for Start block
kvn@3049 78 if (_pre_order == 1) return InteriorEntryAlignment;
duke@435 79 // Check for loop alignment
kvn@3049 80 if (has_loop_alignment()) return loop_alignment();
rasbold@853 81
kvn@3049 82 return relocInfo::addr_unit(); // no particular alignment
rasbold@853 83 }
rasbold@853 84
rasbold@853 85 uint Block::compute_loop_alignment() {
duke@435 86 Node *h = head();
kvn@3049 87 int unit_sz = relocInfo::addr_unit();
kvn@3049 88 if (h->is_Loop() && h->as_Loop()->is_inner_loop()) {
duke@435 89 // Pre- and post-loops have low trip count so do not bother with
duke@435 90 * NOPs to align the loop head. The constants are hidden from tuning
duke@435 91 // but only because my "divide by 4" heuristic surely gets nearly
duke@435 92 // all possible gain (a "do not align at all" heuristic has a
duke@435 93 // chance of getting a really tiny gain).
kvn@3049 94 if (h->is_CountedLoop() && (h->as_CountedLoop()->is_pre_loop() ||
kvn@3049 95 h->as_CountedLoop()->is_post_loop())) {
kvn@3049 96 return (OptoLoopAlignment > 4*unit_sz) ? (OptoLoopAlignment>>2) : unit_sz;
kvn@3049 97 }
duke@435 98 // Loops with low backedge frequency should not be aligned.
duke@435 99 Node *n = h->in(LoopNode::LoopBackControl)->in(0);
kvn@3049 100 if (n->is_MachIf() && n->as_MachIf()->_prob < 0.01) {
kvn@3049 101 return unit_sz; // Loop does not loop, more often than not!
duke@435 102 }
duke@435 103 return OptoLoopAlignment; // Otherwise align loop head
duke@435 104 }
rasbold@853 105
kvn@3049 106 return unit_sz; // no particular alignment
duke@435 107 }
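// Illustrative arithmetic, assuming OptoLoopAlignment == 16 and a
// byte-addressed target (unit_sz == 1): an inner counted pre- or post-loop
// gets 16 >> 2 == 4 byte alignment, a loop whose backedge test has
// _prob < 0.01 gets none (1), and any other inner loop head is aligned to 16.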
duke@435 108
duke@435 109 // Compute the size of first 'inst_cnt' instructions in this block.
duke@435 110 // Return the number of instructions left to compute if the block has
rasbold@853 111 // less than 'inst_cnt' instructions. Stop, and return 0 if sum_size
rasbold@853 112 // exceeds OptoLoopAlignment.
duke@435 113 uint Block::compute_first_inst_size(uint& sum_size, uint inst_cnt,
duke@435 114 PhaseRegAlloc* ra) {
adlertz@5635 115 uint last_inst = number_of_nodes();
duke@435 116 for( uint j = 0; j < last_inst && inst_cnt > 0; j++ ) {
adlertz@5635 117 uint inst_size = get_node(j)->size(ra);
duke@435 118 if( inst_size > 0 ) {
duke@435 119 inst_cnt--;
duke@435 120 uint sz = sum_size + inst_size;
duke@435 121 if( sz <= (uint)OptoLoopAlignment ) {
duke@435 122 // Compute size of instructions which fit into fetch buffer only
duke@435 123 // since all inst_cnt instructions will not fit even if we align them.
duke@435 124 sum_size = sz;
duke@435 125 } else {
duke@435 126 return 0;
duke@435 127 }
duke@435 128 }
duke@435 129 }
duke@435 130 return inst_cnt;
duke@435 131 }
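// Illustrative example, assuming OptoLoopAlignment == 16 and inst_cnt == 4:
// instruction sizes 4, 4, 4, 8 accumulate sum_size = 4, 8, 12; the fourth
// instruction would raise the sum to 20 > 16, so the walk returns 0 and
// sum_size stays 12 (only instructions fitting the fetch buffer are counted).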
duke@435 132
duke@435 133 uint Block::find_node( const Node *n ) const {
adlertz@5635 134 for( uint i = 0; i < number_of_nodes(); i++ ) {
adlertz@5635 135 if( get_node(i) == n )
duke@435 136 return i;
duke@435 137 }
duke@435 138 ShouldNotReachHere();
duke@435 139 return 0;
duke@435 140 }
duke@435 141
duke@435 142 // Find and remove n from block list
duke@435 143 void Block::find_remove( const Node *n ) {
adlertz@5635 144 remove_node(find_node(n));
duke@435 145 }
duke@435 146
goetz@6478 147 bool Block::contains(const Node *n) const {
goetz@6478 148 return _nodes.contains(n);
goetz@6478 149 }
goetz@6478 150
duke@435 151 // Return empty status of a block. Empty blocks contain only the head, other
duke@435 152 // ideal nodes, and an optional trailing goto.
duke@435 153 int Block::is_Empty() const {
duke@435 154
duke@435 155 // Root or start block is not considered empty
duke@435 156 if (head()->is_Root() || head()->is_Start()) {
duke@435 157 return not_empty;
duke@435 158 }
duke@435 159
duke@435 160 int success_result = completely_empty;
adlertz@5635 161 int end_idx = number_of_nodes() - 1;
duke@435 162
duke@435 163 // Check for ending goto
adlertz@5635 164 if ((end_idx > 0) && (get_node(end_idx)->is_MachGoto())) {
duke@435 165 success_result = empty_with_goto;
duke@435 166 end_idx--;
duke@435 167 }
duke@435 168
duke@435 169 // Unreachable blocks are considered empty
duke@435 170 if (num_preds() <= 1) {
duke@435 171 return success_result;
duke@435 172 }
duke@435 173
duke@435 174 // Ideal nodes are allowable in empty blocks: skip them. Only MachNodes
duke@435 175 // turn directly into code, because only MachNodes have non-trivial
duke@435 176 // emit() functions.
adlertz@5635 177 while ((end_idx > 0) && !get_node(end_idx)->is_Mach()) {
duke@435 178 end_idx--;
duke@435 179 }
duke@435 180
duke@435 181 // No room for any interesting instructions?
duke@435 182 if (end_idx == 0) {
duke@435 183 return success_result;
duke@435 184 }
duke@435 185
duke@435 186 return not_empty;
duke@435 187 }
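// Illustrative example: a merge block whose only nodes are { Region, Phi,
// MachGoto } reports empty_with_goto - the trailing MachGoto is recorded,
// the Phi is skipped as an ideal (non-Mach) node, and end_idx reaches 0.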
duke@435 188
twisti@1040 189 // Return true if the block's code implies that it is likely to be
duke@435 190 // executed infrequently. Check to see if the block ends in a Halt or
duke@435 191 // a low probability call.
duke@435 192 bool Block::has_uncommon_code() const {
duke@435 193 Node* en = end();
duke@435 194
kvn@3040 195 if (en->is_MachGoto())
duke@435 196 en = en->in(0);
duke@435 197 if (en->is_Catch())
duke@435 198 en = en->in(0);
kvn@3040 199 if (en->is_MachProj() && en->in(0)->is_MachCall()) {
duke@435 200 MachCallNode* call = en->in(0)->as_MachCall();
duke@435 201 if (call->cnt() != COUNT_UNKNOWN && call->cnt() <= PROB_UNLIKELY_MAG(4)) {
duke@435 202 // This is true for slow-path stubs like new_{instance,array},
duke@435 203 // slow_arraycopy, complete_monitor_locking, uncommon_trap.
duke@435 204 // The magic number corresponds to the probability of an uncommon_trap,
duke@435 205 // even though it is a count not a probability.
duke@435 206 return true;
duke@435 207 }
duke@435 208 }
duke@435 209
duke@435 210 int op = en->is_Mach() ? en->as_Mach()->ideal_Opcode() : en->Opcode();
duke@435 211 return op == Op_Halt;
duke@435 212 }
duke@435 213
duke@435 214 // True if block is low enough frequency or guarded by a test which
duke@435 215 // mostly does not go here.
adlertz@5639 216 bool PhaseCFG::is_uncommon(const Block* block) {
duke@435 217 // Initial blocks must never be moved, so are never uncommon.
adlertz@5639 218 if (block->head()->is_Root() || block->head()->is_Start()) return false;
duke@435 219
duke@435 220 // Check for way-low freq
adlertz@5639 221 if(block->_freq < BLOCK_FREQUENCY(0.00001f) ) return true;
duke@435 222
duke@435 223 // Look for code shape indicating uncommon_trap or slow path
adlertz@5639 224 if (block->has_uncommon_code()) return true;
duke@435 225
duke@435 226 const float epsilon = 0.05f;
duke@435 227 const float guard_factor = PROB_UNLIKELY_MAG(4) / (1.f - epsilon);
duke@435 228 uint uncommon_preds = 0;
duke@435 229 uint freq_preds = 0;
duke@435 230 uint uncommon_for_freq_preds = 0;
duke@435 231
adlertz@5639 232 for( uint i=1; i< block->num_preds(); i++ ) {
adlertz@5639 233 Block* guard = get_block_for_node(block->pred(i));
duke@435 234 // Check to see if this block follows its guard 1 time out of 10000
duke@435 235 // or less.
duke@435 236 //
duke@435 237 // See list of magnitude-4 unlikely probabilities in cfgnode.hpp which
duke@435 238 // we intend to be "uncommon", such as slow-path TLE allocation,
duke@435 239 // predicted call failure, and uncommon trap triggers.
duke@435 240 //
duke@435 241 // Use an epsilon value of 5% to allow for variability in frequency
duke@435 242 // predictions and floating point calculations. The net effect is
duke@435 243 // that guard_factor is set to 9500.
duke@435 244 //
duke@435 245 // Ignore low-frequency blocks.
duke@435 246 // The next check is (guard->_freq < 1.e-5 * 9500.).
duke@435 247 if(guard->_freq*BLOCK_FREQUENCY(guard_factor) < BLOCK_FREQUENCY(0.00001f)) {
duke@435 248 uncommon_preds++;
duke@435 249 } else {
duke@435 250 freq_preds++;
adlertz@5639 251 if(block->_freq < guard->_freq * guard_factor ) {
duke@435 252 uncommon_for_freq_preds++;
duke@435 253 }
duke@435 254 }
duke@435 255 }
adlertz@5639 256 if( block->num_preds() > 1 &&
duke@435 257 // The block is uncommon if all preds are uncommon or
adlertz@5639 258 (uncommon_preds == (block->num_preds()-1) ||
duke@435 259 // it is uncommon for all frequent preds.
duke@435 260 uncommon_for_freq_preds == freq_preds) ) {
duke@435 261 return true;
duke@435 262 }
duke@435 263 return false;
duke@435 264 }
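// Illustrative arithmetic: with epsilon == 0.05, guard_factor is
// PROB_UNLIKELY_MAG(4) / 0.95 == 1e-4 / 0.95, i.e. roughly 1/9500, so a
// block counts as uncommon for a frequent predecessor when its frequency
// is below about 1/9500 of that predecessor's - the "1 time out of 10000
// or less" intent described above.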
duke@435 265
duke@435 266 #ifndef PRODUCT
kvn@3049 267 void Block::dump_bidx(const Block* orig, outputStream* st) const {
kvn@3049 268 if (_pre_order) st->print("B%d",_pre_order);
kvn@3049 269 else st->print("N%d", head()->_idx);
duke@435 270
duke@435 271 if (Verbose && orig != this) {
duke@435 272 // Dump the original block's idx
kvn@3049 273 st->print(" (");
kvn@3049 274 orig->dump_bidx(orig, st);
kvn@3049 275 st->print(")");
duke@435 276 }
duke@435 277 }
duke@435 278
adlertz@5509 279 void Block::dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st) const {
duke@435 280 if (is_connector()) {
duke@435 281 for (uint i=1; i<num_preds(); i++) {
adlertz@5509 282 Block *p = cfg->get_block_for_node(pred(i));
adlertz@5509 283 p->dump_pred(cfg, orig, st);
duke@435 284 }
duke@435 285 } else {
kvn@3049 286 dump_bidx(orig, st);
kvn@3049 287 st->print(" ");
duke@435 288 }
duke@435 289 }
duke@435 290
adlertz@5509 291 void Block::dump_head(const PhaseCFG* cfg, outputStream* st) const {
duke@435 292 // Print the basic block
kvn@3049 293 dump_bidx(this, st);
kvn@3049 294 st->print(": #\t");
duke@435 295
duke@435 296 // Print the incoming CFG edges and the outgoing CFG edges
duke@435 297 for( uint i=0; i<_num_succs; i++ ) {
kvn@3049 298 non_connector_successor(i)->dump_bidx(_succs[i], st);
kvn@3049 299 st->print(" ");
duke@435 300 }
kvn@3049 301 st->print("<- ");
duke@435 302 if( head()->is_block_start() ) {
duke@435 303 for (uint i=1; i<num_preds(); i++) {
duke@435 304 Node *s = pred(i);
adlertz@5509 305 if (cfg != NULL) {
adlertz@5509 306 Block *p = cfg->get_block_for_node(s);
adlertz@5509 307 p->dump_pred(cfg, p, st);
duke@435 308 } else {
duke@435 309 while (!s->is_block_start())
duke@435 310 s = s->in(0);
kvn@3049 311 st->print("N%d ", s->_idx );
duke@435 312 }
duke@435 313 }
adlertz@5509 314 } else {
kvn@3049 315 st->print("BLOCK HEAD IS JUNK ");
adlertz@5509 316 }
duke@435 317
duke@435 318 // Print loop, if any
duke@435 319 const Block *bhead = this; // Head of self-loop
duke@435 320 Node *bh = bhead->head();
adlertz@5509 321
adlertz@5509 322 if ((cfg != NULL) && bh->is_Loop() && !head()->is_Root()) {
duke@435 323 LoopNode *loop = bh->as_Loop();
adlertz@5509 324 const Block *bx = cfg->get_block_for_node(loop->in(LoopNode::LoopBackControl));
duke@435 325 while (bx->is_connector()) {
adlertz@5509 326 bx = cfg->get_block_for_node(bx->pred(1));
duke@435 327 }
kvn@3049 328 st->print("\tLoop: B%d-B%d ", bhead->_pre_order, bx->_pre_order);
duke@435 329 // Dump any loop-specific bits, especially for CountedLoops.
kvn@3049 330 loop->dump_spec(st);
rasbold@853 331 } else if (has_loop_alignment()) {
kvn@3049 332 st->print(" top-of-loop");
duke@435 333 }
kvn@3049 334 st->print(" Freq: %g",_freq);
duke@435 335 if( Verbose || WizardMode ) {
kvn@3049 336 st->print(" IDom: %d/#%d", _idom ? _idom->_pre_order : 0, _dom_depth);
kvn@3049 337 st->print(" RegPressure: %d",_reg_pressure);
kvn@3049 338 st->print(" IHRP Index: %d",_ihrp_index);
kvn@3049 339 st->print(" FRegPressure: %d",_freg_pressure);
kvn@3049 340 st->print(" FHRP Index: %d",_fhrp_index);
duke@435 341 }
kvn@3049 342 st->print_cr("");
duke@435 343 }
duke@435 344
adlertz@5509 345 void Block::dump() const {
adlertz@5509 346 dump(NULL);
adlertz@5509 347 }
duke@435 348
adlertz@5509 349 void Block::dump(const PhaseCFG* cfg) const {
adlertz@5509 350 dump_head(cfg);
adlertz@5635 351 for (uint i=0; i< number_of_nodes(); i++) {
adlertz@5635 352 get_node(i)->dump();
adlertz@5509 353 }
duke@435 354 tty->print("\n");
duke@435 355 }
duke@435 356 #endif
duke@435 357
adlertz@5509 358 PhaseCFG::PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher)
adlertz@5509 359 : Phase(CFG)
adlertz@5509 360 , _block_arena(arena)
adlertz@5539 361 , _root(root)
adlertz@5539 362 , _matcher(matcher)
adlertz@5509 363 , _node_to_block_mapping(arena)
adlertz@5509 364 , _node_latency(NULL)
duke@435 365 #ifndef PRODUCT
adlertz@5509 366 , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
duke@435 367 #endif
kvn@1268 368 #ifdef ASSERT
adlertz@5509 369 , _raw_oops(arena)
kvn@1268 370 #endif
duke@435 371 {
duke@435 372 ResourceMark rm;
duke@435 373 // I'll need a few machine-specific GotoNodes. Make an Ideal GotoNode,
duke@435 374 // then Match it into a machine-specific Node. Then clone the machine
duke@435 375 // Node on demand.
kvn@4115 376 Node *x = new (C) GotoNode(NULL);
duke@435 377 x->init_req(0, x);
adlertz@5509 378 _goto = matcher.match_tree(x);
duke@435 379 assert(_goto != NULL, "");
duke@435 380 _goto->set_req(0,_goto);
duke@435 381
duke@435 382 // Build the CFG in Reverse Post Order
adlertz@5539 383 _number_of_blocks = build_cfg();
adlertz@5539 384 _root_block = get_block_for_node(_root);
duke@435 385 }
duke@435 386
duke@435 387 // Build a proper looking CFG. Make every block begin with either a StartNode
duke@435 388 // or a RegionNode. Make every block end with either a Goto, If or Return.
duke@435 389 // The RootNode both starts and ends its own block. Do this with a recursive
duke@435 390 // backwards walk over the control edges.
duke@435 391 uint PhaseCFG::build_cfg() {
duke@435 392 Arena *a = Thread::current()->resource_area();
duke@435 393 VectorSet visited(a);
duke@435 394
duke@435 395 // Allocate stack with enough space to avoid frequent realloc
duke@435 396 Node_Stack nstack(a, C->unique() >> 1);
duke@435 397 nstack.push(_root, 0);
duke@435 398 uint sum = 0; // Counter for blocks
duke@435 399
duke@435 400 while (nstack.is_nonempty()) {
duke@435 401 // node and in's index from stack's top
duke@435 402 // 'np' is _root (see above) or RegionNode, StartNode: we push on stack
duke@435 403 // only nodes which point to the start of basic block (see below).
duke@435 404 Node *np = nstack.node();
duke@435 405 // idx > 0, except for the first node (_root) pushed on stack
duke@435 406 // at the beginning when idx == 0.
duke@435 407 // We will use the condition (idx == 0) later to end the build.
duke@435 408 uint idx = nstack.index();
duke@435 409 Node *proj = np->in(idx);
duke@435 410 const Node *x = proj->is_block_proj();
duke@435 411 // Does the block end with a proper block-ending Node? One of Return,
duke@435 412 // If or Goto? (This check should be done for visited nodes also).
duke@435 413 if (x == NULL) { // Does not end right...
duke@435 414 Node *g = _goto->clone(); // Force it to end in a Goto
duke@435 415 g->set_req(0, proj);
duke@435 416 np->set_req(idx, g);
duke@435 417 x = proj = g;
duke@435 418 }
duke@435 419 if (!visited.test_set(x->_idx)) { // Visit this block once
duke@435 420 // Skip any control-pinned middle'in stuff
duke@435 421 Node *p = proj;
duke@435 422 do {
duke@435 423 proj = p; // Update pointer to last Control
duke@435 424 p = p->in(0); // Move control forward
duke@435 425 } while( !p->is_block_proj() &&
duke@435 426 !p->is_block_start() );
duke@435 427 // Make the block begin with one of Region or StartNode.
duke@435 428 if( !p->is_block_start() ) {
kvn@4115 429 RegionNode *r = new (C) RegionNode( 2 );
duke@435 430 r->init_req(1, p); // Insert RegionNode in the way
duke@435 431 proj->set_req(0, r); // Insert RegionNode in the way
duke@435 432 p = r;
duke@435 433 }
duke@435 434 // 'p' now points to the start of this basic block
duke@435 435
duke@435 436 // Put self in array of basic blocks
adlertz@5509 437 Block *bb = new (_block_arena) Block(_block_arena, p);
adlertz@5509 438 map_node_to_block(p, bb);
adlertz@5509 439 map_node_to_block(x, bb);
kvn@3049 440 if( x != p ) { // Only for root is x == p
adlertz@5635 441 bb->push_node((Node*)x);
kvn@3049 442 }
duke@435 443 // Now handle predecessors
duke@435 444 ++sum; // Count 1 for self block
duke@435 445 uint cnt = bb->num_preds();
duke@435 446 for (int i = (cnt - 1); i > 0; i-- ) { // For all predecessors
duke@435 447 Node *prevproj = p->in(i); // Get prior input
duke@435 448 assert( !prevproj->is_Con(), "dead input not removed" );
duke@435 449 // Check to see if p->in(i) is a "control-dependent" CFG edge -
duke@435 450 // i.e., it splits at the source (via an IF or SWITCH) and merges
duke@435 451 // at the destination (via a many-input Region).
duke@435 452 // This breaks critical edges. The RegionNode to start the block
duke@435 453 // will be added when <p,i> is pulled off the node stack
duke@435 454 if ( cnt > 2 ) { // Merging many things?
duke@435 455 assert( prevproj== bb->pred(i),"");
duke@435 456 if(prevproj->is_block_proj() != prevproj) { // Control-dependent edge?
duke@435 457 // Force a block on the control-dependent edge
duke@435 458 Node *g = _goto->clone(); // Force it to end in a Goto
duke@435 459 g->set_req(0,prevproj);
duke@435 460 p->set_req(i,g);
duke@435 461 }
duke@435 462 }
duke@435 463 nstack.push(p, i); // 'p' is RegionNode or StartNode
duke@435 464 }
duke@435 465 } else { // Post-processing visited nodes
duke@435 466 nstack.pop(); // remove node from stack
duke@435 467 // Check if it is the first node pushed on the stack at the beginning.
duke@435 468 if (idx == 0) break; // end of the build
duke@435 469 // Find predecessor basic block
adlertz@5509 470 Block *pb = get_block_for_node(x);
duke@435 471 // Insert into nodes array, if not already there
adlertz@5509 472 if (!has_block(proj)) {
duke@435 473 assert( x != proj, "" );
duke@435 474 // Map basic block of projection
adlertz@5509 475 map_node_to_block(proj, pb);
adlertz@5635 476 pb->push_node(proj);
duke@435 477 }
duke@435 478 // Insert self as a child of my predecessor block
adlertz@5509 479 pb->_succs.map(pb->_num_succs++, get_block_for_node(np));
adlertz@5635 480 assert( pb->get_node(pb->number_of_nodes() - pb->_num_succs)->is_block_proj(),
duke@435 481 "too many control users, not a CFG?" );
duke@435 482 }
duke@435 483 }
duke@435 484 // Return number of basic blocks for all children and self
duke@435 485 return sum;
duke@435 486 }
duke@435 487
duke@435 488 // Inserts a goto & corresponding basic block between
duke@435 489 // block[block_no] and its succ_no'th successor block
duke@435 490 void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
duke@435 491 // get block with block_no
adlertz@5539 492 assert(block_no < number_of_blocks(), "illegal block number");
adlertz@5539 493 Block* in = get_block(block_no);
duke@435 494 // get successor block succ_no
duke@435 495 assert(succ_no < in->_num_succs, "illegal successor number");
duke@435 496 Block* out = in->_succs[succ_no];
rasbold@743 497 // Compute frequency of the new block. Do this before inserting
rasbold@743 498 // new block in case succ_prob() needs to infer the probability from
rasbold@743 499 // surrounding blocks.
rasbold@743 500 float freq = in->_freq * in->succ_prob(succ_no);
duke@435 501 // get ProjNode corresponding to the succ_no'th successor of the in block
adlertz@5635 502 ProjNode* proj = in->get_node(in->number_of_nodes() - in->_num_succs + succ_no)->as_Proj();
duke@435 503 // create region for basic block
kvn@4115 504 RegionNode* region = new (C) RegionNode(2);
duke@435 505 region->init_req(1, proj);
duke@435 506 // setup corresponding basic block
adlertz@5509 507 Block* block = new (_block_arena) Block(_block_arena, region);
adlertz@5509 508 map_node_to_block(region, block);
duke@435 509 C->regalloc()->set_bad(region->_idx);
duke@435 510 // add a goto node
duke@435 511 Node* gto = _goto->clone(); // get a new goto node
duke@435 512 gto->set_req(0, region);
duke@435 513 // add it to the basic block
adlertz@5635 514 block->push_node(gto);
adlertz@5509 515 map_node_to_block(gto, block);
duke@435 516 C->regalloc()->set_bad(gto->_idx);
duke@435 517 // hook up successor block
duke@435 518 block->_succs.map(block->_num_succs++, out);
duke@435 519 // remap successor's predecessors if necessary
duke@435 520 for (uint i = 1; i < out->num_preds(); i++) {
duke@435 521 if (out->pred(i) == proj) out->head()->set_req(i, gto);
duke@435 522 }
duke@435 523 // remap predecessor's successor to new block
duke@435 524 in->_succs.map(succ_no, block);
rasbold@743 525 // Set the frequency of the new block
rasbold@743 526 block->_freq = freq;
duke@435 527 // add new basic block to basic block list
adlertz@5539 528 add_block_at(block_no + 1, block);
duke@435 529 }
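// Illustrative shape of the transformation, for in --succ_no--> out:
//
//   before:  in ------------------------> out
//   after:   in --> block(region, goto) --> out
//
// The new block inherits the edge frequency in->_freq * succ_prob(succ_no)
// and is placed in the block list at block_no + 1.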
duke@435 530
duke@435 531 // Does this block end in a multiway branch that cannot have the default case
duke@435 532 // flipped for another case?
duke@435 533 static bool no_flip_branch( Block *b ) {
adlertz@5635 534 int branch_idx = b->number_of_nodes() - b->_num_succs-1;
duke@435 535 if( branch_idx < 1 ) return false;
adlertz@5635 536 Node *bra = b->get_node(branch_idx);
rasbold@853 537 if( bra->is_Catch() )
rasbold@853 538 return true;
duke@435 539 if( bra->is_Mach() ) {
rasbold@853 540 if( bra->is_MachNullCheck() )
rasbold@853 541 return true;
duke@435 542 int iop = bra->as_Mach()->ideal_Opcode();
duke@435 543 if( iop == Op_FastLock || iop == Op_FastUnlock )
duke@435 544 return true;
duke@435 545 }
duke@435 546 return false;
duke@435 547 }
duke@435 548
duke@435 549 // Check for NeverBranch at block end. This needs to become a GOTO to the
duke@435 550 // true target. NeverBranch nodes are treated as a conditional branch that always
duke@435 551 // goes the same direction for most of the optimizer and are used to give a
duke@435 552 // fake exit path to infinite loops. At this late stage they need to turn
duke@435 553 // into Goto's so that when you enter the infinite loop you indeed hang.
duke@435 554 void PhaseCFG::convert_NeverBranch_to_Goto(Block *b) {
duke@435 555 // Find true target
duke@435 556 int end_idx = b->end_idx();
adlertz@5635 557 int idx = b->get_node(end_idx+1)->as_Proj()->_con;
duke@435 558 Block *succ = b->_succs[idx];
duke@435 559 Node* gto = _goto->clone(); // get a new goto node
duke@435 560 gto->set_req(0, b->head());
adlertz@5635 561 Node *bp = b->get_node(end_idx);
adlertz@5635 562 b->map_node(gto, end_idx); // Slam over NeverBranch
adlertz@5509 563 map_node_to_block(gto, b);
duke@435 564 C->regalloc()->set_bad(gto->_idx);
adlertz@5635 565 b->pop_node(); // Yank projections
adlertz@5635 566 b->pop_node(); // Yank projections
duke@435 567 b->_succs.map(0,succ); // Map only successor
duke@435 568 b->_num_succs = 1;
duke@435 569 // remap successor's predecessors if necessary
duke@435 570 uint j;
duke@435 571 for( j = 1; j < succ->num_preds(); j++)
duke@435 572 if( succ->pred(j)->in(0) == bp )
duke@435 573 succ->head()->set_req(j, gto);
duke@435 574 // Kill alternate exit path
duke@435 575 Block *dead = b->_succs[1-idx];
duke@435 576 for( j = 1; j < dead->num_preds(); j++)
duke@435 577 if( dead->pred(j)->in(0) == bp )
duke@435 578 break;
duke@435 579 // Scan through block, yanking dead path from
duke@435 580 // all regions and phis.
duke@435 581 dead->head()->del_req(j);
adlertz@5635 582 for( int k = 1; dead->get_node(k)->is_Phi(); k++ )
adlertz@5635 583 dead->get_node(k)->del_req(j);
duke@435 584 }
duke@435 585
duke@435 586 // Helper function to move block bx to the slot following b_index. Return
duke@435 587 // true if the move is successful, otherwise false
rasbold@853 588 bool PhaseCFG::move_to_next(Block* bx, uint b_index) {
duke@435 589 if (bx == NULL) return false;
duke@435 590
duke@435 591 // Return false if bx is already scheduled.
duke@435 592 uint bx_index = bx->_pre_order;
adlertz@5539 593 if ((bx_index <= b_index) && (get_block(bx_index) == bx)) {
duke@435 594 return false;
duke@435 595 }
duke@435 596
duke@435 597 // Find the current index of block bx on the block list
duke@435 598 bx_index = b_index + 1;
adlertz@5539 599 while (bx_index < number_of_blocks() && get_block(bx_index) != bx) {
adlertz@5539 600 bx_index++;
adlertz@5539 601 }
adlertz@5539 602 assert(get_block(bx_index) == bx, "block not found");
duke@435 603
duke@435 604 // If the previous block conditionally falls into bx, return false,
duke@435 605 // because moving bx will create an extra jump.
duke@435 606 for(uint k = 1; k < bx->num_preds(); k++ ) {
adlertz@5509 607 Block* pred = get_block_for_node(bx->pred(k));
adlertz@5539 608 if (pred == get_block(bx_index - 1)) {
duke@435 609 if (pred->_num_succs != 1) {
duke@435 610 return false;
duke@435 611 }
duke@435 612 }
duke@435 613 }
duke@435 614
duke@435 615 // Reinsert bx just past block 'b'
duke@435 616 _blocks.remove(bx_index);
duke@435 617 _blocks.insert(b_index + 1, bx);
duke@435 618 return true;
duke@435 619 }
duke@435 620
duke@435 621 // Move empty and uncommon blocks to the end.
rasbold@853 622 void PhaseCFG::move_to_end(Block *b, uint i) {
duke@435 623 int e = b->is_Empty();
duke@435 624 if (e != Block::not_empty) {
duke@435 625 if (e == Block::empty_with_goto) {
duke@435 626 // Remove the goto, but leave the block.
adlertz@5635 627 b->pop_node();
duke@435 628 }
duke@435 629 // Mark this block as a connector block, which will cause it to be
duke@435 630 // ignored in certain functions such as non_connector_successor().
duke@435 631 b->set_connector();
duke@435 632 }
duke@435 633 // Move the empty block to the end, and don't recheck.
duke@435 634 _blocks.remove(i);
duke@435 635 _blocks.push(b);
duke@435 636 }
duke@435 637
rasbold@853 638 // Set loop alignment for every block
rasbold@853 639 void PhaseCFG::set_loop_alignment() {
adlertz@5539 640 uint last = number_of_blocks();
adlertz@5539 641 assert(get_block(0) == get_root_block(), "");
rasbold@853 642
adlertz@5539 643 for (uint i = 1; i < last; i++) {
adlertz@5539 644 Block* block = get_block(i);
adlertz@5539 645 if (block->head()->is_Loop()) {
adlertz@5539 646 block->set_loop_alignment(block);
rasbold@853 647 }
rasbold@853 648 }
rasbold@853 649 }
rasbold@853 650
rasbold@853 651 // Make empty basic blocks to be "connector" blocks, Move uncommon blocks
rasbold@853 652 // to the end.
adlertz@5539 653 void PhaseCFG::remove_empty_blocks() {
duke@435 654 // Move uncommon blocks to the end
adlertz@5539 655 uint last = number_of_blocks();
adlertz@5539 656 assert(get_block(0) == get_root_block(), "");
rasbold@853 657
rasbold@853 658 for (uint i = 1; i < last; i++) {
adlertz@5539 659 Block* block = get_block(i);
adlertz@5539 660 if (block->is_connector()) {
adlertz@5539 661 break;
adlertz@5539 662 }
duke@435 663
duke@435 664 // Check for NeverBranch at block end. This needs to become a GOTO to the
duke@435 665 // true target. NeverBranch nodes are treated as a conditional branch that
duke@435 666 // always goes the same direction for most of the optimizer and are used
duke@435 667 // to give a fake exit path to infinite loops. At this late stage they
duke@435 668 // need to turn into Goto's so that when you enter the infinite loop you
duke@435 669 // indeed hang.
adlertz@5635 670 if (block->get_node(block->end_idx())->Opcode() == Op_NeverBranch) {
adlertz@5539 671 convert_NeverBranch_to_Goto(block);
adlertz@5539 672 }
duke@435 673
duke@435 674 // Look for uncommon blocks and move to end.
rasbold@853 675 if (!C->do_freq_based_layout()) {
adlertz@5639 676 if (is_uncommon(block)) {
adlertz@5539 677 move_to_end(block, i);
rasbold@853 678 last--; // No longer check for being uncommon!
adlertz@5539 679 if (no_flip_branch(block)) { // Fall-thru case must follow?
adlertz@5539 680 // Find the fall-thru block
adlertz@5539 681 block = get_block(i);
adlertz@5539 682 move_to_end(block, i);
rasbold@853 683 last--;
rasbold@853 684 }
adlertz@5539 685 // backup block counter post-increment
adlertz@5539 686 i--;
duke@435 687 }
duke@435 688 }
duke@435 689 }
duke@435 690
rasbold@853 691 // Move empty blocks to the end
adlertz@5539 692 last = number_of_blocks();
rasbold@853 693 for (uint i = 1; i < last; i++) {
adlertz@5539 694 Block* block = get_block(i);
adlertz@5539 695 if (block->is_Empty() != Block::not_empty) {
adlertz@5539 696 move_to_end(block, i);
rasbold@853 697 last--;
rasbold@853 698 i--;
duke@435 699 }
duke@435 700 } // End of for all blocks
rasbold@853 701 }
duke@435 702
rasbold@853 703 // Fix up the final control flow for basic blocks.
rasbold@853 704 void PhaseCFG::fixup_flow() {
duke@435 705 // Fixup final control flow for the blocks. Remove jump-to-next
goetz@6478 706 // block. If neither arm of an IF follows the conditional branch, we
duke@435 707 // have to add a second jump after the conditional. We place the
duke@435 708 // TRUE branch target in succs[0] for both GOTOs and IFs.
adlertz@5539 709 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 710 Block* block = get_block(i);
adlertz@5539 711 block->_pre_order = i; // turn pre-order into block-index
duke@435 712
duke@435 713 // Connector blocks need no further processing.
adlertz@5539 714 if (block->is_connector()) {
adlertz@5539 715 assert((i+1) == number_of_blocks() || get_block(i + 1)->is_connector(), "All connector blocks should sink to the end");
duke@435 716 continue;
duke@435 717 }
adlertz@5539 718 assert(block->is_Empty() != Block::completely_empty, "Empty blocks should be connectors");
duke@435 719
adlertz@5539 720 Block* bnext = (i < number_of_blocks() - 1) ? get_block(i + 1) : NULL;
adlertz@5539 721 Block* bs0 = block->non_connector_successor(0);
duke@435 722
duke@435 723 // Check for multi-way branches where I cannot negate the test to
duke@435 724 // exchange the true and false targets.
adlertz@5539 725 if (no_flip_branch(block)) {
duke@435 726 // Find fall through case - if must fall into its target
adlertz@5635 727 int branch_idx = block->number_of_nodes() - block->_num_succs;
adlertz@5539 728 for (uint j2 = 0; j2 < block->_num_succs; j2++) {
adlertz@5635 729 const ProjNode* p = block->get_node(branch_idx + j2)->as_Proj();
duke@435 730 if (p->_con == 0) {
duke@435 731 // successor j2 is fall through case
adlertz@5539 732 if (block->non_connector_successor(j2) != bnext) {
duke@435 733 // but it is not the next block => insert a goto
duke@435 734 insert_goto_at(i, j2);
duke@435 735 }
duke@435 736 // Put taken branch in slot 0
adlertz@5539 737 if (j2 == 0 && block->_num_succs == 2) {
duke@435 738 // Flip targets in succs map
adlertz@5539 739 Block *tbs0 = block->_succs[0];
adlertz@5539 740 Block *tbs1 = block->_succs[1];
adlertz@5539 741 block->_succs.map(0, tbs1);
adlertz@5539 742 block->_succs.map(1, tbs0);
duke@435 743 }
duke@435 744 break;
duke@435 745 }
duke@435 746 }
adlertz@5539 747
duke@435 748 // Remove all CatchProjs
adlertz@5539 749 for (uint j = 0; j < block->_num_succs; j++) {
adlertz@5635 750 block->pop_node();
adlertz@5539 751 }
duke@435 752
adlertz@5539 753 } else if (block->_num_succs == 1) {
duke@435 754 // Block ends in a Goto?
duke@435 755 if (bnext == bs0) {
duke@435 756 // We fall into next block; remove the Goto
adlertz@5635 757 block->pop_node();
duke@435 758 }
duke@435 759
adlertz@5539 760 } else if(block->_num_succs == 2) { // Block ends in a If?
duke@435 761 // Get opcode of 1st projection (matches _succs[0])
duke@435 762 // Note: Since this basic block has 2 exits, the last 2 nodes must
duke@435 763 // be projections (in any order), the 3rd last node must be
duke@435 764 // the IfNode (we have excluded other 2-way exits such as
duke@435 765 // CatchNodes already).
adlertz@5635 766 MachNode* iff = block->get_node(block->number_of_nodes() - 3)->as_Mach();
adlertz@5635 767 ProjNode* proj0 = block->get_node(block->number_of_nodes() - 2)->as_Proj();
adlertz@5635 768 ProjNode* proj1 = block->get_node(block->number_of_nodes() - 1)->as_Proj();
duke@435 769
duke@435 770 // Assert that proj0 and succs[0] match up. Similarly for proj1 and succs[1].
adlertz@5539 771 assert(proj0->raw_out(0) == block->_succs[0]->head(), "Mismatch successor 0");
adlertz@5539 772 assert(proj1->raw_out(0) == block->_succs[1]->head(), "Mismatch successor 1");
duke@435 773
adlertz@5539 774 Block* bs1 = block->non_connector_successor(1);
duke@435 775
duke@435 776 // Check for neither successor block following the current
duke@435 777 // block ending in a conditional. If so, move one of the
duke@435 778 // successors after the current one, provided that the
duke@435 779 // successor was previously unscheduled, but moveable
duke@435 780 // (i.e., all paths to it involve a branch).
adlertz@5539 781 if (!C->do_freq_based_layout() && bnext != bs0 && bnext != bs1) {
duke@435 782 // Choose the more common successor based on the probability
duke@435 783 // of the conditional branch.
adlertz@5539 784 Block* bx = bs0;
adlertz@5539 785 Block* by = bs1;
duke@435 786
duke@435 787 // _prob is the probability of taking the true path. Make
duke@435 788 // p the probability of taking successor #1.
duke@435 789 float p = iff->as_MachIf()->_prob;
adlertz@5539 790 if (proj0->Opcode() == Op_IfTrue) {
duke@435 791 p = 1.0 - p;
duke@435 792 }
duke@435 793
duke@435 794 // Prefer successor #1 if p > 0.5
duke@435 795 if (p > PROB_FAIR) {
duke@435 796 bx = bs1;
duke@435 797 by = bs0;
duke@435 798 }
duke@435 799
duke@435 800 // Attempt the more common successor first
rasbold@853 801 if (move_to_next(bx, i)) {
duke@435 802 bnext = bx;
rasbold@853 803 } else if (move_to_next(by, i)) {
duke@435 804 bnext = by;
duke@435 805 }
duke@435 806 }
duke@435 807
duke@435 808 // Check for conditional branching the wrong way. Negate
duke@435 809 // conditional, if needed, so it falls into the following block
duke@435 810 // and branches to the not-following block.
duke@435 811
duke@435 812 // Check for the next block being in succs[0]. We are going to branch
duke@435 813 // to succs[0], so we want the fall-thru case as the next block in
duke@435 814 // succs[1].
duke@435 815 if (bnext == bs0) {
duke@435 816 // Fall-thru case in succs[0], so flip targets in succs map
adlertz@5539 817 Block* tbs0 = block->_succs[0];
adlertz@5539 818 Block* tbs1 = block->_succs[1];
adlertz@5539 819 block->_succs.map(0, tbs1);
adlertz@5539 820 block->_succs.map(1, tbs0);
duke@435 821 // Flip projection for each target
adlertz@5539 822 ProjNode* tmp = proj0;
adlertz@5539 823 proj0 = proj1;
adlertz@5539 824 proj1 = tmp;
duke@435 825
adlertz@5539 826 } else if(bnext != bs1) {
rasbold@853 827 // Need a double-branch
duke@435 828 // The existing conditional branch need not change.
duke@435 829 // Add an unconditional branch to the false target.
duke@435 830 // Alas, it must appear in its own block and adding a
duke@435 831 // block this late in the game is complicated. Sigh.
duke@435 832 insert_goto_at(i, 1);
duke@435 833 }
duke@435 834
duke@435 835 // Make sure we TRUE branch to the target
adlertz@5539 836 if (proj0->Opcode() == Op_IfFalse) {
kvn@3051 837 iff->as_MachIf()->negate();
rasbold@853 838 }
duke@435 839
adlertz@5635 840 block->pop_node(); // Remove IfFalse & IfTrue projections
adlertz@5635 841 block->pop_node();
duke@435 842
duke@435 843 } else {
duke@435 844 // Multi-exit block, e.g. a switch statement
duke@435 845 // But we don't need to do anything here
duke@435 846 }
duke@435 847 } // End of for all blocks
duke@435 848 }
duke@435 849
duke@435 850
goetz@6478 851 // postalloc_expand: Expand nodes after register allocation.
goetz@6478 852 //
goetz@6478 853 // postalloc_expand has to be called after register allocation, just
goetz@6478 854 // before output (i.e. scheduling). It only gets called if
goetz@6478 855 // Matcher::require_postalloc_expand is true.
goetz@6478 856 //
goetz@6478 857 // Background:
goetz@6478 858 //
goetz@6478 859 // Nodes that are expanded (one compound node requiring several
goetz@6478 860 // assembler instructions to implement is split into two or more
goetz@6478 861 // non-compound nodes) after register allocation are not as nice as
goetz@6478 862 // the ones expanded before register allocation - they don't
goetz@6478 863 // participate in optimizations such as global code motion. But after
goetz@6478 864 // register allocation we can expand nodes that use registers which
goetz@6478 865 // are not spillable or registers that are not allocated, because the
goetz@6478 866 // old compound node is simply replaced (in its location in the basic
goetz@6478 867 // block) by a new subgraph which does not contain compound nodes any
goetz@6478 868 // more. The scheduler called during output can later on process these
goetz@6478 869 // non-compound nodes.
goetz@6478 870 //
goetz@6478 871 // Implementation:
goetz@6478 872 //
goetz@6478 873 // Nodes requiring postalloc expand are specified in the ad file by using
goetz@6478 874 // a postalloc_expand statement instead of ins_encode. A postalloc_expand
goetz@6478 875 // contains a single call to an encoding, as does an ins_encode
goetz@6478 876 // statement. Instead of an emit() function a postalloc_expand() function
goetz@6478 877 // is generated that doesn't emit assembler but creates a new
goetz@6478 878 // subgraph. The code below calls this postalloc_expand function for each
goetz@6478 879 // node with the appropriate attribute. This function returns the new
goetz@6478 880 // nodes generated in an array passed in the call. The old node,
goetz@6478 881 // potential MachTemps before and potential Projs after it then get
goetz@6478 882 // disconnected and replaced by the new nodes. The instruction
goetz@6478 883 // generating the result has to be the last one in the array. In
goetz@6478 884 // general it is assumed that Projs after the node expanded are
goetz@6478 885 // kills. These kills are not required any more after expanding as
goetz@6478 886 // there are now explicitly visible def-use chains and the Projs are
goetz@6478 887 // removed. This does not hold for calls: They have not only
goetz@6478 888 // kill-Projs but also Projs defining values. Therefore Projs after
goetz@6478 889 // the node expanded are removed for all nodes but calls. If a node is
goetz@6478 890 // to be reused, it must be added to the nodes list returned, and it
goetz@6478 891 // will be added again.
goetz@6478 892 //
goetz@6478 893 // Implementing the postalloc_expand function for a node in an enc_class
goetz@6478 894 // is rather tedious. It requires knowledge about many node details, as
goetz@6478 895 // the nodes and the subgraph must be hand crafted. To simplify this,
goetz@6478 896 // adlc generates some utility variables into the postalloc_expand function,
goetz@6478 897 // holding the operands as specified by the postalloc_expand encoding
goetz@6478 898 // specification, e.g.:
goetz@6478 899 // * unsigned idx_<par_name> holding the index of the node in the ins
goetz@6478 900 // * Node *n_<par_name> holding the node loaded from the ins
goetz@6478 901 // * MachOpnd *op_<par_name> holding the corresponding operand
goetz@6478 902 //
goetz@6478 903 // The ordering of operands cannot be determined by looking at a
goetz@6478 904 // rule. Especially if a match rule matches several different trees,
goetz@6478 905 // several nodes are generated from one instruct specification with
goetz@6478 906 // different operand orderings. In this case the adlc generated
goetz@6478 907 // variables are the only way to access the ins and operands
goetz@6478 908 // deterministically.
goetz@6478 909 //
goetz@6478 910 // If assigning a register to a node that contains an oop, don't
goetz@6478 911 // forget to call ra_->set_oop() for the node.
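//
// Illustrative ad-file shape for such a node (the instruct and encoding
// names here are made up, not taken from a real .ad file):
//
//   instruct loadConX_Ex(iRegX dst, immX src) %{
//     match(Set dst src);
//     postalloc_expand( postalloc_expand_load_conX(dst, src) );
//   %}
//
// For this instruct, adlc would generate a postalloc_expand() method that
// builds the replacement subgraph into the nodes array passed to it instead
// of emitting assembly, using the idx_dst/n_dst/op_dst style variables
// described above.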
goetz@6478 912 void PhaseCFG::postalloc_expand(PhaseRegAlloc* _ra) {
goetz@6478 913 GrowableArray <Node *> new_nodes(32); // Array with new nodes filled by postalloc_expand function of node.
goetz@6478 914 GrowableArray <Node *> remove(32);
goetz@6478 915 GrowableArray <Node *> succs(32);
goetz@6478 916 unsigned int max_idx = C->unique(); // Remember to distinguish new from old nodes.
goetz@6478 917 DEBUG_ONLY(bool foundNode = false);
goetz@6478 918
goetz@6478 919 // for all blocks
goetz@6478 920 for (uint i = 0; i < number_of_blocks(); i++) {
goetz@6478 921 Block *b = _blocks[i];
goetz@6478 922 // For all instructions in the current block.
goetz@6478 923 for (uint j = 0; j < b->number_of_nodes(); j++) {
goetz@6478 924 Node *n = b->get_node(j);
goetz@6478 925 if (n->is_Mach() && n->as_Mach()->requires_postalloc_expand()) {
goetz@6478 926 #ifdef ASSERT
goetz@6478 927 if (TracePostallocExpand) {
goetz@6478 928 if (!foundNode) {
goetz@6478 929 foundNode = true;
goetz@6478 930 tty->print("POSTALLOC EXPANDING %d %s\n", C->compile_id(),
goetz@6478 931 C->method() ? C->method()->name()->as_utf8() : C->stub_name());
goetz@6478 932 }
goetz@6478 933 tty->print(" postalloc expanding "); n->dump();
goetz@6478 934 if (Verbose) {
goetz@6478 935 tty->print(" with ins:\n");
goetz@6478 936 for (uint k = 0; k < n->len(); ++k) {
goetz@6478 937 if (n->in(k)) { tty->print(" "); n->in(k)->dump(); }
goetz@6478 938 }
goetz@6478 939 }
goetz@6478 940 }
goetz@6478 941 #endif
goetz@6478 942 new_nodes.clear();
goetz@6478 943 // Collect nodes that have to be removed from the block later on.
goetz@6478 944 uint req = n->req();
goetz@6478 945 remove.clear();
goetz@6478 946 for (uint k = 0; k < req; ++k) {
goetz@6478 947 if (n->in(k) && n->in(k)->is_MachTemp()) {
goetz@6478 948 remove.push(n->in(k)); // MachTemps which are inputs to the old node have to be removed.
goetz@6478 949 n->in(k)->del_req(0);
goetz@6478 950 j--;
goetz@6478 951 }
goetz@6478 952 }
goetz@6478 953
goetz@6478 954 // Check whether we can allocate enough nodes. We set a fixed limit for
goetz@6478 955 // the size of postalloc expands with this.
goetz@6478 956 uint unique_limit = C->unique() + 40;
goetz@6478 957 if (unique_limit >= _ra->node_regs_max_index()) {
goetz@6478 958 Compile::current()->record_failure("out of nodes in postalloc expand");
goetz@6478 959 return;
goetz@6478 960 }
goetz@6478 961
goetz@6478 962 // Emit (i.e. generate new nodes).
goetz@6478 963 n->as_Mach()->postalloc_expand(&new_nodes, _ra);
goetz@6478 964
goetz@6478 965 assert(C->unique() < unique_limit, "You allocated too many nodes in your postalloc expand.");
goetz@6478 966
goetz@6478 967 // Disconnect the inputs of the old node.
goetz@6478 968 //
goetz@6478 969 // We reuse MachSpillCopy nodes. If we need to expand them, there
goetz@6478 970 // are many, so reusing pays off. If reused, the node already
goetz@6478 971 // has the new ins. n must be the last node on new_nodes list.
goetz@6478 972 if (!n->is_MachSpillCopy()) {
goetz@6478 973 for (int k = req - 1; k >= 0; --k) {
goetz@6478 974 n->del_req(k);
goetz@6478 975 }
goetz@6478 976 }
goetz@6478 977
goetz@6478 978 #ifdef ASSERT
goetz@6478 979 // Check that all nodes have proper operands.
goetz@6478 980 for (int k = 0; k < new_nodes.length(); ++k) {
goetz@6478 981 if (new_nodes.at(k)->_idx < max_idx || !new_nodes.at(k)->is_Mach()) continue; // old node, Proj ...
goetz@6478 982 MachNode *m = new_nodes.at(k)->as_Mach();
goetz@6478 983 for (unsigned int l = 0; l < m->num_opnds(); ++l) {
goetz@6478 984 if (MachOper::notAnOper(m->_opnds[l])) {
goetz@6478 985 outputStream *os = tty;
goetz@6478 986 os->print("Node %s ", m->Name());
goetz@6478 987 os->print("has invalid opnd %d: %p\n", l, m->_opnds[l]);
goetz@6478 988 assert(0, "Invalid operands, see inline trace in hs_err_pid file.");
goetz@6478 989 }
goetz@6478 990 }
goetz@6478 991 }
goetz@6478 992 #endif
goetz@6478 993
goetz@6478 994 // Collect succs of old node in remove (for projections) and in succs (for
goetz@6478 995 // all other nodes); do _not_ collect projections in remove (but in succs)
goetz@6478 996 // in case the node is a call. We need the projections for calls as they are
goetz@6478 997 // associated with registers (i.e. they are defs).
goetz@6478 998 succs.clear();
goetz@6478 999 for (DUIterator k = n->outs(); n->has_out(k); k++) {
goetz@6478 1000 if (n->out(k)->is_Proj() && !n->is_MachCall() && !n->is_MachBranch()) {
goetz@6478 1001 remove.push(n->out(k));
goetz@6478 1002 } else {
goetz@6478 1003 succs.push(n->out(k));
goetz@6478 1004 }
goetz@6478 1005 }
goetz@6478 1006 // Replace old node n as input of its succs by last of the new nodes.
goetz@6478 1007 for (int k = 0; k < succs.length(); ++k) {
goetz@6478 1008 Node *succ = succs.at(k);
goetz@6478 1009 for (uint l = 0; l < succ->req(); ++l) {
goetz@6478 1010 if (succ->in(l) == n) {
goetz@6478 1011 succ->set_req(l, new_nodes.at(new_nodes.length() - 1));
goetz@6478 1012 }
goetz@6478 1013 }
goetz@6478 1014 for (uint l = succ->req(); l < succ->len(); ++l) {
goetz@6478 1015 if (succ->in(l) == n) {
goetz@6478 1016 succ->set_prec(l, new_nodes.at(new_nodes.length() - 1));
goetz@6478 1017 }
goetz@6478 1018 }
goetz@6478 1019 }
goetz@6478 1020
goetz@6478 1021 // Index of old node in block.
goetz@6478 1022 uint index = b->find_node(n);
goetz@6478 1023 // Insert new nodes into block and map them in nodes->blocks array
goetz@6478 1024 // and remember last node in n2.
goetz@6478 1025 Node *n2 = NULL;
goetz@6478 1026 for (int k = 0; k < new_nodes.length(); ++k) {
goetz@6478 1027 n2 = new_nodes.at(k);
goetz@6478 1028 b->insert_node(n2, ++index);
goetz@6478 1029 map_node_to_block(n2, b);
goetz@6478 1030 }
goetz@6478 1031
goetz@6478 1032 // Add old node n to remove and remove them all from block.
goetz@6478 1033 remove.push(n);
goetz@6478 1034 j--;
goetz@6478 1035 #ifdef ASSERT
goetz@6478 1036 if (TracePostallocExpand && Verbose) {
goetz@6478 1037 tty->print(" removing:\n");
goetz@6478 1038 for (int k = 0; k < remove.length(); ++k) {
goetz@6478 1039 tty->print(" "); remove.at(k)->dump();
goetz@6478 1040 }
goetz@6478 1041 tty->print(" inserting:\n");
goetz@6478 1042 for (int k = 0; k < new_nodes.length(); ++k) {
goetz@6478 1043 tty->print(" "); new_nodes.at(k)->dump();
goetz@6478 1044 }
goetz@6478 1045 }
goetz@6478 1046 #endif
goetz@6478 1047 for (int k = 0; k < remove.length(); ++k) {
goetz@6478 1048 if (b->contains(remove.at(k))) {
goetz@6478 1049 b->find_remove(remove.at(k));
goetz@6478 1050 } else {
goetz@6478 1051 assert(remove.at(k)->is_Proj() && (remove.at(k)->in(0)->is_MachBranch()), "");
goetz@6478 1052 }
goetz@6478 1053 }
goetz@6478 1054 // If anything has been inserted (n2 != NULL), continue after last node inserted.
goetz@6478 1055 // This does not always work. Some postalloc expands don't insert any nodes if they
goetz@6478 1056 // do optimizations (e.g., max(x,x)). In this case we decrement j accordingly.
goetz@6478 1057 j = n2 ? b->find_node(n2) : j;
goetz@6478 1058 }
goetz@6478 1059 }
goetz@6478 1060 }
goetz@6478 1061
goetz@6478 1062 #ifdef ASSERT
goetz@6478 1063 if (foundNode) {
goetz@6478 1064 tty->print("FINISHED %d %s\n", C->compile_id(),
goetz@6478 1065 C->method() ? C->method()->name()->as_utf8() : C->stub_name());
goetz@6478 1066 tty->flush();
goetz@6478 1067 }
goetz@6478 1068 #endif
goetz@6478 1069 }
goetz@6478 1070
goetz@6478 1071
goetz@6478 1072 //------------------------------dump-------------------------------------------
duke@435 1073 #ifndef PRODUCT
duke@435 1074 void PhaseCFG::_dump_cfg( const Node *end, VectorSet &visited ) const {
duke@435 1075 const Node *x = end->is_block_proj();
duke@435 1076 assert( x, "not a CFG" );
duke@435 1077
duke@435 1078 // Do not visit this block again
duke@435 1079 if( visited.test_set(x->_idx) ) return;
duke@435 1080
duke@435 1081 // Skip through this block
duke@435 1082 const Node *p = x;
duke@435 1083 do {
duke@435 1084 p = p->in(0); // Move control forward
duke@435 1085 assert( !p->is_block_proj() || p->is_Root(), "not a CFG" );
duke@435 1086 } while( !p->is_block_start() );
duke@435 1087
duke@435 1088 // Recursively visit
adlertz@5509 1089 for (uint i = 1; i < p->req(); i++) {
adlertz@5509 1090 _dump_cfg(p->in(i), visited);
adlertz@5509 1091 }
duke@435 1092
duke@435 1093 // Dump the block
adlertz@5509 1094 get_block_for_node(p)->dump(this);
duke@435 1095 }
duke@435 1096
duke@435 1097 void PhaseCFG::dump( ) const {
adlertz@5539 1098 tty->print("\n--- CFG --- %d BBs\n", number_of_blocks());
adlertz@5509 1099 if (_blocks.size()) { // Did we do basic-block layout?
adlertz@5539 1100 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1101 const Block* block = get_block(i);
adlertz@5539 1102 block->dump(this);
adlertz@5509 1103 }
duke@435 1104 } else { // Else do it with a DFS
adlertz@5509 1105 VectorSet visited(_block_arena);
duke@435 1106 _dump_cfg(_root,visited);
duke@435 1107 }
duke@435 1108 }
duke@435 1109
duke@435 1110 void PhaseCFG::dump_headers() {
adlertz@5539 1111 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1112 Block* block = get_block(i);
adlertz@5539 1113 if (block != NULL) {
adlertz@5539 1114 block->dump_head(this);
adlertz@5509 1115 }
duke@435 1116 }
duke@435 1117 }
duke@435 1118
adlertz@5539 1119 void PhaseCFG::verify() const {
kvn@1001 1120 #ifdef ASSERT
duke@435 1121 // Verify sane CFG
adlertz@5539 1122 for (uint i = 0; i < number_of_blocks(); i++) {
adlertz@5539 1123 Block* block = get_block(i);
adlertz@5635 1124 uint cnt = block->number_of_nodes();
duke@435 1125 uint j;
kvn@3311 1126 for (j = 0; j < cnt; j++) {
adlertz@5635 1127 Node *n = block->get_node(j);
adlertz@5539 1128 assert(get_block_for_node(n) == block, "");
adlertz@5539 1129 if (j >= 1 && n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_CreateEx) {
adlertz@5635 1130 assert(j == 1 || block->get_node(j-1)->is_Phi(), "CreateEx must be first instruction in block");
duke@435 1131 }
kvn@3311 1132 for (uint k = 0; k < n->req(); k++) {
kvn@1001 1133 Node *def = n->in(k);
kvn@3311 1134 if (def && def != n) {
adlertz@5509 1135 assert(get_block_for_node(def) || def->is_Con(), "must have block; constants for debug info ok");
kvn@1001 1136 // Verify that instructions in the block is in correct order.
kvn@1001 1137 // Uses must follow their definition if they are at the same block.
kvn@1001 1138 // Mostly done to check that MachSpillCopy nodes are placed correctly
kvn@1001 1139 // when CreateEx node is moved in build_ifg_physical().
adlertz@5539 1140 if (get_block_for_node(def) == block && !(block->head()->is_Loop() && n->is_Phi()) &&
kvn@1001 1141 // See (+++) comment in reg_split.cpp
kvn@3311 1142 !(n->jvms() != NULL && n->jvms()->is_monitor_use(k))) {
kvn@1328 1143 bool is_loop = false;
kvn@1328 1144 if (n->is_Phi()) {
kvn@3311 1145 for (uint l = 1; l < def->req(); l++) {
kvn@1328 1146 if (n == def->in(l)) {
kvn@1328 1147 is_loop = true;
kvn@1328 1148 break; // Some kind of loop
kvn@1328 1149 }
kvn@1328 1150 }
kvn@1328 1151 }
adlertz@5539 1152 assert(is_loop || block->find_node(def) < j, "uses must follow definitions");
kvn@1036 1153 }
duke@435 1154 }
duke@435 1155 }
duke@435 1156 }
duke@435 1157
adlertz@5539 1158 j = block->end_idx();
adlertz@5635 1159 Node* bp = (Node*)block->get_node(block->number_of_nodes() - 1)->is_block_proj();
adlertz@5539 1160 assert(bp, "last instruction must be a block proj");
adlertz@5635 1161 assert(bp == block->get_node(j), "wrong number of successors for this block");
kvn@3311 1162 if (bp->is_Catch()) {
adlertz@5635 1163 while (block->get_node(--j)->is_MachProj()) {
adlertz@5539 1164 ;
adlertz@5539 1165 }
adlertz@5635 1166 assert(block->get_node(j)->is_MachCall(), "CatchProj must follow call");
kvn@3311 1167 } else if (bp->is_Mach() && bp->as_Mach()->ideal_Opcode() == Op_If) {
adlertz@5539 1168 assert(block->_num_succs == 2, "Conditional branch must have two targets");
duke@435 1169 }
duke@435 1170 }
kvn@1001 1171 #endif
duke@435 1172 }
duke@435 1173 #endif
duke@435 1174
duke@435 1175 UnionFind::UnionFind( uint max ) : _cnt(max), _max(max), _indices(NEW_RESOURCE_ARRAY(uint,max)) {
duke@435 1176 Copy::zero_to_bytes( _indices, sizeof(uint)*max );
duke@435 1177 }
duke@435 1178
duke@435 1179 void UnionFind::extend( uint from_idx, uint to_idx ) {
duke@435 1180 _nesting.check();
duke@435 1181 if( from_idx >= _max ) {
duke@435 1182 uint size = 16;
duke@435 1183 while( size <= from_idx ) size <<=1;
duke@435 1184 _indices = REALLOC_RESOURCE_ARRAY( uint, _indices, _max, size );
duke@435 1185 _max = size;
duke@435 1186 }
duke@435 1187 while( _cnt <= from_idx ) _indices[_cnt++] = 0;
duke@435 1188 _indices[from_idx] = to_idx;
duke@435 1189 }
duke@435 1190
duke@435 1191 void UnionFind::reset( uint max ) {
duke@435 1192 assert( max <= max_uint, "Must fit within uint" );
duke@435 1193 // Force the Union-Find mapping to be at least this large
duke@435 1194 extend(max,0);
duke@435 1195 // Initialize to be the ID mapping.
rasbold@853 1196 for( uint i=0; i<max; i++ ) map(i,i);
duke@435 1197 }
duke@435 1198
duke@435 1199 // Straight out of Tarjan's union-find algorithm
duke@435 1200 uint UnionFind::Find_compress( uint idx ) {
duke@435 1201 uint cur = idx;
duke@435 1202 uint next = lookup(cur);
duke@435 1203 while( next != cur ) { // Scan chain of equivalences
duke@435 1204 assert( next < cur, "always union smaller" );
duke@435 1205 cur = next; // until find a fixed-point
duke@435 1206 next = lookup(cur);
duke@435 1207 }
duke@435 1208 // Core of union-find algorithm: update chain of
duke@435 1209 // equivalences to be equal to the root.
duke@435 1210 while( idx != next ) {
duke@435 1211 uint tmp = lookup(idx);
duke@435 1212 map(idx, next);
duke@435 1213 idx = tmp;
duke@435 1214 }
duke@435 1215 return idx;
duke@435 1216 }
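// Illustrative trace: given the chain 5 -> 3 -> 1 -> 1, Find_compress(5)
// first walks to the root 1, then remaps both 5 and 3 directly to 1, so
// later lookups resolve in a single step.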
duke@435 1217
duke@435 1218 // Like Find above, but no path compress, so bad asymptotic behavior
duke@435 1219 uint UnionFind::Find_const( uint idx ) const {
duke@435 1220 if( idx == 0 ) return idx; // Ignore the zero idx
duke@435 1221 // Off the end? This can happen during debugging dumps
duke@435 1222 // when data structures have not finished being updated.
duke@435 1223 if( idx >= _max ) return idx;
duke@435 1224 uint next = lookup(idx);
duke@435 1225 while( next != idx ) { // Scan chain of equivalences
duke@435 1226 idx = next; // until we find a fixed-point
duke@435 1227 next = lookup(idx);
duke@435 1228 }
duke@435 1229 return next;
duke@435 1230 }
duke@435 1231
duke@435 1232 // Union two sets together. The smaller index becomes the root of the merged set.
duke@435 1233 void UnionFind::Union( uint idx1, uint idx2 ) {
duke@435 1234 uint src = Find(idx1);
duke@435 1235 uint dst = Find(idx2);
duke@435 1236 assert( src, "" );
duke@435 1237 assert( dst, "" );
duke@435 1238 assert( src < _max, "oob" );
duke@435 1239 assert( dst < _max, "oob" );
duke@435 1240 assert( src < dst, "always union smaller" );
duke@435 1241 map(dst,src);
duke@435 1242 }
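// A minimal usage sketch (the indices below are hypothetical):
//
//   UnionFind uf(8);
//   uf.reset(8);        // identity mapping: i -> i
//   uf.Union(1, 3);     // 3 now maps to 1
//   uf.Union(1, 5);     // 5 now maps to 1
//   assert(uf.Find(3) == uf.Find(5), "both sets are rooted at 1");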
rasbold@853 1243
rasbold@853 1244 #ifndef PRODUCT
rasbold@853 1245 void Trace::dump( ) const {
rasbold@853 1246 tty->print_cr("Trace (freq %f)", first_block()->_freq);
rasbold@853 1247 for (Block *b = first_block(); b != NULL; b = next(b)) {
rasbold@853 1248 tty->print(" B%d", b->_pre_order);
rasbold@853 1249 if (b->head()->is_Loop()) {
rasbold@853 1250 tty->print(" (L%d)", b->compute_loop_alignment());
rasbold@853 1251 }
rasbold@853 1252 if (b->has_loop_alignment()) {
rasbold@853 1253 tty->print(" (T%d)", b->code_alignment());
rasbold@853 1254 }
rasbold@853 1255 }
rasbold@853 1256 tty->cr();
rasbold@853 1257 }
rasbold@853 1258
rasbold@853 1259 void CFGEdge::dump( ) const {
rasbold@853 1260 tty->print(" B%d --> B%d Freq: %f out:%3d%% in:%3d%% State: ",
rasbold@853 1261 from()->_pre_order, to()->_pre_order, freq(), _from_pct, _to_pct);
rasbold@853 1262 switch(state()) {
rasbold@853 1263 case connected:
rasbold@853 1264 tty->print("connected");
rasbold@853 1265 break;
rasbold@853 1266 case open:
rasbold@853 1267 tty->print("open");
rasbold@853 1268 break;
rasbold@853 1269 case interior:
rasbold@853 1270 tty->print("interior");
rasbold@853 1271 break;
rasbold@853 1272 }
rasbold@853 1273 if (infrequent()) {
rasbold@853 1274 tty->print(" infrequent");
rasbold@853 1275 }
rasbold@853 1276 tty->cr();
rasbold@853 1277 }
rasbold@853 1278 #endif
rasbold@853 1279
rasbold@853 1280 // Comparison function for edges
rasbold@853 1281 static int edge_order(CFGEdge **e0, CFGEdge **e1) {
rasbold@853 1282 float freq0 = (*e0)->freq();
rasbold@853 1283 float freq1 = (*e1)->freq();
rasbold@853 1284 if (freq0 != freq1) {
rasbold@853 1285 return freq0 > freq1 ? -1 : 1;
rasbold@853 1286 }
rasbold@853 1287
rasbold@853 1288 int dist0 = (*e0)->to()->_rpo - (*e0)->from()->_rpo;
rasbold@853 1289 int dist1 = (*e1)->to()->_rpo - (*e1)->from()->_rpo;
rasbold@853 1290
rasbold@853 1291 return dist1 - dist0;
rasbold@853 1292 }
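// Note: among edges of equal frequency, the comparator above puts the edge
// spanning the larger forward RPO distance first; dist1 - dist0 is negative
// exactly when e0 covers the greater span.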
rasbold@853 1293
rasbold@853 1294 // Comparison function for traces (qsort-compatible, hence the extern "C" linkage)
kvn@3128 1295 extern "C" int trace_frequency_order(const void *p0, const void *p1) {
rasbold@853 1296 Trace *tr0 = *(Trace **) p0;
rasbold@853 1297 Trace *tr1 = *(Trace **) p1;
rasbold@853 1298 Block *b0 = tr0->first_block();
rasbold@853 1299 Block *b1 = tr1->first_block();
rasbold@853 1300
rasbold@853 1301 // The trace of connector blocks goes at the end;
rasbold@853 1302 // we only expect one such trace
rasbold@853 1303 if (b0->is_connector() != b1->is_connector()) {
rasbold@853 1304 return b1->is_connector() ? -1 : 1;
rasbold@853 1305 }
rasbold@853 1306
rasbold@853 1307 // Pull more frequently executed blocks to the beginning
rasbold@853 1308 float freq0 = b0->_freq;
rasbold@853 1309 float freq1 = b1->_freq;
rasbold@853 1310 if (freq0 != freq1) {
rasbold@853 1311 return freq0 > freq1 ? -1 : 1;
rasbold@853 1312 }
rasbold@853 1313
rasbold@853 1314 int diff = tr0->first_block()->_rpo - tr1->first_block()->_rpo;
rasbold@853 1315
rasbold@853 1316 return diff;
rasbold@853 1317 }
rasbold@853 1318
rasbold@853 1319 // Find edges of interest, i.e., those which can fall through. Presumes that
rasbold@853 1320 // edges which don't fall through are of low frequency and can generally be
rasbold@853 1321 // ignored. Initialize the list of traces.
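// For illustration (hypothetical blocks): a straight-line chain
// B1 -> B2 -> B3, where each link is a unique successor/predecessor pair,
// collapses into the single trace {B1, B2, B3}; any remaining fall-through
// candidates out of a trace become CFGEdges for the later merge passes.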
adlertz@5539 1322 void PhaseBlockLayout::find_edges() {
rasbold@853 1323 // Walk the blocks, creating edges and Traces
rasbold@853 1324 uint i;
rasbold@853 1325 Trace *tr = NULL;
adlertz@5539 1326 for (i = 0; i < _cfg.number_of_blocks(); i++) {
adlertz@5539 1327 Block* b = _cfg.get_block(i);
rasbold@853 1328 tr = new Trace(b, next, prev);
rasbold@853 1329 traces[tr->id()] = tr;
rasbold@853 1330
rasbold@853 1331 // All connector blocks should be at the end of the list
rasbold@853 1332 if (b->is_connector()) break;
rasbold@853 1333
rasbold@853 1334 // If this block and the next one have a one-to-one successor/predecessor
rasbold@853 1335 // relationship, simply append the next block.
rasbold@853 1336 int nfallthru = b->num_fall_throughs();
rasbold@853 1337 while (nfallthru == 1 &&
rasbold@853 1338 b->succ_fall_through(0)) {
rasbold@853 1339 Block *n = b->_succs[0];
rasbold@853 1340
rasbold@853 1341 // Skip over single-entry connector blocks; we don't want to
rasbold@853 1342 // add them to the trace.
rasbold@853 1343 while (n->is_connector() && n->num_preds() == 1) {
rasbold@853 1344 n = n->_succs[0];
rasbold@853 1345 }
rasbold@853 1346
rasbold@853 1347 // We see a merge point, so stop searching for the next block.
rasbold@853 1348 if (n->num_preds() != 1) break;
rasbold@853 1349
rasbold@853 1350 i++;
adlertz@5539 1351 assert(n == _cfg.get_block(i), "expecting next block");
rasbold@853 1352 tr->append(n);
rasbold@853 1353 uf->map(n->_pre_order, tr->id());
rasbold@853 1354 traces[n->_pre_order] = NULL;
rasbold@853 1355 nfallthru = b->num_fall_throughs();
rasbold@853 1356 b = n;
rasbold@853 1357 }
rasbold@853 1358
rasbold@853 1359 if (nfallthru > 0) {
rasbold@853 1360 // Create a CFGEdge for each outgoing
rasbold@853 1361 // edge that could be a fall-through.
rasbold@853 1362 for (uint j = 0; j < b->_num_succs; j++ ) {
rasbold@853 1363 if (b->succ_fall_through(j)) {
rasbold@853 1364 Block *target = b->non_connector_successor(j);
rasbold@853 1365 float freq = b->_freq * b->succ_prob(j);
rasbold@853 1366 int from_pct = (int) ((100 * freq) / b->_freq);
rasbold@853 1367 int to_pct = (int) ((100 * freq) / target->_freq);
rasbold@853 1368 edges->append(new CFGEdge(b, target, freq, from_pct, to_pct));
rasbold@853 1369 }
rasbold@853 1370 }
rasbold@853 1371 }
rasbold@853 1372 }
rasbold@853 1373
rasbold@853 1374 // Group connector blocks into one trace
adlertz@5539 1375 for (i++; i < _cfg.number_of_blocks(); i++) {
adlertz@5539 1376 Block *b = _cfg.get_block(i);
rasbold@853 1377 assert(b->is_connector(), "connector blocks at the end");
rasbold@853 1378 tr->append(b);
rasbold@853 1379 uf->map(b->_pre_order, tr->id());
rasbold@853 1380 traces[b->_pre_order] = NULL;
rasbold@853 1381 }
rasbold@853 1382 }
rasbold@853 1383
rasbold@853 1384 // Union two traces together in uf, and null out the old trace's slot in the trace list
adlertz@5539 1385 void PhaseBlockLayout::union_traces(Trace* updated_trace, Trace* old_trace) {
rasbold@853 1386 uint old_id = old_trace->id();
rasbold@853 1387 uint updated_id = updated_trace->id();
rasbold@853 1388
rasbold@853 1389 uint lo_id = updated_id;
rasbold@853 1390 uint hi_id = old_id;
rasbold@853 1391
rasbold@853 1392 // If the updated id is greater than the old id, swap values to meet
rasbold@853 1393 // the UnionFind guarantee that the smaller index is the root.
rasbold@853 1394 if (updated_id > old_id) {
rasbold@853 1395 lo_id = old_id;
rasbold@853 1396 hi_id = updated_id;
rasbold@853 1397
rasbold@853 1398 // Fix up the trace ids
rasbold@853 1399 traces[lo_id] = traces[updated_id];
rasbold@853 1400 updated_trace->set_id(lo_id);
rasbold@853 1401 }
rasbold@853 1402
rasbold@853 1403 // Union the lower with the higher and remove the pointer
rasbold@853 1404 // to the higher.
rasbold@853 1405 uf->Union(lo_id, hi_id);
rasbold@853 1406 traces[hi_id] = NULL;
rasbold@853 1407 }
rasbold@853 1408
rasbold@853 1409 // Append traces together via the most frequently executed edges
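// For illustration: if the hottest open edge runs from the tail of one
// trace to the head of another, the two traces are glued end to end and
// their ids are unioned; a backedge that closes a trace on itself may
// instead trigger a loop rotation (see Trace::backedge below).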
adlertz@5539 1410 void PhaseBlockLayout::grow_traces() {
rasbold@853 1411 // Order the edges, and drive the growth of Traces via the most
rasbold@853 1412 // frequently executed edges.
rasbold@853 1413 edges->sort(edge_order);
rasbold@853 1414 for (int i = 0; i < edges->length(); i++) {
rasbold@853 1415 CFGEdge *e = edges->at(i);
rasbold@853 1416
rasbold@853 1417 if (e->state() != CFGEdge::open) continue;
rasbold@853 1418
rasbold@853 1419 Block *src_block = e->from();
rasbold@853 1420 Block *targ_block = e->to();
rasbold@853 1421
rasbold@853 1422 // Without loop rotation, don't grow traces along backedges.
rasbold@853 1423 if (!BlockLayoutRotateLoops) {
rasbold@853 1424 if (targ_block->_rpo <= src_block->_rpo) {
rasbold@853 1425 targ_block->set_loop_alignment(targ_block);
rasbold@853 1426 continue;
rasbold@853 1427 }
rasbold@853 1428 }
rasbold@853 1429
rasbold@853 1430 Trace *src_trace = trace(src_block);
rasbold@853 1431 Trace *targ_trace = trace(targ_block);
rasbold@853 1432
rasbold@853 1433 // If the edge in question can join two traces at their ends,
rasbold@853 1434 // append one trace to the other.
rasbold@853 1435 if (src_trace->last_block() == src_block) {
rasbold@853 1436 if (src_trace == targ_trace) {
rasbold@853 1437 e->set_state(CFGEdge::interior);
rasbold@853 1438 if (targ_trace->backedge(e)) {
rasbold@853 1439 // Reset i to catch any newly eligible edge
rasbold@853 1440 // (Or we could remember the first "open" edge, and reset there)
rasbold@853 1441 i = 0;
rasbold@853 1442 }
rasbold@853 1443 } else if (targ_trace->first_block() == targ_block) {
rasbold@853 1444 e->set_state(CFGEdge::connected);
rasbold@853 1445 src_trace->append(targ_trace);
rasbold@853 1446 union_traces(src_trace, targ_trace);
rasbold@853 1447 }
rasbold@853 1448 }
rasbold@853 1449 }
rasbold@853 1450 }
rasbold@853 1451
rasbold@853 1452 // Embed one trace into another, if the fork or join points are sufficiently
rasbold@853 1453 // balanced.
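// For illustration (hypothetical shape): given the diamond
//
//        A
//       / \
//      B   C
//       \ /
//        D
//
// with A-B-D already on one trace, the trace holding C can be inserted
// after the two-way branch at A (or spliced in before the join at D), so
// the colder side of the diamond still lays out near its fork and join.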
adlertz@5539 1454 void PhaseBlockLayout::merge_traces(bool fall_thru_only) {
rasbold@853 1455 // Walk the edge list another time, looking at unprocessed edges.
rasbold@853 1456 // Fold in diamonds.
rasbold@853 1457 for (int i = 0; i < edges->length(); i++) {
rasbold@853 1458 CFGEdge *e = edges->at(i);
rasbold@853 1459
rasbold@853 1460 if (e->state() != CFGEdge::open) continue;
rasbold@853 1461 if (fall_thru_only) {
rasbold@853 1462 if (e->infrequent()) continue;
rasbold@853 1463 }
rasbold@853 1464
rasbold@853 1465 Block *src_block = e->from();
rasbold@853 1466 Trace *src_trace = trace(src_block);
rasbold@853 1467 bool src_at_tail = src_trace->last_block() == src_block;
rasbold@853 1468
rasbold@853 1469 Block *targ_block = e->to();
rasbold@853 1470 Trace *targ_trace = trace(targ_block);
rasbold@853 1471 bool targ_at_start = targ_trace->first_block() == targ_block;
rasbold@853 1472
rasbold@853 1473 if (src_trace == targ_trace) {
rasbold@853 1474 // This may be a loop, but we can't do much about it.
rasbold@853 1475 e->set_state(CFGEdge::interior);
rasbold@853 1476 continue;
rasbold@853 1477 }
rasbold@853 1478
rasbold@853 1479 if (fall_thru_only) {
rasbold@853 1480 // If the edge links the middle of two traces, we can't do anything
rasbold@853 1481 // with it; leave it open and continue.
rasbold@853 1482 if (!src_at_tail && !targ_at_start) {
rasbold@853 1483 continue;
rasbold@853 1484 }
rasbold@853 1485
rasbold@853 1486 // Without loop rotation, don't grow traces along backedges.
rasbold@853 1487 if (!BlockLayoutRotateLoops && (targ_block->_rpo <= src_block->_rpo)) {
rasbold@853 1488 continue;
rasbold@853 1489 }
rasbold@853 1490
rasbold@853 1491 // If both ends of the edge are available, why didn't we handle it earlier?
rasbold@853 1492 assert(src_at_tail ^ targ_at_start, "Should have caught this edge earlier.");
rasbold@853 1493
rasbold@853 1494 if (targ_at_start) {
rasbold@853 1495 // Insert the "targ" trace in the "src" trace if the insertion point
rasbold@853 1496 // is a two-way branch.
rasbold@853 1497 // A better profitability check is possible, but may not be worth it.
rasbold@853 1498 // Someday, see if this "fork" has an associated "join";
rasbold@853 1499 // then make a policy on merging this trace at the fork or join.
rasbold@853 1500 // For example, other things being equal, it may be better to place this
rasbold@853 1501 // trace at the join point if the "src" trace ends in a two-way, but
rasbold@853 1502 // the insertion point is one-way.
rasbold@853 1503 assert(src_block->num_fall_throughs() == 2, "unexpected diamond");
rasbold@853 1504 e->set_state(CFGEdge::connected);
rasbold@853 1505 src_trace->insert_after(src_block, targ_trace);
rasbold@853 1506 union_traces(src_trace, targ_trace);
rasbold@853 1507 } else if (src_at_tail) {
adlertz@5539 1508 if (src_trace != trace(_cfg.get_root_block())) {
rasbold@853 1509 e->set_state(CFGEdge::connected);
rasbold@853 1510 targ_trace->insert_before(targ_block, src_trace);
rasbold@853 1511 union_traces(targ_trace, src_trace);
rasbold@853 1512 }
rasbold@853 1513 }
rasbold@853 1514 } else if (e->state() == CFGEdge::open) {
rasbold@853 1515 // Append traces, even without a fall-thru connection,
twisti@1040 1516 // but leave the root entry at the beginning of the block list.
adlertz@5539 1517 if (targ_trace != trace(_cfg.get_root_block())) {
rasbold@853 1518 e->set_state(CFGEdge::connected);
rasbold@853 1519 src_trace->append(targ_trace);
rasbold@853 1520 union_traces(src_trace, targ_trace);
rasbold@853 1521 }
rasbold@853 1522 }
rasbold@853 1523 }
rasbold@853 1524 }
rasbold@853 1525
rasbold@853 1526 // Order the sequence of the traces in some desirable way, and fixup the
rasbold@853 1527 // jumps at the end of each block.
adlertz@5539 1528 void PhaseBlockLayout::reorder_traces(int count) {
rasbold@853 1529 ResourceArea *area = Thread::current()->resource_area();
rasbold@853 1530 Trace ** new_traces = NEW_ARENA_ARRAY(area, Trace *, count);
rasbold@853 1531 Block_List worklist;
rasbold@853 1532 int new_count = 0;
rasbold@853 1533
rasbold@853 1534 // Compact the traces.
rasbold@853 1535 for (int i = 0; i < count; i++) {
rasbold@853 1536 Trace *tr = traces[i];
rasbold@853 1537 if (tr != NULL) {
rasbold@853 1538 new_traces[new_count++] = tr;
rasbold@853 1539 }
rasbold@853 1540 }
rasbold@853 1541
rasbold@853 1542 // The entry block should be first on the new trace list.
adlertz@5539 1543 Trace *tr = trace(_cfg.get_root_block());
rasbold@853 1544 assert(tr == new_traces[0], "entry trace misplaced");
rasbold@853 1545
rasbold@853 1546 // Sort the new trace list by frequency
rasbold@853 1547 qsort(new_traces + 1, new_count - 1, sizeof(new_traces[0]), trace_frequency_order);
rasbold@853 1548
rasbold@853 1549 // Patch up the successor blocks
adlertz@5539 1550 _cfg.clear_blocks();
rasbold@853 1551 for (int i = 0; i < new_count; i++) {
rasbold@853 1552 Trace *tr = new_traces[i];
rasbold@853 1553 if (tr != NULL) {
rasbold@853 1554 tr->fixup_blocks(_cfg);
rasbold@853 1555 }
rasbold@853 1556 }
rasbold@853 1557 }
rasbold@853 1558
rasbold@853 1559 // Order basic blocks based on frequency
adlertz@5539 1560 PhaseBlockLayout::PhaseBlockLayout(PhaseCFG &cfg)
adlertz@5539 1561 : Phase(BlockLayout)
adlertz@5539 1562 , _cfg(cfg) {
rasbold@853 1563 ResourceMark rm;
rasbold@853 1564 ResourceArea *area = Thread::current()->resource_area();
rasbold@853 1565
rasbold@853 1566 // List of traces
adlertz@5539 1567 int size = _cfg.number_of_blocks() + 1;
rasbold@853 1568 traces = NEW_ARENA_ARRAY(area, Trace *, size);
rasbold@853 1569 memset(traces, 0, size*sizeof(Trace*));
rasbold@853 1570 next = NEW_ARENA_ARRAY(area, Block *, size);
rasbold@853 1571 memset(next, 0, size*sizeof(Block *));
rasbold@853 1572 prev = NEW_ARENA_ARRAY(area, Block *, size);
rasbold@853 1573 memset(prev , 0, size*sizeof(Block *));
rasbold@853 1574
rasbold@853 1575 // List of edges
rasbold@853 1576 edges = new GrowableArray<CFGEdge*>;
rasbold@853 1577
rasbold@853 1578 // Mapping block index --> block_trace
rasbold@853 1579 uf = new UnionFind(size);
rasbold@853 1580 uf->reset(size);
rasbold@853 1581
rasbold@853 1582 // Find edges and create traces.
rasbold@853 1583 find_edges();
rasbold@853 1584
rasbold@853 1585 // Grow traces at their ends via most frequent edges.
rasbold@853 1586 grow_traces();
rasbold@853 1587
rasbold@853 1588 // Merge one trace into another, but only at fall-through points.
rasbold@853 1589 // This may make diamonds and other related shapes in a trace.
rasbold@853 1590 merge_traces(true);
rasbold@853 1591
rasbold@853 1592 // Run merge again, allowing two traces to be concatenated even if
rasbold@853 1593 // one does not fall through into the other. This places loosely
rasbold@853 1594 // related traces near each other.
rasbold@853 1595 merge_traces(false);
rasbold@853 1596
rasbold@853 1597 // Re-order all the remaining traces by frequency
rasbold@853 1598 reorder_traces(size);
rasbold@853 1599
adlertz@5539 1600 assert(_cfg.number_of_blocks() >= (uint) (size - 1), "number of blocks can not shrink");
rasbold@853 1601 }
rasbold@853 1602
rasbold@853 1603
rasbold@853 1604 // Edge e completes a loop in a trace. If the target block is the head of
rasbold@853 1605 // the loop, rotate the loop's blocks so that the loop ends in a conditional branch.
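// For illustration (hypothetical trace): given the trace H A B with the
// backedge B -> H, where A (but not B) ends in a two-way branch, the
// rotation yields the order B H A, so the loop's conditional test at A
// falls at the bottom of the loop body.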
rasbold@853 1606 bool Trace::backedge(CFGEdge *e) {
rasbold@853 1607 bool loop_rotated = false;
rasbold@853 1608 Block *src_block = e->from();
rasbold@853 1609 Block *targ_block = e->to();
rasbold@853 1610
rasbold@853 1611 assert(last_block() == src_block, "loop discovery at back branch");
rasbold@853 1612 if (first_block() == targ_block) {
rasbold@853 1613 if (BlockLayoutRotateLoops && last_block()->num_fall_throughs() < 2) {
rasbold@853 1614 // Find the last block in the trace that has a conditional
rasbold@853 1615 // branch.
rasbold@853 1616 Block *b;
rasbold@853 1617 for (b = last_block(); b != NULL; b = prev(b)) {
rasbold@853 1618 if (b->num_fall_throughs() == 2) {
rasbold@853 1619 break;
rasbold@853 1620 }
rasbold@853 1621 }
rasbold@853 1622
rasbold@853 1623 if (b != last_block() && b != NULL) {
rasbold@853 1624 loop_rotated = true;
rasbold@853 1625
rasbold@853 1626 // Rotate the loop by doing two-part linked-list surgery.
rasbold@853 1627 append(first_block());
rasbold@853 1628 break_loop_after(b);
rasbold@853 1629 }
rasbold@853 1630 }
rasbold@853 1631
rasbold@853 1632 // Backbranch to the top of a trace.
twisti@1040 1633 // Scroll forward through the trace from targ_block. If we find
rasbold@853 1634 // a loop head before another loop top, use the loop head alignment.
rasbold@853 1635 for (Block *b = targ_block; b != NULL; b = next(b)) {
rasbold@853 1636 if (b->has_loop_alignment()) {
rasbold@853 1637 break;
rasbold@853 1638 }
rasbold@853 1639 if (b->head()->is_Loop()) {
rasbold@853 1640 targ_block = b;
rasbold@853 1641 break;
rasbold@853 1642 }
rasbold@853 1643 }
rasbold@853 1644
rasbold@853 1645 first_block()->set_loop_alignment(targ_block);
rasbold@853 1646
rasbold@853 1647 } else {
rasbold@853 1648 // Backbranch into the middle of a trace
rasbold@853 1649 targ_block->set_loop_alignment(targ_block);
rasbold@853 1650 }
rasbold@853 1651
rasbold@853 1652 return loop_rotated;
rasbold@853 1653 }
rasbold@853 1654
rasbold@853 1655 // Push blocks onto the CFG list and
rasbold@853 1656 // ensure that blocks have the correct two-way branch sense.
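// For illustration: if a two-way block's layout successor is currently in
// succs[0], the two successors and their projections are swapped so that
// the fall-through case ends up in succs[1] and the taken branch is the
// explicit jump.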
rasbold@853 1657 void Trace::fixup_blocks(PhaseCFG &cfg) {
rasbold@853 1658 Block *last = last_block();
rasbold@853 1659 for (Block *b = first_block(); b != NULL; b = next(b)) {
adlertz@5539 1660 cfg.add_block(b);
rasbold@853 1661 if (!b->is_connector()) {
rasbold@853 1662 int nfallthru = b->num_fall_throughs();
rasbold@853 1663 if (b != last) {
rasbold@853 1664 if (nfallthru == 2) {
rasbold@853 1665 // Ensure that the sense of the branch is correct
rasbold@853 1666 Block *bnext = next(b);
rasbold@853 1667 Block *bs0 = b->non_connector_successor(0);
rasbold@853 1668
adlertz@5635 1669 MachNode *iff = b->get_node(b->number_of_nodes() - 3)->as_Mach();
adlertz@5635 1670 ProjNode *proj0 = b->get_node(b->number_of_nodes() - 2)->as_Proj();
adlertz@5635 1671 ProjNode *proj1 = b->get_node(b->number_of_nodes() - 1)->as_Proj();
rasbold@853 1672
rasbold@853 1673 if (bnext == bs0) {
rasbold@853 1674 // Fall-thru case in succs[0], should be in succs[1]
rasbold@853 1675
rasbold@853 1676 // Flip targets in _succs map
rasbold@853 1677 Block *tbs0 = b->_succs[0];
rasbold@853 1678 Block *tbs1 = b->_succs[1];
rasbold@853 1679 b->_succs.map( 0, tbs1 );
rasbold@853 1680 b->_succs.map( 1, tbs0 );
rasbold@853 1681
rasbold@853 1682 // Flip projections to match targets
adlertz@5635 1683 b->map_node(proj1, b->number_of_nodes() - 2);
adlertz@5635 1684 b->map_node(proj0, b->number_of_nodes() - 1);
rasbold@853 1685 }
rasbold@853 1686 }
rasbold@853 1687 }
rasbold@853 1688 }
rasbold@853 1689 }
rasbold@853 1690 }
