Tue, 15 Apr 2008 10:49:32 -0700
6692301: Side effect in NumberFormat tests with -server -Xcomp
Summary: The optimization in CmpPNode::sub() removed a valid compare instruction because of a false-positive answer from detect_dominating_control().
Reviewed-by: jrose, sgoldman
/*
 * Copyright 1997-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Arena *_arena;                // Arena to allocate in

  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
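// Illustrative sketch (editorial comment, not part of the original header):
// Block_Array behaves like an unbounded array of Block*.  map() grows the
// backing storage on demand and lookup() answers NULL for indices that were
// never mapped.  The variable names below (bbs, blk) are hypothetical.
//
//   Block_Array bbs(Thread::current()->resource_area());
//   bbs.map(blk->head()->_idx, blk);           // grows if the index is past the current size
//   Block *b = bbs.lookup(blk->head()->_idx);  // == blk
//   Block *c = bbs.lookup(1000000);            // == NULL, index never mapped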


class Block_List : public Block_Array {
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
};
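// Note (editorial): pop() removes from the tail of the list, while rpop()
// removes from the head by returning slot 0 and overwriting it with the last
// element, so it is O(1) but does not preserve the order of the remaining
// blocks.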


class CFGElement : public ResourceObj {
public:
  float _freq;                  // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It also rescales such constants, which were tuned against the old
  // FreqCountInvocations value of 1500, to the current value of
  // FreqCountInvocations.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
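// Worked example (editorial sketch; the value 1500 below is only the
// illustrative setting of the flag): with FreqCountInvocations == 1500,
// BLOCK_FREQUENCY(0.5f) expands to (0.5f * 1500) / 1500 == 0.5f, leaving the
// constant unchanged.  If FreqCountInvocations were raised to 3000, the same
// use would yield 0.25f, keeping the constant consistent with the larger
// invocation count.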

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the first instructions in the loop is greater
  // than the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( !head()->is_Loop() ||
            padding <= (uint)MaxLoopPad ||
            first_inst_size() > padding ) {
          return padding;
        }
      }
    }
    return 0;
  }
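  // Worked example (editorial sketch; assumes relocInfo::addr_unit() == 1 as
  // on byte-addressed targets): if code_alignment() returns 16, then
  // max_pad == 15.  With current_offset == 0x43 the block is misaligned
  // (0x43 & 15 == 3), so padding == (16 - 3) & 15 == 13 bytes.  For a loop
  // head those 13 bytes are emitted only if 13 <= MaxLoopPad or the loop's
  // first instructions are larger than 13 bytes; otherwise the loop is left
  // unaligned and 0 is returned.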

  // Connector blocks.  Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems.  Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; };

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
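  // Illustrative layout (editorial comment): a two-way branch block holds its
  // nodes as [ head, ...instructions..., If, IfTrue proj, IfFalse proj ].
  // Here _num_succs == 2 and the last node is a projection whose
  // is_block_proj() answers the If (this is what the assert above checks), so
  // end_idx() points at the If rather than at the projections that follow it.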

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, Block_Array &bbs, uint node_cnt, Node_List &worklist, int *ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, Block_Array &bbs );
  void needed_for_next_call(Node *this_call, VectorSet &next_call, Block_Array &bbs);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, int *ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block_Array &bbs);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon( Block_Array &bbs ) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig) const;
  void dump_pred(const Block_Array *bbs, Block* orig) const;
  void dump_head( const Block_Array *bbs ) const;
  void dump( ) const;
  void dump( const Block_Array *bbs ) const;
#endif
};
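// Usage sketch (editorial; variable names are hypothetical): walking the real
// successors of a block while skipping connector blocks that will be elided
// during Output:
//
//   for (uint i = 0; i < b->_num_succs; i++) {
//     Block *succ = b->non_connector_successor(i);
//     // succ is the first non-connector block reached through _succs[i]
//   }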


//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
private:
  // Build a proper-looking CFG.  Return the count of basic blocks.
  uint build_cfg();

  // Perform a DFS search.
  // Set up 'vertex' as the DFS-to-vertex mapping.
  // Set up 'semi' as the vertex-to-DFS mapping.
  // Set 'parent' to the DFS parent.
  uint DFS( Tarjan *tarjan );

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  MachNode *_goto;
  void insert_goto_at(uint block_no, uint succ_no);

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == _bbs[load->_idx], "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

public:
  PhaseCFG( Arena *a, RootNode *r, Matcher &m );

  uint _num_blocks;             // Count of basic blocks
  Block_List _blocks;           // List of basic blocks
  RootNode *_root;              // Root of whole program
  Block_Array _bbs;             // Map Nodes to owning Basic Block
  Block *_broot;                // Basic block of root
  uint _rpo_ctr;
  CFGLoop* _root_loop;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint> _node_latency;

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

  // Build dominators
  void Dominators();

  // Estimate block frequencies based on IfNode probabilities
  void Estimate_Block_Frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _bbs now maps _idx for all Nodes to some Block.
  void GlobalCodeMotion( Matcher &m, uint unique, Node_List &proj_list );

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Compute the instruction global latency with a backwards walk
  void ComputeLatenciesBackwards(VectorSet &visited, Node_List &stack);

  // Remove empty basic blocks
  void RemoveEmpty();
  bool MoveToNext(Block* bx, uint b_index);
  void MoveToEnd(Block* bx, uint b_index);

  // Check for a NeverBranch at the block end.  This needs to become a GOTO to
  // the true target.  NeverBranch nodes are treated as a conditional branch
  // that always goes the same direction for most of the optimizer, and are
  // used to give a fake exit path to infinite loops.  At this late stage they
  // need to turn into Gotos so that when you enter the infinite loop you
  // indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

  // Insert a node into a block, and update the _bbs map
  void insert( Block *b, uint idx, Node *n ) {
    b->_nodes.insert( idx, n );
    _bbs.map( n->_idx, b );
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};


//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
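// Usage sketch (editorial; the values are hypothetical): the map starts out
// as the identity relation, Union() joins two block indices under a single
// representative, and Find() answers that representative (falling back to
// Find_compress when the entry is not already a root):
//
//   UnionFind uf(_num_blocks);
//   uf.reset(_num_blocks);     // uf[i] == i for every i in [0.._num_blocks]
//   uf.Union(3, 7);            // blocks 3 and 7 now share one representative
//   uint r = uf.Find(7);       // == uf.Find(3)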

//----------------------------BlockProbPair---------------------------
// Ordered pair of a successor Block and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;               // block target
  float  _prob;                 // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of the loop tree is the method-level "pseudo" loop; its parent is NULL
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, Block_Array& node_to_blk);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};
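// Worked example (editorial): if a loop's combined per-iteration probability
// of taking any exit is _exit_prob == 0.1f, then trip_count() estimates
// 1.0f / 0.1f == 10 iterations, and scale_freq() scales the frequencies of
// the loop's members by that trip count, compounded through outer loops.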