Tue, 02 Sep 2014 12:48:45 -0700
8055494: Add C2 x86 intrinsic for BigInteger::multiplyToLen() method
Summary: Add new C2 intrinsic for BigInteger::multiplyToLen() on x86 in 64-bit VM.
Reviewed-by: roland
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_OPTO_BLOCK_HPP
#define SHARE_VM_OPTO_BLOCK_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;
//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  friend class VMStructs;
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
  Arena *_arena;                // Arena to allocate in
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array to fit index i

public:
  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
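
// Illustrative sketch only (comment, not part of this change): the mapping
// grows on demand and unmapped indices read back as NULL.  'arena' and 'blk'
// are hypothetical names.
//
//   Block_Array map(arena);
//   map.map(100, blk);                  // index beyond the current size => grow()
//   assert(map.lookup(100) == blk, ""); // mapped index returns the block
//   assert(map.lookup(7) == NULL, "");  // never-mapped index reads as NULL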
class Block_List : public Block_Array {
  friend class VMStructs;
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) {  map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};
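
// Illustrative sketch only (comment): Block_List doubles as a worklist.
// push()/pop() give LIFO order; rpop() removes the first element and moves
// the last element into its slot, so it does not preserve ordering.
//
//   Block_List worklist;
//   worklist.push(some_block);          // 'some_block' is hypothetical
//   while (worklist.size() > 0) {
//     Block* b = worklist.pop();
//     // process b, possibly pushing its successors
//   }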
class CFGElement : public ResourceObj {
  friend class VMStructs;
 public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};
//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
  friend class VMStructs;

 private:
  // Nodes in this block, in order
  Node_List _nodes;

 public:

  // Get the node at index 'at_index'; if 'at_index' is out of bounds, return NULL
  Node* get_node(uint at_index) const {
    return _nodes[at_index];
  }

  // Get the number of nodes in this block
  uint number_of_nodes() const {
    return _nodes.size();
  }

  // Map a node 'node' to index 'to_index' in the block; if the index is out of bounds, the size of the node list is increased
  void map_node(Node* node, uint to_index) {
    _nodes.map(to_index, node);
  }

  // Insert a node 'node' at index 'at_index', moving all nodes at a higher index up one step; if 'at_index' is out of bounds we crash
  void insert_node(Node* node, uint at_index) {
    _nodes.insert(at_index, node);
  }

  // Remove the node at index 'at_index'
  void remove_node(uint at_index) {
    _nodes.remove(at_index);
  }

  // Push a node 'node' onto the node list
  void push_node(Node* node) {
    _nodes.push(node);
  }

  // Pop the last node off the node list
  Node* pop_node() {
    return _nodes.pop();
  }
  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return get_node(0); }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);      // return probability of i'th successor
  int num_fall_throughs();      // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i); // Is successor "i" a fall-through candidate
  Block* lone_fall_through();   // Return lone fall-through Block or null

  Block* dom_lca(Block* that);  // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif
  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to scale such frequencies relative to
  // FreqCountInvocations; the old value was 1500.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
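
  // Illustrative note (comment): with FreqCountInvocations at the historical
  // value of 1500 the macro is the identity, e.g.
  // BLOCK_FREQUENCY(0.5f) == (0.5f * 1500) / 1500 == 0.5f; a larger
  // FreqCountInvocations scales the same constant down proportionally.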
  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t  raise_LCA_mark() const       { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t  raise_LCA_visited() const    { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding limit,
  // or if the size of the first instructions in the loop is greater than the padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
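
  // Worked example (comment only): assuming relocInfo::addr_unit() == 1,
  // code_alignment() == 16 and current_offset == 42, then max_pad == 15,
  // current_alignment == 42 & 15 == 10, and padding == (16 - 10) & 15 == 6;
  // six bytes of nops would align the block, unless the loop-alignment
  // exception above makes the method return 0 instead.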
  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems. Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; };

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }
  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }
  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last  = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
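
  // Illustrative note (comment): a block ending in an If has its two
  // successor projections appended after the If node, so the last node is a
  // projection whose is_block_proj() points back at the If; end_idx() then
  // returns last_idx - _num_succs, the index of the If itself.  A block
  // ending in a Goto is its own block projection, so end_idx() is last_idx.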
  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { insert_node(n, end_idx()); }
  // Find node in block. Fails if node not in block.
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );
  // Check whether the node is in the block.
  bool contains (const Node *n) const;

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;
  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig, outputStream* st = tty) const;
  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
  void dump() const;
  void dump(const PhaseCFG* cfg) const;
#endif
};
//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
  friend class VMStructs;
 private:

  // Root of whole program
  RootNode* _root;

  // The block containing the root node
  Block* _root_block;

  // List of basic blocks that are created during CFG creation
  Block_List _blocks;

  // Count of basic blocks
  uint _number_of_blocks;

  // Arena for the blocks to be stored in
  Arena* _block_arena;

  // The matcher for this compilation
  Matcher& _matcher;

  // Map nodes to owning basic block
  Block_Array _node_to_block_mapping;

  // Loop from the root
  CFGLoop* _root_loop;

  // Outermost loop frequency
  float _outer_loop_frequency;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint>* _node_latency;

  // Build a proper looking cfg.  Return count of basic blocks
  uint build_cfg();
  // Build the dominator tree so that we know where we can move instructions
  void build_dominator_tree();

  // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
  void estimate_block_frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
  void global_code_motion();

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Compute the instruction global latency with a backwards walk
  void compute_latencies_backwards(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late. Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  bool schedule_local(Block* block, GrowableArray<int>& ready_cnt, VectorSet& next_call);
  void set_next_call(Block* block, Node* n, VectorSet& next_call);
  void needed_for_next_call(Block* block, Node* this_call, VectorSet& next_call);

  // Perform basic-block local scheduling
  Node* select(Block* block, Node_List& worklist, GrowableArray<int>& ready_cnt, VectorSet& next_call, uint sched_slot);

  // Schedule a call next in the block
  uint sched_call(Block* block, uint node_cnt, Node_List& worklist, GrowableArray<int>& ready_cnt, MachCallNode* mcall, VectorSet& next_call);

  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(Block* block);

  Node* catch_cleanup_find_cloned_def(Block* use_blk, Node* def, Block* def_blk, int n_clone_idx);
  void  catch_cleanup_inter_block(Node *use, Block *use_blk, Node *def, Block *def_blk, int n_clone_idx);

  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(Block* block, Node *proj, Node *val, int allowed_reasons);

  // Perform a Depth First Search (DFS).
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint do_DFS(Tarjan* tarjan, uint rpo_counter);

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  // Used when building the CFG and creating end nodes for blocks.
  MachNode* _goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == get_block_for_node(load), "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);

  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch nodes are treated as a conditional branch that
  // always goes the same direction for most of the optimizer and are used to
  // give a fake exit path to infinite loops.  At this late stage they need to
  // turn into Gotos so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif
 public:
  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

  void set_latency_for_node(Node* node, int latency) {
    _node_latency->at_put_grow(node->_idx, latency);
  }

  uint get_latency_for_node(Node* node) {
    return _node_latency->at_grow(node->_idx);
  }

  // Get the outermost loop frequency
  float get_outer_loop_frequency() const {
    return _outer_loop_frequency;
  }

  // Get the root node of the CFG
  RootNode* get_root_node() const {
    return _root;
  }

  // Get the block of the root node
  Block* get_root_block() const {
    return _root_block;
  }
  // Add a block at a position and move the later ones one step
  void add_block_at(uint pos, Block* block) {
    _blocks.insert(pos, block);
    _number_of_blocks++;
  }

  // Adds a block to the end of the block list
  void add_block(Block* block) {
    _blocks.push(block);
    _number_of_blocks++;
  }

  // Clear the list of blocks
  void clear_blocks() {
    _blocks.reset();
    _number_of_blocks = 0;
  }

  // Get the block at position pos in _blocks
  Block* get_block(uint pos) const {
    return _blocks[pos];
  }

  // Number of blocks
  uint number_of_blocks() const {
    return _number_of_blocks;
  }
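
  // Illustrative sketch only (comment): the usual way to visit every block,
  // given some PhaseCFG& cfg (hypothetical name):
  //
  //   for (uint i = 0; i < cfg.number_of_blocks(); i++) {
  //     Block* block = cfg.get_block(i);
  //     // inspect or transform 'block'
  //   }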
  // set which block this node should reside in
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }

  // removes the mapping from a node to a block
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, NULL);
  }

  // get the block in which this node resides
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // does this node reside in a block; returns true if it does
  bool has_block(const Node* node) const {
    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  }
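
  // Illustrative sketch only (comment): the mapping is keyed by Node::_idx,
  // so moving a node between blocks is just a re-map ('n', 'b1' and 'b2' are
  // hypothetical):
  //
  //   cfg.map_node_to_block(n, b1);
  //   assert(cfg.has_block(n) && cfg.get_block_for_node(n) == b1, "");
  //   cfg.map_node_to_block(n, b2);   // overwrites the old mapping
  //   cfg.unmap_node_from_block(n);   // now has_block(n) is false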
  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon(const Block* block);

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Do global code motion by first building the dominator tree and then estimating block frequencies
  // Returns true on success
  bool do_global_code_motion();

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty_blocks();
  Block *fixup_trap_based_check(Node *branch, Block *block, int block_pos, Block *bnext);
  void fixup_flow();

  // Insert a node into a block at index and map the node to the block
  void insert(Block *b, uint idx, Node *n) {
    b->insert_node(n, idx);
    map_node_to_block(n, b);
  }

  // Check all nodes and postalloc_expand them if necessary.
  void postalloc_expand(PhaseRegAlloc* _ra);

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited  ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};
//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into uint");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
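
// Illustrative sketch only (comment): Find()'s fast path is a single array
// read when the index is its own representative; otherwise Find_compress()
// resolves (and compresses) the chain.  'cfg' is a hypothetical PhaseCFG.
//
//   UnionFind uf(cfg.number_of_blocks());
//   uf.reset(cfg.number_of_blocks());   // identity map: each block is its own set
//   uf.Union(3, 7);                     // merge the sets containing 3 and 7
//   assert(uf.Find(3) == uf.Find(7), "same representative after Union");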
//----------------------------BlockProbPair---------------------------
// Ordered pair of a target Block* and the probability of the edge leading to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;      // block target
  float  _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};
//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  friend class VMStructs;
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of loop tree is the method level "pseudo" loop, its parent is null
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

 public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  float outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
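  // Illustrative note (comment): _exit_prob is the chance of leaving the loop
  // on any one iteration, so e.g. _exit_prob == 0.25f gives an expected
  // trip_count() of 1.0f / 0.25f == 4 iterations per entry.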
  virtual bool is_loop()  { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};
//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
  friend class VMStructs;
 private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  float _freq;          // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct;   }
  int  to_pct()   const { return _to_pct;     }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

 public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq),
    _from_pct(from_pct), _to_pct(to_pct), _state(open) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  float  freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};
//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
 private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace.  Reset last to be "b", and first as
  // the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }
 public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _first(b),
    _last(b),
    _next_list(next_list),
    _prev_list(prev_list),
    _id(b->_pre_order) {
    set_next(b, NULL);
    set_prev(b, NULL);
  };

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }
  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};
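
// Illustrative sketch only (comment): insert_after() splices a whole trace
// into this one by relinking the per-block next/prev arrays.  With trace
// T1 = [A, B] and trace T2 = [X, Y] (hypothetical blocks), calling
// T1->insert_after(A, T2) yields the order [A, X, Y, B], while
// T1->append(T2) instead yields [A, B, X, Y].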
//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  friend class VMStructs;
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
 public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};

#endif // SHARE_VM_OPTO_BLOCK_HPP