Wed, 18 Sep 2013 14:34:56 -0700
8024342: PPC64 (part 111): Support for C calling conventions that require 64-bit ints.
Summary: Some platforms, such as PPC and s390x/zArch, require that 32-bit ints are passed to C functions as 64-bit values. This change adds support for adapting the signature and for issuing the proper casts in C2-compiled stubs. The functions are used in generate_native_wrapper(). Adapt the signature used by the compiler as in PhaseIdealLoop::intrinsify_fill().
Reviewed-by: kvn
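
For illustration, a minimal sketch of the kind of signature adaptation the summary describes, under stated assumptions: BasicType and the T_* constants are real HotSpot types, but the helper name and shape here are hypothetical and this is not the changeset's actual code.

// Widen sub-int and int arguments to 64-bit slots for C ABIs (e.g. PPC64)
// that pass all integer arguments as 64-bit values. 64-bit entries in a
// BasicType signature array occupy two slots (T_LONG followed by T_VOID).
static int widen_ints_to_longs(const BasicType* in_sig, int in_cnt,
                               BasicType* out_sig) {
  int out_cnt = 0;
  for (int i = 0; i < in_cnt; i++) {
    BasicType bt = in_sig[i];
    if (bt == T_BOOLEAN || bt == T_CHAR || bt == T_BYTE ||
        bt == T_SHORT   || bt == T_INT) {
      out_sig[out_cnt++] = T_LONG; // pass the int as a 64-bit value
      out_sig[out_cnt++] = T_VOID; // placeholder half of the 64-bit slot
    } else {
      out_sig[out_cnt++] = bt;     // everything else is unchanged
    }
  }
  return out_cnt; // may exceed in_cnt; caller must size out_sig accordingly
}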
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_BLOCK_HPP
#define SHARE_VM_OPTO_BLOCK_HPP

#include "opto/multnode.hpp"
#include "opto/node.hpp"
#include "opto/phase.hpp"

// Optimization - Graph Style

class Block;
class CFGLoop;
class MachCallNode;
class Matcher;
class RootNode;
class VectorSet;
struct Tarjan;

//------------------------------Block_Array------------------------------------
// Map dense integer indices to Blocks.  Uses classic doubling-array trick.
// Abstractly provides an infinite array of Block*'s, initialized to NULL.
// Note that the constructor just zeros things, and since I use Arena
// allocation I do not need a destructor to reclaim storage.
class Block_Array : public ResourceObj {
  friend class VMStructs;
  uint _size;                   // allocated size, as opposed to formal limit
  debug_only(uint _limit;)      // limit to formal domain
  Arena *_arena;                // Arena to allocate in
protected:
  Block **_blocks;
  void grow( uint i );          // Grow array node to fit

public:
  Block_Array(Arena *a) : _arena(a), _size(OptoBlockListSize) {
    debug_only(_limit=0);
    _blocks = NEW_ARENA_ARRAY( a, Block *, OptoBlockListSize );
    for( int i = 0; i < OptoBlockListSize; i++ ) {
      _blocks[i] = NULL;
    }
  }
  Block *lookup( uint i ) const // Lookup, or NULL for not mapped
  { return (i<Max()) ? _blocks[i] : (Block*)NULL; }
  Block *operator[] ( uint i ) const // Lookup, or assert for not mapped
  { assert( i < Max(), "oob" ); return _blocks[i]; }
  // Extend the mapping: index i maps to Block *n.
  void map( uint i, Block *n ) { if( i>=Max() ) grow(i); _blocks[i] = n; }
  uint Max() const { debug_only(return _limit); return _size; }
};
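// Usage sketch (illustrative comment only, not part of the original header):
//   Block_Array map(arena);
//   map.map(100, blk);    // grows the backing array to cover index 100
//   map.lookup(500);      // unmapped index: returns NULL without growing
//   map[100];             // checked access: asserts on out-of-bounds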

//------------------------------Block_List------------------------------------
class Block_List : public Block_Array {
  friend class VMStructs;
public:
  uint _cnt;
  Block_List() : Block_Array(Thread::current()->resource_area()), _cnt(0) {}
  void push( Block *b ) { map(_cnt++,b); }
  Block *pop() { return _blocks[--_cnt]; }
  Block *rpop() { Block *b = _blocks[0]; _blocks[0]=_blocks[--_cnt]; return b;}
  void remove( uint i );
  void insert( uint i, Block *n );
  uint size() const { return _cnt; }
  void reset() { _cnt = 0; }
  void print();
};

//------------------------------CFGElement------------------------------------
class CFGElement : public ResourceObj {
  friend class VMStructs;
public:
  float _freq; // Execution frequency (estimate)

  CFGElement() : _freq(0.0f) {}
  virtual bool is_block() { return false; }
  virtual bool is_loop()  { return false; }
  Block*   as_Block()   { assert(is_block(), "must be block"); return (Block*)this; }
  CFGLoop* as_CFGLoop() { assert(is_loop(),  "must be loop");  return (CFGLoop*)this; }
};

//------------------------------Block------------------------------------------
// This class defines a Basic Block.
// Basic blocks are used during the output routines, and are not used during
// any optimization pass.  They are created late in the game.
class Block : public CFGElement {
  friend class VMStructs;
public:
  // Nodes in this block, in order
  Node_List _nodes;

  // Basic blocks have a Node which defines Control for all Nodes pinned in
  // this block.  This Node is a RegionNode.  Exception-causing Nodes
  // (division, subroutines) and Phi functions are always pinned.  Later,
  // every Node will get pinned to some block.
  Node *head() const { return _nodes[0]; }

  // CAUTION: num_preds() is ONE based, so that predecessor numbers match
  // input edges to Regions and Phis.
  uint num_preds() const { return head()->req(); }
  Node *pred(uint i) const { return head()->in(i); }
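  // Example (illustrative): a Region with two incoming control edges has
  // req() == 3 (slot 0 is the Region's self edge), so num_preds() == 3 and
  // pred(1)/pred(2) line up with the matching PhiNode inputs in(1)/in(2).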

  // Array of successor blocks, same size as projs array
  Block_Array _succs;

  // Basic blocks have some number of Nodes which split control to all
  // following blocks.  These Nodes are always Projections.  The field in
  // the Projection and the block-ending Node determine which Block follows.
  uint _num_succs;

  // Basic blocks also carry all sorts of good old fashioned DFS information
  // used to find loops, loop nesting depth, dominators, etc.
  uint _pre_order;              // Pre-order DFS number

  // Dominator tree
  uint _dom_depth;              // Depth in dominator tree for fast LCA
  Block* _idom;                 // Immediate dominator block

  CFGLoop *_loop;               // Loop to which this block belongs
  uint _rpo;                    // Number in reverse post order walk

  virtual bool is_block() { return true; }
  float succ_prob(uint i);          // return probability of i'th successor
  int num_fall_throughs();          // How many fall-through candidates this block has
  void update_uncommon_branch(Block* un); // Lower branch prob to uncommon code
  bool succ_fall_through(uint i);   // Is successor "i" a fall-through candidate?
  Block* lone_fall_through();       // Return lone fall-through Block or null

  Block* dom_lca(Block* that);      // Compute LCA in dominator tree.
#ifdef ASSERT
  bool dominates(Block* that) {
    int dom_diff = this->_dom_depth - that->_dom_depth;
    if (dom_diff > 0)  return false;
    for (; dom_diff < 0; dom_diff++)  that = that->_idom;
    return this == that;
  }
#endif

  // Report the alignment required by this block.  Must be a power of 2.
  // The previous block will insert nops to get this alignment.
  uint code_alignment();
  uint compute_loop_alignment();

  // BLOCK_FREQUENCY is a sentinel to mark uses of constant block frequencies.
  // It is currently also used to scale such frequencies relative to
  // FreqCountInvocations, whose old default value was 1500.
#define BLOCK_FREQUENCY(f) ((f * (float) 1500) / FreqCountInvocations)
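  // Worked example (illustrative): with FreqCountInvocations at its old
  // default of 1500, BLOCK_FREQUENCY(0.5f) == (0.5 * 1500) / 1500 == 0.5;
  // raising FreqCountInvocations to 3000 halves every scaled frequency.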

  // Register Pressure (estimate) for Splitting heuristic
  uint _reg_pressure;
  uint _ihrp_index;
  uint _freg_pressure;
  uint _fhrp_index;

  // Mark and visited bits for an LCA calculation in insert_anti_dependences.
  // Since they hold unique node indexes, they do not need reinitialization.
  node_idx_t _raise_LCA_mark;
  void set_raise_LCA_mark(node_idx_t x)    { _raise_LCA_mark = x; }
  node_idx_t raise_LCA_mark() const        { return _raise_LCA_mark; }
  node_idx_t _raise_LCA_visited;
  void set_raise_LCA_visited(node_idx_t x) { _raise_LCA_visited = x; }
  node_idx_t raise_LCA_visited() const     { return _raise_LCA_visited; }

  // Estimated size in bytes of first instructions in a loop.
  uint _first_inst_size;
  uint first_inst_size() const     { return _first_inst_size; }
  void set_first_inst_size(uint s) { _first_inst_size = s; }

  // Compute the size of first instructions in this block.
  uint compute_first_inst_size(uint& sum_size, uint inst_cnt, PhaseRegAlloc* ra);

  // Compute alignment padding if the block needs it.
  // Align a loop if the loop's padding is less than or equal to the padding
  // limit, or if the size of the first instructions in the loop exceeds the
  // padding.
  uint alignment_padding(int current_offset) {
    int block_alignment = code_alignment();
    int max_pad = block_alignment-relocInfo::addr_unit();
    if( max_pad > 0 ) {
      assert(is_power_of_2(max_pad+relocInfo::addr_unit()), "");
      int current_alignment = current_offset & max_pad;
      if( current_alignment != 0 ) {
        uint padding = (block_alignment-current_alignment) & max_pad;
        if( has_loop_alignment() &&
            padding > (uint)MaxLoopPad &&
            first_inst_size() <= padding ) {
          return 0;
        }
        return padding;
      }
    }
    return 0;
  }
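  // Worked example (illustrative): with a 32-byte block_alignment and
  // addr_unit() == 1, max_pad == 31. At current_offset == 0x46,
  // current_alignment == 0x46 & 31 == 6, so padding == (32 - 6) & 31 == 26
  // bytes of nops, unless this is a loop head where 26 > MaxLoopPad and the
  // loop's first instructions are short enough that alignment isn't worth it.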

  // Connector blocks. Connector blocks are basic blocks devoid of
  // instructions, but may have relevant non-instruction Nodes, such as
  // Phis or MergeMems. Such blocks are discovered and marked during the
  // RemoveEmpty phase, and elided during Output.
  bool _connector;
  void set_connector() { _connector = true; }
  bool is_connector() const { return _connector; }

  // Loop_alignment will be set for blocks which are at the top of loops.
  // The block layout pass may rotate loops such that the loop head may not
  // be the sequentially first block of the loop encountered in the linear
  // list of blocks.  If the layout pass is not run, loop alignment is set
  // for each block which is the head of a loop.
  uint _loop_alignment;
  void set_loop_alignment(Block *loop_top) {
    uint new_alignment = loop_top->compute_loop_alignment();
    if (new_alignment > _loop_alignment) {
      _loop_alignment = new_alignment;
    }
  }
  uint loop_alignment() const { return _loop_alignment; }
  bool has_loop_alignment() const { return loop_alignment() > 0; }

  // Create a new Block with given head Node.
  // Creates the (empty) predecessor arrays.
  Block( Arena *a, Node *headnode )
    : CFGElement(),
      _nodes(a),
      _succs(a),
      _num_succs(0),
      _pre_order(0),
      _idom(0),
      _loop(NULL),
      _reg_pressure(0),
      _ihrp_index(1),
      _freg_pressure(0),
      _fhrp_index(1),
      _raise_LCA_mark(0),
      _raise_LCA_visited(0),
      _first_inst_size(999999),
      _connector(false),
      _loop_alignment(0) {
    _nodes.push(headnode);
  }

  // Index of 'end' Node
  uint end_idx() const {
    // %%%%% add a proj after every goto
    // so (last->is_block_proj() != last) always, then simplify this code
    // This will not give correct end_idx for block 0 when it only contains root.
    int last_idx = _nodes.size() - 1;
    Node *last = _nodes[last_idx];
    assert(last->is_block_proj() == last || last->is_block_proj() == _nodes[last_idx - _num_succs], "");
    return (last->is_block_proj() == last) ? last_idx : (last_idx - _num_succs);
  }
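  // Illustrative layout: for a two-way branch, the tail of _nodes is
  //   [ ..., If, IfTrue, IfFalse ]  with _num_succs == 2,
  // so the last Node is a projection and end_idx() points at the If,
  // i.e. last_idx - _num_succs.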

  // Basic blocks have a Node which ends them.  This Node determines which
  // basic block follows this one in the program flow.  This Node is either an
  // IfNode, a GotoNode, a JmpNode, or a ReturnNode.
  Node *end() const { return _nodes[end_idx()]; }

  // Add an instruction to an existing block.  It must go after the head
  // instruction and before the end instruction.
  void add_inst( Node *n ) { _nodes.insert(end_idx(),n); }
  // Find node in block
  uint find_node( const Node *n ) const;
  // Find and remove n from block list
  void find_remove( const Node *n );

  // helper function that adds caller save registers to MachProjNode
  void add_call_kills(MachProjNode *proj, RegMask& regs, const char* save_policy, bool exclude_soe);
  // Schedule a call next in the block
  uint sched_call(Matcher &matcher, PhaseCFG* cfg, uint node_cnt, Node_List &worklist, GrowableArray<int> &ready_cnt, MachCallNode *mcall, VectorSet &next_call);

  // Perform basic-block local scheduling
  Node *select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &ready_cnt, VectorSet &next_call, uint sched_slot);
  void set_next_call( Node *n, VectorSet &next_call, PhaseCFG* cfg);
  void needed_for_next_call(Node *this_call, VectorSet &next_call, PhaseCFG* cfg);
  bool schedule_local(PhaseCFG *cfg, Matcher &m, GrowableArray<int> &ready_cnt, VectorSet &next_call);
  // Cleanup if any code lands between a Call and its Catch
  void call_catch_cleanup(PhaseCFG* cfg, Compile *C);
  // Detect implicit-null-check opportunities.  Basically, find NULL checks
  // with suitable memory ops nearby.  Use the memory op to do the NULL check.
  // I can generate a memory op if there is not one nearby.
  void implicit_null_check(PhaseCFG *cfg, Node *proj, Node *val, int allowed_reasons);

  // Return the empty status of a block
  enum { not_empty, empty_with_goto, completely_empty };
  int is_Empty() const;

  // Forward through connectors
  Block* non_connector() {
    Block* s = this;
    while (s->is_connector()) {
      s = s->_succs[0];
    }
    return s;
  }

  // Return true if b is a successor of this block
  bool has_successor(Block* b) const {
    for (uint i = 0; i < _num_succs; i++ ) {
      if (non_connector_successor(i) == b) {
        return true;
      }
    }
    return false;
  }

  // Successor block, after forwarding through connectors
  Block* non_connector_successor(int i) const {
    return _succs[i]->non_connector();
  }

  // Examine block's code shape to predict if it is not commonly executed.
  bool has_uncommon_code() const;

  // Use frequency calculations and code shape to predict if the block
  // is uncommon.
  bool is_uncommon(PhaseCFG* cfg) const;

#ifndef PRODUCT
  // Debugging print of basic block
  void dump_bidx(const Block* orig, outputStream* st = tty) const;
  void dump_pred(const PhaseCFG* cfg, Block* orig, outputStream* st = tty) const;
  void dump_head(const PhaseCFG* cfg, outputStream* st = tty) const;
  void dump() const;
  void dump(const PhaseCFG* cfg) const;
#endif
};

//------------------------------PhaseCFG---------------------------------------
// Build an array of Basic Block pointers, one per Node.
class PhaseCFG : public Phase {
  friend class VMStructs;
 private:

  // Root of whole program
  RootNode* _root;

  // The block containing the root node
  Block* _root_block;

  // List of basic blocks that are created during CFG creation
  Block_List _blocks;

  // Count of basic blocks
  uint _number_of_blocks;

  // Arena for the blocks to be stored in
  Arena* _block_arena;

  // The matcher for this compilation
  Matcher& _matcher;

  // Map nodes to owning basic block
  Block_Array _node_to_block_mapping;

  // Loop from the root
  CFGLoop* _root_loop;

  // Outermost loop frequency
  float _outer_loop_frequency;

  // Per node latency estimation, valid only during GCM
  GrowableArray<uint>* _node_latency;

  // Build a proper looking cfg.  Return count of basic blocks
  uint build_cfg();

  // Build the dominator tree so that we know where we can move instructions
  void build_dominator_tree();

  // Estimate block frequencies based on IfNode probabilities, so that we know where we want to move instructions
  void estimate_block_frequency();

  // Global Code Motion.  See Click's PLDI95 paper.  Place Nodes in specific
  // basic blocks; i.e. _node_to_block_mapping now maps _idx for all Nodes to some Block.
  // Move nodes to ensure correctness from GVN and also try to move nodes out of loops.
  void global_code_motion();

  // Schedule Nodes early in their basic blocks.
  bool schedule_early(VectorSet &visited, Node_List &roots);

  // For each node, find the latest block it can be scheduled into
  // and then select the cheapest block between the latest and earliest
  // block to place the node.
  void schedule_late(VectorSet &visited, Node_List &stack);

  // Compute the (backwards) latency of a node from a single use
  int latency_from_use(Node *n, const Node *def, Node *use);

  // Compute the (backwards) latency of a node from the uses of this instruction
  void partial_latency_of_defs(Node *n);

  // Compute the instruction global latency with a backwards walk
  void compute_latencies_backwards(VectorSet &visited, Node_List &stack);

  // Pick a block between early and late that is a cheaper alternative
  // to late.  Helper for schedule_late.
  Block* hoist_to_cheaper_block(Block* LCA, Block* early, Node* self);

  // Perform a Depth First Search (DFS).
  // Setup 'vertex' as DFS to vertex mapping.
  // Setup 'semi' as vertex to DFS mapping.
  // Set 'parent' to DFS parent.
  uint do_DFS(Tarjan* tarjan, uint rpo_counter);

  // Helper function to insert a node into a block
  void schedule_node_into_block( Node *n, Block *b );

  void replace_block_proj_ctrl( Node *n );

  // Set the basic block for pinned Nodes
  void schedule_pinned_nodes( VectorSet &visited );

  // I'll need a few machine-specific GotoNodes.  Clone from this one.
  // Used when building the CFG and creating end nodes for blocks.
  MachNode* _goto;

  Block* insert_anti_dependences(Block* LCA, Node* load, bool verify = false);
  void verify_anti_dependences(Block* LCA, Node* load) {
    assert(LCA == get_block_for_node(load), "should already be scheduled");
    insert_anti_dependences(LCA, load, true);
  }

  bool move_to_next(Block* bx, uint b_index);
  void move_to_end(Block* bx, uint b_index);

  void insert_goto_at(uint block_no, uint succ_no);

  // Check for NeverBranch at block end.  This needs to become a GOTO to the
  // true target.  NeverBranch are treated as a conditional branch that always
  // goes the same direction for most of the optimizer and are used to give a
  // fake exit path to infinite loops.  At this late stage they need to turn
  // into Goto's so that when you enter the infinite loop you indeed hang.
  void convert_NeverBranch_to_Goto(Block *b);

  CFGLoop* create_loop_tree();

#ifndef PRODUCT
  bool _trace_opto_pipelining;  // tracing flag
#endif

 public:
  PhaseCFG(Arena* arena, RootNode* root, Matcher& matcher);

  void set_latency_for_node(Node* node, int latency) {
    _node_latency->at_put_grow(node->_idx, latency);
  }

  uint get_latency_for_node(Node* node) {
    return _node_latency->at_grow(node->_idx);
  }

  // Get the outermost loop frequency
  float get_outer_loop_frequency() const {
    return _outer_loop_frequency;
  }

  // Get the root node of the CFG
  RootNode* get_root_node() const {
    return _root;
  }

  // Get the block of the root node
  Block* get_root_block() const {
    return _root_block;
  }

  // Add a block at a position and move the later ones one step
  void add_block_at(uint pos, Block* block) {
    _blocks.insert(pos, block);
    _number_of_blocks++;
  }

  // Adds a block to the top of the block list
  void add_block(Block* block) {
    _blocks.push(block);
    _number_of_blocks++;
  }

  // Clear the list of blocks
  void clear_blocks() {
    _blocks.reset();
    _number_of_blocks = 0;
  }

  // Get the block at position pos in _blocks
  Block* get_block(uint pos) const {
    return _blocks[pos];
  }

  // Number of blocks
  uint number_of_blocks() const {
    return _number_of_blocks;
  }

  // set which block this node should reside in
  void map_node_to_block(const Node* node, Block* block) {
    _node_to_block_mapping.map(node->_idx, block);
  }

  // removes the mapping from a node to a block
  void unmap_node_from_block(const Node* node) {
    _node_to_block_mapping.map(node->_idx, NULL);
  }

  // get the block in which this node resides
  Block* get_block_for_node(const Node* node) const {
    return _node_to_block_mapping[node->_idx];
  }

  // returns true if the node resides in a block
  bool has_block(const Node* node) const {
    return (_node_to_block_mapping.lookup(node->_idx) != NULL);
  }

#ifdef ASSERT
  Unique_Node_List _raw_oops;
#endif

  // Do global code motion by first building the dominator tree and estimating block frequencies
  // Returns true on success
  bool do_global_code_motion();

  // Compute the (backwards) latency of a node from the uses
  void latency_from_uses(Node *n);

  // Set loop alignment
  void set_loop_alignment();

  // Remove empty basic blocks
  void remove_empty_blocks();
  void fixup_flow();

  // Insert a node into a block at index and map the node to the block
  void insert(Block *b, uint idx, Node *n) {
    b->_nodes.insert( idx, n );
    map_node_to_block(n, b);
  }

#ifndef PRODUCT
  bool trace_opto_pipelining() const { return _trace_opto_pipelining; }

  // Debugging print of CFG
  void dump( ) const;           // CFG only
  void _dump_cfg( const Node *end, VectorSet &visited ) const;
  void verify() const;
  void dump_headers();
#else
  bool trace_opto_pipelining() const { return false; }
#endif
};

//------------------------------UnionFind--------------------------------------
// Map Block indices to a block-index for a cfg-cover.
// Array lookup in the optimized case.
class UnionFind : public ResourceObj {
  uint _cnt, _max;
  uint* _indices;
  ReallocMark _nesting;         // assertion check for reallocations
public:
  UnionFind( uint max );
  void reset( uint max );       // Reset to identity map for [0..max]

  uint lookup( uint nidx ) const {
    return _indices[nidx];
  }
  uint operator[] (uint nidx) const { return lookup(nidx); }

  void map( uint from_idx, uint to_idx ) {
    assert( from_idx < _cnt, "oob" );
    _indices[from_idx] = to_idx;
  }
  void extend( uint from_idx, uint to_idx );

  uint Size() const { return _cnt; }

  uint Find( uint idx ) {
    assert( idx < 65536, "Must fit into 16 bits");
    uint uf_idx = lookup(idx);
    return (uf_idx == idx) ? uf_idx : Find_compress(idx);
  }
  uint Find_compress( uint idx );
  uint Find_const( uint idx ) const;
  void Union( uint idx1, uint idx2 );

};
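// Usage sketch (illustrative comment only): after reset(n), every index is
// its own set representative;
//   uf.Union(3, 7);                  // merge the two sets
//   uf.Find(3) == uf.Find(7);        // now true; Find compresses paths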

//----------------------------BlockProbPair---------------------------
// Ordered pair of a target Block* and the probability of the edge to it.
class BlockProbPair VALUE_OBJ_CLASS_SPEC {
protected:
  Block* _target;      // block target
  float  _prob;        // probability of edge to block
public:
  BlockProbPair() : _target(NULL), _prob(0.0) {}
  BlockProbPair(Block* b, float p) : _target(b), _prob(p) {}

  Block* get_target() const { return _target; }
  float get_prob() const { return _prob; }
};

//------------------------------CFGLoop-------------------------------------------
class CFGLoop : public CFGElement {
  friend class VMStructs;
  int _id;
  int _depth;
  CFGLoop *_parent;      // root of loop tree is the method level "pseudo" loop, its parent is null
  CFGLoop *_sibling;     // null terminated list
  CFGLoop *_child;       // first child, use child's sibling to visit all immediately nested loops
  GrowableArray<CFGElement*> _members; // list of members of loop
  GrowableArray<BlockProbPair> _exits; // list of successor blocks and their probabilities
  float _exit_prob;      // probability any loop exit is taken on a single loop iteration
  void update_succ_freq(Block* b, float freq);

 public:
  CFGLoop(int id) :
    CFGElement(),
    _id(id),
    _depth(0),
    _parent(NULL),
    _sibling(NULL),
    _child(NULL),
    _exit_prob(1.0f) {}
  CFGLoop* parent() { return _parent; }
  void push_pred(Block* blk, int i, Block_List& worklist, PhaseCFG* cfg);
  void add_member(CFGElement *s) { _members.push(s); }
  void add_nested_loop(CFGLoop* cl);
  Block* head() {
    assert(_members.at(0)->is_block(), "head must be a block");
    Block* hd = _members.at(0)->as_Block();
    assert(hd->_loop == this, "just checking");
    assert(hd->head()->is_Loop(), "must begin with loop head node");
    return hd;
  }
  Block* backedge_block(); // Return the block on the backedge of the loop (else NULL)
  void compute_loop_depth(int depth);
  void compute_freq(); // compute frequency with loop assuming head freq 1.0f
  void scale_freq();   // scale frequency by loop trip count (including outer loops)
  float outer_loop_freq() const; // frequency of outer loop
  bool in_loop_nest(Block* b);
  float trip_count() const { return 1.0f / _exit_prob; }
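  // Example (illustrative): _exit_prob == 0.25f means an exit is taken once
  // every four iterations on average, so trip_count() == 4.0f.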
  virtual bool is_loop() { return true; }
  int id() { return _id; }

#ifndef PRODUCT
  void dump( ) const;
  void dump_tree() const;
#endif
};

//----------------------------------CFGEdge------------------------------------
// An edge between two basic blocks that will be embodied by a branch or a
// fall-through.
class CFGEdge : public ResourceObj {
  friend class VMStructs;
 private:
  Block * _from;        // Source basic block
  Block * _to;          // Destination basic block
  float _freq;          // Execution frequency (estimate)
  int   _state;
  bool  _infrequent;
  int   _from_pct;
  int   _to_pct;

  // Private accessors
  int  from_pct() const { return _from_pct; }
  int  to_pct()   const { return _to_pct;   }
  int  from_infrequent() const { return from_pct() < BlockLayoutMinDiamondPercentage; }
  int  to_infrequent()   const { return to_pct()   < BlockLayoutMinDiamondPercentage; }

 public:
  enum {
    open,               // initial edge state; unprocessed
    connected,          // edge used to connect two traces together
    interior            // edge is interior to trace (could be backedge)
  };

  CFGEdge(Block *from, Block *to, float freq, int from_pct, int to_pct) :
    _from(from), _to(to), _freq(freq), _state(open),
    _from_pct(from_pct), _to_pct(to_pct) {
    _infrequent = from_infrequent() || to_infrequent();
  }

  float  freq() const { return _freq; }
  Block* from() const { return _from; }
  Block* to  () const { return _to;   }
  int  infrequent() const { return _infrequent; }
  int state() const { return _state; }

  void set_state(int state) { _state = state; }

#ifndef PRODUCT
  void dump( ) const;
#endif
};

//-----------------------------------Trace-------------------------------------
// An ordered list of basic blocks.
class Trace : public ResourceObj {
 private:
  uint _id;             // Unique Trace id (derived from initial block)
  Block ** _next_list;  // Array mapping index to next block
  Block ** _prev_list;  // Array mapping index to previous block
  Block * _first;       // First block in the trace
  Block * _last;        // Last block in the trace

  // Return the block that follows "b" in the trace.
  Block * next(Block *b) const { return _next_list[b->_pre_order]; }
  void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; }

  // Return the block that precedes "b" in the trace.
  Block * prev(Block *b) const { return _prev_list[b->_pre_order]; }
  void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; }

  // We've discovered a loop in this trace. Reset last to be "b", and first as
  // the block following "b".
  void break_loop_after(Block *b) {
    _last = b;
    _first = next(b);
    set_prev(_first, NULL);
    set_next(_last, NULL);
  }

 public:

  Trace(Block *b, Block **next_list, Block **prev_list) :
    _id(b->_pre_order),
    _next_list(next_list),
    _prev_list(prev_list),
    _first(b),
    _last(b) {
    set_next(b, NULL);
    set_prev(b, NULL);
  }

  // Return the id number
  uint id() const { return _id; }
  void set_id(uint id) { _id = id; }

  // Return the first block in the trace
  Block * first_block() const { return _first; }

  // Return the last block in the trace
  Block * last_block() const { return _last; }

  // Insert a trace in the middle of this one after b
  void insert_after(Block *b, Trace *tr) {
    set_next(tr->last_block(), next(b));
    if (next(b) != NULL) {
      set_prev(next(b), tr->last_block());
    }

    set_next(b, tr->first_block());
    set_prev(tr->first_block(), b);

    if (b == _last) {
      _last = tr->last_block();
    }
  }
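  // Splice sketch (illustrative): inserting trace [X Y] after b in [a b c]
  // rewires next/prev to yield [a b X Y c]; if b was _last, _last becomes Y.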

  void insert_before(Block *b, Trace *tr) {
    Block *p = prev(b);
    assert(p != NULL, "use append instead");
    insert_after(p, tr);
  }

  // Append another trace to this one.
  void append(Trace *tr) {
    insert_after(_last, tr);
  }

  // Append a block at the end of this trace
  void append(Block *b) {
    set_next(_last, b);
    set_prev(b, _last);
    _last = b;
  }

  // Adjust the blocks in this trace
  void fixup_blocks(PhaseCFG &cfg);
  bool backedge(CFGEdge *e);

#ifndef PRODUCT
  void dump( ) const;
#endif
};

//------------------------------PhaseBlockLayout-------------------------------
// Rearrange blocks into some canonical order, based on edges and their frequencies
class PhaseBlockLayout : public Phase {
  friend class VMStructs;
  PhaseCFG &_cfg;               // Control flow graph

  GrowableArray<CFGEdge *> *edges;
  Trace **traces;
  Block **next;
  Block **prev;
  UnionFind *uf;

  // Given a block, find its encompassing Trace
  Trace * trace(Block *b) {
    return traces[uf->Find_compress(b->_pre_order)];
  }
 public:
  PhaseBlockLayout(PhaseCFG &cfg);

  void find_edges();
  void grow_traces();
  void merge_traces(bool loose_connections);
  void reorder_traces(int count);
  void union_traces(Trace* from, Trace* to);
};

#endif // SHARE_VM_OPTO_BLOCK_HPP