// Merge — Thu, 01 Aug 2013 17:25:10 -0700 (stray VCS metadata; kept as a comment)
/*
 * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_OPTO_OUTPUT_HPP
#define SHARE_VM_OPTO_OUTPUT_HPP

#include "opto/block.hpp"
#include "opto/node.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_arm
# include "adfiles/ad_arm.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_32
# include "adfiles/ad_ppc_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif
class Arena;
class Bundle;
class Block;
class Block_Array;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class Pipeline_Use_Element;
class Pipeline_Use;
#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif
// Initial allocation sizes for the sections of the resizable code buffer.
enum {
  initial_code_capacity  = 16 * 1024,
  initial_stub_capacity  =  4 * 1024,
  initial_const_capacity =  4 * 1024,
  initial_locs_capacity  =  3 * 1024
};
78 //------------------------------Scheduling----------------------------------
79 // This class contains all the information necessary to implement instruction
80 // scheduling and bundling.
81 class Scheduling {
83 private:
84 // Arena to use
85 Arena *_arena;
87 // Control-Flow Graph info
88 PhaseCFG *_cfg;
90 // Register Allocation info
91 PhaseRegAlloc *_regalloc;
93 // Number of nodes in the method
94 uint _node_bundling_limit;
96 // List of scheduled nodes. Generated in reverse order
97 Node_List _scheduled;
99 // List of nodes currently available for choosing for scheduling
100 Node_List _available;
102 // Mapping from node (index) to basic block
103 Block_Array& _bbs;
105 // For each instruction beginning a bundle, the number of following
106 // nodes to be bundled with it.
107 Bundle *_node_bundling_base;
109 // Mapping from register to Node
110 Node_List _reg_node;
112 // Free list for pinch nodes.
113 Node_List _pinch_free_list;
115 // Latency from the beginning of the containing basic block (base 1)
116 // for each node.
117 unsigned short *_node_latency;
119 // Number of uses of this node within the containing basic block.
120 short *_uses;
122 // Schedulable portion of current block. Skips Region/Phi/CreateEx up
123 // front, branch+proj at end. Also skips Catch/CProj (same as
124 // branch-at-end), plus just-prior exception-throwing call.
125 uint _bb_start, _bb_end;
127 // Latency from the end of the basic block as scheduled
128 unsigned short *_current_latency;
130 // Remember the next node
131 Node *_next_node;
133 // Use this for an unconditional branch delay slot
134 Node *_unconditional_delay_slot;
136 // Pointer to a Nop
137 MachNopNode *_nop;
139 // Length of the current bundle, in instructions
140 uint _bundle_instr_count;
142 // Current Cycle number, for computing latencies and bundling
143 uint _bundle_cycle_number;
145 // Bundle information
146 Pipeline_Use_Element _bundle_use_elements[resource_count];
147 Pipeline_Use _bundle_use;
149 // Dump the available list
150 void dump_available() const;
152 public:
153 Scheduling(Arena *arena, Compile &compile);
155 // Destructor
156 NOT_PRODUCT( ~Scheduling(); )
158 // Step ahead "i" cycles
159 void step(uint i);
161 // Step ahead 1 cycle, and clear the bundle state (for example,
162 // at a branch target)
163 void step_and_clear();
165 Bundle* node_bundling(const Node *n) {
166 assert(valid_bundle_info(n), "oob");
167 return (&_node_bundling_base[n->_idx]);
168 }
170 bool valid_bundle_info(const Node *n) const {
171 return (_node_bundling_limit > n->_idx);
172 }
174 bool starts_bundle(const Node *n) const {
175 return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
176 }
178 // Do the scheduling
179 void DoScheduling();
181 // Compute the local latencies walking forward over the list of
182 // nodes for a basic block
183 void ComputeLocalLatenciesForward(const Block *bb);
185 // Compute the register antidependencies within a basic block
186 void ComputeRegisterAntidependencies(Block *bb);
187 void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
188 void verify_good_schedule( Block *b, const char *msg );
189 void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
190 void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );
192 // Add a node to the current bundle
193 void AddNodeToBundle(Node *n, const Block *bb);
195 // Add a node to the list of available nodes
196 void AddNodeToAvailableList(Node *n);
198 // Compute the local use count for the nodes in a block, and compute
199 // the list of instructions with no uses in the block as available
200 void ComputeUseCount(const Block *bb);
202 // Choose an instruction from the available list to add to the bundle
203 Node * ChooseNodeToBundle();
205 // See if this Node fits into the currently accumulating bundle
206 bool NodeFitsInBundle(Node *n);
208 // Decrement the use count for a node
209 void DecrementUseCounts(Node *n, const Block *bb);
211 // Garbage collect pinch nodes for reuse by other blocks.
212 void garbage_collect_pinch_nodes();
213 // Clean up a pinch node for reuse (helper for above).
214 void cleanup_pinch( Node *pinch );
216 // Information for statistics gathering
217 #ifndef PRODUCT
218 private:
219 // Gather information on size of nops relative to total
220 uint _branches, _unconditional_delays;
222 static uint _total_nop_size, _total_method_size;
223 static uint _total_branches, _total_unconditional_delays;
224 static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
226 public:
227 static void print_statistics();
229 static void increment_instructions_per_bundle(uint i) {
230 _total_instructions_per_bundle[i]++;
231 }
233 static void increment_nop_size(uint s) {
234 _total_nop_size += s;
235 }
237 static void increment_method_size(uint s) {
238 _total_method_size += s;
239 }
240 #endif
242 };
#endif // SHARE_VM_OPTO_OUTPUT_HPP