Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for Hotspot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_OPTO_OUTPUT_HPP
26 #define SHARE_VM_OPTO_OUTPUT_HPP
28 #include "opto/block.hpp"
29 #include "opto/node.hpp"
30 #ifdef TARGET_ARCH_MODEL_x86_32
31 # include "adfiles/ad_x86_32.hpp"
32 #endif
33 #ifdef TARGET_ARCH_MODEL_x86_64
34 # include "adfiles/ad_x86_64.hpp"
35 #endif
36 #ifdef TARGET_ARCH_MODEL_sparc
37 # include "adfiles/ad_sparc.hpp"
38 #endif
39 #ifdef TARGET_ARCH_MODEL_zero
40 # include "adfiles/ad_zero.hpp"
41 #endif
43 class Arena;
44 class Bundle;
45 class Block;
46 class Block_Array;
47 class Node;
48 class Node_Array;
49 class Node_List;
50 class PhaseCFG;
51 class PhaseChaitin;
52 class Pipeline_Use_Element;
53 class Pipeline_Use;
55 #ifndef PRODUCT
56 #define DEBUG_ARG(x) , x
57 #else
58 #define DEBUG_ARG(x)
59 #endif
61 // Define the initial sizes for allocation of the resizable code buffer
62 enum {
63 initial_code_capacity = 16 * 1024,
64 initial_stub_capacity = 4 * 1024,
65 initial_const_capacity = 4 * 1024,
66 initial_locs_capacity = 3 * 1024
67 };
69 //------------------------------Scheduling----------------------------------
70 // This class contains all the information necessary to implement instruction
71 // scheduling and bundling.
72 class Scheduling {
74 private:
75 // Arena to use
76 Arena *_arena;
78 // Control-Flow Graph info
79 PhaseCFG *_cfg;
81 // Register Allocation info
82 PhaseRegAlloc *_regalloc;
84 // Number of nodes in the method
85 uint _node_bundling_limit;
87 // List of scheduled nodes. Generated in reverse order
88 Node_List _scheduled;
90 // List of nodes currently available for choosing for scheduling
91 Node_List _available;
93 // Mapping from node (index) to basic block
94 Block_Array& _bbs;
96 // For each instruction beginning a bundle, the number of following
97 // nodes to be bundled with it.
98 Bundle *_node_bundling_base;
100 // Mapping from register to Node
101 Node_List _reg_node;
103 // Free list for pinch nodes.
104 Node_List _pinch_free_list;
106 // Latency from the beginning of the containing basic block (base 1)
107 // for each node.
108 unsigned short *_node_latency;
110 // Number of uses of this node within the containing basic block.
111 short *_uses;
113 // Schedulable portion of current block. Skips Region/Phi/CreateEx up
114 // front, branch+proj at end. Also skips Catch/CProj (same as
115 // branch-at-end), plus just-prior exception-throwing call.
116 uint _bb_start, _bb_end;
118 // Latency from the end of the basic block as scheduled
119 unsigned short *_current_latency;
121 // Remember the next node
122 Node *_next_node;
124 // Use this for an unconditional branch delay slot
125 Node *_unconditional_delay_slot;
127 // Pointer to a Nop
128 MachNopNode *_nop;
130 // Length of the current bundle, in instructions
131 uint _bundle_instr_count;
133 // Current Cycle number, for computing latencies and bundling
134 uint _bundle_cycle_number;
136 // Bundle information
137 Pipeline_Use_Element _bundle_use_elements[resource_count];
138 Pipeline_Use _bundle_use;
140 // Dump the available list
141 void dump_available() const;
143 public:
144 Scheduling(Arena *arena, Compile &compile);
146 // Destructor
147 NOT_PRODUCT( ~Scheduling(); )
149 // Step ahead "i" cycles
150 void step(uint i);
152 // Step ahead 1 cycle, and clear the bundle state (for example,
153 // at a branch target)
154 void step_and_clear();
156 Bundle* node_bundling(const Node *n) {
157 assert(valid_bundle_info(n), "oob");
158 return (&_node_bundling_base[n->_idx]);
159 }
161 bool valid_bundle_info(const Node *n) const {
162 return (_node_bundling_limit > n->_idx);
163 }
165 bool starts_bundle(const Node *n) const {
166 return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
167 }
169 // Do the scheduling
170 void DoScheduling();
172 // Compute the local latencies walking forward over the list of
173 // nodes for a basic block
174 void ComputeLocalLatenciesForward(const Block *bb);
176 // Compute the register antidependencies within a basic block
177 void ComputeRegisterAntidependencies(Block *bb);
178 void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
179 void verify_good_schedule( Block *b, const char *msg );
180 void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
181 void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );
183 // Add a node to the current bundle
184 void AddNodeToBundle(Node *n, const Block *bb);
186 // Add a node to the list of available nodes
187 void AddNodeToAvailableList(Node *n);
189 // Compute the local use count for the nodes in a block, and compute
190 // the list of instructions with no uses in the block as available
191 void ComputeUseCount(const Block *bb);
193 // Choose an instruction from the available list to add to the bundle
194 Node * ChooseNodeToBundle();
196 // See if this Node fits into the currently accumulating bundle
197 bool NodeFitsInBundle(Node *n);
199 // Decrement the use count for a node
200 void DecrementUseCounts(Node *n, const Block *bb);
202 // Garbage collect pinch nodes for reuse by other blocks.
203 void garbage_collect_pinch_nodes();
204 // Clean up a pinch node for reuse (helper for above).
205 void cleanup_pinch( Node *pinch );
207 // Information for statistics gathering
208 #ifndef PRODUCT
209 private:
210 // Gather information on size of nops relative to total
211 uint _branches, _unconditional_delays;
213 static uint _total_nop_size, _total_method_size;
214 static uint _total_branches, _total_unconditional_delays;
215 static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
217 public:
218 static void print_statistics();
220 static void increment_instructions_per_bundle(uint i) {
221 _total_instructions_per_bundle[i]++;
222 }
224 static void increment_nop_size(uint s) {
225 _total_nop_size += s;
226 }
228 static void increment_method_size(uint s) {
229 _total_method_size += s;
230 }
231 #endif
233 };
235 #endif // SHARE_VM_OPTO_OUTPUT_HPP