src/share/vm/opto/output.hpp

author:      twisti
date:        Fri, 03 Dec 2010 01:34:31 -0800
changeset:   2350:2f644f85485d
parent:      2314:f95d63e2154a
child:       2508:b92c45f2bc75
permissions: -rw-r--r--

6961690: load oops from constant table on SPARC
Summary: oops should be loaded from the constant table of an nmethod instead of materializing them with a long code sequence.
Reviewed-by: never, kvn

/*
 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_OUTPUT_HPP
#define SHARE_VM_OPTO_OUTPUT_HPP

#include "opto/block.hpp"
#include "opto/node.hpp"
#ifdef TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#endif
#ifdef TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#endif

class Arena;
class Bundle;
class Block;
class Block_Array;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class Pipeline_Use_Element;
class Pipeline_Use;

#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// Define the initial sizes for allocation of the resizable code buffer
enum {
  initial_code_capacity  = 16 * 1024,
  initial_stub_capacity  =  4 * 1024,
  initial_const_capacity =  4 * 1024,
  initial_locs_capacity  =  3 * 1024
};

//------------------------------Scheduling----------------------------------
// This class contains all the information necessary to implement instruction
// scheduling and bundling.
class Scheduling {

private:
  // Arena to use
  Arena *_arena;

  // Control-Flow Graph info
  PhaseCFG *_cfg;

  // Register Allocation info
  PhaseRegAlloc *_regalloc;

  // Number of nodes in the method
  uint _node_bundling_limit;

  // List of scheduled nodes. Generated in reverse order
  Node_List _scheduled;

  // List of nodes currently available for choosing for scheduling
  Node_List _available;

  // Mapping from node (index) to basic block
  Block_Array& _bbs;

  // For each instruction beginning a bundle, the number of following
  // nodes to be bundled with it.
  Bundle *_node_bundling_base;

  // Mapping from register to Node
  Node_List _reg_node;

  // Free list for pinch nodes.
  Node_List _pinch_free_list;

  // Latency from the beginning of the containing basic block (base 1)
  // for each node.
  unsigned short *_node_latency;

  // Number of uses of this node within the containing basic block.
  short *_uses;

  // Schedulable portion of current block. Skips Region/Phi/CreateEx up
  // front, branch+proj at end. Also skips Catch/CProj (same as
  // branch-at-end), plus just-prior exception-throwing call.
  uint _bb_start, _bb_end;

  // Latency from the end of the basic block as scheduled
  unsigned short *_current_latency;

  // Remember the next node
  Node *_next_node;

  // Use this for an unconditional branch delay slot
  Node *_unconditional_delay_slot;

  // Pointer to a Nop
  MachNopNode *_nop;

  // Length of the current bundle, in instructions
  uint _bundle_instr_count;

  // Current Cycle number, for computing latencies and bundling
  uint _bundle_cycle_number;

  // Bundle information
  Pipeline_Use_Element _bundle_use_elements[resource_count];
  Pipeline_Use         _bundle_use;

  // Dump the available list
  void dump_available() const;

public:
  Scheduling(Arena *arena, Compile &compile);

  // Destructor
  NOT_PRODUCT( ~Scheduling(); )

  // Step ahead "i" cycles
  void step(uint i);

  // Step ahead 1 cycle, and clear the bundle state (for example,
  // at a branch target)
  void step_and_clear();

  Bundle* node_bundling(const Node *n) {
    assert(valid_bundle_info(n), "oob");
    return (&_node_bundling_base[n->_idx]);
  }

  bool valid_bundle_info(const Node *n) const {
    return (_node_bundling_limit > n->_idx);
  }

  bool starts_bundle(const Node *n) const {
    return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
  }
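
  // A minimal query sketch (illustrative only, not part of this header):
  // after scheduling has run, a caller holding a Scheduling reference
  // `sched` and a Node* `n` (both hypothetical names) might check bundle
  // boundaries before emitting code, e.g.
  //
  //   if (sched.valid_bundle_info(n) && sched.starts_bundle(n)) {
  //     // n opens a new bundle; any padding/alignment would be decided here
  //   }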

  // Do the scheduling
  void DoScheduling();
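
  // A minimal driver sketch (an assumption about how the compiler's output
  // phase typically invokes this pass; `compile` is an illustrative name for
  // the current Compile object, and the arena choice is not mandated here):
  //
  //   Scheduling scheduling(Thread::current()->resource_area(), compile);
  //   scheduling.DoScheduling();  // schedule and bundle each block of the CFG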

  // Compute the local latencies walking forward over the list of
  // nodes for a basic block
  void ComputeLocalLatenciesForward(const Block *bb);

  // Compute the register antidependencies within a basic block
  void ComputeRegisterAntidependencies(Block *bb);
  // Schedule-verification helpers
  void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
  void verify_good_schedule( Block *b, const char *msg );
  // Record register defs/uses and insert the needed anti-dependence
  // (precedence) edges, using pinch nodes where several uses share a register
  void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
  void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );

  // Add a node to the current bundle
  void AddNodeToBundle(Node *n, const Block *bb);

  // Add a node to the list of available nodes
  void AddNodeToAvailableList(Node *n);

  // Compute the local use count for the nodes in a block, and compute
  // the list of instructions with no uses in the block as available
  void ComputeUseCount(const Block *bb);

  // Choose an instruction from the available list to add to the bundle
  Node * ChooseNodeToBundle();

  // See if this Node fits into the currently accumulating bundle
  bool NodeFitsInBundle(Node *n);

  // Decrement the use count for a node
  void DecrementUseCounts(Node *n, const Block *bb);

  // Garbage collect pinch nodes for reuse by other blocks.
  void garbage_collect_pinch_nodes();
  // Clean up a pinch node for reuse (helper for above).
  void cleanup_pinch( Node *pinch );

  // Information for statistics gathering
#ifndef PRODUCT
private:
  // Gather information on size of nops relative to total
  uint _branches, _unconditional_delays;

  static uint _total_nop_size, _total_method_size;
  static uint _total_branches, _total_unconditional_delays;
  static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];

public:
  static void print_statistics();

  static void increment_instructions_per_bundle(uint i) {
    _total_instructions_per_bundle[i]++;
  }

  static void increment_nop_size(uint s) {
    _total_nop_size += s;
  }

  static void increment_method_size(uint s) {
    _total_method_size += s;
  }
#endif

};

#endif // SHARE_VM_OPTO_OUTPUT_HPP
