/*
 * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_OUTPUT_HPP
#define SHARE_VM_OPTO_OUTPUT_HPP

#include "opto/block.hpp"
#include "opto/node.hpp"
#if defined AD_MD_HPP
# include AD_MD_HPP
#elif defined TARGET_ARCH_MODEL_x86_32
# include "adfiles/ad_x86_32.hpp"
#elif defined TARGET_ARCH_MODEL_x86_64
# include "adfiles/ad_x86_64.hpp"
#elif defined TARGET_ARCH_MODEL_sparc
# include "adfiles/ad_sparc.hpp"
#elif defined TARGET_ARCH_MODEL_zero
# include "adfiles/ad_zero.hpp"
#elif defined TARGET_ARCH_MODEL_ppc_64
# include "adfiles/ad_ppc_64.hpp"
#endif

class Arena;
class Bundle;
class Block;
class Block_Array;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class Pipeline_Use_Element;
class Pipeline_Use;

#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif

// Define the initial sizes for allocation of the resizable code buffer
enum {
  initial_code_capacity  = 16 * 1024,
  initial_stub_capacity  =  4 * 1024,
  initial_const_capacity =  4 * 1024,
  initial_locs_capacity  =  3 * 1024
};

//------------------------------Scheduling----------------------------------
// This class contains all the information necessary to implement instruction
// scheduling and bundling.
class Scheduling {

private:
  // Arena to use
  Arena *_arena;

  // Control-Flow Graph info
  PhaseCFG *_cfg;

  // Register Allocation info
  PhaseRegAlloc *_regalloc;

  // Number of nodes in the method
  uint _node_bundling_limit;
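  // A "bundle" here is a group of instructions issued in the same machine
  // cycle. Per-node bundling state is indexed by Node::_idx, so this limit
  // also bounds the valid indices; roughly (see node_bundling() and
  // valid_bundle_info() below for the authoritative checks):
  //
  //   if (_node_bundling_limit > n->_idx) {          // info is valid for n
  //     Bundle *b = &_node_bundling_base[n->_idx];   // n's bundle state
  //   }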
  // List of scheduled nodes. Generated in reverse order
  Node_List _scheduled;

  // List of nodes currently available for choosing for scheduling
  Node_List _available;

  // For each instruction beginning a bundle, the number of following
  // nodes to be bundled with it.
  Bundle *_node_bundling_base;

  // Mapping from register to Node
  Node_List _reg_node;

  // Free list for pinch nodes.
  Node_List _pinch_free_list;

  // Latency from the beginning of the containing basic block (base 1)
  // for each node.
  unsigned short *_node_latency;

  // Number of uses of this node within the containing basic block.
  short *_uses;

  // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
  // front, branch+proj at end.  Also skips Catch/CProj (same as
  // branch-at-end), plus just-prior exception-throwing call.
  uint _bb_start, _bb_end;

  // Latency from the end of the basic block as scheduled
  unsigned short *_current_latency;

  // Remember the next node
  Node *_next_node;

  // Use this for an unconditional branch delay slot
  Node *_unconditional_delay_slot;

  // Pointer to a Nop
  MachNopNode *_nop;

  // Length of the current bundle, in instructions
  uint _bundle_instr_count;

  // Current Cycle number, for computing latencies and bundling
  uint _bundle_cycle_number;

  // Bundle information
  Pipeline_Use_Element _bundle_use_elements[resource_count];
  Pipeline_Use         _bundle_use;

  // Dump the available list
  void dump_available() const;

public:
  Scheduling(Arena *arena, Compile &compile);

  // Destructor
  NOT_PRODUCT( ~Scheduling(); )

  // Step ahead "i" cycles
  void step(uint i);

  // Step ahead 1 cycle, and clear the bundle state (for example,
  // at a branch target)
  void step_and_clear();

  Bundle* node_bundling(const Node *n) {
    assert(valid_bundle_info(n), "oob");
    return (&_node_bundling_base[n->_idx]);
  }

  bool valid_bundle_info(const Node *n) const {
    return (_node_bundling_limit > n->_idx);
  }

  bool starts_bundle(const Node *n) const {
    return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
  }

  // Do the scheduling
  void DoScheduling();

  // Compute the local latencies walking forward over the list of
  // nodes for a basic block
  void ComputeLocalLatenciesForward(const Block *bb);

  // Compute the register antidependencies within a basic block
  void ComputeRegisterAntidependencies(Block *bb);
  void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
  void verify_good_schedule( Block *b, const char *msg );
  void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
  void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );

  // Add a node to the current bundle
  void AddNodeToBundle(Node *n, const Block *bb);
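  // A rough sketch of how the methods above and below cooperate (inferred
  // from these declarations and the field comments; output.cpp holds the
  // authoritative flow). Each block is scheduled from its end backwards,
  // which is why _scheduled comes out in reverse order:
  //
  //   ComputeUseCount(bb);                  // seed the _available list
  //   while (_available.size() > 0) {
  //     Node *n = ChooseNodeToBundle();     // consults NodeFitsInBundle()
  //     AddNodeToBundle(n, bb);             // appends to _scheduled and, via
  //   }                                     // DecrementUseCounts(), releases
  //                                         // nodes that become ready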
  // Add a node to the list of available nodes
  void AddNodeToAvailableList(Node *n);

  // Compute the local use count for the nodes in a block, and compute
  // the list of instructions with no uses in the block as available
  void ComputeUseCount(const Block *bb);

  // Choose an instruction from the available list to add to the bundle
  Node * ChooseNodeToBundle();

  // See if this Node fits into the currently accumulating bundle
  bool NodeFitsInBundle(Node *n);

  // Decrement the use count for a node
  void DecrementUseCounts(Node *n, const Block *bb);

  // Garbage collect pinch nodes for reuse by other blocks.
  void garbage_collect_pinch_nodes();
  // Clean up a pinch node for reuse (helper for above).
  void cleanup_pinch( Node *pinch );

  // Information for statistics gathering
#ifndef PRODUCT
private:
  // Gather information on size of nops relative to total
  uint _branches, _unconditional_delays;

  static uint _total_nop_size, _total_method_size;
  static uint _total_branches, _total_unconditional_delays;
  static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];

public:
  static void print_statistics();

  static void increment_instructions_per_bundle(uint i) {
    _total_instructions_per_bundle[i]++;
  }

  static void increment_nop_size(uint s) {
    _total_nop_size += s;
  }

  static void increment_method_size(uint s) {
    _total_method_size += s;
  }
#endif

};

#endif // SHARE_VM_OPTO_OUTPUT_HPP
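
// Typical driver (a sketch of the conventional call site in output.cpp,
// assumed rather than guaranteed by this header): once register allocation
// has completed, Compile builds a Scheduling instance on a resource arena
// and runs the whole pass in one call:
//
//   Scheduling scheduling(Thread::current()->resource_area(), *this);
//   scheduling.DoScheduling();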