src/share/vm/opto/output.hpp

Wed, 03 Jun 2015 14:22:57 +0200

author
roland
date
Wed, 03 Jun 2015 14:22:57 +0200
changeset 7859
c1c199dde5c9
parent 7598
ddce0b7cee93
child 7994
04ff2f6cd0eb
permissions
-rw-r--r--

8077504: Unsafe load can lose control dependency and cause crash
Summary: Node::depends_only_on_test() should return false for Unsafe loads
Reviewed-by: kvn, adinn

     1 /*
     2  * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #ifndef SHARE_VM_OPTO_OUTPUT_HPP
    26 #define SHARE_VM_OPTO_OUTPUT_HPP
    28 #include "opto/block.hpp"
    29 #include "opto/node.hpp"
    30 #if defined AD_MD_HPP
    31 # include AD_MD_HPP
    32 #elif defined TARGET_ARCH_MODEL_x86_32
    33 # include "adfiles/ad_x86_32.hpp"
    34 #elif defined TARGET_ARCH_MODEL_x86_64
    35 # include "adfiles/ad_x86_64.hpp"
    36 #elif defined TARGET_ARCH_MODEL_sparc
    37 # include "adfiles/ad_sparc.hpp"
    38 #elif defined TARGET_ARCH_MODEL_zero
    39 # include "adfiles/ad_zero.hpp"
    40 #elif defined TARGET_ARCH_MODEL_ppc_64
    41 # include "adfiles/ad_ppc_64.hpp"
    42 #endif
// Forward declarations for types used only by pointer/reference below;
// the full definitions come from block.hpp, node.hpp and the AD files
// included above.
class Arena;
class Bundle;
class Block;
class Block_Array;
class Node;
class Node_Array;
class Node_List;
class PhaseCFG;
class PhaseChaitin;
class Pipeline_Use_Element;
class Pipeline_Use;

// DEBUG_ARG(x) expands to an extra trailing argument ", x" in debug
// builds and to nothing in PRODUCT builds, so debug-only parameters can
// be passed without #ifdef noise at each call site.
#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif
    62 // Define the initial sizes for allocation of the resizable code buffer
    63 enum {
    64   initial_code_capacity  =  16 * 1024,
    65   initial_stub_capacity  =   4 * 1024,
    66   initial_const_capacity =   4 * 1024,
    67   initial_locs_capacity  =   3 * 1024
    68 };
    70 //------------------------------Scheduling----------------------------------
    71 // This class contains all the information necessary to implement instruction
    72 // scheduling and bundling.
    73 class Scheduling {
    75 private:
    76   // Arena to use
    77   Arena *_arena;
    79   // Control-Flow Graph info
    80   PhaseCFG *_cfg;
    82   // Register Allocation info
    83   PhaseRegAlloc *_regalloc;
    85   // Number of nodes in the method
    86   uint _node_bundling_limit;
    88   // List of scheduled nodes. Generated in reverse order
    89   Node_List _scheduled;
    91   // List of nodes currently available for choosing for scheduling
    92   Node_List _available;
    94   // For each instruction beginning a bundle, the number of following
    95   // nodes to be bundled with it.
    96   Bundle *_node_bundling_base;
    98   // Mapping from register to Node
    99   Node_List _reg_node;
   101   // Free list for pinch nodes.
   102   Node_List _pinch_free_list;
   104   // Latency from the beginning of the containing basic block (base 1)
   105   // for each node.
   106   unsigned short *_node_latency;
   108   // Number of uses of this node within the containing basic block.
   109   short *_uses;
   111   // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
   112   // front, branch+proj at end.  Also skips Catch/CProj (same as
   113   // branch-at-end), plus just-prior exception-throwing call.
   114   uint _bb_start, _bb_end;
   116   // Latency from the end of the basic block as scheduled
   117   unsigned short *_current_latency;
   119   // Remember the next node
   120   Node *_next_node;
   122   // Use this for an unconditional branch delay slot
   123   Node *_unconditional_delay_slot;
   125   // Pointer to a Nop
   126   MachNopNode *_nop;
   128   // Length of the current bundle, in instructions
   129   uint _bundle_instr_count;
   131   // Current Cycle number, for computing latencies and bundling
   132   uint _bundle_cycle_number;
   134   // Bundle information
   135   Pipeline_Use_Element _bundle_use_elements[resource_count];
   136   Pipeline_Use         _bundle_use;
   138   // Dump the available list
   139   void dump_available() const;
   141 public:
   142   Scheduling(Arena *arena, Compile &compile);
   144   // Destructor
   145   NOT_PRODUCT( ~Scheduling(); )
   147   // Step ahead "i" cycles
   148   void step(uint i);
   150   // Step ahead 1 cycle, and clear the bundle state (for example,
   151   // at a branch target)
   152   void step_and_clear();
   154   Bundle* node_bundling(const Node *n) {
   155     assert(valid_bundle_info(n), "oob");
   156     return (&_node_bundling_base[n->_idx]);
   157   }
   159   bool valid_bundle_info(const Node *n) const {
   160     return (_node_bundling_limit > n->_idx);
   161   }
   163   bool starts_bundle(const Node *n) const {
   164     return (_node_bundling_limit > n->_idx && _node_bundling_base[n->_idx].starts_bundle());
   165   }
   167   // Do the scheduling
   168   void DoScheduling();
   170   // Compute the local latencies walking forward over the list of
   171   // nodes for a basic block
   172   void ComputeLocalLatenciesForward(const Block *bb);
   174   // Compute the register antidependencies within a basic block
   175   void ComputeRegisterAntidependencies(Block *bb);
   176   void verify_do_def( Node *n, OptoReg::Name def, const char *msg );
   177   void verify_good_schedule( Block *b, const char *msg );
   178   void anti_do_def( Block *b, Node *def, OptoReg::Name def_reg, int is_def );
   179   void anti_do_use( Block *b, Node *use, OptoReg::Name use_reg );
   181   // Add a node to the current bundle
   182   void AddNodeToBundle(Node *n, const Block *bb);
   184   // Add a node to the list of available nodes
   185   void AddNodeToAvailableList(Node *n);
   187   // Compute the local use count for the nodes in a block, and compute
   188   // the list of instructions with no uses in the block as available
   189   void ComputeUseCount(const Block *bb);
   191   // Choose an instruction from the available list to add to the bundle
   192   Node * ChooseNodeToBundle();
   194   // See if this Node fits into the currently accumulating bundle
   195   bool NodeFitsInBundle(Node *n);
   197   // Decrement the use count for a node
   198  void DecrementUseCounts(Node *n, const Block *bb);
   200   // Garbage collect pinch nodes for reuse by other blocks.
   201   void garbage_collect_pinch_nodes();
   202   // Clean up a pinch node for reuse (helper for above).
   203   void cleanup_pinch( Node *pinch );
   205   // Information for statistics gathering
   206 #ifndef PRODUCT
   207 private:
   208   // Gather information on size of nops relative to total
   209   uint _branches, _unconditional_delays;
   211   static uint _total_nop_size, _total_method_size;
   212   static uint _total_branches, _total_unconditional_delays;
   213   static uint _total_instructions_per_bundle[Pipeline::_max_instrs_per_cycle+1];
   215 public:
   216   static void print_statistics();
   218   static void increment_instructions_per_bundle(uint i) {
   219     _total_instructions_per_bundle[i]++;
   220   }
   222   static void increment_nop_size(uint s) {
   223     _total_nop_size += s;
   224   }
   226   static void increment_method_size(uint s) {
   227     _total_method_size += s;
   228   }
   229 #endif
   231 };
   233 #endif // SHARE_VM_OPTO_OUTPUT_HPP

mercurial