src/share/vm/opto/compile.hpp

author:      never
date:        Thu, 19 Aug 2010 14:51:47 -0700
changeset:   2085 f55c4f82ab9d
parent:      1989 60a14ad85270
child:       2314 f95d63e2154a
permissions: -rw-r--r--

6978249: spill between cpu and fpu registers when those moves are fast
Reviewed-by: kvn

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachNode;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class TypeData;
class TypePtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;

//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
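
  // Hedged illustration (not code from this file): MergeMemNode keeps one
  // memory input per alias index, so a phase that wants the raw-memory
  // slice of a MergeMem might write
  //   Node* raw_mem = mergemem->memory_at(Compile::AliasIdxRaw);
  // memory_at() is MergeMemNode's per-slice accessor (see memnode.hpp).
  // Indexes at or above AliasIdxRaw name real memory slices; AliasIdxTop
  // and AliasIdxBot are the "aliases to nothing/everything" sentinels.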

  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };
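
  // A minimal usage sketch (pattern assumed from compile.cpp): each major
  // phase times itself with a scoped instance, e.g.
  //   TracePhase t2("optimizer", &_t_optimizer, true);
  // The constructor opens a log bracket when dolog is true and logging is
  // on; the destructor closes it, stamping the time and node count.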

  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;         // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;      // normalized address type
    ciField*        _field;         // relevant instance field, or null if none
    bool            _is_rewritable; // false if the memory is write-once only
    int             _general_index; // if this type is an instance, the general
                                    // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field,"");
      _field = f;
      if (f->is_final())  _is_rewritable = false;
    }

    void print_on(outputStream* st) PRODUCT_RETURN;
  };

  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = methodDataOopDesc::_trap_hist_limit
  };

 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers; // save/restore arg regs for trampolines
  const bool            _subsume_loads;         // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;    // Do escape analysis.
  ciMethod*             _method;                // The method being compiled.
  int                   _entry_bci;             // entry bci for osr methods.
  const TypeFunc*       _tf;                    // My kind of signature
  InlineTree*           _ilt;                   // Ditto (temporary).
  address               _stub_function;         // VM entry for stub being compiled, or NULL
  const char*           _stub_name;             // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;      // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;         // Number of iterations for doing loop optimizations
  int                   _max_inline_size;       // Max inline size for this compilation
  int                   _freq_inline_size;      // Max hot method inline size for this compilation
  int                   _fixed_slots;           // count of frame slots not allocated by the register
                                                // allocator, i.e. locks, original deopt pc, etc.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;        // Count of something big happening
  bool                  _has_loops;             // True if the method _may_ have some loops
  bool                  _has_split_ifs;         // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;     // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;     // True if StringBuffers or StringBuilders are allocated
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;    // Have we emitted a recompiling trap?
  uint                  _decompile_count;       // Cumulative decompilation counts.
  bool                  _do_inlining;           // True if we intend to do inlining
  bool                  _do_scheduling;         // True if we intend to do scheduling
  bool                  _do_freq_based_layout;  // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;  // True if we generate code to count invocations
  bool                  _do_method_data_update; // True if we generate code to update methodDataOops
  int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;        // True if we should dump assembly code for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
#endif

  // JSR 292
  bool                  _has_method_handle_invokes; // True if this method has MethodHandle invokes.

  // Compilation environment.
  Arena                 _comp_arena;            // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                   // CI interface
  CompileLog*           _log;                   // from CompilerThread
  const char*           _failure_reason;        // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;   // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;           // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;       // List of Opaque1 nodes for the loop predicates.
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                // Counter for unique Node indices
  debug_only(static int _debug_idx;)            // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;            // Arena for new-space Nodes
  Arena                 _old_arena;             // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                  // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                   // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;      // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;  // default notes for new nodes
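
  // Implied blocking scheme (a sketch, inferred from the constants above):
  // the notes for node index i live in block i >> _log2_node_notes_block_size,
  // at slot i & (_node_notes_block_size - 1); see locate_node_notes() below.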

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;         // Arena for all types
  Arena*                _type_arena;            // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;             // Intern table
  void*                 _type_hwm;              // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;        // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;             // Cache for
  const TypeFunc*       _last_tf;               //  TypeFunc::make
  AliasType**           _alias_types;           // List of alias types seen so far.
  int                   _num_alias_types;       // Logical length of _alias_types
  int                   _max_alias_types;       // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize]; // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;           // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;              // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;            // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
                                                // main parsing has finished.

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                   // Results of CFG finding
  bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;     // We are emitting instructions with 24-bit results
  int                   _java_calls;            // Number of java calls in the method
  int                   _inner_loops;           // Number of inner loops in the method
  Matcher*              _matcher;               // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;              // Results of register allocation.
  int                   _frame_slots;           // Size of total frame in stack slots
  CodeOffsets           _code_offsets;          // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;      // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;        // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;    // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;           // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;           // Where the code is assembled
  int                   _first_block_size;      // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;         // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;            // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;           // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;     // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;   // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;   // For temporary code buffers.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }
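
  // A minimal sketch: code deep inside a phase can reach the active
  // compilation without threading a Compile* through every call, e.g.
  //   Compile* C = Compile::current();
  //   if (C->failing())  return;   // bail out if a failure was recorded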

  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _subsume_loads; }
  // Do escape analysis.
  bool              do_escape_analysis() const  { return _do_escape_analysis; }
  bool              save_argument_registers() const { return _save_argument_registers; }

  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf!=NULL, ""); return _tf; }
  void         init_tf(const TypeFunc* tf)      { assert(_tf==NULL, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  address           stub_entry_point() const    { return _stub_entry_point; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, "");         return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_major_progress()            { _major_progress++; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               num_loop_opts() const       { return _num_loop_opts; }
  void          set_num_loop_opts(int n)        { _num_loop_opts = n; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob");        _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const{ return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z){ _do_freq_based_layout = z; }
  bool              do_count_invocations() const{ return _do_count_invocations; }
  void          set_do_count_invocations(bool z){ _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int               AliasLevel() const          { return _AliasLevel; }
  bool              print_assembly() const       { return _print_assembly; }
  void          set_print_assembly(bool z)       { _print_assembly = z; }
  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char * option) {
    return method() != NULL && method()->has_option(option);
  }
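  // For example, a CompilerOracle directive such as
  //   option,java/lang/String.indexOf,PrintOptoAssembly
  // (syntax assumed from CompilerOracle) would make
  // method_has_option("PrintOptoAssembly") return true for that method.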
#ifndef PRODUCT
  bool          trace_opto_output() const       { return _trace_opto_output; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes;     }
  void          set_has_method_handle_invokes(bool z) {        _has_method_handle_invokes = z; }

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
  }
  void print_method(const char * name, int level = 1) {
#ifndef PRODUCT
    if (_printer) _printer->print_method(this, name, level);
#endif
  }
  void end_method() {
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }
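
  // A usage sketch (names assumed from compile.cpp): bulk phases bracket
  // their work for the IdealGraphPrinter, e.g.
  //   print_method("Before RemoveUseless", 3);
  // where the level is checked against the PrintIdealGraphLevel flag.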

  int           macro_count()                   { return _macro_nodes->length(); }
  int           predicate_count()               { return _predicate_opaqs->length();}
  Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx);}
  ConnectionGraph* congraph()                   { return _congraph;}
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph;}
  void add_macro_node(Node * n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node * n) {
    // this function may be called twice for a node so check
    // that the node is in the array before attempting to remove it
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // remove from _predicate_opaqs list also if it is there
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)){
      _predicate_opaqs->remove(n);
    }
  }
  void add_predicate_opaq(Node * n) {
    assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }
  // remove the opaque nodes that protect the predicates so that the unused checks and
  // uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);

  // Compilation environment.
  Arena*            comp_arena()                { return &_comp_arena; }
  ciEnv*            env() const                 { return _env; }
  CompileLog*       log() const                 { return _log; }
  bool              failing() const             { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason() { return _failure_reason; }
  bool              failure_reason_is(const char* r) { return (r==_failure_reason) || (r!=NULL && _failure_reason!=NULL && strcmp(r, _failure_reason)==0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (unique() + margin > (uint)MaxNodeLimit) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
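
  // A hedged sketch of the typical bail-out pattern at phase boundaries,
  // using the NodeLimitFudgeFactor flag as the margin:
  //   if (C->check_node_count(NodeLimitFudgeFactor, "out of nodes")) {
  //     return;  // the method is already marked not-compilable
  //   }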

  // Node management
  uint              unique() const              { return _unique; }
  uint         next_unique()                    { return _unique++; }
  void          set_unique(uint i)              { _unique = i; }
  static int        debug_idx()                 { return debug_only(_debug_idx)+0; }
  static void   set_debug_idx(int i)            { debug_only(_debug_idx = i); }
  Arena*            node_arena()                { return &_node_arena; }
  Arena*            old_arena()                 { return &_old_arena; }
  RootNode*         root() const                { return _root; }
  void          set_root(RootNode* r)           { _root = r; }
  StartNode*        start() const;              // (Derived from root.)
  void         init_start(StartNode* s);
  Node*             immutable_memory();

  Node*             recent_alloc_ctl() const    { return _recent_alloc_ctl; }
  Node*             recent_alloc_obj() const    { return _recent_alloc_obj; }
  void          set_recent_alloc(Node* ctl, Node* obj) {
                                                  _recent_alloc_ctl = ctl;
                                                  _recent_alloc_obj = obj;
                                                }

  // Handy undefined Node
  Node*             top() const                 { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node*             cached_top_node()           { return _top; }
  void          set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes* default_node_notes() const        { return _default_node_notes; }
  void    set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*       node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool   set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);

  // Type management
  Arena*            type_arena()                { return _type_arena; }
  Dict*             type_dict()                 { return _type_dict; }
  void*             type_hwm()                  { return _type_hwm; }
  size_t            type_last_size()            { return _type_last_size; }
  int               num_alias_types()           { return _num_alias_types; }

  void          init_type_arena()                       { _type_arena = &_Compile_types; }
  void          set_type_arena(Arena* a)                { _type_arena = a; }
  void          set_type_dict(Dict* d)                  { _type_dict = d; }
  void          set_type_hwm(void* p)                   { _type_hwm = p; }
  void          set_type_last_size(size_t sz)           { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
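
  // A sketch of the intended pattern around this one-entry cache
  // (caller shape assumed from TypeFunc::make):
  //   const TypeFunc* tf = C->last_tf(method);   // quick one-entry check
  //   if (tf == NULL) {
  //     tf = /* build the TypeFunc for 'method' */;
  //     C->set_last_tf(method, tf);              // fill the cache
  //   }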

  AliasType*        alias_type(int                idx)  { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type) { return find_alias_type(adr_type, false); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField*         field);

  int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);

  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN *gvn)           { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List *for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void              identify_useful_nodes(Unique_Node_List &useful);
  void              remove_useless_nodes  (Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  int               frame_slots() const         { return _frame_slots; }
  int               frame_size_in_words() const; // frame_slots in units of the polymorphic 'words'
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  uint              node_bundling_limit()       { return _node_bundling_limit; }
  Bundle*           node_bundling_base()        { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void          set_matcher(Matcher* m)                 { _matcher = m; }
//void          set_regalloc(PhaseRegAlloc* ra)           { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)            { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p)   { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode   = mode;
  }

  void  set_java_calls(int z) { _java_calls  = z; }
  void set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()                 { return _method_size; }
  CodeBuffer*       code_buffer()               { return &_code_buffer; }
  int               first_block_size()          { return _first_block_size; }
  void              set_frame_complete(int off) { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table()       { return &_handler_table; }
  ImplicitExceptionTable* inc_table()           { return &_inc_table; }
  OopMapSet*        oop_map_set()               { return _oop_map_set; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }
  Dependencies*     dependencies()              { return env()->dependencies(); }
  static int        CompiledZap_count()         { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob()       { return _scratch_buffer_blob; }
  void         init_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint              scratch_emit_size(const Node* n);

  enum ScratchBufferBlob {
    MAX_inst_size       = 1024,
    MAX_locs_size       = 128, // number of relocInfo elements
    MAX_const_size      = 128,
    MAX_stubs_size      = 128
  };
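
  // Hedged usage sketch: when an exact instruction size is needed (e.g. for
  // branch shortening), a node can be test-emitted into the scratch blob and
  // the resulting byte count used as its size:
  //   uint size = C->scratch_emit_size(mach_node);  // emit, measure, discard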

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Write out basic block data to code buffer
  void Fill_buffer();

  // Determine which variable sized branches can be shortened
  void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif

 private:
  // Phase control:
  void Init(int aliaslevel);                     // Prepare for a single compilation
  int  Inline_Warm();                            // Find more inlining work.
  void Finish_Warm();                            // Give up on further inlines.
  void Optimize();                               // Given a graph, optimize it
  void Code_Gen();                               // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                            // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
  void           register_intrinsic(CallGenerator* cg);                    // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked = 1,      // succeeded at least once
    _intrinsic_failed = 2,      // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual = 8,     // was seen in the virtual form (rare)
    _intrinsic_both = 16        // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return boolean if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges.
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Print bytecodes, including the scope inlining tree
  void print_codes();

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();
};
