src/share/vm/c1/c1_LinearScan.hpp

changeset 435:a61af66fc99e
child 739:dc7f315e41f7

/*
 * Copyright 2005-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

class DebugInfoCache;
class FpuStackAllocator;
class IRScopeDebugInfo;
class Interval;
class IntervalWalker;
class LIRGenerator;
class LinearScan;
class MoveResolver;
class Range;

define_array(IntervalArray, Interval*)
define_stack(IntervalList, IntervalArray)

define_array(IntervalsArray, IntervalList*)
define_stack(IntervalsList, IntervalsArray)

define_array(OopMapArray, OopMap*)
define_stack(OopMapList, OopMapArray)

define_array(ScopeValueArray, ScopeValue*)

define_array(LIR_OpListArray, LIR_OpList*);
define_stack(LIR_OpListStack, LIR_OpListArray);


enum IntervalUseKind {
  // priority of use kinds must be ascending
  noUse = 0,
  loopEndMarker = 1,
  shouldHaveRegister = 2,
  mustHaveRegister = 3,

  firstValidKind = 1,
  lastValidKind = 3
};
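// Note (illustrative, not from the original comments): because the
// priorities are ascending, the stronger of two use kinds for the same
// position can be chosen numerically, e.g. with the VM's MAX2 macro:
// MAX2(shouldHaveRegister, mustHaveRegister) == mustHaveRegister.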
define_array(UseKindArray, IntervalUseKind)
define_stack(UseKindStack, UseKindArray)


enum IntervalKind {
  fixedKind = 0,  // interval pre-colored by LIR_Generator
  anyKind   = 1,  // no register/memory allocated by LIR_Generator
  nofKinds,
  firstKind = fixedKind
};


// during linear scan an interval is in one of four states
enum IntervalState {
  unhandledState = 0, // unhandled state (not processed yet)
  activeState   = 1,  // live and in a physical register
  inactiveState = 2,  // in a lifetime hole and in a physical register
  handledState  = 3,  // spilled or no longer live
  invalidState = -1
};
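// Sketch of the classification (simplified to a single [from, to) range;
// real intervals consist of several ranges, so they can also be inactive):
//   current position <  from            -> unhandledState
//   from <= current position < to       -> activeState
//   current position in a lifetime hole -> inactiveState
//   current position >= to (or spilled) -> handledState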


enum IntervalSpillState {
  noDefinitionFound,  // starting state of calculation: no definition found yet
  oneDefinitionFound, // one definition has already been found.
                      // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
                      // the position of this definition is stored in _definition_pos
  oneMoveInserted,    // one spill move has already been inserted.
  storeAtDefinition,  // the interval should be stored immediately after its definition because otherwise
                      // there would be multiple redundant stores
  startInMemory,      // the interval starts in memory (e.g. method parameter), so a store is never necessary
  noOptimization      // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
};
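// Sketch of a typical transition sequence (an illustration assuming a
// single definition that is spilled later): noDefinitionFound ->
// oneDefinitionFound at the definition, oneMoveInserted when the first
// spill store is emitted, and storeAtDefinition as soon as a second
// store would otherwise become necessary.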


#define for_each_interval_kind(kind) \
  for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))

#define for_each_visitor_mode(mode) \
  for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
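
// Usage sketch (illustrative; process() is a hypothetical placeholder):
// both macros expand to plain counted loops, e.g.
//   for_each_interval_kind(kind) { process(kind); }
// visits fixedKind and anyKind in that order and stops before nofKinds.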


class LinearScan : public CompilationResourceObj {
  // declare classes used by LinearScan as friends because they
  // need a wide variety of functions declared here
  //
  // Only the small interface to the rest of the compiler is public
  friend class Interval;
  friend class IntervalWalker;
  friend class LinearScanWalker;
  friend class FpuStackAllocator;
  friend class MoveResolver;
  friend class LinearScanStatistic;
  friend class LinearScanTimers;
  friend class RegisterVerifier;

 public:
  enum {
    any_reg = -1,
    nof_cpu_regs = pd_nof_cpu_regs_linearscan,
    nof_fpu_regs = pd_nof_fpu_regs_linearscan,
    nof_xmm_regs = pd_nof_xmm_regs_linearscan,
    nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
  };

 private:
  Compilation*              _compilation;
  IR*                       _ir;
  LIRGenerator*             _gen;
  FrameMap*                 _frame_map;

  BlockList                 _cached_blocks;     // cached list with all blocks in linear-scan order (only correct as long as the original list is unchanged)
  int                       _num_virtual_regs;  // number of virtual registers (without new registers introduced because of splitting intervals)
  bool                      _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
  int                       _num_calls;         // total number of calls in this method
  int                       _max_spills;        // number of stack slots used for intervals allocated to memory
  int                       _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value

  IntervalList              _intervals;         // mapping from register number to interval
  IntervalList*             _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
  IntervalArray*            _sorted_intervals;  // intervals sorted by Interval::from()

  LIR_OpArray               _lir_ops;           // mapping from LIR_Op id to LIR_Op node
  BlockBeginArray           _block_of_op;       // mapping from LIR_Op id to the BlockBegin containing this instruction
  BitMap                    _has_info;          // bit set for each LIR_Op id that has a CodeEmitInfo
  BitMap                    _has_call;          // bit set for each LIR_Op id that destroys all caller save registers
  BitMap2D                  _interval_in_loop;  // bit set for each virtual register that is contained in each loop

  // cached debug info to prevent multiple creation of same object
  // TODO: cached scope values for registers could be static
  ScopeValueArray           _scope_value_cache;

  static ConstantOopWriteValue _oop_null_scope_value;
  static ConstantIntValue    _int_m1_scope_value;
  static ConstantIntValue    _int_0_scope_value;
  static ConstantIntValue    _int_1_scope_value;
  static ConstantIntValue    _int_2_scope_value;

  // accessors
  IR*           ir() const                       { return _ir; }
  Compilation*  compilation() const              { return _compilation; }
  LIRGenerator* gen() const                      { return _gen; }
  FrameMap*     frame_map() const                { return _frame_map; }

  // unified bailout support
  void          bailout(const char* msg) const   { compilation()->bailout(msg); }
  bool          bailed_out() const               { return compilation()->bailed_out(); }

  // access to block list (sorted in linear scan order)
  int           block_count() const              { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
  BlockBegin*   block_at(int idx) const          { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list");   return _cached_blocks.at(idx); }

  int           num_virtual_regs() const         { return _num_virtual_regs; }
  // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
  int           live_set_size() const            { return round_to(_num_virtual_regs, BitsPerWord); }
  bool          has_fpu_registers() const        { return _has_fpu_registers; }
  int           num_loops() const                { return ir()->num_loops(); }
  bool          is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }

  // handling of fpu stack allocation (platform dependent, needed for debug information generation)
#ifdef IA32
  FpuStackAllocator* _fpu_stack_allocator;
  bool use_fpu_stack_allocation() const          { return UseSSE < 2 && has_fpu_registers(); }
#else
  bool use_fpu_stack_allocation() const          { return false; }
#endif


  // access to interval list
  int           interval_count() const           { return _intervals.length(); }
  Interval*     interval_at(int reg_num) const   { return _intervals.at(reg_num); }

  IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }

  // access to LIR_Ops and Blocks indexed by op_id
  int          max_lir_op_id() const                { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
  LIR_Op*      lir_op_with_id(int op_id) const      { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
  BlockBegin*  block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }

  bool is_block_begin(int op_id)                    { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
  bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }

  bool has_call(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
  bool has_info(int op_id)                          { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
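
  // A note on the op_id encoding (an illustrative summary, not from the
  // original comments): LIR operations are numbered with even op_ids
  // (index << 1), which is why the arrays above are indexed with
  // op_id >> 1 and several asserts require even ids. The odd ids in
  // between are treated as positions between two instructions, which is
  // also why block_of_op_with_id() tolerates op_id == max_lir_op_id() + 1.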


  // functions for converting LIR-Operands to register numbers
  static bool is_valid_reg_num(int reg_num)         { return reg_num >= 0; }
  static int  reg_num(LIR_Opr opr);
  static int  reg_numHi(LIR_Opr opr);

  // functions for classification of intervals
  static bool is_precolored_interval(const Interval* i);
  static bool is_virtual_interval(const Interval* i);

  static bool is_precolored_cpu_interval(const Interval* i);
  static bool is_virtual_cpu_interval(const Interval* i);
  static bool is_precolored_fpu_interval(const Interval* i);
  static bool is_virtual_fpu_interval(const Interval* i);

  static bool is_in_fpu_register(const Interval* i);
  static bool is_oop_interval(const Interval* i);


  // General helper functions
  int         allocate_spill_slot(bool double_word);
  void        assign_spill_slot(Interval* it);
  void        propagate_spill_slots();

  Interval*   create_interval(int reg_num);
  void        append_interval(Interval* it);
  void        copy_register_flags(Interval* from, Interval* to);

  // platform dependent functions
  static bool is_processed_reg_num(int reg_num);
  static int  num_physical_regs(BasicType type);
  static bool requires_adjacent_regs(BasicType type);
  static bool is_caller_save(int assigned_reg);

  // spill move optimization: eliminate moves from register to stack if
  // stack slot is known to be correct
  void        change_spill_definition_pos(Interval* interval, int def_pos);
  void        change_spill_state(Interval* interval, int spill_pos);
  static bool must_store_at_definition(const Interval* i);
  void        eliminate_spill_moves();

  // Phase 1: number all instructions in all blocks
  void number_instructions();

  // Phase 2: compute local live sets separately for each block
  // (sets live_gen and live_kill for each block)
  //
  // helper methods used by compute_local_live_sets()
  void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);

  void compute_local_live_sets();

  // Phase 3: perform a backward dataflow analysis to compute global live sets
  // (sets live_in and live_out for each block)
  void compute_global_live_sets();


  // Phase 4: build intervals
  // (fills the list _intervals)
  //
  // helper methods used by build_intervals()
  void add_use (Value value, int from, int to, IntervalUseKind use_kind);

  void add_def (LIR_Opr opr, int def_pos,      IntervalUseKind use_kind);
  void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
  void add_temp(LIR_Opr opr, int temp_pos,     IntervalUseKind use_kind);

  void add_def (int reg_num, int def_pos,      IntervalUseKind use_kind, BasicType type);
  void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
  void add_temp(int reg_num, int temp_pos,     IntervalUseKind use_kind, BasicType type);

  // Add platform dependent kills for particular LIR ops.  Can be used
  // to add platform dependent behaviour for some operations.
  void pd_add_temps(LIR_Op* op);

  IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
  IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
  void handle_method_arguments(LIR_Op* op);
  void handle_doubleword_moves(LIR_Op* op);
  void add_register_hints(LIR_Op* op);

  void build_intervals();


  // Phase 5: actual register allocation
  // (Uses LinearScanWalker)
  //
  // helper functions for building a sorted list of intervals
  NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
  static int interval_cmp(Interval** a, Interval** b);
  void add_to_list(Interval** first, Interval** prev, Interval* interval);
  void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));

  void sort_intervals_before_allocation();
  void sort_intervals_after_allocation();
  void allocate_registers();


  // Phase 6: resolve data flow
  // (insert moves at edges between blocks if intervals have been split)
  //
  // helper functions for resolve_data_flow()
  Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
  Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
  Interval* interval_at_block_end(BlockBegin* block, int reg_num);
  Interval* interval_at_op_id(int reg_num, int op_id);
  void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
  void resolve_data_flow();

  void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);
  void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
  void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);
  void resolve_exception_handlers();

  // Phase 7: assign register numbers back to LIR
  // (includes computation of debug information and oop maps)
  //
  // helper functions for assign_reg_num()
  VMReg vm_reg_for_interval(Interval* interval);
  VMReg vm_reg_for_operand(LIR_Opr opr);

  static LIR_Opr operand_for_interval(Interval* interval);
  static LIR_Opr calc_operand_for_interval(const Interval* interval);
  LIR_Opr       canonical_spill_opr(Interval* interval);

  LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);

  // methods used for oop map computation
  IntervalWalker* init_compute_oop_maps();
  OopMap*         compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);
  void            compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);

  // methods used for debug information computation
  void init_compute_debug_info();

  MonitorValue*  location_for_monitor_index(int monitor_index);
  LocationValue* location_for_name(int name, Location::Type loc_type);

  int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
  int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);

  IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state, int cur_bci, int stack_end, int locks_end);
  void compute_debug_info(CodeEmitInfo* info, int op_id);

  void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
  void assign_reg_num();


  // Phase 8: fpu stack allocation
  // (Used only on x86 when fpu operands are present)
  void allocate_fpu_stack();


  // helper functions for printing state
#ifndef PRODUCT
  static void print_bitmap(BitMap& bitmap);
  void        print_intervals(const char* label);
  void        print_lir(int level, const char* label, bool hir_valid = true);
#endif

#ifdef ASSERT
  // verification functions for allocation
  // (check that all intervals have a correct register and that no registers are overwritten)
  void verify();
  void verify_intervals();
  void verify_no_oops_in_fixed_intervals();
  void verify_constants();
  void verify_registers();
#endif

 public:
  // creation
  LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);

  // main entry function: perform linear scan register allocation
  void             do_linear_scan();

  // accessors used by Compilation
  int         max_spills()  const { return _max_spills; }
  int         num_calls() const   { assert(_num_calls >= 0, "not set"); return _num_calls; }

  // entry functions for printing
#ifndef PRODUCT
  static void print_statistics();
  static void print_timers(double total);
#endif
};


// Helper class for ordering moves that are inserted at the same position in the LIR.
// When moves between registers are inserted, it is important that the moves are
// ordered such that no register is overwritten. So moves from register to stack
// are processed before moves from stack to register. When moves have circular
// dependencies, a temporary stack slot is used to break the cycle.
// The same logic is used in the LinearScanWalker and in LinearScan during
// resolve_data_flow, and is therefore factored out into a separate class.
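//
// Example of a circular dependency (illustrative): swapping two registers
// requires the moves  r0 -> r1  and  r1 -> r0, and neither move can be
// emitted first without destroying an input. With a temporary stack slot t,
// the cycle is broken into the sequence  t = r0;  r0 = r1;  r1 = t.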
class MoveResolver: public StackObj {
 private:
  LinearScan*      _allocator;

  LIR_List*        _insert_list;
  int              _insert_idx;
  LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted

  IntervalList     _mapping_from;
  LIR_OprList      _mapping_from_opr;
  IntervalList     _mapping_to;
  bool             _multiple_reads_allowed;
  int              _register_blocked[LinearScan::nof_regs];

  int  register_blocked(int reg)                    { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
  void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }

  void block_registers(Interval* it);
  void unblock_registers(Interval* it);
  bool save_to_process_move(Interval* from, Interval* to);

  void create_insertion_buffer(LIR_List* list);
  void append_insertion_buffer();
  void insert_move(Interval* from_interval, Interval* to_interval);
  void insert_move(LIR_Opr from_opr, Interval* to_interval);

  DEBUG_ONLY(void verify_before_resolve();)
  void resolve_mappings();
 public:
  MoveResolver(LinearScan* allocator);

  DEBUG_ONLY(void check_empty();)
  void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }
  void set_insert_position(LIR_List* insert_list, int insert_idx);
  void move_insert_position(LIR_List* insert_list, int insert_idx);
  void add_mapping(Interval* from, Interval* to);
  void add_mapping(LIR_Opr from, Interval* to);
  void resolve_and_append_moves();

  LinearScan* allocator()   { return _allocator; }
  bool has_mappings()       { return _mapping_from.length() > 0; }
};


class Range : public CompilationResourceObj {
  friend class Interval;

 private:
  static Range*    _end;       // sentinel (from == to == max_jint)

  int              _from;      // from (inclusive)
  int              _to;        // to (exclusive)
  Range*           _next;      // linear list of Ranges

  // used only by class Interval, so hide them
  bool             intersects(Range* r) const    { return intersects_at(r) != -1; }
  int              intersects_at(Range* r) const;

 public:
  Range(int from, int to, Range* next);

  static void      initialize();
  static Range*    end()                         { return _end; }

  int              from() const                  { return _from; }
  int              to()   const                  { return _to; }
  Range*           next() const                  { return _next; }
  void             set_from(int from)            { _from = from; }
  void             set_to(int to)                { _to = to; }
  void             set_next(Range* next)         { _next = next; }

  // for testing
  void             print(outputStream* out = tty) const PRODUCT_RETURN;
};


// Interval is an ordered list of disjoint ranges.

// For pre-colored double word LIR_Oprs, one interval is created for
// the low word register and one is created for the hi word register.
// On Intel, only one interval is created for FPU double registers. At
// all times assigned_reg contains the reg. number of the physical
// register.

// For LIR_Oprs in virtual registers a single interval can represent
// single and double word values.  When a physical register is
// assigned to the interval, assigned_reg contains the
// phys. reg. number and for double word values assigned_regHi the
// phys. reg. number of the hi word if there is any.  For spilled
// intervals assigned_reg contains the stack index.  assigned_regHi is
// always -1.
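//
// Example (illustrative): a spilled double-word interval might have
// assigned_reg == 5, meaning canonical stack slot 5, and
// assigned_regHi == -1; the same interval while in registers would carry
// the two physical register numbers in assigned_reg and assigned_regHi.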

class Interval : public CompilationResourceObj {
 private:
  static Interval* _end;          // sentinel (interval with only range Range::end())

  int              _reg_num;
  BasicType        _type;         // valid only for virtual registers
  Range*           _first;        // sorted list of Ranges
  intStack         _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds

  Range*           _current;      // interval iteration: the current Range
  Interval*        _next;         // interval iteration: sorted list of Intervals (ends with sentinel)
  IntervalState    _state;        // interval iteration: the set this interval belongs to


  int              _assigned_reg;
  int              _assigned_regHi;

  int              _cached_to;    // cached value: to of last range (-1: not cached)
  LIR_Opr          _cached_opr;
  VMReg            _cached_vm_reg;

  Interval*        _split_parent;           // the original interval from which this interval was derived
  IntervalList     _split_children;         // list of all intervals that are split off from this interval (only available for split parents)
  Interval*        _current_split_child;    // the current split child that has been active or inactive last (always stored in split parents)

  int              _canonical_spill_slot;   // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
  bool             _insert_move_when_activated; // true if move is inserted between _current_split_child and this interval when interval gets active the first time
  IntervalSpillState _spill_state;          // for spill move optimization
  int              _spill_definition_pos;   // position where the interval is defined (if defined only once)
  Interval*        _register_hint;          // this interval should be in the same register as the hint interval

  int              calc_to();
  Interval*        new_split_child();
 public:
  Interval(int reg_num);

  static void      initialize();
  static Interval* end()                         { return _end; }

  // accessors
  int              reg_num() const               { return _reg_num; }
  void             set_reg_num(int r)            { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
  BasicType        type() const                  { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
  void             set_type(BasicType type)      { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }

  Range*           first() const                 { return _first; }
  int              from() const                  { return _first->from(); }
  int              to()                          { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
  int              num_use_positions() const     { return _use_pos_and_kinds.length() / 2; }

  Interval*        next() const                  { return _next; }
  Interval**       next_addr()                   { return &_next; }
  void             set_next(Interval* next)      { _next = next; }

  int              assigned_reg() const          { return _assigned_reg; }
  int              assigned_regHi() const        { return _assigned_regHi; }
  void             assign_reg(int reg)           { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
  void             assign_reg(int reg,int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }

  Interval*        register_hint(bool search_split_child = true) const; // calculation needed
  void             set_register_hint(Interval* i) { _register_hint = i; }

  int              state() const                 { return _state; }
  void             set_state(IntervalState s)    { _state = s; }

  // access to split parent and split children
  bool             is_split_parent() const       { return _split_parent == this; }
  bool             is_split_child() const        { return _split_parent != this; }
  Interval*        split_parent() const          { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
  Interval*        split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);
  Interval*        split_child_before_op_id(int op_id);
  bool             split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
  DEBUG_ONLY(void  check_split_children();)

  // information stored in split parent, but available for all children
  int              canonical_spill_slot() const            { return split_parent()->_canonical_spill_slot; }
  void             set_canonical_spill_slot(int slot)      { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
  Interval*        current_split_child() const             { return split_parent()->_current_split_child; }
  void             make_current_split_child()              { split_parent()->_current_split_child = this; }

  bool             insert_move_when_activated() const      { return _insert_move_when_activated; }
  void             set_insert_move_when_activated(bool b)  { _insert_move_when_activated = b; }

  // for spill optimization
  IntervalSpillState spill_state() const         { return split_parent()->_spill_state; }
  int              spill_definition_pos() const  { return split_parent()->_spill_definition_pos; }
  void             set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
  void             set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
  // returns true if this interval has a shadow copy on the stack that is always correct
  bool             always_in_memory() const      { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }

  // caching of values that take time to compute and are used multiple times
  LIR_Opr          cached_opr() const            { return _cached_opr; }
  VMReg            cached_vm_reg() const         { return _cached_vm_reg; }
  void             set_cached_opr(LIR_Opr opr)   { _cached_opr = opr; }
  void             set_cached_vm_reg(VMReg reg)  { _cached_vm_reg = reg; }

  // access to use positions
  int    first_usage(IntervalUseKind min_use_kind) const;           // id of the first operation requiring this interval in a register
  int    next_usage(IntervalUseKind min_use_kind, int from) const;  // id of next usage seen from the given position
  int    next_usage_exact(IntervalUseKind exact_use_kind, int from) const;
  int    previous_usage(IntervalUseKind min_use_kind, int from) const;

  // manipulating intervals
  void   add_use_pos(int pos, IntervalUseKind use_kind);
  void   add_range(int from, int to);
  Interval* split(int split_pos);
  Interval* split_from_start(int split_pos);
  void remove_first_use_pos()                    { _use_pos_and_kinds.truncate(_use_pos_and_kinds.length() - 2); }

  // test intersection
  bool   covers(int op_id, LIR_OpVisitState::OprMode mode) const;
  bool   has_hole_between(int from, int to);
  bool   intersects(Interval* i) const           { return _first->intersects(i->_first); }
  int    intersects_at(Interval* i) const        { return _first->intersects_at(i->_first); }

  // range iteration
  void   rewind_range()                          { _current = _first; }
  void   next_range()                            { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }
  int    current_from() const                    { return _current->from(); }
  int    current_to() const                      { return _current->to(); }
  bool   current_at_end() const                  { return _current == Range::end(); }
  bool   current_intersects(Interval* it)        { return _current->intersects(it->_current); }
  int    current_intersects_at(Interval* it)     { return _current->intersects_at(it->_current); }

  // printing
  void print(outputStream* out = tty) const      PRODUCT_RETURN;
};


class IntervalWalker : public CompilationResourceObj {
 protected:
  Compilation*     _compilation;
  LinearScan*      _allocator;

  Interval*        _unhandled_first[nofKinds];  // sorted list of intervals, not yet live at the current position
  Interval*        _active_first   [nofKinds];  // sorted list of intervals, live at the current position
  Interval*        _inactive_first [nofKinds];  // sorted list of intervals, in a lifetime hole at the current position

  Interval*        _current;                     // the current interval coming from unhandled list
  int              _current_position;            // the current position (intercept point through the intervals)
  IntervalKind     _current_kind;                // whether the current interval is fixedKind or anyKind


  Compilation*     compilation() const               { return _compilation; }
  LinearScan*      allocator() const                 { return _allocator; }

  // unified bailout support
  void             bailout(const char* msg) const    { compilation()->bailout(msg); }
  bool             bailed_out() const                { return compilation()->bailed_out(); }

  void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }

  Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
  Interval** active_first_addr(IntervalKind kind)    { check_bounds(kind); return &_active_first[kind]; }
  Interval** inactive_first_addr(IntervalKind kind)  { check_bounds(kind); return &_inactive_first[kind]; }

  void append_unsorted(Interval** first, Interval* interval);
  void append_sorted(Interval** first, Interval* interval);
  void append_to_unhandled(Interval** list, Interval* interval);

  bool remove_from_list(Interval** list, Interval* i);
  void remove_from_list(Interval* i);

  void next_interval();
  Interval*        current() const               { return _current; }
  IntervalKind     current_kind() const          { return _current_kind; }

  void walk_to(IntervalState state, int from);

  // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
  // Return false if current() should not be moved to the active interval list.
  // It is safe to append current to any interval list but the unhandled list.
  virtual bool activate_current() { return true; }

  // interval_moved() is called whenever an interval moves from one interval list to another.
  // In the implementation of this method it is prohibited to move the interval to any list.
  virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);
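
  // Illustrative note (an assumption based on the hooks above, not from
  // the original comments): a subclass such as LinearScanWalker can
  // override activate_current() to perform the actual allocation work for
  // current(), returning false when the interval should not end up in the
  // active list (e.g. because it was spilled).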

 public:
  IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  Interval* unhandled_first(IntervalKind kind)   { check_bounds(kind); return _unhandled_first[kind]; }
  Interval* active_first(IntervalKind kind)      { check_bounds(kind); return _active_first[kind]; }
  Interval* inactive_first(IntervalKind kind)    { check_bounds(kind); return _inactive_first[kind]; }

  // active contains the intervals that are live after the lir_op
  void walk_to(int lir_op_id);
  // active contains the intervals that are live before the lir_op
  void walk_before(int lir_op_id)  { walk_to(lir_op_id-1); }
  // walk through all intervals
  void walk()                      { walk_to(max_jint); }

  int current_position()           { return _current_position; }
};


// The actual linear scan register allocator
class LinearScanWalker : public IntervalWalker {
  enum {
    any_reg = LinearScan::any_reg
  };

 private:
  int              _first_reg;       // the reg. number of the first phys. register
  int              _last_reg;        // the reg. number of the last phys. register
  int              _num_phys_regs;   // required by current interval
  bool             _adjacent_regs;   // whether the lo/hi words must be in adjacent phys. registers

  int              _use_pos[LinearScan::nof_regs];
  int              _block_pos[LinearScan::nof_regs];
  IntervalList*    _spill_intervals[LinearScan::nof_regs];

  MoveResolver     _move_resolver;   // for ordering spill moves

  // accessors mapped to same functions in class LinearScan
  int         block_count() const      { return allocator()->block_count(); }
  BlockBegin* block_at(int idx) const  { return allocator()->block_at(idx); }
  BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }

  void init_use_lists(bool only_process_use_pos);
  void exclude_from_use(int reg);
  void exclude_from_use(Interval* i);
  void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
  void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
  void set_block_pos(int reg, Interval* i, int block_pos);
  void set_block_pos(Interval* i, int block_pos);

  void free_exclude_active_fixed();
  void free_exclude_active_any();
  void free_collect_inactive_fixed(Interval* cur);
  void free_collect_inactive_any(Interval* cur);
  void free_collect_unhandled(IntervalKind kind, Interval* cur);
  void spill_exclude_active_fixed();
  void spill_block_unhandled_fixed(Interval* cur);
  void spill_block_inactive_fixed(Interval* cur);
  void spill_collect_active_any();
  void spill_collect_inactive_any(Interval* cur);

  void insert_move(int op_id, Interval* src_it, Interval* dst_it);
  int  find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
  int  find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
  void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
  void split_for_spilling(Interval* it);
  void split_stack_interval(Interval* it);
  void split_when_partial_register_available(Interval* it, int register_available_until);
  void split_and_spill_interval(Interval* it);

  int  find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  bool alloc_free_reg(Interval* cur);

  int  find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
  int  find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
  void split_and_spill_intersecting_intervals(int reg, int regHi);
  void alloc_locked_reg(Interval* cur);

  bool no_allocation_possible(Interval* cur);
  void update_phys_reg_range(bool requires_cpu_register);
  void init_vars_for_alloc(Interval* cur);
  bool pd_init_regs_for_alloc(Interval* cur);

  void combine_spilled_intervals(Interval* cur);
  bool is_move(LIR_Op* op, Interval* from, Interval* to);

  bool activate_current();

 public:
  LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);

  // must be called when all intervals are allocated
  void             finish_allocation()           { _move_resolver.resolve_and_append_moves(); }
};



/*
When a block has more than one predecessor, and all predecessors end with
the same sequence of move-instructions, then these moves can be placed once
at the beginning of the block instead of multiple times in the predecessors.
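
For example (illustrative): when both predecessors end with the identical
sequence "move r1 -> stack slot 0; move r2 -> r3" directly before their
branches, both copies can be deleted and the sequence inserted once at
the beginning of the common successor block.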

Similarly, when a block has more than one successor, then equal sequences of
moves at the beginning of the successors can be placed once at the end of
the block. But because the moves must be inserted before all branch
instructions, this works only when there is exactly one conditional branch
at the end of the block (because the moves must be inserted before all
branches, but after all compares).

This optimization affects all kinds of moves (reg->reg, reg->stack and
stack->reg). Because this optimization works best when a block contains only
a few moves, it has a huge impact on the number of blocks that are totally
empty.
*/
class EdgeMoveOptimizer : public StackObj {
 private:
  // the class maintains a list with the LIR instruction lists of all
  // successors (or predecessors) and the current index into these lists
  LIR_OpListStack _edge_instructions;
  intStack        _edge_instructions_idx;

  void init_instructions();
  void append_instructions(LIR_OpList* instructions, int instructions_idx);
  LIR_Op* instruction_at(int edge);
  void remove_cur_instruction(int edge, bool decrement_index);

  bool operations_different(LIR_Op* op1, LIR_Op* op2);

  void optimize_moves_at_block_end(BlockBegin* cur);
  void optimize_moves_at_block_begin(BlockBegin* cur);

  EdgeMoveOptimizer();

 public:
  static void optimize(BlockList* code);
};



class ControlFlowOptimizer : public StackObj {
 private:
  BlockList _original_preds;

  enum {
    ShortLoopSize = 5
  };
  void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
  void reorder_short_loops(BlockList* code);

  bool can_delete_block(BlockBegin* cur);
  void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
  void delete_empty_blocks(BlockList* code);

  void delete_unnecessary_jumps(BlockList* code);
  void delete_jumps_to_return(BlockList* code);

  DEBUG_ONLY(void verify(BlockList* code);)

  ControlFlowOptimizer();
 public:
  static void optimize(BlockList* code);
};


#ifndef PRODUCT

// Helper class for collecting statistics of LinearScan
class LinearScanStatistic : public StackObj {
 public:
  enum Counter {
    // general counters
    counter_method,
    counter_fpu_method,
    counter_loop_method,
    counter_exception_method,
    counter_loop,
    counter_block,
    counter_loop_block,
    counter_exception_block,
    counter_interval,
    counter_fixed_interval,
    counter_range,
    counter_fixed_range,
    counter_use_pos,
    counter_fixed_use_pos,
    counter_spill_slots,
    blank_line_1,

    // counters for classes of lir instructions
    counter_instruction,
    counter_label,
    counter_entry,
    counter_return,
    counter_call,
    counter_move,
    counter_cmp,
    counter_cond_branch,
    counter_uncond_branch,
    counter_stub_branch,
    counter_alu,
    counter_alloc,
    counter_sync,
    counter_throw,
    counter_unwind,
    counter_typecheck,
    counter_fpu_stack,
    counter_misc_inst,
    counter_other_inst,
    blank_line_2,

    // counters for different types of moves
    counter_move_total,
    counter_move_reg_reg,
    counter_move_reg_stack,
    counter_move_stack_reg,
    counter_move_stack_stack,
    counter_move_reg_mem,
    counter_move_mem_reg,
    counter_move_const_any,

    number_of_counters,
    invalid_counter = -1
  };

 private:
  int _counters_sum[number_of_counters];
  int _counters_max[number_of_counters];

  void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }

  const char* counter_name(int counter_idx);
  Counter base_counter(int counter_idx);

  void sum_up(LinearScanStatistic &method_statistic);
  void collect(LinearScan* allocator);

 public:
  LinearScanStatistic();
  void print(const char* title);
  static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
};


// Helper class for collecting compilation time of LinearScan
class LinearScanTimers : public StackObj {
 public:
  enum Timer {
    timer_do_nothing,
    timer_number_instructions,
    timer_compute_local_live_sets,
    timer_compute_global_live_sets,
    timer_build_intervals,
    timer_sort_intervals_before,
    timer_allocate_registers,
    timer_resolve_data_flow,
    timer_sort_intervals_after,
    timer_eliminate_spill_moves,
    timer_assign_reg_num,
    timer_allocate_fpu_stack,
    timer_optimize_lir,

    number_of_timers
  };

 private:
  elapsedTimer _timers[number_of_timers];
  const char*  timer_name(int idx);

 public:
  LinearScanTimers();

  void begin_method();                     // called for each method when register allocation starts
  void end_method(LinearScan* allocator);  // called for each method when register allocation has completed
  void print(double total_time);           // called before termination of the VM to print a global summary

  elapsedTimer* timer(int idx) { return &(_timers[idx]); }
};


#endif // ifndef PRODUCT


// Pick up platform-dependent implementation details
# include "incls/_c1_LinearScan_pd.hpp.incl"