src/share/vm/c1/c1_LinearScan.hpp

author       trims
date         Tue, 05 Apr 2011 14:12:31 -0700
changeset    2708:1d1603768966
parent       2508:b92c45f2bc75
child        3108:7588156f5cf9
permissions  -rw-r--r--

7010070: Update all 2010 Oracle-changed OpenJDK files to have the proper copyright dates - second pass
Summary: Update the copyright to be 2010 on all changed files in OpenJDK
Reviewed-by: ohair

duke@435 1 /*
trims@2708 2 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_C1_C1_LINEARSCAN_HPP
stefank@2314 26 #define SHARE_VM_C1_C1_LINEARSCAN_HPP
stefank@2314 27
stefank@2314 28 #include "c1/c1_FpuStackSim.hpp"
stefank@2314 29 #include "c1/c1_FrameMap.hpp"
stefank@2314 30 #include "c1/c1_IR.hpp"
stefank@2314 31 #include "c1/c1_Instruction.hpp"
stefank@2314 32 #include "c1/c1_LIR.hpp"
stefank@2314 33 #include "c1/c1_LIRGenerator.hpp"
stefank@2314 34
duke@435 35 class DebugInfoCache;
duke@435 36 class FpuStackAllocator;
duke@435 37 class IRScopeDebugInfo;
duke@435 38 class Interval;
duke@435 39 class IntervalWalker;
duke@435 40 class LIRGenerator;
duke@435 41 class LinearScan;
duke@435 42 class MoveResolver;
duke@435 43 class Range;
duke@435 44
duke@435 45 define_array(IntervalArray, Interval*)
duke@435 46 define_stack(IntervalList, IntervalArray)
duke@435 47
duke@435 48 define_array(IntervalsArray, IntervalList*)
duke@435 49 define_stack(IntervalsList, IntervalsArray)
duke@435 50
duke@435 51 define_array(OopMapArray, OopMap*)
duke@435 52 define_stack(OopMapList, OopMapArray)
duke@435 53
duke@435 54 define_array(ScopeValueArray, ScopeValue*)
duke@435 55
duke@435 56 define_array(LIR_OpListArray, LIR_OpList*);
duke@435 57 define_stack(LIR_OpListStack, LIR_OpListArray);
duke@435 58
duke@435 59
duke@435 60 enum IntervalUseKind {
duke@435 61 // priority of use kinds must be ascending
duke@435 62 noUse = 0,
duke@435 63 loopEndMarker = 1,
duke@435 64 shouldHaveRegister = 2,
duke@435 65 mustHaveRegister = 3,
duke@435 66
duke@435 67 firstValidKind = 1,
duke@435 68 lastValidKind = 3
duke@435 69 };
duke@435 70 define_array(UseKindArray, IntervalUseKind)
duke@435 71 define_stack(UseKindStack, UseKindArray)
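
The ascending priority is what lets the allocator compare use kinds numerically when it looks for a spill candidate. A minimal sketch of such a query (hypothetical helper, not from this file; it would have to live below the Interval definition and it relies on next_usage() returning max_jint when no qualifying use exists):

  // Illustrative only: an interval can be spilled at pos if no use at or
  // after pos demands a physical register outright.
  static bool can_spill_at(Interval* it, int pos) {
    return it->next_usage(mustHaveRegister, pos) == max_jint;
  }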
duke@435 72
duke@435 73
duke@435 74 enum IntervalKind {
duke@435 75 fixedKind = 0, // interval pre-colored by LIRGenerator
duke@435 76 anyKind = 1, // no register/memory allocated by LIRGenerator
duke@435 77 nofKinds,
duke@435 78 firstKind = fixedKind
duke@435 79 };
duke@435 80
duke@435 81
duke@435 82 // during linear scan an interval is in one of four states:
duke@435 83 enum IntervalState {
duke@435 84 unhandledState = 0, // unhandled state (not processed yet)
duke@435 85 activeState = 1, // live and in a physical register
duke@435 86 inactiveState = 2, // in a lifetime hole but still in a physical register
duke@435 87 handledState = 3, // spilled or not live again
duke@435 88 invalidState = -1
duke@435 89 };
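
Informally, relative to the current position p of the interval walker (see IntervalWalker below), the states partition the intervals as follows; this is a summary, not a formal invariant:

  //   unhandledState: it->from() > p                (not reached yet)
  //   activeState:    a range of it covers p and it holds a physical register
  //   inactiveState:  it->from() <= p < it->to(), but p falls into a hole between ranges
  //   handledState:   it->to() <= p, or it was spilled -- never considered again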
duke@435 90
duke@435 91
duke@435 92 enum IntervalSpillState {
duke@435 93 noDefinitionFound, // starting state of calculation: no definition found yet
duke@435 94 oneDefinitionFound, // one definition has already been found.
duke@435 95 // Note: two consecutive definitions are treated as one (e.g. consecutive move and add because of two-operand LIR form)
duke@435 96 // the position of this definition is stored in _definition_pos
duke@435 97 oneMoveInserted, // one spill move has already been inserted.
duke@435 98 storeAtDefinition, // the interval should be stored immediately after its definition because otherwise
duke@435 99 // there would be multiple redundant stores
duke@435 100 startInMemory, // the interval starts in memory (e.g. method parameter), so a store is never necessary
duke@435 101 noOptimization // the interval has more than one definition (e.g. resulting from phi moves), so stores to memory are not optimized
duke@435 102 };
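
These states advance monotonically for each interval (set_spill_state() below asserts that the state never decreases). A plausible progression for a value that ends up stored at its definition:

  // noDefinitionFound -> oneDefinitionFound -> oneMoveInserted
  //   -> storeAtDefinition (once further spill stores would become redundant)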
duke@435 103
duke@435 104
duke@435 105 #define for_each_interval_kind(kind) \
duke@435 106 for (IntervalKind kind = firstKind; kind < nofKinds; kind = (IntervalKind)(kind + 1))
duke@435 107
duke@435 108 #define for_each_visitor_mode(mode) \
duke@435 109 for (LIR_OpVisitState::OprMode mode = LIR_OpVisitState::firstMode; mode < LIR_OpVisitState::numModes; mode = (LIR_OpVisitState::OprMode)(mode + 1))
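
Illustrative use of the iteration macros, assuming an IntervalWalker context; process_list() is a hypothetical placeholder:

  for_each_interval_kind(kind) {
    process_list(unhandled_first(kind));  // kind runs over fixedKind, anyKind
  }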
duke@435 110
duke@435 111
duke@435 112 class LinearScan : public CompilationResourceObj {
duke@435 113 // declare classes used by LinearScan as friends because they
duke@435 114 // need a wide variety of functions declared here
duke@435 115 //
duke@435 116 // Only the small interface to the rest of the compiler is public
duke@435 117 friend class Interval;
duke@435 118 friend class IntervalWalker;
duke@435 119 friend class LinearScanWalker;
duke@435 120 friend class FpuStackAllocator;
duke@435 121 friend class MoveResolver;
duke@435 122 friend class LinearScanStatistic;
duke@435 123 friend class LinearScanTimers;
duke@435 124 friend class RegisterVerifier;
duke@435 125
duke@435 126 public:
duke@435 127 enum {
duke@435 128 any_reg = -1,
duke@435 129 nof_cpu_regs = pd_nof_cpu_regs_linearscan,
duke@435 130 nof_fpu_regs = pd_nof_fpu_regs_linearscan,
duke@435 131 nof_xmm_regs = pd_nof_xmm_regs_linearscan,
duke@435 132 nof_regs = nof_cpu_regs + nof_fpu_regs + nof_xmm_regs
duke@435 133 };
duke@435 134
duke@435 135 private:
duke@435 136 Compilation* _compilation;
duke@435 137 IR* _ir;
duke@435 138 LIRGenerator* _gen;
duke@435 139 FrameMap* _frame_map;
duke@435 140
duke@435 141 BlockList _cached_blocks; // cached list with all blocks in linear-scan order (only correct as long as the original list is unchanged)
duke@435 142 int _num_virtual_regs; // number of virtual registers (without new registers introduced because of splitting intervals)
duke@435 143 bool _has_fpu_registers; // true if this method uses any floating point registers (and so fpu stack allocation is necessary)
duke@435 144 int _num_calls; // total number of calls in this method
duke@435 145 int _max_spills; // number of stack slots used for intervals allocated to memory
duke@435 146 int _unused_spill_slot; // unused spill slot for a single-word value because of alignment of a double-word value
duke@435 147
duke@435 148 IntervalList _intervals; // mapping from register number to interval
duke@435 149 IntervalList* _new_intervals_from_allocation; // list with all intervals created during allocation when an existing interval is split
duke@435 150 IntervalArray* _sorted_intervals; // intervals sorted by Interval::from()
never@2404 151 bool _needs_full_resort; // set to true if an Interval::from() is changed and _sorted_intervals must be resorted
duke@435 152
duke@435 153 LIR_OpArray _lir_ops; // mapping from LIR_Op id to LIR_Op node
duke@435 154 BlockBeginArray _block_of_op; // mapping from LIR_Op id to the BlockBegin containing this instruction
duke@435 155 BitMap _has_info; // bit set for each LIR_Op id that has a CodeEmitInfo
duke@435 156 BitMap _has_call; // bit set for each LIR_Op id that destroys all caller save registers
duke@435 157 BitMap2D _interval_in_loop; // 2D bit map: for each virtual register, the loops it is contained in
duke@435 158
duke@435 159 // cached debug info to prevent multiple creation of same object
duke@435 160 // TODO: cached scope values for registers could be static
duke@435 161 ScopeValueArray _scope_value_cache;
duke@435 162
duke@435 163 static ConstantOopWriteValue _oop_null_scope_value;
duke@435 164 static ConstantIntValue _int_m1_scope_value;
duke@435 165 static ConstantIntValue _int_0_scope_value;
duke@435 166 static ConstantIntValue _int_1_scope_value;
duke@435 167 static ConstantIntValue _int_2_scope_value;
duke@435 168
duke@435 169 // accessors
duke@435 170 IR* ir() const { return _ir; }
duke@435 171 Compilation* compilation() const { return _compilation; }
duke@435 172 LIRGenerator* gen() const { return _gen; }
duke@435 173 FrameMap* frame_map() const { return _frame_map; }
duke@435 174
duke@435 175 // unified bailout support
duke@435 176 void bailout(const char* msg) const { compilation()->bailout(msg); }
duke@435 177 bool bailed_out() const { return compilation()->bailed_out(); }
duke@435 178
duke@435 179 // access to block list (sorted in linear scan order)
duke@435 180 int block_count() const { assert(_cached_blocks.length() == ir()->linear_scan_order()->length(), "invalid cached block list"); return _cached_blocks.length(); }
duke@435 181 BlockBegin* block_at(int idx) const { assert(_cached_blocks.at(idx) == ir()->linear_scan_order()->at(idx), "invalid cached block list"); return _cached_blocks.at(idx); }
duke@435 182
duke@435 183 int num_virtual_regs() const { return _num_virtual_regs; }
duke@435 184 // size of live_in and live_out sets of BasicBlocks (BitMap needs rounded size for iteration)
duke@435 185 int live_set_size() const { return round_to(_num_virtual_regs, BitsPerWord); }
duke@435 186 bool has_fpu_registers() const { return _has_fpu_registers; }
duke@435 187 int num_loops() const { return ir()->num_loops(); }
duke@435 188 bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
duke@435 189
duke@435 190 // handling of fpu stack allocation (platform dependent, needed for debug information generation)
never@739 191 #ifdef X86
duke@435 192 FpuStackAllocator* _fpu_stack_allocator;
duke@435 193 bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
duke@435 194 #else
duke@435 195 bool use_fpu_stack_allocation() const { return false; }
duke@435 196 #endif
duke@435 197
duke@435 198
duke@435 199 // access to interval list
duke@435 200 int interval_count() const { return _intervals.length(); }
duke@435 201 Interval* interval_at(int reg_num) const { return _intervals.at(reg_num); }
duke@435 202
duke@435 203 IntervalList* new_intervals_from_allocation() const { return _new_intervals_from_allocation; }
duke@435 204
duke@435 205 // access to LIR_Ops and Blocks indexed by op_id
duke@435 206 int max_lir_op_id() const { assert(_lir_ops.length() > 0, "no operations"); return (_lir_ops.length() - 1) << 1; }
duke@435 207 LIR_Op* lir_op_with_id(int op_id) const { assert(op_id >= 0 && op_id <= max_lir_op_id() && op_id % 2 == 0, "op_id out of range or not even"); return _lir_ops.at(op_id >> 1); }
duke@435 208 BlockBegin* block_of_op_with_id(int op_id) const { assert(_block_of_op.length() > 0 && op_id >= 0 && op_id <= max_lir_op_id() + 1, "op_id out of range"); return _block_of_op.at(op_id >> 1); }
duke@435 209
duke@435 210 bool is_block_begin(int op_id) { return op_id == 0 || block_of_op_with_id(op_id) != block_of_op_with_id(op_id - 1); }
duke@435 211 bool covers_block_begin(int op_id_1, int op_id_2) { return block_of_op_with_id(op_id_1) != block_of_op_with_id(op_id_2); }
duke@435 212
duke@435 213 bool has_call(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_call.at(op_id >> 1); }
duke@435 214 bool has_info(int op_id) { assert(op_id % 2 == 0, "must be even"); return _has_info.at(op_id >> 1); }
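
  // Note on the op_id encoding used by the accessors above: ids are assigned
  // in steps of 2, so every LIR_Op has an even id stored at array index
  // op_id >> 1 (e.g. lir_op_with_id(4) is _lir_ops.at(2)); the odd values in
  // between presumably denote gaps between operations, e.g. as interval
  // split positions.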
duke@435 215
duke@435 216
duke@435 217 // functions for converting LIR-Operands to register numbers
duke@435 218 static bool is_valid_reg_num(int reg_num) { return reg_num >= 0; }
duke@435 219 static int reg_num(LIR_Opr opr);
duke@435 220 static int reg_numHi(LIR_Opr opr);
duke@435 221
duke@435 222 // functions for classification of intervals
duke@435 223 static bool is_precolored_interval(const Interval* i);
duke@435 224 static bool is_virtual_interval(const Interval* i);
duke@435 225
duke@435 226 static bool is_precolored_cpu_interval(const Interval* i);
duke@435 227 static bool is_virtual_cpu_interval(const Interval* i);
duke@435 228 static bool is_precolored_fpu_interval(const Interval* i);
duke@435 229 static bool is_virtual_fpu_interval(const Interval* i);
duke@435 230
duke@435 231 static bool is_in_fpu_register(const Interval* i);
duke@435 232 static bool is_oop_interval(const Interval* i);
duke@435 233
duke@435 234
duke@435 235 // General helper functions
duke@435 236 int allocate_spill_slot(bool double_word);
duke@435 237 void assign_spill_slot(Interval* it);
duke@435 238 void propagate_spill_slots();
duke@435 239
duke@435 240 Interval* create_interval(int reg_num);
duke@435 241 void append_interval(Interval* it);
duke@435 242 void copy_register_flags(Interval* from, Interval* to);
duke@435 243
duke@435 244 // platform dependent functions
duke@435 245 static bool is_processed_reg_num(int reg_num);
duke@435 246 static int num_physical_regs(BasicType type);
duke@435 247 static bool requires_adjacent_regs(BasicType type);
duke@435 248 static bool is_caller_save(int assigned_reg);
duke@435 249
duke@435 250 // spill move optimization: eliminate moves from register to stack if
duke@435 251 // stack slot is known to be correct
duke@435 252 void change_spill_definition_pos(Interval* interval, int def_pos);
duke@435 253 void change_spill_state(Interval* interval, int spill_pos);
duke@435 254 static bool must_store_at_definition(const Interval* i);
duke@435 255 void eliminate_spill_moves();
duke@435 256
duke@435 257 // Phase 1: number all instructions in all blocks
duke@435 258 void number_instructions();
duke@435 259
duke@435 260 // Phase 2: compute local live sets separately for each block
duke@435 261 // (sets live_gen and live_kill for each block)
duke@435 262 //
duke@435 263 // helper methods used by compute_local_live_sets()
duke@435 264 void set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill);
duke@435 265
duke@435 266 void compute_local_live_sets();
duke@435 267
duke@435 268 // Phase 3: perform a backward dataflow analysis to compute global live sets
duke@435 269 // (sets live_in and live_out for each block)
duke@435 270 void compute_global_live_sets();
duke@435 271
duke@435 272
duke@435 273 // Phase 4: build intervals
duke@435 274 // (fills the list _intervals)
duke@435 275 //
duke@435 276 // helper methods used by build_intervals()
duke@435 277 void add_use (Value value, int from, int to, IntervalUseKind use_kind);
duke@435 278
duke@435 279 void add_def (LIR_Opr opr, int def_pos, IntervalUseKind use_kind);
duke@435 280 void add_use (LIR_Opr opr, int from, int to, IntervalUseKind use_kind);
duke@435 281 void add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind);
duke@435 282
duke@435 283 void add_def (int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type);
duke@435 284 void add_use (int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type);
duke@435 285 void add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type);
duke@435 286
duke@435 287 // Add platform dependent kills for particular LIR ops. Can be used
duke@435 288 // to add platform dependent behaviour for some operations.
duke@435 289 void pd_add_temps(LIR_Op* op);
duke@435 290
duke@435 291 IntervalUseKind use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr);
duke@435 292 IntervalUseKind use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr);
duke@435 293 void handle_method_arguments(LIR_Op* op);
duke@435 294 void handle_doubleword_moves(LIR_Op* op);
duke@435 295 void add_register_hints(LIR_Op* op);
duke@435 296
duke@435 297 void build_intervals();
duke@435 298
duke@435 299
duke@435 300 // Phase 5: actual register allocation
duke@435 301 // (Uses LinearScanWalker)
duke@435 302 //
duke@435 303 // helper functions for building a sorted list of intervals
duke@435 304 NOT_PRODUCT(bool is_sorted(IntervalArray* intervals);)
duke@435 305 static int interval_cmp(Interval** a, Interval** b);
duke@435 306 void add_to_list(Interval** first, Interval** prev, Interval* interval);
duke@435 307 void create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i));
duke@435 308
duke@435 309 void sort_intervals_before_allocation();
duke@435 310 void sort_intervals_after_allocation();
duke@435 311 void allocate_registers();
duke@435 312
duke@435 313
duke@435 314 // Phase 6: resolve data flow
duke@435 315 // (insert moves at edges between blocks if intervals have been split)
duke@435 316 //
duke@435 317 // helper functions for resolve_data_flow()
duke@435 318 Interval* split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode);
duke@435 319 Interval* interval_at_block_begin(BlockBegin* block, int reg_num);
duke@435 320 Interval* interval_at_block_end(BlockBegin* block, int reg_num);
duke@435 321 Interval* interval_at_op_id(int reg_num, int op_id);
duke@435 322 void resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
duke@435 323 void resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver);
duke@435 324 void resolve_data_flow();
duke@435 325
duke@435 326 void resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver);
duke@435 327 void resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver);
duke@435 328 void resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver);
duke@435 329 void resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver);
duke@435 330 void resolve_exception_handlers();
duke@435 331
duke@435 332 // Phase 7: assign register numbers back to LIR
duke@435 333 // (includes computation of debug information and oop maps)
duke@435 334 //
duke@435 335 // helper functions for assign_reg_num()
duke@435 336 VMReg vm_reg_for_interval(Interval* interval);
duke@435 337 VMReg vm_reg_for_operand(LIR_Opr opr);
duke@435 338
duke@435 339 static LIR_Opr operand_for_interval(Interval* interval);
duke@435 340 static LIR_Opr calc_operand_for_interval(const Interval* interval);
duke@435 341 LIR_Opr canonical_spill_opr(Interval* interval);
duke@435 342
duke@435 343 LIR_Opr color_lir_opr(LIR_Opr opr, int id, LIR_OpVisitState::OprMode);
duke@435 344
duke@435 345 // methods used for oop map computation
duke@435 346 IntervalWalker* init_compute_oop_maps();
duke@435 347 OopMap* compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site);
duke@435 348 void compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op);
duke@435 349
duke@435 350 // methods used for debug information computation
duke@435 351 void init_compute_debug_info();
duke@435 352
duke@435 353 MonitorValue* location_for_monitor_index(int monitor_index);
duke@435 354 LocationValue* location_for_name(int name, Location::Type loc_type);
duke@435 355
duke@435 356 int append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
duke@435 357 int append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values);
duke@435 358 int append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values);
duke@435 359
roland@2174 360 IRScopeDebugInfo* compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state);
duke@435 361 void compute_debug_info(CodeEmitInfo* info, int op_id);
duke@435 362
duke@435 363 void assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw);
duke@435 364 void assign_reg_num();
duke@435 365
duke@435 366
duke@435 367 // Phase 8: fpu stack allocation
duke@435 368 // (Used only on x86 when fpu operands are present)
duke@435 369 void allocate_fpu_stack();
duke@435 370
duke@435 371
duke@435 372 // helper functions for printing state
duke@435 373 #ifndef PRODUCT
duke@435 374 static void print_bitmap(BitMap& bitmap);
duke@435 375 void print_intervals(const char* label);
duke@435 376 void print_lir(int level, const char* label, bool hir_valid = true);
duke@435 377 #endif
duke@435 378
duke@435 379 #ifdef ASSERT
duke@435 380 // verification functions for allocation
duke@435 381 // (check that all intervals have a correct register and that no registers are overwritten)
duke@435 382 void verify();
duke@435 383 void verify_intervals();
duke@435 384 void verify_no_oops_in_fixed_intervals();
duke@435 385 void verify_constants();
duke@435 386 void verify_registers();
duke@435 387 #endif
duke@435 388
duke@435 389 public:
duke@435 390 // creation
duke@435 391 LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map);
duke@435 392
duke@435 393 // main entry function: perform linear scan register allocation
duke@435 394 void do_linear_scan();
duke@435 395
duke@435 396 // accessors used by Compilation
duke@435 397 int max_spills() const { return _max_spills; }
duke@435 398 int num_calls() const { assert(_num_calls >= 0, "not set"); return _num_calls; }
duke@435 399
duke@435 400 // entry functions for printing
duke@435 401 #ifndef PRODUCT
duke@435 402 static void print_statistics();
duke@435 403 static void print_timers(double total);
duke@435 404 #endif
duke@435 405 };
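
Typical driving sequence, as a minimal sketch (in practice Compilation owns these objects and checks bailed_out() after each phase):

  LinearScan allocator(ir, gen, frame_map);  // ir, gen, frame_map from the compilation
  allocator.do_linear_scan();                // runs phases 1-8 described above
  int spill_slots = allocator.max_spills();  // used when building the frame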
duke@435 406
duke@435 407
duke@435 408 // Helper class for ordering moves that are inserted at the same position in the LIR.
duke@435 409 // When moves between registers are inserted, it is important that the moves are
duke@435 410 // ordered such that no register is overwritten. So moves from register to stack
duke@435 411 // are processed before moves from stack to register. When moves have circular
duke@435 412 // dependencies, a temporary stack slot is used to break the cycle.
duke@435 413 // The same logic is used by the LinearScanWalker and by LinearScan during resolve_data_flow,
duke@435 414 // and is therefore factored out into a separate class
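
For example, the cyclic mapping r1 -> r2, r2 -> r1 cannot be resolved by plain moves in either order; the resolver breaks such a cycle with a temporary stack slot (registers here are illustrative):

  //   stack_tmp := r1;   // break the cycle via a spill slot
  //   r1        := r2;
  //   r2        := stack_tmp;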
duke@435 415 class MoveResolver: public StackObj {
duke@435 416 private:
duke@435 417 LinearScan* _allocator;
duke@435 418
duke@435 419 LIR_List* _insert_list;
duke@435 420 int _insert_idx;
duke@435 421 LIR_InsertionBuffer _insertion_buffer; // buffer where moves are inserted
duke@435 422
duke@435 423 IntervalList _mapping_from;
duke@435 424 LIR_OprList _mapping_from_opr;
duke@435 425 IntervalList _mapping_to;
duke@435 426 bool _multiple_reads_allowed;
duke@435 427 int _register_blocked[LinearScan::nof_regs];
duke@435 428
duke@435 429 int register_blocked(int reg) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); return _register_blocked[reg]; }
duke@435 430 void set_register_blocked(int reg, int direction) { assert(reg >= 0 && reg < LinearScan::nof_regs, "out of bounds"); assert(direction == 1 || direction == -1, "out of bounds"); _register_blocked[reg] += direction; }
duke@435 431
duke@435 432 void block_registers(Interval* it);
duke@435 433 void unblock_registers(Interval* it);
duke@435 434 bool save_to_process_move(Interval* from, Interval* to);
duke@435 435
duke@435 436 void create_insertion_buffer(LIR_List* list);
duke@435 437 void append_insertion_buffer();
duke@435 438 void insert_move(Interval* from_interval, Interval* to_interval);
duke@435 439 void insert_move(LIR_Opr from_opr, Interval* to_interval);
duke@435 440
duke@435 441 DEBUG_ONLY(void verify_before_resolve();)
duke@435 442 void resolve_mappings();
duke@435 443 public:
duke@435 444 MoveResolver(LinearScan* allocator);
duke@435 445
duke@435 446 DEBUG_ONLY(void check_empty();)
duke@435 447 void set_multiple_reads_allowed() { _multiple_reads_allowed = true; }
duke@435 448 void set_insert_position(LIR_List* insert_list, int insert_idx);
duke@435 449 void move_insert_position(LIR_List* insert_list, int insert_idx);
duke@435 450 void add_mapping(Interval* from, Interval* to);
duke@435 451 void add_mapping(LIR_Opr from, Interval* to);
duke@435 452 void resolve_and_append_moves();
duke@435 453
duke@435 454 LinearScan* allocator() { return _allocator; }
duke@435 455 bool has_mappings() { return _mapping_from.length() > 0; }
duke@435 456 };
duke@435 457
duke@435 458
duke@435 459 class Range : public CompilationResourceObj {
duke@435 460 friend class Interval;
duke@435 461
duke@435 462 private:
duke@435 463 static Range* _end; // sentinel (from == to == max_jint)
duke@435 464
duke@435 465 int _from; // from (inclusive)
duke@435 466 int _to; // to (exclusive)
duke@435 467 Range* _next; // linear list of Ranges
duke@435 468
duke@435 469 // used only by class Interval, so hide them
duke@435 470 bool intersects(Range* r) const { return intersects_at(r) != -1; }
duke@435 471 int intersects_at(Range* r) const;
duke@435 472
duke@435 473 public:
duke@435 474 Range(int from, int to, Range* next);
duke@435 475
iveresov@1939 476 static void initialize(Arena* arena);
duke@435 477 static Range* end() { return _end; }
duke@435 478
duke@435 479 int from() const { return _from; }
duke@435 480 int to() const { return _to; }
duke@435 481 Range* next() const { return _next; }
duke@435 482 void set_from(int from) { _from = from; }
duke@435 483 void set_to(int to) { _to = to; }
duke@435 484 void set_next(Range* next) { _next = next; }
duke@435 485
duke@435 486 // for testing
duke@435 487 void print(outputStream* out = tty) const PRODUCT_RETURN;
duke@435 488 };
duke@435 489
duke@435 490
duke@435 491 // Interval is an ordered list of disjoint ranges.
duke@435 492
duke@435 493 // For pre-colored double word LIR_Oprs, one interval is created for
duke@435 494 // the low word register and one is created for the hi word register.
duke@435 495 // On Intel, only one interval is created for FPU double registers. At
duke@435 496 // all times assigned_reg contains the reg. number of the physical
duke@435 497 // register.
duke@435 498
duke@435 499 // For LIR_Opr in virtual registers a single interval can represent
duke@435 500 // single and double word values. When a physical register is
duke@435 501 // assigned to the interval, assigned_reg contains the
duke@435 502 // phys. reg. number and for double word values assigned_regHi the
duke@435 503 // phys. reg. number of the hi word if there is any. For spilled
duke@435 504 // intervals assigned_reg contains the stack index. assigned_regHi is
duke@435 505 // always -1.
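
A few illustrative cases of these conventions (register and slot numbers are made up):

  //   allocated double-word virtual interval: assigned_reg = 5, assigned_regHi = 6
  //   allocated single-word virtual interval: assigned_reg = 5, assigned_regHi = -1 (any_reg)
  //   spilled interval:                       assigned_reg = stack index, assigned_regHi = -1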
duke@435 506
duke@435 507 class Interval : public CompilationResourceObj {
duke@435 508 private:
duke@435 509 static Interval* _end; // sentinel (interval with only range Range::end())
duke@435 510
duke@435 511 int _reg_num;
duke@435 512 BasicType _type; // valid only for virtual registers
duke@435 513 Range* _first; // sorted list of Ranges
duke@435 514 intStack _use_pos_and_kinds; // sorted list of use-positions and their corresponding use-kinds
duke@435 515
duke@435 516 Range* _current; // interval iteration: the current Range
duke@435 517 Interval* _next; // interval iteration: sorted list of Intervals (ends with sentinel)
duke@435 518 IntervalState _state; // interval iteration: the set this interval currently belongs to
duke@435 519
duke@435 520
duke@435 521 int _assigned_reg;
duke@435 522 int _assigned_regHi;
duke@435 523
duke@435 524 int _cached_to; // cached value: to of last range (-1: not cached)
duke@435 525 LIR_Opr _cached_opr;
duke@435 526 VMReg _cached_vm_reg;
duke@435 527
duke@435 528 Interval* _split_parent; // the original interval from which this interval was derived
duke@435 529 IntervalList _split_children; // list of all intervals that are split off from this interval (only available for split parents)
duke@435 530 Interval* _current_split_child; // the split child that was most recently active or inactive (always stored in split parents)
duke@435 531
duke@435 532 int _canonical_spill_slot; // the stack slot where all split parts of this interval are spilled to (always stored in split parents)
duke@435 533 bool _insert_move_when_activated; // true if a move is inserted between _current_split_child and this interval when the interval becomes active for the first time
duke@435 534 IntervalSpillState _spill_state; // for spill move optimization
duke@435 535 int _spill_definition_pos; // position where the interval is defined (if defined only once)
duke@435 536 Interval* _register_hint; // this interval should be in the same register as the hint interval
duke@435 537
duke@435 538 int calc_to();
duke@435 539 Interval* new_split_child();
duke@435 540 public:
duke@435 541 Interval(int reg_num);
duke@435 542
iveresov@1939 543 static void initialize(Arena* arena);
duke@435 544 static Interval* end() { return _end; }
duke@435 545
duke@435 546 // accessors
duke@435 547 int reg_num() const { return _reg_num; }
duke@435 548 void set_reg_num(int r) { assert(_reg_num == -1, "cannot change reg_num"); _reg_num = r; }
duke@435 549 BasicType type() const { assert(_reg_num == -1 || _reg_num >= LIR_OprDesc::vreg_base, "cannot access type for fixed interval"); return _type; }
duke@435 550 void set_type(BasicType type) { assert(_reg_num < LIR_OprDesc::vreg_base || _type == T_ILLEGAL || _type == type, "overwriting existing type"); _type = type; }
duke@435 551
duke@435 552 Range* first() const { return _first; }
duke@435 553 int from() const { return _first->from(); }
duke@435 554 int to() { if (_cached_to == -1) _cached_to = calc_to(); assert(_cached_to == calc_to(), "invalid cached value"); return _cached_to; }
duke@435 555 int num_use_positions() const { return _use_pos_and_kinds.length() / 2; }
duke@435 556
duke@435 557 Interval* next() const { return _next; }
duke@435 558 Interval** next_addr() { return &_next; }
duke@435 559 void set_next(Interval* next) { _next = next; }
duke@435 560
duke@435 561 int assigned_reg() const { return _assigned_reg; }
duke@435 562 int assigned_regHi() const { return _assigned_regHi; }
duke@435 563 void assign_reg(int reg) { _assigned_reg = reg; _assigned_regHi = LinearScan::any_reg; }
duke@435 564 void assign_reg(int reg,int regHi) { _assigned_reg = reg; _assigned_regHi = regHi; }
duke@435 565
duke@435 566 Interval* register_hint(bool search_split_child = true) const; // calculation needed
duke@435 567 void set_register_hint(Interval* i) { _register_hint = i; }
duke@435 568
duke@435 569 int state() const { return _state; }
duke@435 570 void set_state(IntervalState s) { _state = s; }
duke@435 571
duke@435 572 // access to split parent and split children
duke@435 573 bool is_split_parent() const { return _split_parent == this; }
duke@435 574 bool is_split_child() const { return _split_parent != this; }
duke@435 575 Interval* split_parent() const { assert(_split_parent->is_split_parent(), "must be"); return _split_parent; }
duke@435 576 Interval* split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode);
duke@435 577 Interval* split_child_before_op_id(int op_id);
duke@435 578 bool split_child_covers(int op_id, LIR_OpVisitState::OprMode mode);
duke@435 579 DEBUG_ONLY(void check_split_children();)
duke@435 580
duke@435 581 // information stored in split parent, but available for all children
duke@435 582 int canonical_spill_slot() const { return split_parent()->_canonical_spill_slot; }
duke@435 583 void set_canonical_spill_slot(int slot) { assert(split_parent()->_canonical_spill_slot == -1, "overwriting existing value"); split_parent()->_canonical_spill_slot = slot; }
duke@435 584 Interval* current_split_child() const { return split_parent()->_current_split_child; }
duke@435 585 void make_current_split_child() { split_parent()->_current_split_child = this; }
duke@435 586
duke@435 587 bool insert_move_when_activated() const { return _insert_move_when_activated; }
duke@435 588 void set_insert_move_when_activated(bool b) { _insert_move_when_activated = b; }
duke@435 589
duke@435 590 // for spill optimization
duke@435 591 IntervalSpillState spill_state() const { return split_parent()->_spill_state; }
duke@435 592 int spill_definition_pos() const { return split_parent()->_spill_definition_pos; }
duke@435 593 void set_spill_state(IntervalSpillState state) { assert(state >= spill_state(), "state cannot decrease"); split_parent()->_spill_state = state; }
duke@435 594 void set_spill_definition_pos(int pos) { assert(spill_definition_pos() == -1, "cannot set the position twice"); split_parent()->_spill_definition_pos = pos; }
duke@435 595 // returns true if this interval has a shadow copy on the stack that is always correct
duke@435 596 bool always_in_memory() const { return split_parent()->_spill_state == storeAtDefinition || split_parent()->_spill_state == startInMemory; }
duke@435 597
duke@435 598 // caching of values that take time to compute and are used multiple times
duke@435 599 LIR_Opr cached_opr() const { return _cached_opr; }
duke@435 600 VMReg cached_vm_reg() const { return _cached_vm_reg; }
duke@435 601 void set_cached_opr(LIR_Opr opr) { _cached_opr = opr; }
duke@435 602 void set_cached_vm_reg(VMReg reg) { _cached_vm_reg = reg; }
duke@435 603
duke@435 604 // access to use positions
duke@435 605 int first_usage(IntervalUseKind min_use_kind) const; // id of the first operation requiring this interval in a register
duke@435 606 int next_usage(IntervalUseKind min_use_kind, int from) const; // id of next usage seen from the given position
duke@435 607 int next_usage_exact(IntervalUseKind exact_use_kind, int from) const;
duke@435 608 int previous_usage(IntervalUseKind min_use_kind, int from) const;
duke@435 609
duke@435 610 // manipulating intervals
duke@435 611 void add_use_pos(int pos, IntervalUseKind use_kind);
duke@435 612 void add_range(int from, int to);
duke@435 613 Interval* split(int split_pos);
duke@435 614 Interval* split_from_start(int split_pos);
duke@435 615 void remove_first_use_pos() { _use_pos_and_kinds.truncate(_use_pos_and_kinds.length() - 2); }
duke@435 616
duke@435 617 // test intersection
duke@435 618 bool covers(int op_id, LIR_OpVisitState::OprMode mode) const;
duke@435 619 bool has_hole_between(int from, int to);
duke@435 620 bool intersects(Interval* i) const { return _first->intersects(i->_first); }
duke@435 621 int intersects_at(Interval* i) const { return _first->intersects_at(i->_first); }
duke@435 622
duke@435 623 // range iteration
duke@435 624 void rewind_range() { _current = _first; }
duke@435 625 void next_range() { assert(this != _end, "not allowed on sentinel"); _current = _current->next(); }
duke@435 626 int current_from() const { return _current->from(); }
duke@435 627 int current_to() const { return _current->to(); }
duke@435 628 bool current_at_end() const { return _current == Range::end(); }
duke@435 629 bool current_intersects(Interval* it) { return _current->intersects(it->_current); };
duke@435 630 int current_intersects_at(Interval* it) { return _current->intersects_at(it->_current); };
duke@435 631
duke@435 632 // printing
duke@435 633 void print(outputStream* out = tty) const PRODUCT_RETURN;
duke@435 634 };
duke@435 635
duke@435 636
duke@435 637 class IntervalWalker : public CompilationResourceObj {
duke@435 638 protected:
duke@435 639 Compilation* _compilation;
duke@435 640 LinearScan* _allocator;
duke@435 641
duke@435 642 Interval* _unhandled_first[nofKinds]; // sorted list of intervals, not live before the current position
duke@435 643 Interval* _active_first [nofKinds]; // sorted list of intervals, live at the current position
duke@435 644 Interval* _inactive_first [nofKinds]; // sorted list of intervals, in a lifetime hole at the current position
duke@435 645
duke@435 646 Interval* _current; // the current interval coming from unhandled list
duke@435 647 int _current_position; // the current position (intercept point through the intervals)
duke@435 648 IntervalKind _current_kind; // the kind of the current interval (fixedKind or anyKind)
duke@435 649
duke@435 650
duke@435 651 Compilation* compilation() const { return _compilation; }
duke@435 652 LinearScan* allocator() const { return _allocator; }
duke@435 653
duke@435 654 // unified bailout support
duke@435 655 void bailout(const char* msg) const { compilation()->bailout(msg); }
duke@435 656 bool bailed_out() const { return compilation()->bailed_out(); }
duke@435 657
duke@435 658 void check_bounds(IntervalKind kind) { assert(kind >= fixedKind && kind <= anyKind, "invalid interval_kind"); }
duke@435 659
duke@435 660 Interval** unhandled_first_addr(IntervalKind kind) { check_bounds(kind); return &_unhandled_first[kind]; }
duke@435 661 Interval** active_first_addr(IntervalKind kind) { check_bounds(kind); return &_active_first[kind]; }
duke@435 662 Interval** inactive_first_addr(IntervalKind kind) { check_bounds(kind); return &_inactive_first[kind]; }
duke@435 663
duke@435 664 void append_unsorted(Interval** first, Interval* interval);
duke@435 665 void append_sorted(Interval** first, Interval* interval);
duke@435 666 void append_to_unhandled(Interval** list, Interval* interval);
duke@435 667
duke@435 668 bool remove_from_list(Interval** list, Interval* i);
duke@435 669 void remove_from_list(Interval* i);
duke@435 670
duke@435 671 void next_interval();
duke@435 672 Interval* current() const { return _current; }
duke@435 673 IntervalKind current_kind() const { return _current_kind; }
duke@435 674
duke@435 675 void walk_to(IntervalState state, int from);
duke@435 676
duke@435 677 // activate_current() is called when an unhandled interval becomes active (in current(), current_kind()).
duke@435 678 // Return false if current() should not be moved to the active interval list.
duke@435 679 // It is safe to append current to any interval list but the unhandled list.
duke@435 680 virtual bool activate_current() { return true; }
duke@435 681
duke@435 682 // interval_moved() is called whenever an interval moves from one interval list to another.
duke@435 683 // In the implementation of this method it is prohibited to move the interval to any list.
duke@435 684 virtual void interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to);
duke@435 685
duke@435 686 public:
duke@435 687 IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);
duke@435 688
duke@435 689 Interval* unhandled_first(IntervalKind kind) { check_bounds(kind); return _unhandled_first[kind]; }
duke@435 690 Interval* active_first(IntervalKind kind) { check_bounds(kind); return _active_first[kind]; }
duke@435 691 Interval* inactive_first(IntervalKind kind) { check_bounds(kind); return _inactive_first[kind]; }
duke@435 692
duke@435 693 // active contains the intervals that are live after the lir_op
duke@435 694 void walk_to(int lir_op_id);
duke@435 695 // active contains the intervals that are live before the lir_op
duke@435 696 void walk_before(int lir_op_id) { walk_to(lir_op_id-1); }
duke@435 697 // walk through all intervals
duke@435 698 void walk() { walk_to(max_jint); }
duke@435 699
duke@435 700 int current_position() { return _current_position; }
duke@435 701 };
duke@435 702
duke@435 703
duke@435 704 // The actual linear scan register allocator
duke@435 705 class LinearScanWalker : public IntervalWalker {
duke@435 706 enum {
duke@435 707 any_reg = LinearScan::any_reg
duke@435 708 };
duke@435 709
duke@435 710 private:
duke@435 711 int _first_reg; // the reg. number of the first phys. register
duke@435 712 int _last_reg; // the reg. number of the last phys. register
duke@435 713 int _num_phys_regs; // required by current interval
duke@435 714 bool _adjacent_regs; // whether the lo/hi words of phys. regs must be adjacent
duke@435 715
duke@435 716 int _use_pos[LinearScan::nof_regs];
duke@435 717 int _block_pos[LinearScan::nof_regs];
duke@435 718 IntervalList* _spill_intervals[LinearScan::nof_regs];
duke@435 719
duke@435 720 MoveResolver _move_resolver; // for ordering spill moves
duke@435 721
duke@435 722 // accessors mapped to same functions in class LinearScan
duke@435 723 int block_count() const { return allocator()->block_count(); }
duke@435 724 BlockBegin* block_at(int idx) const { return allocator()->block_at(idx); }
duke@435 725 BlockBegin* block_of_op_with_id(int op_id) const { return allocator()->block_of_op_with_id(op_id); }
duke@435 726
duke@435 727 void init_use_lists(bool only_process_use_pos);
duke@435 728 void exclude_from_use(int reg);
duke@435 729 void exclude_from_use(Interval* i);
duke@435 730 void set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos);
duke@435 731 void set_use_pos(Interval* i, int use_pos, bool only_process_use_pos);
duke@435 732 void set_block_pos(int reg, Interval* i, int block_pos);
duke@435 733 void set_block_pos(Interval* i, int block_pos);
duke@435 734
duke@435 735 void free_exclude_active_fixed();
duke@435 736 void free_exclude_active_any();
duke@435 737 void free_collect_inactive_fixed(Interval* cur);
duke@435 738 void free_collect_inactive_any(Interval* cur);
duke@435 739 void free_collect_unhandled(IntervalKind kind, Interval* cur);
duke@435 740 void spill_exclude_active_fixed();
duke@435 741 void spill_block_unhandled_fixed(Interval* cur);
duke@435 742 void spill_block_inactive_fixed(Interval* cur);
duke@435 743 void spill_collect_active_any();
duke@435 744 void spill_collect_inactive_any(Interval* cur);
duke@435 745
duke@435 746 void insert_move(int op_id, Interval* src_it, Interval* dst_it);
duke@435 747 int find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos);
duke@435 748 int find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization);
duke@435 749 void split_before_usage(Interval* it, int min_split_pos, int max_split_pos);
duke@435 750 void split_for_spilling(Interval* it);
duke@435 751 void split_stack_interval(Interval* it);
duke@435 752 void split_when_partial_register_available(Interval* it, int register_available_until);
duke@435 753 void split_and_spill_interval(Interval* it);
duke@435 754
duke@435 755 int find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
duke@435 756 int find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
duke@435 757 bool alloc_free_reg(Interval* cur);
duke@435 758
duke@435 759 int find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split);
duke@435 760 int find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split);
duke@435 761 void split_and_spill_intersecting_intervals(int reg, int regHi);
duke@435 762 void alloc_locked_reg(Interval* cur);
duke@435 763
duke@435 764 bool no_allocation_possible(Interval* cur);
duke@435 765 void update_phys_reg_range(bool requires_cpu_register);
duke@435 766 void init_vars_for_alloc(Interval* cur);
duke@435 767 bool pd_init_regs_for_alloc(Interval* cur);
duke@435 768
duke@435 769 void combine_spilled_intervals(Interval* cur);
duke@435 770 bool is_move(LIR_Op* op, Interval* from, Interval* to);
duke@435 771
duke@435 772 bool activate_current();
duke@435 773
duke@435 774 public:
duke@435 775 LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first);
duke@435 776
duke@435 777 // must be called when all intervals are allocated
duke@435 778 void finish_allocation() { _move_resolver.resolve_and_append_moves(); }
duke@435 779 };
duke@435 780
duke@435 781
duke@435 782
duke@435 783 /*
duke@435 784 When a block has more than one predecessor, and all predecessors end with
duke@435 785 the same sequence of move-instructions, then these moves can be placed once
duke@435 786 at the beginning of the block instead of multiple times in the predecessors.
duke@435 787
duke@435 788 Similarly, when a block has more than one successor, then equal sequences of
duke@435 789 moves at the beginning of the successors can be placed once at the end of
duke@435 790 the block. But because the moves must be inserted before all branch
duke@435 791 instructions, this works only when there is exactly one conditional branch
duke@435 792 at the end of the block (because the moves must be inserted before all
duke@435 793 branches, but after all compares).
duke@435 794
duke@435 795 This optimization affects all kinds of moves (reg->reg, reg->stack and
duke@435 796 stack->reg). Because this optimization works best when a block contains only
duke@435 797 a few moves, it has a huge impact on the number of blocks that end up
duke@435 798 completely empty.
duke@435 799 */
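
A small before/after sketch of the predecessor case described above (operands illustrative):

  // Before:                               After:
  //   B1: ... ; move r1->s0 ; jump B3       B1: ... ; jump B3
  //   B2: ... ; move r1->s0 ; jump B3       B2: ... ; jump B3
  //   B3: ...                               B3: move r1->s0 ; ...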
duke@435 800 class EdgeMoveOptimizer : public StackObj {
duke@435 801 private:
duke@435 802 // the class maintains a list with the LIR instruction lists of all
duke@435 803 // successors (predecessors) and the current index into these lists
duke@435 804 LIR_OpListStack _edge_instructions;
duke@435 805 intStack _edge_instructions_idx;
duke@435 806
duke@435 807 void init_instructions();
duke@435 808 void append_instructions(LIR_OpList* instructions, int instructions_idx);
duke@435 809 LIR_Op* instruction_at(int edge);
duke@435 810 void remove_cur_instruction(int edge, bool decrement_index);
duke@435 811
duke@435 812 bool operations_different(LIR_Op* op1, LIR_Op* op2);
duke@435 813
duke@435 814 void optimize_moves_at_block_end(BlockBegin* cur);
duke@435 815 void optimize_moves_at_block_begin(BlockBegin* cur);
duke@435 816
duke@435 817 EdgeMoveOptimizer();
duke@435 818
duke@435 819 public:
duke@435 820 static void optimize(BlockList* code);
duke@435 821 };
duke@435 822
duke@435 823
duke@435 824
duke@435 825 class ControlFlowOptimizer : public StackObj {
duke@435 826 private:
duke@435 827 BlockList _original_preds;
duke@435 828
duke@435 829 enum {
duke@435 830 ShortLoopSize = 5
duke@435 831 };
duke@435 832 void reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx);
duke@435 833 void reorder_short_loops(BlockList* code);
duke@435 834
duke@435 835 bool can_delete_block(BlockBegin* cur);
duke@435 836 void substitute_branch_target(BlockBegin* cur, BlockBegin* target_from, BlockBegin* target_to);
duke@435 837 void delete_empty_blocks(BlockList* code);
duke@435 838
duke@435 839 void delete_unnecessary_jumps(BlockList* code);
duke@435 840 void delete_jumps_to_return(BlockList* code);
duke@435 841
duke@435 842 DEBUG_ONLY(void verify(BlockList* code);)
duke@435 843
duke@435 844 ControlFlowOptimizer();
duke@435 845 public:
duke@435 846 static void optimize(BlockList* code);
duke@435 847 };
duke@435 848
duke@435 849
duke@435 850 #ifndef PRODUCT
duke@435 851
duke@435 852 // Helper class for collecting statistics of LinearScan
duke@435 853 class LinearScanStatistic : public StackObj {
duke@435 854 public:
duke@435 855 enum Counter {
duke@435 856 // general counters
duke@435 857 counter_method,
duke@435 858 counter_fpu_method,
duke@435 859 counter_loop_method,
duke@435 860 counter_exception_method,
duke@435 861 counter_loop,
duke@435 862 counter_block,
duke@435 863 counter_loop_block,
duke@435 864 counter_exception_block,
duke@435 865 counter_interval,
duke@435 866 counter_fixed_interval,
duke@435 867 counter_range,
duke@435 868 counter_fixed_range,
duke@435 869 counter_use_pos,
duke@435 870 counter_fixed_use_pos,
duke@435 871 counter_spill_slots,
duke@435 872 blank_line_1,
duke@435 873
duke@435 874 // counter for classes of lir instructions
duke@435 875 counter_instruction,
duke@435 876 counter_label,
duke@435 877 counter_entry,
duke@435 878 counter_return,
duke@435 879 counter_call,
duke@435 880 counter_move,
duke@435 881 counter_cmp,
duke@435 882 counter_cond_branch,
duke@435 883 counter_uncond_branch,
duke@435 884 counter_stub_branch,
duke@435 885 counter_alu,
duke@435 886 counter_alloc,
duke@435 887 counter_sync,
duke@435 888 counter_throw,
duke@435 889 counter_unwind,
duke@435 890 counter_typecheck,
duke@435 891 counter_fpu_stack,
duke@435 892 counter_misc_inst,
duke@435 893 counter_other_inst,
duke@435 894 blank_line_2,
duke@435 895
duke@435 896 // counter for different types of moves
duke@435 897 counter_move_total,
duke@435 898 counter_move_reg_reg,
duke@435 899 counter_move_reg_stack,
duke@435 900 counter_move_stack_reg,
duke@435 901 counter_move_stack_stack,
duke@435 902 counter_move_reg_mem,
duke@435 903 counter_move_mem_reg,
duke@435 904 counter_move_const_any,
duke@435 905
duke@435 906 number_of_counters,
duke@435 907 invalid_counter = -1
duke@435 908 };
duke@435 909
duke@435 910 private:
duke@435 911 int _counters_sum[number_of_counters];
duke@435 912 int _counters_max[number_of_counters];
duke@435 913
duke@435 914 void inc_counter(Counter idx, int value = 1) { _counters_sum[idx] += value; }
duke@435 915
duke@435 916 const char* counter_name(int counter_idx);
duke@435 917 Counter base_counter(int counter_idx);
duke@435 918
duke@435 919 void sum_up(LinearScanStatistic &method_statistic);
duke@435 920 void collect(LinearScan* allocator);
duke@435 921
duke@435 922 public:
duke@435 923 LinearScanStatistic();
duke@435 924 void print(const char* title);
duke@435 925 static void compute(LinearScan* allocator, LinearScanStatistic &global_statistic);
duke@435 926 };
duke@435 927
duke@435 928
duke@435 929 // Helper class for collecting compilation time of LinearScan
duke@435 930 class LinearScanTimers : public StackObj {
duke@435 931 public:
duke@435 932 enum Timer {
duke@435 933 timer_do_nothing,
duke@435 934 timer_number_instructions,
duke@435 935 timer_compute_local_live_sets,
duke@435 936 timer_compute_global_live_sets,
duke@435 937 timer_build_intervals,
duke@435 938 timer_sort_intervals_before,
duke@435 939 timer_allocate_registers,
duke@435 940 timer_resolve_data_flow,
duke@435 941 timer_sort_intervals_after,
duke@435 942 timer_eliminate_spill_moves,
duke@435 943 timer_assign_reg_num,
duke@435 944 timer_allocate_fpu_stack,
duke@435 945 timer_optimize_lir,
duke@435 946
duke@435 947 number_of_timers
duke@435 948 };
duke@435 949
duke@435 950 private:
duke@435 951 elapsedTimer _timers[number_of_timers];
duke@435 952 const char* timer_name(int idx);
duke@435 953
duke@435 954 public:
duke@435 955 LinearScanTimers();
duke@435 956
duke@435 957 void begin_method(); // called for each method when register allocation starts
duke@435 958 void end_method(LinearScan* allocator); // called for each method when register allocation completed
duke@435 959 void print(double total_time); // called before termination of VM to print global summary
duke@435 960
duke@435 961 elapsedTimer* timer(int idx) { return &(_timers[idx]); }
duke@435 962 };
duke@435 963
duke@435 964
duke@435 965 #endif // ifndef PRODUCT
duke@435 966
duke@435 967
duke@435 968 // Pick up platform-dependent implementation details
stefank@2314 969 #ifdef TARGET_ARCH_x86
stefank@2314 970 # include "c1_LinearScan_x86.hpp"
stefank@2314 971 #endif
stefank@2314 972 #ifdef TARGET_ARCH_sparc
stefank@2314 973 # include "c1_LinearScan_sparc.hpp"
stefank@2314 974 #endif
bobv@2508 975 #ifdef TARGET_ARCH_arm
bobv@2508 976 # include "c1_LinearScan_arm.hpp"
bobv@2508 977 #endif
bobv@2508 978 #ifdef TARGET_ARCH_ppc
bobv@2508 979 # include "c1_LinearScan_ppc.hpp"
bobv@2508 980 #endif
stefank@2314 981
stefank@2314 982
stefank@2314 983 #endif // SHARE_VM_C1_C1_LINEARSCAN_HPP
