src/share/vm/c1/c1_LinearScan.cpp

changeset:   5277:01522ca68fc7
author:      johnc
date:        Tue, 18 Jun 2013 12:31:07 -0700
parent:      4860:46f6f063b272
child:       5994:9acbfe04b5c3
permissions: -rw-r--r--

8015237: Parallelize string table scanning during strong root processing
Summary: Parallelize the scanning of the intern string table by having each GC worker claim a given number of buckets. Changes were also reviewed by Per Liden <per.liden@oracle.com>.
Reviewed-by: tschatzl, stefank, twisti

duke@435 1 /*
mikael@4153 2 * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "c1/c1_CFGPrinter.hpp"
stefank@2314 27 #include "c1/c1_CodeStubs.hpp"
stefank@2314 28 #include "c1/c1_Compilation.hpp"
stefank@2314 29 #include "c1/c1_FrameMap.hpp"
stefank@2314 30 #include "c1/c1_IR.hpp"
stefank@2314 31 #include "c1/c1_LIRGenerator.hpp"
stefank@2314 32 #include "c1/c1_LinearScan.hpp"
stefank@2314 33 #include "c1/c1_ValueStack.hpp"
stefank@2314 34 #include "utilities/bitMap.inline.hpp"
stefank@2314 35 #ifdef TARGET_ARCH_x86
stefank@2314 36 # include "vmreg_x86.inline.hpp"
stefank@2314 37 #endif
stefank@2314 38 #ifdef TARGET_ARCH_sparc
stefank@2314 39 # include "vmreg_sparc.inline.hpp"
stefank@2314 40 #endif
stefank@2314 41 #ifdef TARGET_ARCH_zero
stefank@2314 42 # include "vmreg_zero.inline.hpp"
stefank@2314 43 #endif
bobv@2508 44 #ifdef TARGET_ARCH_arm
bobv@2508 45 # include "vmreg_arm.inline.hpp"
bobv@2508 46 #endif
bobv@2508 47 #ifdef TARGET_ARCH_ppc
bobv@2508 48 # include "vmreg_ppc.inline.hpp"
bobv@2508 49 #endif
duke@435 50
duke@435 51
duke@435 52 #ifndef PRODUCT
duke@435 53
duke@435 54 static LinearScanStatistic _stat_before_alloc;
duke@435 55 static LinearScanStatistic _stat_after_asign;
duke@435 56 static LinearScanStatistic _stat_final;
duke@435 57
duke@435 58 static LinearScanTimers _total_timer;
duke@435 59
duke@435 60 // helper macro for short definition of timer
duke@435 61 #define TIME_LINEAR_SCAN(timer_name) TraceTime _block_timer("", _total_timer.timer(LinearScanTimers::timer_name), TimeLinearScan || TimeEachLinearScan, Verbose);
duke@435 62
duke@435 63 // helper macro for short definition of trace-output inside code
duke@435 64 #define TRACE_LINEAR_SCAN(level, code) \
duke@435 65 if (TraceLinearScanLevel >= level) { \
duke@435 66 code; \
duke@435 67 }
duke@435 68
duke@435 69 #else
duke@435 70
duke@435 71 #define TIME_LINEAR_SCAN(timer_name)
duke@435 72 #define TRACE_LINEAR_SCAN(level, code)
duke@435 73
duke@435 74 #endif
duke@435 75
duke@435 76 // Map BasicType to spill size in 32-bit words, matching VMReg's notion of words
duke@435 77 #ifdef _LP64
duke@435 78 static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 2, 2, 0, 1, -1};
duke@435 79 #else
duke@435 80 static int type2spill_size[T_CONFLICT+1]={ -1, 0, 0, 0, 1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 0, 1, -1};
duke@435 81 #endif
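// An illustrative reading of the table above (indices follow the BasicType
// enumeration): T_LONG and T_DOUBLE always occupy two 32-bit words, while
// T_OBJECT and T_ARRAY need a second word only on _LP64 platforms, where
// pointers are 8 bytes wide. Entries of -1 (the first slot and T_CONFLICT)
// mark types that are never spilled.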
duke@435 82
duke@435 83
duke@435 84 // Implementation of LinearScan
duke@435 85
duke@435 86 LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
duke@435 87 : _compilation(ir->compilation())
duke@435 88 , _ir(ir)
duke@435 89 , _gen(gen)
duke@435 90 , _frame_map(frame_map)
duke@435 91 , _num_virtual_regs(gen->max_virtual_register_number())
duke@435 92 , _has_fpu_registers(false)
duke@435 93 , _num_calls(-1)
duke@435 94 , _max_spills(0)
duke@435 95 , _unused_spill_slot(-1)
duke@435 96 , _intervals(0) // initialized later with correct length
duke@435 97 , _new_intervals_from_allocation(new IntervalList())
duke@435 98 , _sorted_intervals(NULL)
never@2404 99 , _needs_full_resort(false)
duke@435 100 , _lir_ops(0) // initialized later with correct length
duke@435 101 , _block_of_op(0) // initialized later with correct length
duke@435 102 , _has_info(0)
duke@435 103 , _has_call(0)
duke@435 104 , _scope_value_cache(0) // initialized later with correct length
duke@435 105 , _interval_in_loop(0, 0) // initialized later with correct length
duke@435 106 , _cached_blocks(*ir->linear_scan_order())
never@739 107 #ifdef X86
duke@435 108 , _fpu_stack_allocator(NULL)
duke@435 109 #endif
duke@435 110 {
duke@435 111 assert(this->ir() != NULL, "check if valid");
duke@435 112 assert(this->compilation() != NULL, "check if valid");
duke@435 113 assert(this->gen() != NULL, "check if valid");
duke@435 114 assert(this->frame_map() != NULL, "check if valid");
duke@435 115 }
duke@435 116
duke@435 117
duke@435 118 // ********** functions for converting LIR-Operands to register numbers
duke@435 119 //
duke@435 120 // Emulate a flat register file comprising physical integer registers,
duke@435 121 // physical floating-point registers and virtual registers, in that order.
duke@435 122 // Virtual registers already have appropriate numbers, since V0 is
duke@435 123 // the number of physical registers.
duke@435 124 // Returns -1 for hi word if opr is a single word operand.
duke@435 125 //
duke@435 126 // Note: the inverse operation (calculating an operand for register numbers)
duke@435 127 // is done in calc_operand_for_interval()
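// For illustration: a virtual operand simply reports its vreg_number(); a
// double-word CPU operand reports cpu_regnrLo() as its reg_num and
// cpu_regnrHi() as its reg_numHi; single-word operands report -1 for the
// hi word. Physical FPU/XMM register numbers are rebased by
// pd_first_fpu_reg / pd_first_xmm_reg to fit into the flat numbering.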
duke@435 128
duke@435 129 int LinearScan::reg_num(LIR_Opr opr) {
duke@435 130 assert(opr->is_register(), "should not call this otherwise");
duke@435 131
duke@435 132 if (opr->is_virtual_register()) {
duke@435 133 assert(opr->vreg_number() >= nof_regs, "found a virtual register with a fixed-register number");
duke@435 134 return opr->vreg_number();
duke@435 135 } else if (opr->is_single_cpu()) {
duke@435 136 return opr->cpu_regnr();
duke@435 137 } else if (opr->is_double_cpu()) {
duke@435 138 return opr->cpu_regnrLo();
never@739 139 #ifdef X86
duke@435 140 } else if (opr->is_single_xmm()) {
duke@435 141 return opr->fpu_regnr() + pd_first_xmm_reg;
duke@435 142 } else if (opr->is_double_xmm()) {
duke@435 143 return opr->fpu_regnrLo() + pd_first_xmm_reg;
duke@435 144 #endif
duke@435 145 } else if (opr->is_single_fpu()) {
duke@435 146 return opr->fpu_regnr() + pd_first_fpu_reg;
duke@435 147 } else if (opr->is_double_fpu()) {
duke@435 148 return opr->fpu_regnrLo() + pd_first_fpu_reg;
duke@435 149 } else {
duke@435 150 ShouldNotReachHere();
never@739 151 return -1;
duke@435 152 }
duke@435 153 }
duke@435 154
duke@435 155 int LinearScan::reg_numHi(LIR_Opr opr) {
duke@435 156 assert(opr->is_register(), "should not call this otherwise");
duke@435 157
duke@435 158 if (opr->is_virtual_register()) {
duke@435 159 return -1;
duke@435 160 } else if (opr->is_single_cpu()) {
duke@435 161 return -1;
duke@435 162 } else if (opr->is_double_cpu()) {
duke@435 163 return opr->cpu_regnrHi();
never@739 164 #ifdef X86
duke@435 165 } else if (opr->is_single_xmm()) {
duke@435 166 return -1;
duke@435 167 } else if (opr->is_double_xmm()) {
duke@435 168 return -1;
duke@435 169 #endif
duke@435 170 } else if (opr->is_single_fpu()) {
duke@435 171 return -1;
duke@435 172 } else if (opr->is_double_fpu()) {
duke@435 173 return opr->fpu_regnrHi() + pd_first_fpu_reg;
duke@435 174 } else {
duke@435 175 ShouldNotReachHere();
never@739 176 return -1;
duke@435 177 }
duke@435 178 }
duke@435 179
duke@435 180
duke@435 181 // ********** functions for classification of intervals
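// These predicates partition the flat register numbering defined above:
// numbers in [0, nof_cpu_regs) are physical integer registers, numbers in
// [nof_cpu_regs, nof_regs) are physical FPU registers, and numbers from
// LIR_OprDesc::vreg_base upwards are virtual registers.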
duke@435 182
duke@435 183 bool LinearScan::is_precolored_interval(const Interval* i) {
duke@435 184 return i->reg_num() < LinearScan::nof_regs;
duke@435 185 }
duke@435 186
duke@435 187 bool LinearScan::is_virtual_interval(const Interval* i) {
duke@435 188 return i->reg_num() >= LIR_OprDesc::vreg_base;
duke@435 189 }
duke@435 190
duke@435 191 bool LinearScan::is_precolored_cpu_interval(const Interval* i) {
duke@435 192 return i->reg_num() < LinearScan::nof_cpu_regs;
duke@435 193 }
duke@435 194
duke@435 195 bool LinearScan::is_virtual_cpu_interval(const Interval* i) {
bobv@2036 196 #if defined(__SOFTFP__) || defined(E500V2)
bobv@2036 197 return i->reg_num() >= LIR_OprDesc::vreg_base;
bobv@2036 198 #else
duke@435 199 return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() != T_FLOAT && i->type() != T_DOUBLE);
bobv@2036 200 #endif // __SOFTFP__ or E500V2
duke@435 201 }
duke@435 202
duke@435 203 bool LinearScan::is_precolored_fpu_interval(const Interval* i) {
duke@435 204 return i->reg_num() >= LinearScan::nof_cpu_regs && i->reg_num() < LinearScan::nof_regs;
duke@435 205 }
duke@435 206
duke@435 207 bool LinearScan::is_virtual_fpu_interval(const Interval* i) {
bobv@2036 208 #if defined(__SOFTFP__) || defined(E500V2)
bobv@2036 209 return false;
bobv@2036 210 #else
duke@435 211 return i->reg_num() >= LIR_OprDesc::vreg_base && (i->type() == T_FLOAT || i->type() == T_DOUBLE);
bobv@2036 212 #endif // __SOFTFP__ or E500V2
duke@435 213 }
duke@435 214
duke@435 215 bool LinearScan::is_in_fpu_register(const Interval* i) {
duke@435 216 // fixed intervals not needed for FPU stack allocation
duke@435 217 return i->reg_num() >= nof_regs && pd_first_fpu_reg <= i->assigned_reg() && i->assigned_reg() <= pd_last_fpu_reg;
duke@435 218 }
duke@435 219
duke@435 220 bool LinearScan::is_oop_interval(const Interval* i) {
duke@435 221 // fixed intervals never contain oops
duke@435 222 return i->reg_num() >= nof_regs && i->type() == T_OBJECT;
duke@435 223 }
duke@435 224
duke@435 225
duke@435 226 // ********** General helper functions
duke@435 227
duke@435 228 // compute next unused stack index that can be used for spilling
duke@435 229 int LinearScan::allocate_spill_slot(bool double_word) {
duke@435 230 int spill_slot;
duke@435 231 if (double_word) {
duke@435 232 if ((_max_spills & 1) == 1) {
duke@435 233 // alignment of double-word values
duke@435 234 // the hole created by the alignment is filled with the next single-word value
duke@435 235 assert(_unused_spill_slot == -1, "wasting a spill slot");
duke@435 236 _unused_spill_slot = _max_spills;
duke@435 237 _max_spills++;
duke@435 238 }
duke@435 239 spill_slot = _max_spills;
duke@435 240 _max_spills += 2;
duke@435 241
duke@435 242 } else if (_unused_spill_slot != -1) {
duke@435 243 // re-use hole that was the result of a previous double-word alignment
duke@435 244 spill_slot = _unused_spill_slot;
duke@435 245 _unused_spill_slot = -1;
duke@435 246
duke@435 247 } else {
duke@435 248 spill_slot = _max_spills;
duke@435 249 _max_spills++;
duke@435 250 }
duke@435 251
duke@435 252 int result = spill_slot + LinearScan::nof_regs + frame_map()->argcount();
duke@435 253
duke@435 254 // the class OopMapValue uses only 11 bits for storing the name of the
duke@435 255 // oop location. So a stack slot bigger than 2^11 leads to an overflow
duke@435 256 // that is not reported in product builds. Prevent this by checking the
duke@435 257 // spill slot here (although this value and the location name used later
duke@435 258 // are slightly different)
duke@435 259 if (result > 2000) {
duke@435 260 bailout("too many stack slots used");
duke@435 261 }
duke@435 262
duke@435 263 return result;
duke@435 264 }
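// Worked example of the alignment logic above (slot numbers before the
// nof_regs/argcount offset is added): with _max_spills == 3, a double-word
// request remembers slot 3 in _unused_spill_slot, advances _max_spills to 4,
// and returns the aligned slot 4 (_max_spills becomes 6). A subsequent
// single-word request then reuses the remembered hole, slot 3.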
duke@435 265
duke@435 266 void LinearScan::assign_spill_slot(Interval* it) {
duke@435 267 // assign the canonical spill slot of the parent (if a part of the interval
duke@435 268 // is already spilled) or allocate a new spill slot
duke@435 269 if (it->canonical_spill_slot() >= 0) {
duke@435 270 it->assign_reg(it->canonical_spill_slot());
duke@435 271 } else {
duke@435 272 int spill = allocate_spill_slot(type2spill_size[it->type()] == 2);
duke@435 273 it->set_canonical_spill_slot(spill);
duke@435 274 it->assign_reg(spill);
duke@435 275 }
duke@435 276 }
duke@435 277
duke@435 278 void LinearScan::propagate_spill_slots() {
duke@435 279 if (!frame_map()->finalize_frame(max_spills())) {
duke@435 280 bailout("frame too large");
duke@435 281 }
duke@435 282 }
duke@435 283
duke@435 284 // create a new interval with a predefined reg_num
duke@435 285 // (only used for parent intervals that are created during the building phase)
duke@435 286 Interval* LinearScan::create_interval(int reg_num) {
duke@435 287 assert(_intervals.at(reg_num) == NULL, "overwriting existing interval");
duke@435 288
duke@435 289 Interval* interval = new Interval(reg_num);
duke@435 290 _intervals.at_put(reg_num, interval);
duke@435 291
duke@435 292 // assign register number for precolored intervals
duke@435 293 if (reg_num < LIR_OprDesc::vreg_base) {
duke@435 294 interval->assign_reg(reg_num);
duke@435 295 }
duke@435 296 return interval;
duke@435 297 }
duke@435 298
duke@435 299 // assign a new reg_num to the interval and append it to the list of intervals
duke@435 300 // (only used for child intervals that are created during register allocation)
duke@435 301 void LinearScan::append_interval(Interval* it) {
duke@435 302 it->set_reg_num(_intervals.length());
duke@435 303 _intervals.append(it);
duke@435 304 _new_intervals_from_allocation->append(it);
duke@435 305 }
duke@435 306
duke@435 307 // copy the vreg-flags if an interval is split
duke@435 308 void LinearScan::copy_register_flags(Interval* from, Interval* to) {
duke@435 309 if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::byte_reg)) {
duke@435 310 gen()->set_vreg_flag(to->reg_num(), LIRGenerator::byte_reg);
duke@435 311 }
duke@435 312 if (gen()->is_vreg_flag_set(from->reg_num(), LIRGenerator::callee_saved)) {
duke@435 313 gen()->set_vreg_flag(to->reg_num(), LIRGenerator::callee_saved);
duke@435 314 }
duke@435 315
duke@435 316 // Note: do not copy the must_start_in_memory flag because it is not necessary for child
duke@435 317 // intervals (only the very beginning of the interval must be in memory)
duke@435 318 }
duke@435 319
duke@435 320
duke@435 321 // ********** spill move optimization
duke@435 322 // eliminate moves from register to stack if stack slot is known to be correct
duke@435 323
duke@435 324 // called during building of intervals
duke@435 325 void LinearScan::change_spill_definition_pos(Interval* interval, int def_pos) {
duke@435 326 assert(interval->is_split_parent(), "can only be called for split parents");
duke@435 327
duke@435 328 switch (interval->spill_state()) {
duke@435 329 case noDefinitionFound:
duke@435 330 assert(interval->spill_definition_pos() == -1, "must not be set before");
duke@435 331 interval->set_spill_definition_pos(def_pos);
duke@435 332 interval->set_spill_state(oneDefinitionFound);
duke@435 333 break;
duke@435 334
duke@435 335 case oneDefinitionFound:
duke@435 336 assert(def_pos <= interval->spill_definition_pos(), "positions are processed in reverse order when intervals are created");
duke@435 337 if (def_pos < interval->spill_definition_pos() - 2) {
duke@435 338 // second definition found, so no spill optimization possible for this interval
duke@435 339 interval->set_spill_state(noOptimization);
duke@435 340 } else {
duke@435 341 // two consecutive definitions (because of two-operand LIR form)
duke@435 342 assert(block_of_op_with_id(def_pos) == block_of_op_with_id(interval->spill_definition_pos()), "block must be equal");
duke@435 343 }
duke@435 344 break;
duke@435 345
duke@435 346 case noOptimization:
duke@435 347 // nothing to do
duke@435 348 break;
duke@435 349
duke@435 350 default:
duke@435 351 assert(false, "other states not allowed at this time");
duke@435 352 }
duke@435 353 }
duke@435 354
duke@435 355 // called during register allocation
duke@435 356 void LinearScan::change_spill_state(Interval* interval, int spill_pos) {
duke@435 357 switch (interval->spill_state()) {
duke@435 358 case oneDefinitionFound: {
duke@435 359 int def_loop_depth = block_of_op_with_id(interval->spill_definition_pos())->loop_depth();
duke@435 360 int spill_loop_depth = block_of_op_with_id(spill_pos)->loop_depth();
duke@435 361
duke@435 362 if (def_loop_depth < spill_loop_depth) {
duke@435 363 // the loop depth of the spilling position is higher than the loop depth
duke@435 364 // at the definition of the interval -> move the write to memory out of the loop
duke@435 365 // by storing at the definition of the interval
duke@435 366 interval->set_spill_state(storeAtDefinition);
duke@435 367 } else {
duke@435 368 // the interval is currently spilled only once, so for now there is no
duke@435 369 // reason to store the interval at the definition
duke@435 370 interval->set_spill_state(oneMoveInserted);
duke@435 371 }
duke@435 372 break;
duke@435 373 }
duke@435 374
duke@435 375 case oneMoveInserted: {
duke@435 376 // the interval is spilled more than once, so it is better to store it to
duke@435 377 // memory at the definition
duke@435 378 interval->set_spill_state(storeAtDefinition);
duke@435 379 break;
duke@435 380 }
duke@435 381
duke@435 382 case storeAtDefinition:
duke@435 383 case startInMemory:
duke@435 384 case noOptimization:
duke@435 385 case noDefinitionFound:
duke@435 386 // nothing to do
duke@435 387 break;
duke@435 388
duke@435 389 default:
duke@435 390 assert(false, "other states not allowed at this time");
duke@435 391 }
duke@435 392 }
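// Sketch of the spill-state machine driven by the two functions above:
//
//   noDefinitionFound  --(first def)---------------------> oneDefinitionFound
//   oneDefinitionFound --(second def)--------------------> noOptimization
//   oneDefinitionFound --(spill in deeper loop)----------> storeAtDefinition
//   oneDefinitionFound --(first spill, not deeper)-------> oneMoveInserted
//   oneMoveInserted    --(spilled again)-----------------> storeAtDefinition
//
// startInMemory, noOptimization and storeAtDefinition are terminal states here.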
duke@435 393
duke@435 394
duke@435 395 bool LinearScan::must_store_at_definition(const Interval* i) {
duke@435 396 return i->is_split_parent() && i->spill_state() == storeAtDefinition;
duke@435 397 }
duke@435 398
duke@435 399 // called once before assignment of register numbers
duke@435 400 void LinearScan::eliminate_spill_moves() {
duke@435 401 TIME_LINEAR_SCAN(timer_eliminate_spill_moves);
duke@435 402 TRACE_LINEAR_SCAN(3, tty->print_cr("***** Eliminating unnecessary spill moves"));
duke@435 403
duke@435 404 // collect all intervals that must be stored after their definition.
duke@435 405 // the list is sorted by Interval::spill_definition_pos
duke@435 406 Interval* interval;
duke@435 407 Interval* temp_list;
duke@435 408 create_unhandled_lists(&interval, &temp_list, must_store_at_definition, NULL);
duke@435 409
duke@435 410 #ifdef ASSERT
duke@435 411 Interval* prev = NULL;
duke@435 412 Interval* temp = interval;
duke@435 413 while (temp != Interval::end()) {
duke@435 414 assert(temp->spill_definition_pos() > 0, "invalid spill definition pos");
duke@435 415 if (prev != NULL) {
duke@435 416 assert(temp->from() >= prev->from(), "intervals not sorted");
duke@435 417 assert(temp->spill_definition_pos() >= prev->spill_definition_pos(), "when intervals are sorted by from, then they must also be sorted by spill_definition_pos");
duke@435 418 }
duke@435 419
duke@435 420 assert(temp->canonical_spill_slot() >= LinearScan::nof_regs, "interval has no spill slot assigned");
duke@435 421 assert(temp->spill_definition_pos() >= temp->from(), "invalid order");
duke@435 422 assert(temp->spill_definition_pos() <= temp->from() + 2, "only intervals defined once at their start-pos can be optimized");
duke@435 423
duke@435 424 TRACE_LINEAR_SCAN(4, tty->print_cr("interval %d (from %d to %d) must be stored at %d", temp->reg_num(), temp->from(), temp->to(), temp->spill_definition_pos()));
duke@435 425
duke@435 426 temp = temp->next();
duke@435 427 }
duke@435 428 #endif
duke@435 429
duke@435 430 LIR_InsertionBuffer insertion_buffer;
duke@435 431 int num_blocks = block_count();
duke@435 432 for (int i = 0; i < num_blocks; i++) {
duke@435 433 BlockBegin* block = block_at(i);
duke@435 434 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 435 int num_inst = instructions->length();
duke@435 436 bool has_new = false;
duke@435 437
duke@435 438 // iterate all instructions of the block. skip the first because it is always a label
duke@435 439 for (int j = 1; j < num_inst; j++) {
duke@435 440 LIR_Op* op = instructions->at(j);
duke@435 441 int op_id = op->id();
duke@435 442
duke@435 443 if (op_id == -1) {
duke@435 444 // remove move from register to stack if the stack slot is guaranteed to be correct.
duke@435 445 // only moves that have been inserted by LinearScan can be removed.
duke@435 446 assert(op->code() == lir_move, "only moves can have a op_id of -1");
duke@435 447 assert(op->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 448 assert(op->as_Op1()->result_opr()->is_virtual(), "LinearScan inserts only moves to virtual registers");
duke@435 449
duke@435 450 LIR_Op1* op1 = (LIR_Op1*)op;
duke@435 451 Interval* interval = interval_at(op1->result_opr()->vreg_number());
duke@435 452
duke@435 453 if (interval->assigned_reg() >= LinearScan::nof_regs && interval->always_in_memory()) {
duke@435 454 // move target is a stack slot that is always correct, so eliminate instruction
duke@435 455 TRACE_LINEAR_SCAN(4, tty->print_cr("eliminating move from interval %d to %d", op1->in_opr()->vreg_number(), op1->result_opr()->vreg_number()));
duke@435 456 instructions->at_put(j, NULL); // NULL-instructions are deleted by assign_reg_num
duke@435 457 }
duke@435 458
duke@435 459 } else {
duke@435 460 // insert move from register to stack just after the beginning of the interval
duke@435 461 assert(interval == Interval::end() || interval->spill_definition_pos() >= op_id, "invalid order");
duke@435 462 assert(interval == Interval::end() || (interval->is_split_parent() && interval->spill_state() == storeAtDefinition), "invalid interval");
duke@435 463
duke@435 464 while (interval != Interval::end() && interval->spill_definition_pos() == op_id) {
duke@435 465 if (!has_new) {
duke@435 466 // prepare insertion buffer (appended when all instructions of the block are processed)
duke@435 467 insertion_buffer.init(block->lir());
duke@435 468 has_new = true;
duke@435 469 }
duke@435 470
duke@435 471 LIR_Opr from_opr = operand_for_interval(interval);
duke@435 472 LIR_Opr to_opr = canonical_spill_opr(interval);
duke@435 473 assert(from_opr->is_fixed_cpu() || from_opr->is_fixed_fpu(), "from operand must be a register");
duke@435 474 assert(to_opr->is_stack(), "to operand must be a stack slot");
duke@435 475
duke@435 476 insertion_buffer.move(j, from_opr, to_opr);
duke@435 477 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting move after definition of interval %d to stack slot %d at op_id %d", interval->reg_num(), interval->canonical_spill_slot() - LinearScan::nof_regs, op_id));
duke@435 478
duke@435 479 interval = interval->next();
duke@435 480 }
duke@435 481 }
duke@435 482 } // end of instruction iteration
duke@435 483
duke@435 484 if (has_new) {
duke@435 485 block->lir()->append(&insertion_buffer);
duke@435 486 }
duke@435 487 } // end of block iteration
duke@435 488
duke@435 489 assert(interval == Interval::end(), "missed an interval");
duke@435 490 }
duke@435 491
duke@435 492
duke@435 493 // ********** Phase 1: number all instructions in all blocks
duke@435 494 // Compute depth-first and linear scan block orders, and number LIR_Op nodes for linear scan.
duke@435 495
duke@435 496 void LinearScan::number_instructions() {
duke@435 497 {
duke@435 498 // dummy-timer to measure the cost of the timer itself
duke@435 499 // (this time is then subtracted from all other timers to get the real value)
duke@435 500 TIME_LINEAR_SCAN(timer_do_nothing);
duke@435 501 }
duke@435 502 TIME_LINEAR_SCAN(timer_number_instructions);
duke@435 503
duke@435 504 // Assign IDs to LIR nodes and build a mapping, lir_ops, from ID to LIR_Op node.
duke@435 505 int num_blocks = block_count();
duke@435 506 int num_instructions = 0;
duke@435 507 int i;
duke@435 508 for (i = 0; i < num_blocks; i++) {
duke@435 509 num_instructions += block_at(i)->lir()->instructions_list()->length();
duke@435 510 }
duke@435 511
duke@435 512 // initialize with correct length
duke@435 513 _lir_ops = LIR_OpArray(num_instructions);
duke@435 514 _block_of_op = BlockBeginArray(num_instructions);
duke@435 515
duke@435 516 int op_id = 0;
duke@435 517 int idx = 0;
duke@435 518
duke@435 519 for (i = 0; i < num_blocks; i++) {
duke@435 520 BlockBegin* block = block_at(i);
duke@435 521 block->set_first_lir_instruction_id(op_id);
duke@435 522 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 523
duke@435 524 int num_inst = instructions->length();
duke@435 525 for (int j = 0; j < num_inst; j++) {
duke@435 526 LIR_Op* op = instructions->at(j);
duke@435 527 op->set_id(op_id);
duke@435 528
duke@435 529 _lir_ops.at_put(idx, op);
duke@435 530 _block_of_op.at_put(idx, block);
duke@435 531 assert(lir_op_with_id(op_id) == op, "must match");
duke@435 532
duke@435 533 idx++;
duke@435 534 op_id += 2; // numbering of lir_ops by two
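      // illustrative note: giving instructions only even ids leaves the odd
      // positions between two instructions free, so the allocator can refer
      // to a point before or after an operation (e.g. when splitting
      // intervals or inserting spill moves)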
duke@435 535 }
duke@435 536 block->set_last_lir_instruction_id(op_id - 2);
duke@435 537 }
duke@435 538 assert(idx == num_instructions, "must match");
duke@435 539 assert(idx * 2 == op_id, "must match");
duke@435 540
duke@435 541 _has_call = BitMap(num_instructions); _has_call.clear();
duke@435 542 _has_info = BitMap(num_instructions); _has_info.clear();
duke@435 543 }
duke@435 544
duke@435 545
duke@435 546 // ********** Phase 2: compute local live sets separately for each block
duke@435 547 // (sets live_gen and live_kill for each block)
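//
// As an illustrative example of the sets computed below, for a block
//   v10 = v5 + v6    (v5 and v6 are used before any local definition)
//   v5  = v10 * 2    (v5 is redefined)
// live_gen = { v5, v6 } and live_kill = { v10, v5 }.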
duke@435 548
duke@435 549 void LinearScan::set_live_gen_kill(Value value, LIR_Op* op, BitMap& live_gen, BitMap& live_kill) {
duke@435 550 LIR_Opr opr = value->operand();
duke@435 551 Constant* con = value->as_Constant();
duke@435 552
duke@435 553 // check some assumptions about debug information
duke@435 554 assert(!value->type()->is_illegal(), "if this local is used by the interpreter it shouldn't be of indeterminate type");
duke@435 555 assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands");
duke@435 556 assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");
duke@435 557
duke@435 558 if ((con == NULL || con->is_pinned()) && opr->is_register()) {
duke@435 559 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 560 int reg = opr->vreg_number();
duke@435 561 if (!live_kill.at(reg)) {
duke@435 562 live_gen.set_bit(reg);
duke@435 563 TRACE_LINEAR_SCAN(4, tty->print_cr(" Setting live_gen for value %c%d, LIR op_id %d, register number %d", value->type()->tchar(), value->id(), op->id(), reg));
duke@435 564 }
duke@435 565 }
duke@435 566 }
duke@435 567
duke@435 568
duke@435 569 void LinearScan::compute_local_live_sets() {
duke@435 570 TIME_LINEAR_SCAN(timer_compute_local_live_sets);
duke@435 571
duke@435 572 int num_blocks = block_count();
duke@435 573 int live_size = live_set_size();
duke@435 574 bool local_has_fpu_registers = false;
duke@435 575 int local_num_calls = 0;
duke@435 576 LIR_OpVisitState visitor;
duke@435 577
duke@435 578 BitMap2D local_interval_in_loop = BitMap2D(_num_virtual_regs, num_loops());
duke@435 579 local_interval_in_loop.clear();
duke@435 580
duke@435 581 // iterate all blocks
duke@435 582 for (int i = 0; i < num_blocks; i++) {
duke@435 583 BlockBegin* block = block_at(i);
duke@435 584
duke@435 585 BitMap live_gen(live_size); live_gen.clear();
duke@435 586 BitMap live_kill(live_size); live_kill.clear();
duke@435 587
duke@435 588 if (block->is_set(BlockBegin::exception_entry_flag)) {
duke@435 589 // Phi functions at the beginning of an exception handler are
duke@435 590 // implicitly defined (= killed) at the beginning of the block.
duke@435 591 for_each_phi_fun(block, phi,
duke@435 592 live_kill.set_bit(phi->operand()->vreg_number())
duke@435 593 );
duke@435 594 }
duke@435 595
duke@435 596 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 597 int num_inst = instructions->length();
duke@435 598
duke@435 599 // iterate all instructions of the block. skip the first because it is always a label
duke@435 600 assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
duke@435 601 for (int j = 1; j < num_inst; j++) {
duke@435 602 LIR_Op* op = instructions->at(j);
duke@435 603
duke@435 604 // visit operation to collect all operands
duke@435 605 visitor.visit(op);
duke@435 606
duke@435 607 if (visitor.has_call()) {
duke@435 608 _has_call.set_bit(op->id() >> 1);
duke@435 609 local_num_calls++;
duke@435 610 }
duke@435 611 if (visitor.info_count() > 0) {
duke@435 612 _has_info.set_bit(op->id() >> 1);
duke@435 613 }
duke@435 614
duke@435 615 // iterate input operands of instruction
duke@435 616 int k, n, reg;
duke@435 617 n = visitor.opr_count(LIR_OpVisitState::inputMode);
duke@435 618 for (k = 0; k < n; k++) {
duke@435 619 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
duke@435 620 assert(opr->is_register(), "visitor should only return register operands");
duke@435 621
duke@435 622 if (opr->is_virtual_register()) {
duke@435 623 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 624 reg = opr->vreg_number();
duke@435 625 if (!live_kill.at(reg)) {
duke@435 626 live_gen.set_bit(reg);
duke@435 627 TRACE_LINEAR_SCAN(4, tty->print_cr(" Setting live_gen for register %d at instruction %d", reg, op->id()));
duke@435 628 }
duke@435 629 if (block->loop_index() >= 0) {
duke@435 630 local_interval_in_loop.set_bit(reg, block->loop_index());
duke@435 631 }
duke@435 632 local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
duke@435 633 }
duke@435 634
duke@435 635 #ifdef ASSERT
duke@435 636 // fixed intervals are never live at block boundaries, so
duke@435 637 // they need not be processed in live sets.
duke@435 638 // these assertions verify that this holds.
duke@435 639 // the entry block may have incoming values in registers, which is ok.
duke@435 640 if (!opr->is_virtual_register() && block != ir()->start()) {
duke@435 641 reg = reg_num(opr);
duke@435 642 if (is_processed_reg_num(reg)) {
duke@435 643 assert(live_kill.at(reg), "using fixed register that is not defined in this block");
duke@435 644 }
duke@435 645 reg = reg_numHi(opr);
duke@435 646 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 647 assert(live_kill.at(reg), "using fixed register that is not defined in this block");
duke@435 648 }
duke@435 649 }
duke@435 650 #endif
duke@435 651 }
duke@435 652
duke@435 653 // Add uses of live locals from interpreter's point of view for proper debug information generation
duke@435 654 n = visitor.info_count();
duke@435 655 for (k = 0; k < n; k++) {
duke@435 656 CodeEmitInfo* info = visitor.info_at(k);
duke@435 657 ValueStack* stack = info->stack();
duke@435 658 for_each_state_value(stack, value,
duke@435 659 set_live_gen_kill(value, op, live_gen, live_kill)
duke@435 660 );
duke@435 661 }
duke@435 662
duke@435 663 // iterate temp operands of instruction
duke@435 664 n = visitor.opr_count(LIR_OpVisitState::tempMode);
duke@435 665 for (k = 0; k < n; k++) {
duke@435 666 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
duke@435 667 assert(opr->is_register(), "visitor should only return register operands");
duke@435 668
duke@435 669 if (opr->is_virtual_register()) {
duke@435 670 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 671 reg = opr->vreg_number();
duke@435 672 live_kill.set_bit(reg);
duke@435 673 if (block->loop_index() >= 0) {
duke@435 674 local_interval_in_loop.set_bit(reg, block->loop_index());
duke@435 675 }
duke@435 676 local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
duke@435 677 }
duke@435 678
duke@435 679 #ifdef ASSERT
duke@435 680 // fixed intervals are never live at block boundaries, so
duke@435 681 // they need not be processed in live sets
duke@435 682 // process them only in debug mode so that this can be checked
duke@435 683 if (!opr->is_virtual_register()) {
duke@435 684 reg = reg_num(opr);
duke@435 685 if (is_processed_reg_num(reg)) {
duke@435 686 live_kill.set_bit(reg);
duke@435 687 }
duke@435 688 reg = reg_numHi(opr);
duke@435 689 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 690 live_kill.set_bit(reg);
duke@435 691 }
duke@435 692 }
duke@435 693 #endif
duke@435 694 }
duke@435 695
duke@435 696 // iterate output operands of instruction
duke@435 697 n = visitor.opr_count(LIR_OpVisitState::outputMode);
duke@435 698 for (k = 0; k < n; k++) {
duke@435 699 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
duke@435 700 assert(opr->is_register(), "visitor should only return register operands");
duke@435 701
duke@435 702 if (opr->is_virtual_register()) {
duke@435 703 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 704 reg = opr->vreg_number();
duke@435 705 live_kill.set_bit(reg);
duke@435 706 if (block->loop_index() >= 0) {
duke@435 707 local_interval_in_loop.set_bit(reg, block->loop_index());
duke@435 708 }
duke@435 709 local_has_fpu_registers = local_has_fpu_registers || opr->is_virtual_fpu();
duke@435 710 }
duke@435 711
duke@435 712 #ifdef ASSERT
duke@435 713 // fixed intervals are never live at block boundaries, so
duke@435 714 // they need not be processed in live sets
duke@435 715 // process them only in debug mode so that this can be checked
duke@435 716 if (!opr->is_virtual_register()) {
duke@435 717 reg = reg_num(opr);
duke@435 718 if (is_processed_reg_num(reg)) {
duke@435 719 live_kill.set_bit(reg);
duke@435 720 }
duke@435 721 reg = reg_numHi(opr);
duke@435 722 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 723 live_kill.set_bit(reg);
duke@435 724 }
duke@435 725 }
duke@435 726 #endif
duke@435 727 }
duke@435 728 } // end of instruction iteration
duke@435 729
duke@435 730 block->set_live_gen (live_gen);
duke@435 731 block->set_live_kill(live_kill);
duke@435 732 block->set_live_in (BitMap(live_size)); block->live_in().clear();
duke@435 733 block->set_live_out (BitMap(live_size)); block->live_out().clear();
duke@435 734
duke@435 735 TRACE_LINEAR_SCAN(4, tty->print("live_gen B%d ", block->block_id()); print_bitmap(block->live_gen()));
duke@435 736 TRACE_LINEAR_SCAN(4, tty->print("live_kill B%d ", block->block_id()); print_bitmap(block->live_kill()));
duke@435 737 } // end of block iteration
duke@435 738
duke@435 739 // propagate local calculated information into LinearScan object
duke@435 740 _has_fpu_registers = local_has_fpu_registers;
duke@435 741 compilation()->set_has_fpu_code(local_has_fpu_registers);
duke@435 742
duke@435 743 _num_calls = local_num_calls;
duke@435 744 _interval_in_loop = local_interval_in_loop;
duke@435 745 }
duke@435 746
duke@435 747
duke@435 748 // ********** Phase 3: perform a backward dataflow analysis to compute global live sets
duke@435 749 // (sets live_in and live_out for each block)
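//
// The iteration below solves the classic backward dataflow equations until a
// fixpoint is reached:
//   live_out(B) = union of live_in(S) over all successors S of B,
//                 including its exception handlers
//   live_in(B)  = live_gen(B) | (live_out(B) & ~live_kill(B))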
duke@435 750
duke@435 751 void LinearScan::compute_global_live_sets() {
duke@435 752 TIME_LINEAR_SCAN(timer_compute_global_live_sets);
duke@435 753
duke@435 754 int num_blocks = block_count();
duke@435 755 bool change_occurred;
duke@435 756 bool change_occurred_in_block;
duke@435 757 int iteration_count = 0;
duke@435 758 BitMap live_out(live_set_size()); live_out.clear(); // scratch set for calculations
duke@435 759
duke@435 760 // Perform a backward dataflow analysis to compute live_out and live_in for each block.
duke@435 761 // The loop is executed until a fixpoint is reached (no changes in an iteration)
duke@435 762 // Exception handlers must be processed because not all live values are
duke@435 763 // present in the state array, e.g. because of global value numbering
duke@435 764 do {
duke@435 765 change_occurred = false;
duke@435 766
duke@435 767 // iterate all blocks in reverse order
duke@435 768 for (int i = num_blocks - 1; i >= 0; i--) {
duke@435 769 BlockBegin* block = block_at(i);
duke@435 770
duke@435 771 change_occurred_in_block = false;
duke@435 772
duke@435 773 // live_out(block) is the union of live_in(sux), for successors sux of block
duke@435 774 int n = block->number_of_sux();
duke@435 775 int e = block->number_of_exception_handlers();
duke@435 776 if (n + e > 0) {
duke@435 777 // block has successors
duke@435 778 if (n > 0) {
duke@435 779 live_out.set_from(block->sux_at(0)->live_in());
duke@435 780 for (int j = 1; j < n; j++) {
duke@435 781 live_out.set_union(block->sux_at(j)->live_in());
duke@435 782 }
duke@435 783 } else {
duke@435 784 live_out.clear();
duke@435 785 }
duke@435 786 for (int j = 0; j < e; j++) {
duke@435 787 live_out.set_union(block->exception_handler_at(j)->live_in());
duke@435 788 }
duke@435 789
duke@435 790 if (!block->live_out().is_same(live_out)) {
duke@435 791 // A change occurred. Swap the old and new live out sets to avoid copying.
duke@435 792 BitMap temp = block->live_out();
duke@435 793 block->set_live_out(live_out);
duke@435 794 live_out = temp;
duke@435 795
duke@435 796 change_occurred = true;
duke@435 797 change_occurred_in_block = true;
duke@435 798 }
duke@435 799 }
duke@435 800
duke@435 801 if (iteration_count == 0 || change_occurred_in_block) {
duke@435 802 // live_in(block) is the union of live_gen(block) with (live_out(block) & !live_kill(block))
duke@435 803 // note: live_in has to be computed only in first iteration or if live_out has changed!
duke@435 804 BitMap live_in = block->live_in();
duke@435 805 live_in.set_from(block->live_out());
duke@435 806 live_in.set_difference(block->live_kill());
duke@435 807 live_in.set_union(block->live_gen());
duke@435 808 }
duke@435 809
duke@435 810 #ifndef PRODUCT
duke@435 811 if (TraceLinearScanLevel >= 4) {
duke@435 812 char c = ' ';
duke@435 813 if (iteration_count == 0 || change_occurred_in_block) {
duke@435 814 c = '*';
duke@435 815 }
duke@435 816 tty->print("(%d) live_in%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_in());
duke@435 817 tty->print("(%d) live_out%c B%d ", iteration_count, c, block->block_id()); print_bitmap(block->live_out());
duke@435 818 }
duke@435 819 #endif
duke@435 820 }
duke@435 821 iteration_count++;
duke@435 822
duke@435 823 if (change_occurred && iteration_count > 50) {
duke@435 824 BAILOUT("too many iterations in compute_global_live_sets");
duke@435 825 }
duke@435 826 } while (change_occurred);
duke@435 827
duke@435 828
duke@435 829 #ifdef ASSERT
duke@435 830 // check that fixed intervals are not live at block boundaries
duke@435 831 // (live set must be empty at fixed intervals)
duke@435 832 for (int i = 0; i < num_blocks; i++) {
duke@435 833 BlockBegin* block = block_at(i);
duke@435 834 for (int j = 0; j < LIR_OprDesc::vreg_base; j++) {
duke@435 835 assert(block->live_in().at(j) == false, "live_in set of fixed register must be empty");
duke@435 836 assert(block->live_out().at(j) == false, "live_out set of fixed register must be empty");
duke@435 837 assert(block->live_gen().at(j) == false, "live_gen set of fixed register must be empty");
duke@435 838 }
duke@435 839 }
duke@435 840 #endif
duke@435 841
duke@435 842 // check that the live_in set of the first block is empty
duke@435 843 BitMap live_in_args(ir()->start()->live_in().size());
duke@435 844 live_in_args.clear();
duke@435 845 if (!ir()->start()->live_in().is_same(live_in_args)) {
duke@435 846 #ifdef ASSERT
duke@435 847 tty->print_cr("Error: live_in set of first block must be empty (when this fails, virtual registers are used before they are defined)");
duke@435 848 tty->print_cr("affected registers:");
duke@435 849 print_bitmap(ir()->start()->live_in());
duke@435 850
duke@435 851 // print some additional information to simplify debugging
duke@435 852 for (unsigned int i = 0; i < ir()->start()->live_in().size(); i++) {
duke@435 853 if (ir()->start()->live_in().at(i)) {
duke@435 854 Instruction* instr = gen()->instruction_for_vreg(i);
duke@435 855 tty->print_cr("* vreg %d (HIR instruction %c%d)", i, instr == NULL ? ' ' : instr->type()->tchar(), instr == NULL ? 0 : instr->id());
duke@435 856
duke@435 857 for (int j = 0; j < num_blocks; j++) {
duke@435 858 BlockBegin* block = block_at(j);
duke@435 859 if (block->live_gen().at(i)) {
duke@435 860 tty->print_cr(" used in block B%d", block->block_id());
duke@435 861 }
duke@435 862 if (block->live_kill().at(i)) {
duke@435 863 tty->print_cr(" defined in block B%d", block->block_id());
duke@435 864 }
duke@435 865 }
duke@435 866 }
duke@435 867 }
duke@435 868
duke@435 869 #endif
duke@435 870 // when this fails, virtual registers are used before they are defined.
duke@435 871 assert(false, "live_in set of first block must be empty");
duke@435 872 // bail out if this occurs in product mode.
duke@435 873 bailout("live_in set of first block not empty");
duke@435 874 }
duke@435 875 }
duke@435 876
duke@435 877
duke@435 878 // ********** Phase 4: build intervals
duke@435 879 // (fills the list _intervals)
duke@435 880
duke@435 881 void LinearScan::add_use(Value value, int from, int to, IntervalUseKind use_kind) {
duke@435 882 assert(!value->type()->is_illegal(), "if this value is used by the interpreter it shouldn't be of indeterminate type");
duke@435 883 LIR_Opr opr = value->operand();
duke@435 884 Constant* con = value->as_Constant();
duke@435 885
duke@435 886 if ((con == NULL || con->is_pinned()) && opr->is_register()) {
duke@435 887 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 888 add_use(opr, from, to, use_kind);
duke@435 889 }
duke@435 890 }
duke@435 891
duke@435 892
duke@435 893 void LinearScan::add_def(LIR_Opr opr, int def_pos, IntervalUseKind use_kind) {
duke@435 894 TRACE_LINEAR_SCAN(2, tty->print(" def "); opr->print(tty); tty->print_cr(" def_pos %d (%d)", def_pos, use_kind));
duke@435 895 assert(opr->is_register(), "should not be called otherwise");
duke@435 896
duke@435 897 if (opr->is_virtual_register()) {
duke@435 898 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 899 add_def(opr->vreg_number(), def_pos, use_kind, opr->type_register());
duke@435 900
duke@435 901 } else {
duke@435 902 int reg = reg_num(opr);
duke@435 903 if (is_processed_reg_num(reg)) {
duke@435 904 add_def(reg, def_pos, use_kind, opr->type_register());
duke@435 905 }
duke@435 906 reg = reg_numHi(opr);
duke@435 907 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 908 add_def(reg, def_pos, use_kind, opr->type_register());
duke@435 909 }
duke@435 910 }
duke@435 911 }
duke@435 912
duke@435 913 void LinearScan::add_use(LIR_Opr opr, int from, int to, IntervalUseKind use_kind) {
duke@435 914 TRACE_LINEAR_SCAN(2, tty->print(" use "); opr->print(tty); tty->print_cr(" from %d to %d (%d)", from, to, use_kind));
duke@435 915 assert(opr->is_register(), "should not be called otherwise");
duke@435 916
duke@435 917 if (opr->is_virtual_register()) {
duke@435 918 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 919 add_use(opr->vreg_number(), from, to, use_kind, opr->type_register());
duke@435 920
duke@435 921 } else {
duke@435 922 int reg = reg_num(opr);
duke@435 923 if (is_processed_reg_num(reg)) {
duke@435 924 add_use(reg, from, to, use_kind, opr->type_register());
duke@435 925 }
duke@435 926 reg = reg_numHi(opr);
duke@435 927 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 928 add_use(reg, from, to, use_kind, opr->type_register());
duke@435 929 }
duke@435 930 }
duke@435 931 }
duke@435 932
duke@435 933 void LinearScan::add_temp(LIR_Opr opr, int temp_pos, IntervalUseKind use_kind) {
duke@435 934 TRACE_LINEAR_SCAN(2, tty->print(" temp "); opr->print(tty); tty->print_cr(" temp_pos %d (%d)", temp_pos, use_kind));
duke@435 935 assert(opr->is_register(), "should not be called otherwise");
duke@435 936
duke@435 937 if (opr->is_virtual_register()) {
duke@435 938 assert(reg_num(opr) == opr->vreg_number() && !is_valid_reg_num(reg_numHi(opr)), "invalid optimization below");
duke@435 939 add_temp(opr->vreg_number(), temp_pos, use_kind, opr->type_register());
duke@435 940
duke@435 941 } else {
duke@435 942 int reg = reg_num(opr);
duke@435 943 if (is_processed_reg_num(reg)) {
duke@435 944 add_temp(reg, temp_pos, use_kind, opr->type_register());
duke@435 945 }
duke@435 946 reg = reg_numHi(opr);
duke@435 947 if (is_valid_reg_num(reg) && is_processed_reg_num(reg)) {
duke@435 948 add_temp(reg, temp_pos, use_kind, opr->type_register());
duke@435 949 }
duke@435 950 }
duke@435 951 }
duke@435 952
duke@435 953
duke@435 954 void LinearScan::add_def(int reg_num, int def_pos, IntervalUseKind use_kind, BasicType type) {
duke@435 955 Interval* interval = interval_at(reg_num);
duke@435 956 if (interval != NULL) {
duke@435 957 assert(interval->reg_num() == reg_num, "wrong interval");
duke@435 958
duke@435 959 if (type != T_ILLEGAL) {
duke@435 960 interval->set_type(type);
duke@435 961 }
duke@435 962
duke@435 963 Range* r = interval->first();
duke@435 964 if (r->from() <= def_pos) {
duke@435 965 // Update the starting point (when a range is first created for a use, its
duke@435 966 // start is the beginning of the current block until a def is encountered.)
duke@435 967 r->set_from(def_pos);
duke@435 968 interval->add_use_pos(def_pos, use_kind);
duke@435 969
duke@435 970 } else {
duke@435 971 // Dead value - make vacuous interval
duke@435 972 // also add use_kind for dead intervals
duke@435 973 interval->add_range(def_pos, def_pos + 1);
duke@435 974 interval->add_use_pos(def_pos, use_kind);
duke@435 975 TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: def of reg %d at %d occurs without use", reg_num, def_pos));
duke@435 976 }
duke@435 977
duke@435 978 } else {
duke@435 979 // Dead value - make vacuous interval
duke@435 980 // also add use_kind for dead intervals
duke@435 981 interval = create_interval(reg_num);
duke@435 982 if (type != T_ILLEGAL) {
duke@435 983 interval->set_type(type);
duke@435 984 }
duke@435 985
duke@435 986 interval->add_range(def_pos, def_pos + 1);
duke@435 987 interval->add_use_pos(def_pos, use_kind);
duke@435 988 TRACE_LINEAR_SCAN(2, tty->print_cr("Warning: dead value %d at %d in live intervals", reg_num, def_pos));
duke@435 989 }
duke@435 990
duke@435 991 change_spill_definition_pos(interval, def_pos);
duke@435 992 if (use_kind == noUse && interval->spill_state() <= startInMemory) {
duke@435 993 // detection of method-parameters and roundfp-results
duke@435 994 // TODO: move this directly to position where use-kind is computed
duke@435 995 interval->set_spill_state(startInMemory);
duke@435 996 }
duke@435 997 }
duke@435 998
duke@435 999 void LinearScan::add_use(int reg_num, int from, int to, IntervalUseKind use_kind, BasicType type) {
duke@435 1000 Interval* interval = interval_at(reg_num);
duke@435 1001 if (interval == NULL) {
duke@435 1002 interval = create_interval(reg_num);
duke@435 1003 }
duke@435 1004 assert(interval->reg_num() == reg_num, "wrong interval");
duke@435 1005
duke@435 1006 if (type != T_ILLEGAL) {
duke@435 1007 interval->set_type(type);
duke@435 1008 }
duke@435 1009
duke@435 1010 interval->add_range(from, to);
duke@435 1011 interval->add_use_pos(to, use_kind);
duke@435 1012 }
duke@435 1013
duke@435 1014 void LinearScan::add_temp(int reg_num, int temp_pos, IntervalUseKind use_kind, BasicType type) {
duke@435 1015 Interval* interval = interval_at(reg_num);
duke@435 1016 if (interval == NULL) {
duke@435 1017 interval = create_interval(reg_num);
duke@435 1018 }
duke@435 1019 assert(interval->reg_num() == reg_num, "wrong interval");
duke@435 1020
duke@435 1021 if (type != T_ILLEGAL) {
duke@435 1022 interval->set_type(type);
duke@435 1023 }
duke@435 1024
duke@435 1025 interval->add_range(temp_pos, temp_pos + 1);
duke@435 1026 interval->add_use_pos(temp_pos, use_kind);
duke@435 1027 }
duke@435 1028
duke@435 1029
duke@435 1030 // the results of these functions are used for optimizing spilling and reloading:
duke@435 1031 // if the functions return shouldHaveRegister and the interval is spilled,
duke@435 1032 // it is not reloaded to a register.
duke@435 1033 IntervalUseKind LinearScan::use_kind_of_output_operand(LIR_Op* op, LIR_Opr opr) {
duke@435 1034 if (op->code() == lir_move) {
duke@435 1035 assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
duke@435 1036 LIR_Op1* move = (LIR_Op1*)op;
duke@435 1037 LIR_Opr res = move->result_opr();
duke@435 1038 bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);
duke@435 1039
duke@435 1040 if (result_in_memory) {
duke@435 1041 // Begin of an interval with must_start_in_memory set.
duke@435 1042 // This interval will always get a stack slot first, so return noUse.
duke@435 1043 return noUse;
duke@435 1044
duke@435 1045 } else if (move->in_opr()->is_stack()) {
duke@435 1046 // method argument (condition must be equal to handle_method_arguments)
duke@435 1047 return noUse;
duke@435 1048
duke@435 1049 } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
duke@435 1050 // Move from register to register
duke@435 1051 if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
duke@435 1052 // special handling of phi-function moves inside osr-entry blocks
duke@435 1053 // the input operand, rather than the output operand, should get a register (leads to better register allocation)
duke@435 1054 return shouldHaveRegister;
duke@435 1055 }
duke@435 1056 }
duke@435 1057 }
duke@435 1058
duke@435 1059 if (opr->is_virtual() &&
duke@435 1060 gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::must_start_in_memory)) {
duke@435 1061 // result is a stack-slot, so prevent immediate reloading
duke@435 1062 return noUse;
duke@435 1063 }
duke@435 1064
duke@435 1065 // all other operands require a register
duke@435 1066 return mustHaveRegister;
duke@435 1067 }
duke@435 1068
duke@435 1069 IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
duke@435 1070 if (op->code() == lir_move) {
duke@435 1071 assert(op->as_Op1() != NULL, "lir_move must be LIR_Op1");
duke@435 1072 LIR_Op1* move = (LIR_Op1*)op;
duke@435 1073 LIR_Opr res = move->result_opr();
duke@435 1074 bool result_in_memory = res->is_virtual() && gen()->is_vreg_flag_set(res->vreg_number(), LIRGenerator::must_start_in_memory);
duke@435 1075
duke@435 1076 if (result_in_memory) {
duke@435 1077 // Move to an interval with must_start_in_memory set.
duke@435 1078 // To avoid moves from stack to stack (not allowed), force the input operand into a register
duke@435 1079 return mustHaveRegister;
duke@435 1080
duke@435 1081 } else if (move->in_opr()->is_register() && move->result_opr()->is_register()) {
duke@435 1082 // Move from register to register
duke@435 1083 if (block_of_op_with_id(op->id())->is_set(BlockBegin::osr_entry_flag)) {
duke@435 1084 // special handling of phi-function moves inside osr-entry blocks
duke@435 1085 // the input operand, rather than the output operand, should get a register (leads to better register allocation)
duke@435 1086 return mustHaveRegister;
duke@435 1087 }
duke@435 1088
duke@435 1089 // The input operand is not forced to a register (moves from stack to register are allowed),
duke@435 1090 // but it is faster if the input operand is in a register
duke@435 1091 return shouldHaveRegister;
duke@435 1092 }
duke@435 1093 }
duke@435 1094
duke@435 1095
never@739 1096 #ifdef X86
duke@435 1097 if (op->code() == lir_cmove) {
duke@435 1098 // conditional moves can handle stack operands
duke@435 1099 assert(op->result_opr()->is_register(), "result must always be in a register");
duke@435 1100 return shouldHaveRegister;
duke@435 1101 }
duke@435 1102
duke@435 1103 // optimizations for the second input operand of arithmetic operations on Intel
duke@435 1104 // this operand is allowed to be on the stack in some cases
duke@435 1105 BasicType opr_type = opr->type_register();
duke@435 1106 if (opr_type == T_FLOAT || opr_type == T_DOUBLE) {
duke@435 1107 if ((UseSSE == 1 && opr_type == T_FLOAT) || UseSSE >= 2) {
duke@435 1108 // SSE float instruction (T_DOUBLE only supported with SSE2)
duke@435 1109 switch (op->code()) {
duke@435 1110 case lir_cmp:
duke@435 1111 case lir_add:
duke@435 1112 case lir_sub:
duke@435 1113 case lir_mul:
duke@435 1114 case lir_div:
duke@435 1115 {
duke@435 1116 assert(op->as_Op2() != NULL, "must be LIR_Op2");
duke@435 1117 LIR_Op2* op2 = (LIR_Op2*)op;
duke@435 1118 if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
duke@435 1119 assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
duke@435 1120 return shouldHaveRegister;
duke@435 1121 }
duke@435 1122 }
duke@435 1123 }
duke@435 1124 } else {
duke@435 1125 // FPU stack float instruction
duke@435 1126 switch (op->code()) {
duke@435 1127 case lir_add:
duke@435 1128 case lir_sub:
duke@435 1129 case lir_mul:
duke@435 1130 case lir_div:
duke@435 1131 {
duke@435 1132 assert(op->as_Op2() != NULL, "must be LIR_Op2");
duke@435 1133 LIR_Op2* op2 = (LIR_Op2*)op;
duke@435 1134 if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
duke@435 1135 assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
duke@435 1136 return shouldHaveRegister;
duke@435 1137 }
duke@435 1138 }
duke@435 1139 }
duke@435 1140 }
duke@435 1141
duke@435 1142 } else if (opr_type != T_LONG) {
duke@435 1143 // integer instruction (note: long operands must always be in register)
duke@435 1144 switch (op->code()) {
duke@435 1145 case lir_cmp:
duke@435 1146 case lir_add:
duke@435 1147 case lir_sub:
duke@435 1148 case lir_logic_and:
duke@435 1149 case lir_logic_or:
duke@435 1150 case lir_logic_xor:
duke@435 1151 {
duke@435 1152 assert(op->as_Op2() != NULL, "must be LIR_Op2");
duke@435 1153 LIR_Op2* op2 = (LIR_Op2*)op;
duke@435 1154 if (op2->in_opr1() != op2->in_opr2() && op2->in_opr2() == opr) {
duke@435 1155 assert((op2->result_opr()->is_register() || op->code() == lir_cmp) && op2->in_opr1()->is_register(), "cannot mark second operand as stack if others are not in register");
duke@435 1156 return shouldHaveRegister;
duke@435 1157 }
duke@435 1158 }
duke@435 1159 }
duke@435 1160 }
never@739 1161 #endif // X86
duke@435 1162
duke@435 1163 // all other operands require a register
duke@435 1164 return mustHaveRegister;
duke@435 1165 }
duke@435 1166
duke@435 1167
duke@435 1168 void LinearScan::handle_method_arguments(LIR_Op* op) {
duke@435 1169 // special handling for method arguments (moves from stack to virtual register):
duke@435 1170 // the interval gets the stack slot assigned instead of a register.
duke@435 1171 // it is split before the first use by the register allocator.
duke@435 1172
duke@435 1173 if (op->code() == lir_move) {
duke@435 1174 assert(op->as_Op1() != NULL, "must be LIR_Op1");
duke@435 1175 LIR_Op1* move = (LIR_Op1*)op;
duke@435 1176
duke@435 1177 if (move->in_opr()->is_stack()) {
duke@435 1178 #ifdef ASSERT
duke@435 1179 int arg_size = compilation()->method()->arg_size();
duke@435 1180 LIR_Opr o = move->in_opr();
duke@435 1181 if (o->is_single_stack()) {
duke@435 1182 assert(o->single_stack_ix() >= 0 && o->single_stack_ix() < arg_size, "out of range");
duke@435 1183 } else if (o->is_double_stack()) {
duke@435 1184 assert(o->double_stack_ix() >= 0 && o->double_stack_ix() < arg_size, "out of range");
duke@435 1185 } else {
duke@435 1186 ShouldNotReachHere();
duke@435 1187 }
duke@435 1188
duke@435 1189 assert(move->id() > 0, "invalid id");
duke@435 1190 assert(block_of_op_with_id(move->id())->number_of_preds() == 0, "move from stack must be in first block");
duke@435 1191 assert(move->result_opr()->is_virtual(), "result of move must be a virtual register");
duke@435 1192
duke@435 1193 TRACE_LINEAR_SCAN(4, tty->print_cr("found move from stack slot %d to vreg %d", o->is_single_stack() ? o->single_stack_ix() : o->double_stack_ix(), reg_num(move->result_opr())));
duke@435 1194 #endif
duke@435 1195
duke@435 1196 Interval* interval = interval_at(reg_num(move->result_opr()));
duke@435 1197
duke@435 1198 int stack_slot = LinearScan::nof_regs + (move->in_opr()->is_single_stack() ? move->in_opr()->single_stack_ix() : move->in_opr()->double_stack_ix());
duke@435 1199 interval->set_canonical_spill_slot(stack_slot);
duke@435 1200 interval->assign_reg(stack_slot);
duke@435 1201 }
duke@435 1202 }
duke@435 1203 }
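
// For illustration only: a minimal standalone sketch of the location encoding used
// above, assuming a machine with NOF_REGS physical registers. One integer names
// either a register (below NOF_REGS) or a stack slot (at or above NOF_REGS), which
// is why assign_reg() can take a stack slot. All names here are hypothetical
// stand-ins, not part of LinearScan.
static const int NOF_REGS = 32;                       // stand-in for LinearScan::nof_regs

static int  encode_stack_slot(int stack_ix) { return NOF_REGS + stack_ix; }
static bool is_stack_slot(int assigned)     { return assigned >= NOF_REGS; }
static int  decode_stack_slot(int assigned) { return assigned - NOF_REGS; }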
duke@435 1204
duke@435 1205 void LinearScan::handle_doubleword_moves(LIR_Op* op) {
duke@435 1206 // special handling for doubleword move from memory to register:
duke@435 1207 // in this case the registers of the input address and the result
duke@435 1208 // registers must not overlap -> add a temp range for the input registers
duke@435 1209 if (op->code() == lir_move) {
duke@435 1210 assert(op->as_Op1() != NULL, "must be LIR_Op1");
duke@435 1211 LIR_Op1* move = (LIR_Op1*)op;
duke@435 1212
duke@435 1213 if (move->result_opr()->is_double_cpu() && move->in_opr()->is_pointer()) {
duke@435 1214 LIR_Address* address = move->in_opr()->as_address_ptr();
duke@435 1215 if (address != NULL) {
duke@435 1216 if (address->base()->is_valid()) {
duke@435 1217 add_temp(address->base(), op->id(), noUse);
duke@435 1218 }
duke@435 1219 if (address->index()->is_valid()) {
duke@435 1220 add_temp(address->index(), op->id(), noUse);
duke@435 1221 }
duke@435 1222 }
duke@435 1223 }
duke@435 1224 }
duke@435 1225 }
duke@435 1226
duke@435 1227 void LinearScan::add_register_hints(LIR_Op* op) {
duke@435 1228 switch (op->code()) {
duke@435 1229 case lir_move: // fall through
duke@435 1230 case lir_convert: {
duke@435 1231 assert(op->as_Op1() != NULL, "lir_move, lir_convert must be LIR_Op1");
duke@435 1232 LIR_Op1* move = (LIR_Op1*)op;
duke@435 1233
duke@435 1234 LIR_Opr move_from = move->in_opr();
duke@435 1235 LIR_Opr move_to = move->result_opr();
duke@435 1236
duke@435 1237 if (move_to->is_register() && move_from->is_register()) {
duke@435 1238 Interval* from = interval_at(reg_num(move_from));
duke@435 1239 Interval* to = interval_at(reg_num(move_to));
duke@435 1240 if (from != NULL && to != NULL) {
duke@435 1241 to->set_register_hint(from);
duke@435 1242 TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", move->id(), from->reg_num(), to->reg_num()));
duke@435 1243 }
duke@435 1244 }
duke@435 1245 break;
duke@435 1246 }
duke@435 1247 case lir_cmove: {
duke@435 1248 assert(op->as_Op2() != NULL, "lir_cmove must be LIR_Op2");
duke@435 1249 LIR_Op2* cmove = (LIR_Op2*)op;
duke@435 1250
duke@435 1251 LIR_Opr move_from = cmove->in_opr1();
duke@435 1252 LIR_Opr move_to = cmove->result_opr();
duke@435 1253
duke@435 1254 if (move_to->is_register() && move_from->is_register()) {
duke@435 1255 Interval* from = interval_at(reg_num(move_from));
duke@435 1256 Interval* to = interval_at(reg_num(move_to));
duke@435 1257 if (from != NULL && to != NULL) {
duke@435 1258 to->set_register_hint(from);
duke@435 1259 TRACE_LINEAR_SCAN(4, tty->print_cr("operation at op_id %d: added hint from interval %d to %d", cmove->id(), from->reg_num(), to->reg_num()));
duke@435 1260 }
duke@435 1261 }
duke@435 1262 break;
duke@435 1263 }
duke@435 1264 }
duke@435 1265 }
duke@435 1266
duke@435 1267
duke@435 1268 void LinearScan::build_intervals() {
duke@435 1269 TIME_LINEAR_SCAN(timer_build_intervals);
duke@435 1270
duke@435 1271 // initialize interval list with expected number of intervals
duke@435 1272 // (32 is added to have some space for split children without having to resize the list)
duke@435 1273 _intervals = IntervalList(num_virtual_regs() + 32);
duke@435 1274 // initialize all slots that are used by build_intervals
duke@435 1275 _intervals.at_put_grow(num_virtual_regs() - 1, NULL, NULL);
duke@435 1276
duke@435 1277 // create a list with all caller-save registers (cpu, fpu, xmm)
duke@435 1278 // when an instruction is a call, a temp range is created for all these registers
duke@435 1279 int num_caller_save_registers = 0;
duke@435 1280 int caller_save_registers[LinearScan::nof_regs];
duke@435 1281
duke@435 1282 int i;
iveresov@2344 1283 for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
duke@435 1284 LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
duke@435 1285 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
duke@435 1286 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
duke@435 1287 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
duke@435 1288 }
duke@435 1289
duke@435 1290 // temp ranges for fpu registers are only created when the method has
duke@435 1291 // virtual fpu operands. Otherwise no allocation for fpu registers is
duke@435 1292 // performed, so the temp ranges would be useless
duke@435 1293 if (has_fpu_registers()) {
never@739 1294 #ifdef X86
duke@435 1295 if (UseSSE < 2) {
duke@435 1296 #endif
duke@435 1297 for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
duke@435 1298 LIR_Opr opr = FrameMap::caller_save_fpu_reg_at(i);
duke@435 1299 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
duke@435 1300 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
duke@435 1301 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
duke@435 1302 }
never@739 1303 #ifdef X86
duke@435 1304 }
duke@435 1305 if (UseSSE > 0) {
duke@435 1306 for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
duke@435 1307 LIR_Opr opr = FrameMap::caller_save_xmm_reg_at(i);
duke@435 1308 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
duke@435 1309 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
duke@435 1310 caller_save_registers[num_caller_save_registers++] = reg_num(opr);
duke@435 1311 }
duke@435 1312 }
duke@435 1313 #endif
duke@435 1314 }
duke@435 1315 assert(num_caller_save_registers <= LinearScan::nof_regs, "out of bounds");
duke@435 1316
duke@435 1317
duke@435 1318 LIR_OpVisitState visitor;
duke@435 1319
duke@435 1320 // iterate all blocks in reverse order
duke@435 1321 for (i = block_count() - 1; i >= 0; i--) {
duke@435 1322 BlockBegin* block = block_at(i);
duke@435 1323 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 1324 int block_from = block->first_lir_instruction_id();
duke@435 1325 int block_to = block->last_lir_instruction_id();
duke@435 1326
duke@435 1327 assert(block_from == instructions->at(0)->id(), "must be");
duke@435 1328 assert(block_to == instructions->at(instructions->length() - 1)->id(), "must be");
duke@435 1329
duke@435 1330 // Update intervals for registers live at the end of this block
duke@435 1331 BitMap live = block->live_out();
never@739 1332 int size = (int)live.size();
never@739 1333 for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
duke@435 1334 assert(live.at(number), "should not stop here otherwise");
duke@435 1335 assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
duke@435 1336 TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
duke@435 1337
duke@435 1338 add_use(number, block_from, block_to + 2, noUse, T_ILLEGAL);
duke@435 1339
duke@435 1340 // add special use positions for loop-end blocks when the
duke@435 1341 // interval is used anywhere inside this loop. It's possible
duke@435 1342 // that the block was part of a non-natural loop, so it might
duke@435 1343 // have an invalid loop index.
duke@435 1344 if (block->is_set(BlockBegin::linear_scan_loop_end_flag) &&
duke@435 1345 block->loop_index() != -1 &&
duke@435 1346 is_interval_in_loop(number, block->loop_index())) {
duke@435 1347 interval_at(number)->add_use_pos(block_to + 1, loopEndMarker);
duke@435 1348 }
duke@435 1349 }
duke@435 1350
duke@435 1351 // iterate all instructions of the block in reverse order.
duke@435 1352 // skip the first instruction because it is always a label
duke@435 1353 // definitions of intervals are processed before uses
duke@435 1354 assert(visitor.no_operands(instructions->at(0)), "first operation must always be a label");
duke@435 1355 for (int j = instructions->length() - 1; j >= 1; j--) {
duke@435 1356 LIR_Op* op = instructions->at(j);
duke@435 1357 int op_id = op->id();
duke@435 1358
duke@435 1359 // visit operation to collect all operands
duke@435 1360 visitor.visit(op);
duke@435 1361
duke@435 1362 // add a temp range for each register if operation destroys caller-save registers
duke@435 1363 if (visitor.has_call()) {
duke@435 1364 for (int k = 0; k < num_caller_save_registers; k++) {
duke@435 1365 add_temp(caller_save_registers[k], op_id, noUse, T_ILLEGAL);
duke@435 1366 }
duke@435 1367 TRACE_LINEAR_SCAN(4, tty->print_cr("operation destroys all caller-save registers"));
duke@435 1368 }
duke@435 1369
duke@435 1370 // Add any platform dependent temps
duke@435 1371 pd_add_temps(op);
duke@435 1372
duke@435 1373 // visit definitions (output and temp operands)
duke@435 1374 int k, n;
duke@435 1375 n = visitor.opr_count(LIR_OpVisitState::outputMode);
duke@435 1376 for (k = 0; k < n; k++) {
duke@435 1377 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, k);
duke@435 1378 assert(opr->is_register(), "visitor should only return register operands");
duke@435 1379 add_def(opr, op_id, use_kind_of_output_operand(op, opr));
duke@435 1380 }
duke@435 1381
duke@435 1382 n = visitor.opr_count(LIR_OpVisitState::tempMode);
duke@435 1383 for (k = 0; k < n; k++) {
duke@435 1384 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, k);
duke@435 1385 assert(opr->is_register(), "visitor should only return register operands");
duke@435 1386 add_temp(opr, op_id, mustHaveRegister);
duke@435 1387 }
duke@435 1388
duke@435 1389 // visit uses (input operands)
duke@435 1390 n = visitor.opr_count(LIR_OpVisitState::inputMode);
duke@435 1391 for (k = 0; k < n; k++) {
duke@435 1392 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, k);
duke@435 1393 assert(opr->is_register(), "visitor should only return register operands");
duke@435 1394 add_use(opr, block_from, op_id, use_kind_of_input_operand(op, opr));
duke@435 1395 }
duke@435 1396
duke@435 1397 // Add uses of live locals from interpreter's point of view for proper
duke@435 1398 // debug information generation
duke@435 1399 // Treat these operands as temp values (otherwise, if the live range were extended
duke@435 1400 // to a call site, the value would be kept in a register at the call)
duke@435 1401 n = visitor.info_count();
duke@435 1402 for (k = 0; k < n; k++) {
duke@435 1403 CodeEmitInfo* info = visitor.info_at(k);
duke@435 1404 ValueStack* stack = info->stack();
duke@435 1405 for_each_state_value(stack, value,
duke@435 1406 add_use(value, block_from, op_id + 1, noUse);
duke@435 1407 );
duke@435 1408 }
duke@435 1409
duke@435 1410 // special steps for some instructions (especially moves)
duke@435 1411 handle_method_arguments(op);
duke@435 1412 handle_doubleword_moves(op);
duke@435 1413 add_register_hints(op);
duke@435 1414
duke@435 1415 } // end of instruction iteration
duke@435 1416 } // end of block iteration
duke@435 1417
duke@435 1418
duke@435 1419 // add the range [0, 1[ to all fixed intervals
duke@435 1420 // -> the register allocator need not handle unhandled fixed intervals
duke@435 1421 for (int n = 0; n < LinearScan::nof_regs; n++) {
duke@435 1422 Interval* interval = interval_at(n);
duke@435 1423 if (interval != NULL) {
duke@435 1424 interval->add_range(0, 1);
duke@435 1425 }
duke@435 1426 }
duke@435 1427 }
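
// For illustration only: the essence of the backward scan above on a toy,
// single-block IR where each instruction defines and/or uses one virtual
// register (-1 = none). Scanning in reverse, a use opens a range that
// conservatively reaches back to the block entry; the defining instruction,
// visited later in the scan, shortens it. All types and names below are
// hypothetical stand-ins, not the C1 data structures.
struct ToyOp    { int def; int use; };          // one instruction; -1 means "none"
struct ToyRange { int from; int to; };          // half-open [from, to); to == 0 means "unused"

// fills ranges[0..num_vregs-1] for a single block whose ops have ids 0..num_ops-1
static void toy_build_intervals(const ToyOp* ops, int num_ops, ToyRange* ranges, int num_vregs) {
  for (int r = 0; r < num_vregs; r++) { ranges[r].from = 0; ranges[r].to = 0; }
  for (int id = num_ops - 1; id >= 0; id--) {   // reverse order, like the loop above
    const ToyOp& op = ops[id];
    if (op.use >= 0) {
      if (ranges[op.use].to == 0) {
        ranges[op.use].to = id + 1;             // the last use is seen first in the reverse scan
      }
      ranges[op.use].from = 0;                  // conservatively live from block entry
    }
    if (op.def >= 0 && ranges[op.def].to != 0) {
      ranges[op.def].from = id;                 // the definition shortens the range
    }
  }
}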
duke@435 1428
duke@435 1429
duke@435 1430 // ********** Phase 5: actual register allocation
duke@435 1431
duke@435 1432 int LinearScan::interval_cmp(Interval** a, Interval** b) {
duke@435 1433 if (*a != NULL) {
duke@435 1434 if (*b != NULL) {
duke@435 1435 return (*a)->from() - (*b)->from();
duke@435 1436 } else {
duke@435 1437 return -1;
duke@435 1438 }
duke@435 1439 } else {
duke@435 1440 if (*b != NULL) {
duke@435 1441 return 1;
duke@435 1442 } else {
duke@435 1443 return 0;
duke@435 1444 }
duke@435 1445 }
duke@435 1446 }
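
// For illustration only: the same NULL-tolerant ordering expressed over plain
// int pointers, suitable for qsort. Non-NULL entries sort ascending by value;
// NULL entries move to the end. The overflow-free comparison below avoids the
// subtraction idiom used above, which is safe there only because op_ids are
// small non-negative numbers. All names are hypothetical.
#include <stdlib.h>

static int int_ptr_cmp(const void* pa, const void* pb) {
  const int* a = *(const int* const*)pa;
  const int* b = *(const int* const*)pb;
  if (a != NULL && b != NULL) return (*a > *b) - (*a < *b);
  if (a != NULL) return -1;                     // non-NULL sorts before NULL
  if (b != NULL) return 1;
  return 0;
}
// usage sketch: qsort(ptrs, n, sizeof(int*), int_ptr_cmp);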
duke@435 1447
duke@435 1448 #ifndef PRODUCT
duke@435 1449 bool LinearScan::is_sorted(IntervalArray* intervals) {
duke@435 1450 int from = -1;
duke@435 1451 int i, j;
duke@435 1452 for (i = 0; i < intervals->length(); i ++) {
duke@435 1453 Interval* it = intervals->at(i);
duke@435 1454 if (it != NULL) {
duke@435 1455 if (from > it->from()) {
duke@435 1456 assert(false, "");
duke@435 1457 return false;
duke@435 1458 }
duke@435 1459 from = it->from();
duke@435 1460 }
duke@435 1461 }
duke@435 1462
duke@435 1463 // check in both directions that the sorted and unsorted lists contain the same intervals
duke@435 1464 for (i = 0; i < interval_count(); i++) {
duke@435 1465 if (interval_at(i) != NULL) {
duke@435 1466 int num_found = 0;
duke@435 1467 for (j = 0; j < intervals->length(); j++) {
duke@435 1468 if (interval_at(i) == intervals->at(j)) {
duke@435 1469 num_found++;
duke@435 1470 }
duke@435 1471 }
duke@435 1472 assert(num_found == 1, "lists do not contain same intervals");
duke@435 1473 }
duke@435 1474 }
duke@435 1475 for (j = 0; j < intervals->length(); j++) {
duke@435 1476 int num_found = 0;
duke@435 1477 for (i = 0; i < interval_count(); i++) {
duke@435 1478 if (interval_at(i) == intervals->at(j)) {
duke@435 1479 num_found++;
duke@435 1480 }
duke@435 1481 }
duke@435 1482 assert(num_found == 1, "lists do not contain same intervals");
duke@435 1483 }
duke@435 1484
duke@435 1485 return true;
duke@435 1486 }
duke@435 1487 #endif
duke@435 1488
duke@435 1489 void LinearScan::add_to_list(Interval** first, Interval** prev, Interval* interval) {
duke@435 1490 if (*prev != NULL) {
duke@435 1491 (*prev)->set_next(interval);
duke@435 1492 } else {
duke@435 1493 *first = interval;
duke@435 1494 }
duke@435 1495 *prev = interval;
duke@435 1496 }
duke@435 1497
duke@435 1498 void LinearScan::create_unhandled_lists(Interval** list1, Interval** list2, bool (is_list1)(const Interval* i), bool (is_list2)(const Interval* i)) {
duke@435 1499 assert(is_sorted(_sorted_intervals), "interval list is not sorted");
duke@435 1500
duke@435 1501 *list1 = *list2 = Interval::end();
duke@435 1502
duke@435 1503 Interval* list1_prev = NULL;
duke@435 1504 Interval* list2_prev = NULL;
duke@435 1505 Interval* v;
duke@435 1506
duke@435 1507 const int n = _sorted_intervals->length();
duke@435 1508 for (int i = 0; i < n; i++) {
duke@435 1509 v = _sorted_intervals->at(i);
duke@435 1510 if (v == NULL) continue;
duke@435 1511
duke@435 1512 if (is_list1(v)) {
duke@435 1513 add_to_list(list1, &list1_prev, v);
duke@435 1514 } else if (is_list2 == NULL || is_list2(v)) {
duke@435 1515 add_to_list(list2, &list2_prev, v);
duke@435 1516 }
duke@435 1517 }
duke@435 1518
duke@435 1519 if (list1_prev != NULL) list1_prev->set_next(Interval::end());
duke@435 1520 if (list2_prev != NULL) list2_prev->set_next(Interval::end());
duke@435 1521
duke@435 1522 assert(list1_prev == NULL || list1_prev->next() == Interval::end(), "linear list does not end with sentinel");
duke@435 1523 assert(list2_prev == NULL || list2_prev->next() == Interval::end(), "linear list does not end with sentinel");
duke@435 1524 }
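
// For illustration only: the same one-pass predicate partition over a plain
// array, appending to two singly linked lists via tail pointers so both lists
// keep the input order (and therefore stay sorted). ToyNode and all other
// names are hypothetical stand-ins for Interval and its next-pointer.
struct ToyNode { int value; ToyNode* next; };

static void toy_partition(ToyNode** items, int n, bool (*in_list1)(const ToyNode*),
                          ToyNode** list1, ToyNode** list2) {
  *list1 = *list2 = NULL;
  ToyNode** tail1 = list1;
  ToyNode** tail2 = list2;
  for (int i = 0; i < n; i++) {
    ToyNode* v = items[i];
    if (v == NULL) continue;                    // skip holes, as above
    ToyNode*** tail = in_list1(v) ? &tail1 : &tail2;
    **tail = v;                                 // append at the current tail
    *tail = &v->next;                           // advance the tail pointer
  }
  *tail1 = NULL;                                // terminate both lists
  *tail2 = NULL;
}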
duke@435 1525
duke@435 1526
duke@435 1527 void LinearScan::sort_intervals_before_allocation() {
duke@435 1528 TIME_LINEAR_SCAN(timer_sort_intervals_before);
duke@435 1529
never@2404 1530 if (_needs_full_resort) {
never@2404 1531 // There is no known reason why this should occur but just in case...
never@2404 1532 assert(false, "should never occur");
never@2404 1533 // Re-sort existing interval list because an Interval::from() has changed
never@2404 1534 _sorted_intervals->sort(interval_cmp);
never@2404 1535 _needs_full_resort = false;
never@2404 1536 }
never@2404 1537
duke@435 1538 IntervalList* unsorted_list = &_intervals;
duke@435 1539 int unsorted_len = unsorted_list->length();
duke@435 1540 int sorted_len = 0;
duke@435 1541 int unsorted_idx;
duke@435 1542 int sorted_idx = 0;
duke@435 1543 int sorted_from_max = -1;
duke@435 1544
duke@435 1545 // calc number of items for sorted list (sorted list must not contain NULL values)
duke@435 1546 for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
duke@435 1547 if (unsorted_list->at(unsorted_idx) != NULL) {
duke@435 1548 sorted_len++;
duke@435 1549 }
duke@435 1550 }
duke@435 1551 IntervalArray* sorted_list = new IntervalArray(sorted_len);
duke@435 1552
duke@435 1553 // special sorting algorithm: the original interval list is almost sorted,
duke@435 1554 // only a few intervals are out of place. So this is much faster than a complete quicksort
duke@435 1555 for (unsorted_idx = 0; unsorted_idx < unsorted_len; unsorted_idx++) {
duke@435 1556 Interval* cur_interval = unsorted_list->at(unsorted_idx);
duke@435 1557
duke@435 1558 if (cur_interval != NULL) {
duke@435 1559 int cur_from = cur_interval->from();
duke@435 1560
duke@435 1561 if (sorted_from_max <= cur_from) {
duke@435 1562 sorted_list->at_put(sorted_idx++, cur_interval);
duke@435 1563 sorted_from_max = cur_interval->from();
duke@435 1564 } else {
duke@435 1565 // the assumption that the intervals are already sorted failed,
duke@435 1566 // so this interval must be inserted manually
duke@435 1567 int j;
duke@435 1568 for (j = sorted_idx - 1; j >= 0 && cur_from < sorted_list->at(j)->from(); j--) {
duke@435 1569 sorted_list->at_put(j + 1, sorted_list->at(j));
duke@435 1570 }
duke@435 1571 sorted_list->at_put(j + 1, cur_interval);
duke@435 1572 sorted_idx++;
duke@435 1573 }
duke@435 1574 }
duke@435 1575 }
duke@435 1576 _sorted_intervals = sorted_list;
never@2404 1577 assert(is_sorted(_sorted_intervals), "intervals unsorted");
duke@435 1578 }
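
// For illustration only: the same "almost sorted" strategy on a plain int
// array. In-order elements are appended in O(1); only out-of-order elements
// pay the insertion-sort shifting cost, so a nearly sorted input is handled
// in close to linear time. Names are hypothetical.
static void sort_almost_sorted(int* a, int n) {
  int max_so_far = (n > 0) ? a[0] : 0;
  for (int i = 1; i < n; i++) {
    int cur = a[i];
    if (max_so_far <= cur) {
      max_so_far = cur;                         // common case: already in order
    } else {
      int j;
      for (j = i - 1; j >= 0 && cur < a[j]; j--) {
        a[j + 1] = a[j];                        // shift larger elements to the right
      }
      a[j + 1] = cur;                           // insert at the found position
    }
  }
}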
duke@435 1579
duke@435 1580 void LinearScan::sort_intervals_after_allocation() {
duke@435 1581 TIME_LINEAR_SCAN(timer_sort_intervals_after);
duke@435 1582
never@2404 1583 if (_needs_full_resort) {
never@2404 1584 // Re-sort existing interval list because an Interval::from() has changed
never@2404 1585 _sorted_intervals->sort(interval_cmp);
never@2404 1586 _needs_full_resort = false;
never@2404 1587 }
never@2404 1588
duke@435 1589 IntervalArray* old_list = _sorted_intervals;
duke@435 1590 IntervalList* new_list = _new_intervals_from_allocation;
duke@435 1591 int old_len = old_list->length();
duke@435 1592 int new_len = new_list->length();
duke@435 1593
duke@435 1594 if (new_len == 0) {
duke@435 1595 // no intervals have been added during allocation, so the sorted list is already up to date
never@2404 1596 assert(is_sorted(_sorted_intervals), "intervals unsorted");
duke@435 1597 return;
duke@435 1598 }
duke@435 1599
duke@435 1600 // conventional sort algorithm for the new intervals
duke@435 1601 new_list->sort(interval_cmp);
duke@435 1602
duke@435 1603 // merge old and new list (both already sorted) into one combined list
duke@435 1604 IntervalArray* combined_list = new IntervalArray(old_len + new_len);
duke@435 1605 int old_idx = 0;
duke@435 1606 int new_idx = 0;
duke@435 1607
duke@435 1608 while (old_idx + new_idx < old_len + new_len) {
duke@435 1609 if (new_idx >= new_len || (old_idx < old_len && old_list->at(old_idx)->from() <= new_list->at(new_idx)->from())) {
duke@435 1610 combined_list->at_put(old_idx + new_idx, old_list->at(old_idx));
duke@435 1611 old_idx++;
duke@435 1612 } else {
duke@435 1613 combined_list->at_put(old_idx + new_idx, new_list->at(new_idx));
duke@435 1614 new_idx++;
duke@435 1615 }
duke@435 1616 }
duke@435 1617
duke@435 1618 _sorted_intervals = combined_list;
never@2404 1619 assert(is_sorted(_sorted_intervals), "intervals unsorted");
duke@435 1620 }
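
// For illustration only: the same stable two-pointer merge on plain int
// arrays; ties are taken from the old array, mirroring the '<=' above.
// 'out' must have room for old_len + new_len elements. Names are hypothetical.
static void merge_sorted(const int* old_arr, int old_len,
                         const int* new_arr, int new_len, int* out) {
  int old_idx = 0;
  int new_idx = 0;
  while (old_idx + new_idx < old_len + new_len) {
    if (new_idx >= new_len || (old_idx < old_len && old_arr[old_idx] <= new_arr[new_idx])) {
      out[old_idx + new_idx] = old_arr[old_idx];  // take from the old (allocated) list
      old_idx++;
    } else {
      out[old_idx + new_idx] = new_arr[new_idx];  // take from the newly created list
      new_idx++;
    }
  }
}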
duke@435 1621
duke@435 1622
duke@435 1623 void LinearScan::allocate_registers() {
duke@435 1624 TIME_LINEAR_SCAN(timer_allocate_registers);
duke@435 1625
duke@435 1626 Interval* precolored_cpu_intervals, *not_precolored_cpu_intervals;
duke@435 1627 Interval* precolored_fpu_intervals, *not_precolored_fpu_intervals;
duke@435 1628
duke@435 1629 create_unhandled_lists(&precolored_cpu_intervals, &not_precolored_cpu_intervals, is_precolored_cpu_interval, is_virtual_cpu_interval);
duke@435 1630 if (has_fpu_registers()) {
duke@435 1631 create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
duke@435 1632 #ifdef ASSERT
duke@435 1633 } else {
duke@435 1634 // fpu register allocation is omitted because no virtual fpu registers are present
duke@435 1635 // just check this again...
duke@435 1636 create_unhandled_lists(&precolored_fpu_intervals, &not_precolored_fpu_intervals, is_precolored_fpu_interval, is_virtual_fpu_interval);
duke@435 1637 assert(not_precolored_fpu_intervals == Interval::end(), "missed an uncolored fpu interval");
duke@435 1638 #endif
duke@435 1639 }
duke@435 1640
duke@435 1641 // allocate cpu registers
duke@435 1642 LinearScanWalker cpu_lsw(this, precolored_cpu_intervals, not_precolored_cpu_intervals);
duke@435 1643 cpu_lsw.walk();
duke@435 1644 cpu_lsw.finish_allocation();
duke@435 1645
duke@435 1646 if (has_fpu_registers()) {
duke@435 1647 // allocate fpu registers
duke@435 1648 LinearScanWalker fpu_lsw(this, precolored_fpu_intervals, not_precolored_fpu_intervals);
duke@435 1649 fpu_lsw.walk();
duke@435 1650 fpu_lsw.finish_allocation();
duke@435 1651 }
duke@435 1652 }
duke@435 1653
duke@435 1654
duke@435 1655 // ********** Phase 6: resolve data flow
duke@435 1656 // (insert moves at edges between blocks if intervals have been split)
duke@435 1657
duke@435 1658 // wrapper for Interval::split_child_at_op_id that performs a bailout in product mode
duke@435 1659 // instead of returning NULL
duke@435 1660 Interval* LinearScan::split_child_at_op_id(Interval* interval, int op_id, LIR_OpVisitState::OprMode mode) {
duke@435 1661 Interval* result = interval->split_child_at_op_id(op_id, mode);
duke@435 1662 if (result != NULL) {
duke@435 1663 return result;
duke@435 1664 }
duke@435 1665
duke@435 1666 assert(false, "must find an interval, but do a clean bailout in product mode");
duke@435 1667 result = new Interval(LIR_OprDesc::vreg_base);
duke@435 1668 result->assign_reg(0);
duke@435 1669 result->set_type(T_INT);
duke@435 1670 BAILOUT_("LinearScan: interval is NULL", result);
duke@435 1671 }
duke@435 1672
duke@435 1673
duke@435 1674 Interval* LinearScan::interval_at_block_begin(BlockBegin* block, int reg_num) {
duke@435 1675 assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
duke@435 1676 assert(interval_at(reg_num) != NULL, "no interval found");
duke@435 1677
duke@435 1678 return split_child_at_op_id(interval_at(reg_num), block->first_lir_instruction_id(), LIR_OpVisitState::outputMode);
duke@435 1679 }
duke@435 1680
duke@435 1681 Interval* LinearScan::interval_at_block_end(BlockBegin* block, int reg_num) {
duke@435 1682 assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
duke@435 1683 assert(interval_at(reg_num) != NULL, "no interval found");
duke@435 1684
duke@435 1685 return split_child_at_op_id(interval_at(reg_num), block->last_lir_instruction_id() + 1, LIR_OpVisitState::outputMode);
duke@435 1686 }
duke@435 1687
duke@435 1688 Interval* LinearScan::interval_at_op_id(int reg_num, int op_id) {
duke@435 1689 assert(LinearScan::nof_regs <= reg_num && reg_num < num_virtual_regs(), "register number out of bounds");
duke@435 1690 assert(interval_at(reg_num) != NULL, "no interval found");
duke@435 1691
duke@435 1692 return split_child_at_op_id(interval_at(reg_num), op_id, LIR_OpVisitState::inputMode);
duke@435 1693 }
duke@435 1694
duke@435 1695
duke@435 1696 void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
duke@435 1697 DEBUG_ONLY(move_resolver.check_empty());
duke@435 1698
duke@435 1699 const int num_regs = num_virtual_regs();
duke@435 1700 const int size = live_set_size();
duke@435 1701 const BitMap live_at_edge = to_block->live_in();
duke@435 1702
duke@435 1703 // visit all registers where the live_at_edge bit is set
never@739 1704 for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
duke@435 1705 assert(r < num_regs, "live information set for non-existing interval");
duke@435 1706 assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
duke@435 1707
duke@435 1708 Interval* from_interval = interval_at_block_end(from_block, r);
duke@435 1709 Interval* to_interval = interval_at_block_begin(to_block, r);
duke@435 1710
duke@435 1711 if (from_interval != to_interval && (from_interval->assigned_reg() != to_interval->assigned_reg() || from_interval->assigned_regHi() != to_interval->assigned_regHi())) {
duke@435 1712 // need to insert move instruction
duke@435 1713 move_resolver.add_mapping(from_interval, to_interval);
duke@435 1714 }
duke@435 1715 }
duke@435 1716 }
duke@435 1717
duke@435 1718
duke@435 1719 void LinearScan::resolve_find_insert_pos(BlockBegin* from_block, BlockBegin* to_block, MoveResolver &move_resolver) {
duke@435 1720 if (from_block->number_of_sux() <= 1) {
duke@435 1721 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at end of from_block B%d", from_block->block_id()));
duke@435 1722
duke@435 1723 LIR_OpList* instructions = from_block->lir()->instructions_list();
duke@435 1724 LIR_OpBranch* branch = instructions->last()->as_OpBranch();
duke@435 1725 if (branch != NULL) {
duke@435 1726 // insert moves before branch
duke@435 1727 assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
duke@435 1728 move_resolver.set_insert_position(from_block->lir(), instructions->length() - 2);
duke@435 1729 } else {
duke@435 1730 move_resolver.set_insert_position(from_block->lir(), instructions->length() - 1);
duke@435 1731 }
duke@435 1732
duke@435 1733 } else {
duke@435 1734 TRACE_LINEAR_SCAN(4, tty->print_cr("inserting moves at beginning of to_block B%d", to_block->block_id()));
duke@435 1735 #ifdef ASSERT
duke@435 1736 assert(from_block->lir()->instructions_list()->at(0)->as_OpLabel() != NULL, "block does not start with a label");
duke@435 1737
duke@435 1738 // because the number of predecessor edges matches the number of
duke@435 1739 // successor edges, blocks which are reached by switch statements
duke@435 1740 // may have more than one predecessor, but it is guaranteed
duke@435 1741 // that all predecessors are the same.
duke@435 1742 for (int i = 0; i < to_block->number_of_preds(); i++) {
duke@435 1743 assert(from_block == to_block->pred_at(i), "all critical edges must be broken");
duke@435 1744 }
duke@435 1745 #endif
duke@435 1746
duke@435 1747 move_resolver.set_insert_position(to_block->lir(), 0);
duke@435 1748 }
duke@435 1749 }
duke@435 1750
duke@435 1751
duke@435 1752 // insert necessary moves (spilling or reloading) at edges between blocks if intervals have been split
duke@435 1753 void LinearScan::resolve_data_flow() {
duke@435 1754 TIME_LINEAR_SCAN(timer_resolve_data_flow);
duke@435 1755
duke@435 1756 int num_blocks = block_count();
duke@435 1757 MoveResolver move_resolver(this);
duke@435 1758 BitMap block_completed(num_blocks); block_completed.clear();
duke@435 1759 BitMap already_resolved(num_blocks); already_resolved.clear();
duke@435 1760
duke@435 1761 int i;
duke@435 1762 for (i = 0; i < num_blocks; i++) {
duke@435 1763 BlockBegin* block = block_at(i);
duke@435 1764
duke@435 1765 // check if block has only one predecessor and only one successor
duke@435 1766 if (block->number_of_preds() == 1 && block->number_of_sux() == 1 && block->number_of_exception_handlers() == 0) {
duke@435 1767 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 1768 assert(instructions->at(0)->code() == lir_label, "block must start with label");
duke@435 1769 assert(instructions->last()->code() == lir_branch, "block with successors must end with branch");
duke@435 1770 assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block with successor must end with unconditional branch");
duke@435 1771
duke@435 1772 // check if block is empty (only label and branch)
duke@435 1773 if (instructions->length() == 2) {
duke@435 1774 BlockBegin* pred = block->pred_at(0);
duke@435 1775 BlockBegin* sux = block->sux_at(0);
duke@435 1776
duke@435 1777 // prevent optimization of two consecutive blocks
duke@435 1778 if (!block_completed.at(pred->linear_scan_number()) && !block_completed.at(sux->linear_scan_number())) {
duke@435 1779 TRACE_LINEAR_SCAN(3, tty->print_cr("**** optimizing empty block B%d (pred: B%d, sux: B%d)", block->block_id(), pred->block_id(), sux->block_id()));
duke@435 1780 block_completed.set_bit(block->linear_scan_number());
duke@435 1781
duke@435 1782 // directly resolve between pred and sux (without looking at the empty block between)
duke@435 1783 resolve_collect_mappings(pred, sux, move_resolver);
duke@435 1784 if (move_resolver.has_mappings()) {
duke@435 1785 move_resolver.set_insert_position(block->lir(), 0);
duke@435 1786 move_resolver.resolve_and_append_moves();
duke@435 1787 }
duke@435 1788 }
duke@435 1789 }
duke@435 1790 }
duke@435 1791 }
duke@435 1792
duke@435 1793
duke@435 1794 for (i = 0; i < num_blocks; i++) {
duke@435 1795 if (!block_completed.at(i)) {
duke@435 1796 BlockBegin* from_block = block_at(i);
duke@435 1797 already_resolved.set_from(block_completed);
duke@435 1798
duke@435 1799 int num_sux = from_block->number_of_sux();
duke@435 1800 for (int s = 0; s < num_sux; s++) {
duke@435 1801 BlockBegin* to_block = from_block->sux_at(s);
duke@435 1802
duke@435 1803 // check for duplicate edges between the same blocks (can happen with switch blocks)
duke@435 1804 if (!already_resolved.at(to_block->linear_scan_number())) {
duke@435 1805 TRACE_LINEAR_SCAN(3, tty->print_cr("**** processing edge between B%d and B%d", from_block->block_id(), to_block->block_id()));
duke@435 1806 already_resolved.set_bit(to_block->linear_scan_number());
duke@435 1807
duke@435 1808 // collect all intervals that have been split between from_block and to_block
duke@435 1809 resolve_collect_mappings(from_block, to_block, move_resolver);
duke@435 1810 if (move_resolver.has_mappings()) {
duke@435 1811 resolve_find_insert_pos(from_block, to_block, move_resolver);
duke@435 1812 move_resolver.resolve_and_append_moves();
duke@435 1813 }
duke@435 1814 }
duke@435 1815 }
duke@435 1816 }
duke@435 1817 }
duke@435 1818 }
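
// For illustration only: the core decision of the edge resolution above,
// assuming flat per-vreg location tables. A move is required on an edge
// exactly when a vreg is live across it and its assigned location at the end
// of the predecessor differs from the one at the start of the successor;
// the MoveResolver then orders the collected moves safely. All names and
// types below are hypothetical stand-ins for the C1 classes.
struct EdgeMove { int vreg; int from_loc; int to_loc; };

static int collect_edge_moves(const bool* live_across_edge,   // indexed by vreg
                              const int* loc_at_pred_end,     // indexed by vreg
                              const int* loc_at_sux_begin,    // indexed by vreg
                              int num_vregs, EdgeMove* out) {
  int count = 0;
  for (int r = 0; r < num_vregs; r++) {
    if (live_across_edge[r] && loc_at_pred_end[r] != loc_at_sux_begin[r]) {
      EdgeMove m = { r, loc_at_pred_end[r], loc_at_sux_begin[r] };
      out[count++] = m;                         // one move per mismatching location
    }
  }
  return count;                                 // number of moves collected
}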
duke@435 1819
duke@435 1820
duke@435 1821 void LinearScan::resolve_exception_entry(BlockBegin* block, int reg_num, MoveResolver &move_resolver) {
duke@435 1822 if (interval_at(reg_num) == NULL) {
duke@435 1823 // if a phi function is never used, no interval is created -> ignore this
duke@435 1824 return;
duke@435 1825 }
duke@435 1826
duke@435 1827 Interval* interval = interval_at_block_begin(block, reg_num);
duke@435 1828 int reg = interval->assigned_reg();
duke@435 1829 int regHi = interval->assigned_regHi();
duke@435 1830
duke@435 1831 if ((reg < nof_regs && interval->always_in_memory()) ||
duke@435 1832 (use_fpu_stack_allocation() && reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg)) {
duke@435 1833 // the interval is split to get a short range that is located on the stack
duke@435 1834 // in the following two cases:
duke@435 1835 // * the interval started in memory (e.g. method parameter), but is currently in a register
duke@435 1836 // this is an optimization for exception handling that reduces the number of moves that
duke@435 1837 // are necessary for resolving the states when an exception uses this exception handler
duke@435 1838 // * the interval would be on the fpu stack at the beginning of the exception handler
duke@435 1839 // this is not allowed because of the complicated fpu stack handling on Intel
duke@435 1840
duke@435 1841 // range that will be spilled to memory
duke@435 1842 int from_op_id = block->first_lir_instruction_id();
duke@435 1843 int to_op_id = from_op_id + 1; // short live range of length 1
duke@435 1844 assert(interval->from() <= from_op_id && interval->to() >= to_op_id,
duke@435 1845 "no split allowed between exception entry and first instruction");
duke@435 1846
duke@435 1847 if (interval->from() != from_op_id) {
duke@435 1848 // the part before from_op_id is unchanged
duke@435 1849 interval = interval->split(from_op_id);
duke@435 1850 interval->assign_reg(reg, regHi);
duke@435 1851 append_interval(interval);
never@2404 1852 } else {
never@2404 1853 _needs_full_resort = true;
duke@435 1854 }
duke@435 1855 assert(interval->from() == from_op_id, "must be true now");
duke@435 1856
duke@435 1857 Interval* spilled_part = interval;
duke@435 1858 if (interval->to() != to_op_id) {
duke@435 1859 // the part after to_op_id is unchanged
duke@435 1860 spilled_part = interval->split_from_start(to_op_id);
duke@435 1861 append_interval(spilled_part);
duke@435 1862 move_resolver.add_mapping(spilled_part, interval);
duke@435 1863 }
duke@435 1864 assign_spill_slot(spilled_part);
duke@435 1865
duke@435 1866 assert(spilled_part->from() == from_op_id && spilled_part->to() == to_op_id, "just checking");
duke@435 1867 }
duke@435 1868 }
duke@435 1869
duke@435 1870 void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_resolver) {
duke@435 1871 assert(block->is_set(BlockBegin::exception_entry_flag), "should not call otherwise");
duke@435 1872 DEBUG_ONLY(move_resolver.check_empty());
duke@435 1873
duke@435 1874 // visit all registers where the live_in bit is set
duke@435 1875 int size = live_set_size();
never@739 1876 for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
duke@435 1877 resolve_exception_entry(block, r, move_resolver);
duke@435 1878 }
duke@435 1879
duke@435 1880 // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
duke@435 1881 for_each_phi_fun(block, phi,
duke@435 1882 resolve_exception_entry(block, phi->operand()->vreg_number(), move_resolver)
duke@435 1883 );
duke@435 1884
duke@435 1885 if (move_resolver.has_mappings()) {
duke@435 1886 // insert moves after first instruction
roland@3609 1887 move_resolver.set_insert_position(block->lir(), 0);
duke@435 1888 move_resolver.resolve_and_append_moves();
duke@435 1889 }
duke@435 1890 }
duke@435 1891
duke@435 1892
duke@435 1893 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, int reg_num, Phi* phi, MoveResolver &move_resolver) {
duke@435 1894 if (interval_at(reg_num) == NULL) {
duke@435 1895 // if a phi function is never used, no interval is created -> ignore this
duke@435 1896 return;
duke@435 1897 }
duke@435 1898
duke@435 1899 // the computation of to_interval is the same as in resolve_collect_mappings,
duke@435 1900 // but from_interval is more complicated because of phi functions
duke@435 1901 BlockBegin* to_block = handler->entry_block();
duke@435 1902 Interval* to_interval = interval_at_block_begin(to_block, reg_num);
duke@435 1903
duke@435 1904 if (phi != NULL) {
duke@435 1905 // phi function of the exception entry block
duke@435 1906 // no moves are created for this phi function in the LIR_Generator, so the
duke@435 1907 // interval at the throwing instruction must be searched using the operands
duke@435 1908 // of the phi function
duke@435 1909 Value from_value = phi->operand_at(handler->phi_operand());
duke@435 1910
duke@435 1911 // with phi functions it can happen that the same from_value is used in
duke@435 1912 // multiple mappings, so notify move-resolver that this is allowed
duke@435 1913 move_resolver.set_multiple_reads_allowed();
duke@435 1914
duke@435 1915 Constant* con = from_value->as_Constant();
duke@435 1916 if (con != NULL && !con->is_pinned()) {
duke@435 1917 // unpinned constants may have no register, so add mapping from constant to interval
duke@435 1918 move_resolver.add_mapping(LIR_OprFact::value_type(con->type()), to_interval);
duke@435 1919 } else {
duke@435 1920 // search split child at the throwing op_id
duke@435 1921 Interval* from_interval = interval_at_op_id(from_value->operand()->vreg_number(), throwing_op_id);
duke@435 1922 move_resolver.add_mapping(from_interval, to_interval);
duke@435 1923 }
duke@435 1924
duke@435 1925 } else {
duke@435 1926 // no phi function, so use reg_num also for from_interval
duke@435 1927 // search split child at the throwing op_id
duke@435 1928 Interval* from_interval = interval_at_op_id(reg_num, throwing_op_id);
duke@435 1929 if (from_interval != to_interval) {
duke@435 1930 // optimization to reduce number of moves: when to_interval is on stack and
duke@435 1931 // the stack slot is known to be always correct, then no move is necessary
duke@435 1932 if (!from_interval->always_in_memory() || from_interval->canonical_spill_slot() != to_interval->assigned_reg()) {
duke@435 1933 move_resolver.add_mapping(from_interval, to_interval);
duke@435 1934 }
duke@435 1935 }
duke@435 1936 }
duke@435 1937 }
duke@435 1938
duke@435 1939 void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, MoveResolver &move_resolver) {
duke@435 1940 TRACE_LINEAR_SCAN(4, tty->print_cr("resolving exception handler B%d: throwing_op_id=%d", handler->entry_block()->block_id(), throwing_op_id));
duke@435 1941
duke@435 1942 DEBUG_ONLY(move_resolver.check_empty());
duke@435 1943 assert(handler->lir_op_id() == -1, "already processed this xhandler");
duke@435 1944 DEBUG_ONLY(handler->set_lir_op_id(throwing_op_id));
duke@435 1945 assert(handler->entry_code() == NULL, "code already present");
duke@435 1946
duke@435 1947 // visit all registers where the live_in bit is set
duke@435 1948 BlockBegin* block = handler->entry_block();
duke@435 1949 int size = live_set_size();
never@739 1950 for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
duke@435 1951 resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
duke@435 1952 }
duke@435 1953
duke@435 1954 // the live_in bits are not set for phi functions of the xhandler entry, so iterate them separately
duke@435 1955 for_each_phi_fun(block, phi,
duke@435 1956 resolve_exception_edge(handler, throwing_op_id, phi->operand()->vreg_number(), phi, move_resolver)
duke@435 1957 );
duke@435 1958
duke@435 1959 if (move_resolver.has_mappings()) {
duke@435 1960 LIR_List* entry_code = new LIR_List(compilation());
duke@435 1961 move_resolver.set_insert_position(entry_code, 0);
duke@435 1962 move_resolver.resolve_and_append_moves();
duke@435 1963
duke@435 1964 entry_code->jump(handler->entry_block());
duke@435 1965 handler->set_entry_code(entry_code);
duke@435 1966 }
duke@435 1967 }
duke@435 1968
duke@435 1969
duke@435 1970 void LinearScan::resolve_exception_handlers() {
duke@435 1971 MoveResolver move_resolver(this);
duke@435 1972 LIR_OpVisitState visitor;
duke@435 1973 int num_blocks = block_count();
duke@435 1974
duke@435 1975 int i;
duke@435 1976 for (i = 0; i < num_blocks; i++) {
duke@435 1977 BlockBegin* block = block_at(i);
duke@435 1978 if (block->is_set(BlockBegin::exception_entry_flag)) {
duke@435 1979 resolve_exception_entry(block, move_resolver);
duke@435 1980 }
duke@435 1981 }
duke@435 1982
duke@435 1983 for (i = 0; i < num_blocks; i++) {
duke@435 1984 BlockBegin* block = block_at(i);
duke@435 1985 LIR_List* ops = block->lir();
duke@435 1986 int num_ops = ops->length();
duke@435 1987
duke@435 1988 // iterate all instructions of the block. skip the first because it is always a label
duke@435 1989 assert(visitor.no_operands(ops->at(0)), "first operation must always be a label");
duke@435 1990 for (int j = 1; j < num_ops; j++) {
duke@435 1991 LIR_Op* op = ops->at(j);
duke@435 1992 int op_id = op->id();
duke@435 1993
duke@435 1994 if (op_id != -1 && has_info(op_id)) {
duke@435 1995 // visit operation to collect all operands
duke@435 1996 visitor.visit(op);
duke@435 1997 assert(visitor.info_count() > 0, "should not visit otherwise");
duke@435 1998
duke@435 1999 XHandlers* xhandlers = visitor.all_xhandler();
duke@435 2000 int n = xhandlers->length();
duke@435 2001 for (int k = 0; k < n; k++) {
duke@435 2002 resolve_exception_edge(xhandlers->handler_at(k), op_id, move_resolver);
duke@435 2003 }
duke@435 2004
duke@435 2005 #ifdef ASSERT
duke@435 2006 } else {
duke@435 2007 visitor.visit(op);
duke@435 2008 assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
duke@435 2009 #endif
duke@435 2010 }
duke@435 2011 }
duke@435 2012 }
duke@435 2013 }
duke@435 2014
duke@435 2015
duke@435 2016 // ********** Phase 7: assign register numbers back to LIR
duke@435 2017 // (includes computation of debug information and oop maps)
duke@435 2018
duke@435 2019 VMReg LinearScan::vm_reg_for_interval(Interval* interval) {
duke@435 2020 VMReg reg = interval->cached_vm_reg();
duke@435 2021 if (!reg->is_valid() ) {
duke@435 2022 reg = vm_reg_for_operand(operand_for_interval(interval));
duke@435 2023 interval->set_cached_vm_reg(reg);
duke@435 2024 }
duke@435 2025 assert(reg == vm_reg_for_operand(operand_for_interval(interval)), "wrong cached value");
duke@435 2026 return reg;
duke@435 2027 }
duke@435 2028
duke@435 2029 VMReg LinearScan::vm_reg_for_operand(LIR_Opr opr) {
duke@435 2030 assert(opr->is_oop(), "currently only implemented for oop operands");
duke@435 2031 return frame_map()->regname(opr);
duke@435 2032 }
duke@435 2033
duke@435 2034
duke@435 2035 LIR_Opr LinearScan::operand_for_interval(Interval* interval) {
duke@435 2036 LIR_Opr opr = interval->cached_opr();
duke@435 2037 if (opr->is_illegal()) {
duke@435 2038 opr = calc_operand_for_interval(interval);
duke@435 2039 interval->set_cached_opr(opr);
duke@435 2040 }
duke@435 2041
duke@435 2042 assert(opr == calc_operand_for_interval(interval), "wrong cached value");
duke@435 2043 return opr;
duke@435 2044 }
duke@435 2045
duke@435 2046 LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
duke@435 2047 int assigned_reg = interval->assigned_reg();
duke@435 2048 BasicType type = interval->type();
duke@435 2049
duke@435 2050 if (assigned_reg >= nof_regs) {
duke@435 2051 // stack slot
duke@435 2052 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
duke@435 2053 return LIR_OprFact::stack(assigned_reg - nof_regs, type);
duke@435 2054
duke@435 2055 } else {
duke@435 2056 // register
duke@435 2057 switch (type) {
duke@435 2058 case T_OBJECT: {
duke@435 2059 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
duke@435 2060 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
duke@435 2061 return LIR_OprFact::single_cpu_oop(assigned_reg);
duke@435 2062 }
duke@435 2063
never@2171 2064 case T_ADDRESS: {
never@2171 2065 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
never@2171 2066 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
never@2171 2067 return LIR_OprFact::single_cpu_address(assigned_reg);
never@2171 2068 }
never@2171 2069
roland@4051 2070 case T_METADATA: {
roland@4051 2071 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
roland@4051 2072 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
roland@4051 2073 return LIR_OprFact::single_cpu_metadata(assigned_reg);
roland@4051 2074 }
roland@4051 2075
bobv@2036 2076 #ifdef __SOFTFP__
bobv@2036 2077 case T_FLOAT: // fall through
bobv@2036 2078 #endif // __SOFTFP__
duke@435 2079 case T_INT: {
duke@435 2080 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
duke@435 2081 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
duke@435 2082 return LIR_OprFact::single_cpu(assigned_reg);
duke@435 2083 }
duke@435 2084
bobv@2036 2085 #ifdef __SOFTFP__
bobv@2036 2086 case T_DOUBLE: // fall through
bobv@2036 2087 #endif // __SOFTFP__
duke@435 2088 case T_LONG: {
duke@435 2089 int assigned_regHi = interval->assigned_regHi();
duke@435 2090 assert(assigned_reg >= pd_first_cpu_reg && assigned_reg <= pd_last_cpu_reg, "no cpu register");
duke@435 2091 assert(num_physical_regs(T_LONG) == 1 ||
duke@435 2092 (assigned_regHi >= pd_first_cpu_reg && assigned_regHi <= pd_last_cpu_reg), "no cpu register");
duke@435 2093
duke@435 2094 assert(assigned_reg != assigned_regHi, "invalid allocation");
duke@435 2095 assert(num_physical_regs(T_LONG) == 1 || assigned_reg < assigned_regHi,
duke@435 2096 "register numbers must be sorted (ensure that e.g. a move from eax,ebx to ebx,eax can not occur)");
duke@435 2097 assert((assigned_regHi != any_reg) ^ (num_physical_regs(T_LONG) == 1), "must match");
duke@435 2098 if (requires_adjacent_regs(T_LONG)) {
duke@435 2099 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
duke@435 2100 }
duke@435 2101
duke@435 2102 #ifdef _LP64
duke@435 2103 return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
duke@435 2104 #else
bobv@2036 2105 #if defined(SPARC) || defined(PPC)
duke@435 2106 return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
duke@435 2107 #else
duke@435 2108 return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
never@739 2109 #endif // SPARC
never@739 2110 #endif // LP64
duke@435 2111 }
duke@435 2112
bobv@2036 2113 #ifndef __SOFTFP__
duke@435 2114 case T_FLOAT: {
never@739 2115 #ifdef X86
duke@435 2116 if (UseSSE >= 1) {
duke@435 2117 assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
duke@435 2118 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
duke@435 2119 return LIR_OprFact::single_xmm(assigned_reg - pd_first_xmm_reg);
duke@435 2120 }
duke@435 2121 #endif
duke@435 2122
duke@435 2123 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
duke@435 2124 assert(interval->assigned_regHi() == any_reg, "must not have hi register");
duke@435 2125 return LIR_OprFact::single_fpu(assigned_reg - pd_first_fpu_reg);
duke@435 2126 }
duke@435 2127
duke@435 2128 case T_DOUBLE: {
never@739 2129 #ifdef X86
duke@435 2130 if (UseSSE >= 2) {
duke@435 2131 assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
duke@435 2132 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
duke@435 2133 return LIR_OprFact::double_xmm(assigned_reg - pd_first_xmm_reg);
duke@435 2134 }
duke@435 2135 #endif
duke@435 2136
duke@435 2137 #ifdef SPARC
duke@435 2138 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
duke@435 2139 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
duke@435 2140 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
duke@435 2141 LIR_Opr result = LIR_OprFact::double_fpu(interval->assigned_regHi() - pd_first_fpu_reg, assigned_reg - pd_first_fpu_reg);
bobv@2036 2142 #elif defined(ARM)
bobv@2036 2143 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
bobv@2036 2144 assert(interval->assigned_regHi() >= pd_first_fpu_reg && interval->assigned_regHi() <= pd_last_fpu_reg, "no fpu register");
bobv@2036 2145 assert(assigned_reg % 2 == 0 && assigned_reg + 1 == interval->assigned_regHi(), "must be sequential and even");
bobv@2036 2146 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg, interval->assigned_regHi() - pd_first_fpu_reg);
duke@435 2147 #else
duke@435 2148 assert(assigned_reg >= pd_first_fpu_reg && assigned_reg <= pd_last_fpu_reg, "no fpu register");
duke@435 2149 assert(interval->assigned_regHi() == any_reg, "must not have hi register (double fpu values are stored in one register on Intel)");
duke@435 2150 LIR_Opr result = LIR_OprFact::double_fpu(assigned_reg - pd_first_fpu_reg);
duke@435 2151 #endif
duke@435 2152 return result;
duke@435 2153 }
bobv@2036 2154 #endif // __SOFTFP__
duke@435 2155
duke@435 2156 default: {
duke@435 2157 ShouldNotReachHere();
duke@435 2158 return LIR_OprFact::illegalOpr;
duke@435 2159 }
duke@435 2160 }
duke@435 2161 }
duke@435 2162 }
duke@435 2163
duke@435 2164 LIR_Opr LinearScan::canonical_spill_opr(Interval* interval) {
duke@435 2165 assert(interval->canonical_spill_slot() >= nof_regs, "canonical spill slot not set");
duke@435 2166 return LIR_OprFact::stack(interval->canonical_spill_slot() - nof_regs, interval->type());
duke@435 2167 }
duke@435 2168
duke@435 2169 LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprMode mode) {
duke@435 2170 assert(opr->is_virtual(), "should not call this otherwise");
duke@435 2171
duke@435 2172 Interval* interval = interval_at(opr->vreg_number());
duke@435 2173 assert(interval != NULL, "interval must exist");
duke@435 2174
duke@435 2175 if (op_id != -1) {
duke@435 2176 #ifdef ASSERT
duke@435 2177 BlockBegin* block = block_of_op_with_id(op_id);
duke@435 2178 if (block->number_of_sux() <= 1 && op_id == block->last_lir_instruction_id()) {
duke@435 2179 // check if spill moves could have been appended at the end of this block, but
duke@435 2180 // before the branch instruction, in which case the split child information for
duke@435 2181 // this branch would be incorrect.
duke@435 2182 LIR_OpBranch* branch = block->lir()->instructions_list()->last()->as_OpBranch();
duke@435 2183 if (branch != NULL) {
duke@435 2184 if (block->live_out().at(opr->vreg_number())) {
duke@435 2185 assert(branch->cond() == lir_cond_always, "block does not end with an unconditional jump");
duke@435 2186 assert(false, "can't get split child for the last branch of a block because the information would be incorrect (moves are inserted before the branch in resolve_data_flow)");
duke@435 2187 }
duke@435 2188 }
duke@435 2189 }
duke@435 2190 #endif
duke@435 2191
duke@435 2192 // operands are not changed when an interval is split during allocation,
duke@435 2193 // so search the right interval here
duke@435 2194 interval = split_child_at_op_id(interval, op_id, mode);
duke@435 2195 }
duke@435 2196
duke@435 2197 LIR_Opr res = operand_for_interval(interval);
duke@435 2198
never@739 2199 #ifdef X86
duke@435 2200 // new semantics for is_last_use: not only set at the definite end of an interval,
duke@435 2201 // but also before a hole.
duke@435 2202 // This may still miss some cases (e.g. for dead values), but the last-use
duke@435 2203 // information does not need to be completely correct:
duke@435 2204 // it is only needed for fpu stack allocation
duke@435 2205 if (res->is_fpu_register()) {
duke@435 2206 if (opr->is_last_use() || op_id == interval->to() || (op_id != -1 && interval->has_hole_between(op_id, op_id + 1))) {
duke@435 2207 assert(op_id == -1 || !is_block_begin(op_id), "holes at begin of block may also result from control flow");
duke@435 2208 res = res->make_last_use();
duke@435 2209 }
duke@435 2210 }
duke@435 2211 #endif
duke@435 2212
duke@435 2213 assert(!gen()->is_vreg_flag_set(opr->vreg_number(), LIRGenerator::callee_saved) || !FrameMap::is_caller_save_register(res), "bad allocation");
duke@435 2214
duke@435 2215 return res;
duke@435 2216 }
duke@435 2217
duke@435 2218
duke@435 2219 #ifdef ASSERT
duke@435 2220 // some methods used to check correctness of debug information
duke@435 2221
duke@435 2222 void assert_no_register_values(GrowableArray<ScopeValue*>* values) {
duke@435 2223 if (values == NULL) {
duke@435 2224 return;
duke@435 2225 }
duke@435 2226
duke@435 2227 for (int i = 0; i < values->length(); i++) {
duke@435 2228 ScopeValue* value = values->at(i);
duke@435 2229
duke@435 2230 if (value->is_location()) {
duke@435 2231 Location location = ((LocationValue*)value)->location();
duke@435 2232 assert(location.where() == Location::on_stack, "value is in register");
duke@435 2233 }
duke@435 2234 }
duke@435 2235 }
duke@435 2236
duke@435 2237 void assert_no_register_values(GrowableArray<MonitorValue*>* values) {
duke@435 2238 if (values == NULL) {
duke@435 2239 return;
duke@435 2240 }
duke@435 2241
duke@435 2242 for (int i = 0; i < values->length(); i++) {
duke@435 2243 MonitorValue* value = values->at(i);
duke@435 2244
duke@435 2245 if (value->owner()->is_location()) {
duke@435 2246 Location location = ((LocationValue*)value->owner())->location();
duke@435 2247 assert(location.where() == Location::on_stack, "owner is in register");
duke@435 2248 }
duke@435 2249 assert(value->basic_lock().where() == Location::on_stack, "basic_lock is in register");
duke@435 2250 }
duke@435 2251 }
duke@435 2252
duke@435 2253 void assert_equal(Location l1, Location l2) {
duke@435 2254 assert(l1.where() == l2.where() && l1.type() == l2.type() && l1.offset() == l2.offset(), "");
duke@435 2255 }
duke@435 2256
duke@435 2257 void assert_equal(ScopeValue* v1, ScopeValue* v2) {
duke@435 2258 if (v1->is_location()) {
duke@435 2259 assert(v2->is_location(), "");
duke@435 2260 assert_equal(((LocationValue*)v1)->location(), ((LocationValue*)v2)->location());
duke@435 2261 } else if (v1->is_constant_int()) {
duke@435 2262 assert(v2->is_constant_int(), "");
duke@435 2263 assert(((ConstantIntValue*)v1)->value() == ((ConstantIntValue*)v2)->value(), "");
duke@435 2264 } else if (v1->is_constant_double()) {
duke@435 2265 assert(v2->is_constant_double(), "");
duke@435 2266 assert(((ConstantDoubleValue*)v1)->value() == ((ConstantDoubleValue*)v2)->value(), "");
duke@435 2267 } else if (v1->is_constant_long()) {
duke@435 2268 assert(v2->is_constant_long(), "");
duke@435 2269 assert(((ConstantLongValue*)v1)->value() == ((ConstantLongValue*)v2)->value(), "");
duke@435 2270 } else if (v1->is_constant_oop()) {
duke@435 2271 assert(v2->is_constant_oop(), "");
duke@435 2272 assert(((ConstantOopWriteValue*)v1)->value() == ((ConstantOopWriteValue*)v2)->value(), "");
duke@435 2273 } else {
duke@435 2274 ShouldNotReachHere();
duke@435 2275 }
duke@435 2276 }
duke@435 2277
duke@435 2278 void assert_equal(MonitorValue* m1, MonitorValue* m2) {
duke@435 2279 assert_equal(m1->owner(), m2->owner());
duke@435 2280 assert_equal(m1->basic_lock(), m2->basic_lock());
duke@435 2281 }
duke@435 2282
duke@435 2283 void assert_equal(IRScopeDebugInfo* d1, IRScopeDebugInfo* d2) {
duke@435 2284 assert(d1->scope() == d2->scope(), "not equal");
duke@435 2285 assert(d1->bci() == d2->bci(), "not equal");
duke@435 2286
duke@435 2287 if (d1->locals() != NULL) {
duke@435 2288 assert(d1->locals() != NULL && d2->locals() != NULL, "not equal");
duke@435 2289 assert(d1->locals()->length() == d2->locals()->length(), "not equal");
duke@435 2290 for (int i = 0; i < d1->locals()->length(); i++) {
duke@435 2291 assert_equal(d1->locals()->at(i), d2->locals()->at(i));
duke@435 2292 }
duke@435 2293 } else {
duke@435 2294 assert(d1->locals() == NULL && d2->locals() == NULL, "not equal");
duke@435 2295 }
duke@435 2296
duke@435 2297 if (d1->expressions() != NULL) {
duke@435 2298 assert(d1->expressions() != NULL && d2->expressions() != NULL, "not equal");
duke@435 2299 assert(d1->expressions()->length() == d2->expressions()->length(), "not equal");
duke@435 2300 for (int i = 0; i < d1->expressions()->length(); i++) {
duke@435 2301 assert_equal(d1->expressions()->at(i), d2->expressions()->at(i));
duke@435 2302 }
duke@435 2303 } else {
duke@435 2304 assert(d1->expressions() == NULL && d2->expressions() == NULL, "not equal");
duke@435 2305 }
duke@435 2306
duke@435 2307 if (d1->monitors() != NULL) {
duke@435 2308 assert(d1->monitors() != NULL && d2->monitors() != NULL, "not equal");
duke@435 2309 assert(d1->monitors()->length() == d2->monitors()->length(), "not equal");
duke@435 2310 for (int i = 0; i < d1->monitors()->length(); i++) {
duke@435 2311 assert_equal(d1->monitors()->at(i), d2->monitors()->at(i));
duke@435 2312 }
duke@435 2313 } else {
duke@435 2314 assert(d1->monitors() == NULL && d2->monitors() == NULL, "not equal");
duke@435 2315 }
duke@435 2316
duke@435 2317 if (d1->caller() != NULL) {
duke@435 2318 assert(d1->caller() != NULL && d2->caller() != NULL, "not equal");
duke@435 2319 assert_equal(d1->caller(), d2->caller());
duke@435 2320 } else {
duke@435 2321 assert(d1->caller() == NULL && d2->caller() == NULL, "not equal");
duke@435 2322 }
duke@435 2323 }
duke@435 2324
duke@435 2325 void check_stack_depth(CodeEmitInfo* info, int stack_end) {
roland@2174 2326 if (info->stack()->bci() != SynchronizationEntryBCI && !info->scope()->method()->is_native()) {
roland@2174 2327 Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
duke@435 2328 switch (code) {
duke@435 2329 case Bytecodes::_ifnull : // fall through
duke@435 2330 case Bytecodes::_ifnonnull : // fall through
duke@435 2331 case Bytecodes::_ifeq : // fall through
duke@435 2332 case Bytecodes::_ifne : // fall through
duke@435 2333 case Bytecodes::_iflt : // fall through
duke@435 2334 case Bytecodes::_ifge : // fall through
duke@435 2335 case Bytecodes::_ifgt : // fall through
duke@435 2336 case Bytecodes::_ifle : // fall through
duke@435 2337 case Bytecodes::_if_icmpeq : // fall through
duke@435 2338 case Bytecodes::_if_icmpne : // fall through
duke@435 2339 case Bytecodes::_if_icmplt : // fall through
duke@435 2340 case Bytecodes::_if_icmpge : // fall through
duke@435 2341 case Bytecodes::_if_icmpgt : // fall through
duke@435 2342 case Bytecodes::_if_icmple : // fall through
duke@435 2343 case Bytecodes::_if_acmpeq : // fall through
duke@435 2344 case Bytecodes::_if_acmpne :
duke@435 2345 assert(stack_end >= -Bytecodes::depth(code), "must have non-empty expression stack at if bytecode");
duke@435 2346 break;
duke@435 2347 }
duke@435 2348 }
duke@435 2349 }
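
// Example (illustrative): Bytecodes::depth() is negative for these branches
// because they pop their operands. For instance, depth(_if_icmplt) is -2
// (two ints are popped), so the assert above requires at least two values
// left on the debug-info expression stack at that branch.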
duke@435 2350
duke@435 2351 #endif // ASSERT
duke@435 2352
duke@435 2353
duke@435 2354 IntervalWalker* LinearScan::init_compute_oop_maps() {
duke@435 2355 // setup lists of potential oops for walking
duke@435 2356 Interval* oop_intervals;
duke@435 2357 Interval* non_oop_intervals;
duke@435 2358
duke@435 2359 create_unhandled_lists(&oop_intervals, &non_oop_intervals, is_oop_interval, NULL);
duke@435 2360
duke@435 2361   // intervals that have no oops inside need not be processed
duke@435 2362   // to ensure the walker runs until the last instruction id, add a dummy interval
duke@435 2363   // with a high operation id
duke@435 2364 non_oop_intervals = new Interval(any_reg);
duke@435 2365 non_oop_intervals->add_range(max_jint - 2, max_jint - 1);
duke@435 2366
duke@435 2367 return new IntervalWalker(this, oop_intervals, non_oop_intervals);
duke@435 2368 }
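
// Note on the dummy interval above (a sketch of the walker contract, not new
// behavior): the IntervalWalker only advances while unhandled intervals
// remain, so after the last real oop interval has been handled a later
// walk_before() would have nothing to drive it. The dummy range
// [max_jint - 2, max_jint - 1) stays unhandled until the very end and keeps
// the walk well-defined for every op_id that occurs in the method.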
duke@435 2369
duke@435 2370
duke@435 2371 OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo* info, bool is_call_site) {
duke@435 2372 TRACE_LINEAR_SCAN(3, tty->print_cr("creating oop map at op_id %d", op->id()));
duke@435 2373
duke@435 2374 // walk before the current operation -> intervals that start at
duke@435 2375 // the operation (= output operands of the operation) are not
duke@435 2376 // included in the oop map
duke@435 2377 iw->walk_before(op->id());
duke@435 2378
duke@435 2379 int frame_size = frame_map()->framesize();
duke@435 2380 int arg_count = frame_map()->oop_map_arg_count();
duke@435 2381 OopMap* map = new OopMap(frame_size, arg_count);
duke@435 2382
duke@435 2383 // Check if this is a patch site.
duke@435 2384 bool is_patch_info = false;
duke@435 2385 if (op->code() == lir_move) {
duke@435 2386 assert(!is_call_site, "move must not be a call site");
duke@435 2387 assert(op->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 2388 LIR_Op1* move = (LIR_Op1*)op;
duke@435 2389
duke@435 2390 is_patch_info = move->patch_code() != lir_patch_none;
duke@435 2391 }
duke@435 2392
duke@435 2393 // Iterate through active intervals
duke@435 2394 for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
duke@435 2395 int assigned_reg = interval->assigned_reg();
duke@435 2396
duke@435 2397 assert(interval->current_from() <= op->id() && op->id() <= interval->current_to(), "interval should not be active otherwise");
duke@435 2398 assert(interval->assigned_regHi() == any_reg, "oop must be single word");
duke@435 2399 assert(interval->reg_num() >= LIR_OprDesc::vreg_base, "fixed interval found");
duke@435 2400
duke@435 2401 // Check if this range covers the instruction. Intervals that
duke@435 2402 // start or end at the current operation are not included in the
duke@435 2403 // oop map, except in the case of patching moves. For patching
duke@435 2404 // moves, any intervals which end at this instruction are included
duke@435 2405 // in the oop map since we may safepoint while doing the patch
duke@435 2406 // before we've consumed the inputs.
duke@435 2407 if (is_patch_info || op->id() < interval->current_to()) {
duke@435 2408
duke@435 2409       // caller-save registers must not be included in oop maps at calls
duke@435 2410 assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
duke@435 2411
duke@435 2412 VMReg name = vm_reg_for_interval(interval);
never@3108 2413 set_oop(map, name);
duke@435 2414
duke@435 2415       // Spill optimization: when the stack value is guaranteed to always be correct,
duke@435 2416       // it must be added to the oop map even if the interval is currently in a register
duke@435 2417 if (interval->always_in_memory() &&
duke@435 2418 op->id() > interval->spill_definition_pos() &&
duke@435 2419 interval->assigned_reg() != interval->canonical_spill_slot()) {
duke@435 2420 assert(interval->spill_definition_pos() > 0, "position not set correctly");
duke@435 2421 assert(interval->canonical_spill_slot() >= LinearScan::nof_regs, "no spill slot assigned");
duke@435 2422 assert(interval->assigned_reg() < LinearScan::nof_regs, "interval is on stack, so stack slot is registered twice");
duke@435 2423
never@3108 2424 set_oop(map, frame_map()->slot_regname(interval->canonical_spill_slot() - LinearScan::nof_regs));
duke@435 2425 }
duke@435 2426 }
duke@435 2427 }
duke@435 2428
duke@435 2429 // add oops from lock stack
duke@435 2430 assert(info->stack() != NULL, "CodeEmitInfo must always have a stack");
roland@2174 2431 int locks_count = info->stack()->total_locks_size();
duke@435 2432 for (int i = 0; i < locks_count; i++) {
never@3108 2433 set_oop(map, frame_map()->monitor_object_regname(i));
duke@435 2434 }
duke@435 2435
duke@435 2436 return map;
duke@435 2437 }
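
// Worked example (hypothetical values, for illustration only): at a non-call
// safepoint where one active oop interval currently sits in a cpu register
// but is also always_in_memory with a canonical spill slot, and one monitor
// is held, the map receives three oop entries:
//   set_oop(map, vm_reg_for_interval(interval));           // current register
//   set_oop(map, frame_map()->slot_regname(spill_slot));   // valid stack copy
//   set_oop(map, frame_map()->monitor_object_regname(0));  // locked object
// where spill_slot stands for canonical_spill_slot() - LinearScan::nof_regs.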
duke@435 2438
duke@435 2439
duke@435 2440 void LinearScan::compute_oop_map(IntervalWalker* iw, const LIR_OpVisitState &visitor, LIR_Op* op) {
duke@435 2441 assert(visitor.info_count() > 0, "no oop map needed");
duke@435 2442
duke@435 2443 // compute oop_map only for first CodeEmitInfo
duke@435 2444 // because it is (in most cases) equal for all other infos of the same operation
duke@435 2445 CodeEmitInfo* first_info = visitor.info_at(0);
duke@435 2446 OopMap* first_oop_map = compute_oop_map(iw, op, first_info, visitor.has_call());
duke@435 2447
duke@435 2448 for (int i = 0; i < visitor.info_count(); i++) {
duke@435 2449 CodeEmitInfo* info = visitor.info_at(i);
duke@435 2450 OopMap* oop_map = first_oop_map;
duke@435 2451
duke@435 2452 if (info->stack()->locks_size() != first_info->stack()->locks_size()) {
duke@435 2453       // this info has a different number of locks than the precomputed oop map
duke@435 2454 // (possible for lock and unlock instructions) -> compute oop map with
duke@435 2455 // correct lock information
duke@435 2456 oop_map = compute_oop_map(iw, op, info, visitor.has_call());
duke@435 2457 }
duke@435 2458
duke@435 2459 if (info->_oop_map == NULL) {
duke@435 2460 info->_oop_map = oop_map;
duke@435 2461 } else {
duke@435 2462       // a CodeEmitInfo cannot be shared between different LIR instructions
duke@435 2463 // because interval splitting can occur anywhere between two instructions
duke@435 2464 // and so the oop maps must be different
duke@435 2465 // -> check if the already set oop_map is exactly the one calculated for this operation
duke@435 2466 assert(info->_oop_map == oop_map, "same CodeEmitInfo used for multiple LIR instructions");
duke@435 2467 }
duke@435 2468 }
duke@435 2469 }
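
// Design note: reusing first_oop_map for the remaining infos is safe because
// all CodeEmitInfos of one LIR operation describe the same op_id and, per the
// check above, can only differ in the number of held locks (lock and unlock
// operations); exactly that case recomputes the map.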
duke@435 2470
duke@435 2471
duke@435 2472 // frequently used constants
roland@3575 2473 // Allocate them with new so they are never destroyed (otherwise, a
roland@3575 2474 // forced exit could destroy these objects while they are still in
roland@3575 2475 // use).
zgu@3900 2476 ConstantOopWriteValue* LinearScan::_oop_null_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantOopWriteValue(NULL);
zgu@3900 2477 ConstantIntValue* LinearScan::_int_m1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(-1);
zgu@3900 2478 ConstantIntValue* LinearScan::_int_0_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(0);
zgu@3900 2479 ConstantIntValue* LinearScan::_int_1_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(1);
zgu@3900 2480 ConstantIntValue* LinearScan::_int_2_scope_value = new (ResourceObj::C_HEAP, mtCompiler) ConstantIntValue(2);
zgu@3900 2481 LocationValue* _illegal_value = new (ResourceObj::C_HEAP, mtCompiler) LocationValue(Location());
duke@435 2482
duke@435 2483 void LinearScan::init_compute_debug_info() {
duke@435 2484 // cache for frequently used scope values
duke@435 2485 // (cpu registers and stack slots)
duke@435 2486 _scope_value_cache = ScopeValueArray((LinearScan::nof_cpu_regs + frame_map()->argcount() + max_spills()) * 2, NULL);
duke@435 2487 }
duke@435 2488
duke@435 2489 MonitorValue* LinearScan::location_for_monitor_index(int monitor_index) {
duke@435 2490 Location loc;
duke@435 2491 if (!frame_map()->location_for_monitor_object(monitor_index, &loc)) {
duke@435 2492 bailout("too large frame");
duke@435 2493 }
duke@435 2494 ScopeValue* object_scope_value = new LocationValue(loc);
duke@435 2495
duke@435 2496 if (!frame_map()->location_for_monitor_lock(monitor_index, &loc)) {
duke@435 2497 bailout("too large frame");
duke@435 2498 }
duke@435 2499 return new MonitorValue(object_scope_value, loc);
duke@435 2500 }
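
// A MonitorValue pairs the stack location of the locked object with the
// location of its BasicLock in the frame; deoptimization uses this pair to
// reconstruct the monitor state in the interpreter. Both locations come from
// the FrameMap, and a frame too large for the location encoding bails out.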
duke@435 2501
duke@435 2502 LocationValue* LinearScan::location_for_name(int name, Location::Type loc_type) {
duke@435 2503 Location loc;
duke@435 2504 if (!frame_map()->locations_for_slot(name, loc_type, &loc)) {
duke@435 2505 bailout("too large frame");
duke@435 2506 }
duke@435 2507 return new LocationValue(loc);
duke@435 2508 }
duke@435 2509
duke@435 2510
duke@435 2511 int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
duke@435 2512 assert(opr->is_constant(), "should not be called otherwise");
duke@435 2513
duke@435 2514 LIR_Const* c = opr->as_constant_ptr();
duke@435 2515 BasicType t = c->type();
duke@435 2516 switch (t) {
duke@435 2517 case T_OBJECT: {
duke@435 2518 jobject value = c->as_jobject();
duke@435 2519 if (value == NULL) {
roland@3575 2520 scope_values->append(_oop_null_scope_value);
duke@435 2521 } else {
duke@435 2522 scope_values->append(new ConstantOopWriteValue(c->as_jobject()));
duke@435 2523 }
duke@435 2524 return 1;
duke@435 2525 }
duke@435 2526
duke@435 2527 case T_INT: // fall through
duke@435 2528 case T_FLOAT: {
duke@435 2529 int value = c->as_jint_bits();
duke@435 2530 switch (value) {
roland@3575 2531 case -1: scope_values->append(_int_m1_scope_value); break;
roland@3575 2532 case 0: scope_values->append(_int_0_scope_value); break;
roland@3575 2533 case 1: scope_values->append(_int_1_scope_value); break;
roland@3575 2534 case 2: scope_values->append(_int_2_scope_value); break;
duke@435 2535 default: scope_values->append(new ConstantIntValue(c->as_jint_bits())); break;
duke@435 2536 }
duke@435 2537 return 1;
duke@435 2538 }
duke@435 2539
duke@435 2540 case T_LONG: // fall through
duke@435 2541 case T_DOUBLE: {
roland@1495 2542 #ifdef _LP64
roland@3575 2543 scope_values->append(_int_0_scope_value);
roland@1495 2544 scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
roland@1495 2545 #else
duke@435 2546 if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
duke@435 2547 scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
duke@435 2548 scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
duke@435 2549 } else {
duke@435 2550 scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
duke@435 2551 scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
duke@435 2552 }
roland@1495 2553 #endif
duke@435 2554 return 2;
duke@435 2555 }
duke@435 2556
roland@1732 2557 case T_ADDRESS: {
roland@1732 2558 #ifdef _LP64
roland@1732 2559 scope_values->append(new ConstantLongValue(c->as_jint()));
roland@1732 2560 #else
roland@1732 2561 scope_values->append(new ConstantIntValue(c->as_jint()));
roland@1732 2562 #endif
roland@1732 2563 return 1;
roland@1732 2564 }
roland@1732 2565
duke@435 2566 default:
duke@435 2567 ShouldNotReachHere();
never@739 2568 return -1;
duke@435 2569 }
duke@435 2570 }
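
// Illustration of the two-slot encoding above for T_LONG/T_DOUBLE constants
// (mirroring the code, platform names given as examples): on 32-bit VMs the
// constant is split into two 32-bit halves whose append order follows the
// platform word layout:
//   hi_word_offset_in_bytes > lo_word_offset_in_bytes (e.g. little endian):
//     append(hi bits); append(lo bits);
//   otherwise (e.g. big endian):
//     append(lo bits); append(hi bits);
// On _LP64 a single ConstantLongValue carries the whole value, preceded by
// _int_0_scope_value padding so that two scope values still describe the two
// Java slots.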
duke@435 2571
duke@435 2572 int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeValue*>* scope_values) {
duke@435 2573 if (opr->is_single_stack()) {
duke@435 2574 int stack_idx = opr->single_stack_ix();
duke@435 2575 bool is_oop = opr->is_oop_register();
duke@435 2576 int cache_idx = (stack_idx + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0);
duke@435 2577
duke@435 2578 ScopeValue* sv = _scope_value_cache.at(cache_idx);
duke@435 2579 if (sv == NULL) {
duke@435 2580 Location::Type loc_type = is_oop ? Location::oop : Location::normal;
duke@435 2581 sv = location_for_name(stack_idx, loc_type);
duke@435 2582 _scope_value_cache.at_put(cache_idx, sv);
duke@435 2583 }
duke@435 2584
duke@435 2585 // check if cached value is correct
duke@435 2586 DEBUG_ONLY(assert_equal(sv, location_for_name(stack_idx, is_oop ? Location::oop : Location::normal)));
duke@435 2587
duke@435 2588 scope_values->append(sv);
duke@435 2589 return 1;
duke@435 2590
duke@435 2591 } else if (opr->is_single_cpu()) {
duke@435 2592 bool is_oop = opr->is_oop_register();
duke@435 2593 int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
roland@1495 2594 Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);
duke@435 2595
duke@435 2596 ScopeValue* sv = _scope_value_cache.at(cache_idx);
duke@435 2597 if (sv == NULL) {
roland@1495 2598 Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
duke@435 2599 VMReg rname = frame_map()->regname(opr);
duke@435 2600 sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
duke@435 2601 _scope_value_cache.at_put(cache_idx, sv);
duke@435 2602 }
duke@435 2603
duke@435 2604 // check if cached value is correct
roland@1495 2605 DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));
duke@435 2606
duke@435 2607 scope_values->append(sv);
duke@435 2608 return 1;
duke@435 2609
never@739 2610 #ifdef X86
duke@435 2611 } else if (opr->is_single_xmm()) {
duke@435 2612 VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
duke@435 2613 LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
duke@435 2614
duke@435 2615 scope_values->append(sv);
duke@435 2616 return 1;
duke@435 2617 #endif
duke@435 2618
duke@435 2619 } else if (opr->is_single_fpu()) {
never@739 2620 #ifdef X86
duke@435 2621 // the exact location of fpu stack values is only known
duke@435 2622 // during fpu stack allocation, so the stack allocator object
duke@435 2623 // must be present
duke@435 2624 assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
duke@435 2625 assert(_fpu_stack_allocator != NULL, "must be present");
duke@435 2626 opr = _fpu_stack_allocator->to_fpu_stack(opr);
duke@435 2627 #endif
duke@435 2628
duke@435 2629 Location::Type loc_type = float_saved_as_double ? Location::float_in_dbl : Location::normal;
duke@435 2630 VMReg rname = frame_map()->fpu_regname(opr->fpu_regnr());
bdelsart@3195 2631 #ifndef __SOFTFP__
bdelsart@3195 2632 #ifndef VM_LITTLE_ENDIAN
bdelsart@3195 2633 if (! float_saved_as_double) {
bdelsart@3195 2634     // On a big-endian system, we may have an issue if float registers use only
bdelsart@3195 2635 // the low half of the (same) double registers.
bdelsart@3195 2636 // Both the float and the double could have the same regnr but would correspond
bdelsart@3195 2637 // to two different addresses once saved.
bdelsart@3195 2638
bdelsart@3195 2639 // get next safely (no assertion checks)
bdelsart@3195 2640 VMReg next = VMRegImpl::as_VMReg(1+rname->value());
bdelsart@3195 2641 if (next->is_reg() &&
bdelsart@3195 2642 (next->as_FloatRegister() == rname->as_FloatRegister())) {
bdelsart@3195 2643 // the back-end does use the same numbering for the double and the float
bdelsart@3195 2644 rname = next; // VMReg for the low bits, e.g. the real VMReg for the float
bdelsart@3195 2645 }
bdelsart@3195 2646 }
bdelsart@3195 2647 #endif
bdelsart@3195 2648 #endif
duke@435 2649 LocationValue* sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
duke@435 2650
duke@435 2651 scope_values->append(sv);
duke@435 2652 return 1;
duke@435 2653
duke@435 2654 } else {
duke@435 2655 // double-size operands
duke@435 2656
duke@435 2657 ScopeValue* first;
duke@435 2658 ScopeValue* second;
duke@435 2659
duke@435 2660 if (opr->is_double_stack()) {
never@739 2661 #ifdef _LP64
never@739 2662 Location loc1;
never@739 2663 Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
never@739 2664 if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
never@739 2665 bailout("too large frame");
never@739 2666 }
never@739 2667 // Does this reverse on x86 vs. sparc?
never@739 2668 first = new LocationValue(loc1);
roland@3575 2669 second = _int_0_scope_value;
never@739 2670 #else
duke@435 2671 Location loc1, loc2;
duke@435 2672 if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
duke@435 2673 bailout("too large frame");
duke@435 2674 }
duke@435 2675 first = new LocationValue(loc1);
duke@435 2676 second = new LocationValue(loc2);
never@739 2677 #endif // _LP64
duke@435 2678
duke@435 2679 } else if (opr->is_double_cpu()) {
duke@435 2680 #ifdef _LP64
duke@435 2681 VMReg rname_first = opr->as_register_lo()->as_VMReg();
duke@435 2682 first = new LocationValue(Location::new_reg_loc(Location::lng, rname_first));
roland@3575 2683 second = _int_0_scope_value;
duke@435 2684 #else
duke@435 2685 VMReg rname_first = opr->as_register_lo()->as_VMReg();
duke@435 2686 VMReg rname_second = opr->as_register_hi()->as_VMReg();
duke@435 2687
duke@435 2688 if (hi_word_offset_in_bytes < lo_word_offset_in_bytes) {
duke@435 2689       // lo/hi are swapped relative to first and second, so swap them
duke@435 2690 VMReg tmp = rname_first;
duke@435 2691 rname_first = rname_second;
duke@435 2692 rname_second = tmp;
duke@435 2693 }
duke@435 2694
duke@435 2695 first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
duke@435 2696 second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
never@739 2697 #endif //_LP64
never@739 2698
never@739 2699
never@739 2700 #ifdef X86
duke@435 2701 } else if (opr->is_double_xmm()) {
duke@435 2702 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
duke@435 2703 VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
iveresov@1804 2704 # ifdef _LP64
iveresov@1804 2705 first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
roland@3575 2706 second = _int_0_scope_value;
iveresov@1804 2707 # else
iveresov@1804 2708 first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
iveresov@1804 2709 // %%% This is probably a waste but we'll keep things as they were for now
iveresov@1804 2710 if (true) {
iveresov@1804 2711 VMReg rname_second = rname_first->next();
iveresov@1804 2712 second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
iveresov@1804 2713 }
iveresov@1804 2714 # endif
iveresov@1804 2715 #endif
iveresov@1804 2716
iveresov@1804 2717 } else if (opr->is_double_fpu()) {
iveresov@1804 2718     // On SPARC, fpu_regnrLo/fpu_regnrHi represent the two halves of
iveresov@1804 2719     // the double as float registers in the native ordering. On X86,
iveresov@1804 2720     // fpu_regnrLo is an FPU stack slot whose VMReg represents
iveresov@1804 2721 // the low-order word of the double and fpu_regnrLo + 1 is the
iveresov@1804 2722 // name for the other half. *first and *second must represent the
iveresov@1804 2723 // least and most significant words, respectively.
iveresov@1804 2724
iveresov@1804 2725 #ifdef X86
iveresov@1804 2726 // the exact location of fpu stack values is only known
iveresov@1804 2727 // during fpu stack allocation, so the stack allocator object
iveresov@1804 2728 // must be present
iveresov@1804 2729 assert(use_fpu_stack_allocation(), "should not have float stack values without fpu stack allocation (all floats must be SSE2)");
iveresov@1804 2730 assert(_fpu_stack_allocator != NULL, "must be present");
iveresov@1804 2731 opr = _fpu_stack_allocator->to_fpu_stack(opr);
iveresov@1804 2732
vladidan@2627 2733 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrLo is used)");
iveresov@1804 2734 #endif
iveresov@1804 2735 #ifdef SPARC
iveresov@1804 2736 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi() + 1, "assumed in calculation (only fpu_regnrHi is used)");
iveresov@1804 2737 #endif
bobv@2036 2738 #ifdef ARM
bobv@2036 2739 assert(opr->fpu_regnrHi() == opr->fpu_regnrLo() + 1, "assumed in calculation (only fpu_regnrLo is used)");
bobv@2036 2740 #endif
bobv@2036 2741 #ifdef PPC
bobv@2036 2742 assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation (only fpu_regnrHi is used)");
bobv@2036 2743 #endif
iveresov@1804 2744
vladidan@2627 2745 #ifdef VM_LITTLE_ENDIAN
vladidan@2627 2746 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrLo());
vladidan@2627 2747 #else
iveresov@1804 2748 VMReg rname_first = frame_map()->fpu_regname(opr->fpu_regnrHi());
vladidan@2627 2749 #endif
vladidan@2627 2750
iveresov@1804 2751 #ifdef _LP64
iveresov@1804 2752 first = new LocationValue(Location::new_reg_loc(Location::dbl, rname_first));
roland@3575 2753 second = _int_0_scope_value;
iveresov@1804 2754 #else
duke@435 2755 first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
duke@435 2756 // %%% This is probably a waste but we'll keep things as they were for now
duke@435 2757 if (true) {
duke@435 2758 VMReg rname_second = rname_first->next();
duke@435 2759 second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
duke@435 2760 }
duke@435 2761 #endif
duke@435 2762
duke@435 2763 } else {
duke@435 2764 ShouldNotReachHere();
duke@435 2765 first = NULL;
duke@435 2766 second = NULL;
duke@435 2767 }
duke@435 2768
duke@435 2769 assert(first != NULL && second != NULL, "must be set");
duke@435 2770 // The convention the interpreter uses is that the second local
duke@435 2771 // holds the first raw word of the native double representation.
duke@435 2772 // This is actually reasonable, since locals and stack arrays
duke@435 2773 // grow downwards in all implementations.
duke@435 2774 // (If, on some machine, the interpreter's Java locals or stack
duke@435 2775 // were to grow upwards, the embedded doubles would be word-swapped.)
duke@435 2776 scope_values->append(second);
duke@435 2777 scope_values->append(first);
duke@435 2778 return 2;
duke@435 2779 }
duke@435 2780 }
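
// Note on the _scope_value_cache indexing used above (derived from the code):
// each cacheable location owns two slots, a non-oop and an oop variant:
//   cpu register r: cache_idx = r * 2 + (is_oop ? 1 : 0)
//   stack slot   s: cache_idx = (s + LinearScan::nof_cpu_regs) * 2 + (is_oop ? 1 : 0)
// which matches the capacity reserved in init_compute_debug_info():
// (nof_cpu_regs + argcount + max_spills) * 2 entries. XMM and FPU registers
// are not cached; for FPU values the exact stack position is only known
// during fpu stack allocation, as noted above.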
duke@435 2781
duke@435 2782
duke@435 2783 int LinearScan::append_scope_value(int op_id, Value value, GrowableArray<ScopeValue*>* scope_values) {
duke@435 2784 if (value != NULL) {
duke@435 2785 LIR_Opr opr = value->operand();
duke@435 2786 Constant* con = value->as_Constant();
duke@435 2787
duke@435 2788     assert(con == NULL || opr->is_virtual() || opr->is_constant() || opr->is_illegal(), "assumption: Constant instructions have only constant operands (or illegal if constant is optimized away)");
duke@435 2789     assert(con != NULL || opr->is_virtual(), "assumption: non-Constant instructions have only virtual operands");
duke@435 2790
duke@435 2791 if (con != NULL && !con->is_pinned() && !opr->is_constant()) {
duke@435 2792       // Unpinned constants may have a virtual operand for part of their lifetime
duke@435 2793       // or may be illegal if they were optimized away,
duke@435 2794       // so always use a constant operand
duke@435 2795 opr = LIR_OprFact::value_type(con->type());
duke@435 2796 }
duke@435 2797 assert(opr->is_virtual() || opr->is_constant(), "other cases not allowed here");
duke@435 2798
duke@435 2799 if (opr->is_virtual()) {
duke@435 2800 LIR_OpVisitState::OprMode mode = LIR_OpVisitState::inputMode;
duke@435 2801
duke@435 2802 BlockBegin* block = block_of_op_with_id(op_id);
duke@435 2803 if (block->number_of_sux() == 1 && op_id == block->last_lir_instruction_id()) {
duke@435 2804 // generating debug information for the last instruction of a block.
duke@435 2805 // if this instruction is a branch, spill moves are inserted before this branch
duke@435 2806 // and so the wrong operand would be returned (spill moves at block boundaries are not
duke@435 2807 // considered in the live ranges of intervals)
duke@435 2808 // Solution: use the first op_id of the branch target block instead.
duke@435 2809 if (block->lir()->instructions_list()->last()->as_OpBranch() != NULL) {
duke@435 2810 if (block->live_out().at(opr->vreg_number())) {
duke@435 2811 op_id = block->sux_at(0)->first_lir_instruction_id();
duke@435 2812 mode = LIR_OpVisitState::outputMode;
duke@435 2813 }
duke@435 2814 }
duke@435 2815 }
duke@435 2816
duke@435 2817 // Get current location of operand
duke@435 2818       // The operand must be live because debug information is considered when building the intervals;
duke@435 2819 // if the interval is not live, color_lir_opr will cause an assertion failure
duke@435 2820 opr = color_lir_opr(opr, op_id, mode);
duke@435 2821       assert(!has_call(op_id) || opr->is_stack() || !is_caller_save(reg_num(opr)), "cannot have caller-save register operands at calls");
duke@435 2822
duke@435 2823 // Append to ScopeValue array
duke@435 2824 return append_scope_value_for_operand(opr, scope_values);
duke@435 2825
duke@435 2826 } else {
duke@435 2827 assert(value->as_Constant() != NULL, "all other instructions have only virtual operands");
duke@435 2828 assert(opr->is_constant(), "operand must be constant");
duke@435 2829
duke@435 2830 return append_scope_value_for_constant(opr, scope_values);
duke@435 2831 }
duke@435 2832 } else {
duke@435 2833     // append a dummy value because the real value is not needed
roland@3575 2834 scope_values->append(_illegal_value);
duke@435 2835 return 1;
duke@435 2836 }
duke@435 2837 }
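
// Illustrative scenario for the branch special case above (hypothetical
// op_ids): let block B1 end with a branch at op_id 48 and its only successor
// B2 start at op_id 50. A spill move inserted before the branch of B1 is not
// reflected in the interval's ranges, so querying the location at op_id 48
// could name the pre-move location. Asking instead for B2's first op_id (50)
// in outputMode observes the location after the resolving move.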
duke@435 2838
duke@435 2839
roland@2174 2840 IRScopeDebugInfo* LinearScan::compute_debug_info_for_scope(int op_id, IRScope* cur_scope, ValueStack* cur_state, ValueStack* innermost_state) {
duke@435 2841 IRScopeDebugInfo* caller_debug_info = NULL;
roland@2174 2842
roland@2174 2843 ValueStack* caller_state = cur_state->caller_state();
duke@435 2844 if (caller_state != NULL) {
duke@435 2845 // process recursively to compute outermost scope first
roland@2174 2846 caller_debug_info = compute_debug_info_for_scope(op_id, cur_scope->caller(), caller_state, innermost_state);
duke@435 2847 }
duke@435 2848
duke@435 2849 // initialize these to null.
duke@435 2850 // If we don't need deopt info or there are no locals, expressions or monitors,
duke@435 2851   // then these are recorded as no information, which avoids the allocation of 0-length arrays.
duke@435 2852 GrowableArray<ScopeValue*>* locals = NULL;
duke@435 2853 GrowableArray<ScopeValue*>* expressions = NULL;
duke@435 2854 GrowableArray<MonitorValue*>* monitors = NULL;
duke@435 2855
duke@435 2856 // describe local variable values
roland@2174 2857 int nof_locals = cur_state->locals_size();
duke@435 2858 if (nof_locals > 0) {
duke@435 2859 locals = new GrowableArray<ScopeValue*>(nof_locals);
duke@435 2860
duke@435 2861 int pos = 0;
duke@435 2862 while (pos < nof_locals) {
duke@435 2863 assert(pos < cur_state->locals_size(), "why not?");
duke@435 2864
duke@435 2865 Value local = cur_state->local_at(pos);
duke@435 2866 pos += append_scope_value(op_id, local, locals);
duke@435 2867
duke@435 2868 assert(locals->length() == pos, "must match");
duke@435 2869 }
duke@435 2870 assert(locals->length() == cur_scope->method()->max_locals(), "wrong number of locals");
duke@435 2871 assert(locals->length() == cur_state->locals_size(), "wrong number of locals");
roland@2174 2872 } else if (cur_scope->method()->max_locals() > 0) {
roland@2174 2873 assert(cur_state->kind() == ValueStack::EmptyExceptionState, "should be");
roland@2174 2874 nof_locals = cur_scope->method()->max_locals();
roland@2174 2875 locals = new GrowableArray<ScopeValue*>(nof_locals);
roland@2174 2876     for (int i = 0; i < nof_locals; i++) {
roland@3575 2877 locals->append(_illegal_value);
roland@2174 2878 }
roland@2174 2879 }
duke@435 2880
duke@435 2881 // describe expression stack
roland@2174 2882 int nof_stack = cur_state->stack_size();
duke@435 2883 if (nof_stack > 0) {
duke@435 2884 expressions = new GrowableArray<ScopeValue*>(nof_stack);
duke@435 2885
roland@2174 2886 int pos = 0;
roland@2174 2887 while (pos < nof_stack) {
roland@2174 2888 Value expression = cur_state->stack_at_inc(pos);
duke@435 2889 append_scope_value(op_id, expression, expressions);
duke@435 2890
roland@2174 2891 assert(expressions->length() == pos, "must match");
roland@2174 2892 }
roland@2174 2893 assert(expressions->length() == cur_state->stack_size(), "wrong number of stack entries");
duke@435 2894 }
duke@435 2895
duke@435 2896 // describe monitors
roland@2174 2897 int nof_locks = cur_state->locks_size();
duke@435 2898 if (nof_locks > 0) {
roland@2174 2899 int lock_offset = cur_state->caller_state() != NULL ? cur_state->caller_state()->total_locks_size() : 0;
duke@435 2900 monitors = new GrowableArray<MonitorValue*>(nof_locks);
roland@2174 2901 for (int i = 0; i < nof_locks; i++) {
roland@2174 2902 monitors->append(location_for_monitor_index(lock_offset + i));
roland@2174 2903 }
roland@2174 2904 }
roland@2174 2905
roland@2174 2906 return new IRScopeDebugInfo(cur_scope, cur_state->bci(), locals, expressions, monitors, caller_debug_info);
duke@435 2907 }
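
// Shape of the result (sketch): for an inlined call chain a() -> b(), the
// recursion builds the outermost scope first, so the returned innermost
// IRScopeDebugInfo links outward through caller():
//   IRScopeDebugInfo(scope b, ..., caller = IRScopeDebugInfo(scope a, ..., caller = NULL))
// Monitor indices are numbered across the whole chain: a scope's monitors
// start at its caller_state->total_locks_size(), the lock_offset used above.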
duke@435 2908
duke@435 2909
duke@435 2910 void LinearScan::compute_debug_info(CodeEmitInfo* info, int op_id) {
duke@435 2911 TRACE_LINEAR_SCAN(3, tty->print_cr("creating debug information at op_id %d", op_id));
duke@435 2912
duke@435 2913 IRScope* innermost_scope = info->scope();
duke@435 2914 ValueStack* innermost_state = info->stack();
duke@435 2915
duke@435 2916 assert(innermost_scope != NULL && innermost_state != NULL, "why is it missing?");
duke@435 2917
roland@2174 2918 DEBUG_ONLY(check_stack_depth(info, innermost_state->stack_size()));
duke@435 2919
duke@435 2920 if (info->_scope_debug_info == NULL) {
duke@435 2921 // compute debug information
roland@2174 2922 info->_scope_debug_info = compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state);
duke@435 2923 } else {
duke@435 2924 // debug information already set. Check that it is correct from the current point of view
roland@2174 2925 DEBUG_ONLY(assert_equal(info->_scope_debug_info, compute_debug_info_for_scope(op_id, innermost_scope, innermost_state, innermost_state)));
duke@435 2926 }
duke@435 2927 }
duke@435 2928
duke@435 2929
duke@435 2930 void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
duke@435 2931 LIR_OpVisitState visitor;
duke@435 2932 int num_inst = instructions->length();
duke@435 2933 bool has_dead = false;
duke@435 2934
duke@435 2935 for (int j = 0; j < num_inst; j++) {
duke@435 2936 LIR_Op* op = instructions->at(j);
duke@435 2937 if (op == NULL) { // this can happen when spill-moves are removed in eliminate_spill_moves
duke@435 2938 has_dead = true;
duke@435 2939 continue;
duke@435 2940 }
duke@435 2941 int op_id = op->id();
duke@435 2942
duke@435 2943 // visit instruction to get list of operands
duke@435 2944 visitor.visit(op);
duke@435 2945
duke@435 2946 // iterate all modes of the visitor and process all virtual operands
duke@435 2947 for_each_visitor_mode(mode) {
duke@435 2948 int n = visitor.opr_count(mode);
duke@435 2949 for (int k = 0; k < n; k++) {
duke@435 2950 LIR_Opr opr = visitor.opr_at(mode, k);
duke@435 2951 if (opr->is_virtual_register()) {
duke@435 2952 visitor.set_opr_at(mode, k, color_lir_opr(opr, op_id, mode));
duke@435 2953 }
duke@435 2954 }
duke@435 2955 }
duke@435 2956
duke@435 2957 if (visitor.info_count() > 0) {
duke@435 2958 // exception handling
duke@435 2959 if (compilation()->has_exception_handlers()) {
duke@435 2960 XHandlers* xhandlers = visitor.all_xhandler();
duke@435 2961 int n = xhandlers->length();
duke@435 2962 for (int k = 0; k < n; k++) {
duke@435 2963 XHandler* handler = xhandlers->handler_at(k);
duke@435 2964 if (handler->entry_code() != NULL) {
duke@435 2965 assign_reg_num(handler->entry_code()->instructions_list(), NULL);
duke@435 2966 }
duke@435 2967 }
duke@435 2968 } else {
duke@435 2969 assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
duke@435 2970 }
duke@435 2971
duke@435 2972 // compute oop map
duke@435 2973 assert(iw != NULL, "needed for compute_oop_map");
duke@435 2974 compute_oop_map(iw, visitor, op);
duke@435 2975
duke@435 2976 // compute debug information
duke@435 2977 if (!use_fpu_stack_allocation()) {
duke@435 2978 // compute debug information if fpu stack allocation is not needed.
duke@435 2979       // when fpu stack allocation is needed, the debug information cannot
duke@435 2980 // be computed here because the exact location of fpu operands is not known
duke@435 2981 // -> debug information is created inside the fpu stack allocator
duke@435 2982 int n = visitor.info_count();
duke@435 2983 for (int k = 0; k < n; k++) {
duke@435 2984 compute_debug_info(visitor.info_at(k), op_id);
duke@435 2985 }
duke@435 2986 }
duke@435 2987 }
duke@435 2988
duke@435 2989 #ifdef ASSERT
duke@435 2990 // make sure we haven't made the op invalid.
duke@435 2991 op->verify();
duke@435 2992 #endif
duke@435 2993
duke@435 2994 // remove useless moves
duke@435 2995 if (op->code() == lir_move) {
duke@435 2996 assert(op->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 2997 LIR_Op1* move = (LIR_Op1*)op;
duke@435 2998 LIR_Opr src = move->in_opr();
duke@435 2999 LIR_Opr dst = move->result_opr();
duke@435 3000       if (dst == src ||
duke@435 3001           (!dst->is_pointer() && !src->is_pointer() &&
duke@435 3002            src->is_same_register(dst))) {
duke@435 3003 instructions->at_put(j, NULL);
duke@435 3004 has_dead = true;
duke@435 3005 }
duke@435 3006 }
duke@435 3007 }
duke@435 3008
duke@435 3009 if (has_dead) {
duke@435 3010 // iterate all instructions of the block and remove all null-values.
duke@435 3011 int insert_point = 0;
duke@435 3012 for (int j = 0; j < num_inst; j++) {
duke@435 3013 LIR_Op* op = instructions->at(j);
duke@435 3014 if (op != NULL) {
duke@435 3015 if (insert_point != j) {
duke@435 3016 instructions->at_put(insert_point, op);
duke@435 3017 }
duke@435 3018 insert_point++;
duke@435 3019 }
duke@435 3020 }
duke@435 3021 instructions->truncate(insert_point);
duke@435 3022 }
duke@435 3023 }
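
// The null compaction above is a stable in-place filter: e.g. an instruction
// list [op0, NULL, op2, NULL, op4] becomes [op0, op2, op4] and is then
// truncated to length 3, preserving the relative order of the surviving ops.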
duke@435 3024
duke@435 3025 void LinearScan::assign_reg_num() {
duke@435 3026 TIME_LINEAR_SCAN(timer_assign_reg_num);
duke@435 3027
duke@435 3028 init_compute_debug_info();
duke@435 3029 IntervalWalker* iw = init_compute_oop_maps();
duke@435 3030
duke@435 3031 int num_blocks = block_count();
duke@435 3032 for (int i = 0; i < num_blocks; i++) {
duke@435 3033 BlockBegin* block = block_at(i);
duke@435 3034 assign_reg_num(block->lir()->instructions_list(), iw);
duke@435 3035 }
duke@435 3036 }
duke@435 3037
duke@435 3038
duke@435 3039 void LinearScan::do_linear_scan() {
duke@435 3040 NOT_PRODUCT(_total_timer.begin_method());
duke@435 3041
duke@435 3042 number_instructions();
duke@435 3043
duke@435 3044 NOT_PRODUCT(print_lir(1, "Before Register Allocation"));
duke@435 3045
duke@435 3046 compute_local_live_sets();
duke@435 3047 compute_global_live_sets();
duke@435 3048 CHECK_BAILOUT();
duke@435 3049
duke@435 3050 build_intervals();
duke@435 3051 CHECK_BAILOUT();
duke@435 3052 sort_intervals_before_allocation();
duke@435 3053
duke@435 3054 NOT_PRODUCT(print_intervals("Before Register Allocation"));
duke@435 3055 NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_before_alloc));
duke@435 3056
duke@435 3057 allocate_registers();
duke@435 3058 CHECK_BAILOUT();
duke@435 3059
duke@435 3060 resolve_data_flow();
duke@435 3061 if (compilation()->has_exception_handlers()) {
duke@435 3062 resolve_exception_handlers();
duke@435 3063 }
duke@435 3064 // fill in number of spill slots into frame_map
duke@435 3065 propagate_spill_slots();
duke@435 3066 CHECK_BAILOUT();
duke@435 3067
duke@435 3068 NOT_PRODUCT(print_intervals("After Register Allocation"));
duke@435 3069 NOT_PRODUCT(print_lir(2, "LIR after register allocation:"));
never@1157 3070
never@1157 3071 sort_intervals_after_allocation();
never@1157 3072
duke@435 3073 DEBUG_ONLY(verify());
duke@435 3074
duke@435 3075 eliminate_spill_moves();
duke@435 3076 assign_reg_num();
duke@435 3077 CHECK_BAILOUT();
duke@435 3078
duke@435 3079 NOT_PRODUCT(print_lir(2, "LIR after assignment of register numbers:"));
duke@435 3080 NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_after_asign));
duke@435 3081
duke@435 3082 { TIME_LINEAR_SCAN(timer_allocate_fpu_stack);
duke@435 3083
duke@435 3084 if (use_fpu_stack_allocation()) {
duke@435 3085 allocate_fpu_stack(); // Only has effect on Intel
duke@435 3086 NOT_PRODUCT(print_lir(2, "LIR after FPU stack allocation:"));
duke@435 3087 }
duke@435 3088 }
duke@435 3089
duke@435 3090 { TIME_LINEAR_SCAN(timer_optimize_lir);
duke@435 3091
duke@435 3092 EdgeMoveOptimizer::optimize(ir()->code());
duke@435 3093 ControlFlowOptimizer::optimize(ir()->code());
duke@435 3094 // check that cfg is still correct after optimizations
duke@435 3095 ir()->verify();
duke@435 3096 }
duke@435 3097
duke@435 3098 NOT_PRODUCT(print_lir(1, "Before Code Generation", false));
duke@435 3099 NOT_PRODUCT(LinearScanStatistic::compute(this, _stat_final));
duke@435 3100 NOT_PRODUCT(_total_timer.end_method(this));
duke@435 3101 }
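
// Ordering note (restating dependencies visible above): verify() runs after
// sort_intervals_after_allocation() but before assign_reg_num(), since
// assigning physical register numbers replaces the virtual operands that the
// interval-based verification relies on. Likewise, debug information for fpu
// operands is deferred to allocate_fpu_stack() when fpu stack allocation is
// in use.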
duke@435 3102
duke@435 3103
duke@435 3104 // ********** Printing functions
duke@435 3105
duke@435 3106 #ifndef PRODUCT
duke@435 3107
duke@435 3108 void LinearScan::print_timers(double total) {
duke@435 3109 _total_timer.print(total);
duke@435 3110 }
duke@435 3111
duke@435 3112 void LinearScan::print_statistics() {
duke@435 3113 _stat_before_alloc.print("before allocation");
duke@435 3114   _stat_after_asign.print("after register assignment");
duke@435 3115 _stat_final.print("after optimization");
duke@435 3116 }
duke@435 3117
duke@435 3118 void LinearScan::print_bitmap(BitMap& b) {
duke@435 3119 for (unsigned int i = 0; i < b.size(); i++) {
duke@435 3120 if (b.at(i)) tty->print("%d ", i);
duke@435 3121 }
duke@435 3122 tty->cr();
duke@435 3123 }
duke@435 3124
duke@435 3125 void LinearScan::print_intervals(const char* label) {
duke@435 3126 if (TraceLinearScanLevel >= 1) {
duke@435 3127 int i;
duke@435 3128 tty->cr();
duke@435 3129 tty->print_cr("%s", label);
duke@435 3130
duke@435 3131 for (i = 0; i < interval_count(); i++) {
duke@435 3132 Interval* interval = interval_at(i);
duke@435 3133 if (interval != NULL) {
duke@435 3134 interval->print();
duke@435 3135 }
duke@435 3136 }
duke@435 3137
duke@435 3138 tty->cr();
duke@435 3139 tty->print_cr("--- Basic Blocks ---");
duke@435 3140 for (i = 0; i < block_count(); i++) {
duke@435 3141 BlockBegin* block = block_at(i);
duke@435 3142 tty->print("B%d [%d, %d, %d, %d] ", block->block_id(), block->first_lir_instruction_id(), block->last_lir_instruction_id(), block->loop_index(), block->loop_depth());
duke@435 3143 }
duke@435 3144 tty->cr();
duke@435 3145 tty->cr();
duke@435 3146 }
duke@435 3147
duke@435 3148 if (PrintCFGToFile) {
duke@435 3149 CFGPrinter::print_intervals(&_intervals, label);
duke@435 3150 }
duke@435 3151 }
duke@435 3152
duke@435 3153 void LinearScan::print_lir(int level, const char* label, bool hir_valid) {
duke@435 3154 if (TraceLinearScanLevel >= level) {
duke@435 3155 tty->cr();
duke@435 3156 tty->print_cr("%s", label);
duke@435 3157 print_LIR(ir()->linear_scan_order());
duke@435 3158 tty->cr();
duke@435 3159 }
duke@435 3160
duke@435 3161 if (level == 1 && PrintCFGToFile) {
duke@435 3162 CFGPrinter::print_cfg(ir()->linear_scan_order(), label, hir_valid, true);
duke@435 3163 }
duke@435 3164 }
duke@435 3165
duke@435 3166 #endif //PRODUCT
duke@435 3167
duke@435 3168
duke@435 3169 // ********** verification functions for allocation
duke@435 3170 // (check that all intervals have a correct register and that no registers are overwritten)
duke@435 3171 #ifdef ASSERT
duke@435 3172
duke@435 3173 void LinearScan::verify() {
duke@435 3174 TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying intervals ******************************************"));
duke@435 3175 verify_intervals();
duke@435 3176
duke@435 3177 TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that no oops are in fixed intervals ****************"));
duke@435 3178 verify_no_oops_in_fixed_intervals();
duke@435 3179
duke@435 3180 TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying that unpinned constants are not alive across block boundaries"));
duke@435 3181 verify_constants();
duke@435 3182
duke@435 3183 TRACE_LINEAR_SCAN(2, tty->print_cr("********* verifying register allocation ********************************"));
duke@435 3184 verify_registers();
duke@435 3185
duke@435 3186 TRACE_LINEAR_SCAN(2, tty->print_cr("********* no errors found **********************************************"));
duke@435 3187 }
duke@435 3188
duke@435 3189 void LinearScan::verify_intervals() {
duke@435 3190 int len = interval_count();
duke@435 3191 bool has_error = false;
duke@435 3192
duke@435 3193 for (int i = 0; i < len; i++) {
duke@435 3194 Interval* i1 = interval_at(i);
duke@435 3195 if (i1 == NULL) continue;
duke@435 3196
duke@435 3197 i1->check_split_children();
duke@435 3198
duke@435 3199 if (i1->reg_num() != i) {
duke@435 3200       tty->print_cr("Interval %d is at position %d in list", i1->reg_num(), i); i1->print(); tty->cr();
duke@435 3201 has_error = true;
duke@435 3202 }
duke@435 3203
duke@435 3204 if (i1->reg_num() >= LIR_OprDesc::vreg_base && i1->type() == T_ILLEGAL) {
duke@435 3205 tty->print_cr("Interval %d has no type assigned", i1->reg_num()); i1->print(); tty->cr();
duke@435 3206 has_error = true;
duke@435 3207 }
duke@435 3208
duke@435 3209 if (i1->assigned_reg() == any_reg) {
duke@435 3210 tty->print_cr("Interval %d has no register assigned", i1->reg_num()); i1->print(); tty->cr();
duke@435 3211 has_error = true;
duke@435 3212 }
duke@435 3213
duke@435 3214 if (i1->assigned_reg() == i1->assigned_regHi()) {
duke@435 3215 tty->print_cr("Interval %d: low and high register equal", i1->reg_num()); i1->print(); tty->cr();
duke@435 3216 has_error = true;
duke@435 3217 }
duke@435 3218
duke@435 3219 if (!is_processed_reg_num(i1->assigned_reg())) {
duke@435 3220       tty->print_cr("Cannot have an Interval for an ignored register"); i1->print(); tty->cr();
duke@435 3221 has_error = true;
duke@435 3222 }
duke@435 3223
duke@435 3224 if (i1->first() == Range::end()) {
duke@435 3225 tty->print_cr("Interval %d has no Range", i1->reg_num()); i1->print(); tty->cr();
duke@435 3226 has_error = true;
duke@435 3227 }
duke@435 3228
duke@435 3229 for (Range* r = i1->first(); r != Range::end(); r = r->next()) {
duke@435 3230 if (r->from() >= r->to()) {
duke@435 3231 tty->print_cr("Interval %d has zero length range", i1->reg_num()); i1->print(); tty->cr();
duke@435 3232 has_error = true;
duke@435 3233 }
duke@435 3234 }
duke@435 3235
duke@435 3236 for (int j = i + 1; j < len; j++) {
duke@435 3237 Interval* i2 = interval_at(j);
duke@435 3238 if (i2 == NULL) continue;
duke@435 3239
duke@435 3240 // special intervals that are created in MoveResolver
duke@435 3241 // -> ignore them because the range information has no meaning there
duke@435 3242 if (i1->from() == 1 && i1->to() == 2) continue;
duke@435 3243 if (i2->from() == 1 && i2->to() == 2) continue;
duke@435 3244
duke@435 3245 int r1 = i1->assigned_reg();
duke@435 3246 int r1Hi = i1->assigned_regHi();
duke@435 3247 int r2 = i2->assigned_reg();
duke@435 3248 int r2Hi = i2->assigned_regHi();
duke@435 3249 if (i1->intersects(i2) && (r1 == r2 || r1 == r2Hi || (r1Hi != any_reg && (r1Hi == r2 || r1Hi == r2Hi)))) {
duke@435 3250 tty->print_cr("Intervals %d and %d overlap and have the same register assigned", i1->reg_num(), i2->reg_num());
duke@435 3251 i1->print(); tty->cr();
duke@435 3252 i2->print(); tty->cr();
duke@435 3253 has_error = true;
duke@435 3254 }
duke@435 3255 }
duke@435 3256 }
duke@435 3257
duke@435 3258 assert(has_error == false, "register allocation invalid");
duke@435 3259 }
duke@435 3260
duke@435 3261
duke@435 3262 void LinearScan::verify_no_oops_in_fixed_intervals() {
never@1157 3263 Interval* fixed_intervals;
never@1157 3264 Interval* other_intervals;
never@1157 3265 create_unhandled_lists(&fixed_intervals, &other_intervals, is_precolored_cpu_interval, NULL);
never@1157 3266
never@1157 3267   // to ensure the walker runs until the last instruction id, add a dummy interval
never@1157 3268 // with a high operation id
never@1157 3269 other_intervals = new Interval(any_reg);
never@1157 3270 other_intervals->add_range(max_jint - 2, max_jint - 1);
never@1157 3271 IntervalWalker* iw = new IntervalWalker(this, fixed_intervals, other_intervals);
never@1157 3272
duke@435 3273 LIR_OpVisitState visitor;
duke@435 3274 for (int i = 0; i < block_count(); i++) {
duke@435 3275 BlockBegin* block = block_at(i);
duke@435 3276
duke@435 3277 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 3278
duke@435 3279 for (int j = 0; j < instructions->length(); j++) {
duke@435 3280 LIR_Op* op = instructions->at(j);
duke@435 3281 int op_id = op->id();
duke@435 3282
duke@435 3283 visitor.visit(op);
duke@435 3284
never@1157 3285 if (visitor.info_count() > 0) {
never@1157 3286 iw->walk_before(op->id());
never@1157 3287 bool check_live = true;
never@1157 3288 if (op->code() == lir_move) {
never@1157 3289 LIR_Op1* move = (LIR_Op1*)op;
never@1157 3290 check_live = (move->patch_code() == lir_patch_none);
never@1157 3291 }
never@1157 3292 LIR_OpBranch* branch = op->as_OpBranch();
never@1157 3293 if (branch != NULL && branch->stub() != NULL && branch->stub()->is_exception_throw_stub()) {
never@1157 3294 // Don't bother checking the stub in this case since the
never@1157 3295 // exception stub will never return to normal control flow.
never@1157 3296 check_live = false;
never@1157 3297 }
never@1157 3298
never@1157 3299 // Make sure none of the fixed registers is live across an
never@1157 3300 // oopmap since we can't handle that correctly.
never@1157 3301 if (check_live) {
never@1157 3302 for (Interval* interval = iw->active_first(fixedKind);
never@1157 3303 interval != Interval::end();
never@1157 3304 interval = interval->next()) {
never@1157 3305 if (interval->current_to() > op->id() + 1) {
never@1157 3306 // This interval is live out of this op so make sure
never@1157 3307 // that this interval represents some value that's
never@1157 3308 // referenced by this op either as an input or output.
never@1157 3309 bool ok = false;
never@1157 3310 for_each_visitor_mode(mode) {
never@1157 3311 int n = visitor.opr_count(mode);
never@1157 3312 for (int k = 0; k < n; k++) {
never@1157 3313 LIR_Opr opr = visitor.opr_at(mode, k);
never@1157 3314 if (opr->is_fixed_cpu()) {
never@1157 3315 if (interval_at(reg_num(opr)) == interval) {
never@1157 3316 ok = true;
never@1157 3317 break;
never@1157 3318 }
never@1157 3319 int hi = reg_numHi(opr);
never@1157 3320 if (hi != -1 && interval_at(hi) == interval) {
never@1157 3321 ok = true;
never@1157 3322 break;
never@1157 3323 }
never@1157 3324 }
never@1157 3325 }
never@1157 3326 }
never@1157 3327 assert(ok, "fixed intervals should never be live across an oopmap point");
never@1157 3328 }
never@1157 3329 }
never@1157 3330 }
never@1157 3331 }
never@1157 3332
duke@435 3333       // oop maps at calls do not contain registers, so the check is not needed
duke@435 3334 if (!visitor.has_call()) {
duke@435 3335
duke@435 3336 for_each_visitor_mode(mode) {
duke@435 3337 int n = visitor.opr_count(mode);
duke@435 3338 for (int k = 0; k < n; k++) {
duke@435 3339 LIR_Opr opr = visitor.opr_at(mode, k);
duke@435 3340
duke@435 3341 if (opr->is_fixed_cpu() && opr->is_oop()) {
duke@435 3342 // operand is a non-virtual cpu register and contains an oop
duke@435 3343 TRACE_LINEAR_SCAN(4, op->print_on(tty); tty->print("checking operand "); opr->print(); tty->cr());
duke@435 3344
duke@435 3345 Interval* interval = interval_at(reg_num(opr));
duke@435 3346 assert(interval != NULL, "no interval");
duke@435 3347
duke@435 3348 if (mode == LIR_OpVisitState::inputMode) {
duke@435 3349 if (interval->to() >= op_id + 1) {
duke@435 3350 assert(interval->to() < op_id + 2 ||
duke@435 3351 interval->has_hole_between(op_id, op_id + 2),
duke@435 3352 "oop input operand live after instruction");
duke@435 3353 }
duke@435 3354 } else if (mode == LIR_OpVisitState::outputMode) {
duke@435 3355 if (interval->from() <= op_id - 1) {
duke@435 3356 assert(interval->has_hole_between(op_id - 1, op_id),
duke@435 3357                        "oop output operand live before instruction");
duke@435 3358 }
duke@435 3359 }
duke@435 3360 }
duke@435 3361 }
duke@435 3362 }
duke@435 3363 }
duke@435 3364 }
duke@435 3365 }
duke@435 3366 }
duke@435 3367
duke@435 3368
duke@435 3369 void LinearScan::verify_constants() {
duke@435 3370 int num_regs = num_virtual_regs();
duke@435 3371 int size = live_set_size();
duke@435 3372 int num_blocks = block_count();
duke@435 3373
duke@435 3374 for (int i = 0; i < num_blocks; i++) {
duke@435 3375 BlockBegin* block = block_at(i);
duke@435 3376 BitMap live_at_edge = block->live_in();
duke@435 3377
duke@435 3378 // visit all registers where the live_at_edge bit is set
never@739 3379 for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
duke@435 3380 TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
duke@435 3381
duke@435 3382 Value value = gen()->instruction_for_vreg(r);
duke@435 3383
duke@435 3384 assert(value != NULL, "all intervals live across block boundaries must have Value");
duke@435 3385 assert(value->operand()->is_register() && value->operand()->is_virtual(), "value must have virtual operand");
duke@435 3386 assert(value->operand()->vreg_number() == r, "register number must match");
duke@435 3387       // TKR assert(value->as_Constant() == NULL || value->is_pinned(), "only pinned constants can be alive across block boundaries");
duke@435 3388 }
duke@435 3389 }
duke@435 3390 }
duke@435 3391
duke@435 3392
duke@435 3393 class RegisterVerifier: public StackObj {
duke@435 3394 private:
duke@435 3395 LinearScan* _allocator;
duke@435 3396 BlockList _work_list; // all blocks that must be processed
duke@435 3397 IntervalsList _saved_states; // saved information of previous check
duke@435 3398
duke@435 3399 // simplified access to methods of LinearScan
duke@435 3400 Compilation* compilation() const { return _allocator->compilation(); }
duke@435 3401 Interval* interval_at(int reg_num) const { return _allocator->interval_at(reg_num); }
duke@435 3402 int reg_num(LIR_Opr opr) const { return _allocator->reg_num(opr); }
duke@435 3403
duke@435 3404 // currently, only registers are processed
duke@435 3405 int state_size() { return LinearScan::nof_regs; }
duke@435 3406
duke@435 3407 // accessors
duke@435 3408 IntervalList* state_for_block(BlockBegin* block) { return _saved_states.at(block->block_id()); }
duke@435 3409 void set_state_for_block(BlockBegin* block, IntervalList* saved_state) { _saved_states.at_put(block->block_id(), saved_state); }
duke@435 3410 void add_to_work_list(BlockBegin* block) { if (!_work_list.contains(block)) _work_list.append(block); }
duke@435 3411
duke@435 3412 // helper functions
duke@435 3413 IntervalList* copy(IntervalList* input_state);
duke@435 3414 void state_put(IntervalList* input_state, int reg, Interval* interval);
duke@435 3415 bool check_state(IntervalList* input_state, int reg, Interval* interval);
duke@435 3416
duke@435 3417 void process_block(BlockBegin* block);
duke@435 3418 void process_xhandler(XHandler* xhandler, IntervalList* input_state);
duke@435 3419 void process_successor(BlockBegin* block, IntervalList* input_state);
duke@435 3420 void process_operations(LIR_List* ops, IntervalList* input_state);
duke@435 3421
duke@435 3422 public:
duke@435 3423 RegisterVerifier(LinearScan* allocator)
duke@435 3424 : _allocator(allocator)
duke@435 3425 , _work_list(16)
duke@435 3426 , _saved_states(BlockBegin::number_of_blocks(), NULL)
duke@435 3427 { }
duke@435 3428
duke@435 3429 void verify(BlockBegin* start);
duke@435 3430 };
duke@435 3431
duke@435 3432
duke@435 3433 // entry function from LinearScan that starts the verification
duke@435 3434 void LinearScan::verify_registers() {
duke@435 3435 RegisterVerifier verifier(this);
duke@435 3436 verifier.verify(block_at(0));
duke@435 3437 }
duke@435 3438
duke@435 3439
duke@435 3440 void RegisterVerifier::verify(BlockBegin* start) {
duke@435 3441 // setup input registers (method arguments) for first block
duke@435 3442 IntervalList* input_state = new IntervalList(state_size(), NULL);
duke@435 3443 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
duke@435 3444 for (int n = 0; n < args->length(); n++) {
duke@435 3445 LIR_Opr opr = args->at(n);
duke@435 3446 if (opr->is_register()) {
duke@435 3447 Interval* interval = interval_at(reg_num(opr));
duke@435 3448
duke@435 3449 if (interval->assigned_reg() < state_size()) {
duke@435 3450 input_state->at_put(interval->assigned_reg(), interval);
duke@435 3451 }
duke@435 3452 if (interval->assigned_regHi() != LinearScan::any_reg && interval->assigned_regHi() < state_size()) {
duke@435 3453 input_state->at_put(interval->assigned_regHi(), interval);
duke@435 3454 }
duke@435 3455 }
duke@435 3456 }
duke@435 3457
duke@435 3458 set_state_for_block(start, input_state);
duke@435 3459 add_to_work_list(start);
duke@435 3460
duke@435 3461 // main loop for verification
duke@435 3462 do {
duke@435 3463 BlockBegin* block = _work_list.at(0);
duke@435 3464 _work_list.remove_at(0);
duke@435 3465
duke@435 3466 process_block(block);
duke@435 3467 } while (!_work_list.is_empty());
duke@435 3468 }
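
// The verifier is a forward dataflow fixed-point iteration: process_successor()
// below merges the outgoing state into the successor's saved state and
// re-queues the block only when the merge changed something. A saved register
// entry can only move from an interval to NULL, never back, so the iteration
// terminates.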
duke@435 3469
duke@435 3470 void RegisterVerifier::process_block(BlockBegin* block) {
duke@435 3471 TRACE_LINEAR_SCAN(2, tty->cr(); tty->print_cr("process_block B%d", block->block_id()));
duke@435 3472
duke@435 3473 // must copy state because it is modified
duke@435 3474 IntervalList* input_state = copy(state_for_block(block));
duke@435 3475
duke@435 3476 if (TraceLinearScanLevel >= 4) {
duke@435 3477 tty->print_cr("Input-State of intervals:");
duke@435 3478 tty->print(" ");
duke@435 3479 for (int i = 0; i < state_size(); i++) {
duke@435 3480 if (input_state->at(i) != NULL) {
duke@435 3481 tty->print(" %4d", input_state->at(i)->reg_num());
duke@435 3482 } else {
duke@435 3483 tty->print(" __");
duke@435 3484 }
duke@435 3485 }
duke@435 3486 tty->cr();
duke@435 3487 tty->cr();
duke@435 3488 }
duke@435 3489
duke@435 3490 // process all operations of the block
duke@435 3491 process_operations(block->lir(), input_state);
duke@435 3492
duke@435 3493 // iterate all successors
duke@435 3494 for (int i = 0; i < block->number_of_sux(); i++) {
duke@435 3495 process_successor(block->sux_at(i), input_state);
duke@435 3496 }
duke@435 3497 }
duke@435 3498
duke@435 3499 void RegisterVerifier::process_xhandler(XHandler* xhandler, IntervalList* input_state) {
duke@435 3500 TRACE_LINEAR_SCAN(2, tty->print_cr("process_xhandler B%d", xhandler->entry_block()->block_id()));
duke@435 3501
duke@435 3502 // must copy state because it is modified
duke@435 3503 input_state = copy(input_state);
duke@435 3504
duke@435 3505 if (xhandler->entry_code() != NULL) {
duke@435 3506 process_operations(xhandler->entry_code(), input_state);
duke@435 3507 }
duke@435 3508 process_successor(xhandler->entry_block(), input_state);
duke@435 3509 }
duke@435 3510
duke@435 3511 void RegisterVerifier::process_successor(BlockBegin* block, IntervalList* input_state) {
duke@435 3512 IntervalList* saved_state = state_for_block(block);
duke@435 3513
duke@435 3514 if (saved_state != NULL) {
duke@435 3515 // this block was already processed before.
duke@435 3516 // check if new input_state is consistent with saved_state
duke@435 3517
duke@435 3518 bool saved_state_correct = true;
duke@435 3519 for (int i = 0; i < state_size(); i++) {
duke@435 3520 if (input_state->at(i) != saved_state->at(i)) {
duke@435 3521 // current input_state and previous saved_state assume a different
duke@435 3522 // interval in this register -> assume that this register is invalid
duke@435 3523 if (saved_state->at(i) != NULL) {
duke@435 3524 // invalidate old calculation only if it assumed that
duke@435 3525 // register was valid. when the register was already invalid,
duke@435 3526 // then the old calculation was correct.
duke@435 3527 saved_state_correct = false;
duke@435 3528 saved_state->at_put(i, NULL);
duke@435 3529
duke@435 3530 TRACE_LINEAR_SCAN(4, tty->print_cr("process_successor B%d: invalidating slot %d", block->block_id(), i));
duke@435 3531 }
duke@435 3532 }
duke@435 3533 }
duke@435 3534
duke@435 3535 if (saved_state_correct) {
duke@435 3536 // already processed block with correct input_state
duke@435 3537 TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: previous visit already correct", block->block_id()));
duke@435 3538 } else {
duke@435 3539 // must re-visit this block
duke@435 3540 TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: must re-visit because input state changed", block->block_id()));
duke@435 3541 add_to_work_list(block);
duke@435 3542 }
duke@435 3543
duke@435 3544 } else {
duke@435 3545 // block was not processed before, so set initial input_state
duke@435 3546 TRACE_LINEAR_SCAN(2, tty->print_cr("process_successor B%d: initial visit", block->block_id()));
duke@435 3547
duke@435 3548 set_state_for_block(block, copy(input_state));
duke@435 3549 add_to_work_list(block);
duke@435 3550 }
duke@435 3551 }
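
// Worked example of the merge logic above (block and interval numbers are
// illustrative): assume B3 has two predecessors. The first visit, coming from
// B1, stores state [r0 -> I7, r1 -> I9] and queues B3. When B3 is reached
// again from B2 with [r0 -> I7, r1 -> I12], slot r1 disagrees and is
// invalidated:
//
//   saved_state: [r0 -> I7, r1 -> I9  ]
//   input_state: [r0 -> I7, r1 -> I12 ]
//   merged:      [r0 -> I7, r1 -> NULL]   -> B3 is re-queued
//
// A slot only ever changes from an interval to NULL, so each slot is
// invalidated at most once per block and the worklist iteration terminates.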
duke@435 3552
duke@435 3553
duke@435 3554 IntervalList* RegisterVerifier::copy(IntervalList* input_state) {
duke@435 3555 IntervalList* copy_state = new IntervalList(input_state->length());
duke@435 3556 copy_state->push_all(input_state);
duke@435 3557 return copy_state;
duke@435 3558 }
duke@435 3559
duke@435 3560 void RegisterVerifier::state_put(IntervalList* input_state, int reg, Interval* interval) {
duke@435 3561 if (reg != LinearScan::any_reg && reg < state_size()) {
duke@435 3562 if (interval != NULL) {
duke@435 3563 TRACE_LINEAR_SCAN(4, tty->print_cr(" reg[%d] = %d", reg, interval->reg_num()));
duke@435 3564 } else if (input_state->at(reg) != NULL) {
duke@435 3565 TRACE_LINEAR_SCAN(4, tty->print_cr(" reg[%d] = NULL", reg));
duke@435 3566 }
duke@435 3567
duke@435 3568 input_state->at_put(reg, interval);
duke@435 3569 }
duke@435 3570 }
duke@435 3571
duke@435 3572 bool RegisterVerifier::check_state(IntervalList* input_state, int reg, Interval* interval) {
duke@435 3573 if (reg != LinearScan::any_reg && reg < state_size()) {
duke@435 3574 if (input_state->at(reg) != interval) {
duke@435 3575 tty->print_cr("!! Error in register allocation: register %d does not contain interval %d", reg, interval->reg_num());
duke@435 3576 return true;
duke@435 3577 }
duke@435 3578 }
duke@435 3579 return false;
duke@435 3580 }
duke@435 3581
duke@435 3582 void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_state) {
duke@435 3583 // visit all instructions of the block
duke@435 3584 LIR_OpVisitState visitor;
duke@435 3585 bool has_error = false;
duke@435 3586
duke@435 3587 for (int i = 0; i < ops->length(); i++) {
duke@435 3588 LIR_Op* op = ops->at(i);
duke@435 3589 visitor.visit(op);
duke@435 3590
duke@435 3591 TRACE_LINEAR_SCAN(4, op->print_on(tty));
duke@435 3592
duke@435 3593 // check if input operands are correct
duke@435 3594 int j;
duke@435 3595 int n = visitor.opr_count(LIR_OpVisitState::inputMode);
duke@435 3596 for (j = 0; j < n; j++) {
duke@435 3597 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::inputMode, j);
duke@435 3598 if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
duke@435 3599 Interval* interval = interval_at(reg_num(opr));
duke@435 3600 if (op->id() != -1) {
duke@435 3601 interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::inputMode);
duke@435 3602 }
duke@435 3603
duke@435 3604 has_error |= check_state(input_state, interval->assigned_reg(), interval->split_parent());
duke@435 3605 has_error |= check_state(input_state, interval->assigned_regHi(), interval->split_parent());
duke@435 3606
duke@435 3607 // When an operand is marked with is_last_use, then the fpu stack allocator
duke@435 3608 // removes the register from the fpu stack -> the register contains no value
duke@435 3609 if (opr->is_last_use()) {
duke@435 3610 state_put(input_state, interval->assigned_reg(), NULL);
duke@435 3611 state_put(input_state, interval->assigned_regHi(), NULL);
duke@435 3612 }
duke@435 3613 }
duke@435 3614 }
duke@435 3615
duke@435 3616 // invalidate all caller save registers at calls
duke@435 3617 if (visitor.has_call()) {
iveresov@2344 3618 for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
duke@435 3619 state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
duke@435 3620 }
duke@435 3621 for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
duke@435 3622 state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
duke@435 3623 }
duke@435 3624
never@739 3625 #ifdef X86
duke@435 3626 for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
duke@435 3627 state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
duke@435 3628 }
duke@435 3629 #endif
duke@435 3630 }
duke@435 3631
duke@435 3632 // process xhandler before output and temp operands
duke@435 3633 XHandlers* xhandlers = visitor.all_xhandler();
duke@435 3634 n = xhandlers->length();
duke@435 3635 for (int k = 0; k < n; k++) {
duke@435 3636 process_xhandler(xhandlers->handler_at(k), input_state);
duke@435 3637 }
duke@435 3638
duke@435 3639     // set temp operands (some operations also use their temp operands as output operands, so they cannot be set to NULL)
duke@435 3640 n = visitor.opr_count(LIR_OpVisitState::tempMode);
duke@435 3641 for (j = 0; j < n; j++) {
duke@435 3642 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::tempMode, j);
duke@435 3643 if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
duke@435 3644 Interval* interval = interval_at(reg_num(opr));
duke@435 3645 if (op->id() != -1) {
duke@435 3646 interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::tempMode);
duke@435 3647 }
duke@435 3648
duke@435 3649 state_put(input_state, interval->assigned_reg(), interval->split_parent());
duke@435 3650 state_put(input_state, interval->assigned_regHi(), interval->split_parent());
duke@435 3651 }
duke@435 3652 }
duke@435 3653
duke@435 3654 // set output operands
duke@435 3655 n = visitor.opr_count(LIR_OpVisitState::outputMode);
duke@435 3656 for (j = 0; j < n; j++) {
duke@435 3657 LIR_Opr opr = visitor.opr_at(LIR_OpVisitState::outputMode, j);
duke@435 3658 if (opr->is_register() && LinearScan::is_processed_reg_num(reg_num(opr))) {
duke@435 3659 Interval* interval = interval_at(reg_num(opr));
duke@435 3660 if (op->id() != -1) {
duke@435 3661 interval = interval->split_child_at_op_id(op->id(), LIR_OpVisitState::outputMode);
duke@435 3662 }
duke@435 3663
duke@435 3664 state_put(input_state, interval->assigned_reg(), interval->split_parent());
duke@435 3665 state_put(input_state, interval->assigned_regHi(), interval->split_parent());
duke@435 3666 }
duke@435 3667 }
duke@435 3668 }
duke@435 3669 assert(has_error == false, "Error in register allocation");
duke@435 3670 }
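
// Summary of the per-operation checking order implemented above:
//   1. each input interval must still occupy its assigned register, i.e.
//      input_state[assigned_reg] == interval->split_parent(),
//   2. if the op is a call, all caller-saved cpu/fpu (and on x86 xmm)
//      registers are invalidated,
//   3. the state is propagated into exception handlers before temps and
//      outputs are written, because an exception is thrown before the
//      result of the op exists,
//   4. temp and output intervals are recorded in input_state so that later
//      inputs are checked against them.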
duke@435 3671
duke@435 3672 #endif // ASSERT
duke@435 3673
duke@435 3674
duke@435 3675
duke@435 3676 // **** Implementation of MoveResolver ******************************
duke@435 3677
duke@435 3678 MoveResolver::MoveResolver(LinearScan* allocator) :
duke@435 3679 _allocator(allocator),
duke@435 3680 _multiple_reads_allowed(false),
duke@435 3681 _mapping_from(8),
duke@435 3682 _mapping_from_opr(8),
duke@435 3683 _mapping_to(8),
duke@435 3684 _insert_list(NULL),
duke@435 3685 _insert_idx(-1),
duke@435 3686 _insertion_buffer()
duke@435 3687 {
duke@435 3688 for (int i = 0; i < LinearScan::nof_regs; i++) {
duke@435 3689 _register_blocked[i] = 0;
duke@435 3690 }
duke@435 3691 DEBUG_ONLY(check_empty());
duke@435 3692 }
duke@435 3693
duke@435 3694
duke@435 3695 #ifdef ASSERT
duke@435 3696
duke@435 3697 void MoveResolver::check_empty() {
duke@435 3698 assert(_mapping_from.length() == 0 && _mapping_from_opr.length() == 0 && _mapping_to.length() == 0, "list must be empty before and after processing");
duke@435 3699 for (int i = 0; i < LinearScan::nof_regs; i++) {
duke@435 3700 assert(register_blocked(i) == 0, "register map must be empty before and after processing");
duke@435 3701 }
duke@435 3702 assert(_multiple_reads_allowed == false, "must have default value");
duke@435 3703 }
duke@435 3704
duke@435 3705 void MoveResolver::verify_before_resolve() {
duke@435 3706 assert(_mapping_from.length() == _mapping_from_opr.length(), "length must be equal");
duke@435 3707 assert(_mapping_from.length() == _mapping_to.length(), "length must be equal");
duke@435 3708 assert(_insert_list != NULL && _insert_idx != -1, "insert position not set");
duke@435 3709
duke@435 3710 int i, j;
duke@435 3711 if (!_multiple_reads_allowed) {
duke@435 3712 for (i = 0; i < _mapping_from.length(); i++) {
duke@435 3713 for (j = i + 1; j < _mapping_from.length(); j++) {
duke@435 3714 assert(_mapping_from.at(i) == NULL || _mapping_from.at(i) != _mapping_from.at(j), "cannot read from same interval twice");
duke@435 3715 }
duke@435 3716 }
duke@435 3717 }
duke@435 3718
duke@435 3719 for (i = 0; i < _mapping_to.length(); i++) {
duke@435 3720 for (j = i + 1; j < _mapping_to.length(); j++) {
duke@435 3721 assert(_mapping_to.at(i) != _mapping_to.at(j), "cannot write to same interval twice");
duke@435 3722 }
duke@435 3723 }
duke@435 3724
duke@435 3725
duke@435 3726 BitMap used_regs(LinearScan::nof_regs + allocator()->frame_map()->argcount() + allocator()->max_spills());
duke@435 3727 used_regs.clear();
duke@435 3728 if (!_multiple_reads_allowed) {
duke@435 3729 for (i = 0; i < _mapping_from.length(); i++) {
duke@435 3730 Interval* it = _mapping_from.at(i);
duke@435 3731 if (it != NULL) {
duke@435 3732 assert(!used_regs.at(it->assigned_reg()), "cannot read from same register twice");
duke@435 3733 used_regs.set_bit(it->assigned_reg());
duke@435 3734
duke@435 3735 if (it->assigned_regHi() != LinearScan::any_reg) {
duke@435 3736 assert(!used_regs.at(it->assigned_regHi()), "cannot read from same register twice");
duke@435 3737 used_regs.set_bit(it->assigned_regHi());
duke@435 3738 }
duke@435 3739 }
duke@435 3740 }
duke@435 3741 }
duke@435 3742
duke@435 3743 used_regs.clear();
duke@435 3744 for (i = 0; i < _mapping_to.length(); i++) {
duke@435 3745 Interval* it = _mapping_to.at(i);
duke@435 3746 assert(!used_regs.at(it->assigned_reg()), "cannot write to same register twice");
duke@435 3747 used_regs.set_bit(it->assigned_reg());
duke@435 3748
duke@435 3749 if (it->assigned_regHi() != LinearScan::any_reg) {
duke@435 3750 assert(!used_regs.at(it->assigned_regHi()), "cannot write to same register twice");
duke@435 3751 used_regs.set_bit(it->assigned_regHi());
duke@435 3752 }
duke@435 3753 }
duke@435 3754
duke@435 3755 used_regs.clear();
duke@435 3756 for (i = 0; i < _mapping_from.length(); i++) {
duke@435 3757 Interval* it = _mapping_from.at(i);
duke@435 3758 if (it != NULL && it->assigned_reg() >= LinearScan::nof_regs) {
duke@435 3759 used_regs.set_bit(it->assigned_reg());
duke@435 3760 }
duke@435 3761 }
duke@435 3762 for (i = 0; i < _mapping_to.length(); i++) {
duke@435 3763 Interval* it = _mapping_to.at(i);
duke@435 3764 assert(!used_regs.at(it->assigned_reg()) || it->assigned_reg() == _mapping_from.at(i)->assigned_reg(), "stack slots used in _mapping_from must be disjoint to _mapping_to");
duke@435 3765 }
duke@435 3766 }
duke@435 3767
duke@435 3768 #endif // ASSERT
duke@435 3769
duke@435 3770
duke@435 3771 // mark assigned_reg and assigned_regHi of the interval as blocked
duke@435 3772 void MoveResolver::block_registers(Interval* it) {
duke@435 3773 int reg = it->assigned_reg();
duke@435 3774 if (reg < LinearScan::nof_regs) {
duke@435 3775 assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
duke@435 3776 set_register_blocked(reg, 1);
duke@435 3777 }
duke@435 3778 reg = it->assigned_regHi();
duke@435 3779 if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
duke@435 3780 assert(_multiple_reads_allowed || register_blocked(reg) == 0, "register already marked as used");
duke@435 3781 set_register_blocked(reg, 1);
duke@435 3782 }
duke@435 3783 }
duke@435 3784
duke@435 3785 // mark assigned_reg and assigned_regHi of the interval as unblocked
duke@435 3786 void MoveResolver::unblock_registers(Interval* it) {
duke@435 3787 int reg = it->assigned_reg();
duke@435 3788 if (reg < LinearScan::nof_regs) {
duke@435 3789 assert(register_blocked(reg) > 0, "register already marked as unused");
duke@435 3790 set_register_blocked(reg, -1);
duke@435 3791 }
duke@435 3792 reg = it->assigned_regHi();
duke@435 3793 if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
duke@435 3794 assert(register_blocked(reg) > 0, "register already marked as unused");
duke@435 3795 set_register_blocked(reg, -1);
duke@435 3796 }
duke@435 3797 }
duke@435 3798
duke@435 3799 // check if assigned_reg and assigned_regHi of the to-interval are not blocked (or only blocked by from)
duke@435 3800 bool MoveResolver::save_to_process_move(Interval* from, Interval* to) {
duke@435 3801 int from_reg = -1;
duke@435 3802 int from_regHi = -1;
duke@435 3803 if (from != NULL) {
duke@435 3804 from_reg = from->assigned_reg();
duke@435 3805 from_regHi = from->assigned_regHi();
duke@435 3806 }
duke@435 3807
duke@435 3808 int reg = to->assigned_reg();
duke@435 3809 if (reg < LinearScan::nof_regs) {
duke@435 3810 if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
duke@435 3811 return false;
duke@435 3812 }
duke@435 3813 }
duke@435 3814 reg = to->assigned_regHi();
duke@435 3815 if (reg != LinearScan::any_reg && reg < LinearScan::nof_regs) {
duke@435 3816 if (register_blocked(reg) > 1 || (register_blocked(reg) == 1 && reg != from_reg && reg != from_regHi)) {
duke@435 3817 return false;
duke@435 3818 }
duke@435 3819 }
duke@435 3820
duke@435 3821 return true;
duke@435 3822 }
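
// Note: register_blocked is a per-register counter, not a flag.
// block_registers increments it and unblock_registers decrements it, so with
// _multiple_reads_allowed the same source register can be counted twice.
// Example (register numbers illustrative): with pending moves r1 -> r2 and
// r3 -> r1, register_blocked(r1) == 1, so save_to_process_move for the move
// r3 -> r1 returns false until r1 -> r2 has been emitted and r1 unblocked;
// a move whose target equals its own source (from_reg/from_regHi) is always
// allowed.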
duke@435 3823
duke@435 3824
duke@435 3825 void MoveResolver::create_insertion_buffer(LIR_List* list) {
duke@435 3826 assert(!_insertion_buffer.initialized(), "overwriting existing buffer");
duke@435 3827 _insertion_buffer.init(list);
duke@435 3828 }
duke@435 3829
duke@435 3830 void MoveResolver::append_insertion_buffer() {
duke@435 3831 if (_insertion_buffer.initialized()) {
duke@435 3832 _insertion_buffer.lir_list()->append(&_insertion_buffer);
duke@435 3833 }
duke@435 3834 assert(!_insertion_buffer.initialized(), "must be uninitialized now");
duke@435 3835
duke@435 3836 _insert_list = NULL;
duke@435 3837 _insert_idx = -1;
duke@435 3838 }
duke@435 3839
duke@435 3840 void MoveResolver::insert_move(Interval* from_interval, Interval* to_interval) {
duke@435 3841 assert(from_interval->reg_num() != to_interval->reg_num(), "from and to interval equal");
duke@435 3842 assert(from_interval->type() == to_interval->type(), "move between different types");
duke@435 3843 assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
duke@435 3844 assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
duke@435 3845
duke@435 3846 LIR_Opr from_opr = LIR_OprFact::virtual_register(from_interval->reg_num(), from_interval->type());
duke@435 3847 LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
duke@435 3848
duke@435 3849 if (!_multiple_reads_allowed) {
duke@435 3850     // the last_use flag is an optimization for FPU stack allocation. When the same
duke@435 3851     // input interval is used in more than one move, it is too difficult to determine
duke@435 3852     // whether this move is really the last use.
duke@435 3853 from_opr = from_opr->make_last_use();
duke@435 3854 }
duke@435 3855 _insertion_buffer.move(_insert_idx, from_opr, to_opr);
duke@435 3856
duke@435 3857 TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: inserted move from register %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
duke@435 3858 }
duke@435 3859
duke@435 3860 void MoveResolver::insert_move(LIR_Opr from_opr, Interval* to_interval) {
duke@435 3861 assert(from_opr->type() == to_interval->type(), "move between different types");
duke@435 3862 assert(_insert_list != NULL && _insert_idx != -1, "must setup insert position first");
duke@435 3863 assert(_insertion_buffer.lir_list() == _insert_list, "wrong insertion buffer");
duke@435 3864
duke@435 3865 LIR_Opr to_opr = LIR_OprFact::virtual_register(to_interval->reg_num(), to_interval->type());
duke@435 3866 _insertion_buffer.move(_insert_idx, from_opr, to_opr);
duke@435 3867
duke@435 3868 TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: inserted move from constant "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
duke@435 3869 }
duke@435 3870
duke@435 3871
duke@435 3872 void MoveResolver::resolve_mappings() {
duke@435 3873 TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: resolving mappings for Block B%d, index %d", _insert_list->block() != NULL ? _insert_list->block()->block_id() : -1, _insert_idx));
duke@435 3874 DEBUG_ONLY(verify_before_resolve());
duke@435 3875
duke@435 3876 // Block all registers that are used as input operands of a move.
duke@435 3877 // When a register is blocked, no move to this register is emitted.
duke@435 3878 // This is necessary for detecting cycles in moves.
duke@435 3879 int i;
duke@435 3880 for (i = _mapping_from.length() - 1; i >= 0; i--) {
duke@435 3881 Interval* from_interval = _mapping_from.at(i);
duke@435 3882 if (from_interval != NULL) {
duke@435 3883 block_registers(from_interval);
duke@435 3884 }
duke@435 3885 }
duke@435 3886
duke@435 3887 int spill_candidate = -1;
duke@435 3888 while (_mapping_from.length() > 0) {
duke@435 3889 bool processed_interval = false;
duke@435 3890
duke@435 3891 for (i = _mapping_from.length() - 1; i >= 0; i--) {
duke@435 3892 Interval* from_interval = _mapping_from.at(i);
duke@435 3893 Interval* to_interval = _mapping_to.at(i);
duke@435 3894
duke@435 3895 if (save_to_process_move(from_interval, to_interval)) {
duke@435 3896         // this interval can be processed because target is free
duke@435 3897 if (from_interval != NULL) {
duke@435 3898 insert_move(from_interval, to_interval);
duke@435 3899 unblock_registers(from_interval);
duke@435 3900 } else {
duke@435 3901 insert_move(_mapping_from_opr.at(i), to_interval);
duke@435 3902 }
duke@435 3903 _mapping_from.remove_at(i);
duke@435 3904 _mapping_from_opr.remove_at(i);
duke@435 3905 _mapping_to.remove_at(i);
duke@435 3906
duke@435 3907 processed_interval = true;
duke@435 3908 } else if (from_interval != NULL && from_interval->assigned_reg() < LinearScan::nof_regs) {
duke@435 3909 // this interval cannot be processed now because target is not free
duke@435 3910 // it starts in a register, so it is a possible candidate for spilling
duke@435 3911 spill_candidate = i;
duke@435 3912 }
duke@435 3913 }
duke@435 3914
duke@435 3915 if (!processed_interval) {
duke@435 3916 // no move could be processed because there is a cycle in the move list
duke@435 3917 // (e.g. r1 -> r2, r2 -> r1), so one interval must be spilled to memory
duke@435 3918 assert(spill_candidate != -1, "no interval in register for spilling found");
duke@435 3919
duke@435 3920 // create a new spill interval and assign a stack slot to it
duke@435 3921 Interval* from_interval = _mapping_from.at(spill_candidate);
duke@435 3922 Interval* spill_interval = new Interval(-1);
duke@435 3923 spill_interval->set_type(from_interval->type());
duke@435 3924
duke@435 3925 // add a dummy range because real position is difficult to calculate
duke@435 3926 // Note: this range is a special case when the integrity of the allocation is checked
duke@435 3927 spill_interval->add_range(1, 2);
duke@435 3928
duke@435 3929       // do not allocate a new spill slot for the temporary interval, but
duke@435 3930       // use the spill slot assigned to from_interval. Otherwise moves from
duke@435 3931       // one stack slot to another could happen (not allowed by LIR_Assembler)
duke@435 3932 int spill_slot = from_interval->canonical_spill_slot();
duke@435 3933 if (spill_slot < 0) {
duke@435 3934 spill_slot = allocator()->allocate_spill_slot(type2spill_size[spill_interval->type()] == 2);
duke@435 3935 from_interval->set_canonical_spill_slot(spill_slot);
duke@435 3936 }
duke@435 3937 spill_interval->assign_reg(spill_slot);
duke@435 3938 allocator()->append_interval(spill_interval);
duke@435 3939
duke@435 3940 TRACE_LINEAR_SCAN(4, tty->print_cr("created new Interval %d for spilling", spill_interval->reg_num()));
duke@435 3941
duke@435 3942 // insert a move from register to stack and update the mapping
duke@435 3943 insert_move(from_interval, spill_interval);
duke@435 3944 _mapping_from.at_put(spill_candidate, spill_interval);
duke@435 3945 unblock_registers(from_interval);
duke@435 3946 }
duke@435 3947 }
duke@435 3948
duke@435 3949 // reset to default value
duke@435 3950 _multiple_reads_allowed = false;
duke@435 3951
duke@435 3952 // check that all intervals have been processed
duke@435 3953 DEBUG_ONLY(check_empty());
duke@435 3954 }
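
// Worked example of the cycle case (register numbers illustrative): the
// pending mappings r1 -> r2 and r2 -> r1 block each other's target, so no
// move can be emitted directly. One source in a register, say r1, becomes
// spill_candidate; a temporary interval T on a stack slot breaks the cycle:
//
//   r1 -> T    (spill move inserted immediately, r1 is unblocked)
//   r2 -> r1   (now processable in the next loop iteration)
//   T  -> r2   (the entry at spill_candidate was remapped from r1 to T)
//
// This is the classic sequentialization of a parallel copy with one
// temporary, except that the temporary lives on the stack.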
duke@435 3955
duke@435 3956
duke@435 3957 void MoveResolver::set_insert_position(LIR_List* insert_list, int insert_idx) {
duke@435 3958 TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: setting insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
duke@435 3959 assert(_insert_list == NULL && _insert_idx == -1, "use move_insert_position instead of set_insert_position when data already set");
duke@435 3960
duke@435 3961 create_insertion_buffer(insert_list);
duke@435 3962 _insert_list = insert_list;
duke@435 3963 _insert_idx = insert_idx;
duke@435 3964 }
duke@435 3965
duke@435 3966 void MoveResolver::move_insert_position(LIR_List* insert_list, int insert_idx) {
duke@435 3967 TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: moving insert position to Block B%d, index %d", insert_list->block() != NULL ? insert_list->block()->block_id() : -1, insert_idx));
duke@435 3968
duke@435 3969 if (_insert_list != NULL && (insert_list != _insert_list || insert_idx != _insert_idx)) {
duke@435 3970 // insert position changed -> resolve current mappings
duke@435 3971 resolve_mappings();
duke@435 3972 }
duke@435 3973
duke@435 3974 if (insert_list != _insert_list) {
duke@435 3975 // block changed -> append insertion_buffer because it is
duke@435 3976 // bound to a specific block and create a new insertion_buffer
duke@435 3977 append_insertion_buffer();
duke@435 3978 create_insertion_buffer(insert_list);
duke@435 3979 }
duke@435 3980
duke@435 3981 _insert_list = insert_list;
duke@435 3982 _insert_idx = insert_idx;
duke@435 3983 }
duke@435 3984
duke@435 3985 void MoveResolver::add_mapping(Interval* from_interval, Interval* to_interval) {
duke@435 3986 TRACE_LINEAR_SCAN(4, tty->print_cr("MoveResolver: adding mapping from %d (%d, %d) to %d (%d, %d)", from_interval->reg_num(), from_interval->assigned_reg(), from_interval->assigned_regHi(), to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
duke@435 3987
duke@435 3988 _mapping_from.append(from_interval);
duke@435 3989 _mapping_from_opr.append(LIR_OprFact::illegalOpr);
duke@435 3990 _mapping_to.append(to_interval);
duke@435 3991 }
duke@435 3992
duke@435 3993
duke@435 3994 void MoveResolver::add_mapping(LIR_Opr from_opr, Interval* to_interval) {
duke@435 3995 TRACE_LINEAR_SCAN(4, tty->print("MoveResolver: adding mapping from "); from_opr->print(); tty->print_cr(" to %d (%d, %d)", to_interval->reg_num(), to_interval->assigned_reg(), to_interval->assigned_regHi()));
duke@435 3996 assert(from_opr->is_constant(), "only for constants");
duke@435 3997
duke@435 3998 _mapping_from.append(NULL);
duke@435 3999 _mapping_from_opr.append(from_opr);
duke@435 4000 _mapping_to.append(to_interval);
duke@435 4001 }
duke@435 4002
duke@435 4003 void MoveResolver::resolve_and_append_moves() {
duke@435 4004 if (has_mappings()) {
duke@435 4005 resolve_mappings();
duke@435 4006 }
duke@435 4007 append_insertion_buffer();
duke@435 4008 }
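
// Typical call sequence (sketch of how the resolver is driven, e.g. by
// LinearScan::resolve_data_flow):
//
//   MoveResolver resolver(allocator);
//   resolver.set_insert_position(block->lir(), insert_idx);
//   resolver.add_mapping(from_interval, to_interval);  // register/stack move
//   resolver.add_mapping(constant_opr, to_interval);   // constant load
//   resolver.resolve_and_append_moves();               // emit and flush
//
// All mappings added for one insert position form a single parallel copy;
// resolve_mappings orders the moves (spilling on cycles) before the
// insertion buffer is appended to the LIR list.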
duke@435 4009
duke@435 4010
duke@435 4011
duke@435 4012 // **** Implementation of Range *************************************
duke@435 4013
duke@435 4014 Range::Range(int from, int to, Range* next) :
duke@435 4015 _from(from),
duke@435 4016 _to(to),
duke@435 4017 _next(next)
duke@435 4018 {
duke@435 4019 }
duke@435 4020
duke@435 4021 // initialize sentinel
duke@435 4022 Range* Range::_end = NULL;
iveresov@1939 4023 void Range::initialize(Arena* arena) {
iveresov@1939 4024 _end = new (arena) Range(max_jint, max_jint, NULL);
duke@435 4025 }
duke@435 4026
duke@435 4027 int Range::intersects_at(Range* r2) const {
duke@435 4028 const Range* r1 = this;
duke@435 4029
duke@435 4030 assert(r1 != NULL && r2 != NULL, "null ranges not allowed");
duke@435 4031 assert(r1 != _end && r2 != _end, "empty ranges not allowed");
duke@435 4032
duke@435 4033 do {
duke@435 4034 if (r1->from() < r2->from()) {
duke@435 4035 if (r1->to() <= r2->from()) {
duke@435 4036 r1 = r1->next(); if (r1 == _end) return -1;
duke@435 4037 } else {
duke@435 4038 return r2->from();
duke@435 4039 }
duke@435 4040 } else if (r2->from() < r1->from()) {
duke@435 4041 if (r2->to() <= r1->from()) {
duke@435 4042 r2 = r2->next(); if (r2 == _end) return -1;
duke@435 4043 } else {
duke@435 4044 return r1->from();
duke@435 4045 }
duke@435 4046 } else { // r1->from() == r2->from()
duke@435 4047 if (r1->from() == r1->to()) {
duke@435 4048 r1 = r1->next(); if (r1 == _end) return -1;
duke@435 4049 } else if (r2->from() == r2->to()) {
duke@435 4050 r2 = r2->next(); if (r2 == _end) return -1;
duke@435 4051 } else {
duke@435 4052 return r1->from();
duke@435 4053 }
duke@435 4054 }
duke@435 4055 } while (true);
duke@435 4056 }
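
// Example (range bounds illustrative): for
//   r1: [4, 8[ -> [12, 16[      r2: [2, 4[ -> [6, 10[
// the loop first advances r2 (its range [2, 4[ ends at r1's start), then
// sees r1->from() == 4 < 6 == r2->from() with r1->to() == 8 > 6 and returns
// 6, the first position covered by both interval range lists. Each step
// discards one exhausted range, so the walk is linear in the total number
// of ranges.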
duke@435 4057
duke@435 4058 #ifndef PRODUCT
duke@435 4059 void Range::print(outputStream* out) const {
duke@435 4060 out->print("[%d, %d[ ", _from, _to);
duke@435 4061 }
duke@435 4062 #endif
duke@435 4063
duke@435 4064
duke@435 4065
duke@435 4066 // **** Implementation of Interval **********************************
duke@435 4067
duke@435 4068 // initialize sentinel
duke@435 4069 Interval* Interval::_end = NULL;
iveresov@1939 4070 void Interval::initialize(Arena* arena) {
iveresov@1939 4071 Range::initialize(arena);
iveresov@1939 4072 _end = new (arena) Interval(-1);
duke@435 4073 }
duke@435 4074
duke@435 4075 Interval::Interval(int reg_num) :
duke@435 4076 _reg_num(reg_num),
duke@435 4077 _type(T_ILLEGAL),
duke@435 4078 _first(Range::end()),
duke@435 4079 _use_pos_and_kinds(12),
duke@435 4080 _current(Range::end()),
duke@435 4081 _next(_end),
duke@435 4082 _state(invalidState),
duke@435 4083 _assigned_reg(LinearScan::any_reg),
duke@435 4084 _assigned_regHi(LinearScan::any_reg),
duke@435 4085 _cached_to(-1),
duke@435 4086 _cached_opr(LIR_OprFact::illegalOpr),
duke@435 4087 _cached_vm_reg(VMRegImpl::Bad()),
duke@435 4088 _split_children(0),
duke@435 4089 _canonical_spill_slot(-1),
duke@435 4090 _insert_move_when_activated(false),
duke@435 4091 _register_hint(NULL),
duke@435 4092 _spill_state(noDefinitionFound),
duke@435 4093 _spill_definition_pos(-1)
duke@435 4094 {
duke@435 4095 _split_parent = this;
duke@435 4096 _current_split_child = this;
duke@435 4097 }
duke@435 4098
duke@435 4099 int Interval::calc_to() {
duke@435 4100 assert(_first != Range::end(), "interval has no range");
duke@435 4101
duke@435 4102 Range* r = _first;
duke@435 4103 while (r->next() != Range::end()) {
duke@435 4104 r = r->next();
duke@435 4105 }
duke@435 4106 return r->to();
duke@435 4107 }
duke@435 4108
duke@435 4109
duke@435 4110 #ifdef ASSERT
duke@435 4111 // consistency check of split-children
duke@435 4112 void Interval::check_split_children() {
duke@435 4113 if (_split_children.length() > 0) {
duke@435 4114 assert(is_split_parent(), "only split parents can have children");
duke@435 4115
duke@435 4116 for (int i = 0; i < _split_children.length(); i++) {
duke@435 4117 Interval* i1 = _split_children.at(i);
duke@435 4118
duke@435 4119 assert(i1->split_parent() == this, "not a split child of this interval");
duke@435 4120 assert(i1->type() == type(), "must be equal for all split children");
duke@435 4121 assert(i1->canonical_spill_slot() == canonical_spill_slot(), "must be equal for all split children");
duke@435 4122
duke@435 4123 for (int j = i + 1; j < _split_children.length(); j++) {
duke@435 4124 Interval* i2 = _split_children.at(j);
duke@435 4125
duke@435 4126 assert(i1->reg_num() != i2->reg_num(), "same register number");
duke@435 4127
duke@435 4128 if (i1->from() < i2->from()) {
duke@435 4129 assert(i1->to() <= i2->from() && i1->to() < i2->to(), "intervals overlapping");
duke@435 4130 } else {
duke@435 4131 assert(i2->from() < i1->from(), "intervals start at same op_id");
duke@435 4132 assert(i2->to() <= i1->from() && i2->to() < i1->to(), "intervals overlapping");
duke@435 4133 }
duke@435 4134 }
duke@435 4135 }
duke@435 4136 }
duke@435 4137 }
duke@435 4138 #endif // ASSERT
duke@435 4139
duke@435 4140 Interval* Interval::register_hint(bool search_split_child) const {
duke@435 4141 if (!search_split_child) {
duke@435 4142 return _register_hint;
duke@435 4143 }
duke@435 4144
duke@435 4145 if (_register_hint != NULL) {
duke@435 4146     assert(_register_hint->is_split_parent(), "only split parents are valid hint registers");
duke@435 4147
duke@435 4148 if (_register_hint->assigned_reg() >= 0 && _register_hint->assigned_reg() < LinearScan::nof_regs) {
duke@435 4149 return _register_hint;
duke@435 4150
duke@435 4151 } else if (_register_hint->_split_children.length() > 0) {
duke@435 4152 // search the first split child that has a register assigned
duke@435 4153 int len = _register_hint->_split_children.length();
duke@435 4154 for (int i = 0; i < len; i++) {
duke@435 4155 Interval* cur = _register_hint->_split_children.at(i);
duke@435 4156
duke@435 4157 if (cur->assigned_reg() >= 0 && cur->assigned_reg() < LinearScan::nof_regs) {
duke@435 4158 return cur;
duke@435 4159 }
duke@435 4160 }
duke@435 4161 }
duke@435 4162 }
duke@435 4163
duke@435 4164 // no hint interval found that has a register assigned
duke@435 4165 return NULL;
duke@435 4166 }
duke@435 4167
duke@435 4168
duke@435 4169 Interval* Interval::split_child_at_op_id(int op_id, LIR_OpVisitState::OprMode mode) {
duke@435 4170 assert(is_split_parent(), "can only be called for split parents");
duke@435 4171 assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");
duke@435 4172
duke@435 4173 Interval* result;
duke@435 4174 if (_split_children.length() == 0) {
duke@435 4175 result = this;
duke@435 4176 } else {
duke@435 4177 result = NULL;
duke@435 4178 int len = _split_children.length();
duke@435 4179
duke@435 4180 // in outputMode, the end of the interval (op_id == cur->to()) is not valid
duke@435 4181 int to_offset = (mode == LIR_OpVisitState::outputMode ? 0 : 1);
duke@435 4182
duke@435 4183 int i;
duke@435 4184 for (i = 0; i < len; i++) {
duke@435 4185 Interval* cur = _split_children.at(i);
duke@435 4186 if (cur->from() <= op_id && op_id < cur->to() + to_offset) {
duke@435 4187 if (i > 0) {
duke@435 4188 // exchange current split child to start of list (faster access for next call)
duke@435 4189 _split_children.at_put(i, _split_children.at(0));
duke@435 4190 _split_children.at_put(0, cur);
duke@435 4191 }
duke@435 4192
duke@435 4193 // interval found
duke@435 4194 result = cur;
duke@435 4195 break;
duke@435 4196 }
duke@435 4197 }
duke@435 4198
duke@435 4199 #ifdef ASSERT
duke@435 4200 for (i = 0; i < len; i++) {
duke@435 4201 Interval* tmp = _split_children.at(i);
duke@435 4202 if (tmp != result && tmp->from() <= op_id && op_id < tmp->to() + to_offset) {
duke@435 4203 tty->print_cr("two valid result intervals found for op_id %d: %d and %d", op_id, result->reg_num(), tmp->reg_num());
duke@435 4204 result->print();
duke@435 4205 tmp->print();
duke@435 4206 assert(false, "two valid result intervals found");
duke@435 4207 }
duke@435 4208 }
duke@435 4209 #endif
duke@435 4210 }
duke@435 4211
duke@435 4212 assert(result != NULL, "no matching interval found");
duke@435 4213 assert(result->covers(op_id, mode), "op_id not covered by interval");
duke@435 4214
duke@435 4215 return result;
duke@435 4216 }
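
// Note on the at_put exchange above: split_child_at_op_id is called for
// every operand of every LIR_Op, and consecutive lookups usually hit the
// same child, so the match is swapped to index 0 as a one-entry MRU cache.
// The list may therefore be unsorted; correctness only relies on the
// children being non-overlapping, which the ASSERT block re-checks.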
duke@435 4217
duke@435 4218
duke@435 4219 // returns the last split child that ends before the given op_id
duke@435 4220 Interval* Interval::split_child_before_op_id(int op_id) {
duke@435 4221 assert(op_id >= 0, "invalid op_id");
duke@435 4222
duke@435 4223 Interval* parent = split_parent();
duke@435 4224 Interval* result = NULL;
duke@435 4225
duke@435 4226 int len = parent->_split_children.length();
duke@435 4227 assert(len > 0, "no split children available");
duke@435 4228
duke@435 4229 for (int i = len - 1; i >= 0; i--) {
duke@435 4230 Interval* cur = parent->_split_children.at(i);
duke@435 4231 if (cur->to() <= op_id && (result == NULL || result->to() < cur->to())) {
duke@435 4232 result = cur;
duke@435 4233 }
duke@435 4234 }
duke@435 4235
duke@435 4236 assert(result != NULL, "no split child found");
duke@435 4237 return result;
duke@435 4238 }
duke@435 4239
duke@435 4240
duke@435 4241 // checks if op_id is covered by any split child
duke@435 4242 bool Interval::split_child_covers(int op_id, LIR_OpVisitState::OprMode mode) {
duke@435 4243 assert(is_split_parent(), "can only be called for split parents");
duke@435 4244 assert(op_id >= 0, "invalid op_id (method can not be called for spill moves)");
duke@435 4245
duke@435 4246 if (_split_children.length() == 0) {
duke@435 4247 // simple case if interval was not split
duke@435 4248 return covers(op_id, mode);
duke@435 4249
duke@435 4250 } else {
duke@435 4251 // extended case: check all split children
duke@435 4252 int len = _split_children.length();
duke@435 4253 for (int i = 0; i < len; i++) {
duke@435 4254 Interval* cur = _split_children.at(i);
duke@435 4255 if (cur->covers(op_id, mode)) {
duke@435 4256 return true;
duke@435 4257 }
duke@435 4258 }
duke@435 4259 return false;
duke@435 4260 }
duke@435 4261 }
duke@435 4262
duke@435 4263
duke@435 4264 // Note: use positions are sorted descending -> first use has highest index
duke@435 4265 int Interval::first_usage(IntervalUseKind min_use_kind) const {
duke@435 4266 assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
duke@435 4267
duke@435 4268 for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
duke@435 4269 if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
duke@435 4270 return _use_pos_and_kinds.at(i);
duke@435 4271 }
duke@435 4272 }
duke@435 4273 return max_jint;
duke@435 4274 }
duke@435 4275
duke@435 4276 int Interval::next_usage(IntervalUseKind min_use_kind, int from) const {
duke@435 4277 assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
duke@435 4278
duke@435 4279 for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
duke@435 4280 if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) >= min_use_kind) {
duke@435 4281 return _use_pos_and_kinds.at(i);
duke@435 4282 }
duke@435 4283 }
duke@435 4284 return max_jint;
duke@435 4285 }
duke@435 4286
duke@435 4287 int Interval::next_usage_exact(IntervalUseKind exact_use_kind, int from) const {
duke@435 4288 assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
duke@435 4289
duke@435 4290 for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
duke@435 4291 if (_use_pos_and_kinds.at(i) >= from && _use_pos_and_kinds.at(i + 1) == exact_use_kind) {
duke@435 4292 return _use_pos_and_kinds.at(i);
duke@435 4293 }
duke@435 4294 }
duke@435 4295 return max_jint;
duke@435 4296 }
duke@435 4297
duke@435 4298 int Interval::previous_usage(IntervalUseKind min_use_kind, int from) const {
duke@435 4299 assert(LinearScan::is_virtual_interval(this), "cannot access use positions for fixed intervals");
duke@435 4300
duke@435 4301 int prev = 0;
duke@435 4302 for (int i = _use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
duke@435 4303 if (_use_pos_and_kinds.at(i) > from) {
duke@435 4304 return prev;
duke@435 4305 }
duke@435 4306 if (_use_pos_and_kinds.at(i + 1) >= min_use_kind) {
duke@435 4307 prev = _use_pos_and_kinds.at(i);
duke@435 4308 }
duke@435 4309 }
duke@435 4310 return prev;
duke@435 4311 }
duke@435 4312
duke@435 4313 void Interval::add_use_pos(int pos, IntervalUseKind use_kind) {
duke@435 4314 assert(covers(pos, LIR_OpVisitState::inputMode), "use position not covered by live range");
duke@435 4315
duke@435 4316 // do not add use positions for precolored intervals because
duke@435 4317 // they are never used
duke@435 4318 if (use_kind != noUse && reg_num() >= LIR_OprDesc::vreg_base) {
duke@435 4319 #ifdef ASSERT
duke@435 4320 assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
duke@435 4321 for (int i = 0; i < _use_pos_and_kinds.length(); i += 2) {
duke@435 4322 assert(pos <= _use_pos_and_kinds.at(i), "already added a use-position with lower position");
duke@435 4323 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
duke@435 4324 if (i > 0) {
duke@435 4325 assert(_use_pos_and_kinds.at(i) < _use_pos_and_kinds.at(i - 2), "not sorted descending");
duke@435 4326 }
duke@435 4327 }
duke@435 4328 #endif
duke@435 4329
duke@435 4330     // Note: add_use_pos is called with monotonically decreasing positions, so the
duke@435 4331     // list stays sorted descending by just appending new use positions
duke@435 4332 int len = _use_pos_and_kinds.length();
duke@435 4333 if (len == 0 || _use_pos_and_kinds.at(len - 2) > pos) {
duke@435 4334 _use_pos_and_kinds.append(pos);
duke@435 4335 _use_pos_and_kinds.append(use_kind);
duke@435 4336 } else if (_use_pos_and_kinds.at(len - 1) < use_kind) {
duke@435 4337 assert(_use_pos_and_kinds.at(len - 2) == pos, "list not sorted correctly");
duke@435 4338 _use_pos_and_kinds.at_put(len - 1, use_kind);
duke@435 4339 }
duke@435 4340 }
duke@435 4341 }
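
// Layout of _use_pos_and_kinds (positions illustrative): a flat array of
// (position, use_kind) pairs sorted by descending position. After recording
// a use at op_id 42 (mustHaveRegister) and one at op_id 18
// (shouldHaveRegister) it looks like
//
//   index:  0    1                 2    3
//   value:  42   mustHaveRegister  18   shouldHaveRegister
//
// so the searches above start at the end of the array (the lowest, i.e.
// earliest, positions) and add_use_pos preserves the order by appending.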
duke@435 4342
duke@435 4343 void Interval::add_range(int from, int to) {
duke@435 4344 assert(from < to, "invalid range");
duke@435 4345 assert(first() == Range::end() || to < first()->next()->from(), "not inserting at begin of interval");
duke@435 4346 assert(from <= first()->to(), "not inserting at begin of interval");
duke@435 4347
duke@435 4348 if (first()->from() <= to) {
duke@435 4349 // join intersecting ranges
duke@435 4350 first()->set_from(MIN2(from, first()->from()));
duke@435 4351 first()->set_to (MAX2(to, first()->to()));
duke@435 4352 } else {
duke@435 4353 // insert new range
duke@435 4354 _first = new Range(from, to, first());
duke@435 4355 }
duke@435 4356 }
duke@435 4357
duke@435 4358 Interval* Interval::new_split_child() {
duke@435 4359 // allocate new interval
duke@435 4360 Interval* result = new Interval(-1);
duke@435 4361 result->set_type(type());
duke@435 4362
duke@435 4363 Interval* parent = split_parent();
duke@435 4364 result->_split_parent = parent;
duke@435 4365 result->set_register_hint(parent);
duke@435 4366
duke@435 4367 // insert new interval in children-list of parent
duke@435 4368 if (parent->_split_children.length() == 0) {
duke@435 4369 assert(is_split_parent(), "list must be initialized at first split");
duke@435 4370
duke@435 4371 parent->_split_children = IntervalList(4);
duke@435 4372 parent->_split_children.append(this);
duke@435 4373 }
duke@435 4374 parent->_split_children.append(result);
duke@435 4375
duke@435 4376 return result;
duke@435 4377 }
duke@435 4378
duke@435 4379 // split this interval at the specified position and return
duke@435 4380 // the remainder as a new interval.
duke@435 4381 //
duke@435 4382 // when an interval is split, a bi-directional link is established between the original interval
duke@435 4383 // (the split parent) and the intervals that are split off this interval (the split children)
duke@435 4384 // When a split child is split again, the newly created interval is also a direct child
duke@435 4385 // of the original parent (there is no tree of split children stored, but a flat list)
duke@435 4386 // All split children are spilled to the same stack slot (stored in _canonical_spill_slot)
duke@435 4387 //
duke@435 4388 // Note: The new interval has no valid reg_num
duke@435 4389 Interval* Interval::split(int split_pos) {
duke@435 4390 assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
duke@435 4391
duke@435 4392 // allocate new interval
duke@435 4393 Interval* result = new_split_child();
duke@435 4394
duke@435 4395 // split the ranges
duke@435 4396 Range* prev = NULL;
duke@435 4397 Range* cur = _first;
duke@435 4398 while (cur != Range::end() && cur->to() <= split_pos) {
duke@435 4399 prev = cur;
duke@435 4400 cur = cur->next();
duke@435 4401 }
duke@435 4402 assert(cur != Range::end(), "split interval after end of last range");
duke@435 4403
duke@435 4404 if (cur->from() < split_pos) {
duke@435 4405 result->_first = new Range(split_pos, cur->to(), cur->next());
duke@435 4406 cur->set_to(split_pos);
duke@435 4407 cur->set_next(Range::end());
duke@435 4408
duke@435 4409 } else {
duke@435 4410 assert(prev != NULL, "split before start of first range");
duke@435 4411 result->_first = cur;
duke@435 4412 prev->set_next(Range::end());
duke@435 4413 }
duke@435 4414 result->_current = result->_first;
duke@435 4415 _cached_to = -1; // clear cached value
duke@435 4416
duke@435 4417 // split list of use positions
duke@435 4418 int total_len = _use_pos_and_kinds.length();
duke@435 4419 int start_idx = total_len - 2;
duke@435 4420 while (start_idx >= 0 && _use_pos_and_kinds.at(start_idx) < split_pos) {
duke@435 4421 start_idx -= 2;
duke@435 4422 }
duke@435 4423
duke@435 4424 intStack new_use_pos_and_kinds(total_len - start_idx);
duke@435 4425 int i;
duke@435 4426 for (i = start_idx + 2; i < total_len; i++) {
duke@435 4427 new_use_pos_and_kinds.append(_use_pos_and_kinds.at(i));
duke@435 4428 }
duke@435 4429
duke@435 4430 _use_pos_and_kinds.truncate(start_idx + 2);
duke@435 4431 result->_use_pos_and_kinds = _use_pos_and_kinds;
duke@435 4432 _use_pos_and_kinds = new_use_pos_and_kinds;
duke@435 4433
duke@435 4434 #ifdef ASSERT
duke@435 4435 assert(_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
duke@435 4436 assert(result->_use_pos_and_kinds.length() % 2 == 0, "must have use kind for each use pos");
duke@435 4437 assert(_use_pos_and_kinds.length() + result->_use_pos_and_kinds.length() == total_len, "missed some entries");
duke@435 4438
duke@435 4439 for (i = 0; i < _use_pos_and_kinds.length(); i += 2) {
duke@435 4440 assert(_use_pos_and_kinds.at(i) < split_pos, "must be");
duke@435 4441 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
duke@435 4442 }
duke@435 4443 for (i = 0; i < result->_use_pos_and_kinds.length(); i += 2) {
duke@435 4444 assert(result->_use_pos_and_kinds.at(i) >= split_pos, "must be");
duke@435 4445 assert(result->_use_pos_and_kinds.at(i + 1) >= firstValidKind && result->_use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
duke@435 4446 }
duke@435 4447 #endif
duke@435 4448
duke@435 4449 return result;
duke@435 4450 }
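
// Example of split (op_ids illustrative): splitting an interval with ranges
// [10, 20[ -> [30, 40[ and use positions 36 and 14 at split_pos 32 gives
//
//   this (keeps the low part):   [10, 20[ -> [30, 32[   use positions: 14
//   result (new split child):    [32, 40[               use positions: 36
//
// All use positions >= split_pos migrate to the child; both halves refer to
// the same split parent and share its canonical spill slot.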
duke@435 4451
duke@435 4452 // split this interval at the specified position and return
duke@435 4453 // the head as a new interval (the original interval is the tail)
duke@435 4454 //
duke@435 4455 // Currently, only the first range can be split, and the new interval
duke@435 4456 // must not contain use positions (see the assertion below)
duke@435 4457 Interval* Interval::split_from_start(int split_pos) {
duke@435 4458 assert(LinearScan::is_virtual_interval(this), "cannot split fixed intervals");
duke@435 4459 assert(split_pos > from() && split_pos < to(), "can only split inside interval");
duke@435 4460 assert(split_pos > _first->from() && split_pos <= _first->to(), "can only split inside first range");
duke@435 4461 assert(first_usage(noUse) > split_pos, "can not split when use positions are present");
duke@435 4462
duke@435 4463 // allocate new interval
duke@435 4464 Interval* result = new_split_child();
duke@435 4465
duke@435 4466   // the newly created interval has only one range (checked by the assertions above),
duke@435 4467 // so the splitting of the ranges is very simple
duke@435 4468 result->add_range(_first->from(), split_pos);
duke@435 4469
duke@435 4470 if (split_pos == _first->to()) {
duke@435 4471 assert(_first->next() != Range::end(), "must not be at end");
duke@435 4472 _first = _first->next();
duke@435 4473 } else {
duke@435 4474 _first->set_from(split_pos);
duke@435 4475 }
duke@435 4476
duke@435 4477 return result;
duke@435 4478 }
duke@435 4479
duke@435 4480
duke@435 4481 // returns true if the op_id is inside the interval
duke@435 4482 bool Interval::covers(int op_id, LIR_OpVisitState::OprMode mode) const {
duke@435 4483 Range* cur = _first;
duke@435 4484
duke@435 4485 while (cur != Range::end() && cur->to() < op_id) {
duke@435 4486 cur = cur->next();
duke@435 4487 }
duke@435 4488 if (cur != Range::end()) {
duke@435 4489 assert(cur->to() != cur->next()->from(), "ranges not separated");
duke@435 4490
duke@435 4491 if (mode == LIR_OpVisitState::outputMode) {
duke@435 4492 return cur->from() <= op_id && op_id < cur->to();
duke@435 4493 } else {
duke@435 4494 return cur->from() <= op_id && op_id <= cur->to();
duke@435 4495 }
duke@435 4496 }
duke@435 4497 return false;
duke@435 4498 }
duke@435 4499
duke@435 4500 // returns true if the interval has any hole between hole_from and hole_to
duke@435 4501 // (even if the hole has only the length 1)
duke@435 4502 bool Interval::has_hole_between(int hole_from, int hole_to) {
duke@435 4503 assert(hole_from < hole_to, "check");
duke@435 4504 assert(from() <= hole_from && hole_to <= to(), "index out of interval");
duke@435 4505
duke@435 4506 Range* cur = _first;
duke@435 4507 while (cur != Range::end()) {
duke@435 4508 assert(cur->to() < cur->next()->from(), "no space between ranges");
duke@435 4509
duke@435 4510 // hole-range starts before this range -> hole
duke@435 4511 if (hole_from < cur->from()) {
duke@435 4512 return true;
duke@435 4513
duke@435 4514 // hole-range completely inside this range -> no hole
duke@435 4515 } else if (hole_to <= cur->to()) {
duke@435 4516 return false;
duke@435 4517
duke@435 4518 // overlapping of hole-range with this range -> hole
duke@435 4519 } else if (hole_from <= cur->to()) {
duke@435 4520 return true;
duke@435 4521 }
duke@435 4522
duke@435 4523 cur = cur->next();
duke@435 4524 }
duke@435 4525
duke@435 4526 return false;
duke@435 4527 }
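
// Example (range bounds illustrative): for an interval with ranges
// [10, 20[ -> [30, 40[
//
//   has_hole_between(12, 18) -> false  (entirely inside the first range)
//   has_hole_between(18, 31) -> true   (overlaps the gap [20, 30[)
//   has_hole_between(20, 30) -> true   (exactly the gap)
//
// As stated above, even a hole of length 1 counts.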
duke@435 4528
duke@435 4529
duke@435 4530 #ifndef PRODUCT
duke@435 4531 void Interval::print(outputStream* out) const {
duke@435 4532 const char* SpillState2Name[] = { "no definition", "no spill store", "one spill store", "store at definition", "start in memory", "no optimization" };
duke@435 4533 const char* UseKind2Name[] = { "N", "L", "S", "M" };
duke@435 4534
duke@435 4535 const char* type_name;
duke@435 4536 LIR_Opr opr = LIR_OprFact::illegal();
duke@435 4537 if (reg_num() < LIR_OprDesc::vreg_base) {
duke@435 4538 type_name = "fixed";
duke@435 4539 // need a temporary operand for fixed intervals because type() cannot be called
duke@435 4540 if (assigned_reg() >= pd_first_cpu_reg && assigned_reg() <= pd_last_cpu_reg) {
duke@435 4541 opr = LIR_OprFact::single_cpu(assigned_reg());
duke@435 4542 } else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
duke@435 4543 opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
never@739 4544 #ifdef X86
duke@435 4545 } else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
duke@435 4546 opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
duke@435 4547 #endif
duke@435 4548 } else {
duke@435 4549 ShouldNotReachHere();
duke@435 4550 }
duke@435 4551 } else {
duke@435 4552 type_name = type2name(type());
never@2404 4553 if (assigned_reg() != -1 &&
never@2404 4554 (LinearScan::num_physical_regs(type()) == 1 || assigned_regHi() != -1)) {
duke@435 4555 opr = LinearScan::calc_operand_for_interval(this);
duke@435 4556 }
duke@435 4557 }
duke@435 4558
duke@435 4559 out->print("%d %s ", reg_num(), type_name);
duke@435 4560 if (opr->is_valid()) {
duke@435 4561 out->print("\"");
duke@435 4562 opr->print(out);
duke@435 4563 out->print("\" ");
duke@435 4564 }
duke@435 4565 out->print("%d %d ", split_parent()->reg_num(), (register_hint(false) != NULL ? register_hint(false)->reg_num() : -1));
duke@435 4566
duke@435 4567 // print ranges
duke@435 4568 Range* cur = _first;
duke@435 4569 while (cur != Range::end()) {
duke@435 4570 cur->print(out);
duke@435 4571 cur = cur->next();
duke@435 4572 assert(cur != NULL, "range list not closed with range sentinel");
duke@435 4573 }
duke@435 4574
duke@435 4575 // print use positions
duke@435 4576 int prev = 0;
duke@435 4577 assert(_use_pos_and_kinds.length() % 2 == 0, "must be");
duke@435 4578 for (int i =_use_pos_and_kinds.length() - 2; i >= 0; i -= 2) {
duke@435 4579 assert(_use_pos_and_kinds.at(i + 1) >= firstValidKind && _use_pos_and_kinds.at(i + 1) <= lastValidKind, "invalid use kind");
duke@435 4580 assert(prev < _use_pos_and_kinds.at(i), "use positions not sorted");
duke@435 4581
duke@435 4582 out->print("%d %s ", _use_pos_and_kinds.at(i), UseKind2Name[_use_pos_and_kinds.at(i + 1)]);
duke@435 4583 prev = _use_pos_and_kinds.at(i);
duke@435 4584 }
duke@435 4585
duke@435 4586 out->print(" \"%s\"", SpillState2Name[spill_state()]);
duke@435 4587 out->cr();
duke@435 4588 }
duke@435 4589 #endif
duke@435 4590
duke@435 4591
duke@435 4592
duke@435 4593 // **** Implementation of IntervalWalker ****************************
duke@435 4594
duke@435 4595 IntervalWalker::IntervalWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
duke@435 4596 : _compilation(allocator->compilation())
duke@435 4597 , _allocator(allocator)
duke@435 4598 {
duke@435 4599 _unhandled_first[fixedKind] = unhandled_fixed_first;
duke@435 4600 _unhandled_first[anyKind] = unhandled_any_first;
duke@435 4601 _active_first[fixedKind] = Interval::end();
duke@435 4602 _inactive_first[fixedKind] = Interval::end();
duke@435 4603 _active_first[anyKind] = Interval::end();
duke@435 4604 _inactive_first[anyKind] = Interval::end();
duke@435 4605 _current_position = -1;
duke@435 4606 _current = NULL;
duke@435 4607 next_interval();
duke@435 4608 }
duke@435 4609
duke@435 4610
duke@435 4611 // append interval at top of list
duke@435 4612 void IntervalWalker::append_unsorted(Interval** list, Interval* interval) {
duke@435 4613 interval->set_next(*list); *list = interval;
duke@435 4614 }
duke@435 4615
duke@435 4616
duke@435 4617 // append interval in order of current range from()
duke@435 4618 void IntervalWalker::append_sorted(Interval** list, Interval* interval) {
duke@435 4619 Interval* prev = NULL;
duke@435 4620 Interval* cur = *list;
duke@435 4621 while (cur->current_from() < interval->current_from()) {
duke@435 4622 prev = cur; cur = cur->next();
duke@435 4623 }
duke@435 4624 if (prev == NULL) {
duke@435 4625 *list = interval;
duke@435 4626 } else {
duke@435 4627 prev->set_next(interval);
duke@435 4628 }
duke@435 4629 interval->set_next(cur);
duke@435 4630 }
duke@435 4631
duke@435 4632 void IntervalWalker::append_to_unhandled(Interval** list, Interval* interval) {
duke@435 4633 assert(interval->from() >= current()->current_from(), "cannot append new interval before current walk position");
duke@435 4634
duke@435 4635 Interval* prev = NULL;
duke@435 4636 Interval* cur = *list;
duke@435 4637 while (cur->from() < interval->from() || (cur->from() == interval->from() && cur->first_usage(noUse) < interval->first_usage(noUse))) {
duke@435 4638 prev = cur; cur = cur->next();
duke@435 4639 }
duke@435 4640 if (prev == NULL) {
duke@435 4641 *list = interval;
duke@435 4642 } else {
duke@435 4643 prev->set_next(interval);
duke@435 4644 }
duke@435 4645 interval->set_next(cur);
duke@435 4646 }
duke@435 4647
duke@435 4648
duke@435 4649 inline bool IntervalWalker::remove_from_list(Interval** list, Interval* i) {
duke@435 4650 while (*list != Interval::end() && *list != i) {
duke@435 4651 list = (*list)->next_addr();
duke@435 4652 }
duke@435 4653 if (*list != Interval::end()) {
duke@435 4654 assert(*list == i, "check");
duke@435 4655 *list = (*list)->next();
duke@435 4656 return true;
duke@435 4657 } else {
duke@435 4658 return false;
duke@435 4659 }
duke@435 4660 }
duke@435 4661
duke@435 4662 void IntervalWalker::remove_from_list(Interval* i) {
duke@435 4663 bool deleted;
duke@435 4664
duke@435 4665 if (i->state() == activeState) {
duke@435 4666 deleted = remove_from_list(active_first_addr(anyKind), i);
duke@435 4667 } else {
duke@435 4668 assert(i->state() == inactiveState, "invalid state");
duke@435 4669 deleted = remove_from_list(inactive_first_addr(anyKind), i);
duke@435 4670 }
duke@435 4671
duke@435 4672 assert(deleted, "interval has not been found in list");
duke@435 4673 }
duke@435 4674
duke@435 4675
duke@435 4676 void IntervalWalker::walk_to(IntervalState state, int from) {
duke@435 4677 assert (state == activeState || state == inactiveState, "wrong state");
duke@435 4678 for_each_interval_kind(kind) {
duke@435 4679 Interval** prev = state == activeState ? active_first_addr(kind) : inactive_first_addr(kind);
duke@435 4680 Interval* next = *prev;
duke@435 4681 while (next->current_from() <= from) {
duke@435 4682 Interval* cur = next;
duke@435 4683 next = cur->next();
duke@435 4684
duke@435 4685 bool range_has_changed = false;
duke@435 4686 while (cur->current_to() <= from) {
duke@435 4687 cur->next_range();
duke@435 4688 range_has_changed = true;
duke@435 4689 }
duke@435 4690
duke@435 4691 // also handle move from inactive list to active list
duke@435 4692 range_has_changed = range_has_changed || (state == inactiveState && cur->current_from() <= from);
duke@435 4693
duke@435 4694 if (range_has_changed) {
duke@435 4695 // remove cur from list
duke@435 4696 *prev = next;
duke@435 4697 if (cur->current_at_end()) {
duke@435 4698 // move to handled state (not maintained as a list)
duke@435 4699 cur->set_state(handledState);
duke@435 4700 interval_moved(cur, kind, state, handledState);
duke@435 4701 } else if (cur->current_from() <= from){
duke@435 4702 // sort into active list
duke@435 4703 append_sorted(active_first_addr(kind), cur);
duke@435 4704 cur->set_state(activeState);
duke@435 4705 if (*prev == cur) {
duke@435 4706 assert(state == activeState, "check");
duke@435 4707 prev = cur->next_addr();
duke@435 4708 }
duke@435 4709 interval_moved(cur, kind, state, activeState);
duke@435 4710 } else {
duke@435 4711 // sort into inactive list
duke@435 4712 append_sorted(inactive_first_addr(kind), cur);
duke@435 4713 cur->set_state(inactiveState);
duke@435 4714 if (*prev == cur) {
duke@435 4715 assert(state == inactiveState, "check");
duke@435 4716 prev = cur->next_addr();
duke@435 4717 }
duke@435 4718 interval_moved(cur, kind, state, inactiveState);
duke@435 4719 }
duke@435 4720 } else {
duke@435 4721 prev = cur->next_addr();
duke@435 4722 continue;
duke@435 4723 }
duke@435 4724 }
duke@435 4725 }
duke@435 4726 }
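
// Sketch of the state machine driven by walk_to above: every interval moves
// through
//
//   unhandled -> (active <-> inactive)* -> handled
//
// where, at the current position `from`,
//   active   = one of the interval's ranges covers `from`,
//   inactive = the interval has started but has a hole at `from`,
//   handled  = all ranges end before `from` (no longer kept in any list).
// An interval can alternate between active and inactive once per hole in
// its live range, which is why next_range() is advanced in a loop here.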
duke@435 4727
duke@435 4728
duke@435 4729 void IntervalWalker::next_interval() {
duke@435 4730 IntervalKind kind;
duke@435 4731 Interval* any = _unhandled_first[anyKind];
duke@435 4732 Interval* fixed = _unhandled_first[fixedKind];
duke@435 4733
duke@435 4734 if (any != Interval::end()) {
duke@435 4735 // intervals may start at same position -> prefer fixed interval
duke@435 4736 kind = fixed != Interval::end() && fixed->from() <= any->from() ? fixedKind : anyKind;
duke@435 4737
duke@435 4738 assert (kind == fixedKind && fixed->from() <= any->from() ||
duke@435 4739 kind == anyKind && any->from() <= fixed->from(), "wrong interval!!!");
duke@435 4740 assert(any == Interval::end() || fixed == Interval::end() || any->from() != fixed->from() || kind == fixedKind, "if fixed and any-Interval start at same position, fixed must be processed first");
duke@435 4741
duke@435 4742 } else if (fixed != Interval::end()) {
duke@435 4743 kind = fixedKind;
duke@435 4744 } else {
duke@435 4745 _current = NULL; return;
duke@435 4746 }
duke@435 4747 _current_kind = kind;
duke@435 4748 _current = _unhandled_first[kind];
duke@435 4749 _unhandled_first[kind] = _current->next();
duke@435 4750 _current->set_next(Interval::end());
duke@435 4751 _current->rewind_range();
duke@435 4752 }
duke@435 4753
duke@435 4754
duke@435 4755 void IntervalWalker::walk_to(int lir_op_id) {
duke@435 4756 assert(_current_position <= lir_op_id, "can not walk backwards");
duke@435 4757 while (current() != NULL) {
duke@435 4758 bool is_active = current()->from() <= lir_op_id;
duke@435 4759 int id = is_active ? current()->from() : lir_op_id;
duke@435 4760
duke@435 4761 TRACE_LINEAR_SCAN(2, if (_current_position < id) { tty->cr(); tty->print_cr("walk_to(%d) **************************************************************", id); })
duke@435 4762
duke@435 4763 // set _current_position prior to call of walk_to
duke@435 4764 _current_position = id;
duke@435 4765
duke@435 4766 // call walk_to even if _current_position == id
duke@435 4767 walk_to(activeState, id);
duke@435 4768 walk_to(inactiveState, id);
duke@435 4769
duke@435 4770 if (is_active) {
duke@435 4771 current()->set_state(activeState);
duke@435 4772 if (activate_current()) {
duke@435 4773 append_sorted(active_first_addr(current_kind()), current());
duke@435 4774 interval_moved(current(), current_kind(), unhandledState, activeState);
duke@435 4775 }
duke@435 4776
duke@435 4777 next_interval();
duke@435 4778 } else {
duke@435 4779 return;
duke@435 4780 }
duke@435 4781 }
duke@435 4782 }
duke@435 4783
duke@435 4784 void IntervalWalker::interval_moved(Interval* interval, IntervalKind kind, IntervalState from, IntervalState to) {
duke@435 4785 #ifndef PRODUCT
duke@435 4786 if (TraceLinearScanLevel >= 4) {
duke@435 4787 #define print_state(state) \
duke@435 4788 switch(state) {\
duke@435 4789 case unhandledState: tty->print("unhandled"); break;\
duke@435 4790 case activeState: tty->print("active"); break;\
duke@435 4791 case inactiveState: tty->print("inactive"); break;\
duke@435 4792 case handledState: tty->print("handled"); break;\
duke@435 4793 default: ShouldNotReachHere(); \
duke@435 4794 }
duke@435 4795
duke@435 4796 print_state(from); tty->print(" to "); print_state(to);
duke@435 4797 tty->fill_to(23);
duke@435 4798 interval->print();
duke@435 4799
duke@435 4800 #undef print_state
duke@435 4801 }
duke@435 4802 #endif
duke@435 4803 }
duke@435 4804
duke@435 4805
duke@435 4806
duke@435 4807 // **** Implementation of LinearScanWalker **************************
duke@435 4808
duke@435 4809 LinearScanWalker::LinearScanWalker(LinearScan* allocator, Interval* unhandled_fixed_first, Interval* unhandled_any_first)
duke@435 4810 : IntervalWalker(allocator, unhandled_fixed_first, unhandled_any_first)
duke@435 4811 , _move_resolver(allocator)
duke@435 4812 {
duke@435 4813 for (int i = 0; i < LinearScan::nof_regs; i++) {
duke@435 4814 _spill_intervals[i] = new IntervalList(2);
duke@435 4815 }
duke@435 4816 }
duke@435 4817
duke@435 4818
duke@435 4819 inline void LinearScanWalker::init_use_lists(bool only_process_use_pos) {
duke@435 4820 for (int i = _first_reg; i <= _last_reg; i++) {
duke@435 4821 _use_pos[i] = max_jint;
duke@435 4822
duke@435 4823 if (!only_process_use_pos) {
duke@435 4824 _block_pos[i] = max_jint;
duke@435 4825 _spill_intervals[i]->clear();
duke@435 4826 }
duke@435 4827 }
duke@435 4828 }
duke@435 4829
duke@435 4830 inline void LinearScanWalker::exclude_from_use(int reg) {
duke@435 4831 assert(reg < LinearScan::nof_regs, "interval must have a register assigned (stack slots not allowed)");
duke@435 4832 if (reg >= _first_reg && reg <= _last_reg) {
duke@435 4833 _use_pos[reg] = 0;
duke@435 4834 }
duke@435 4835 }
duke@435 4836 inline void LinearScanWalker::exclude_from_use(Interval* i) {
duke@435 4837 assert(i->assigned_reg() != any_reg, "interval has no register assigned");
duke@435 4838
duke@435 4839 exclude_from_use(i->assigned_reg());
duke@435 4840 exclude_from_use(i->assigned_regHi());
duke@435 4841 }
duke@435 4842
duke@435 4843 inline void LinearScanWalker::set_use_pos(int reg, Interval* i, int use_pos, bool only_process_use_pos) {
duke@435 4844 assert(use_pos != 0, "must use exclude_from_use to set use_pos to 0");
duke@435 4845
duke@435 4846 if (reg >= _first_reg && reg <= _last_reg) {
duke@435 4847 if (_use_pos[reg] > use_pos) {
duke@435 4848 _use_pos[reg] = use_pos;
duke@435 4849 }
duke@435 4850 if (!only_process_use_pos) {
duke@435 4851 _spill_intervals[reg]->append(i);
duke@435 4852 }
duke@435 4853 }
duke@435 4854 }
duke@435 4855 inline void LinearScanWalker::set_use_pos(Interval* i, int use_pos, bool only_process_use_pos) {
duke@435 4856 assert(i->assigned_reg() != any_reg, "interval has no register assigned");
duke@435 4857 if (use_pos != -1) {
duke@435 4858 set_use_pos(i->assigned_reg(), i, use_pos, only_process_use_pos);
duke@435 4859 set_use_pos(i->assigned_regHi(), i, use_pos, only_process_use_pos);
duke@435 4860 }
duke@435 4861 }
duke@435 4862
duke@435 4863 inline void LinearScanWalker::set_block_pos(int reg, Interval* i, int block_pos) {
duke@435 4864 if (reg >= _first_reg && reg <= _last_reg) {
duke@435 4865 if (_block_pos[reg] > block_pos) {
duke@435 4866 _block_pos[reg] = block_pos;
duke@435 4867 }
duke@435 4868 if (_use_pos[reg] > block_pos) {
duke@435 4869 _use_pos[reg] = block_pos;
duke@435 4870 }
duke@435 4871 }
duke@435 4872 }
duke@435 4873 inline void LinearScanWalker::set_block_pos(Interval* i, int block_pos) {
duke@435 4874 assert(i->assigned_reg() != any_reg, "interval has no register assigned");
duke@435 4875 if (block_pos != -1) {
duke@435 4876 set_block_pos(i->assigned_reg(), i, block_pos);
duke@435 4877 set_block_pos(i->assigned_regHi(), i, block_pos);
duke@435 4878 }
duke@435 4879 }
duke@435 4880
duke@435 4881
duke@435 4882 void LinearScanWalker::free_exclude_active_fixed() {
duke@435 4883 Interval* list = active_first(fixedKind);
duke@435 4884 while (list != Interval::end()) {
duke@435 4885 assert(list->assigned_reg() < LinearScan::nof_regs, "active interval must have a register assigned");
duke@435 4886 exclude_from_use(list);
duke@435 4887 list = list->next();
duke@435 4888 }
duke@435 4889 }
duke@435 4890
duke@435 4891 void LinearScanWalker::free_exclude_active_any() {
duke@435 4892 Interval* list = active_first(anyKind);
duke@435 4893 while (list != Interval::end()) {
duke@435 4894 exclude_from_use(list);
duke@435 4895 list = list->next();
duke@435 4896 }
duke@435 4897 }
duke@435 4898
duke@435 4899 void LinearScanWalker::free_collect_inactive_fixed(Interval* cur) {
duke@435 4900 Interval* list = inactive_first(fixedKind);
duke@435 4901 while (list != Interval::end()) {
duke@435 4902 if (cur->to() <= list->current_from()) {
duke@435 4903 assert(list->current_intersects_at(cur) == -1, "must not intersect");
duke@435 4904 set_use_pos(list, list->current_from(), true);
duke@435 4905 } else {
duke@435 4906 set_use_pos(list, list->current_intersects_at(cur), true);
duke@435 4907 }
duke@435 4908 list = list->next();
duke@435 4909 }
duke@435 4910 }
duke@435 4911
duke@435 4912 void LinearScanWalker::free_collect_inactive_any(Interval* cur) {
duke@435 4913 Interval* list = inactive_first(anyKind);
duke@435 4914 while (list != Interval::end()) {
duke@435 4915 set_use_pos(list, list->current_intersects_at(cur), true);
duke@435 4916 list = list->next();
duke@435 4917 }
duke@435 4918 }
duke@435 4919
duke@435 4920 void LinearScanWalker::free_collect_unhandled(IntervalKind kind, Interval* cur) {
duke@435 4921 Interval* list = unhandled_first(kind);
duke@435 4922 while (list != Interval::end()) {
duke@435 4923 set_use_pos(list, list->intersects_at(cur), true);
duke@435 4924 if (kind == fixedKind && cur->to() <= list->from()) {
duke@435 4925 set_use_pos(list, list->from(), true);
duke@435 4926 }
duke@435 4927 list = list->next();
duke@435 4928 }
duke@435 4929 }
duke@435 4930
duke@435 4931 void LinearScanWalker::spill_exclude_active_fixed() {
duke@435 4932 Interval* list = active_first(fixedKind);
duke@435 4933 while (list != Interval::end()) {
duke@435 4934 exclude_from_use(list);
duke@435 4935 list = list->next();
duke@435 4936 }
duke@435 4937 }
duke@435 4938
duke@435 4939 void LinearScanWalker::spill_block_unhandled_fixed(Interval* cur) {
duke@435 4940 Interval* list = unhandled_first(fixedKind);
duke@435 4941 while (list != Interval::end()) {
duke@435 4942 set_block_pos(list, list->intersects_at(cur));
duke@435 4943 list = list->next();
duke@435 4944 }
duke@435 4945 }
duke@435 4946
duke@435 4947 void LinearScanWalker::spill_block_inactive_fixed(Interval* cur) {
duke@435 4948 Interval* list = inactive_first(fixedKind);
duke@435 4949 while (list != Interval::end()) {
duke@435 4950 if (cur->to() > list->current_from()) {
duke@435 4951 set_block_pos(list, list->current_intersects_at(cur));
duke@435 4952 } else {
duke@435 4953 assert(list->current_intersects_at(cur) == -1, "invalid optimization: intervals intersect");
duke@435 4954 }
duke@435 4955
duke@435 4956 list = list->next();
duke@435 4957 }
duke@435 4958 }
duke@435 4959
duke@435 4960 void LinearScanWalker::spill_collect_active_any() {
duke@435 4961 Interval* list = active_first(anyKind);
duke@435 4962 while (list != Interval::end()) {
duke@435 4963 set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
duke@435 4964 list = list->next();
duke@435 4965 }
duke@435 4966 }
duke@435 4967
duke@435 4968 void LinearScanWalker::spill_collect_inactive_any(Interval* cur) {
duke@435 4969 Interval* list = inactive_first(anyKind);
duke@435 4970 while (list != Interval::end()) {
duke@435 4971 if (list->current_intersects(cur)) {
duke@435 4972 set_use_pos(list, MIN2(list->next_usage(loopEndMarker, _current_position), list->to()), false);
duke@435 4973 }
duke@435 4974 list = list->next();
duke@435 4975 }
duke@435 4976 }
duke@435 4977
duke@435 4978
duke@435 4979 void LinearScanWalker::insert_move(int op_id, Interval* src_it, Interval* dst_it) {
duke@435 4980 // output all moves here. When source and target are equal, the move is
duke@435 4981 // optimized away later in assign_reg_nums
duke@435 4982
duke@435 4983 op_id = (op_id + 1) & ~1;
duke@435 4984 BlockBegin* op_block = allocator()->block_of_op_with_id(op_id);
duke@435 4985 assert(op_id > 0 && allocator()->block_of_op_with_id(op_id - 2) == op_block, "cannot insert move at block boundary");
duke@435 4986
duke@435 4987 // calculate the index of the instruction inside the instruction list of the current block.
duke@435 4988 // The minimal index (for a block with no spill moves) can be computed because the
duke@435 4989 // numbering of instructions is known.
duke@435 4990 // When the block already contains spill moves, the index must be increased until the
duke@435 4991 // correct index is reached.
duke@435 4992 LIR_OpList* list = op_block->lir()->instructions_list();
duke@435 4993 int index = (op_id - list->at(0)->id()) / 2;
duke@435 4994 assert(list->at(index)->id() <= op_id, "error in calculation");
duke@435 4995
duke@435 4996 while (list->at(index)->id() != op_id) {
duke@435 4997 index++;
duke@435 4998 assert(0 <= index && index < list->length(), "index out of bounds");
duke@435 4999 }
duke@435 5000 assert(1 <= index && index < list->length(), "index out of bounds");
duke@435 5001 assert(list->at(index)->id() == op_id, "error in calculation");
duke@435 5002
duke@435 5003 // insert new instruction before instruction at position index
duke@435 5004 _move_resolver.move_insert_position(op_block->lir(), index - 1);
duke@435 5005 _move_resolver.add_mapping(src_it, dst_it);
duke@435 5006 }
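// Editor's sketch (illustrative only, hypothetical ids): the alignment and index
// arithmetic above, reduced to plain integers under the numbering scheme used by the
// allocator (even op_ids, advancing by 2 per instruction):
//
//   (17 + 1) & ~1 == 18                  // round op_id up to the next even id
//   (18 - 10) / 2 == 4                   // minimal index, if the block starts at id 10
//
// The while-loop then steps past previously inserted spill moves (whose ids do not
// match) until list->at(index)->id() == 18.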
duke@435 5007
duke@435 5008
duke@435 5009 int LinearScanWalker::find_optimal_split_pos(BlockBegin* min_block, BlockBegin* max_block, int max_split_pos) {
duke@435 5010 int from_block_nr = min_block->linear_scan_number();
duke@435 5011 int to_block_nr = max_block->linear_scan_number();
duke@435 5012
duke@435 5013 assert(0 <= from_block_nr && from_block_nr < block_count(), "out of range");
duke@435 5014 assert(0 <= to_block_nr && to_block_nr < block_count(), "out of range");
duke@435 5015 assert(from_block_nr < to_block_nr, "must cross block boundary");
duke@435 5016
duke@435 5017 // Try to split at end of max_block. If this would be after
duke@435 5018 // max_split_pos, then use the begin of max_block
duke@435 5019 int optimal_split_pos = max_block->last_lir_instruction_id() + 2;
duke@435 5020 if (optimal_split_pos > max_split_pos) {
duke@435 5021 optimal_split_pos = max_block->first_lir_instruction_id();
duke@435 5022 }
duke@435 5023
duke@435 5024 int min_loop_depth = max_block->loop_depth();
duke@435 5025 for (int i = to_block_nr - 1; i >= from_block_nr; i--) {
duke@435 5026 BlockBegin* cur = block_at(i);
duke@435 5027
duke@435 5028 if (cur->loop_depth() < min_loop_depth) {
duke@435 5029 // block with lower loop-depth found -> split at the end of this block
duke@435 5030 min_loop_depth = cur->loop_depth();
duke@435 5031 optimal_split_pos = cur->last_lir_instruction_id() + 2;
duke@435 5032 }
duke@435 5033 }
duke@435 5034 assert(optimal_split_pos > allocator()->max_lir_op_id() || allocator()->is_block_begin(optimal_split_pos), "algorithm must move split pos to block boundary");
duke@435 5035
duke@435 5036 return optimal_split_pos;
duke@435 5037 }
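// Worked example (editor's addition, hypothetical blocks): assume the range covers
// B1 (loop_depth 0), B2 (loop_depth 1) and max_block B3 (loop_depth 1). The backwards
// scan starts with min_loop_depth = 1 and finds B1 with a lower depth, so the split
// position becomes B1->last_lir_instruction_id() + 2 -- the spill move is placed
// outside the loop and executed once instead of on every iteration.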
duke@435 5038
duke@435 5039
duke@435 5040 int LinearScanWalker::find_optimal_split_pos(Interval* it, int min_split_pos, int max_split_pos, bool do_loop_optimization) {
duke@435 5041 int optimal_split_pos = -1;
duke@435 5042 if (min_split_pos == max_split_pos) {
duke@435 5043 // trivial case, no optimization of split position possible
duke@435 5044 TRACE_LINEAR_SCAN(4, tty->print_cr(" min-pos and max-pos are equal, no optimization possible"));
duke@435 5045 optimal_split_pos = min_split_pos;
duke@435 5046
duke@435 5047 } else {
duke@435 5048 assert(min_split_pos < max_split_pos, "must be true then");
duke@435 5049 assert(min_split_pos > 0, "cannot access min_split_pos - 1 otherwise");
duke@435 5050
duke@435 5051 // reason for using min_split_pos - 1: when the minimal split pos is exactly at the
duke@435 5052 // beginning of a block, then min_split_pos is also a possible split position.
duke@435 5053 // Use the block before as min_block, because then min_block->last_lir_instruction_id() + 2 == min_split_pos
duke@435 5054 BlockBegin* min_block = allocator()->block_of_op_with_id(min_split_pos - 1);
duke@435 5055
duke@435 5056 // reason for using max_split_pos - 1: otherwise there would be an assertion failure
duke@435 5057 // when an interval ends at the end of the last block of the method
duke@435 5058 // (in this case, max_split_pos == allocator()->max_lir_op_id() + 2, and there is no
duke@435 5059 // block at this op_id)
duke@435 5060 BlockBegin* max_block = allocator()->block_of_op_with_id(max_split_pos - 1);
duke@435 5061
duke@435 5062 assert(min_block->linear_scan_number() <= max_block->linear_scan_number(), "invalid order");
duke@435 5063 if (min_block == max_block) {
duke@435 5064 // split position cannot be moved to block boundary, so split as late as possible
duke@435 5065 TRACE_LINEAR_SCAN(4, tty->print_cr(" cannot move split pos to block boundary because min_pos and max_pos are in same block"));
duke@435 5066 optimal_split_pos = max_split_pos;
duke@435 5067
duke@435 5068 } else if (it->has_hole_between(max_split_pos - 1, max_split_pos) && !allocator()->is_block_begin(max_split_pos)) {
duke@435 5069 // Do not move split position if the interval has a hole before max_split_pos.
duke@435 5070 // Intervals resulting from Phi-Functions have more than one definition (marked
duke@435 5071 // as mustHaveRegister) with a hole before each definition. When the register is needed
duke@435 5072 // for the second definition, an earlier reloading is unnecessary.
duke@435 5073 TRACE_LINEAR_SCAN(4, tty->print_cr(" interval has hole just before max_split_pos, so splitting at max_split_pos"));
duke@435 5074 optimal_split_pos = max_split_pos;
duke@435 5075
duke@435 5076 } else {
duke@435 5077 // search for the optimal block boundary between min_split_pos and max_split_pos
duke@435 5078 TRACE_LINEAR_SCAN(4, tty->print_cr(" moving split pos to optimal block boundary between block B%d and B%d", min_block->block_id(), max_block->block_id()));
duke@435 5079
duke@435 5080 if (do_loop_optimization) {
duke@435 5081 // Loop optimization: if a loop-end marker is found between min- and max-position,
duke@435 5082 // then split before this loop
duke@435 5083 int loop_end_pos = it->next_usage_exact(loopEndMarker, min_block->last_lir_instruction_id() + 2);
duke@435 5084 TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization: loop end found at pos %d", loop_end_pos));
duke@435 5085
duke@435 5086 assert(loop_end_pos > min_split_pos, "invalid order");
duke@435 5087 if (loop_end_pos < max_split_pos) {
duke@435 5088 // loop-end marker found between min- and max-position
duke@435 5089 // if it is not the end marker for the same loop as the min-position, then move
duke@435 5090 // the max-position to this loop block.
duke@435 5091 // Desired result: uses tagged as shouldHaveRegister inside a loop cause a reloading
duke@435 5092 // of the interval (normally, only mustHaveRegister causes a reloading)
duke@435 5093 BlockBegin* loop_block = allocator()->block_of_op_with_id(loop_end_pos);
duke@435 5094
duke@435 5095 TRACE_LINEAR_SCAN(4, tty->print_cr(" interval is used in loop that ends in block B%d, so trying to move max_block back from B%d to B%d", loop_block->block_id(), max_block->block_id(), loop_block->block_id()));
duke@435 5096 assert(loop_block != min_block, "loop_block and min_block must be different because block boundary is needed between");
duke@435 5097
duke@435 5098 optimal_split_pos = find_optimal_split_pos(min_block, loop_block, loop_block->last_lir_instruction_id() + 2);
duke@435 5099 if (optimal_split_pos == loop_block->last_lir_instruction_id() + 2) {
duke@435 5100 optimal_split_pos = -1;
duke@435 5101 TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization not necessary"));
duke@435 5102 } else {
duke@435 5103 TRACE_LINEAR_SCAN(4, tty->print_cr(" loop optimization successful"));
duke@435 5104 }
duke@435 5105 }
duke@435 5106 }
duke@435 5107
duke@435 5108 if (optimal_split_pos == -1) {
duke@435 5109 // not calculated by loop optimization
duke@435 5110 optimal_split_pos = find_optimal_split_pos(min_block, max_block, max_split_pos);
duke@435 5111 }
duke@435 5112 }
duke@435 5113 }
duke@435 5114 TRACE_LINEAR_SCAN(4, tty->print_cr(" optimal split position: %d", optimal_split_pos));
duke@435 5115
duke@435 5116 return optimal_split_pos;
duke@435 5117 }
duke@435 5118
duke@435 5119
duke@435 5120 /*
duke@435 5121 split an interval at the optimal position between min_split_pos and
duke@435 5122 max_split_pos into two parts:
duke@435 5123 1) the left part already has a location assigned
duke@435 5124 2) the right part is sorted into the unhandled list
duke@435 5125 */
duke@435 5126 void LinearScanWalker::split_before_usage(Interval* it, int min_split_pos, int max_split_pos) {
duke@435 5127 TRACE_LINEAR_SCAN(2, tty->print ("----- splitting interval: "); it->print());
duke@435 5128 TRACE_LINEAR_SCAN(2, tty->print_cr(" between %d and %d", min_split_pos, max_split_pos));
duke@435 5129
duke@435 5130 assert(it->from() < min_split_pos, "cannot split at start of interval");
duke@435 5131 assert(current_position() < min_split_pos, "cannot split before current position");
duke@435 5132 assert(min_split_pos <= max_split_pos, "invalid order");
duke@435 5133 assert(max_split_pos <= it->to(), "cannot split after end of interval");
duke@435 5134
duke@435 5135 int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, true);
duke@435 5136
duke@435 5137 assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
duke@435 5138 assert(optimal_split_pos <= it->to(), "cannot split after end of interval");
duke@435 5139 assert(optimal_split_pos > it->from(), "cannot split at start of interval");
duke@435 5140
duke@435 5141 if (optimal_split_pos == it->to() && it->next_usage(mustHaveRegister, min_split_pos) == max_jint) {
duke@435 5142 // the split position would be just before the end of the interval
duke@435 5143 // -> no split at all necessary
duke@435 5144 TRACE_LINEAR_SCAN(4, tty->print_cr(" no split necessary because optimal split position is at end of interval"));
duke@435 5145 return;
duke@435 5146 }
duke@435 5147
duke@435 5148 // must calculate this before the actual split is performed and before split position is moved to odd op_id
duke@435 5149 bool move_necessary = !allocator()->is_block_begin(optimal_split_pos) && !it->has_hole_between(optimal_split_pos - 1, optimal_split_pos);
duke@435 5150
duke@435 5151 if (!allocator()->is_block_begin(optimal_split_pos)) {
duke@435 5152 // move position before actual instruction (odd op_id)
duke@435 5153 optimal_split_pos = (optimal_split_pos - 1) | 1;
duke@435 5154 }
duke@435 5155
duke@435 5156 TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
duke@435 5157 assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
duke@435 5158 assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
duke@435 5159
duke@435 5160 Interval* split_part = it->split(optimal_split_pos);
duke@435 5161
duke@435 5162 allocator()->append_interval(split_part);
duke@435 5163 allocator()->copy_register_flags(it, split_part);
duke@435 5164 split_part->set_insert_move_when_activated(move_necessary);
duke@435 5165 append_to_unhandled(unhandled_first_addr(anyKind), split_part);
duke@435 5166
duke@435 5167 TRACE_LINEAR_SCAN(2, tty->print_cr(" split interval in two parts (insert_move_when_activated: %d)", move_necessary));
duke@435 5168 TRACE_LINEAR_SCAN(2, tty->print (" "); it->print());
duke@435 5169 TRACE_LINEAR_SCAN(2, tty->print (" "); split_part->print());
duke@435 5170 }
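// Editor's note (illustrative): the expression (optimal_split_pos - 1) | 1 maps an
// even position to the odd id just before the instruction and leaves odd positions
// unchanged, e.g. 18 -> 17 and 17 -> 17; this is exactly what the two asserts about
// odd/even split positions check.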
duke@435 5171
duke@435 5172 /*
duke@435 5173 split an interval at the optimal position between min_split_pos and
duke@435 5174 max_split_pos into two parts:
duke@435 5175 1) the left part already has a location assigned
duke@435 5176 2) the right part is always on the stack and therefore ignored in further processing
duke@435 5177 */
duke@435 5178 void LinearScanWalker::split_for_spilling(Interval* it) {
duke@435 5179 // calculate allowed range of splitting position
duke@435 5180 int max_split_pos = current_position();
duke@435 5181 int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, max_split_pos) + 1, it->from());
duke@435 5182
duke@435 5183 TRACE_LINEAR_SCAN(2, tty->print ("----- splitting and spilling interval: "); it->print());
duke@435 5184 TRACE_LINEAR_SCAN(2, tty->print_cr(" between %d and %d", min_split_pos, max_split_pos));
duke@435 5185
duke@435 5186 assert(it->state() == activeState, "why spill interval that is not active?");
duke@435 5187 assert(it->from() <= min_split_pos, "cannot split before start of interval");
duke@435 5188 assert(min_split_pos <= max_split_pos, "invalid order");
duke@435 5189 assert(max_split_pos < it->to(), "cannot split at end of interval");
duke@435 5190 assert(current_position() < it->to(), "interval must not end before current position");
duke@435 5191
duke@435 5192 if (min_split_pos == it->from()) {
duke@435 5193 // the whole interval is never used, so spill it entirely to memory
duke@435 5194 TRACE_LINEAR_SCAN(2, tty->print_cr(" spilling entire interval because split pos is at beginning of interval"));
duke@435 5195 assert(it->first_usage(shouldHaveRegister) > current_position(), "interval must not have use position before current_position");
duke@435 5196
duke@435 5197 allocator()->assign_spill_slot(it);
duke@435 5198 allocator()->change_spill_state(it, min_split_pos);
duke@435 5199
duke@435 5200 // Also kick parent intervals out of their register to memory when they have no use
duke@435 5201 // position. This avoids a short interval in a register surrounded by intervals in
duke@435 5202 // memory -> avoids useless moves from memory to register and back
duke@435 5203 Interval* parent = it;
duke@435 5204 while (parent != NULL && parent->is_split_child()) {
duke@435 5205 parent = parent->split_child_before_op_id(parent->from());
duke@435 5206
duke@435 5207 if (parent->assigned_reg() < LinearScan::nof_regs) {
duke@435 5208 if (parent->first_usage(shouldHaveRegister) == max_jint) {
duke@435 5209 // parent is never used, so kick it out of its assigned register
duke@435 5210 TRACE_LINEAR_SCAN(4, tty->print_cr(" kicking interval %d out of its register because it is never used", parent->reg_num()));
duke@435 5211 allocator()->assign_spill_slot(parent);
duke@435 5212 } else {
duke@435 5213 // do not go further back because the register is actually used by the interval
duke@435 5214 parent = NULL;
duke@435 5215 }
duke@435 5216 }
duke@435 5217 }
duke@435 5218
duke@435 5219 } else {
duke@435 5220 // search optimal split pos, split interval and spill only the right hand part
duke@435 5221 int optimal_split_pos = find_optimal_split_pos(it, min_split_pos, max_split_pos, false);
duke@435 5222
duke@435 5223 assert(min_split_pos <= optimal_split_pos && optimal_split_pos <= max_split_pos, "out of range");
duke@435 5224 assert(optimal_split_pos < it->to(), "cannot split at end of interval");
duke@435 5225 assert(optimal_split_pos >= it->from(), "cannot split before start of interval");
duke@435 5226
duke@435 5227 if (!allocator()->is_block_begin(optimal_split_pos)) {
duke@435 5228 // move position before actual instruction (odd op_id)
duke@435 5229 optimal_split_pos = (optimal_split_pos - 1) | 1;
duke@435 5230 }
duke@435 5231
duke@435 5232 TRACE_LINEAR_SCAN(4, tty->print_cr(" splitting at position %d", optimal_split_pos));
duke@435 5233 assert(allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 1), "split pos must be odd when not on block boundary");
duke@435 5234 assert(!allocator()->is_block_begin(optimal_split_pos) || (optimal_split_pos % 2 == 0), "split pos must be even on block boundary");
duke@435 5235
duke@435 5236 Interval* spilled_part = it->split(optimal_split_pos);
duke@435 5237 allocator()->append_interval(spilled_part);
duke@435 5238 allocator()->assign_spill_slot(spilled_part);
duke@435 5239 allocator()->change_spill_state(spilled_part, optimal_split_pos);
duke@435 5240
duke@435 5241 if (!allocator()->is_block_begin(optimal_split_pos)) {
duke@435 5242 TRACE_LINEAR_SCAN(4, tty->print_cr(" inserting move from interval %d to %d", it->reg_num(), spilled_part->reg_num()));
duke@435 5243 insert_move(optimal_split_pos, it, spilled_part);
duke@435 5244 }
duke@435 5245
duke@435 5246 // the current_split_child is needed later when moves are inserted for reloading
duke@435 5247 assert(spilled_part->current_split_child() == it, "overwriting wrong current_split_child");
duke@435 5248 spilled_part->make_current_split_child();
duke@435 5249
duke@435 5250 TRACE_LINEAR_SCAN(2, tty->print_cr(" split interval in two parts"));
duke@435 5251 TRACE_LINEAR_SCAN(2, tty->print (" "); it->print());
duke@435 5252 TRACE_LINEAR_SCAN(2, tty->print (" "); spilled_part->print());
duke@435 5253 }
duke@435 5254 }
duke@435 5255
duke@435 5256
duke@435 5257 void LinearScanWalker::split_stack_interval(Interval* it) {
duke@435 5258 int min_split_pos = current_position() + 1;
duke@435 5259 int max_split_pos = MIN2(it->first_usage(shouldHaveRegister), it->to());
duke@435 5260
duke@435 5261 split_before_usage(it, min_split_pos, max_split_pos);
duke@435 5262 }
duke@435 5263
duke@435 5264 void LinearScanWalker::split_when_partial_register_available(Interval* it, int register_available_until) {
duke@435 5265 int min_split_pos = MAX2(it->previous_usage(shouldHaveRegister, register_available_until), it->from() + 1);
duke@435 5266 int max_split_pos = register_available_until;
duke@435 5267
duke@435 5268 split_before_usage(it, min_split_pos, max_split_pos);
duke@435 5269 }
duke@435 5270
duke@435 5271 void LinearScanWalker::split_and_spill_interval(Interval* it) {
duke@435 5272 assert(it->state() == activeState || it->state() == inactiveState, "other states not allowed");
duke@435 5273
duke@435 5274 int current_pos = current_position();
duke@435 5275 if (it->state() == inactiveState) {
duke@435 5276 // the interval is currently inactive, so no spill slot is needed for now.
duke@435 5277 // When the split part is activated, the interval gets a new chance to receive a register,
duke@435 5278 // so in the best case no stack slot is necessary
duke@435 5279 assert(it->has_hole_between(current_pos - 1, current_pos + 1), "interval cannot be inactive otherwise");
duke@435 5280 split_before_usage(it, current_pos + 1, current_pos + 1);
duke@435 5281
duke@435 5282 } else {
duke@435 5283 // search the position where the interval must have a register and split
duke@435 5284 // at the optimal position before it.
duke@435 5285 // The newly created part is added to the unhandled list and will get a register
duke@435 5286 // when it is activated
duke@435 5287 int min_split_pos = current_pos + 1;
duke@435 5288 int max_split_pos = MIN2(it->next_usage(mustHaveRegister, min_split_pos), it->to());
duke@435 5289
duke@435 5290 split_before_usage(it, min_split_pos, max_split_pos);
duke@435 5291
duke@435 5292 assert(it->next_usage(mustHaveRegister, current_pos) == max_jint, "the remaining part is spilled to stack and therefore has no register");
duke@435 5293 split_for_spilling(it);
duke@435 5294 }
duke@435 5295 }
duke@435 5296
duke@435 5297
duke@435 5298 int LinearScanWalker::find_free_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
duke@435 5299 int min_full_reg = any_reg;
duke@435 5300 int max_partial_reg = any_reg;
duke@435 5301
duke@435 5302 for (int i = _first_reg; i <= _last_reg; i++) {
duke@435 5303 if (i == ignore_reg) {
duke@435 5304 // this register must be ignored
duke@435 5305
duke@435 5306 } else if (_use_pos[i] >= interval_to) {
duke@435 5307 // this register is free for the full interval
duke@435 5308 if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
duke@435 5309 min_full_reg = i;
duke@435 5310 }
duke@435 5311 } else if (_use_pos[i] > reg_needed_until) {
duke@435 5312 // this register is at least free until reg_needed_until
duke@435 5313 if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
duke@435 5314 max_partial_reg = i;
duke@435 5315 }
duke@435 5316 }
duke@435 5317 }
duke@435 5318
duke@435 5319 if (min_full_reg != any_reg) {
duke@435 5320 return min_full_reg;
duke@435 5321 } else if (max_partial_reg != any_reg) {
duke@435 5322 *need_split = true;
duke@435 5323 return max_partial_reg;
duke@435 5324 } else {
duke@435 5325 return any_reg;
duke@435 5326 }
duke@435 5327 }
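// Worked example (editor's addition, hypothetical register state): for cur covering
// positions 10 to 40 we get interval_to == 40 and reg_needed_until == 11. With
//   _use_pos = { r0: 25, r1: 50, r2: 8 }
// r1 is free for the whole interval and is returned as min_full_reg without a split;
// r2 is not free even until reg_needed_until. Without r1, r0 would be returned as
// max_partial_reg with *need_split = true, and cur would later be split before 25.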
duke@435 5328
duke@435 5329 int LinearScanWalker::find_free_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
duke@435 5330 assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
duke@435 5331
duke@435 5332 int min_full_reg = any_reg;
duke@435 5333 int max_partial_reg = any_reg;
duke@435 5334
duke@435 5335 for (int i = _first_reg; i < _last_reg; i+=2) {
duke@435 5336 if (_use_pos[i] >= interval_to && _use_pos[i + 1] >= interval_to) {
duke@435 5337 // this register is free for the full interval
duke@435 5338 if (min_full_reg == any_reg || i == hint_reg || (_use_pos[i] < _use_pos[min_full_reg] && min_full_reg != hint_reg)) {
duke@435 5339 min_full_reg = i;
duke@435 5340 }
duke@435 5341 } else if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
duke@435 5342 // this register is at least free until reg_needed_until
duke@435 5343 if (max_partial_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_partial_reg] && max_partial_reg != hint_reg)) {
duke@435 5344 max_partial_reg = i;
duke@435 5345 }
duke@435 5346 }
duke@435 5347 }
duke@435 5348
duke@435 5349 if (min_full_reg != any_reg) {
duke@435 5350 return min_full_reg;
duke@435 5351 } else if (max_partial_reg != any_reg) {
duke@435 5352 *need_split = true;
duke@435 5353 return max_partial_reg;
duke@435 5354 } else {
duke@435 5355 return any_reg;
duke@435 5356 }
duke@435 5357 }
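// Editor's note (illustrative, hypothetical values): the double-register variant only
// accepts a pair when both halves qualify. With interval_to == 40 and
//   _use_pos = { r0: 50, r1: 35, r2: 60, r3: 55 }
// the pair (r0, r1) is no full match because r1 is needed again at 35 < 40, while
// (r2, r3) satisfies both halves and is returned as min_full_reg.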
duke@435 5358
duke@435 5359
duke@435 5360 bool LinearScanWalker::alloc_free_reg(Interval* cur) {
duke@435 5361 TRACE_LINEAR_SCAN(2, tty->print("trying to find free register for "); cur->print());
duke@435 5362
duke@435 5363 init_use_lists(true);
duke@435 5364 free_exclude_active_fixed();
duke@435 5365 free_exclude_active_any();
duke@435 5366 free_collect_inactive_fixed(cur);
duke@435 5367 free_collect_inactive_any(cur);
duke@435 5368 // free_collect_unhandled(fixedKind, cur);
duke@435 5369 assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
duke@435 5370
duke@435 5371 // _use_pos contains the start of the next interval that has this register assigned
duke@435 5372 // (either as a fixed register or a register allocated normally in the past).
duke@435 5373 // Only intervals overlapping with cur are processed; non-overlapping intervals can be ignored safely
duke@435 5374 TRACE_LINEAR_SCAN(4, tty->print_cr(" state of registers:"));
duke@435 5375 TRACE_LINEAR_SCAN(4, for (int i = _first_reg; i <= _last_reg; i++) tty->print_cr(" reg %d: use_pos: %d", i, _use_pos[i]));
duke@435 5376
duke@435 5377 int hint_reg, hint_regHi;
duke@435 5378 Interval* register_hint = cur->register_hint();
duke@435 5379 if (register_hint != NULL) {
duke@435 5380 hint_reg = register_hint->assigned_reg();
duke@435 5381 hint_regHi = register_hint->assigned_regHi();
duke@435 5382
duke@435 5383 if (allocator()->is_precolored_cpu_interval(register_hint)) {
duke@435 5384 assert(hint_reg != any_reg && hint_regHi == any_reg, "must be for fixed intervals");
duke@435 5385 hint_regHi = hint_reg + 1; // connect e.g. eax-edx
duke@435 5386 }
duke@435 5387 TRACE_LINEAR_SCAN(4, tty->print(" hint registers %d, %d from interval ", hint_reg, hint_regHi); register_hint->print());
duke@435 5388
duke@435 5389 } else {
duke@435 5390 hint_reg = any_reg;
duke@435 5391 hint_regHi = any_reg;
duke@435 5392 }
duke@435 5393 assert(hint_reg == any_reg || hint_reg != hint_regHi, "hint reg and regHi equal");
duke@435 5394 assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned to interval");
duke@435 5395
duke@435 5396 // the register must be free at least until this position
duke@435 5397 int reg_needed_until = cur->from() + 1;
duke@435 5398 int interval_to = cur->to();
duke@435 5399
duke@435 5400 bool need_split = false;
duke@435 5401 int split_pos = -1;
duke@435 5402 int reg = any_reg;
duke@435 5403 int regHi = any_reg;
duke@435 5404
duke@435 5405 if (_adjacent_regs) {
duke@435 5406 reg = find_free_double_reg(reg_needed_until, interval_to, hint_reg, &need_split);
duke@435 5407 regHi = reg + 1;
duke@435 5408 if (reg == any_reg) {
duke@435 5409 return false;
duke@435 5410 }
duke@435 5411 split_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
duke@435 5412
duke@435 5413 } else {
duke@435 5414 reg = find_free_reg(reg_needed_until, interval_to, hint_reg, any_reg, &need_split);
duke@435 5415 if (reg == any_reg) {
duke@435 5416 return false;
duke@435 5417 }
duke@435 5418 split_pos = _use_pos[reg];
duke@435 5419
duke@435 5420 if (_num_phys_regs == 2) {
duke@435 5421 regHi = find_free_reg(reg_needed_until, interval_to, hint_regHi, reg, &need_split);
duke@435 5422
duke@435 5423 if (_use_pos[reg] < interval_to && regHi == any_reg) {
duke@435 5424 // do not split interval if only one register can be assigned until the split pos
duke@435 5425 // (when one register is found for the whole interval, split&spill is only
duke@435 5426 // performed for the hi register)
duke@435 5427 return false;
duke@435 5428
duke@435 5429 } else if (regHi != any_reg) {
duke@435 5430 split_pos = MIN2(split_pos, _use_pos[regHi]);
duke@435 5431
duke@435 5432 // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
duke@435 5433 if (reg > regHi) {
duke@435 5434 int temp = reg;
duke@435 5435 reg = regHi;
duke@435 5436 regHi = temp;
duke@435 5437 }
duke@435 5438 }
duke@435 5439 }
duke@435 5440 }
duke@435 5441
duke@435 5442 cur->assign_reg(reg, regHi);
duke@435 5443 TRACE_LINEAR_SCAN(2, tty->print_cr("selected register %d, %d", reg, regHi));
duke@435 5444
duke@435 5445 assert(split_pos > 0, "invalid split_pos");
duke@435 5446 if (need_split) {
duke@435 5447 // register not available for full interval, so split it
duke@435 5448 split_when_partial_register_available(cur, split_pos);
duke@435 5449 }
duke@435 5450
duke@435 5451 // only return true if interval is completely assigned
duke@435 5452 return _num_phys_regs == 1 || regHi != any_reg;
duke@435 5453 }
duke@435 5454
duke@435 5455
duke@435 5456 int LinearScanWalker::find_locked_reg(int reg_needed_until, int interval_to, int hint_reg, int ignore_reg, bool* need_split) {
duke@435 5457 int max_reg = any_reg;
duke@435 5458
duke@435 5459 for (int i = _first_reg; i <= _last_reg; i++) {
duke@435 5460 if (i == ignore_reg) {
duke@435 5461 // this register must be ignored
duke@435 5462
duke@435 5463 } else if (_use_pos[i] > reg_needed_until) {
duke@435 5464 if (max_reg == any_reg || i == hint_reg || (_use_pos[i] > _use_pos[max_reg] && max_reg != hint_reg)) {
duke@435 5465 max_reg = i;
duke@435 5466 }
duke@435 5467 }
duke@435 5468 }
duke@435 5469
duke@435 5470 if (max_reg != any_reg && _block_pos[max_reg] <= interval_to) {
duke@435 5471 *need_split = true;
duke@435 5472 }
duke@435 5473
duke@435 5474 return max_reg;
duke@435 5475 }
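// Editor's note (illustrative, hypothetical values): in contrast to find_free_reg,
// the best spill candidate is the register whose intervals are used *farthest* away.
// With reg_needed_until == 11 and _use_pos = { r0: 20, r1: 90 }, r1 is chosen, since
// spilling intervals that are not used again until 90 is cheapest; *need_split is set
// when a fixed interval blocks r1 before cur ends (_block_pos[r1] <= interval_to).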
duke@435 5476
duke@435 5477 int LinearScanWalker::find_locked_double_reg(int reg_needed_until, int interval_to, int hint_reg, bool* need_split) {
duke@435 5478 assert((_last_reg - _first_reg + 1) % 2 == 0, "adjust algorithm");
duke@435 5479
duke@435 5480 int max_reg = any_reg;
duke@435 5481
duke@435 5482 for (int i = _first_reg; i < _last_reg; i+=2) {
duke@435 5483 if (_use_pos[i] > reg_needed_until && _use_pos[i + 1] > reg_needed_until) {
duke@435 5484 if (max_reg == any_reg || _use_pos[i] > _use_pos[max_reg]) {
duke@435 5485 max_reg = i;
duke@435 5486 }
duke@435 5487 }
duke@435 5488 }
duke@435 5489
duke@435 5490 if (max_reg != any_reg && (_block_pos[max_reg] <= interval_to || _block_pos[max_reg + 1] <= interval_to)) {
duke@435 5491 *need_split = true;
duke@435 5492 }
duke@435 5493
duke@435 5494 return max_reg;
duke@435 5495 }
duke@435 5496
duke@435 5497 void LinearScanWalker::split_and_spill_intersecting_intervals(int reg, int regHi) {
duke@435 5498 assert(reg != any_reg, "no register assigned");
duke@435 5499
duke@435 5500 for (int i = 0; i < _spill_intervals[reg]->length(); i++) {
duke@435 5501 Interval* it = _spill_intervals[reg]->at(i);
duke@435 5502 remove_from_list(it);
duke@435 5503 split_and_spill_interval(it);
duke@435 5504 }
duke@435 5505
duke@435 5506 if (regHi != any_reg) {
duke@435 5507 IntervalList* processed = _spill_intervals[reg];
duke@435 5508 for (int i = 0; i < _spill_intervals[regHi]->length(); i++) {
duke@435 5509 Interval* it = _spill_intervals[regHi]->at(i);
duke@435 5510 if (processed->index_of(it) == -1) {
duke@435 5511 remove_from_list(it);
duke@435 5512 split_and_spill_interval(it);
duke@435 5513 }
duke@435 5514 }
duke@435 5515 }
duke@435 5516 }
duke@435 5517
duke@435 5518
duke@435 5519 // Split an Interval and spill it to memory so that cur can be placed in a register
duke@435 5520 void LinearScanWalker::alloc_locked_reg(Interval* cur) {
duke@435 5521 TRACE_LINEAR_SCAN(2, tty->print("need to split and spill to get register for "); cur->print());
duke@435 5522
duke@435 5523 // collect current usage of registers
duke@435 5524 init_use_lists(false);
duke@435 5525 spill_exclude_active_fixed();
duke@435 5526 // spill_block_unhandled_fixed(cur);
duke@435 5527 assert(unhandled_first(fixedKind) == Interval::end(), "must not have unhandled fixed intervals because all fixed intervals have a use at position 0");
duke@435 5528 spill_block_inactive_fixed(cur);
duke@435 5529 spill_collect_active_any();
duke@435 5530 spill_collect_inactive_any(cur);
duke@435 5531
duke@435 5532 #ifndef PRODUCT
duke@435 5533 if (TraceLinearScanLevel >= 4) {
duke@435 5534 tty->print_cr(" state of registers:");
duke@435 5535 for (int i = _first_reg; i <= _last_reg; i++) {
duke@435 5536 tty->print(" reg %d: use_pos: %d, block_pos: %d, intervals: ", i, _use_pos[i], _block_pos[i]);
duke@435 5537 for (int j = 0; j < _spill_intervals[i]->length(); j++) {
duke@435 5538 tty->print("%d ", _spill_intervals[i]->at(j)->reg_num());
duke@435 5539 }
duke@435 5540 tty->cr();
duke@435 5541 }
duke@435 5542 }
duke@435 5543 #endif
duke@435 5544
duke@435 5545 // the register must be free at least until this position
duke@435 5546 int reg_needed_until = MIN2(cur->first_usage(mustHaveRegister), cur->from() + 1);
duke@435 5547 int interval_to = cur->to();
duke@435 5548 assert (reg_needed_until > 0 && reg_needed_until < max_jint, "interval has no use");
duke@435 5549
duke@435 5550 int split_pos = 0;
duke@435 5551 int use_pos = 0;
duke@435 5552 bool need_split = false;
duke@435 5553 int reg, regHi;
duke@435 5554
duke@435 5555 if (_adjacent_regs) {
duke@435 5556 reg = find_locked_double_reg(reg_needed_until, interval_to, any_reg, &need_split);
duke@435 5557 regHi = reg + 1;
duke@435 5558
duke@435 5559 if (reg != any_reg) {
duke@435 5560 use_pos = MIN2(_use_pos[reg], _use_pos[regHi]);
duke@435 5561 split_pos = MIN2(_block_pos[reg], _block_pos[regHi]);
duke@435 5562 }
duke@435 5563 } else {
duke@435 5564 reg = find_locked_reg(reg_needed_until, interval_to, any_reg, cur->assigned_reg(), &need_split);
duke@435 5565 regHi = any_reg;
duke@435 5566
duke@435 5567 if (reg != any_reg) {
duke@435 5568 use_pos = _use_pos[reg];
duke@435 5569 split_pos = _block_pos[reg];
duke@435 5570
duke@435 5571 if (_num_phys_regs == 2) {
duke@435 5572 if (cur->assigned_reg() != any_reg) {
duke@435 5573 regHi = reg;
duke@435 5574 reg = cur->assigned_reg();
duke@435 5575 } else {
duke@435 5576 regHi = find_locked_reg(reg_needed_until, interval_to, any_reg, reg, &need_split);
duke@435 5577 if (regHi != any_reg) {
duke@435 5578 use_pos = MIN2(use_pos, _use_pos[regHi]);
duke@435 5579 split_pos = MIN2(split_pos, _block_pos[regHi]);
duke@435 5580 }
duke@435 5581 }
duke@435 5582
duke@435 5583 if (regHi != any_reg && reg > regHi) {
duke@435 5584 // sort register numbers to prevent e.g. a move from eax,ebx to ebx,eax
duke@435 5585 int temp = reg;
duke@435 5586 reg = regHi;
duke@435 5587 regHi = temp;
duke@435 5588 }
duke@435 5589 }
duke@435 5590 }
duke@435 5591 }
duke@435 5592
duke@435 5593 if (reg == any_reg || (_num_phys_regs == 2 && regHi == any_reg) || use_pos <= cur->first_usage(mustHaveRegister)) {
duke@435 5594 // the first use of cur is later than the spilling position -> spill cur
duke@435 5595 TRACE_LINEAR_SCAN(4, tty->print_cr("able to spill current interval. first_usage(register): %d, use_pos: %d", cur->first_usage(mustHaveRegister), use_pos));
duke@435 5596
duke@435 5597 if (cur->first_usage(mustHaveRegister) <= cur->from() + 1) {
duke@435 5598 assert(false, "cannot spill interval that is used in first instruction (possible reason: no register found)");
duke@435 5599 // assign a reasonable register and do a bailout in product mode to avoid errors
duke@435 5600 allocator()->assign_spill_slot(cur);
duke@435 5601 BAILOUT("LinearScan: no register found");
duke@435 5602 }
duke@435 5603
duke@435 5604 split_and_spill_interval(cur);
duke@435 5605 } else {
duke@435 5606 TRACE_LINEAR_SCAN(4, tty->print_cr("decided to use register %d, %d", reg, regHi));
duke@435 5607 assert(reg != any_reg && (_num_phys_regs == 1 || regHi != any_reg), "no register found");
duke@435 5608 assert(split_pos > 0, "invalid split_pos");
duke@435 5609 assert(need_split == false || split_pos > cur->from(), "splitting interval at from");
duke@435 5610
duke@435 5611 cur->assign_reg(reg, regHi);
duke@435 5612 if (need_split) {
duke@435 5613 // register not available for full interval, so split it
duke@435 5614 split_when_partial_register_available(cur, split_pos);
duke@435 5615 }
duke@435 5616
duke@435 5617 // perform splitting and spilling for all affected intervals
duke@435 5618 split_and_spill_intersecting_intervals(reg, regHi);
duke@435 5619 }
duke@435 5620 }
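// Editor's note (illustrative): the spill-vs-evict decision above hinges on
// use_pos <= cur->first_usage(mustHaveRegister). With hypothetical values
// use_pos == 24 and first_usage == 30, cur itself is split and spilled: evicting an
// interval that is needed again at 24 in order to satisfy a use at 30 would only
// move the problem, not solve it.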
duke@435 5621
duke@435 5622 bool LinearScanWalker::no_allocation_possible(Interval* cur) {
never@739 5623 #ifdef X86
duke@435 5624 // fast calculation of intervals that can never get a register because
duke@435 5625 // the next instruction is a call that blocks all registers
duke@435 5626 // Note: this does not work if callee-saved registers are available (e.g. on Sparc)
duke@435 5627
duke@435 5628 // check if this interval is the result of a split operation
duke@435 5629 // (an interval got a register until this position)
duke@435 5630 int pos = cur->from();
duke@435 5631 if ((pos & 1) == 1) {
duke@435 5632 // the current instruction is a call that blocks all registers
duke@435 5633 if (pos < allocator()->max_lir_op_id() && allocator()->has_call(pos + 1)) {
duke@435 5634 TRACE_LINEAR_SCAN(4, tty->print_cr(" free register cannot be available because all registers blocked by following call"));
duke@435 5635
duke@435 5636 // safety check that there is really no register available
duke@435 5637 assert(alloc_free_reg(cur) == false, "found a register for this interval");
duke@435 5638 return true;
duke@435 5639 }
duke@435 5640
duke@435 5641 }
duke@435 5642 #endif
duke@435 5643 return false;
duke@435 5644 }
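// Editor's note (illustrative): split children created at a non-block-boundary start
// at an odd op_id (see the (pos - 1) | 1 adjustment above), so an odd cur->from()
// means "immediately before the next instruction". If that instruction at pos + 1 is
// a call that destroys all registers, attempting alloc_free_reg() would be wasted
// work, hence the early true result.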
duke@435 5645
duke@435 5646 void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
duke@435 5647 BasicType type = cur->type();
duke@435 5648 _num_phys_regs = LinearScan::num_physical_regs(type);
duke@435 5649 _adjacent_regs = LinearScan::requires_adjacent_regs(type);
duke@435 5650
duke@435 5651 if (pd_init_regs_for_alloc(cur)) {
duke@435 5652 // the appropriate register range was selected.
duke@435 5653 } else if (type == T_FLOAT || type == T_DOUBLE) {
duke@435 5654 _first_reg = pd_first_fpu_reg;
duke@435 5655 _last_reg = pd_last_fpu_reg;
duke@435 5656 } else {
duke@435 5657 _first_reg = pd_first_cpu_reg;
iveresov@2344 5658 _last_reg = FrameMap::last_cpu_reg();
duke@435 5659 }
duke@435 5660
duke@435 5661 assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
duke@435 5662 assert(0 <= _last_reg && _last_reg < LinearScan::nof_regs, "out of range");
duke@435 5663 }
duke@435 5664
duke@435 5665
duke@435 5666 bool LinearScanWalker::is_move(LIR_Op* op, Interval* from, Interval* to) {
duke@435 5667 if (op->code() != lir_move) {
duke@435 5668 return false;
duke@435 5669 }
duke@435 5670 assert(op->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 5671
duke@435 5672 LIR_Opr in = ((LIR_Op1*)op)->in_opr();
duke@435 5673 LIR_Opr res = ((LIR_Op1*)op)->result_opr();
duke@435 5674 return in->is_virtual() && res->is_virtual() && in->vreg_number() == from->reg_num() && res->vreg_number() == to->reg_num();
duke@435 5675 }
duke@435 5676
duke@435 5677 // optimization (especially for phi functions of nested loops):
duke@435 5678 // assign same spill slot to non-intersecting intervals
duke@435 5679 void LinearScanWalker::combine_spilled_intervals(Interval* cur) {
duke@435 5680 if (cur->is_split_child()) {
duke@435 5681 // optimization is only suitable for split parents
duke@435 5682 return;
duke@435 5683 }
duke@435 5684
duke@435 5685 Interval* register_hint = cur->register_hint(false);
duke@435 5686 if (register_hint == NULL) {
duke@435 5687 // cur is not the target of a move, otherwise register_hint would be set
duke@435 5688 return;
duke@435 5689 }
duke@435 5690 assert(register_hint->is_split_parent(), "register hint must be split parent");
duke@435 5691
duke@435 5692 if (cur->spill_state() != noOptimization || register_hint->spill_state() != noOptimization) {
duke@435 5693 // combining the stack slots for intervals where spill move optimization is applied
duke@435 5694 // is not benefitial and would cause problems
duke@435 5695 return;
duke@435 5696 }
duke@435 5697
duke@435 5698 int begin_pos = cur->from();
duke@435 5699 int end_pos = cur->to();
duke@435 5700 if (end_pos > allocator()->max_lir_op_id() || (begin_pos & 1) != 0 || (end_pos & 1) != 0) {
duke@435 5701 // safety check that lir_op_with_id is allowed
duke@435 5702 return;
duke@435 5703 }
duke@435 5704
duke@435 5705 if (!is_move(allocator()->lir_op_with_id(begin_pos), register_hint, cur) || !is_move(allocator()->lir_op_with_id(end_pos), cur, register_hint)) {
duke@435 5706 // cur and register_hint are not connected with two moves
duke@435 5707 return;
duke@435 5708 }
duke@435 5709
duke@435 5710 Interval* begin_hint = register_hint->split_child_at_op_id(begin_pos, LIR_OpVisitState::inputMode);
duke@435 5711 Interval* end_hint = register_hint->split_child_at_op_id(end_pos, LIR_OpVisitState::outputMode);
duke@435 5712 if (begin_hint == end_hint || begin_hint->to() != begin_pos || end_hint->from() != end_pos) {
duke@435 5713 // register_hint must be split, otherwise the re-writing of use positions does not work
duke@435 5714 return;
duke@435 5715 }
duke@435 5716
duke@435 5717 assert(begin_hint->assigned_reg() != any_reg, "must have register assigned");
duke@435 5718 assert(end_hint->assigned_reg() == any_reg, "must not have register assigned");
duke@435 5719 assert(cur->first_usage(mustHaveRegister) == begin_pos, "must have use position at begin of interval because of move");
duke@435 5720 assert(end_hint->first_usage(mustHaveRegister) == end_pos, "must have use position at begin of interval because of move");
duke@435 5721
duke@435 5722 if (begin_hint->assigned_reg() < LinearScan::nof_regs) {
duke@435 5723 // register_hint is not spilled at begin_pos, so it would not be beneficial to immediately spill cur
duke@435 5724 return;
duke@435 5725 }
duke@435 5726 assert(register_hint->canonical_spill_slot() != -1, "must be set when part of interval was spilled");
duke@435 5727
duke@435 5728 // modify intervals such that cur gets the same stack slot as register_hint
duke@435 5729 // delete use positions to prevent the intervals from getting a register at the beginning
duke@435 5730 cur->set_canonical_spill_slot(register_hint->canonical_spill_slot());
duke@435 5731 cur->remove_first_use_pos();
duke@435 5732 end_hint->remove_first_use_pos();
duke@435 5733 }
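// Editor's sketch of the recognized pattern (hypothetical op_ids): a value that is
// copied into a phi at the loop header and copied back at the loop end,
//
//   op 16: move register_hint -> cur      // begin_pos == cur->from()
//   ...    cur lives only inside the loop
//   op 40: move cur -> register_hint      // end_pos == cur->to()
//
// If register_hint is spilled at op 16, giving cur the same canonical spill slot
// turns both moves into moves with equal source and target, which assign_reg_nums
// later removes.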
duke@435 5734
duke@435 5735
duke@435 5736 // allocate a physical register or memory location to an interval
duke@435 5737 bool LinearScanWalker::activate_current() {
duke@435 5738 Interval* cur = current();
duke@435 5739 bool result = true;
duke@435 5740
duke@435 5741 TRACE_LINEAR_SCAN(2, tty->print ("+++++ activating interval "); cur->print());
duke@435 5742 TRACE_LINEAR_SCAN(4, tty->print_cr(" split_parent: %d, insert_move_when_activated: %d", cur->split_parent()->reg_num(), cur->insert_move_when_activated()));
duke@435 5743
duke@435 5744 if (cur->assigned_reg() >= LinearScan::nof_regs) {
duke@435 5745 // activating an interval that has a stack slot assigned -> split it at first use position
duke@435 5746 // used for method parameters
duke@435 5747 TRACE_LINEAR_SCAN(4, tty->print_cr(" interval has spill slot assigned (method parameter) -> split it before first use"));
duke@435 5748
duke@435 5749 split_stack_interval(cur);
duke@435 5750 result = false;
duke@435 5751
duke@435 5752 } else if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::must_start_in_memory)) {
duke@435 5753 // activating an interval that must start in a stack slot, but may get a register later
duke@435 5754 // used for lir_roundfp: rounding is done by store to stack and reload later
duke@435 5755 TRACE_LINEAR_SCAN(4, tty->print_cr(" interval must start in stack slot -> split it before first use"));
duke@435 5756 assert(cur->assigned_reg() == any_reg && cur->assigned_regHi() == any_reg, "register already assigned");
duke@435 5757
duke@435 5758 allocator()->assign_spill_slot(cur);
duke@435 5759 split_stack_interval(cur);
duke@435 5760 result = false;
duke@435 5761
duke@435 5762 } else if (cur->assigned_reg() == any_reg) {
duke@435 5763 // interval has no register assigned -> normal allocation
duke@435 5764 // (this is the normal case for most intervals)
duke@435 5765 TRACE_LINEAR_SCAN(4, tty->print_cr(" normal allocation of register"));
duke@435 5766
duke@435 5767 // assign same spill slot to non-intersecting intervals
duke@435 5768 combine_spilled_intervals(cur);
duke@435 5769
duke@435 5770 init_vars_for_alloc(cur);
duke@435 5771 if (no_allocation_possible(cur) || !alloc_free_reg(cur)) {
duke@435 5772 // no empty register available.
duke@435 5773 // split and spill another interval so that this interval gets a register
duke@435 5774 alloc_locked_reg(cur);
duke@435 5775 }
duke@435 5776
duke@435 5777 // spilled intervals need not be moved to the active list
duke@435 5778 if (cur->assigned_reg() >= LinearScan::nof_regs) {
duke@435 5779 result = false;
duke@435 5780 }
duke@435 5781 }
duke@435 5782
duke@435 5783 // load spilled values that become active from stack slot to register
duke@435 5784 if (cur->insert_move_when_activated()) {
duke@435 5785 assert(cur->is_split_child(), "must be");
duke@435 5786 assert(cur->current_split_child() != NULL, "must be");
duke@435 5787 assert(cur->current_split_child()->reg_num() != cur->reg_num(), "cannot insert move between same interval");
duke@435 5788 TRACE_LINEAR_SCAN(4, tty->print_cr("Inserting move from interval %d to %d because insert_move_when_activated is set", cur->current_split_child()->reg_num(), cur->reg_num()));
duke@435 5789
duke@435 5790 insert_move(cur->from(), cur->current_split_child(), cur);
duke@435 5791 }
duke@435 5792 cur->make_current_split_child();
duke@435 5793
duke@435 5794 return result; // true = interval is moved to active list
duke@435 5795 }
duke@435 5796
duke@435 5797
duke@435 5798 // Implementation of EdgeMoveOptimizer
duke@435 5799
duke@435 5800 EdgeMoveOptimizer::EdgeMoveOptimizer() :
duke@435 5801 _edge_instructions(4),
duke@435 5802 _edge_instructions_idx(4)
duke@435 5803 {
duke@435 5804 }
duke@435 5805
duke@435 5806 void EdgeMoveOptimizer::optimize(BlockList* code) {
duke@435 5807 EdgeMoveOptimizer optimizer;
duke@435 5808
duke@435 5809 // ignore the first block in the list (index 0 is not processed)
duke@435 5810 for (int i = code->length() - 1; i >= 1; i--) {
duke@435 5811 BlockBegin* block = code->at(i);
duke@435 5812
duke@435 5813 if (block->number_of_preds() > 1 && !block->is_set(BlockBegin::exception_entry_flag)) {
duke@435 5814 optimizer.optimize_moves_at_block_end(block);
duke@435 5815 }
duke@435 5816 if (block->number_of_sux() == 2) {
duke@435 5817 optimizer.optimize_moves_at_block_begin(block);
duke@435 5818 }
duke@435 5819 }
duke@435 5820 }
duke@435 5821
duke@435 5822
duke@435 5823 // clear all internal data structures
duke@435 5824 void EdgeMoveOptimizer::init_instructions() {
duke@435 5825 _edge_instructions.clear();
duke@435 5826 _edge_instructions_idx.clear();
duke@435 5827 }
duke@435 5828
duke@435 5829 // append a lir-instruction-list and the index of the current operation in the list
duke@435 5830 void EdgeMoveOptimizer::append_instructions(LIR_OpList* instructions, int instructions_idx) {
duke@435 5831 _edge_instructions.append(instructions);
duke@435 5832 _edge_instructions_idx.append(instructions_idx);
duke@435 5833 }
duke@435 5834
duke@435 5835 // return the current operation of the given edge (predecessor or successor)
duke@435 5836 LIR_Op* EdgeMoveOptimizer::instruction_at(int edge) {
duke@435 5837 LIR_OpList* instructions = _edge_instructions.at(edge);
duke@435 5838 int idx = _edge_instructions_idx.at(edge);
duke@435 5839
duke@435 5840 if (idx < instructions->length()) {
duke@435 5841 return instructions->at(idx);
duke@435 5842 } else {
duke@435 5843 return NULL;
duke@435 5844 }
duke@435 5845 }
duke@435 5846
duke@435 5847 // removes the current operation of the given edge (predecessor or successor)
duke@435 5848 void EdgeMoveOptimizer::remove_cur_instruction(int edge, bool decrement_index) {
duke@435 5849 LIR_OpList* instructions = _edge_instructions.at(edge);
duke@435 5850 int idx = _edge_instructions_idx.at(edge);
duke@435 5851 instructions->remove_at(idx);
duke@435 5852
duke@435 5853 if (decrement_index) {
duke@435 5854 _edge_instructions_idx.at_put(edge, idx - 1);
duke@435 5855 }
duke@435 5856 }
duke@435 5857
duke@435 5858
duke@435 5859 bool EdgeMoveOptimizer::operations_different(LIR_Op* op1, LIR_Op* op2) {
duke@435 5860 if (op1 == NULL || op2 == NULL) {
duke@435 5861 // at least one block is already empty -> no optimization possible
duke@435 5862 return true;
duke@435 5863 }
duke@435 5864
duke@435 5865 if (op1->code() == lir_move && op2->code() == lir_move) {
duke@435 5866 assert(op1->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 5867 assert(op2->as_Op1() != NULL, "move must be LIR_Op1");
duke@435 5868 LIR_Op1* move1 = (LIR_Op1*)op1;
duke@435 5869 LIR_Op1* move2 = (LIR_Op1*)op2;
duke@435 5870 if (move1->info() == move2->info() && move1->in_opr() == move2->in_opr() && move1->result_opr() == move2->result_opr()) {
duke@435 5871 // these moves are exactly equal and can be optimized
duke@435 5872 return false;
duke@435 5873 }
duke@435 5874
duke@435 5875 } else if (op1->code() == lir_fxch && op2->code() == lir_fxch) {
duke@435 5876 assert(op1->as_Op1() != NULL, "fxch must be LIR_Op1");
duke@435 5877 assert(op2->as_Op1() != NULL, "fxch must be LIR_Op1");
duke@435 5878 LIR_Op1* fxch1 = (LIR_Op1*)op1;
duke@435 5879 LIR_Op1* fxch2 = (LIR_Op1*)op2;
duke@435 5880 if (fxch1->in_opr()->as_jint() == fxch2->in_opr()->as_jint()) {
duke@435 5881 // equal FPU stack operations can be optimized
duke@435 5882 return false;
duke@435 5883 }
duke@435 5884
duke@435 5885 } else if (op1->code() == lir_fpop_raw && op2->code() == lir_fpop_raw) {
duke@435 5886 // equal FPU stack operations can be optimized
duke@435 5887 return false;
duke@435 5888 }
duke@435 5889
duke@435 5890 // no optimization possible
duke@435 5891 return true;
duke@435 5892 }
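// Editor's note (illustrative, hypothetical operands): two trailing predecessor ops
// such as
//   move [stack:8] -> eax    (end of predecessor 1)
//   move [stack:8] -> eax    (end of predecessor 2)
// compare as equal here (same code, same info, same in_opr and result_opr), so the
// function returns false and the caller is allowed to keep a single shared copy.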
duke@435 5893
duke@435 5894 void EdgeMoveOptimizer::optimize_moves_at_block_end(BlockBegin* block) {
duke@435 5895 TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at end of block B%d", block->block_id()));
duke@435 5896
duke@435 5897 if (block->is_predecessor(block)) {
duke@435 5898 // currently we can't handle this correctly.
duke@435 5899 return;
duke@435 5900 }
duke@435 5901
duke@435 5902 init_instructions();
duke@435 5903 int num_preds = block->number_of_preds();
duke@435 5904 assert(num_preds > 1, "do not call otherwise");
duke@435 5905 assert(!block->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
duke@435 5906
duke@435 5907 // setup a list with the lir-instructions of all predecessors
duke@435 5908 int i;
duke@435 5909 for (i = 0; i < num_preds; i++) {
duke@435 5910 BlockBegin* pred = block->pred_at(i);
duke@435 5911 LIR_OpList* pred_instructions = pred->lir()->instructions_list();
duke@435 5912
duke@435 5913 if (pred->number_of_sux() != 1) {
duke@435 5914       // this can happen with switch-statements where multiple edges exist between
duke@435 5915       // the same blocks.
duke@435 5916 return;
duke@435 5917 }
duke@435 5918
duke@435 5919 assert(pred->number_of_sux() == 1, "can handle only one successor");
duke@435 5920 assert(pred->sux_at(0) == block, "invalid control flow");
duke@435 5921 assert(pred_instructions->last()->code() == lir_branch, "block with successor must end with branch");
duke@435 5922 assert(pred_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
duke@435 5923 assert(pred_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
duke@435 5924
duke@435 5925 if (pred_instructions->last()->info() != NULL) {
duke@435 5926       // cannot optimize instructions when debug info is needed
duke@435 5927 return;
duke@435 5928 }
duke@435 5929
duke@435 5930 // ignore the unconditional branch at the end of the block
duke@435 5931 append_instructions(pred_instructions, pred_instructions->length() - 2);
duke@435 5932 }
duke@435 5933
duke@435 5934
duke@435 5935 // process lir-instructions while all predecessors end with the same instruction
duke@435 5936 while (true) {
duke@435 5937 LIR_Op* op = instruction_at(0);
duke@435 5938 for (i = 1; i < num_preds; i++) {
duke@435 5939 if (operations_different(op, instruction_at(i))) {
duke@435 5940 // these instructions are different and cannot be optimized ->
duke@435 5941 // no further optimization possible
duke@435 5942 return;
duke@435 5943 }
duke@435 5944 }
duke@435 5945
duke@435 5946 TRACE_LINEAR_SCAN(4, tty->print("found instruction that is equal in all %d predecessors: ", num_preds); op->print());
duke@435 5947
duke@435 5948 // insert the instruction at the beginning of the current block
duke@435 5949 block->lir()->insert_before(1, op);
duke@435 5950
duke@435 5951 // delete the instruction at the end of all predecessors
duke@435 5952 for (i = 0; i < num_preds; i++) {
duke@435 5953 remove_cur_instruction(i, true);
duke@435 5954 }
duke@435 5955 }
duke@435 5956 }
duke@435 5957
duke@435 5958
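// Illustration (hypothetical LIR, simplified): when both successors of a
// block begin with the same move right after their label, the move can be
// hoisted into the block, just before its two branches:
//
//   B1: ... branch [lt] B2  branch B3          B1: ... move [R1] -> [R2]  branch [lt] B2  branch B3
//   B2: label  move [R1] -> [R2] ...    ==>    B2: label ...
//   B3: label  move [R1] -> [R2] ...           B3: label ...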
duke@435 5959 void EdgeMoveOptimizer::optimize_moves_at_block_begin(BlockBegin* block) {
duke@435 5960   TRACE_LINEAR_SCAN(4, tty->print_cr("optimizing moves at beginning of block B%d", block->block_id()));
duke@435 5961
duke@435 5962 init_instructions();
duke@435 5963 int num_sux = block->number_of_sux();
duke@435 5964
duke@435 5965 LIR_OpList* cur_instructions = block->lir()->instructions_list();
duke@435 5966
duke@435 5967 assert(num_sux == 2, "method should not be called otherwise");
duke@435 5968 assert(cur_instructions->last()->code() == lir_branch, "block with successor must end with branch");
duke@435 5969 assert(cur_instructions->last()->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
duke@435 5970 assert(cur_instructions->last()->as_OpBranch()->cond() == lir_cond_always, "block must end with unconditional branch");
duke@435 5971
duke@435 5972 if (cur_instructions->last()->info() != NULL) {
duke@435 5973     // cannot optimize instructions when debug info is needed
duke@435 5974 return;
duke@435 5975 }
duke@435 5976
duke@435 5977 LIR_Op* branch = cur_instructions->at(cur_instructions->length() - 2);
duke@435 5978 if (branch->info() != NULL || (branch->code() != lir_branch && branch->code() != lir_cond_float_branch)) {
duke@435 5979 // not a valid case for optimization
duke@435 5980 // currently, only blocks that end with two branches (conditional branch followed
duke@435 5981 // by unconditional branch) are optimized
duke@435 5982 return;
duke@435 5983 }
duke@435 5984
duke@435 5985 // now it is guaranteed that the block ends with two branch instructions.
duke@435 5986 // the instructions are inserted at the end of the block before these two branches
duke@435 5987 int insert_idx = cur_instructions->length() - 2;
duke@435 5988
duke@435 5989 int i;
duke@435 5990 #ifdef ASSERT
duke@435 5991 for (i = insert_idx - 1; i >= 0; i--) {
duke@435 5992 LIR_Op* op = cur_instructions->at(i);
duke@435 5993 if ((op->code() == lir_branch || op->code() == lir_cond_float_branch) && ((LIR_OpBranch*)op)->block() != NULL) {
duke@435 5994 assert(false, "block with two successors can have only two branch instructions");
duke@435 5995 }
duke@435 5996 }
duke@435 5997 #endif
duke@435 5998
duke@435 5999 // setup a list with the lir-instructions of all successors
duke@435 6000 for (i = 0; i < num_sux; i++) {
duke@435 6001 BlockBegin* sux = block->sux_at(i);
duke@435 6002 LIR_OpList* sux_instructions = sux->lir()->instructions_list();
duke@435 6003
duke@435 6004 assert(sux_instructions->at(0)->code() == lir_label, "block must start with label");
duke@435 6005
duke@435 6006 if (sux->number_of_preds() != 1) {
duke@435 6007       // this can happen with switch-statements where multiple edges exist between
duke@435 6008       // the same blocks.
duke@435 6009 return;
duke@435 6010 }
duke@435 6011 assert(sux->pred_at(0) == block, "invalid control flow");
duke@435 6012 assert(!sux->is_set(BlockBegin::exception_entry_flag), "exception handlers not allowed");
duke@435 6013
duke@435 6014 // ignore the label at the beginning of the block
duke@435 6015 append_instructions(sux_instructions, 1);
duke@435 6016 }
duke@435 6017
duke@435 6018 // process lir-instructions while all successors begin with the same instruction
duke@435 6019 while (true) {
duke@435 6020 LIR_Op* op = instruction_at(0);
duke@435 6021 for (i = 1; i < num_sux; i++) {
duke@435 6022 if (operations_different(op, instruction_at(i))) {
duke@435 6023 // these instructions are different and cannot be optimized ->
duke@435 6024 // no further optimization possible
duke@435 6025 return;
duke@435 6026 }
duke@435 6027 }
duke@435 6028
duke@435 6029 TRACE_LINEAR_SCAN(4, tty->print("----- found instruction that is equal in all %d successors: ", num_sux); op->print());
duke@435 6030
duke@435 6031 // insert instruction at end of current block
duke@435 6032 block->lir()->insert_before(insert_idx, op);
duke@435 6033 insert_idx++;
duke@435 6034
duke@435 6035 // delete the instructions at the beginning of all successors
duke@435 6036 for (i = 0; i < num_sux; i++) {
duke@435 6037 remove_cur_instruction(i, false);
duke@435 6038 }
duke@435 6039 }
duke@435 6040 }
duke@435 6041
duke@435 6042
duke@435 6043 // Implementation of ControlFlowOptimizer
duke@435 6044
duke@435 6045 ControlFlowOptimizer::ControlFlowOptimizer() :
duke@435 6046 _original_preds(4)
duke@435 6047 {
duke@435 6048 }
duke@435 6049
duke@435 6050 void ControlFlowOptimizer::optimize(BlockList* code) {
duke@435 6051   ControlFlowOptimizer optimizer;
duke@435 6052
duke@435 6053 // push the OSR entry block to the end so that we're not jumping over it.
duke@435 6054 BlockBegin* osr_entry = code->at(0)->end()->as_Base()->osr_entry();
duke@435 6055 if (osr_entry) {
duke@435 6056 int index = osr_entry->linear_scan_number();
duke@435 6057 assert(code->at(index) == osr_entry, "wrong index");
duke@435 6058 code->remove_at(index);
duke@435 6059 code->append(osr_entry);
duke@435 6060 }
duke@435 6061
duke@435 6062 optimizer.reorder_short_loops(code);
duke@435 6063 optimizer.delete_empty_blocks(code);
duke@435 6064 optimizer.delete_unnecessary_jumps(code);
duke@435 6065 optimizer.delete_jumps_to_return(code);
duke@435 6066 }
duke@435 6067
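// Illustration (hypothetical block ids): a short loop whose last block
// branches back to the header is rotated so that the header becomes the last
// block of the loop; the loop entry then falls through and the backward
// branch target flag moves to the new first block:
//
//   [... B2(header) B3 B4 ...]  ==>  [... B3 B4 B2(header) ...]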
duke@435 6068 void ControlFlowOptimizer::reorder_short_loop(BlockList* code, BlockBegin* header_block, int header_idx) {
duke@435 6069 int i = header_idx + 1;
duke@435 6070 int max_end = MIN2(header_idx + ShortLoopSize, code->length());
duke@435 6071 while (i < max_end && code->at(i)->loop_depth() >= header_block->loop_depth()) {
duke@435 6072 i++;
duke@435 6073 }
duke@435 6074
duke@435 6075 if (i == code->length() || code->at(i)->loop_depth() < header_block->loop_depth()) {
duke@435 6076 int end_idx = i - 1;
duke@435 6077 BlockBegin* end_block = code->at(end_idx);
duke@435 6078
duke@435 6079 if (end_block->number_of_sux() == 1 && end_block->sux_at(0) == header_block) {
duke@435 6080 // short loop from header_idx to end_idx found -> reorder blocks such that
duke@435 6081 // the header_block is the last block instead of the first block of the loop
duke@435 6082 TRACE_LINEAR_SCAN(1, tty->print_cr("Reordering short loop: length %d, header B%d, end B%d",
duke@435 6083 end_idx - header_idx + 1,
duke@435 6084 header_block->block_id(), end_block->block_id()));
duke@435 6085
duke@435 6086 for (int j = header_idx; j < end_idx; j++) {
duke@435 6087 code->at_put(j, code->at(j + 1));
duke@435 6088 }
duke@435 6089 code->at_put(end_idx, header_block);
duke@435 6090
duke@435 6091 // correct the flags so that any loop alignment occurs in the right place.
duke@435 6092 assert(code->at(end_idx)->is_set(BlockBegin::backward_branch_target_flag), "must be backward branch target");
duke@435 6093 code->at(end_idx)->clear(BlockBegin::backward_branch_target_flag);
duke@435 6094 code->at(header_idx)->set(BlockBegin::backward_branch_target_flag);
duke@435 6095 }
duke@435 6096 }
duke@435 6097 }
duke@435 6098
duke@435 6099 void ControlFlowOptimizer::reorder_short_loops(BlockList* code) {
duke@435 6100 for (int i = code->length() - 1; i >= 0; i--) {
duke@435 6101 BlockBegin* block = code->at(i);
duke@435 6102
duke@435 6103 if (block->is_set(BlockBegin::linear_scan_loop_header_flag)) {
duke@435 6104 reorder_short_loop(code, block, i);
duke@435 6105 }
duke@435 6106 }
duke@435 6107
duke@435 6108 DEBUG_ONLY(verify(code));
duke@435 6109 }
duke@435 6110
duke@435 6111 // only blocks with exactly one successor can be deleted. Such blocks
duke@435 6112 // must always end with an unconditional branch to this successor
duke@435 6113 bool ControlFlowOptimizer::can_delete_block(BlockBegin* block) {
duke@435 6114 if (block->number_of_sux() != 1 || block->number_of_exception_handlers() != 0 || block->is_entry_block()) {
duke@435 6115 return false;
duke@435 6116 }
duke@435 6117
duke@435 6118 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 6119
duke@435 6120 assert(instructions->length() >= 2, "block must have label and branch");
duke@435 6121 assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
duke@435 6122   assert(instructions->last()->as_OpBranch() != NULL, "last instruction must always be a branch");
duke@435 6123 assert(instructions->last()->as_OpBranch()->cond() == lir_cond_always, "branch must be unconditional");
duke@435 6124 assert(instructions->last()->as_OpBranch()->block() == block->sux_at(0), "branch target must be the successor");
duke@435 6125
duke@435 6126   // to be deletable, the block must contain nothing but the label and the unconditional branch
duke@435 6127
duke@435 6128 if (instructions->length() == 2 && instructions->last()->info() == NULL) {
duke@435 6129 return true;
duke@435 6130 }
duke@435 6131 return false;
duke@435 6132 }
duke@435 6133
duke@435 6134 // substitute branch targets in all branch-instructions of this block
duke@435 6135 void ControlFlowOptimizer::substitute_branch_target(BlockBegin* block, BlockBegin* target_from, BlockBegin* target_to) {
duke@435 6136 TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting empty block: substituting from B%d to B%d inside B%d", target_from->block_id(), target_to->block_id(), block->block_id()));
duke@435 6137
duke@435 6138 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 6139
duke@435 6140 assert(instructions->at(0)->code() == lir_label, "first instruction must always be a label");
duke@435 6141 for (int i = instructions->length() - 1; i >= 1; i--) {
duke@435 6142 LIR_Op* op = instructions->at(i);
duke@435 6143
duke@435 6144 if (op->code() == lir_branch || op->code() == lir_cond_float_branch) {
duke@435 6145 assert(op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
duke@435 6146 LIR_OpBranch* branch = (LIR_OpBranch*)op;
duke@435 6147
duke@435 6148 if (branch->block() == target_from) {
duke@435 6149 branch->change_block(target_to);
duke@435 6150 }
duke@435 6151 if (branch->ublock() == target_from) {
duke@435 6152 branch->change_ublock(target_to);
duke@435 6153 }
duke@435 6154 }
duke@435 6155 }
duke@435 6156 }
duke@435 6157
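// Illustration (hypothetical block ids): an empty block B2 (only a label and
// an unconditional branch) is bypassed by retargeting all branches of its
// predecessors directly to its successor:
//
//   B1: ... branch B2                B1: ... branch B3
//   B2: label  branch B3      ==>    (B2 deleted from the code list)
//   B3: label ...                    B3: label ...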
duke@435 6158 void ControlFlowOptimizer::delete_empty_blocks(BlockList* code) {
duke@435 6159 int old_pos = 0;
duke@435 6160 int new_pos = 0;
duke@435 6161 int num_blocks = code->length();
duke@435 6162
duke@435 6163 while (old_pos < num_blocks) {
duke@435 6164 BlockBegin* block = code->at(old_pos);
duke@435 6165
duke@435 6166 if (can_delete_block(block)) {
duke@435 6167 BlockBegin* new_target = block->sux_at(0);
duke@435 6168
duke@435 6169 // propagate backward branch target flag for correct code alignment
duke@435 6170 if (block->is_set(BlockBegin::backward_branch_target_flag)) {
duke@435 6171 new_target->set(BlockBegin::backward_branch_target_flag);
duke@435 6172 }
duke@435 6173
duke@435 6174       // collect a list of all predecessors that contains each predecessor only once;
duke@435 6175       // the predecessors of block are changed during the substitution, so a copy of
duke@435 6176       // the predecessor list is necessary
duke@435 6177 int j;
duke@435 6178 _original_preds.clear();
duke@435 6179 for (j = block->number_of_preds() - 1; j >= 0; j--) {
duke@435 6180 BlockBegin* pred = block->pred_at(j);
duke@435 6181 if (_original_preds.index_of(pred) == -1) {
duke@435 6182 _original_preds.append(pred);
duke@435 6183 }
duke@435 6184 }
duke@435 6185
duke@435 6186 for (j = _original_preds.length() - 1; j >= 0; j--) {
duke@435 6187 BlockBegin* pred = _original_preds.at(j);
duke@435 6188 substitute_branch_target(pred, block, new_target);
duke@435 6189 pred->substitute_sux(block, new_target);
duke@435 6190 }
duke@435 6191 } else {
duke@435 6192 // adjust position of this block in the block list if blocks before
duke@435 6193 // have been deleted
duke@435 6194 if (new_pos != old_pos) {
duke@435 6195 code->at_put(new_pos, code->at(old_pos));
duke@435 6196 }
duke@435 6197 new_pos++;
duke@435 6198 }
duke@435 6199 old_pos++;
duke@435 6200 }
duke@435 6201 code->truncate(new_pos);
duke@435 6202
duke@435 6203 DEBUG_ONLY(verify(code));
duke@435 6204 }
duke@435 6205
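// Illustration (hypothetical LIR, block ids): a branch to the block that
// follows in code-emit order is redundant and is deleted; a conditional plus
// unconditional branch pair is collapsed by negating the condition:
//
//   ... branch B4                    (B4 follows)  ==>  ...                  (fall through to B4)
//   ... branch [lt] B4  branch B7    (B4 follows)  ==>  ... branch [ge] B7   (fall through to B4)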
duke@435 6206 void ControlFlowOptimizer::delete_unnecessary_jumps(BlockList* code) {
duke@435 6207   // skip the last block because a branch at the end of it is always necessary
duke@435 6208 for (int i = code->length() - 2; i >= 0; i--) {
duke@435 6209 BlockBegin* block = code->at(i);
duke@435 6210 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 6211
duke@435 6212 LIR_Op* last_op = instructions->last();
duke@435 6213 if (last_op->code() == lir_branch) {
duke@435 6214 assert(last_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
duke@435 6215 LIR_OpBranch* last_branch = (LIR_OpBranch*)last_op;
duke@435 6216
duke@435 6217 assert(last_branch->block() != NULL, "last branch must always have a block as target");
duke@435 6218 assert(last_branch->label() == last_branch->block()->label(), "must be equal");
duke@435 6219
duke@435 6220 if (last_branch->info() == NULL) {
duke@435 6221 if (last_branch->block() == code->at(i + 1)) {
duke@435 6222
duke@435 6223 TRACE_LINEAR_SCAN(3, tty->print_cr("Deleting unconditional branch at end of block B%d", block->block_id()));
duke@435 6224
duke@435 6225 // delete last branch instruction
duke@435 6226 instructions->truncate(instructions->length() - 1);
duke@435 6227
duke@435 6228 } else {
duke@435 6229 LIR_Op* prev_op = instructions->at(instructions->length() - 2);
duke@435 6230 if (prev_op->code() == lir_branch || prev_op->code() == lir_cond_float_branch) {
duke@435 6231 assert(prev_op->as_OpBranch() != NULL, "branch must be of type LIR_OpBranch");
duke@435 6232 LIR_OpBranch* prev_branch = (LIR_OpBranch*)prev_op;
duke@435 6233
roland@4860 6234 if (prev_branch->stub() == NULL) {
roland@4860 6235
roland@4860 6236 LIR_Op2* prev_cmp = NULL;
roland@4860 6237
roland@4860 6238           for (int j = instructions->length() - 3; j >= 0 && prev_cmp == NULL; j--) {
roland@4860 6239 prev_op = instructions->at(j);
roland@4860 6240 if (prev_op->code() == lir_cmp) {
roland@4860 6241               assert(prev_op->as_Op2() != NULL, "cmp must be of type LIR_Op2");
roland@4860 6242 prev_cmp = (LIR_Op2*)prev_op;
roland@4860 6243 assert(prev_branch->cond() == prev_cmp->condition(), "should be the same");
roland@4860 6244 }
bobv@2036 6245 }
roland@4860 6246           assert(prev_cmp != NULL, "should have found cmp instruction for branch");
roland@4860 6247 if (prev_branch->block() == code->at(i + 1) && prev_branch->info() == NULL) {
roland@4860 6248
roland@4860 6249 TRACE_LINEAR_SCAN(3, tty->print_cr("Negating conditional branch and deleting unconditional branch at end of block B%d", block->block_id()));
roland@4860 6250
roland@4860 6251 // eliminate a conditional branch to the immediate successor
roland@4860 6252 prev_branch->change_block(last_branch->block());
roland@4860 6253 prev_branch->negate_cond();
roland@4860 6254 prev_cmp->set_condition(prev_branch->cond());
roland@4860 6255 instructions->truncate(instructions->length() - 1);
roland@4860 6256 }
duke@435 6257 }
duke@435 6258 }
duke@435 6259 }
duke@435 6260 }
duke@435 6261 }
duke@435 6262 }
duke@435 6263
duke@435 6264 DEBUG_ONLY(verify(code));
duke@435 6265 }
duke@435 6266
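// Illustration (hypothetical block ids): when a block consists only of a
// label and a return, an unconditional jump to it from a predecessor is
// replaced by the return itself (the return block stays, since other blocks
// may still branch to it conditionally):
//
//   B1: ... branch B5                 B1: ... return [R0]
//   B5: label  return [R0]     ==>    B5: label  return [R0]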
duke@435 6267 void ControlFlowOptimizer::delete_jumps_to_return(BlockList* code) {
duke@435 6268 #ifdef ASSERT
duke@435 6269 BitMap return_converted(BlockBegin::number_of_blocks());
duke@435 6270 return_converted.clear();
duke@435 6271 #endif
duke@435 6272
duke@435 6273 for (int i = code->length() - 1; i >= 0; i--) {
duke@435 6274 BlockBegin* block = code->at(i);
duke@435 6275 LIR_OpList* cur_instructions = block->lir()->instructions_list();
duke@435 6276 LIR_Op* cur_last_op = cur_instructions->last();
duke@435 6277
duke@435 6278 assert(cur_instructions->at(0)->code() == lir_label, "first instruction must always be a label");
duke@435 6279 if (cur_instructions->length() == 2 && cur_last_op->code() == lir_return) {
duke@435 6280 // the block contains only a label and a return
duke@435 6281 // if a predecessor ends with an unconditional jump to this block, then the jump
duke@435 6282 // can be replaced with a return instruction
duke@435 6283 //
duke@435 6284 // Note: the original block with only a return statement cannot be deleted completely
duke@435 6285 // because the predecessors might have other (conditional) jumps to this block
duke@435 6286       // -> this may lead to unnecessary return instructions in the final code
duke@435 6287
duke@435 6288 assert(cur_last_op->info() == NULL, "return instructions do not have debug information");
duke@435 6289 assert(block->number_of_sux() == 0 ||
duke@435 6290 (return_converted.at(block->block_id()) && block->number_of_sux() == 1),
duke@435 6291 "blocks that end with return must not have successors");
duke@435 6292
duke@435 6293 assert(cur_last_op->as_Op1() != NULL, "return must be LIR_Op1");
duke@435 6294 LIR_Opr return_opr = ((LIR_Op1*)cur_last_op)->in_opr();
duke@435 6295
duke@435 6296 for (int j = block->number_of_preds() - 1; j >= 0; j--) {
duke@435 6297 BlockBegin* pred = block->pred_at(j);
duke@435 6298 LIR_OpList* pred_instructions = pred->lir()->instructions_list();
duke@435 6299 LIR_Op* pred_last_op = pred_instructions->last();
duke@435 6300
duke@435 6301 if (pred_last_op->code() == lir_branch) {
duke@435 6302 assert(pred_last_op->as_OpBranch() != NULL, "branch must be LIR_OpBranch");
duke@435 6303 LIR_OpBranch* pred_last_branch = (LIR_OpBranch*)pred_last_op;
duke@435 6304
duke@435 6305 if (pred_last_branch->block() == block && pred_last_branch->cond() == lir_cond_always && pred_last_branch->info() == NULL) {
duke@435 6306 // replace the jump to a return with a direct return
duke@435 6307 // Note: currently the edge between the blocks is not deleted
duke@435 6308 pred_instructions->at_put(pred_instructions->length() - 1, new LIR_Op1(lir_return, return_opr));
duke@435 6309 #ifdef ASSERT
duke@435 6310 return_converted.set_bit(pred->block_id());
duke@435 6311 #endif
duke@435 6312 }
duke@435 6313 }
duke@435 6314 }
duke@435 6315 }
duke@435 6316 }
duke@435 6317 }
duke@435 6318
duke@435 6319
duke@435 6320 #ifdef ASSERT
duke@435 6321 void ControlFlowOptimizer::verify(BlockList* code) {
duke@435 6322 for (int i = 0; i < code->length(); i++) {
duke@435 6323 BlockBegin* block = code->at(i);
duke@435 6324 LIR_OpList* instructions = block->lir()->instructions_list();
duke@435 6325
duke@435 6326 int j;
duke@435 6327 for (j = 0; j < instructions->length(); j++) {
duke@435 6328 LIR_OpBranch* op_branch = instructions->at(j)->as_OpBranch();
duke@435 6329
duke@435 6330 if (op_branch != NULL) {
duke@435 6331 assert(op_branch->block() == NULL || code->index_of(op_branch->block()) != -1, "branch target not valid");
duke@435 6332 assert(op_branch->ublock() == NULL || code->index_of(op_branch->ublock()) != -1, "branch target not valid");
duke@435 6333 }
duke@435 6334 }
duke@435 6335
duke@435 6336     for (j = 0; j < block->number_of_sux(); j++) {
duke@435 6337       BlockBegin* sux = block->sux_at(j);
duke@435 6338       assert(code->index_of(sux) != -1, "successor not valid");
duke@435 6339     }
duke@435 6340
duke@435 6341     for (j = 0; j < block->number_of_preds(); j++) {
duke@435 6342       BlockBegin* pred = block->pred_at(j);
duke@435 6343       assert(code->index_of(pred) != -1, "predecessor not valid");
duke@435 6344     }
duke@435 6344 }
duke@435 6345 }
duke@435 6346 }
duke@435 6347 #endif
duke@435 6348
duke@435 6349
duke@435 6350 #ifndef PRODUCT
duke@435 6351
duke@435 6352 // Implementation of LinearScanStatistic
duke@435 6353
duke@435 6354 const char* LinearScanStatistic::counter_name(int counter_idx) {
duke@435 6355 switch (counter_idx) {
duke@435 6356 case counter_method: return "compiled methods";
duke@435 6357 case counter_fpu_method: return "methods using fpu";
duke@435 6358 case counter_loop_method: return "methods with loops";
duke@435 6359 case counter_exception_method:return "methods with xhandler";
duke@435 6360
duke@435 6361 case counter_loop: return "loops";
duke@435 6362 case counter_block: return "blocks";
duke@435 6363 case counter_loop_block: return "blocks inside loop";
duke@435 6364 case counter_exception_block: return "exception handler entries";
duke@435 6365 case counter_interval: return "intervals";
duke@435 6366 case counter_fixed_interval: return "fixed intervals";
duke@435 6367 case counter_range: return "ranges";
duke@435 6368 case counter_fixed_range: return "fixed ranges";
duke@435 6369 case counter_use_pos: return "use positions";
duke@435 6370 case counter_fixed_use_pos: return "fixed use positions";
duke@435 6371 case counter_spill_slots: return "spill slots";
duke@435 6372
duke@435 6373 // counter for classes of lir instructions
duke@435 6374 case counter_instruction: return "total instructions";
duke@435 6375 case counter_label: return "labels";
duke@435 6376 case counter_entry: return "method entries";
duke@435 6377 case counter_return: return "method returns";
duke@435 6378 case counter_call: return "method calls";
duke@435 6379 case counter_move: return "moves";
duke@435 6380 case counter_cmp: return "compare";
duke@435 6381 case counter_cond_branch: return "conditional branches";
duke@435 6382 case counter_uncond_branch: return "unconditional branches";
duke@435 6383 case counter_stub_branch: return "branches to stub";
duke@435 6384     case counter_alu:             return "arithmetic + logic";
duke@435 6385 case counter_alloc: return "allocations";
duke@435 6386 case counter_sync: return "synchronisation";
duke@435 6387 case counter_throw: return "throw";
duke@435 6388 case counter_unwind: return "unwind";
duke@435 6389 case counter_typecheck: return "type+null-checks";
duke@435 6390 case counter_fpu_stack: return "fpu-stack";
duke@435 6391     case counter_misc_inst:       return "misc. instructions";
duke@435 6392     case counter_other_inst:      return "other instructions";
duke@435 6393
duke@435 6394 // counter for different types of moves
duke@435 6395 case counter_move_total: return "total moves";
duke@435 6396 case counter_move_reg_reg: return "register->register";
duke@435 6397 case counter_move_reg_stack: return "register->stack";
duke@435 6398 case counter_move_stack_reg: return "stack->register";
duke@435 6399 case counter_move_stack_stack:return "stack->stack";
duke@435 6400 case counter_move_reg_mem: return "register->memory";
duke@435 6401 case counter_move_mem_reg: return "memory->register";
duke@435 6402 case counter_move_const_any: return "constant->any";
duke@435 6403
duke@435 6404 case blank_line_1: return "";
duke@435 6405 case blank_line_2: return "";
duke@435 6406
duke@435 6407 default: ShouldNotReachHere(); return "";
duke@435 6408 }
duke@435 6409 }
duke@435 6410
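// map each counter to the base counter that serves as the 100% reference when
// percentages are printed (e.g. counter_move_reg_reg is reported relative to
// counter_move_total); counters without a base counter print no percentage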
duke@435 6411 LinearScanStatistic::Counter LinearScanStatistic::base_counter(int counter_idx) {
duke@435 6412 if (counter_idx == counter_fpu_method || counter_idx == counter_loop_method || counter_idx == counter_exception_method) {
duke@435 6413 return counter_method;
duke@435 6414 } else if (counter_idx == counter_loop_block || counter_idx == counter_exception_block) {
duke@435 6415 return counter_block;
duke@435 6416 } else if (counter_idx >= counter_instruction && counter_idx <= counter_other_inst) {
duke@435 6417 return counter_instruction;
duke@435 6418 } else if (counter_idx >= counter_move_total && counter_idx <= counter_move_const_any) {
duke@435 6419 return counter_move_total;
duke@435 6420 }
duke@435 6421 return invalid_counter;
duke@435 6422 }
duke@435 6423
duke@435 6424 LinearScanStatistic::LinearScanStatistic() {
duke@435 6425 for (int i = 0; i < number_of_counters; i++) {
duke@435 6426 _counters_sum[i] = 0;
duke@435 6427 _counters_max[i] = -1;
duke@435 6428 }
duke@435 6430 }
duke@435 6431
duke@435 6432 // add the method-local numbers to the total sum and update the per-method maximum
duke@435 6433 void LinearScanStatistic::sum_up(LinearScanStatistic &method_statistic) {
duke@435 6434 for (int i = 0; i < number_of_counters; i++) {
duke@435 6435 _counters_sum[i] += method_statistic._counters_sum[i];
duke@435 6436 _counters_max[i] = MAX2(_counters_max[i], method_statistic._counters_sum[i]);
duke@435 6437 }
duke@435 6438 }
duke@435 6439
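// each counter prints as: name, total sum, percentage of its base counter (if
// it has one), and the per-method maximum, e.g. (hypothetical numbers):
//                    moves:    12345 ( 35.2%)      678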
duke@435 6440 void LinearScanStatistic::print(const char* title) {
duke@435 6441 if (CountLinearScan || TraceLinearScanLevel > 0) {
duke@435 6442 tty->cr();
duke@435 6443 tty->print_cr("***** LinearScan statistic - %s *****", title);
duke@435 6444
duke@435 6445 for (int i = 0; i < number_of_counters; i++) {
duke@435 6446 if (_counters_sum[i] > 0 || _counters_max[i] >= 0) {
duke@435 6447 tty->print("%25s: %8d", counter_name(i), _counters_sum[i]);
duke@435 6448
duke@435 6449 if (base_counter(i) != invalid_counter) {
duke@435 6450 tty->print(" (%5.1f%%) ", _counters_sum[i] * 100.0 / _counters_sum[base_counter(i)]);
duke@435 6451 } else {
duke@435 6452 tty->print(" ");
duke@435 6453 }
duke@435 6454
duke@435 6455 if (_counters_max[i] >= 0) {
duke@435 6456 tty->print("%8d", _counters_max[i]);
duke@435 6457 }
duke@435 6458 }
duke@435 6459 tty->cr();
duke@435 6460 }
duke@435 6461 }
duke@435 6462 }
duke@435 6463
duke@435 6464 void LinearScanStatistic::collect(LinearScan* allocator) {
duke@435 6465 inc_counter(counter_method);
duke@435 6466 if (allocator->has_fpu_registers()) {
duke@435 6467 inc_counter(counter_fpu_method);
duke@435 6468 }
duke@435 6469 if (allocator->num_loops() > 0) {
duke@435 6470 inc_counter(counter_loop_method);
duke@435 6471 }
duke@435 6472 inc_counter(counter_loop, allocator->num_loops());
duke@435 6473 inc_counter(counter_spill_slots, allocator->max_spills());
duke@435 6474
duke@435 6475 int i;
duke@435 6476 for (i = 0; i < allocator->interval_count(); i++) {
duke@435 6477 Interval* cur = allocator->interval_at(i);
duke@435 6478
duke@435 6479 if (cur != NULL) {
duke@435 6480 inc_counter(counter_interval);
duke@435 6481 inc_counter(counter_use_pos, cur->num_use_positions());
duke@435 6482 if (LinearScan::is_precolored_interval(cur)) {
duke@435 6483 inc_counter(counter_fixed_interval);
duke@435 6484 inc_counter(counter_fixed_use_pos, cur->num_use_positions());
duke@435 6485 }
duke@435 6486
duke@435 6487 Range* range = cur->first();
duke@435 6488 while (range != Range::end()) {
duke@435 6489 inc_counter(counter_range);
duke@435 6490 if (LinearScan::is_precolored_interval(cur)) {
duke@435 6491 inc_counter(counter_fixed_range);
duke@435 6492 }
duke@435 6493 range = range->next();
duke@435 6494 }
duke@435 6495 }
duke@435 6496 }
duke@435 6497
duke@435 6498 bool has_xhandlers = false;
duke@435 6499 // Note: only count blocks that are in code-emit order
duke@435 6500 for (i = 0; i < allocator->ir()->code()->length(); i++) {
duke@435 6501 BlockBegin* cur = allocator->ir()->code()->at(i);
duke@435 6502
duke@435 6503 inc_counter(counter_block);
duke@435 6504 if (cur->loop_depth() > 0) {
duke@435 6505 inc_counter(counter_loop_block);
duke@435 6506 }
duke@435 6507 if (cur->is_set(BlockBegin::exception_entry_flag)) {
duke@435 6508 inc_counter(counter_exception_block);
duke@435 6509 has_xhandlers = true;
duke@435 6510 }
duke@435 6511
duke@435 6512 LIR_OpList* instructions = cur->lir()->instructions_list();
duke@435 6513 for (int j = 0; j < instructions->length(); j++) {
duke@435 6514 LIR_Op* op = instructions->at(j);
duke@435 6515
duke@435 6516 inc_counter(counter_instruction);
duke@435 6517
duke@435 6518 switch (op->code()) {
duke@435 6519 case lir_label: inc_counter(counter_label); break;
duke@435 6520 case lir_std_entry:
duke@435 6521 case lir_osr_entry: inc_counter(counter_entry); break;
duke@435 6522 case lir_return: inc_counter(counter_return); break;
duke@435 6523
duke@435 6524 case lir_rtcall:
duke@435 6525 case lir_static_call:
duke@435 6526 case lir_optvirtual_call:
duke@435 6527 case lir_virtual_call: inc_counter(counter_call); break;
duke@435 6528
duke@435 6529 case lir_move: {
duke@435 6530 inc_counter(counter_move);
duke@435 6531 inc_counter(counter_move_total);
duke@435 6532
duke@435 6533 LIR_Opr in = op->as_Op1()->in_opr();
duke@435 6534 LIR_Opr res = op->as_Op1()->result_opr();
duke@435 6535 if (in->is_register()) {
duke@435 6536 if (res->is_register()) {
duke@435 6537 inc_counter(counter_move_reg_reg);
duke@435 6538 } else if (res->is_stack()) {
duke@435 6539 inc_counter(counter_move_reg_stack);
duke@435 6540 } else if (res->is_address()) {
duke@435 6541 inc_counter(counter_move_reg_mem);
duke@435 6542 } else {
duke@435 6543 ShouldNotReachHere();
duke@435 6544 }
duke@435 6545 } else if (in->is_stack()) {
duke@435 6546 if (res->is_register()) {
duke@435 6547 inc_counter(counter_move_stack_reg);
duke@435 6548 } else {
duke@435 6549 inc_counter(counter_move_stack_stack);
duke@435 6550 }
duke@435 6551 } else if (in->is_address()) {
duke@435 6552 assert(res->is_register(), "must be");
duke@435 6553 inc_counter(counter_move_mem_reg);
duke@435 6554 } else if (in->is_constant()) {
duke@435 6555 inc_counter(counter_move_const_any);
duke@435 6556 } else {
duke@435 6557 ShouldNotReachHere();
duke@435 6558 }
duke@435 6559 break;
duke@435 6560 }
duke@435 6561
duke@435 6562 case lir_cmp: inc_counter(counter_cmp); break;
duke@435 6563
duke@435 6564 case lir_branch:
duke@435 6565 case lir_cond_float_branch: {
duke@435 6566 LIR_OpBranch* branch = op->as_OpBranch();
duke@435 6567 if (branch->block() == NULL) {
duke@435 6568 inc_counter(counter_stub_branch);
duke@435 6569 } else if (branch->cond() == lir_cond_always) {
duke@435 6570 inc_counter(counter_uncond_branch);
duke@435 6571 } else {
duke@435 6572 inc_counter(counter_cond_branch);
duke@435 6573 }
duke@435 6574 break;
duke@435 6575 }
duke@435 6576
duke@435 6577 case lir_neg:
duke@435 6578 case lir_add:
duke@435 6579 case lir_sub:
duke@435 6580 case lir_mul:
duke@435 6581 case lir_mul_strictfp:
duke@435 6582 case lir_div:
duke@435 6583 case lir_div_strictfp:
duke@435 6584 case lir_rem:
duke@435 6585 case lir_sqrt:
duke@435 6586 case lir_sin:
duke@435 6587 case lir_cos:
duke@435 6588 case lir_abs:
duke@435 6589 case lir_log10:
duke@435 6590 case lir_log:
roland@3787 6591 case lir_pow:
roland@3787 6592 case lir_exp:
duke@435 6593 case lir_logic_and:
duke@435 6594 case lir_logic_or:
duke@435 6595 case lir_logic_xor:
duke@435 6596 case lir_shl:
duke@435 6597 case lir_shr:
duke@435 6598 case lir_ushr: inc_counter(counter_alu); break;
duke@435 6599
duke@435 6600 case lir_alloc_object:
duke@435 6601 case lir_alloc_array: inc_counter(counter_alloc); break;
duke@435 6602
duke@435 6603 case lir_monaddr:
duke@435 6604 case lir_lock:
duke@435 6605 case lir_unlock: inc_counter(counter_sync); break;
duke@435 6606
duke@435 6607 case lir_throw: inc_counter(counter_throw); break;
duke@435 6608
duke@435 6609 case lir_unwind: inc_counter(counter_unwind); break;
duke@435 6610
duke@435 6611 case lir_null_check:
duke@435 6612 case lir_leal:
duke@435 6613 case lir_instanceof:
duke@435 6614 case lir_checkcast:
duke@435 6615 case lir_store_check: inc_counter(counter_typecheck); break;
duke@435 6616
duke@435 6617 case lir_fpop_raw:
duke@435 6618 case lir_fxch:
duke@435 6619 case lir_fld: inc_counter(counter_fpu_stack); break;
duke@435 6620
duke@435 6621 case lir_nop:
duke@435 6622 case lir_push:
duke@435 6623 case lir_pop:
duke@435 6624 case lir_convert:
duke@435 6625 case lir_roundfp:
duke@435 6626 case lir_cmove: inc_counter(counter_misc_inst); break;
duke@435 6627
duke@435 6628 default: inc_counter(counter_other_inst); break;
duke@435 6629 }
duke@435 6630 }
duke@435 6631 }
duke@435 6632
duke@435 6633 if (has_xhandlers) {
duke@435 6634 inc_counter(counter_exception_method);
duke@435 6635 }
duke@435 6636 }
duke@435 6637
duke@435 6638 void LinearScanStatistic::compute(LinearScan* allocator, LinearScanStatistic &global_statistic) {
duke@435 6639 if (CountLinearScan || TraceLinearScanLevel > 0) {
duke@435 6640
duke@435 6641     LinearScanStatistic local_statistic;
duke@435 6642
duke@435 6643 local_statistic.collect(allocator);
duke@435 6644 global_statistic.sum_up(local_statistic);
duke@435 6645
duke@435 6646 if (TraceLinearScanLevel > 2) {
duke@435 6647 local_statistic.print("current local statistic");
duke@435 6648 }
duke@435 6649 }
duke@435 6650 }
duke@435 6651
duke@435 6652
duke@435 6653 // Implementation of LinearScanTimers
duke@435 6654
duke@435 6655 LinearScanTimers::LinearScanTimers() {
duke@435 6656 for (int i = 0; i < number_of_timers; i++) {
duke@435 6657 timer(i)->reset();
duke@435 6658 }
duke@435 6659 }
duke@435 6660
duke@435 6661 const char* LinearScanTimers::timer_name(int idx) {
duke@435 6662 switch (idx) {
duke@435 6663 case timer_do_nothing: return "Nothing (Time Check)";
duke@435 6664 case timer_number_instructions: return "Number Instructions";
duke@435 6665 case timer_compute_local_live_sets: return "Local Live Sets";
duke@435 6666 case timer_compute_global_live_sets: return "Global Live Sets";
duke@435 6667 case timer_build_intervals: return "Build Intervals";
duke@435 6668 case timer_sort_intervals_before: return "Sort Intervals Before";
duke@435 6669 case timer_allocate_registers: return "Allocate Registers";
duke@435 6670 case timer_resolve_data_flow: return "Resolve Data Flow";
duke@435 6671 case timer_sort_intervals_after: return "Sort Intervals After";
duke@435 6672 case timer_eliminate_spill_moves: return "Spill optimization";
duke@435 6673 case timer_assign_reg_num: return "Assign Reg Num";
duke@435 6674 case timer_allocate_fpu_stack: return "Allocate FPU Stack";
duke@435 6675 case timer_optimize_lir: return "Optimize LIR";
duke@435 6676 default: ShouldNotReachHere(); return "";
duke@435 6677 }
duke@435 6678 }
duke@435 6679
duke@435 6680 void LinearScanTimers::begin_method() {
duke@435 6681 if (TimeEachLinearScan) {
duke@435 6682 // reset all timers to measure only current method
duke@435 6683 for (int i = 0; i < number_of_timers; i++) {
duke@435 6684 timer(i)->reset();
duke@435 6685 }
duke@435 6686 }
duke@435 6687 }
duke@435 6688
duke@435 6689 void LinearScanTimers::end_method(LinearScan* allocator) {
duke@435 6690 if (TimeEachLinearScan) {
duke@435 6691
duke@435 6692 double c = timer(timer_do_nothing)->seconds();
duke@435 6693 double total = 0;
duke@435 6694 for (int i = 1; i < number_of_timers; i++) {
duke@435 6695 total += timer(i)->seconds() - c;
duke@435 6696 }
duke@435 6697
duke@435 6698 if (total >= 0.0005) {
duke@435 6699 // print all information in one line for automatic processing
duke@435 6700 tty->print("@"); allocator->compilation()->method()->print_name();
duke@435 6701
duke@435 6702 tty->print("@ %d ", allocator->compilation()->method()->code_size());
duke@435 6703 tty->print("@ %d ", allocator->block_at(allocator->block_count() - 1)->last_lir_instruction_id() / 2);
duke@435 6704 tty->print("@ %d ", allocator->block_count());
duke@435 6705 tty->print("@ %d ", allocator->num_virtual_regs());
duke@435 6706 tty->print("@ %d ", allocator->interval_count());
duke@435 6707 tty->print("@ %d ", allocator->_num_calls);
duke@435 6708 tty->print("@ %d ", allocator->num_loops());
duke@435 6709
duke@435 6710 tty->print("@ %6.6f ", total);
duke@435 6711 for (int i = 1; i < number_of_timers; i++) {
duke@435 6712 tty->print("@ %4.1f ", ((timer(i)->seconds() - c) / total) * 100);
duke@435 6713 }
duke@435 6714 tty->cr();
duke@435 6715 }
duke@435 6716 }
duke@435 6717 }
duke@435 6718
duke@435 6719 void LinearScanTimers::print(double total_time) {
duke@435 6720 if (TimeLinearScan) {
duke@435 6721 // correction value: sum of dummy-timer that only measures the time that
duke@435 6722     // is necessary to start and stop itself
duke@435 6723 double c = timer(timer_do_nothing)->seconds();
duke@435 6724
duke@435 6725 for (int i = 0; i < number_of_timers; i++) {
duke@435 6726 double t = timer(i)->seconds();
duke@435 6727 tty->print_cr(" %25s: %6.3f s (%4.1f%%) corrected: %6.3f s (%4.1f%%)", timer_name(i), t, (t / total_time) * 100.0, t - c, (t - c) / (total_time - 2 * number_of_timers * c) * 100);
duke@435 6728 }
duke@435 6729 }
duke@435 6730 }
duke@435 6731
duke@435 6732 #endif // #ifndef PRODUCT
