src/cpu/x86/vm/c1_LinearScan_x86.cpp

1 /*
2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "c1/c1_Instruction.hpp"
27 #include "c1/c1_LinearScan.hpp"
28 #include "utilities/bitMap.inline.hpp"
29
30
31 //----------------------------------------------------------------------
32 // Allocation of FPU stack slots (Intel x86 only)
33 //----------------------------------------------------------------------
34
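// Overview (editorial summary of the code below): when ComputeExactFPURegisterUsage is set,
// a first pass records which FPU registers are live at the start of every block with more
// than one predecessor (needed for stack merging); afterwards the FpuStackAllocator rewrites
// all x87 register operands into stack-relative operands and inserts the required
// fxch/fld/fpop operations.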
35 void LinearScan::allocate_fpu_stack() {
36 // First compute which FPU registers are live at the start of each basic block
37 // (To minimize the amount of work we have to do if we have to merge FPU stacks)
38 if (ComputeExactFPURegisterUsage) {
39 Interval* intervals_in_register, *intervals_in_memory;
40 create_unhandled_lists(&intervals_in_register, &intervals_in_memory, is_in_fpu_register, NULL);
41
42 // ignore memory intervals by overwriting intervals_in_memory
43 // the dummy interval is needed to force the walker to walk until the given id:
44 // without it, the walker stops when the unhandled-list is empty -> live information
45 // beyond this point would be incorrect.
46 Interval* dummy_interval = new Interval(any_reg);
47 dummy_interval->add_range(max_jint - 2, max_jint - 1);
48 dummy_interval->set_next(Interval::end());
49 intervals_in_memory = dummy_interval;
50
51 IntervalWalker iw(this, intervals_in_register, intervals_in_memory);
52
53 const int num_blocks = block_count();
54 for (int i = 0; i < num_blocks; i++) {
55 BlockBegin* b = block_at(i);
56
57 // register usage is only needed for merging stacks -> compute only
58 // when more than one predecessor.
59 // the block must not have any spill moves at the beginning (checked by assertions)
60 // spill moves would use intervals that are marked as handled and so the usage bit
61 // would have been set incorrectly
62
63 // NOTE: the check for number_of_preds > 1 is necessary. A block with only one
64 // predecessor may have spill moves at the beginning of the block.
65 // If an interval ends at the current instruction id, it is not possible
66 // to decide if the register is live or not at the block begin -> the
67 // register information would be incorrect.
68 if (b->number_of_preds() > 1) {
69 int id = b->first_lir_instruction_id();
70 BitMap regs(FrameMap::nof_fpu_regs);
71 regs.clear();
72
73 iw.walk_to(id); // walk after the first instruction (always a label) of the block
74 assert(iw.current_position() == id, "did not walk completely to id");
75
76 // Only consider FPU values in registers
77 Interval* interval = iw.active_first(fixedKind);
78 while (interval != Interval::end()) {
79 int reg = interval->assigned_reg();
80 assert(reg >= pd_first_fpu_reg && reg <= pd_last_fpu_reg, "no fpu register");
81 assert(interval->assigned_regHi() == -1, "must not have hi register (doubles stored in one register)");
82 assert(interval->from() <= id && id < interval->to(), "interval out of range");
83
84 #ifndef PRODUCT
85 if (TraceFPURegisterUsage) {
86 tty->print("fpu reg %d is live because of ", reg - pd_first_fpu_reg); interval->print();
87 }
88 #endif
89
90 regs.set_bit(reg - pd_first_fpu_reg);
91 interval = interval->next();
92 }
93
94 b->set_fpu_register_usage(regs);
95
96 #ifndef PRODUCT
97 if (TraceFPURegisterUsage) {
98 tty->print("FPU regs for block %d (LIR instr %d): ", b->block_id(), id); regs.print_on(tty); tty->cr();
99 }
100 #endif
101 }
102 }
103 }
104
105 FpuStackAllocator alloc(ir()->compilation(), this);
106 _fpu_stack_allocator = &alloc;
107 alloc.allocate();
108 _fpu_stack_allocator = NULL;
109 }
110
111
112 FpuStackAllocator::FpuStackAllocator(Compilation* compilation, LinearScan* allocator)
113 : _compilation(compilation)
114 , _lir(NULL)
115 , _pos(-1)
116 , _allocator(allocator)
117 , _sim(compilation)
118 , _temp_sim(compilation)
119 {}
120
121 void FpuStackAllocator::allocate() {
122 int num_blocks = allocator()->block_count();
123 for (int i = 0; i < num_blocks; i++) {
124 // Set up to process block
125 BlockBegin* block = allocator()->block_at(i);
126 intArray* fpu_stack_state = block->fpu_stack_state();
127
128 #ifndef PRODUCT
129 if (TraceFPUStack) {
130 tty->cr();
131 tty->print_cr("------- Begin of new Block %d -------", block->block_id());
132 }
133 #endif
134
135 assert(fpu_stack_state != NULL ||
136 block->end()->as_Base() != NULL ||
137 block->is_set(BlockBegin::exception_entry_flag),
138 "FPU stack state must be present due to linear-scan order for FPU stack allocation");
139 // note: exception handler entries always start with an empty fpu stack
140 // because stack merging would be too complicated
141
142 if (fpu_stack_state != NULL) {
143 sim()->read_state(fpu_stack_state);
144 } else {
145 sim()->clear();
146 }
147
148 #ifndef PRODUCT
149 if (TraceFPUStack) {
150 tty->print("Reading FPU state for block %d:", block->block_id());
151 sim()->print();
152 tty->cr();
153 }
154 #endif
155
156 allocate_block(block);
157 CHECK_BAILOUT();
158 }
159 }
160
161 void FpuStackAllocator::allocate_block(BlockBegin* block) {
162 bool processed_merge = false;
163 LIR_OpList* insts = block->lir()->instructions_list();
164 set_lir(block->lir());
165 set_pos(0);
166
167
168 // Note: insts->length() may change during loop
169 while (pos() < insts->length()) {
170 LIR_Op* op = insts->at(pos());
171 _debug_information_computed = false;
172
173 #ifndef PRODUCT
174 if (TraceFPUStack) {
175 op->print();
176 }
177 check_invalid_lir_op(op);
178 #endif
179
180 LIR_OpBranch* branch = op->as_OpBranch();
181 LIR_Op1* op1 = op->as_Op1();
182 LIR_Op2* op2 = op->as_Op2();
183 LIR_OpCall* opCall = op->as_OpCall();
184
185 if (branch != NULL && branch->block() != NULL) {
186 if (!processed_merge) {
187 // propagate stack at first branch to a successor
188 processed_merge = true;
189 bool required_merge = merge_fpu_stack_with_successors(block);
190
191 assert(!required_merge || branch->cond() == lir_cond_always, "splitting of critical edges should prevent FPU stack mismatches at cond branches");
192 }
193
194 } else if (op1 != NULL) {
195 handle_op1(op1);
196 } else if (op2 != NULL) {
197 handle_op2(op2);
198 } else if (opCall != NULL) {
199 handle_opCall(opCall);
200 }
201
202 compute_debug_information(op);
203
204 set_pos(1 + pos());
205 }
206
207 // Propagate stack when block does not end with branch
208 if (!processed_merge) {
209 merge_fpu_stack_with_successors(block);
210 }
211 }
212
213
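// Debug information is computed at most once per operation (guarded by
// _debug_information_computed, which allocate_block resets for every LIR_Op).
// Exception handlers reachable from the operation also get their entry code
// allocated here, because the FPU stack must be emptied before a handler is entered.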
214 void FpuStackAllocator::compute_debug_information(LIR_Op* op) {
215 if (!_debug_information_computed && op->id() != -1 && allocator()->has_info(op->id())) {
216 visitor.visit(op);
217
218 // exception handling
219 if (allocator()->compilation()->has_exception_handlers()) {
220 XHandlers* xhandlers = visitor.all_xhandler();
221 int n = xhandlers->length();
222 for (int k = 0; k < n; k++) {
223 allocate_exception_handler(xhandlers->handler_at(k));
224 }
225 } else {
226 assert(visitor.all_xhandler()->length() == 0, "missed exception handler");
227 }
228
229 // compute debug information
230 int n = visitor.info_count();
231 assert(n > 0, "should not visit operation otherwise");
232
233 for (int j = 0; j < n; j++) {
234 CodeEmitInfo* info = visitor.info_at(j);
235 // Compute debug information
236 allocator()->compute_debug_info(info, op->id());
237 }
238 }
239 _debug_information_computed = true;
240 }
241
242 void FpuStackAllocator::allocate_exception_handler(XHandler* xhandler) {
243 if (!sim()->is_empty()) {
244 LIR_List* old_lir = lir();
245 int old_pos = pos();
246 intArray* old_state = sim()->write_state();
247
248 #ifndef PRODUCT
249 if (TraceFPUStack) {
250 tty->cr();
251 tty->print_cr("------- begin of exception handler -------");
252 }
253 #endif
254
255 if (xhandler->entry_code() == NULL) {
256 // need entry code to clear FPU stack
257 LIR_List* entry_code = new LIR_List(_compilation);
258 entry_code->jump(xhandler->entry_block());
259 xhandler->set_entry_code(entry_code);
260 }
261
262 LIR_OpList* insts = xhandler->entry_code()->instructions_list();
263 set_lir(xhandler->entry_code());
264 set_pos(0);
265
266 // Note: insts->length() may change during loop
267 while (pos() < insts->length()) {
268 LIR_Op* op = insts->at(pos());
269
270 #ifndef PRODUCT
271 if (TraceFPUStack) {
272 op->print();
273 }
274 check_invalid_lir_op(op);
275 #endif
276
277 switch (op->code()) {
278 case lir_move:
279 assert(op->as_Op1() != NULL, "must be LIR_Op1");
280 assert(pos() != insts->length() - 1, "must not be last operation");
281
282 handle_op1((LIR_Op1*)op);
283 break;
284
285 case lir_branch:
286 assert(op->as_OpBranch()->cond() == lir_cond_always, "must be unconditional branch");
287 assert(pos() == insts->length() - 1, "must be last operation");
288
289 // remove all remaining dead registers from FPU stack
290 clear_fpu_stack(LIR_OprFact::illegalOpr);
291 break;
292
293 default:
294 // other operations not allowed in exception entry code
295 ShouldNotReachHere();
296 }
297
298 set_pos(pos() + 1);
299 }
300
301 #ifndef PRODUCT
302 if (TraceFPUStack) {
303 tty->cr();
304 tty->print_cr("------- end of exception handler -------");
305 }
306 #endif
307
308 set_lir(old_lir);
309 set_pos(old_pos);
310 sim()->read_state(old_state);
311 }
312 }
313
314
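// fpu_num returns the virtual FPU register number that the linear scan allocator assigned
// to the operand (doubles occupy only one x87 slot, so the lo half is used); tos_offset
// translates that number into the operand's current distance from the top of the
// simulated FPU stack.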
315 int FpuStackAllocator::fpu_num(LIR_Opr opr) {
316 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
317 return opr->is_single_fpu() ? opr->fpu_regnr() : opr->fpu_regnrLo();
318 }
319
320 int FpuStackAllocator::tos_offset(LIR_Opr opr) {
321 return sim()->offset_from_tos(fpu_num(opr));
322 }
323
324
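// to_fpu_stack rewrites a register operand into an operand that encodes its current offset
// from the stack top, which is the form the LIR assembler expects for x87 operands.
// to_fpu_stack_top always encodes offset 0 and (unless dont_check_offset is set) asserts
// that the value really is on top of the stack.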
325 LIR_Opr FpuStackAllocator::to_fpu_stack(LIR_Opr opr) {
326 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
327
328 int stack_offset = tos_offset(opr);
329 if (opr->is_single_fpu()) {
330 return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
331 } else {
332 assert(opr->is_double_fpu(), "shouldn't call this otherwise");
333 return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
334 }
335 }
336
337 LIR_Opr FpuStackAllocator::to_fpu_stack_top(LIR_Opr opr, bool dont_check_offset) {
338 assert(opr->is_fpu_register() && !opr->is_xmm_register(), "shouldn't call this otherwise");
339 assert(dont_check_offset || tos_offset(opr) == 0, "operand is not on stack top");
340
341 int stack_offset = 0;
342 if (opr->is_single_fpu()) {
343 return LIR_OprFact::single_fpu(stack_offset)->make_fpu_stack_offset();
344 } else {
345 assert(opr->is_double_fpu(), "shouldn't call this otherwise");
346 return LIR_OprFact::double_fpu(stack_offset)->make_fpu_stack_offset();
347 }
348 }
349
350
351
352 void FpuStackAllocator::insert_op(LIR_Op* op) {
353 lir()->insert_before(pos(), op);
354 set_pos(1 + pos());
355 }
356
357
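// The insert_* helpers emit fxch/fpop LIR operations at the current position and immediately
// apply the same effect to the stack simulator, so the simulated state always matches the
// code emitted so far. An exchange with offset 0 (value already on top) emits nothing.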
358 void FpuStackAllocator::insert_exchange(int offset) {
359 if (offset > 0) {
360 LIR_Op1* fxch_op = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
361 insert_op(fxch_op);
362 sim()->swap(offset);
363
364 #ifndef PRODUCT
365 if (TraceFPUStack) {
366 tty->print("Exchanged register: %d New state: ", sim()->get_slot(0)); sim()->print(); tty->cr();
367 }
368 #endif
369
370 }
371 }
372
373 void FpuStackAllocator::insert_exchange(LIR_Opr opr) {
374 insert_exchange(tos_offset(opr));
375 }
376
377
378 void FpuStackAllocator::insert_free(int offset) {
379 // move stack slot to the top of stack and then pop it
380 insert_exchange(offset);
381
382 LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
383 insert_op(fpop);
384 sim()->pop();
385
386 #ifndef PRODUCT
387 if (TraceFPUStack) {
388 tty->print("Inserted pop New state: "); sim()->print(); tty->cr();
389 }
390 #endif
391 }
392
393
394 void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr) {
395 if (sim()->contains(fpu_num(opr))) {
396 int res_slot = tos_offset(opr);
397 insert_free(res_slot);
398 }
399 }
400
401 void FpuStackAllocator::insert_free_if_dead(LIR_Opr opr, LIR_Opr ignore) {
402 if (fpu_num(opr) != fpu_num(ignore) && sim()->contains(fpu_num(opr))) {
403 int res_slot = tos_offset(opr);
404 insert_free(res_slot);
405 }
406 }
407
408 void FpuStackAllocator::insert_copy(LIR_Opr from, LIR_Opr to) {
409 int offset = tos_offset(from);
410 LIR_Op1* fld = new LIR_Op1(lir_fld, LIR_OprFact::intConst(offset), LIR_OprFact::illegalOpr);
411 insert_op(fld);
412
413 sim()->push(fpu_num(to));
414
415 #ifndef PRODUCT
416 if (TraceFPUStack) {
417 tty->print("Inserted copy (%d -> %d) New state: ", fpu_num(from), fpu_num(to)); sim()->print(); tty->cr();
418 }
419 #endif
420 }
421
422 void FpuStackAllocator::do_rename(LIR_Opr from, LIR_Opr to) {
423 sim()->rename(fpu_num(from), fpu_num(to));
424 }
425
426 void FpuStackAllocator::do_push(LIR_Opr opr) {
427 sim()->push(fpu_num(opr));
428 }
429
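// pop_if_last_use and pop_always do not insert a separate pop instruction: they set
// fpu_pop_count on the operation so that the LIR assembler pops st(0) as part of the
// instruction itself, and mirror that effect in the stack simulator.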
430 void FpuStackAllocator::pop_if_last_use(LIR_Op* op, LIR_Opr opr) {
431 assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
432 assert(tos_offset(opr) == 0, "can only pop stack top");
433
434 if (opr->is_last_use()) {
435 op->set_fpu_pop_count(1);
436 sim()->pop();
437 }
438 }
439
440 void FpuStackAllocator::pop_always(LIR_Op* op, LIR_Opr opr) {
441 assert(op->fpu_pop_count() == 0, "fpu_pop_count already set");
442 assert(tos_offset(opr) == 0, "can only pop stack top");
443
444 op->set_fpu_pop_count(1);
445 sim()->pop();
446 }
447
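// clear_fpu_stack pops every value from the (simulated) FPU stack except the optional
// preserve operand; if preserve is currently on top, it is first exchanged to the bottom
// so that all other slots can be popped. Used before calls, returns and exception
// handler entries.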
448 void FpuStackAllocator::clear_fpu_stack(LIR_Opr preserve) {
449 int result_stack_size = (preserve->is_fpu_register() && !preserve->is_xmm_register() ? 1 : 0);
450 while (sim()->stack_size() > result_stack_size) {
451 assert(!sim()->slot_is_empty(0), "not allowed");
452
453 if (result_stack_size == 0 || sim()->get_slot(0) != fpu_num(preserve)) {
454 insert_free(0);
455 } else {
456 // move "preserve" to bottom of stack so that all other stack slots can be popped
457 insert_exchange(sim()->stack_size() - 1);
458 }
459 }
460 }
461
462
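// handle_op1 rewrites the FPU operands of one-operand LIR operations into stack-relative
// operands: values are exchanged to the stack top where the instruction needs them,
// results are pushed in the simulator, and dead inputs are released via fpu_pop_count.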
463 void FpuStackAllocator::handle_op1(LIR_Op1* op1) {
464 LIR_Opr in = op1->in_opr();
465 LIR_Opr res = op1->result_opr();
466
467 LIR_Opr new_in = in; // new operands relative to the actual fpu stack top
468 LIR_Opr new_res = res;
469
470 // Note: this switch is processed for all LIR_Op1, regardless of whether they have FPU arguments,
471 // so checks for is_float_kind() are necessary inside the cases
472 switch (op1->code()) {
473
474 case lir_return: {
475 // FPU-Stack must only contain the (optional) fpu return value.
476 // All remaining dead values are popped from the stack
477 // If the input operand is a fpu-register, it is exchanged to the bottom of the stack
478
479 clear_fpu_stack(in);
480 if (in->is_fpu_register() && !in->is_xmm_register()) {
481 new_in = to_fpu_stack_top(in);
482 }
483
484 break;
485 }
486
487 case lir_move: {
488 if (in->is_fpu_register() && !in->is_xmm_register()) {
489 if (res->is_xmm_register()) {
490 // move from fpu register to xmm register (necessary for operations that
491 // are not available in the SSE instruction set)
492 insert_exchange(in);
493 new_in = to_fpu_stack_top(in);
494 pop_always(op1, in);
495
496 } else if (res->is_fpu_register() && !res->is_xmm_register()) {
497 // move from fpu-register to fpu-register:
498 // * input and result register equal:
499 // nothing to do
500 // * input register is last use:
501 // rename the input register to result register -> input register
502 // not present on fpu-stack afterwards
503 // * input register not last use:
504 // duplicate input register to result register to preserve input
505 //
506 // Note: The LIR-Assembler does not produce any code for fpu register moves,
507 // so input and result stack index must be equal
508
509 if (fpu_num(in) == fpu_num(res)) {
510 // nothing to do
511 } else if (in->is_last_use()) {
512 insert_free_if_dead(res);//, in);
513 do_rename(in, res);
514 } else {
515 insert_free_if_dead(res);
516 insert_copy(in, res);
517 }
518 new_in = to_fpu_stack(res);
519 new_res = new_in;
520
521 } else {
522 // move from fpu-register to memory
523 // input operand must be on top of stack
524
525 insert_exchange(in);
526
527 // create debug information here because afterwards the register may have been popped
528 compute_debug_information(op1);
529
530 new_in = to_fpu_stack_top(in);
531 pop_if_last_use(op1, in);
532 }
533
534 } else if (res->is_fpu_register() && !res->is_xmm_register()) {
535 // move from memory/constant to fpu register
536 // result is pushed on the stack
537
538 insert_free_if_dead(res);
539
540 // create debug information before register is pushed
541 compute_debug_information(op1);
542
543 do_push(res);
544 new_res = to_fpu_stack_top(res);
545 }
546 break;
547 }
548
549 case lir_neg: {
550 if (in->is_fpu_register() && !in->is_xmm_register()) {
551 assert(res->is_fpu_register() && !res->is_xmm_register(), "must be");
552 assert(in->is_last_use(), "old value gets destroyed");
553
554 insert_free_if_dead(res, in);
555 insert_exchange(in);
556 new_in = to_fpu_stack_top(in);
557
558 do_rename(in, res);
559 new_res = to_fpu_stack_top(res);
560 }
561 break;
562 }
563
564 case lir_convert: {
565 Bytecodes::Code bc = op1->as_OpConvert()->bytecode();
566 switch (bc) {
567 case Bytecodes::_d2f:
568 case Bytecodes::_f2d:
569 assert(res->is_fpu_register(), "must be");
570 assert(in->is_fpu_register(), "must be");
571
572 if (!in->is_xmm_register() && !res->is_xmm_register()) {
573 // this is essentially the same as a move from fpu-register to fpu-register
574 // Note: input and result operands must have different types
575 if (fpu_num(in) == fpu_num(res)) {
576 // nothing to do
577 new_in = to_fpu_stack(in);
578 } else if (in->is_last_use()) {
579 insert_free_if_dead(res);//, in);
580 new_in = to_fpu_stack(in);
581 do_rename(in, res);
582 } else {
583 insert_free_if_dead(res);
584 insert_copy(in, res);
585 new_in = to_fpu_stack_top(in, true);
586 }
587 new_res = to_fpu_stack(res);
588 }
589
590 break;
591
592 case Bytecodes::_i2f:
593 case Bytecodes::_l2f:
594 case Bytecodes::_i2d:
595 case Bytecodes::_l2d:
596 assert(res->is_fpu_register(), "must be");
597 if (!res->is_xmm_register()) {
598 insert_free_if_dead(res);
599 do_push(res);
600 new_res = to_fpu_stack_top(res);
601 }
602 break;
603
604 case Bytecodes::_f2i:
605 case Bytecodes::_d2i:
606 assert(in->is_fpu_register(), "must be");
607 if (!in->is_xmm_register()) {
608 insert_exchange(in);
609 new_in = to_fpu_stack_top(in);
610
611 // TODO: update registers of stub
612 }
613 break;
614
615 case Bytecodes::_f2l:
616 case Bytecodes::_d2l:
617 assert(in->is_fpu_register(), "must be");
618 if (!in->is_xmm_register()) {
619 insert_exchange(in);
620 new_in = to_fpu_stack_top(in);
621 pop_always(op1, in);
622 }
623 break;
624
625 case Bytecodes::_i2l:
626 case Bytecodes::_l2i:
627 case Bytecodes::_i2b:
628 case Bytecodes::_i2c:
629 case Bytecodes::_i2s:
630 // no fpu operands
631 break;
632
633 default:
634 ShouldNotReachHere();
635 }
636 break;
637 }
638
639 case lir_roundfp: {
640 assert(in->is_fpu_register() && !in->is_xmm_register(), "input must be in register");
641 assert(res->is_stack(), "result must be on stack");
642
643 insert_exchange(in);
644 new_in = to_fpu_stack_top(in);
645 pop_if_last_use(op1, in);
646 break;
647 }
648
649 default: {
650 assert(!in->is_float_kind() && !res->is_float_kind(), "missed a fpu-operation");
651 }
652 }
653
654 op1->set_in_opr(new_in);
655 op1->set_result_opr(new_res);
656 }
657
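// handle_op2 does the same for two-operand operations on x87 registers: one of the operands
// (for arithmetic the left-hand side, which must equal the result) is brought to the top of
// the stack, because most x87 instructions operate on st(0).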
658 void FpuStackAllocator::handle_op2(LIR_Op2* op2) {
659 LIR_Opr left = op2->in_opr1();
660 if (!left->is_float_kind()) {
661 return;
662 }
663 if (left->is_xmm_register()) {
664 return;
665 }
666
667 LIR_Opr right = op2->in_opr2();
668 LIR_Opr res = op2->result_opr();
669 LIR_Opr new_left = left; // new operands relative to the actual fpu stack top
670 LIR_Opr new_right = right;
671 LIR_Opr new_res = res;
672
673 assert(!left->is_xmm_register() && !right->is_xmm_register() && !res->is_xmm_register(), "not for xmm registers");
674
675 switch (op2->code()) {
676 case lir_cmp:
677 case lir_cmp_fd2i:
678 case lir_ucmp_fd2i:
679 case lir_assert: {
680 assert(left->is_fpu_register(), "invalid LIR");
681 assert(right->is_fpu_register(), "invalid LIR");
682
683 // the left-hand side must be on top of stack.
684 // the right-hand side is never popped, even if is_last_use is set
685 insert_exchange(left);
686 new_left = to_fpu_stack_top(left);
687 new_right = to_fpu_stack(right);
688 pop_if_last_use(op2, left);
689 break;
690 }
691
692 case lir_mul_strictfp:
693 case lir_div_strictfp: {
694 assert(op2->tmp1_opr()->is_fpu_register(), "strict operations need temporary fpu stack slot");
695 insert_free_if_dead(op2->tmp1_opr());
696 assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
697 // fall-through: continue with the normal handling of lir_mul and lir_div
698 }
699 case lir_add:
700 case lir_sub:
701 case lir_mul:
702 case lir_div: {
703 assert(left->is_fpu_register(), "must be");
704 assert(res->is_fpu_register(), "must be");
705 assert(left->is_equal(res), "must be");
706
707 // either the left-hand or the right-hand side must be on top of stack
708 // (if right is not a register, left must be on top)
709 if (!right->is_fpu_register()) {
710 insert_exchange(left);
711 new_left = to_fpu_stack_top(left);
712 } else {
713 // no exchange necessary if right is already on top of stack
714 if (tos_offset(right) == 0) {
715 new_left = to_fpu_stack(left);
716 new_right = to_fpu_stack_top(right);
717 } else {
718 insert_exchange(left);
719 new_left = to_fpu_stack_top(left);
720 new_right = to_fpu_stack(right);
721 }
722
723 if (right->is_last_use()) {
724 op2->set_fpu_pop_count(1);
725
726 if (tos_offset(right) == 0) {
727 sim()->pop();
728 } else {
729 // if left is on top of stack, the result is placed in the stack
730 // slot of right, so a renaming from right to res is necessary
731 assert(tos_offset(left) == 0, "must be");
732 sim()->pop();
733 do_rename(right, res);
734 }
735 }
736 }
737 new_res = to_fpu_stack(res);
738
739 break;
740 }
741
742 case lir_rem: {
743 assert(left->is_fpu_register(), "must be");
744 assert(right->is_fpu_register(), "must be");
745 assert(res->is_fpu_register(), "must be");
746 assert(left->is_equal(res), "must be");
747
748 // Must bring both operands to the top of the stack with the following operand ordering:
749 // * fpu stack before rem: ... right left
750 // * fpu stack after rem: ... left
751 if (tos_offset(right) != 1) {
752 insert_exchange(right);
753 insert_exchange(1);
754 }
755 insert_exchange(left);
756 assert(tos_offset(right) == 1, "check");
757 assert(tos_offset(left) == 0, "check");
758
759 new_left = to_fpu_stack_top(left);
760 new_right = to_fpu_stack(right);
761
762 op2->set_fpu_pop_count(1);
763 sim()->pop();
764 do_rename(right, res);
765
766 new_res = to_fpu_stack_top(res);
767 break;
768 }
769
770 case lir_abs:
771 case lir_sqrt: {
772 // Right argument appears to be unused
773 assert(right->is_illegal(), "must be");
774 assert(left->is_fpu_register(), "must be");
775 assert(res->is_fpu_register(), "must be");
776 assert(left->is_last_use(), "old value gets destroyed");
777
778 insert_free_if_dead(res, left);
779 insert_exchange(left);
780 do_rename(left, res);
781
782 new_left = to_fpu_stack_top(res);
783 new_res = new_left;
784
785 op2->set_fpu_stack_size(sim()->stack_size());
786 break;
787 }
788
789 case lir_log:
790 case lir_log10: {
791 // log and log10 need one temporary fpu stack slot, so
792 // there is one temporary register stored in temp of the
793 // operation. The stack allocator must guarantee that the stack
794 // slots are really free, otherwise there might be a stack
795 // overflow.
796 assert(right->is_illegal(), "must be");
797 assert(left->is_fpu_register(), "must be");
798 assert(res->is_fpu_register(), "must be");
799 assert(op2->tmp1_opr()->is_fpu_register(), "must be");
800
801 insert_free_if_dead(op2->tmp1_opr());
802 insert_free_if_dead(res, left);
803 insert_exchange(left);
804 do_rename(left, res);
805
806 new_left = to_fpu_stack_top(res);
807 new_res = new_left;
808
809 op2->set_fpu_stack_size(sim()->stack_size());
810 assert(sim()->stack_size() <= 7, "at least one stack slot must be free");
811 break;
812 }
813
814
815 case lir_tan:
816 case lir_sin:
817 case lir_cos:
818 case lir_exp: {
819 // tan, sin, cos and exp need two temporary fpu stack slots, so there are two temporary
820 // registers (stored in right and temp of the operation).
821 // the stack allocator must guarantee that the stack slots are really free,
822 // otherwise there might be a stack overflow.
823 assert(left->is_fpu_register(), "must be");
824 assert(res->is_fpu_register(), "must be");
825 // assert(left->is_last_use(), "old value gets destroyed");
826 assert(right->is_fpu_register(), "right is used as the first temporary register");
827 assert(op2->tmp1_opr()->is_fpu_register(), "temp is used as the second temporary register");
828 assert(fpu_num(left) != fpu_num(right) && fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
829
830 insert_free_if_dead(right);
831 insert_free_if_dead(op2->tmp1_opr());
832
833 insert_free_if_dead(res, left);
834 insert_exchange(left);
835 do_rename(left, res);
836
837 new_left = to_fpu_stack_top(res);
838 new_res = new_left;
839
840 op2->set_fpu_stack_size(sim()->stack_size());
841 assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
842 break;
843 }
844
845 case lir_pow: {
846 // pow needs two temporary fpu stack slots, so there are two temporary
847 // registers (stored in tmp1 and tmp2 of the operation).
848 // the stack allocator must guarantee that the stack slots are really free,
849 // otherwise there might be a stack overflow.
850 assert(left->is_fpu_register(), "must be");
851 assert(right->is_fpu_register(), "must be");
852 assert(res->is_fpu_register(), "must be");
853
854 assert(op2->tmp1_opr()->is_fpu_register(), "tmp1 is the first temporary register");
855 assert(op2->tmp2_opr()->is_fpu_register(), "tmp2 is the second temporary register");
856 assert(fpu_num(left) != fpu_num(right) && fpu_num(left) != fpu_num(op2->tmp1_opr()) && fpu_num(left) != fpu_num(op2->tmp2_opr()) && fpu_num(left) != fpu_num(res), "need distinct temp registers");
857 assert(fpu_num(right) != fpu_num(op2->tmp1_opr()) && fpu_num(right) != fpu_num(op2->tmp2_opr()) && fpu_num(right) != fpu_num(res), "need distinct temp registers");
858 assert(fpu_num(op2->tmp1_opr()) != fpu_num(op2->tmp2_opr()) && fpu_num(op2->tmp1_opr()) != fpu_num(res), "need distinct temp registers");
859 assert(fpu_num(op2->tmp2_opr()) != fpu_num(res), "need distinct temp registers");
860
861 insert_free_if_dead(op2->tmp1_opr());
862 insert_free_if_dead(op2->tmp2_opr());
863
864 // Must bring both operands to the top of the stack with the following operand ordering:
865 // * fpu stack before pow: ... right left
866 // * fpu stack after pow: ... left
867
868 insert_free_if_dead(res, right);
869
870 if (tos_offset(right) != 1) {
871 insert_exchange(right);
872 insert_exchange(1);
873 }
874 insert_exchange(left);
875 assert(tos_offset(right) == 1, "check");
876 assert(tos_offset(left) == 0, "check");
877
878 new_left = to_fpu_stack_top(left);
879 new_right = to_fpu_stack(right);
880
881 op2->set_fpu_stack_size(sim()->stack_size());
882 assert(sim()->stack_size() <= 6, "at least two stack slots must be free");
883
884 sim()->pop();
885
886 do_rename(right, res);
887
888 new_res = to_fpu_stack_top(res);
889 break;
890 }
891
892 default: {
893 assert(false, "missed a fpu-operation");
894 }
895 }
896
897 op2->set_in_opr1(new_left);
898 op2->set_in_opr2(new_right);
899 op2->set_result_opr(new_res);
900 }
901
902 void FpuStackAllocator::handle_opCall(LIR_OpCall* opCall) {
903 LIR_Opr res = opCall->result_opr();
904
905 // clear fpu-stack before call
906 // it may contain dead values that could not have been removed by previous operations
907 clear_fpu_stack(LIR_OprFact::illegalOpr);
908 assert(sim()->is_empty(), "fpu stack must be empty now");
909
910 // compute debug information before (possible) fpu result is pushed
911 compute_debug_information(opCall);
912
913 if (res->is_fpu_register() && !res->is_xmm_register()) {
914 do_push(res);
915 opCall->set_result_opr(to_fpu_stack_top(res));
916 }
917 }
918
919 #ifndef PRODUCT
920 void FpuStackAllocator::check_invalid_lir_op(LIR_Op* op) {
921 switch (op->code()) {
922 case lir_24bit_FPU:
923 case lir_reset_FPU:
924 case lir_ffree:
925 assert(false, "operations not allowed in lir. If one of these operations is needed, check if they have fpu operands");
926 break;
927
928 case lir_fpop_raw:
929 case lir_fxch:
930 case lir_fld:
931 assert(false, "operations only inserted by FpuStackAllocator");
932 break;
933 }
934 }
935 #endif
936
937
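// The merge_insert_* helpers build the fix-up code that adapts this block's FPU stack to the
// state expected by a successor. merge_insert_add pushes a dummy value (0.0) for a register
// the successor expects but that is not present here; this can only happen when
// ComputeExactFPURegisterUsage is disabled.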
938 void FpuStackAllocator::merge_insert_add(LIR_List* instrs, FpuStackSim* cur_sim, int reg) {
939 LIR_Op1* move = new LIR_Op1(lir_move, LIR_OprFact::doubleConst(0), LIR_OprFact::double_fpu(reg)->make_fpu_stack_offset());
940
941 instrs->instructions_list()->push(move);
942
943 cur_sim->push(reg);
944 move->set_result_opr(to_fpu_stack(move->result_opr()));
945
946 #ifndef PRODUCT
947 if (TraceFPUStack) {
948 tty->print("Added new register: %d New state: ", reg); cur_sim->print(); tty->cr();
949 }
950 #endif
951 }
952
953 void FpuStackAllocator::merge_insert_xchg(LIR_List* instrs, FpuStackSim* cur_sim, int slot) {
954 assert(slot > 0, "no exchange necessary");
955
956 LIR_Op1* fxch = new LIR_Op1(lir_fxch, LIR_OprFact::intConst(slot));
957 instrs->instructions_list()->push(fxch);
958 cur_sim->swap(slot);
959
960 #ifndef PRODUCT
961 if (TraceFPUStack) {
962 tty->print("Exchanged register: %d New state: ", cur_sim->get_slot(slot)); cur_sim->print(); tty->cr();
963 }
964 #endif
965 }
966
967 void FpuStackAllocator::merge_insert_pop(LIR_List* instrs, FpuStackSim* cur_sim) {
968 int reg = cur_sim->get_slot(0);
969
970 LIR_Op* fpop = new LIR_Op0(lir_fpop_raw);
971 instrs->instructions_list()->push(fpop);
972 cur_sim->pop(reg);
973
974 #ifndef PRODUCT
975 if (TraceFPUStack) {
976 tty->print("Removed register: %d New state: ", reg); cur_sim->print(); tty->cr();
977 }
978 #endif
979 }
980
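// merge_rename tries to avoid a pop/push pair: when the register sitting in change_slot is
// not needed by the successor (the callers check this), the slot is simply relabeled with a
// register that the successor expects but that is not yet present in the current stack.
// Returns false if no such register exists.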
981 bool FpuStackAllocator::merge_rename(FpuStackSim* cur_sim, FpuStackSim* sux_sim, int start_slot, int change_slot) {
982 int reg = cur_sim->get_slot(change_slot);
983
984 for (int slot = start_slot; slot >= 0; slot--) {
985 int new_reg = sux_sim->get_slot(slot);
986
987 if (!cur_sim->contains(new_reg)) {
988 cur_sim->set_slot(change_slot, new_reg);
989
990 #ifndef PRODUCT
991 if (TraceFPUStack) {
992 tty->print("Renamed register %d to %d New state: ", reg, new_reg); cur_sim->print(); tty->cr();
993 }
994 #endif
995
996 return true;
997 }
998 }
999 return false;
1000 }
1001
1002
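// merge_fpu_stack makes the predecessor's FPU stack equal to the successor's expected stack
// by emitting fxch/fpop operations (and renames for dead values). For example (register
// numbers purely illustrative): with a predecessor stack {tos: r3, r1, r2} and a successor
// state {tos: r1, r2}, r3 is not needed by the successor and cannot be renamed, so a single
// fpop is emitted and the stacks match.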
1003 void FpuStackAllocator::merge_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, FpuStackSim* sux_sim) {
1004 #ifndef PRODUCT
1005 if (TraceFPUStack) {
1006 tty->cr();
1007 tty->print("before merging: pred: "); cur_sim->print(); tty->cr();
1008 tty->print(" sux: "); sux_sim->print(); tty->cr();
1009 }
1010
1011 int slot;
1012 for (slot = 0; slot < cur_sim->stack_size(); slot++) {
1013 assert(!cur_sim->slot_is_empty(slot), "not handled by algorithm");
1014 }
1015 for (slot = 0; slot < sux_sim->stack_size(); slot++) {
1016 assert(!sux_sim->slot_is_empty(slot), "not handled by algorithm");
1017 }
1018 #endif
1019
1020 // size difference between cur and sux that must be resolved by adding or removing values from the stack
1021 int size_diff = cur_sim->stack_size() - sux_sim->stack_size();
1022
1023 if (!ComputeExactFPURegisterUsage) {
1024 // add slots that are currently free, but used in successor
1025 // When the exact FPU register usage is computed, the stack does
1026 // not contain dead values at merging -> no values need to be added
1027
1028 int sux_slot = sux_sim->stack_size() - 1;
1029 while (size_diff < 0) {
1030 assert(sux_slot >= 0, "slot out of bounds -> error in algorithm");
1031
1032 int reg = sux_sim->get_slot(sux_slot);
1033 if (!cur_sim->contains(reg)) {
1034 merge_insert_add(instrs, cur_sim, reg);
1035 size_diff++;
1036
1037 if (sux_slot + size_diff != 0) {
1038 merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
1039 }
1040 }
1041 sux_slot--;
1042 }
1043 }
1044
1045 assert(cur_sim->stack_size() >= sux_sim->stack_size(), "stack size must be equal or greater now");
1046 assert(size_diff == cur_sim->stack_size() - sux_sim->stack_size(), "must be");
1047
1048 // stack merge algorithm:
1049 // 1) as long as the current stack top is not in the right location (that means
1050 // it should not be on the stack top), exchange it into the right location
1051 // 2) if the stack top is right, but the remaining stack is not ordered correctly,
1052 // the stack top is exchanged away to get another value on top ->
1053 // now step 1) can be continued
1054 // the stack can also contain unused items -> these items are removed from stack
1055
1056 int finished_slot = sux_sim->stack_size() - 1;
1057 while (finished_slot >= 0 || size_diff > 0) {
1058 while (size_diff > 0 || (cur_sim->stack_size() > 0 && cur_sim->get_slot(0) != sux_sim->get_slot(0))) {
1059 int reg = cur_sim->get_slot(0);
1060 if (sux_sim->contains(reg)) {
1061 int sux_slot = sux_sim->offset_from_tos(reg);
1062 merge_insert_xchg(instrs, cur_sim, sux_slot + size_diff);
1063
1064 } else if (!merge_rename(cur_sim, sux_sim, finished_slot, 0)) {
1065 assert(size_diff > 0, "must be");
1066
1067 merge_insert_pop(instrs, cur_sim);
1068 size_diff--;
1069 }
1070 assert(cur_sim->stack_size() == 0 || cur_sim->get_slot(0) != reg, "register must have been changed");
1071 }
1072
1073 while (finished_slot >= 0 && cur_sim->get_slot(finished_slot) == sux_sim->get_slot(finished_slot)) {
1074 finished_slot--;
1075 }
1076
1077 if (finished_slot >= 0) {
1078 int reg = cur_sim->get_slot(finished_slot);
1079
1080 if (sux_sim->contains(reg) || !merge_rename(cur_sim, sux_sim, finished_slot, finished_slot)) {
1081 assert(sux_sim->contains(reg) || size_diff > 0, "must be");
1082 merge_insert_xchg(instrs, cur_sim, finished_slot);
1083 }
1084 assert(cur_sim->get_slot(finished_slot) != reg, "register must have been changed");
1085 }
1086 }
1087
1088 #ifndef PRODUCT
1089 if (TraceFPUStack) {
1090 tty->print("after merging: pred: "); cur_sim->print(); tty->cr();
1091 tty->print(" sux: "); sux_sim->print(); tty->cr();
1092 tty->cr();
1093 }
1094 #endif
1095 assert(cur_sim->stack_size() == sux_sim->stack_size(), "stack size must be equal now");
1096 }
1097
1098
1099 void FpuStackAllocator::merge_cleanup_fpu_stack(LIR_List* instrs, FpuStackSim* cur_sim, BitMap& live_fpu_regs) {
1100 #ifndef PRODUCT
1101 if (TraceFPUStack) {
1102 tty->cr();
1103 tty->print("before cleanup: state: "); cur_sim->print(); tty->cr();
1104 tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
1105 }
1106 #endif
1107
1108 int slot = 0;
1109 while (slot < cur_sim->stack_size()) {
1110 int reg = cur_sim->get_slot(slot);
1111 if (!live_fpu_regs.at(reg)) {
1112 if (slot != 0) {
1113 merge_insert_xchg(instrs, cur_sim, slot);
1114 }
1115 merge_insert_pop(instrs, cur_sim);
1116 } else {
1117 slot++;
1118 }
1119 }
1120
1121 #ifndef PRODUCT
1122 if (TraceFPUStack) {
1123 tty->print("after cleanup: state: "); cur_sim->print(); tty->cr();
1124 tty->print(" live: "); live_fpu_regs.print_on(tty); tty->cr();
1125 tty->cr();
1126 }
1127
1128 // check if fpu stack only contains live registers
1129 for (unsigned int i = 0; i < live_fpu_regs.size(); i++) {
1130 if (live_fpu_regs.at(i) != cur_sim->contains(i)) {
1131 tty->print_cr("mismatch between required and actual stack content");
1132 break;
1133 }
1134 }
1135 #endif
1136 }
1137
1138
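// Propagates the FPU stack state at the current position to the successors of block.
// If the single successor has several predecessors, the stack is either cleaned up and
// recorded (first predecessor to reach it) or merged with the already recorded state;
// otherwise the state is propagated unchanged, relying on critical edges being split.
// Returns true if fix-up code was inserted in front of the current position.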
1139 bool FpuStackAllocator::merge_fpu_stack_with_successors(BlockBegin* block) {
1140 #ifndef PRODUCT
1141 if (TraceFPUStack) {
1142 tty->print_cr("Propagating FPU stack state for B%d at LIR_Op position %d to successors:",
1143 block->block_id(), pos());
1144 sim()->print();
1145 tty->cr();
1146 }
1147 #endif
1148
1149 bool changed = false;
1150 int number_of_sux = block->number_of_sux();
1151
1152 if (number_of_sux == 1 && block->sux_at(0)->number_of_preds() > 1) {
1153 // The successor has at least two incoming edges, so a stack merge will be necessary
1154 // If this block is the first predecessor, cleanup the current stack and propagate it
1155 // If this block is not the first predecessor, a stack merge will be necessary
1156
1157 BlockBegin* sux = block->sux_at(0);
1158 intArray* state = sux->fpu_stack_state();
1159 LIR_List* instrs = new LIR_List(_compilation);
1160
1161 if (state != NULL) {
1162 // Merge with a successor that already has an FPU stack state
1163 // the block must only have one successor because critical edges must have been split
1164 FpuStackSim* cur_sim = sim();
1165 FpuStackSim* sux_sim = temp_sim();
1166 sux_sim->read_state(state);
1167
1168 merge_fpu_stack(instrs, cur_sim, sux_sim);
1169
1170 } else {
1171 // propagate current FPU stack state to successor without state
1172 // clean up stack first so that there are no dead values on the stack
1173 if (ComputeExactFPURegisterUsage) {
1174 FpuStackSim* cur_sim = sim();
1175 BitMap live_fpu_regs = block->sux_at(0)->fpu_register_usage();
1176 assert(live_fpu_regs.size() == FrameMap::nof_fpu_regs, "missing register usage");
1177
1178 merge_cleanup_fpu_stack(instrs, cur_sim, live_fpu_regs);
1179 }
1180
1181 intArray* state = sim()->write_state();
1182 if (TraceFPUStack) {
1183 tty->print_cr("Setting FPU stack state of B%d (merge path)", sux->block_id());
1184 sim()->print(); tty->cr();
1185 }
1186 sux->set_fpu_stack_state(state);
1187 }
1188
1189 if (instrs->instructions_list()->length() > 0) {
1190 lir()->insert_before(pos(), instrs);
1191 set_pos(instrs->instructions_list()->length() + pos());
1192 changed = true;
1193 }
1194
1195 } else {
1196 // Propagate the unmodified stack to successors where a stack merge is not necessary
1197 intArray* state = sim()->write_state();
1198 for (int i = 0; i < number_of_sux; i++) {
1199 BlockBegin* sux = block->sux_at(i);
1200
1201 #ifdef ASSERT
1202 for (int j = 0; j < sux->number_of_preds(); j++) {
1203 assert(block == sux->pred_at(j), "all critical edges must be broken");
1204 }
1205
1206 // check if new state is same
1207 if (sux->fpu_stack_state() != NULL) {
1208 intArray* sux_state = sux->fpu_stack_state();
1209 assert(state->length() == sux_state->length(), "overwriting existing stack state");
1210 for (int j = 0; j < state->length(); j++) {
1211 assert(state->at(j) == sux_state->at(j), "overwriting existing stack state");
1212 }
1213 }
1214 #endif
1215 #ifndef PRODUCT
1216 if (TraceFPUStack) {
1217 tty->print_cr("Setting FPU stack state of B%d", sux->block_id());
1218 sim()->print(); tty->cr();
1219 }
1220 #endif
1221
1222 sux->set_fpu_stack_state(state);
1223 }
1224 }
1225
1226 #ifndef PRODUCT
1227 // assertions that FPU stack state conforms to all successors' states
1228 intArray* cur_state = sim()->write_state();
1229 for (int i = 0; i < number_of_sux; i++) {
1230 BlockBegin* sux = block->sux_at(i);
1231 intArray* sux_state = sux->fpu_stack_state();
1232
1233 assert(sux_state != NULL, "no fpu state");
1234 assert(cur_state->length() == sux_state->length(), "incorrect length");
1235 for (int j = 0; j < cur_state->length(); j++) {
1236 assert(cur_state->at(j) == sux_state->at(j), "element not equal");
1237 }
1238 }
1239 #endif
1240
1241 return changed;
1242 }
