Tue, 26 Jul 2016 17:06:17 +0800
Add the multiply-word-to-GPR instruction (mul) to the MIPS assembler.
1 /*
2 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArray.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "vmreg_mips.inline.hpp"
41 #ifdef ASSERT
42 #define __ gen()->lir(__FILE__, __LINE__)->
43 #else
44 #define __ gen()->lir()->
45 #endif
// Item will be loaded into a byte register; Intel only.
// (Kept for x86 compatibility: if the loaded result is not already a
// virtual register flagged as byte-addressable, copy it into one
// obtained from rlock_byte.  On this port rlock_byte hands back an
// ordinary register -- see LIRGenerator::rlock_byte below.)
void LIRItem::load_byte_item() {
  load_item();
  LIR_Opr res = result();
  if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
    // make sure that it is a byte register
    assert(!value()->type()->is_float() && !value()->type()->is_double(),
           "can't load floats in byte register");
    LIR_Opr reg = _gen->rlock_byte(T_BYTE);
    __ move(res, reg);
    _result = reg;
  }
}
64 void LIRItem::load_nonconstant() {
65 LIR_Opr r = value()->operand();
66 if (r->is_constant()) {
67 _result = r;
68 } else {
69 load_item();
70 }
71 }
73 //--------------------------------------------------------------
74 // LIRGenerator
75 //--------------------------------------------------------------
// Fixed platform registers used by generated code on MIPS.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::_v0_oop_opr; } // exception oop passed in v0
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::_v1_opr; }      // exception pc passed in v1
LIR_Opr LIRGenerator::divInOpr() { return FrameMap::_a0_opr; }   // FIXME: div in/out registers look x86-derived -- confirm for MIPS
LIR_Opr LIRGenerator::divOutOpr() { return FrameMap::_f0_opr; }  // FIXME
LIR_Opr LIRGenerator::remOutOpr() { return FrameMap::_f0_opr; }  // FIXME
LIR_Opr LIRGenerator::shiftCountOpr() { return FrameMap::_t3_opr; }
LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::_t2_opr; }
LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; } // no dedicated thread temp on this port
86 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
87 LIR_Opr opr;
88 switch (type->tag()) {
89 case intTag:
90 {
91 opr = FrameMap::_v0_opr;
92 break;
93 }
94 case objectTag:
95 {
96 opr = FrameMap::_v0_oop_opr;
97 break;
98 }
99 case longTag:
100 {
101 opr = FrameMap::_v0_v1_long_opr;
102 break;
103 }
104 case floatTag:
105 {
106 opr = FrameMap::_f0_float_opr;
107 break;
108 }
109 case doubleTag: {
110 opr = FrameMap::_d0_double_opr;
111 break;
112 }
113 case addressTag:
114 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
115 }
117 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
118 return opr;
119 }
121 LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
122 LIR_Opr reg = new_register(type);
123 set_vreg_flag(reg, callee_saved);
124 return reg;
125 }
// Byte values get an ordinary T_INT virtual register on this port --
// presumably MIPS has no byte-register restriction (cf. the disabled
// x86-style variant below that sets the byte_reg flag) -- TODO confirm.
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
132 /*
133 LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
134 LIR_Opr reg = new_register(T_INT);
135 set_vreg_flag(reg, LIRGenerator::byte_reg);
136 return reg;
137 }
138 */
140 //--------- loading items into registers --------------------------------
143 // i486 instructions can inline constants
144 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
145 if (type == T_SHORT || type == T_CHAR) {
146 // there is no immediate move of word values in asembler_i486.?pp
147 return false;
148 }
149 Constant* c = v->as_Constant();
150 if (c && c->state_before() == NULL) {
151 // constants of any type can be stored directly, except for
152 // unloaded object constants.
153 return true;
154 }
155 return false;
156 }
159 bool LIRGenerator::can_inline_as_constant(Value v) const {
160 if (v->type()->is_constant() && v->type()->as_IntConstant() != NULL) {
161 return Assembler::is_simm16(v->type()->as_IntConstant()->value());
162 } else {
163 return false;
164 }
165 }
168 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
169 if (c->type() == T_INT && c->as_constant() != NULL) {
170 return Assembler::is_simm16(c->as_jint());
171 } else {
172 return false;
173 }
174 }
// Register used for safepoint polling; a fresh virtual register here.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}
181 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
182 int shift, int disp, BasicType type) {
183 /* assert(base->is_register(), "must be");
184 if (index->is_constant()) {
185 return new LIR_Address(base,
186 (index->as_constant_ptr()->as_jint() << shift) + disp,
187 type);
188 } else {
189 return new LIR_Address(base, index, (LIR_Address::Scale)shift, disp, type);
190 }
191 */
192 assert(base->is_register(), "must be");
194 if (index->is_constant()) {
195 disp += index->as_constant_ptr()->as_jint() << shift;
196 if (Assembler::is_simm16(disp)) {
197 return new LIR_Address(base,disp, type);
198 } else {
200 if(disp!=0){
201 #ifdef _LP64
202 LIR_Opr tmp = new_register(T_LONG);
203 #else
204 LIR_Opr tmp = new_register(T_INT);
205 #endif
207 __ move(LIR_OprFact::intConst((int)disp), tmp);
208 __ add(tmp, base, tmp);
209 return new LIR_Address(tmp, 0, type);
211 }
212 else
213 return new LIR_Address(base, 0, type);
215 }
217 }
218 else if( index->is_register()){
220 #ifdef _LP64
221 LIR_Opr tmpa = new_register(T_LONG);
222 #else
223 LIR_Opr tmpa = new_register(T_INT);
224 #endif
225 __ move(index, tmpa);
226 __ shift_left(tmpa, shift, tmpa);
227 __ add(tmpa,base, tmpa);
228 if (Assembler::is_simm16(disp)) {
229 return new LIR_Address(tmpa, disp, type);
230 } else {
232 if(disp!=0){
233 #ifdef _LP64
234 LIR_Opr tmp = new_register(T_LONG);
235 #else
236 LIR_Opr tmp = new_register(T_INT);
237 #endif
239 __ move(LIR_OprFact::intConst((int)disp), tmp);
240 __ add(tmp, tmpa, tmp);
241 return new LIR_Address(tmp, 0, type);
242 }
243 else
244 return new LIR_Address(tmpa, 0, type);
245 }
247 }
248 else {
250 if (Assembler::is_simm16(disp)) {
251 return new LIR_Address(base,disp, type);
252 } else {
253 if(disp!=0){
254 #ifdef _LP64
255 LIR_Opr tmp = new_register(T_LONG);
256 #else
257 LIR_Opr tmp = new_register(T_INT);
258 #endif
260 __ move(LIR_OprFact::intConst((int)disp), tmp);
261 __ add(tmp, base, tmp);
262 return new LIR_Address(tmp, 0, type);
263 }
264 else
265 return new LIR_Address(base, 0, type);
266 }
271 }
272 }
// Compute the address of array element `index_opr` within `array_opr`.
// A constant index is folded into the displacement; a register index is
// scaled and added to the array base explicitly (this port does not use
// LIR's scaled-index address form here -- see the disabled code below).
// When the store needs a precise card mark, the full address is
// materialized once so it can serve both the store and the mark.
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,BasicType type, bool needs_card_mark) {
  int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
  LIR_Address* addr;
  if (index_opr->is_constant()) {
    // Constant index: displacement = header size + index * element size.
    int elem_size = _type2aelembytes[type];
    addr = new LIR_Address(array_opr,
                           offset_in_bytes + index_opr->as_jint() * elem_size, type);
  } else if( index_opr->is_register()){
    // Register index: tmp = array_opr + (index << scale).
#ifdef _LP64
    LIR_Opr tmp = new_register(T_LONG);
#else
    LIR_Opr tmp = new_register(T_INT);
#endif
    __ move(index_opr, tmp);
    __ shift_left(tmp, LIR_Address::scale(type),tmp);
    __ add(tmp, array_opr, tmp);
    addr = new LIR_Address(tmp, offset_in_bytes,type);
    // addr = new LIR_Address(array_opr,
    //                        index_opr,
    //                        LIR_Address::scale(type),
    //                        offset_in_bytes, type);
  }
  else{
    addr = new LIR_Address(array_opr,
                           offset_in_bytes, type);
  }
  if (needs_card_mark) {
    // This store will need a precise card mark, so go ahead and
    // compute the full address instead of computing once for the
    // store and again for the card mark.
#ifdef _LP64
    LIR_Opr tmp = new_register(T_ADDRESS);
#else
    LIR_Opr tmp = new_register(T_INT);
#endif
    __ leal(LIR_OprFact::address(addr), tmp);
    return new LIR_Address(tmp, 0, type);
  } else {
    return addr;
  }
}
321 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
322 LIR_Opr r;
323 if (type == T_LONG) {
324 r = LIR_OprFact::longConst(x);
325 } else if (type == T_INT) {
326 r = LIR_OprFact::intConst(x);
327 } else {
328 ShouldNotReachHere();
329 }
330 return r;
331 }
334 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
335 LIR_Opr temp = new_register(T_INT);
336 LIR_Opr pointer = new_register(T_INT);
337 #ifndef _LP64
338 //by_css
339 __ move(LIR_OprFact::intConst((int)counter), pointer);
340 #else
341 __ move(LIR_OprFact::longConst((long)counter), pointer);
342 #endif
343 LIR_Opr addr = (LIR_Opr)new LIR_Address(pointer, type);
344 LIR_Opr c = LIR_OprFact::intConst((int)step);
345 __ add(addr, c, addr);
346 }
// Incrementing a counter through a pre-built LIR_Address is not needed
// by this port yet.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  Unimplemented();
}
// Try to replace `left * c` by shift/add or shift/sub when c is one off
// a power of two:
//   c == 2^k - 1  ->  (left << k) - left
//   c == 2^k + 1  ->  (left << k) + left
// Returns true if the reduction was emitted; false means the caller must
// emit a real multiply.  A valid `tmp` operand signals that the caller
// allows the reduction.
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  if (tmp->is_valid()) {
    if (is_power_of_2(c + 1)) {
      // c = 2^k - 1: shift, then subtract the original value.
      __ move(left, result);
      __ shift_left(result, log2_intptr(c + 1), result);
      __ sub(result, left, result);
      return true;
    } else if (is_power_of_2(c - 1)) {
      // c = 2^k + 1: shift, then add the original value.
      __ move(left, result);
      __ shift_left(result, log2_intptr(c - 1), result);
      __ add(result, left, result);
      return true;
    }
  }
  return false;
}
// Store an outgoing call argument at the given offset from SP.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::_sp_opr, in_bytes(offset_from_sp), type));
}
376 //----------------------------------------------------------------------
377 // visitor functions
378 //----------------------------------------------------------------------
// Array element store: emits range check, store check and GC barriers
// as required, then the element store itself.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A store check is only needed when storing a possibly-non-null object.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());
  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);
  array.load_item();
  index.load_nonconstant();
  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }
  set_no_result(x);
  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }
  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);
  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // take the stub when length is (unsigned) <= index, i.e. index out of bounds
      __ branch(lir_cond_belowEqual, length.result(),index.result(),T_INT,new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }
  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);
    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }
  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr, true, false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise card mark target
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
// Lock acquisition for a synchronized block / method.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();
  set_no_result(x);
  // "lock" stores the address of the monitor stack slot, so this is not an oop
#ifdef _LP64
  LIR_Opr lock = new_register(T_LONG);
#else
  LIR_Opr lock = new_register(T_INT);
#endif
  // Need a scratch register for biased locking on mips
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}
// Lock release for a synchronized block / method.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  // The object itself is not loaded; monitor_exit is handed a fresh
  // temp register (obj_temp) instead of the object's own operand.
  obj.dont_load_item();
  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
499 // _ineg, _lneg, _fneg, _dneg
500 void LIRGenerator::do_NegateOp(NegateOp* x) {
501 LIRItem value(x->x(), this);
502 value.set_destroys_register();
503 value.load_item();
504 LIR_Opr reg = rlock(x);
505 __ negate(value.result(), reg);
507 set_result(x, round_item(reg));
508 }
// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
// Remainder gets special handling with a temporary of the operand type;
// everything else goes through the shared arithmetic_op_fpu helper.
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  LIR_Opr tmp;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  rlock_result(x);
  switch (x->op()) {
    case Bytecodes::_drem:
      tmp = new_register(T_DOUBLE);
      __ frem(left.result(), right.result(), x->operand(), tmp);
      break;
    case Bytecodes::_frem:
      tmp = new_register(T_FLOAT);
      __ frem(left.result(), right.result(), x->operand(), tmp);
      break;
    default: arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
  }
}
// for _ladd, _lmul, _lsub, _ldiv, _lrem
// Long mul/div/rem are implemented via SharedRuntime calls; div and rem
// additionally get an explicit divide-by-zero check first.
// NOTE(review): _ladd/_lsub fall into the default ShouldNotReachHere()
// because the inline arithmetic_op_long path below is commented out --
// confirm those bytecodes are handled elsewhere for this port.
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_lrem:
  case Bytecodes::_lmul:
  case Bytecodes::_ldiv: {
    if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
      // Branch to the DivByZeroStub when the divisor is zero.
      LIRItem right(x->y(), this);
      right.load_item();
      CodeEmitInfo* info = state_for(x);
      LIR_Opr item = right.result();
      assert(item->is_register(), "must be");
      __ branch(lir_cond_equal,item,LIR_OprFact::longConst(0), T_LONG, new DivByZeroStub(info));
    }
    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }
    // order of arguments to runtime call is reversed.
    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  /* _ladd, _lsub is delete in sharedRuntime.hpp
  case Bytecodes::_ladd:
  case Bytecodes::_lsub: {
    address entry;
    switch (x->op()) {
    case Bytecodes::_ladd:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ladd);
      break; // check if dividend is 0 is done elsewhere
    case Bytecodes::_lsub:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lsub);
      break; // check if dividend is 0 is done elsewhere
    default:
      ShouldNotReachHere();
    }
    // order of arguments to runtime call is reversed.
    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
    set_result(x, result);
    break;
  }*/
  /* {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);
    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
    break;
  }
  */
  default: ShouldNotReachHere();
  }
}
// for: _iadd, _imul, _isub, _idiv, _irem
// div/rem need a CodeEmitInfo (implicit divide-by-zero exception) and a
// scratch register; everything else goes through arithmetic_op_int.
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  // missing test if instr is commutative and if we should swap
  right.load_nonconstant();
  assert(right.is_constant() || right.is_register(), "wrong state of right");
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp =new_register(T_INT);
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    //arithmetic_op_int(x->op(), x->operand(), left.result(),
    //right.result(), FrameMap::G1_opr);
    LIR_Opr tmp =new_register(T_INT);
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(),
                      tmp);
  }
}
642 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
643 // when an operand with use count 1 is the left operand, then it is
644 // likely that no move for 2-operand-LIR-form is necessary
645 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
646 x->swap_operands();
647 }
649 ValueTag tag = x->type()->tag();
650 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
651 switch (tag) {
652 case floatTag:
653 case doubleTag: do_ArithmeticOp_FPU(x); return;
654 case longTag: do_ArithmeticOp_Long(x); return;
655 case intTag: do_ArithmeticOp_Int(x); return;
656 }
657 ShouldNotReachHere();
658 }
661 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
662 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
663 if(x->op() == Bytecodes::_lshl
664 || x->op() == Bytecodes::_lshr
665 || x->op() == Bytecodes::_lushr) {
666 address entry;
667 /* lushr, lshr, lshl, is delete in ShredRuntime.hpp
668 switch (x->op()) {
669 case Bytecodes::_lshl:
670 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshl);
671 break; // check if dividend is 0 is done elsewhere
672 case Bytecodes::_lshr:
673 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshr);
674 break; // check if dividend is 0 is done elsewhere
675 case Bytecodes::_lushr:
676 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lushr);
677 break;
678 default:
679 ShouldNotReachHere();
680 }
681 */
682 // order of arguments to runtime call is reversed.
683 LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
684 set_result(x, result);
685 return;
686 }
688 // count must always be in rcx
689 LIRItem value(x->x(), this);
690 LIRItem count(x->y(), this);
692 ValueTag elemType = x->type()->tag();
693 bool must_load_count = !count.is_constant() || elemType == longTag;
694 if (must_load_count) {
695 // count for long must be in register
696 count.load_item();
697 } else {
698 count.dont_load_item();
699 }
700 value.load_item();
701 LIR_Opr reg = rlock_result(x);
703 shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
704 }
707 // _iand, _land, _ior, _lor, _ixor, _lxor
708 void LIRGenerator::do_LogicOp(LogicOp* x) {
709 // when an operand with use count 1 is the left operand, then it is
710 // likely that no move for 2-operand-LIR-form is necessary
711 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
712 x->swap_operands();
713 }
715 LIRItem left(x->x(), this);
716 LIRItem right(x->y(), this);
718 left.load_item();
719 right.load_nonconstant();
720 LIR_Opr reg = rlock_result(x);
722 logic_op(x->op(), reg, left.result(), right.result());
723 }
// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
// Three-way compare bytecodes producing an int result.
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  ValueTag tag = x->x()->type()->tag();
  if (tag == longTag) {
    // NOTE(review): left is marked destroyed for long compares --
    // presumably lcmp2int clobbers its left input; confirm.
    left.set_destroys_register();
  }
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    // The flag distinguishes fcmpl/dcmpl from fcmpg/dcmpg (NaN ordering).
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}
748 /*
749 void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
750 assert(x->number_of_arguments() == 3, "wrong type");
751 LIRItem obj (x->argument_at(0), this); // AtomicLong object
752 LIRItem cmp_value (x->argument_at(1), this); // value to compare with field
753 LIRItem new_value (x->argument_at(2), this);
754 // replace field with new_value if it matches cmp_value
756 // compare value must be in edx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
757 // cmp_value.load_item_force(FrameMap::eax_edx_long_opr);
758 cmp_value.load_item_force(FrameMap::_a0_a1_long_opr);
760 // new value must be in ecx,ebx (hi,lo)
761 // new_value.load_item_force(FrameMap::ebx_ecx_long_opr);
762 new_value.load_item_force(FrameMap::_a2_a3_long_opr);
763 // object pointer register is overwritten with field address
764 obj.load_item();
766 // generate compare-and-swap; produces zero condition if swap occurs
767 int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
768 LIR_Opr addr = obj.result();
769 __ add(addr, LIR_OprFact::intConst(value_offset), addr);
770 LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed
771 LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed
772 __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2, FrameMap::_at_opr);
774 // generate conditional move of boolean result
775 LIR_Opr result = rlock_result(x);
776 //__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
777 __ move(FrameMap::_at_opr, result);
778 }
779 */
//FIXME, for mips, compareandswap is a bit different
//I have no idea use which register
// Unsafe compare-and-swap intrinsic: computes obj+offset, performs the
// CAS (the cas_* LIR ops leave their boolean result in AT) and moves
// that flag into the result register.  Object CAS additionally needs a
// write barrier on the field address.
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj (x->argument_at(0), this); // object
  LIRItem offset(x->argument_at(1), this); // offset of field
  LIRItem cmp (x->argument_at(2), this); // value to compare with field
  LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
  assert(obj.type()->tag() == objectTag, "invalid type");
  //In 64bit the type can be long, sparc doesn't have this assert
  //assert(offset.type()->tag() == intTag, "invalid type");
  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");
  // get address of field
  obj.load_item();
  offset.load_nonconstant();
  // The compare value (and for long also the new value) must live in
  // the fixed argument registers expected by the CAS implementation.
  if (type == objectType) {
    // cmp.load_item_force(FrameMap::eax_oop_opr);
    cmp.load_item_force(FrameMap::_a0_oop_opr);
    val.load_item();
  } else if (type == intType) {
    // cmp.load_item_force(FrameMap::eax_opr);
    cmp.load_item_force(FrameMap::_a0_opr);
    val.load_item();
  } else if (type == longType) {
    //// cmp.load_item_force(FrameMap::eax_edx_long_opr);
    cmp.load_item_force(FrameMap::_a0_a1_long_opr);
    // val.load_item_force(FrameMap::ebx_ecx_long_opr);
    val.load_item_force(FrameMap::_a2_a3_long_opr);
  } else {
    ShouldNotReachHere();
  }
  // Field address = obj + offset.
  LIR_Opr addr = new_pointer_register();
  __ move(obj.result(), addr);
  __ add(addr, offset.result(), addr);
  LIR_Opr ill = LIR_OprFact::illegalOpr; // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else {
    ShouldNotReachHere();
  }
  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  //cmove not exist on mips,
  // __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
  // our implementation of cmpxchg put result in AT
  // LIR_Opr result = rlock_result_with_hint(x, hint());
  __ move(FrameMap::_at_opr, result);
  if (type == objectType) // Write-barrier needed for Object fields.
    write_barrier(addr);
}
// Math intrinsics: abs/sqrt are emitted inline; the transcendental
// functions are routed through SharedRuntime calls.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);
      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: {
      assert(x->number_of_arguments() == 1, "wrong type");
      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        default:
          ShouldNotReachHere();
      }
      // one-argument runtime call; its result becomes the intrinsic value
      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
    }
  }
}
// System.arraycopy intrinsic.  All five arguments are forced into fixed
// registers to keep parameter passing to the arraycopy stub simple.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");
  // Note: spill caller save before setting the item
  LIRItem src (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length (x->argument_at(4), this);
  // load all values in callee_save_registers, as this makes the
  // parameter passing to the fast case simpler
  src.load_item_force (FrameMap::_t0_oop_opr);
  src_pos.load_item_force (FrameMap::_a0_opr);
  dst.load_item_force (FrameMap::_a1_oop_opr);
  dst_pos.load_item_force (FrameMap::_a2_opr);
  length.load_item_force (FrameMap::_a3_opr);
  // Classify the copy (flags) and find the statically-known element
  // type, if any, so the best stub can be selected.
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);
  CodeEmitInfo* info = state_for(x, x->state());
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), rlock_callee_saved(T_INT),
               expected_type, flags, info);
  set_no_result(x);
}
// CRC32 intrinsic: not implemented for this port.
void LIRGenerator::do_update_CRC32(Intrinsic* x) { // Fu: 20130832
  tty->print_cr("LIRGenerator::do_update_CRC32 unimplemented yet !");
  Unimplemented();
}
930 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
931 // _i2b, _i2c, _i2s
932 LIR_Opr fixed_register_for(BasicType type) {
933 switch (type) {
934 case T_FLOAT: return FrameMap::_f0_float_opr;
935 case T_DOUBLE: return FrameMap::_d0_double_opr;
936 case T_INT: return FrameMap::_v0_opr;
937 case T_LONG: return FrameMap::_v0_v1_long_opr;
938 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
939 }
940 }
// Generate code for a primitive conversion (see the bytecode list above
// fixed_register_for).  Each conversion is classified by whether its
// input/result must live in a fixed register, whether the result must
// be rounded through memory, and whether a slow-path ConversionStub is
// needed.  NOTE(review): the UseSSE tests are inherited from the x86
// port -- TODO confirm they are meaningful on MIPS.
void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input, fixed_result, round_result, needs_stub;
  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s:
      fixed_input = false;
      fixed_result = false;
      round_result = false;
      needs_stub = false; break;
    case Bytecodes::_f2d:
      fixed_input = UseSSE == 1;
      fixed_result = false;
      round_result = false;
      needs_stub = false; break;
    case Bytecodes::_d2f:
      fixed_input = false;
      fixed_result = UseSSE == 1;
      round_result = UseSSE < 1;
      needs_stub = false; break;
    case Bytecodes::_i2f:
      fixed_input = false;
      fixed_result = false;
      round_result = UseSSE < 1;
      needs_stub = false; break;
    case Bytecodes::_i2d:
      fixed_input = false;
      fixed_result = false;
      round_result = false;
      needs_stub = false; break;
    case Bytecodes::_f2i:
      fixed_input = false;
      fixed_result = false;
      round_result = false;
      needs_stub = true; break;   // needs a slow-path ConversionStub
    case Bytecodes::_d2i:
      fixed_input = false;
      fixed_result = false;
      round_result = false;
      needs_stub = true; break;   // needs a slow-path ConversionStub
    case Bytecodes::_l2f:
      fixed_input = false;
      fixed_result = UseSSE >= 1;
      round_result = UseSSE < 1;
      needs_stub = false; break;
    case Bytecodes::_l2d:
      fixed_input = false;
      fixed_result = UseSSE >= 2;
      round_result = UseSSE < 2;
      needs_stub = false; break;
    case Bytecodes::_f2l:
      fixed_input = true;
      fixed_result = true;
      round_result = false;
      needs_stub = false; break;
    case Bytecodes::_d2l:
      fixed_input = true;
      fixed_result = true;
      round_result = false;
      needs_stub = false; break;
    default: ShouldNotReachHere();
  }
  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);
  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;
  if (fixed_input) {
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }
  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    // force the result through memory so it gets its declared precision
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }
  if (needs_stub) {
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }
  __ convert(x->op(), conv_input, conv_result, stub);
  if (result != conv_result) {
    __ move(conv_result, result);
  }
  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}
1047 void LIRGenerator::do_NewInstance(NewInstance* x) {
1048 const LIR_Opr reg = result_register_for(x->type());
1049 #ifndef PRODUCT
1050 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1051 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1052 }
1053 #endif
1054 CodeEmitInfo* info = state_for(x, x->state());
1055 // LIR_Opr tmp1 = new_register(T_INT);
1056 // LIR_Opr tmp2 = new_register(T_INT);
1057 // LIR_Opr tmp3 = new_register(T_INT);
1058 // LIR_Opr tmp4 = new_register(T_INT);
1059 #ifndef _LP64
1060 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1061 #else
1062 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1063 #endif
1064 // new_instance(reg, x->klass(), FrameMap::_t0_oop_opr, FrameMap::_t1_oop_opr,FrameMap::_t2_oop_opr, LIR_OprFact::illegalOpr, klass_reg, info);
1065 new_instance(reg,
1066 x->klass(),
1067 FrameMap::_t0_oop_opr,
1068 FrameMap::_t1_oop_opr,
1069 FrameMap::_t2_oop_opr,
1070 FrameMap::_t3_oop_opr,
1071 #ifndef _LP64
1072 FrameMap::_t5_oop_opr,
1073 FrameMap::_t6_oop_opr,
1074 #else
1075 FrameMap::_a5_oop_opr,
1076 FrameMap::_a6_oop_opr,
1077 #endif
1078 klass_reg,
1079 info);
1080 LIR_Opr result = rlock_result(x);
1081 __ move(reg, result);
1083 }
1084 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1085 CodeEmitInfo* info = state_for(x, x->state());
1087 LIRItem length(x->length(), this);
1088 length.load_item_force(FrameMap::_t2_opr);
1090 LIR_Opr reg = result_register_for(x->type());
1091 //LIR_Opr tmp1 = new_register(T_INT);
1092 //LIR_Opr tmp2 = new_register(T_INT);
1093 //LIR_Opr tmp3 = new_register(T_INT);
1094 //LIR_Opr tmp4 = new_register(T_INT);
1095 LIR_Opr tmp1 = FrameMap::_t0_oop_opr;
1096 LIR_Opr tmp2 = FrameMap::_t1_oop_opr;
1097 LIR_Opr tmp3 = FrameMap::_t3_oop_opr;
1098 #ifndef _LP64
1099 LIR_Opr tmp4 = FrameMap::_t5_oop_opr;
1100 LIR_Opr tmp5 = FrameMap::_t6_oop_opr;
1101 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1102 #else
1103 LIR_Opr tmp4 = FrameMap::_a5_oop_opr;
1104 LIR_Opr tmp5 = FrameMap::_a6_oop_opr;
1105 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1106 #endif
1107 LIR_Opr len = length.result();
1108 BasicType elem_type = x->elt_type();
1110 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1112 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1113 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4,tmp5, elem_type, klass_reg, slow_path);
1115 LIR_Opr result = rlock_result(x);
1116 __ move(reg, result);
1117 }
1121 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1122 LIRItem length(x->length(), this);
1123 // in case of patching (i.e., object class is not yet loaded), we
1124 // need to reexecute the instruction
1125 // and therefore provide the state before the parameters have been consumed
1126 CodeEmitInfo* patching_info = NULL;
1127 if (!x->klass()->is_loaded() || PatchALot) {
1128 patching_info = state_for(x, x->state_before());
1129 }
1131 const LIR_Opr reg = result_register_for(x->type());
1132 //LIR_Opr tmp1 = new_register(T_INT);
1133 //LIR_Opr tmp2 = new_register(T_INT);
1134 //LIR_Opr tmp3 = new_register(T_INT);
1135 //LIR_Opr tmp4 = new_register(T_INT);
1136 LIR_Opr tmp1 = FrameMap::_t0_oop_opr;
1137 LIR_Opr tmp2 = FrameMap::_t1_oop_opr;
1138 LIR_Opr tmp3 = FrameMap::_t3_oop_opr;
1139 #ifndef _LP64
1140 LIR_Opr tmp4 = FrameMap::_t5_oop_opr;
1141 LIR_Opr tmp5 = FrameMap::_t6_oop_opr;
1142 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1143 #else
1144 LIR_Opr tmp4 = FrameMap::_a5_oop_opr;
1145 LIR_Opr tmp5 = FrameMap::_a6_oop_opr;
1146 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1147 #endif
1149 length.load_item_force(FrameMap::_t2_opr);
1151 LIR_Opr len = length.result();
1152 CodeEmitInfo* info = state_for(x, x->state());
1154 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1155 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1156 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1157 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1158 }
1159 klass2reg_with_patching(klass_reg, obj, patching_info);
1160 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, tmp5, T_OBJECT, klass_reg, slow_path);
1162 LIR_Opr result = rlock_result(x);
1163 __ move(reg, result);
1164 }
1167 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1168 Values* dims = x->dims();
1169 int i = dims->length();
1170 LIRItemList* items = new LIRItemList(dims->length(), NULL);
1171 while (i-- > 0) {
1172 LIRItem* size = new LIRItem(dims->at(i), this);
1173 items->at_put(i, size);
1174 }
1176 // need to get the info before, as the items may become invalid through item_free
1177 CodeEmitInfo* patching_info = NULL;
1178 if (!x->klass()->is_loaded() || PatchALot) {
1179 patching_info = state_for(x, x->state_before());
1180 // cannot re-use same xhandlers for multiple CodeEmitInfos, so
1181 // clone all handlers.
1182 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
1183 }
1185 CodeEmitInfo* info = state_for(x, x->state());
1187 i = dims->length();
1188 while (i-- > 0) {
1189 LIRItem* size = items->at(i);
1190 size->load_nonconstant();
1191 store_stack_parameter(size->result(), in_ByteSize(i*4));
1192 }
1194 LIR_Opr reg = result_register_for(x->type());
1195 klass2reg_with_patching(reg, x->klass(), patching_info);
1197 // LIR_Opr rank = FrameMap::ebx_opr;
1198 LIR_Opr rank = FrameMap::_t2_opr;
1199 __ move(LIR_OprFact::intConst(x->rank()), rank);
1200 // LIR_Opr varargs = FrameMap::ecx_opr;
1201 LIR_Opr varargs = FrameMap::_t0_opr;
1202 __ move(FrameMap::_sp_opr, varargs);
1203 LIR_OprList* args = new LIR_OprList(3);
1204 args->append(reg);
1205 args->append(rank);
1206 args->append(varargs);
1207 __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
1208 LIR_OprFact::illegalOpr,
1209 reg, args, info);
1210 LIR_Opr result = rlock_result(x);
1211 __ move(reg, result);
1212 }
1214 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1215 // nothing to do for now
1216 }
// Generate code for a checkcast bytecode: verify that obj is assignable to
// x->klass(), throwing ClassCastException (or IncompatibleClassChangeError
// for the invokeinterface check variant) on failure via a SimpleExceptionStub.
void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);

  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (the latter is for deoptimization)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();

  // info for exceptions
  CodeEmitInfo* info_for_exception = state_for(x);

  // Choose the stub: the incompatible-class-change variant never patches and
  // passes no failing object; the normal variant reports obj in the exception.
  CodeStub* stub;
  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is requested only when the klass isn't loaded yet or
  // compressed oops are enabled (see matching condition in do_InstanceOf).
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType),
               tmp3, x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
// Generate code for an instanceof bytecode: reg = (obj instanceof x->klass()).
// Records klass-patching info when the klass is not yet loaded.
void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);

  // result and test object may not be in same register
  LIR_Opr reg = rlock_result(x);
  CodeEmitInfo* patching_info = NULL;
  if ((!x->klass()->is_loaded() || PatchALot)) {
    // must do this before locking the destination register as an oop register
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp = new_register(objectType);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  // A third temp is requested only when the klass isn't loaded yet or
  // compressed oops are enabled (same condition as in do_CheckCast).
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }

  __ instanceof(reg, obj.result(), x->klass(),
                tmp, new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}
1275 void LIRGenerator::do_If(If* x) {
1276 assert(x->number_of_sux() == 2, "inconsistency");
1277 ValueTag tag = x->x()->type()->tag();
1278 bool is_safepoint = x->is_safepoint();
1280 If::Condition cond = x->cond();
1282 LIRItem xitem(x->x(), this);
1283 LIRItem yitem(x->y(), this);
1284 LIRItem* xin = &xitem;
1285 LIRItem* yin = &yitem;
1287 if (tag == longTag) {
1288 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1289 // mirror for other conditions
1290 if (cond == If::gtr || cond == If::leq) {
1291 cond = Instruction::mirror(cond);
1292 xin = &yitem;
1293 yin = &xitem;
1294 }
1295 xin->set_destroys_register();
1296 }
1297 xin->load_item();
1298 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1299 // inline long zero
1300 yin->dont_load_item();
1301 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1302 // longs cannot handle constants at right side
1303 yin->load_item();
1304 } else {
1305 yin->dont_load_item();
1306 }
1308 // add safepoint before generating condition code so it can be recomputed
1309 if (x->is_safepoint()) {
1310 // increment backedge counter if needed
1311 increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
1313 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1314 }
1315 set_no_result(x);
1317 LIR_Opr left = xin->result();
1318 LIR_Opr right = yin->result();
1319 // __ cmp(lir_cond(cond), left, right);
1320 profile_branch(x, cond, left, right);
1321 move_to_phi(x->state());
1322 if (x->x()->type()->is_float_kind()) {
1323 __ branch(lir_cond(cond), left, right, right->type(), x->tsux(), x->usux());
1324 } else {
1325 __ branch(lir_cond(cond), left, right, right->type(), x->tsux());
1326 }
1327 assert(x->default_sux() == x->fsux(), "wrong destination above");
1328 __ jump(x->default_sux());
1329 }
1332 LIR_Opr LIRGenerator::getThreadPointer() {
1333 #ifdef _LP64
1334 //FIXME, does as_pointer need to be implemented? or 64bit can use one register. by aoqi
1335 //return FrameMap::as_pointer_opr(r15_thread);
1336 LIR_Opr result = new_register(T_LONG);
1337 __ get_thread(result);
1338 return result;
1339 #else
1340 LIR_Opr result = new_register(T_INT);
1341 __ get_thread(result);
1342 return result;
1343 #endif //
1344 }
1346 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1347 store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
1348 LIR_OprList* args = new LIR_OprList();
1349 address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1350 __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1351 }
1354 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1355 CodeEmitInfo* info) {
1356 if (address->type() == T_LONG) {
1357 __ volatile_store_mem_reg(value, address, info);
1358 } else {
1359 __ store(value, address, info);
1360 }
1361 }
1363 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1364 CodeEmitInfo* info) {
1366 if (address->type() == T_LONG) {
1367 __ volatile_load_mem_reg(address, result, info);
1368 } else {
1369 __ load(address, result, info);
1370 }
1371 }
// Unsafe raw read: dst = *(src + offset). The effective address is first
// formed in the AT scratch register.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  __ add(src, offset, FrameMap::_at_opr);
  if (is_volatile && type == T_LONG) {
    // Volatile long: load through an FPU double register, then transfer it to
    // the long destination via a spill slot forced to live in memory.
    // NOTE(review): presumably this makes the 64-bit memory access a single
    // instruction for atomicity -- confirm against the LIR assembler.
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    __ load(addr, tmp);
    LIR_Opr spill = new_register(T_LONG);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(tmp, spill);
    __ move(spill, dst);
  } else {
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, type);
    __ load(addr, dst);
  }
}
// Unsafe raw write: *(src + offset) = data. The effective address is first
// formed in the AT scratch register. Object stores get the GC pre/post
// write barriers.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  __ add(src, offset, FrameMap::_at_opr);
  if (is_volatile && type == T_LONG) {
    // Volatile long: route the value through a memory-resident spill slot and
    // an FPU double register before storing.
    // NOTE(review): presumably this makes the 64-bit memory access a single
    // instruction for atomicity -- confirm against the LIR assembler.
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr/* pre_val */,
                  true/* do_load */,false /*patch*/, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      __ move(data, addr);
    }
  }
}
// Unsafe.getAndSet / getAndAdd: atomically exchange (xchg) or add (xadd) the
// value at object+offset, returning the previous value. Object exchanges get
// the GC pre/post write barriers.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // getAndAdd is only supported for int (and long on 64-bit); getAndSet also
  // supports object references.
  assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Address* addr;
  if (offset->is_constant()) {
#ifdef _LP64
    jlong c = offset->as_jlong();
    // Use a constant displacement only if it fits in a jint; otherwise the
    // offset is materialized in a register.
    if ((jlong)((jint)c) == c) {
      addr = new LIR_Address(src.result(), (jint)c, type);
    } else {
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset, tmp);
      addr = new LIR_Address(src.result(), tmp, type);
    }
#else
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
#endif
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  // xadd/xchg use the same operand as both input and output; copy the new
  // value into the result register first.
  if (data != dst) {
    __ move(data, dst);
    data = dst;
  }
  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
    if (is_obj) {
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}