Thu, 07 Sep 2017 09:12:16 +0800
#5745 [Code Reorganization] code cleanup and code style fix
This is a huge patch, but only code cleanup, code style fix and useless code deletion are included, for example:
tab -> two spaces, deleted spaces at the end of lines, deleted useless comments.
This patch also included:
Declaration and definition of class MacroAssembler is moved from assembler_mips.h/cpp to macroAssembler_mips.h/cpp
1 /*
2 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
3 * Copyright (c) 2015, 2016, Loongson Technology. All rights reserved.
4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
5 *
6 * This code is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 only, as
8 * published by the Free Software Foundation.
9 *
10 * This code is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
26 #include "precompiled.hpp"
27 #include "c1/c1_Compilation.hpp"
28 #include "c1/c1_FrameMap.hpp"
29 #include "c1/c1_Instruction.hpp"
30 #include "c1/c1_LIRAssembler.hpp"
31 #include "c1/c1_LIRGenerator.hpp"
32 #include "c1/c1_Runtime1.hpp"
33 #include "c1/c1_ValueStack.hpp"
34 #include "ci/ciArray.hpp"
35 #include "ci/ciObjArrayKlass.hpp"
36 #include "ci/ciTypeArrayKlass.hpp"
37 #include "runtime/sharedRuntime.hpp"
38 #include "runtime/stubRoutines.hpp"
39 #include "vmreg_mips.inline.hpp"
// Shorthand for emitting into the current LIR list; in ASSERT builds
// each emitted op also records the C++ source file/line that generated it.
#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif
47 // Item will be loaded into a byte register; Intel only
48 void LIRItem::load_byte_item() {
49 load_item();
50 LIR_Opr res = result();
52 if (!res->is_virtual() || !_gen->is_vreg_flag_set(res, LIRGenerator::byte_reg)) {
53 // make sure that it is a byte register
54 assert(!value()->type()->is_float() && !value()->type()->is_double(),
55 "can't load floats in byte register");
56 LIR_Opr reg = _gen->rlock_byte(T_BYTE);
57 __ move(res, reg);
59 _result = reg;
60 }
61 }
64 void LIRItem::load_nonconstant() {
65 LIR_Opr r = value()->operand();
66 if (r->is_constant()) {
67 _result = r;
68 } else {
69 load_item();
70 }
71 }
73 //--------------------------------------------------------------
74 // LIRGenerator
75 //--------------------------------------------------------------
// Fixed platform registers used by the generator.
LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::_v0_oop_opr; }  // exception oop in v0
LIR_Opr LIRGenerator::exceptionPcOpr() { return FrameMap::_v1_opr; }       // exception pc in v1
LIR_Opr LIRGenerator::divInOpr() { return FrameMap::_a0_opr; } // FIXME: x86-style fixed div input — verify for MIPS
LIR_Opr LIRGenerator::divOutOpr() { return FrameMap::_f0_opr; } // FIXME: FP register for an integer division result — verify
LIR_Opr LIRGenerator::remOutOpr() { return FrameMap::_f0_opr; } // FIXME: same concern as divOutOpr
LIR_Opr LIRGenerator::shiftCountOpr() { return FrameMap::_t3_opr; }
LIR_Opr LIRGenerator::syncTempOpr() { return FrameMap::_t2_opr; }
LIR_Opr LIRGenerator::getThreadTemp() { return LIR_OprFact::illegalOpr; }  // no thread temp needed on this platform
86 LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
87 LIR_Opr opr;
88 switch (type->tag()) {
89 case intTag: {
90 opr = FrameMap::_v0_opr;
91 break;
92 }
93 case objectTag: {
94 opr = FrameMap::_v0_oop_opr;
95 break;
96 }
97 case longTag: {
98 opr = FrameMap::_v0_v1_long_opr;
99 break;
100 }
101 case floatTag: {
102 opr = FrameMap::_f0_float_opr;
103 break;
104 }
105 case doubleTag: {
106 opr = FrameMap::_d0_double_opr;
107 break;
108 }
109 case addressTag:
110 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
111 }
113 assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
114 return opr;
115 }
117 LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
118 LIR_Opr reg = new_register(type);
119 set_vreg_flag(reg, callee_saved);
120 return reg;
121 }
// On MIPS any integer register can hold a byte value, so no special
// byte-register constraint is needed (unlike x86).
LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}
129 //--------- loading items into registers --------------------------------
132 // i486 instructions can inline constants
133 bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
134 if (type == T_SHORT || type == T_CHAR) {
135 // there is no immediate move of word values in asembler_i486.?pp
136 return false;
137 }
138 Constant* c = v->as_Constant();
139 if (c && c->state_before() == NULL) {
140 // constants of any type can be stored directly, except for
141 // unloaded object constants.
142 return true;
143 }
144 return false;
145 }
148 bool LIRGenerator::can_inline_as_constant(Value v) const {
149 if (v->type()->is_constant() && v->type()->as_IntConstant() != NULL) {
150 return Assembler::is_simm16(v->type()->as_IntConstant()->value());
151 } else {
152 return false;
153 }
154 }
157 bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
158 if (c->type() == T_INT && c->as_constant() != NULL) {
159 return Assembler::is_simm16(c->as_jint());
160 } else {
161 return false;
162 }
163 }
// Register used for the safepoint poll; a fresh virtual register works here.
LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}
171 LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
172 int shift, int disp, BasicType type) {
173 assert(base->is_register(), "must be");
174 if (index->is_constant()) {
175 disp += index->as_constant_ptr()->as_jint() << shift;
176 if (Assembler::is_simm16(disp)) {
177 return new LIR_Address(base,disp, type);
178 } else {
180 if(disp!=0){
181 #ifdef _LP64
182 LIR_Opr tmp = new_register(T_LONG);
183 #else
184 LIR_Opr tmp = new_register(T_INT);
185 #endif
186 __ move(LIR_OprFact::intConst((int)disp), tmp);
187 __ add(tmp, base, tmp);
188 return new LIR_Address(tmp, 0, type);
189 }
190 else
191 return new LIR_Address(base, 0, type);
192 }
193 } else if( index->is_register()) {
195 #ifdef _LP64
196 LIR_Opr tmpa = new_register(T_LONG);
197 #else
198 LIR_Opr tmpa = new_register(T_INT);
199 #endif
200 __ move(index, tmpa);
201 __ shift_left(tmpa, shift, tmpa);
202 __ add(tmpa,base, tmpa);
203 if (Assembler::is_simm16(disp)) {
204 return new LIR_Address(tmpa, disp, type);
205 } else {
206 if (disp!=0) {
207 #ifdef _LP64
208 LIR_Opr tmp = new_register(T_LONG);
209 #else
210 LIR_Opr tmp = new_register(T_INT);
211 #endif
213 __ move(LIR_OprFact::intConst((int)disp), tmp);
214 __ add(tmp, tmpa, tmp);
215 return new LIR_Address(tmp, 0, type);
216 } else
217 return new LIR_Address(tmpa, 0, type);
218 }
219 } else {
220 if (Assembler::is_simm16(disp)) {
221 return new LIR_Address(base,disp, type);
222 } else {
223 if (disp!=0) {
224 #ifdef _LP64
225 LIR_Opr tmp = new_register(T_LONG);
226 #else
227 LIR_Opr tmp = new_register(T_INT);
228 #endif
229 __ move(LIR_OprFact::intConst((int)disp), tmp);
230 __ add(tmp, base, tmp);
231 return new LIR_Address(tmp, 0, type);
232 } else
233 return new LIR_Address(base, 0, type);
234 }
235 }
236 }
238 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,BasicType type, bool needs_card_mark) {
239 int offset_in_bytes = arrayOopDesc::base_offset_in_bytes(type);
241 LIR_Address* addr;
242 if (index_opr->is_constant()) {
243 int elem_size = _type2aelembytes[type];
244 addr = new LIR_Address(array_opr,
245 offset_in_bytes + index_opr->as_jint() * elem_size, type);
246 } else if( index_opr->is_register()){
247 #ifdef _LP64
248 LIR_Opr tmp = new_register(T_LONG);
249 #else
250 LIR_Opr tmp = new_register(T_INT);
251 #endif
252 __ move(index_opr, tmp);
253 __ shift_left(tmp, LIR_Address::scale(type),tmp);
254 __ add(tmp, array_opr, tmp);
255 addr = new LIR_Address(tmp, offset_in_bytes,type);
257 }
258 else{
259 addr = new LIR_Address(array_opr,
260 offset_in_bytes, type);
261 }
263 if (needs_card_mark) {
264 // This store will need a precise card mark, so go ahead and
265 // compute the full adddres instead of computing once for the
266 // store and again for the card mark.
267 #ifdef _LP64
268 LIR_Opr tmp = new_register(T_ADDRESS);
269 #else
270 LIR_Opr tmp = new_register(T_INT);
271 #endif
272 __ leal(LIR_OprFact::address(addr), tmp);
273 return new LIR_Address(tmp, 0, type);
274 } else {
275 return addr;
276 }
279 }
282 LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
283 LIR_Opr r;
284 if (type == T_LONG) {
285 r = LIR_OprFact::longConst(x);
286 } else if (type == T_INT) {
287 r = LIR_OprFact::intConst(x);
288 } else {
289 ShouldNotReachHere();
290 }
291 return r;
292 }
294 void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
295 LIR_Opr temp = new_register(T_INT);
296 LIR_Opr pointer = new_register(T_INT);
297 #ifndef _LP64
298 __ move(LIR_OprFact::intConst((int)counter), pointer);
299 #else
300 __ move(LIR_OprFact::longConst((long)counter), pointer);
301 #endif
302 LIR_Opr addr = (LIR_Opr)new LIR_Address(pointer, type);
303 LIR_Opr c = LIR_OprFact::intConst((int)step);
304 __ add(addr, c, addr);
305 }
// Address-based counter increment is not implemented on this platform.
void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  Unimplemented();
}
313 bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
314 if (tmp->is_valid()) {
315 if (is_power_of_2(c + 1)) {
316 __ move(left, result);
317 __ shift_left(result, log2_intptr(c + 1), result);
318 __ sub(result, left, result);
319 return true;
320 } else if (is_power_of_2(c - 1)) {
321 __ move(left, result);
322 __ shift_left(result, log2_intptr(c - 1), result);
323 __ add(result, left, result);
324 return true;
325 }
326 }
327 return false;
328 }
// Store an outgoing call argument into the stack slot at offset_from_sp.
void LIRGenerator::store_stack_parameter (LIR_Opr item, ByteSize offset_from_sp) {
  BasicType type = item->type();
  __ store(item, new LIR_Address(FrameMap::_sp_opr, in_bytes(offset_from_sp), type));
}
336 //----------------------------------------------------------------------
337 // visitor functions
338 //----------------------------------------------------------------------
// Array store: array[index] = value, with range/null checks, an optional
// array store check, and GC write barriers for oop stores.
void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(),"");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  // A store check is only needed for oop stores whose value is not a
  // provably-null constant.
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      // Branch to the range-check stub when length <=(unsigned) index.
      __ branch(lir_cond_belowEqual, length.result(),index.result(),T_INT,new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = new_register(objectType);
    LIR_Opr tmp2 = new_register(objectType);
    LIR_Opr tmp3 = new_register(objectType);

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr, true, false, NULL);
    __ move(value.result(), array_addr, null_check_info);
    // Seems to be a precise card mark.
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  } else {
    __ move(value.result(), array_addr, null_check_info);
  }
}
// monitorenter: lock x->obj(), with an implicit null check on the object.
void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(),"");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  // "lock" stores the address of the monitor stack slot, so this is not an oop
#ifdef _LP64
  LIR_Opr lock = new_register(T_LONG);
#else
  LIR_Opr lock = new_register(T_INT);
#endif
  // Need a scratch register for biased locking on mips
  LIR_Opr scratch = LIR_OprFact::illegalOpr;
  if (UseBiasedLocking) {
    scratch = new_register(T_INT);
  }

  // Debug info for the implicit null check, if one is needed.
  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }
  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expect object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, syncTempOpr(), scratch,
                x->monitor_no(), info_for_exception, info);
}
// monitorexit: unlock x->obj().
// NOTE(review): obj is deliberately not loaded (dont_load_item);
// presumably monitor_exit recovers the object via obj_temp from the
// monitor slot — confirm against the shared monitor_exit helper.
void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(),"");

  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  LIR_Opr lock = new_register(T_INT);
  LIR_Opr obj_temp = new_register(T_INT);
  set_no_result(x);
  monitor_exit(obj_temp, lock, syncTempOpr(), LIR_OprFact::illegalOpr, x->monitor_no());
}
459 // _ineg, _lneg, _fneg, _dneg
460 void LIRGenerator::do_NegateOp(NegateOp* x) {
461 LIRItem value(x->x(), this);
462 value.set_destroys_register();
463 value.load_item();
464 LIR_Opr reg = rlock(x);
465 __ negate(value.result(), reg);
467 set_result(x, round_item(reg));
468 }
472 // for _fadd, _fmul, _fsub, _fdiv, _frem
473 // _dadd, _dmul, _dsub, _ddiv, _drem
474 void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
475 LIR_Opr tmp;
476 LIRItem left(x->x(), this);
477 LIRItem right(x->y(), this);
478 left.load_item();
479 right.load_item();
480 rlock_result(x);
481 switch (x->op()) {
482 case Bytecodes::_drem:
483 tmp = new_register(T_DOUBLE);
484 __ frem(left.result(), right.result(), x->operand(), tmp);
486 break;
487 case Bytecodes::_frem:
488 tmp = new_register(T_FLOAT);
489 __ frem(left.result(), right.result(), x->operand(), tmp);
490 break;
491 default: arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
492 }
493 }
498 // for _ladd, _lmul, _lsub, _ldiv, _lrem
499 void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
500 switch (x->op()) {
501 case Bytecodes::_lrem:
502 case Bytecodes::_lmul:
503 case Bytecodes::_ldiv: {
505 if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
506 LIRItem right(x->y(), this);
507 right.load_item();
509 CodeEmitInfo* info = state_for(x);
510 LIR_Opr item = right.result();
511 assert(item->is_register(), "must be");
512 __ branch(lir_cond_equal,item,LIR_OprFact::longConst(0), T_LONG, new DivByZeroStub(info));
513 }
515 address entry;
516 switch (x->op()) {
517 case Bytecodes::_lrem:
518 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
519 break; // check if dividend is 0 is done elsewhere
520 case Bytecodes::_ldiv:
521 entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
522 break; // check if dividend is 0 is done elsewhere
523 case Bytecodes::_lmul:
524 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
525 break;
526 default:
527 ShouldNotReachHere();
528 }
529 // order of arguments to runtime call is reversed.
530 LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
531 set_result(x, result);
532 break;
533 }
534 /* _ladd, _lsub is delete in sharedRuntime.hpp
535 case Bytecodes::_ladd:
536 case Bytecodes::_lsub: {
537 address entry;
538 switch (x->op()) {
539 case Bytecodes::_ladd:
540 entry = CAST_FROM_FN_PTR(address, SharedRuntime::ladd);
541 break; // check if dividend is 0 is done elsewhere
542 case Bytecodes::_lsub:
543 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lsub);
544 break; // check if dividend is 0 is done elsewhere
545 default:
546 ShouldNotReachHere();
547 }
549 // order of arguments to runtime call is reversed.
550 LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
551 set_result(x, result);
552 break;
553 }*/
555 /* {
556 LIRItem left(x->x(), this);
557 LIRItem right(x->y(), this);
558 left.load_item();
559 right.load_item();
560 rlock_result(x);
562 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
563 break;
564 }
565 */
566 default: ShouldNotReachHere();
567 }
568 }
573 // for: _iadd, _imul, _isub, _idiv, _irem
574 void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
575 bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
576 LIRItem left(x->x(), this);
577 LIRItem right(x->y(), this);
578 // missing test if instr is commutative and if we should swap
579 right.load_nonconstant();
580 assert(right.is_constant() || right.is_register(), "wrong state of right");
581 left.load_item();
582 rlock_result(x);
583 if (is_div_rem) {
584 CodeEmitInfo* info = state_for(x);
585 LIR_Opr tmp =new_register(T_INT);
586 if (x->op() == Bytecodes::_irem) {
587 __ irem(left.result(), right.result(), x->operand(), tmp, info);
588 } else if (x->op() == Bytecodes::_idiv) {
589 __ idiv(left.result(), right.result(), x->operand(), tmp, info);
590 }
591 } else {
592 //arithmetic_op_int(x->op(), x->operand(), left.result(),
593 //right.result(), FrameMap::G1_opr);
595 LIR_Opr tmp =new_register(T_INT);
596 arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(),
597 tmp);
598 }
599 }
602 void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
603 // when an operand with use count 1 is the left operand, then it is
604 // likely that no move for 2-operand-LIR-form is necessary
605 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
606 x->swap_operands();
607 }
609 ValueTag tag = x->type()->tag();
610 assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
611 switch (tag) {
612 case floatTag:
613 case doubleTag: do_ArithmeticOp_FPU(x); return;
614 case longTag: do_ArithmeticOp_Long(x); return;
615 case intTag: do_ArithmeticOp_Int(x); return;
616 }
617 ShouldNotReachHere();
618 }
621 // _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
622 void LIRGenerator::do_ShiftOp(ShiftOp* x) {
623 if(x->op() == Bytecodes::_lshl
624 || x->op() == Bytecodes::_lshr
625 || x->op() == Bytecodes::_lushr) {
626 address entry;
627 /* lushr, lshr, lshl, is delete in ShredRuntime.hpp
628 switch (x->op()) {
629 case Bytecodes::_lshl:
630 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshl);
631 break; // check if dividend is 0 is done elsewhere
632 case Bytecodes::_lshr:
633 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lshr);
634 break; // check if dividend is 0 is done elsewhere
635 case Bytecodes::_lushr:
636 entry = CAST_FROM_FN_PTR(address, SharedRuntime::lushr);
637 break;
638 default:
639 ShouldNotReachHere();
640 }
641 */
642 // order of arguments to runtime call is reversed.
643 LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
644 set_result(x, result);
645 return;
646 }
648 // count must always be in rcx
649 LIRItem value(x->x(), this);
650 LIRItem count(x->y(), this);
652 ValueTag elemType = x->type()->tag();
653 bool must_load_count = !count.is_constant() || elemType == longTag;
654 if (must_load_count) {
655 // count for long must be in register
656 count.load_item();
657 } else {
658 count.dont_load_item();
659 }
660 value.load_item();
661 LIR_Opr reg = rlock_result(x);
663 shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
664 }
667 // _iand, _land, _ior, _lor, _ixor, _lxor
668 void LIRGenerator::do_LogicOp(LogicOp* x) {
669 // when an operand with use count 1 is the left operand, then it is
670 // likely that no move for 2-operand-LIR-form is necessary
671 if (x->is_commutative() && x->y()->as_Constant() == NULL && x->x()->use_count() > x->y()->use_count()) {
672 x->swap_operands();
673 }
675 LIRItem left(x->x(), this);
676 LIRItem right(x->y(), this);
678 left.load_item();
679 right.load_nonconstant();
680 LIR_Opr reg = rlock_result(x);
682 logic_op(x->op(), reg, left.result(), right.result());
683 }
687 // _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
688 void LIRGenerator::do_CompareOp(CompareOp* x) {
689 LIRItem left(x->x(), this);
690 LIRItem right(x->y(), this);
691 ValueTag tag = x->x()->type()->tag();
692 if (tag == longTag) {
693 left.set_destroys_register();
694 }
695 left.load_item();
696 right.load_item();
697 LIR_Opr reg = rlock_result(x);
699 if (x->x()->type()->is_float_kind()) {
700 Bytecodes::Code code = x->op();
701 __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
702 } else if (x->x()->type()->tag() == longTag) {
703 __ lcmp2int(left.result(), right.result(), reg);
704 } else {
705 Unimplemented();
706 }
707 }
708 /*
709 void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
710 assert(x->number_of_arguments() == 3, "wrong type");
711 LIRItem obj (x->argument_at(0), this); // AtomicLong object
712 LIRItem cmp_value (x->argument_at(1), this); // value to compare with field
713 LIRItem new_value (x->argument_at(2), this);
714 // replace field with new_value if it matches cmp_value
716 // compare value must be in edx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
717 // cmp_value.load_item_force(FrameMap::eax_edx_long_opr);
718 cmp_value.load_item_force(FrameMap::_a0_a1_long_opr);
720 // new value must be in ecx,ebx (hi,lo)
721 // new_value.load_item_force(FrameMap::ebx_ecx_long_opr);
722 new_value.load_item_force(FrameMap::_a2_a3_long_opr);
723 // object pointer register is overwritten with field address
724 obj.load_item();
726 // generate compare-and-swap; produces zero condition if swap occurs
727 int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
728 LIR_Opr addr = obj.result();
729 __ add(addr, LIR_OprFact::intConst(value_offset), addr);
730 LIR_Opr t1 = LIR_OprFact::illegalOpr; // no temp needed
731 LIR_Opr t2 = LIR_OprFact::illegalOpr; // no temp needed
732 __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2, FrameMap::_at_opr);
734 // generate conditional move of boolean result
735 LIR_Opr result = rlock_result(x);
736 //__ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result);
737 __ move(FrameMap::_at_opr, result);
738 }
739 */
// Unsafe compareAndSwap intrinsic: atomically replace the field at
// obj+offset with val if it currently equals cmp; result is a boolean.
// FIXME, for mips, compareandswap is a bit different
// I have no idea use which register
void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  assert(obj.type()->tag() == objectTag, "invalid type");

  // In 64 bit the type can be long, sparc doesn't have this assert
  // assert(offset.type()->tag() == intTag, "invalid type");

  assert(cmp.type()->tag() == type->tag(), "invalid type");
  assert(val.type()->tag() == type->tag(), "invalid type");

  // get address of field
  obj.load_item();
  offset.load_nonconstant();

  // The compare value (and for long, the new value too) is forced into
  // fixed argument registers.
  if (type == objectType) {
    cmp.load_item_force(FrameMap::_a0_oop_opr);
    val.load_item();
  } else if (type == intType) {
    cmp.load_item_force(FrameMap::_a0_opr);
    val.load_item();
  } else if (type == longType) {
    cmp.load_item_force(FrameMap::_a0_a1_long_opr);
    val.load_item_force(FrameMap::_a2_a3_long_opr);
  } else {
    ShouldNotReachHere();
  }
  // addr = obj + offset
  LIR_Opr addr = new_pointer_register();
  __ move(obj.result(), addr);
  __ add(addr, offset.result(), addr);

  LIR_Opr ill = LIR_OprFact::illegalOpr;  // for convenience
  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), ill, ill, FrameMap::_at_opr);
  else {
    ShouldNotReachHere();
  }
  // generate conditional move of boolean result
  // There is no cmove on mips; our cmpxchg implementation leaves the
  // boolean result in AT, so just copy AT into the result register.
  LIR_Opr result = rlock_result(x);
  __ move(FrameMap::_at_opr, result);

  if (type == objectType)  // Write-barrier needed for Object fields.
    write_barrier(addr);
}
// Math intrinsics: abs/sqrt are inlined as LIR ops; the trigonometric
// and logarithmic ones become runtime calls into SharedRuntime.
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
        case vmIntrinsics::_dsqrt: {
          __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
        case vmIntrinsics::_dabs: {
          __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
          break;
        }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog: // fall through
    case vmIntrinsics::_dsin: // fall through
    case vmIntrinsics::_dtan: // fall through
    case vmIntrinsics::_dcos: {
      assert(x->number_of_arguments() == 1, "wrong type");

      // Select the SharedRuntime entry for this intrinsic.
      address runtime_entry = NULL;
      switch (x->id()) {
        case vmIntrinsics::_dsin:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
          break;
        case vmIntrinsics::_dcos:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
          break;
        case vmIntrinsics::_dtan:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
          break;
        case vmIntrinsics::_dlog:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
          break;
        case vmIntrinsics::_dlog10:
          runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
          break;
        default:
          ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
    }
  }
}
// System.arraycopy intrinsic. All five arguments are forced into fixed
// registers to match the arraycopy stub's expectations.
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");
  // Note: spill caller save before setting the item
  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);
  // load all values in callee_save_registers, as this makes the
  // parameter passing to the fast case simpler
  src.load_item_force     (FrameMap::_t0_oop_opr);
  src_pos.load_item_force (FrameMap::_a0_opr);
  dst.load_item_force     (FrameMap::_a1_oop_opr);
  dst_pos.load_item_force (FrameMap::_a2_opr);
  length.load_item_force  (FrameMap::_a3_opr);

  // Determine statically-known properties (flags, expected element klass).
  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  CodeEmitInfo* info = state_for(x, x->state());
  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), rlock_callee_saved(T_INT), expected_type, flags, info);
  set_no_result(x);
}
// CRC32 intrinsic is not implemented on this platform yet.
void LIRGenerator::do_update_CRC32(Intrinsic* x) {
  tty->print_cr("LIRGenerator::do_update_CRC32 unimplemented yet !");
  Unimplemented();
}
888 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
889 // _i2b, _i2c, _i2s
890 LIR_Opr fixed_register_for(BasicType type) {
891 switch (type) {
892 case T_FLOAT: return FrameMap::_f0_float_opr;
893 case T_DOUBLE: return FrameMap::_d0_double_opr;
894 case T_INT: return FrameMap::_v0_opr;
895 case T_LONG: return FrameMap::_v0_v1_long_opr;
896 default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
897 }
898 }
// Primitive conversions. The per-opcode flag table mirrors the x86
// generator; NOTE(review): UseSSE is an x86 flag -- presumably always 0
// on MIPS, which makes several fixed/round settings below constant.
// Confirm these settings are intentional for this port.
void LIRGenerator::do_Convert(Convert* x) {
  // flags that vary for the different operations and different SSE-settings
  bool fixed_input, fixed_result, round_result, needs_stub;

  switch (x->op()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s:
      fixed_input  = false;
      fixed_result = false;
      round_result = false;
      needs_stub   = false; break;
    case Bytecodes::_f2d:
      fixed_input  = UseSSE == 1;
      fixed_result = false;
      round_result = false;
      needs_stub   = false; break;
    case Bytecodes::_d2f:
      fixed_input  = false;
      fixed_result = UseSSE == 1;
      round_result = UseSSE < 1;
      needs_stub   = false; break;
    case Bytecodes::_i2f:
      fixed_input  = false;
      fixed_result = false;
      round_result = UseSSE < 1;
      needs_stub   = false; break;
    case Bytecodes::_i2d:
      fixed_input  = false;
      fixed_result = false;
      round_result = false;
      needs_stub   = false; break;
    case Bytecodes::_f2i:
      fixed_input  = false;
      fixed_result = false;
      round_result = false;
      needs_stub   = true; break;
    case Bytecodes::_d2i:
      fixed_input  = false;
      fixed_result = false;
      round_result = false;
      needs_stub   = true; break;
    case Bytecodes::_l2f:
      fixed_input  = false;
      fixed_result = UseSSE >= 1;
      round_result = UseSSE < 1;
      needs_stub   = false; break;
    case Bytecodes::_l2d:
      fixed_input  = false;
      fixed_result = UseSSE >= 2;
      round_result = UseSSE < 2;
      needs_stub   = false; break;
    case Bytecodes::_f2l:
      fixed_input  = true;
      fixed_result = true;
      round_result = false;
      needs_stub   = false; break;
    case Bytecodes::_d2l:
      fixed_input  = true;
      fixed_result = true;
      round_result = false;
      needs_stub   = false; break;
    default: ShouldNotReachHere();
  }

  LIRItem value(x->value(), this);
  value.load_item();
  LIR_Opr input = value.result();
  LIR_Opr result = rlock(x);

  // arguments of lir_convert
  LIR_Opr conv_input = input;
  LIR_Opr conv_result = result;
  ConversionStub* stub = NULL;

  if (fixed_input) {
    // Input must be in a specific physical register; move it there.
    conv_input = fixed_register_for(input->type());
    __ move(input, conv_input);
  }

  assert(fixed_result == false || round_result == false, "cannot set both");
  if (fixed_result) {
    conv_result = fixed_register_for(result->type());
  } else if (round_result) {
    // Force rounding by spilling the result through memory.
    result = new_register(result->type());
    set_vreg_flag(result, must_start_in_memory);
  }

  if (needs_stub) {
    // f2i/d2i need a slow path for NaN and out-of-range inputs.
    stub = new ConversionStub(x->op(), conv_input, conv_result);
  }

  __ convert(x->op(), conv_input, conv_result, stub);

  if (result != conv_result) {
    __ move(conv_result, result);
  }

  assert(result->is_virtual(), "result must be virtual register");
  set_result(x, result);
}
1005 void LIRGenerator::do_NewInstance(NewInstance* x) {
1006 const LIR_Opr reg = result_register_for(x->type());
1007 #ifndef PRODUCT
1008 if (PrintNotLoaded && !x->klass()->is_loaded()) {
1009 tty->print_cr(" ###class not loaded at new bci %d", x->printable_bci());
1010 }
1011 #endif
1012 CodeEmitInfo* info = state_for(x, x->state());
1013 // LIR_Opr tmp1 = new_register(T_INT);
1014 // LIR_Opr tmp2 = new_register(T_INT);
1015 // LIR_Opr tmp3 = new_register(T_INT);
1016 // LIR_Opr tmp4 = new_register(T_INT);
1017 #ifndef _LP64
1018 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1019 #else
1020 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1021 #endif
1022 // new_instance(reg, x->klass(), FrameMap::_t0_oop_opr, FrameMap::_t1_oop_opr,FrameMap::_t2_oop_opr, LIR_OprFact::illegalOpr, klass_reg, info);
1023 new_instance(reg,
1024 x->klass(),
1025 FrameMap::_t0_oop_opr,
1026 FrameMap::_t1_oop_opr,
1027 FrameMap::_t2_oop_opr,
1028 FrameMap::_t3_oop_opr,
1029 #ifndef _LP64
1030 FrameMap::_t5_oop_opr,
1031 FrameMap::_t6_oop_opr,
1032 #else
1033 FrameMap::_a5_oop_opr,
1034 FrameMap::_a6_oop_opr,
1035 #endif
1036 klass_reg,
1037 info);
1038 LIR_Opr result = rlock_result(x);
1039 __ move(reg, result);
1040 }
1043 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
1044 CodeEmitInfo* info = state_for(x, x->state());
1046 LIRItem length(x->length(), this);
1047 length.load_item_force(FrameMap::_t2_opr);
1049 LIR_Opr reg = result_register_for(x->type());
1050 LIR_Opr tmp1 = FrameMap::_t0_oop_opr;
1051 LIR_Opr tmp2 = FrameMap::_t1_oop_opr;
1052 LIR_Opr tmp3 = FrameMap::_t3_oop_opr;
1053 #ifndef _LP64
1054 LIR_Opr tmp4 = FrameMap::_t5_oop_opr;
1055 LIR_Opr tmp5 = FrameMap::_t6_oop_opr;
1056 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1057 #else
1058 LIR_Opr tmp4 = FrameMap::_a5_oop_opr;
1059 LIR_Opr tmp5 = FrameMap::_a6_oop_opr;
1060 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1061 #endif
1062 LIR_Opr len = length.result();
1063 BasicType elem_type = x->elt_type();
1065 __ metadata2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);
1067 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
1068 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4,tmp5, elem_type, klass_reg, slow_path);
1070 LIR_Opr result = rlock_result(x);
1071 __ move(reg, result);
1072 }
1076 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
1077 LIRItem length(x->length(), this);
1078 // in case of patching (i.e., object class is not yet loaded), we
1079 // need to reexecute the instruction
1080 // and therefore provide the state before the parameters have been consumed
1081 CodeEmitInfo* patching_info = NULL;
1082 if (!x->klass()->is_loaded() || PatchALot) {
1083 patching_info = state_for(x, x->state_before());
1084 }
1086 const LIR_Opr reg = result_register_for(x->type());
1087 LIR_Opr tmp1 = FrameMap::_t0_oop_opr;
1088 LIR_Opr tmp2 = FrameMap::_t1_oop_opr;
1089 LIR_Opr tmp3 = FrameMap::_t3_oop_opr;
1090 #ifndef _LP64
1091 LIR_Opr tmp4 = FrameMap::_t5_oop_opr;
1092 LIR_Opr tmp5 = FrameMap::_t6_oop_opr;
1093 LIR_Opr klass_reg = FrameMap::_t4_oop_opr;
1094 #else
1095 LIR_Opr tmp4 = FrameMap::_a5_oop_opr;
1096 LIR_Opr tmp5 = FrameMap::_a6_oop_opr;
1097 LIR_Opr klass_reg = FrameMap::_a4_oop_opr;
1098 #endif
1100 length.load_item_force(FrameMap::_t2_opr);
1102 LIR_Opr len = length.result();
1103 CodeEmitInfo* info = state_for(x, x->state());
1105 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
1106 ciKlass* obj = (ciKlass*) ciObjArrayKlass::make(x->klass());
1107 if (obj == ciEnv::unloaded_ciobjarrayklass()) {
1108 BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
1109 }
1110 klass2reg_with_patching(klass_reg, obj, patching_info);
1111 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, tmp5, T_OBJECT, klass_reg, slow_path);
1113 LIR_Opr result = rlock_result(x);
1114 __ move(reg, result);
1115 }
1118 void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
1119 Values* dims = x->dims();
1120 int i = dims->length();
1121 LIRItemList* items = new LIRItemList(dims->length(), NULL);
1122 while (i-- > 0) {
1123 LIRItem* size = new LIRItem(dims->at(i), this);
1124 items->at_put(i, size);
1125 }
1127 // need to get the info before, as the items may become invalid through item_free
1128 CodeEmitInfo* patching_info = NULL;
1129 if (!x->klass()->is_loaded() || PatchALot) {
1130 patching_info = state_for(x, x->state_before());
1131 // cannot re-use same xhandlers for multiple CodeEmitInfos, so
1132 // clone all handlers.
1133 x->set_exception_handlers(new XHandlers(x->exception_handlers()));
1134 }
1136 CodeEmitInfo* info = state_for(x, x->state());
1138 i = dims->length();
1139 while (i-- > 0) {
1140 LIRItem* size = items->at(i);
1141 size->load_nonconstant();
1142 store_stack_parameter(size->result(), in_ByteSize(i*4));
1143 }
1145 LIR_Opr reg = result_register_for(x->type());
1146 klass2reg_with_patching(reg, x->klass(), patching_info);
1148 // LIR_Opr rank = FrameMap::ebx_opr;
1149 LIR_Opr rank = FrameMap::_t2_opr;
1150 __ move(LIR_OprFact::intConst(x->rank()), rank);
1151 // LIR_Opr varargs = FrameMap::ecx_opr;
1152 LIR_Opr varargs = FrameMap::_t0_opr;
1153 __ move(FrameMap::_sp_opr, varargs);
1154 LIR_OprList* args = new LIR_OprList(3);
1155 args->append(reg);
1156 args->append(rank);
1157 args->append(varargs);
1158 __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
1159 LIR_OprFact::illegalOpr,
1160 reg, args, info);
1161 LIR_Opr result = rlock_result(x);
1162 __ move(reg, result);
1163 }
1165 void LIRGenerator::do_BlockBegin(BlockBegin* x) {
1166 // nothing to do for now
1167 }
1170 void LIRGenerator::do_CheckCast(CheckCast* x) {
1171 LIRItem obj(x->obj(), this);
1173 CodeEmitInfo* patching_info = NULL;
1174 if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
1175 // must do this before locking the destination register as an oop register,
1176 // and before the obj is loaded (the latter is for deoptimization)
1177 patching_info = state_for(x, x->state_before());
1178 }
1179 obj.load_item();
1181 // info for exceptions
1182 CodeEmitInfo* info_for_exception = state_for(x);
1184 CodeStub* stub;
1185 if (x->is_incompatible_class_change_check()) {
1186 assert(patching_info == NULL, "can't patch this");
1187 stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
1188 } else {
1189 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
1190 }
1191 LIR_Opr reg = rlock_result(x);
1192 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1193 if (!x->klass()->is_loaded() || UseCompressedOops) {
1194 tmp3 = new_register(objectType);
1195 }
1196 __ checkcast(reg, obj.result(), x->klass(),
1197 new_register(objectType), new_register(objectType), tmp3,
1198 x->direct_compare(), info_for_exception, patching_info, stub,
1199 x->profiled_method(), x->profiled_bci());
1200 }
1203 void LIRGenerator::do_InstanceOf(InstanceOf* x) {
1204 LIRItem obj(x->obj(), this);
1206 // result and test object may not be in same register
1207 LIR_Opr reg = rlock_result(x);
1208 CodeEmitInfo* patching_info = NULL;
1209 if ((!x->klass()->is_loaded() || PatchALot)) {
1210 // must do this before locking the destination register as an oop register
1211 patching_info = state_for(x, x->state_before());
1212 }
1213 obj.load_item();
1214 LIR_Opr tmp = new_register(objectType);
1215 LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
1216 if (!x->klass()->is_loaded() || UseCompressedOops) {
1217 tmp3 = new_register(objectType);
1218 }
1220 __ instanceof(reg, obj.result(), x->klass(),
1221 tmp, new_register(objectType), tmp3,
1222 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
1223 }
1226 void LIRGenerator::do_If(If* x) {
1227 assert(x->number_of_sux() == 2, "inconsistency");
1228 ValueTag tag = x->x()->type()->tag();
1229 bool is_safepoint = x->is_safepoint();
1231 If::Condition cond = x->cond();
1233 LIRItem xitem(x->x(), this);
1234 LIRItem yitem(x->y(), this);
1235 LIRItem* xin = &xitem;
1236 LIRItem* yin = &yitem;
1238 if (tag == longTag) {
1239 // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
1240 // mirror for other conditions
1241 if (cond == If::gtr || cond == If::leq) {
1242 cond = Instruction::mirror(cond);
1243 xin = &yitem;
1244 yin = &xitem;
1245 }
1246 xin->set_destroys_register();
1247 }
1248 xin->load_item();
1249 if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 && (cond == If::eql || cond == If::neq)) {
1250 // inline long zero
1251 yin->dont_load_item();
1252 } else if (tag == longTag || tag == floatTag || tag == doubleTag) {
1253 // longs cannot handle constants at right side
1254 yin->load_item();
1255 } else {
1256 yin->dont_load_item();
1257 }
1259 // add safepoint before generating condition code so it can be recomputed
1260 if (x->is_safepoint()) {
1261 // increment backedge counter if needed
1262 increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
1263 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
1264 }
1265 set_no_result(x);
1267 LIR_Opr left = xin->result();
1268 LIR_Opr right = yin->result();
1269 profile_branch(x, cond, left, right);
1270 move_to_phi(x->state());
1271 if (x->x()->type()->is_float_kind()) {
1272 __ branch(lir_cond(cond), left, right, right->type(), x->tsux(), x->usux());
1273 } else {
1274 __ branch(lir_cond(cond), left, right, right->type(), x->tsux());
1275 }
1276 assert(x->default_sux() == x->fsux(), "wrong destination above");
1277 __ jump(x->default_sux());
1278 }
1281 LIR_Opr LIRGenerator::getThreadPointer() {
1282 #ifdef _LP64
1283 //FIXME, does as_pointer need to be implemented? or 64bit can use one register. by aoqi
1284 //return FrameMap::as_pointer_opr(r15_thread);
1285 LIR_Opr result = new_register(T_LONG);
1286 __ get_thread(result);
1287 return result;
1288 #else
1289 LIR_Opr result = new_register(T_INT);
1290 __ get_thread(result);
1291 return result;
1292 #endif //
1293 }
1295 void LIRGenerator::trace_block_entry(BlockBegin* block) {
1296 store_stack_parameter(LIR_OprFact::intConst(block->block_id()), in_ByteSize(0));
1297 LIR_OprList* args = new LIR_OprList();
1298 address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
1299 __ call_runtime_leaf(func, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, args);
1300 }
1303 void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
1304 CodeEmitInfo* info) {
1305 if (address->type() == T_LONG) {
1306 __ volatile_store_mem_reg(value, address, info);
1307 } else {
1308 __ store(value, address, info);
1309 }
1310 }
1312 void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
1313 CodeEmitInfo* info) {
1315 if (address->type() == T_LONG) {
1316 __ volatile_load_mem_reg(address, result, info);
1317 } else {
1318 __ load(address, result, info);
1319 }
1320 }
// Raw (Unsafe) load of a value of the given type at src + offset into dst.
// The effective address is first computed into the scratch register AT.
void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
  __ add(src, offset, FrameMap::_at_opr);
  if (is_volatile && type == T_LONG) {
    // Volatile long: load the 64 bits as a double (a single FP load),
    // then bounce through a memory-resident spill slot to get the bits
    // into an integer long register. NOTE(review): presumably this is
    // done so the 64-bit read is a single atomic access on 32-bit —
    // confirm against the x86 port's matching code.
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    __ load(addr, tmp);
    LIR_Opr spill = new_register(T_LONG);
    // Force the spill vreg to live in memory so the FP->integer move
    // goes through a stack slot.
    set_vreg_flag(spill, must_start_in_memory);
    __ move(tmp, spill);
    __ move(spill, dst);
  } else {
    // Non-volatile or non-long: a plain typed load suffices.
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, type);
    __ load(addr, dst);
  }
}
// Raw (Unsafe) store of data (of the given type) to src + offset.
// The effective address is first computed into the scratch register AT.
void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  __ add(src, offset, FrameMap::_at_opr);
  if (is_volatile && type == T_LONG) {
    // Volatile long: move the value through a memory spill slot into an
    // FP double register, then store it with a single FP store.
    // NOTE(review): presumably this keeps the 64-bit write a single
    // atomic access on 32-bit — confirm against the x86 port.
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, T_DOUBLE);
    LIR_Opr tmp = new_register(T_DOUBLE);
    LIR_Opr spill = new_register(T_DOUBLE);
    // Force the spill vreg into memory so the integer->FP move goes
    // through a stack slot.
    set_vreg_flag(spill, must_start_in_memory);
    __ move(data, spill);
    __ move(spill, tmp);
    __ move(tmp, addr);
  } else {
    LIR_Address* addr = new LIR_Address(FrameMap::_at_opr, 0, type);
    bool is_obj = (type == T_ARRAY || type == T_OBJECT);
    if (is_obj) {
      // Object store: emit the GC write barriers around the move.
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr/* pre_val */,
                  true/* do_load */,false /*patch*/, NULL);
      __ move(data, addr);
      assert(src->is_register(), "must be register");
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    } else {
      // Primitive store needs no barriers.
      __ move(data, addr);
    }
  }
}
// Unsafe.getAndSet / getAndAdd intrinsic: atomically exchange (or add
// to) the value at object + offset, returning the previous value in
// the result register.
void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
  BasicType type = x->basic_type();
  LIRItem src(x->object(), this);
  LIRItem off(x->offset(), this);
  LIRItem value(x->value(), this);

  src.load_item();
  value.load_item();
  off.load_nonconstant();

  LIR_Opr dst = rlock_result(x, type);
  LIR_Opr data = value.result();
  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
  LIR_Opr offset = off.result();

  // Only int (plus long on LP64) for add; object exchange allowed too.
  assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type");
  LIR_Address* addr;
  if (offset->is_constant()) {
#ifdef _LP64
    jlong c = offset->as_jlong();
    if ((jlong)((jint)c) == c) {
      // Constant offset fits in 32 bits: fold it into the displacement.
      addr = new LIR_Address(src.result(), (jint)c, type);
    } else {
      // Too large for a displacement: materialize offset in a register.
      LIR_Opr tmp = new_register(T_LONG);
      __ move(offset, tmp);
      addr = new LIR_Address(src.result(), tmp, type);
    }
#else
    addr = new LIR_Address(src.result(), offset->as_jint(), type);
#endif
  } else {
    addr = new LIR_Address(src.result(), offset, type);
  }

  // Move the new value into the result register first.
  // NOTE(review): presumably xadd/xchg operate in place on dst — confirm.
  if (data != dst) {
    __ move(data, dst);
    data = dst;
  }
  if (x->is_add()) {
    __ xadd(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
  } else {
    if (is_obj) {
      // Do the pre-write barrier, if any.
      pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                  true /* do_load */, false /* patch */, NULL);
    }
    __ xchg(LIR_OprFact::address(addr), data, dst, LIR_OprFact::illegalOpr);
    if (is_obj) {
      // Seems to be a precise address
      post_barrier(LIR_OprFact::address(addr), data);
    }
  }
}