Tue, 15 May 2012 10:10:23 +0200
7133857: exp() and pow() should use the x87 ISA on x86
Summary: use x87 instructions to implement exp() and pow() in interpreter/c1/c2.
Reviewed-by: kvn, never, twisti
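
The hunk below is the C1 LIR generator for the SPARC port (apparently c1_LIRGenerator_sparc.cpp): on SPARC, exp() and pow() remain runtime calls into SharedRuntime, while the x86 back ends gain x87 fast paths. Those fast paths rest on the identities exp(x) = 2^(x * log2 e) and pow(x, y) = 2^(y * log2 x), which the x87 FYL2X, F2XM1 and FSCALE instructions implement. A minimal portable sketch of that decomposition (illustrative only, not the HotSpot code; exp2/log2 stand in for the x87 instructions):

#include <cmath>
#include <cstdio>

// pow(x, y) = 2^(y * log2(x)): FYL2X forms y * log2(x),
// F2XM1 + FSCALE then raise 2 to that power.  Valid for x > 0;
// the real stubs also handle negative bases, NaN and infinities,
// which this sketch ignores.
static double pow_sketch(double x, double y) {
  return std::exp2(y * std::log2(x));
}

// exp(x) = 2^(x * log2(e)): the same machinery with the constant log2(e).
static double exp_sketch(double x) {
  return std::exp2(x * 1.4426950408889634);  // log2(e)
}

int main() {
  std::printf("%g %g\n", pow_sketch(2.0, 10.0), exp_sketch(1.0));  // 1024 2.71828
  return 0;
}
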
/*
 * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_Runtime1.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArray.hpp"
#include "ci/ciObjArrayKlass.hpp"
#include "ci/ciTypeArrayKlass.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_sparc.inline.hpp"

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

void LIRItem::load_byte_item() {
  // byte loads use same registers as other loads
  load_item();
}


void LIRItem::load_nonconstant() {
  LIR_Opr r = value()->operand();
  if (_gen->can_inline_as_constant(value())) {
    if (!r->is_constant()) {
      r = LIR_OprFact::value_type(value()->type());
    }
    _result = r;
  } else {
    load_item();
  }
}

//--------------------------------------------------------------
//               LIRGenerator
//--------------------------------------------------------------

LIR_Opr LIRGenerator::exceptionOopOpr() { return FrameMap::Oexception_opr; }
LIR_Opr LIRGenerator::exceptionPcOpr()  { return FrameMap::Oissuing_pc_opr; }
LIR_Opr LIRGenerator::syncTempOpr()     { return new_register(T_OBJECT); }
LIR_Opr LIRGenerator::getThreadTemp()   { return rlock_callee_saved(T_INT); }

LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
  LIR_Opr opr;
  switch (type->tag()) {
    case intTag:    opr = callee ? FrameMap::I0_opr      : FrameMap::O0_opr;       break;
    case objectTag: opr = callee ? FrameMap::I0_oop_opr  : FrameMap::O0_oop_opr;   break;
    case longTag:   opr = callee ? FrameMap::in_long_opr : FrameMap::out_long_opr; break;
    case floatTag:  opr = FrameMap::F0_opr;        break;
    case doubleTag: opr = FrameMap::F0_double_opr; break;

    case addressTag:
    default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
  }

  assert(opr->type_field() == as_OprType(as_BasicType(type)), "type mismatch");
  return opr;
}

LIR_Opr LIRGenerator::rlock_callee_saved(BasicType type) {
  LIR_Opr reg = new_register(type);
  set_vreg_flag(reg, callee_saved);
  return reg;
}


LIR_Opr LIRGenerator::rlock_byte(BasicType type) {
  return new_register(T_INT);
}


//--------- loading items into registers --------------------------------

// SPARC cannot inline all constants
bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
  if (v->type()->as_IntConstant() != NULL) {
    return v->type()->as_IntConstant()->value() == 0;
  } else if (v->type()->as_LongConstant() != NULL) {
    return v->type()->as_LongConstant()->value() == 0L;
  } else if (v->type()->as_ObjectConstant() != NULL) {
    return v->type()->as_ObjectConstant()->value()->is_null_object();
  } else {
    return false;
  }
}


// only simm13 constants can be inlined
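// (a simm13 is the signed 13-bit immediate of SPARC arithmetic
// instructions, covering the range [-4096, 4095])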
bool LIRGenerator::can_inline_as_constant(Value i) const {
  if (i->type()->as_IntConstant() != NULL) {
    return Assembler::is_simm13(i->type()->as_IntConstant()->value());
  } else {
    return can_store_as_constant(i, as_BasicType(i->type()));
  }
}


bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
  if (c->type() == T_INT) {
    return Assembler::is_simm13(c->as_jint());
  }
  return false;
}


LIR_Opr LIRGenerator::safepoint_poll_register() {
  return new_register(T_INT);
}

LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
                                            int shift, int disp, BasicType type) {
  assert(base->is_register(), "must be");

  // accumulate fixed displacements
  if (index->is_constant()) {
    disp += index->as_constant_ptr()->as_jint() << shift;
    index = LIR_OprFact::illegalOpr;
  }

  if (index->is_register()) {
    // apply the shift and accumulate the displacement
    if (shift > 0) {
      LIR_Opr tmp = new_pointer_register();
      __ shift_left(index, shift, tmp);
      index = tmp;
    }
    if (disp != 0) {
      LIR_Opr tmp = new_pointer_register();
      if (Assembler::is_simm13(disp)) {
        __ add(index, LIR_OprFact::intptrConst(disp), tmp);
        index = tmp;
      } else {
        __ move(LIR_OprFact::intptrConst(disp), tmp);
        __ add(tmp, index, tmp);
        index = tmp;
      }
      disp = 0;
    }
  } else if (disp != 0 && !Assembler::is_simm13(disp)) {
    // index is illegal so replace it with the displacement loaded into a register
    index = new_pointer_register();
    __ move(LIR_OprFact::intptrConst(disp), index);
    disp = 0;
  }

  // at this point we either have base + index or base + displacement
  if (disp == 0) {
    return new LIR_Address(base, index, type);
  } else {
    assert(Assembler::is_simm13(disp), "must be");
    return new LIR_Address(base, disp, type);
  }
}
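

// Element address = array oop + arrayOopDesc::base_offset_in_bytes(type)
// + index * element size; constant indices are folded into the
// displacement when the sum still fits in a simm13.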
LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                              BasicType type, bool needs_card_mark) {
  int elem_size = type2aelembytes(type);
  int shift = exact_log2(elem_size);

  LIR_Opr base_opr;
  int offset = arrayOopDesc::base_offset_in_bytes(type);

  if (index_opr->is_constant()) {
    int i = index_opr->as_constant_ptr()->as_jint();
    int array_offset = i * elem_size;
    if (Assembler::is_simm13(array_offset + offset)) {
      base_opr = array_opr;
      offset = array_offset + offset;
    } else {
      base_opr = new_pointer_register();
      if (Assembler::is_simm13(array_offset)) {
        __ add(array_opr, LIR_OprFact::intptrConst(array_offset), base_opr);
      } else {
        __ move(LIR_OprFact::intptrConst(array_offset), base_opr);
        __ add(base_opr, array_opr, base_opr);
      }
    }
  } else {
#ifdef _LP64
    if (index_opr->type() == T_INT) {
      LIR_Opr tmp = new_register(T_LONG);
      __ convert(Bytecodes::_i2l, index_opr, tmp);
      index_opr = tmp;
    }
#endif

    base_opr = new_pointer_register();
    assert(index_opr->is_register(), "Must be register");
    if (shift > 0) {
      __ shift_left(index_opr, shift, base_opr);
      __ add(base_opr, array_opr, base_opr);
    } else {
      __ add(index_opr, array_opr, base_opr);
    }
  }
  if (needs_card_mark) {
    LIR_Opr ptr = new_pointer_register();
    __ add(base_opr, LIR_OprFact::intptrConst(offset), ptr);
    return new LIR_Address(ptr, type);
  } else {
    return new LIR_Address(base_opr, offset, type);
  }
}
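
// Materialize an immediate: values outside the simm13 range cannot be
// encoded in an instruction and must be loaded into a fresh register.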
LIR_Opr LIRGenerator::load_immediate(int x, BasicType type) {
  LIR_Opr r;
  if (type == T_LONG) {
    r = LIR_OprFact::longConst(x);
  } else if (type == T_INT) {
    r = LIR_OprFact::intConst(x);
  } else {
    ShouldNotReachHere();
  }
  if (!Assembler::is_simm13(x)) {
    LIR_Opr tmp = new_register(type);
    __ move(r, tmp);
    return tmp;
  }
  return r;
}

void LIRGenerator::increment_counter(address counter, BasicType type, int step) {
  LIR_Opr pointer = new_pointer_register();
  __ move(LIR_OprFact::intptrConst(counter), pointer);
  LIR_Address* addr = new LIR_Address(pointer, type);
  increment_counter(addr, step);
}

void LIRGenerator::increment_counter(LIR_Address* addr, int step) {
  LIR_Opr temp = new_register(addr->type());
  __ move(addr, temp);
  __ add(temp, load_immediate(step, addr->type()), temp);
  __ move(temp, addr);
}

void LIRGenerator::cmp_mem_int(LIR_Condition condition, LIR_Opr base, int disp, int c, CodeEmitInfo* info) {
  LIR_Opr o7opr = FrameMap::O7_opr;
  __ load(new LIR_Address(base, disp, T_INT), o7opr, info);
  __ cmp(condition, o7opr, c);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, int disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr o7opr = FrameMap::O7_opr;
  __ load(new LIR_Address(base, disp, type), o7opr, info);
  __ cmp(condition, reg, o7opr);
}


void LIRGenerator::cmp_reg_mem(LIR_Condition condition, LIR_Opr reg, LIR_Opr base, LIR_Opr disp, BasicType type, CodeEmitInfo* info) {
  LIR_Opr o7opr = FrameMap::O7_opr;
  __ load(new LIR_Address(base, disp, type), o7opr, info);
  __ cmp(condition, reg, o7opr);
}
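

// Strength-reduce a multiply by constant c: when c + 1 is a power of two,
// x * c == (x << log2(c + 1)) - x; when c - 1 is a power of two,
// x * c == (x << log2(c - 1)) + x.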
bool LIRGenerator::strength_reduce_multiply(LIR_Opr left, int c, LIR_Opr result, LIR_Opr tmp) {
  assert(left != result, "should be different registers");
  if (is_power_of_2(c + 1)) {
    __ shift_left(left, log2_intptr(c + 1), result);
    __ sub(result, left, result);
    return true;
  } else if (is_power_of_2(c - 1)) {
    __ shift_left(left, log2_intptr(c - 1), result);
    __ add(result, left, result);
    return true;
  }
  return false;
}
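

// 8-byte values whose biased SP offset is not 8-byte aligned must be
// stored with an unaligned (split) move.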
void LIRGenerator::store_stack_parameter(LIR_Opr item, ByteSize offset_from_sp) {
  BasicType t = item->type();
  LIR_Opr sp_opr = FrameMap::SP_opr;
  if ((t == T_LONG || t == T_DOUBLE) &&
      ((in_bytes(offset_from_sp) - STACK_BIAS) % 8 != 0)) {
    __ unaligned_move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  } else {
    __ move(item, new LIR_Address(sp_opr, in_bytes(offset_from_sp), t));
  }
}

//----------------------------------------------------------------------
//               visitor functions
//----------------------------------------------------------------------


void LIRGenerator::do_StoreIndexed(StoreIndexed* x) {
  assert(x->is_pinned(), "");
  bool needs_range_check = true;
  bool use_length = x->length() != NULL;
  bool obj_store = x->elt_type() == T_ARRAY || x->elt_type() == T_OBJECT;
  bool needs_store_check = obj_store && (x->value()->as_Constant() == NULL ||
                                         !get_jobject_constant(x->value())->is_null_object() ||
                                         x->should_profile());

  LIRItem array(x->array(), this);
  LIRItem index(x->index(), this);
  LIRItem value(x->value(), this);
  LIRItem length(this);

  array.load_item();
  index.load_nonconstant();

  if (use_length) {
    needs_range_check = x->compute_needs_range_check();
    if (needs_range_check) {
      length.set_instruction(x->length());
      length.load_item();
    }
  }
  if (needs_store_check) {
    value.load_item();
  } else {
    value.load_for_store(x->elt_type());
  }

  set_no_result(x);

  // the CodeEmitInfo must be duplicated for each different
  // LIR-instruction because spilling can occur anywhere between two
  // instructions and so the debug information must be different
  CodeEmitInfo* range_check_info = state_for(x);
  CodeEmitInfo* null_check_info = NULL;
  if (x->needs_null_check()) {
    null_check_info = new CodeEmitInfo(range_check_info);
  }

  // emit array address setup early so it schedules better
  LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), obj_store);

  if (GenerateRangeChecks && needs_range_check) {
    if (use_length) {
      __ cmp(lir_cond_belowEqual, length.result(), index.result());
      __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
    } else {
      array_range_check(array.result(), index.result(), null_check_info, range_check_info);
      // range_check also does the null check
      null_check_info = NULL;
    }
  }

  if (GenerateArrayStoreCheck && needs_store_check) {
    LIR_Opr tmp1 = FrameMap::G1_opr;
    LIR_Opr tmp2 = FrameMap::G3_opr;
    LIR_Opr tmp3 = FrameMap::G5_opr;

    CodeEmitInfo* store_check_info = new CodeEmitInfo(range_check_info);
    __ store_check(value.result(), array.result(), tmp1, tmp2, tmp3, store_check_info, x->profiled_method(), x->profiled_bci());
  }

  if (obj_store) {
    // Needs GC write barriers.
    pre_barrier(LIR_OprFact::address(array_addr), LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }
  __ move(value.result(), array_addr, null_check_info);
  if (obj_store) {
    // Precise card mark
    post_barrier(LIR_OprFact::address(array_addr), value.result());
  }
}


void LIRGenerator::do_MonitorEnter(MonitorEnter* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.load_item();

  set_no_result(x);

  LIR_Opr lock    = FrameMap::G1_opr;
  LIR_Opr scratch = FrameMap::G3_opr;
  LIR_Opr hdr     = FrameMap::G4_opr;

  CodeEmitInfo* info_for_exception = NULL;
  if (x->needs_null_check()) {
    info_for_exception = state_for(x);
  }

  // this CodeEmitInfo must not have the xhandlers because here the
  // object is already locked (xhandlers expects object to be unlocked)
  CodeEmitInfo* info = state_for(x, x->state(), true);
  monitor_enter(obj.result(), lock, hdr, scratch, x->monitor_no(), info_for_exception, info);
}


void LIRGenerator::do_MonitorExit(MonitorExit* x) {
  assert(x->is_pinned(), "");
  LIRItem obj(x->obj(), this);
  obj.dont_load_item();

  set_no_result(x);
  LIR_Opr lock     = FrameMap::G1_opr;
  LIR_Opr hdr      = FrameMap::G3_opr;
  LIR_Opr obj_temp = FrameMap::G4_opr;
  monitor_exit(obj_temp, lock, hdr, LIR_OprFact::illegalOpr, x->monitor_no());
}


// _ineg, _lneg, _fneg, _dneg
void LIRGenerator::do_NegateOp(NegateOp* x) {
  LIRItem value(x->x(), this);
  value.load_item();
  LIR_Opr reg = rlock_result(x);
  __ negate(value.result(), reg);
}


// for  _fadd, _fmul, _fsub, _fdiv, _frem
//      _dadd, _dmul, _dsub, _ddiv, _drem
void LIRGenerator::do_ArithmeticOp_FPU(ArithmeticOp* x) {
  switch (x->op()) {
    case Bytecodes::_fadd:
    case Bytecodes::_fmul:
    case Bytecodes::_fsub:
    case Bytecodes::_fdiv:
    case Bytecodes::_dadd:
    case Bytecodes::_dmul:
    case Bytecodes::_dsub:
    case Bytecodes::_ddiv: {
      LIRItem left(x->x(), this);
      LIRItem right(x->y(), this);
      left.load_item();
      right.load_item();
      rlock_result(x);
      arithmetic_op_fpu(x->op(), x->operand(), left.result(), right.result(), x->is_strictfp());
    }
    break;

    case Bytecodes::_frem:
    case Bytecodes::_drem: {
      address entry;
      switch (x->op()) {
      case Bytecodes::_frem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::frem);
        break;
      case Bytecodes::_drem:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::drem);
        break;
      default:
        ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->x(), x->y(), entry, x->type(), NULL);
      set_result(x, result);
    }
    break;

    default: ShouldNotReachHere();
  }
}


// for  _ladd, _lmul, _lsub, _ldiv, _lrem
void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
  switch (x->op()) {
  case Bytecodes::_lrem:
  case Bytecodes::_lmul:
  case Bytecodes::_ldiv: {

    if (x->op() == Bytecodes::_ldiv || x->op() == Bytecodes::_lrem) {
      LIRItem right(x->y(), this);
      right.load_item();

      CodeEmitInfo* info = state_for(x);
      LIR_Opr item = right.result();
      assert(item->is_register(), "must be");
      __ cmp(lir_cond_equal, item, LIR_OprFact::longConst(0));
      __ branch(lir_cond_equal, T_LONG, new DivByZeroStub(info));
    }

    address entry;
    switch (x->op()) {
    case Bytecodes::_lrem:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lrem);
      break; // the zero check of the divisor is done above
    case Bytecodes::_ldiv:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::ldiv);
      break; // the zero check of the divisor is done above
    case Bytecodes::_lmul:
      entry = CAST_FROM_FN_PTR(address, SharedRuntime::lmul);
      break;
    default:
      ShouldNotReachHere();
    }

    // order of arguments to runtime call is reversed.
    LIR_Opr result = call_runtime(x->y(), x->x(), entry, x->type(), NULL);
    set_result(x, result);
    break;
  }
  case Bytecodes::_ladd:
  case Bytecodes::_lsub: {
    LIRItem left(x->x(), this);
    LIRItem right(x->y(), this);
    left.load_item();
    right.load_item();
    rlock_result(x);

    arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
    break;
  }
  default: ShouldNotReachHere();
  }
}


// Returns whether item is an int constant that can be represented by a simm13
static bool is_simm13(LIR_Opr item) {
  if (item->is_constant() && item->type() == T_INT) {
    return Assembler::is_simm13(item->as_constant_ptr()->as_jint());
  } else {
    return false;
  }
}


// for: _iadd, _imul, _isub, _idiv, _irem
void LIRGenerator::do_ArithmeticOp_Int(ArithmeticOp* x) {
  bool is_div_rem = x->op() == Bytecodes::_idiv || x->op() == Bytecodes::_irem;
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  // missing test if instr is commutative and if we should swap
  right.load_nonconstant();
  assert(right.is_constant() || right.is_register(), "wrong state of right");
  left.load_item();
  rlock_result(x);
  if (is_div_rem) {
    CodeEmitInfo* info = state_for(x);
    LIR_Opr tmp = FrameMap::G1_opr;
    if (x->op() == Bytecodes::_irem) {
      __ irem(left.result(), right.result(), x->operand(), tmp, info);
    } else if (x->op() == Bytecodes::_idiv) {
      __ idiv(left.result(), right.result(), x->operand(), tmp, info);
    }
  } else {
    arithmetic_op_int(x->op(), x->operand(), left.result(), right.result(), FrameMap::G1_opr);
  }
}


void LIRGenerator::do_ArithmeticOp(ArithmeticOp* x) {
  ValueTag tag = x->type()->tag();
  assert(x->x()->type()->tag() == tag && x->y()->type()->tag() == tag, "wrong parameters");
  switch (tag) {
    case floatTag:
    case doubleTag: do_ArithmeticOp_FPU(x);  return;
    case longTag:   do_ArithmeticOp_Long(x); return;
    case intTag:    do_ArithmeticOp_Int(x);  return;
  }
  ShouldNotReachHere();
}


// _ishl, _lshl, _ishr, _lshr, _iushr, _lushr
void LIRGenerator::do_ShiftOp(ShiftOp* x) {
  LIRItem value(x->x(), this);
  LIRItem count(x->y(), this);
  // Long shift destroys count register
  if (value.type()->is_long()) {
    count.set_destroys_register();
  }
  value.load_item();
  // the old backend doesn't support this
  if (count.is_constant() && count.type()->as_IntConstant() != NULL && value.type()->is_int()) {
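    // per JVMS, an int shift uses only the low five bits of the count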
    jint c = count.get_jint_constant() & 0x1f;
    assert(c >= 0 && c < 32, "should be small");
    count.dont_load_item();
  } else {
    count.load_item();
  }
  LIR_Opr reg = rlock_result(x);
  shift_op(x->op(), reg, value.result(), count.result(), LIR_OprFact::illegalOpr);
}


// _iand, _land, _ior, _lor, _ixor, _lxor
void LIRGenerator::do_LogicOp(LogicOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);

  left.load_item();
  right.load_nonconstant();
  LIR_Opr reg = rlock_result(x);

  logic_op(x->op(), reg, left.result(), right.result());
}


// _lcmp, _fcmpl, _fcmpg, _dcmpl, _dcmpg
void LIRGenerator::do_CompareOp(CompareOp* x) {
  LIRItem left(x->x(), this);
  LIRItem right(x->y(), this);
  left.load_item();
  right.load_item();
  LIR_Opr reg = rlock_result(x);
  if (x->x()->type()->is_float_kind()) {
    Bytecodes::Code code = x->op();
    __ fcmp2int(left.result(), right.result(), reg, (code == Bytecodes::_fcmpl || code == Bytecodes::_dcmpl));
  } else if (x->x()->type()->tag() == longTag) {
    __ lcmp2int(left.result(), right.result(), reg);
  } else {
    Unimplemented();
  }
}


void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
  assert(x->number_of_arguments() == 3, "wrong type");
  LIRItem obj       (x->argument_at(0), this);  // AtomicLong object
  LIRItem cmp_value (x->argument_at(1), this);  // value to compare with field
  LIRItem new_value (x->argument_at(2), this);  // replace field with new_value if it matches cmp_value

  obj.load_item();
  cmp_value.load_item();
  new_value.load_item();

  // generate compare-and-swap and produce zero condition if swap occurs
  int value_offset = sun_misc_AtomicLongCSImpl::value_offset();
  LIR_Opr addr = FrameMap::O7_opr;
  __ add(obj.result(), LIR_OprFact::intConst(value_offset), addr);
  LIR_Opr t1 = FrameMap::G1_opr;  // temp for 64-bit value
  LIR_Opr t2 = FrameMap::G3_opr;  // temp for 64-bit value
  __ cas_long(addr, cmp_value.result(), new_value.result(), t1, t2);

  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0), result, T_LONG);
}


void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
  assert(x->number_of_arguments() == 4, "wrong type");
  LIRItem obj   (x->argument_at(0), this);  // object
  LIRItem offset(x->argument_at(1), this);  // offset of field
  LIRItem cmp   (x->argument_at(2), this);  // value to compare with field
  LIRItem val   (x->argument_at(3), this);  // replace field with val if matches cmp

  // Use temps to avoid kills
  LIR_Opr t1 = FrameMap::G1_opr;
  LIR_Opr t2 = FrameMap::G3_opr;
  LIR_Opr addr = new_pointer_register();

  // get address of field
  obj.load_item();
  offset.load_item();
  cmp.load_item();
  val.load_item();

  __ add(obj.result(), offset.result(), addr);

  if (type == objectType) {  // Write-barrier needed for Object fields.
    pre_barrier(addr, LIR_OprFact::illegalOpr /* pre_val */,
                true /* do_load */, false /* patch */, NULL);
  }

  if (type == objectType)
    __ cas_obj(addr, cmp.result(), val.result(), t1, t2);
  else if (type == intType)
    __ cas_int(addr, cmp.result(), val.result(), t1, t2);
  else if (type == longType)
    __ cas_long(addr, cmp.result(), val.result(), t1, t2);
  else {
    ShouldNotReachHere();
  }
  // generate conditional move of boolean result
  LIR_Opr result = rlock_result(x);
  __ cmove(lir_cond_equal, LIR_OprFact::intConst(1), LIR_OprFact::intConst(0),
           result, as_BasicType(type));
  if (type == objectType) {  // Write-barrier needed for Object fields.
    // Precise card mark since could either be object or array
    post_barrier(addr, val.result());
  }
}
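

// On SPARC only abs and sqrt are inlined; the transcendental intrinsics
// below (including _dexp and _dpow, the subject of this change) are
// routed to the SharedRuntime entry points.  The x87-based fast paths
// live in the x86 back end.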
void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
  switch (x->id()) {
    case vmIntrinsics::_dabs:
    case vmIntrinsics::_dsqrt: {
      assert(x->number_of_arguments() == 1, "wrong type");
      LIRItem value(x->argument_at(0), this);
      value.load_item();
      LIR_Opr dst = rlock_result(x);

      switch (x->id()) {
      case vmIntrinsics::_dsqrt: {
        __ sqrt(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      case vmIntrinsics::_dabs: {
        __ abs(value.result(), dst, LIR_OprFact::illegalOpr);
        break;
      }
      }
      break;
    }
    case vmIntrinsics::_dlog10: // fall through
    case vmIntrinsics::_dlog:   // fall through
    case vmIntrinsics::_dsin:   // fall through
    case vmIntrinsics::_dtan:   // fall through
    case vmIntrinsics::_dcos:   // fall through
    case vmIntrinsics::_dexp: {
      assert(x->number_of_arguments() == 1, "wrong type");

      address runtime_entry = NULL;
      switch (x->id()) {
      case vmIntrinsics::_dsin:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dsin);
        break;
      case vmIntrinsics::_dcos:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dcos);
        break;
      case vmIntrinsics::_dtan:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dtan);
        break;
      case vmIntrinsics::_dlog:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog);
        break;
      case vmIntrinsics::_dlog10:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dlog10);
        break;
      case vmIntrinsics::_dexp:
        runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dexp);
        break;
      default:
        ShouldNotReachHere();
      }

      LIR_Opr result = call_runtime(x->argument_at(0), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
    case vmIntrinsics::_dpow: {
      assert(x->number_of_arguments() == 2, "wrong type");
      address runtime_entry = CAST_FROM_FN_PTR(address, SharedRuntime::dpow);
      LIR_Opr result = call_runtime(x->argument_at(0), x->argument_at(1), runtime_entry, x->type(), NULL);
      set_result(x, result);
      break;
    }
  }
}


void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
  assert(x->number_of_arguments() == 5, "wrong type");

  // Make all state_for calls early since they can emit code
  CodeEmitInfo* info = state_for(x, x->state());

  // Note: spill caller save before setting the item
  LIRItem src     (x->argument_at(0), this);
  LIRItem src_pos (x->argument_at(1), this);
  LIRItem dst     (x->argument_at(2), this);
  LIRItem dst_pos (x->argument_at(3), this);
  LIRItem length  (x->argument_at(4), this);
  // load all values in callee_save_registers, as this makes the
  // parameter passing to the fast case simpler
  src.load_item_force     (rlock_callee_saved(T_OBJECT));
  src_pos.load_item_force (rlock_callee_saved(T_INT));
  dst.load_item_force     (rlock_callee_saved(T_OBJECT));
  dst_pos.load_item_force (rlock_callee_saved(T_INT));
  length.load_item_force  (rlock_callee_saved(T_INT));

  int flags;
  ciArrayKlass* expected_type;
  arraycopy_helper(x, &flags, &expected_type);

  __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(),
               length.result(), rlock_callee_saved(T_INT),
               expected_type, flags, info);
  set_no_result(x);
}

// _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f
// _i2b, _i2c, _i2s
void LIRGenerator::do_Convert(Convert* x) {

  switch (x->op()) {
    case Bytecodes::_f2l:
    case Bytecodes::_d2l:
    case Bytecodes::_d2i:
    case Bytecodes::_l2f:
    case Bytecodes::_l2d: {

      address entry;
      switch (x->op()) {
      case Bytecodes::_l2f:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2f);
        break;
      case Bytecodes::_l2d:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::l2d);
        break;
      case Bytecodes::_f2l:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::f2l);
        break;
      case Bytecodes::_d2l:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2l);
        break;
      case Bytecodes::_d2i:
        entry = CAST_FROM_FN_PTR(address, SharedRuntime::d2i);
        break;
      default:
        ShouldNotReachHere();
      }
      LIR_Opr result = call_runtime(x->value(), entry, x->type(), NULL);
      set_result(x, result);
      break;
    }

    case Bytecodes::_i2f:
    case Bytecodes::_i2d: {
      LIRItem value(x->value(), this);

      LIR_Opr reg = rlock_result(x);
      // To convert an int to double, we need to load the 32-bit int
      // from memory into a single precision floating point register
      // (even numbered). Then the sparc fitod instruction takes care
      // of the conversion. This is a bit ugly, but is the best way to
      // get the int value in a single precision floating point register
      value.load_item();
      LIR_Opr tmp = force_to_spill(value.result(), T_FLOAT);
      __ convert(x->op(), tmp, reg);
      break;
    }

    case Bytecodes::_i2l:
    case Bytecodes::_i2b:
    case Bytecodes::_i2c:
    case Bytecodes::_i2s:
    case Bytecodes::_l2i:
    case Bytecodes::_f2d:
    case Bytecodes::_d2f: { // inline code
      LIRItem value(x->value(), this);

      value.load_item();
      LIR_Opr reg = rlock_result(x);
      __ convert(x->op(), value.result(), reg, false);
    }
    break;

    case Bytecodes::_f2i: {
      LIRItem value(x->value(), this);
      value.set_destroys_register();
      value.load_item();
      LIR_Opr reg = rlock_result(x);
      set_vreg_flag(reg, must_start_in_memory);
      __ convert(x->op(), value.result(), reg, false);
    }
    break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::do_NewInstance(NewInstance* x) {
  // This instruction can be deoptimized in the slow path: use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());
#ifndef PRODUCT
  if (PrintNotLoaded && !x->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", x->printable_bci());
  }
#endif
  CodeEmitInfo* info = state_for(x, x->state());
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
  LIR_Opr klass_reg = FrameMap::G5_oop_opr;
  new_instance(reg, x->klass(), tmp1, tmp2, tmp3, tmp4, klass_reg, info);
  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewTypeArray(NewTypeArray* x) {
  // Evaluate state_for early since it may emit code
  CodeEmitInfo* info = state_for(x, x->state());

  LIRItem length(x->length(), this);
  length.load_item();

  LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
  LIR_Opr klass_reg = FrameMap::G5_oop_opr;
  LIR_Opr len = length.result();
  BasicType elem_type = x->elt_type();

  __ oop2reg(ciTypeArrayKlass::make(elem_type)->constant_encoding(), klass_reg);

  CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewObjectArray(NewObjectArray* x) {
  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* info = state_for(x, x->state());
  // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction
  // and therefore provide the state before the parameters have been consumed
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }

  LIRItem length(x->length(), this);
  length.load_item();

  const LIR_Opr reg = result_register_for(x->type());
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  LIR_Opr tmp4 = FrameMap::O1_oop_opr;
  LIR_Opr klass_reg = FrameMap::G5_oop_opr;
  LIR_Opr len = length.result();

  CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info);
  ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass());
  if (obj == ciEnv::unloaded_ciobjarrayklass()) {
    BAILOUT("encountered unloaded_ciobjarrayklass due to out of memory error");
  }
  jobject2reg_with_patching(klass_reg, obj, patching_info);
  __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, T_OBJECT, klass_reg, slow_path);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
  Values* dims = x->dims();
  int i = dims->length();
  LIRItemList* items = new LIRItemList(dims->length(), NULL);
  while (i-- > 0) {
    LIRItem* size = new LIRItem(dims->at(i), this);
    items->at_put(i, size);
  }

  // Evaluate state_for early since it may emit code.
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());

    // cannot re-use same xhandlers for multiple CodeEmitInfos, so
    // clone all handlers.  This is handled transparently in other
    // places by the CodeEmitInfo cloning logic but is handled
    // specially here because a stub isn't being used.
    x->set_exception_handlers(new XHandlers(x->exception_handlers()));
  }
  CodeEmitInfo* info = state_for(x, x->state());

  i = dims->length();
  while (i-- > 0) {
    LIRItem* size = items->at(i);
    size->load_item();
    store_stack_parameter(size->result(),
                          in_ByteSize(STACK_BIAS +
                                      frame::memory_parameter_word_sp_offset * wordSize +
                                      i * sizeof(jint)));
  }

  // This instruction can be deoptimized in the slow path: use
  // O0 as result register.
  const LIR_Opr reg = result_register_for(x->type());
  jobject2reg_with_patching(reg, x->klass(), patching_info);
  LIR_Opr rank = FrameMap::O1_opr;
  __ move(LIR_OprFact::intConst(x->rank()), rank);
  LIR_Opr varargs = FrameMap::as_pointer_opr(O2);
  int offset_from_sp = (frame::memory_parameter_word_sp_offset * wordSize) + STACK_BIAS;
  __ add(FrameMap::SP_opr,
         LIR_OprFact::intptrConst(offset_from_sp),
         varargs);
  LIR_OprList* args = new LIR_OprList(3);
  args->append(reg);
  args->append(rank);
  args->append(varargs);
  __ call_runtime(Runtime1::entry_for(Runtime1::new_multi_array_id),
                  LIR_OprFact::illegalOpr,
                  reg, args, info);

  LIR_Opr result = rlock_result(x);
  __ move(reg, result);
}


void LIRGenerator::do_BlockBegin(BlockBegin* x) {
}


void LIRGenerator::do_CheckCast(CheckCast* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || (PatchALot && !x->is_incompatible_class_change_check())) {
    // must do this before locking the destination register as an oop register,
    // and before the obj is loaded (so x->obj()->item() is valid for creating a debug info location)
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  CodeStub* stub;
  CodeEmitInfo* info_for_exception = state_for(x);

  if (x->is_incompatible_class_change_check()) {
    assert(patching_info == NULL, "can't patch this");
    stub = new SimpleExceptionStub(Runtime1::throw_incompatible_class_change_error_id, LIR_OprFact::illegalOpr, info_for_exception);
  } else {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  __ checkcast(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_InstanceOf(InstanceOf* x) {
  LIRItem obj(x->obj(), this);
  CodeEmitInfo* patching_info = NULL;
  if (!x->klass()->is_loaded() || PatchALot) {
    patching_info = state_for(x, x->state_before());
  }
  // ensure the result register is not the input register because the result is initialized before the patching safepoint
  obj.load_item();
  LIR_Opr out_reg = rlock_result(x);
  LIR_Opr tmp1 = FrameMap::G1_oop_opr;
  LIR_Opr tmp2 = FrameMap::G3_oop_opr;
  LIR_Opr tmp3 = FrameMap::G4_oop_opr;
  __ instanceof(out_reg, obj.result(), x->klass(), tmp1, tmp2, tmp3,
                x->direct_compare(), patching_info,
                x->profiled_method(), x->profiled_bci());
}


void LIRGenerator::do_If(If* x) {
  assert(x->number_of_sux() == 2, "inconsistency");
  ValueTag tag = x->x()->type()->tag();
  LIRItem xitem(x->x(), this);
  LIRItem yitem(x->y(), this);
  LIRItem* xin = &xitem;
  LIRItem* yin = &yitem;
  If::Condition cond = x->cond();

  if (tag == longTag) {
    // for longs, only conditions "eql", "neq", "lss", "geq" are valid;
    // mirror for other conditions
    if (cond == If::gtr || cond == If::leq) {
      // swap inputs
      cond = Instruction::mirror(cond);
      xin = &yitem;
      yin = &xitem;
    }
    xin->set_destroys_register();
  }

  LIR_Opr left = LIR_OprFact::illegalOpr;
  LIR_Opr right = LIR_OprFact::illegalOpr;

  xin->load_item();
  left = xin->result();

  if (is_simm13(yin->result())) {
    // inline int constants which are small enough to be immediate operands
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == longTag && yin->is_constant() && yin->get_jlong_constant() == 0 &&
             (cond == If::eql || cond == If::neq)) {
    // inline long zero
    right = LIR_OprFact::value_type(yin->value()->type());
  } else if (tag == objectTag && yin->is_constant() && (yin->get_jobject_constant()->is_null_object())) {
    right = LIR_OprFact::value_type(yin->value()->type());
  } else {
    yin->load_item();
    right = yin->result();
  }
  set_no_result(x);

  // add safepoint before generating condition code so it can be recomputed
  if (x->is_safepoint()) {
    // increment backedge counter if needed
    increment_backedge_counter(state_for(x, x->state_before()), x->profiled_bci());
    __ safepoint(new_register(T_INT), state_for(x, x->state_before()));
  }

  __ cmp(lir_cond(cond), left, right);
  // Generate branch profiling. Profiling code doesn't kill flags.
  profile_branch(x, cond);
  move_to_phi(x->state());
  if (x->x()->type()->is_float_kind()) {
    __ branch(lir_cond(cond), right->type(), x->tsux(), x->usux());
  } else {
    __ branch(lir_cond(cond), right->type(), x->tsux());
  }
  assert(x->default_sux() == x->fsux(), "wrong destination above");
  __ jump(x->default_sux());
}


LIR_Opr LIRGenerator::getThreadPointer() {
  return FrameMap::as_pointer_opr(G2);
}


void LIRGenerator::trace_block_entry(BlockBegin* block) {
  __ move(LIR_OprFact::intConst(block->block_id()), FrameMap::O0_opr);
  LIR_OprList* args = new LIR_OprList(1);
  args->append(FrameMap::O0_opr);
  address func = CAST_FROM_FN_PTR(address, Runtime1::trace_block_entry);
  __ call_runtime_leaf(func, rlock_callee_saved(T_INT), LIR_OprFact::illegalOpr, args);
}
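

// On 64-bit (_LP64) SPARC a volatile long/double is a single naturally
// atomic 64-bit access, so a plain load/store suffices; 32-bit builds
// must use the special two-word volatile moves.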
void LIRGenerator::volatile_field_store(LIR_Opr value, LIR_Address* address,
                                        CodeEmitInfo* info) {
#ifdef _LP64
  __ store(value, address, info);
#else
  __ volatile_store_mem_reg(value, address, info);
#endif
}

void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
                                       CodeEmitInfo* info) {
#ifdef _LP64
  __ load(address, result, info);
#else
  __ volatile_load_mem_reg(address, result, info);
#endif
}


void LIRGenerator::put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data,
                                     BasicType type, bool is_volatile) {
  LIR_Opr base_op = src;
  LIR_Opr index_op = offset;

  bool is_obj = (type == T_ARRAY || type == T_OBJECT);
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_store_unsafe_reg(data, src, offset, type, NULL, lir_patch_none);
  } else
#endif
    {
      if (type == T_BOOLEAN) {
        type = T_BYTE;
      }
      LIR_Address* addr;
      if (type == T_ARRAY || type == T_OBJECT) {
        LIR_Opr tmp = new_pointer_register();
        __ add(base_op, index_op, tmp);
        addr = new LIR_Address(tmp, type);
      } else {
        addr = new LIR_Address(base_op, index_op, type);
      }

      if (is_obj) {
        pre_barrier(LIR_OprFact::address(addr), LIR_OprFact::illegalOpr /* pre_val */,
                    true /* do_load */, false /* patch */, NULL);
        // _bs->c1_write_barrier_pre(this, LIR_OprFact::address(addr));
      }
      __ move(data, addr);
      if (is_obj) {
        // This address is precise
        post_barrier(LIR_OprFact::address(addr), data);
      }
    }
}


void LIRGenerator::get_Object_unsafe(LIR_Opr dst, LIR_Opr src, LIR_Opr offset,
                                     BasicType type, bool is_volatile) {
#ifndef _LP64
  if (is_volatile && type == T_LONG) {
    __ volatile_load_unsafe_reg(src, offset, dst, type, NULL, lir_patch_none);
  } else
#endif
    {
      LIR_Address* addr = new LIR_Address(src, offset, type);
      __ load(addr, dst);
    }
}