/*
 * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#include "precompiled.hpp"
#include "c1/c1_Defs.hpp"
#include "c1/c1_Compilation.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_LIRGenerator.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciArrayKlass.hpp"
#include "ci/ciInstance.hpp"
#include "ci/ciObjArray.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef ASSERT
#define __ gen()->lir(__FILE__, __LINE__)->
#else
#define __ gen()->lir()->
#endif

#ifndef PATCHED_ADDR
#define PATCHED_ADDR  (max_jint)
#endif

void PhiResolverState::reset(int max_vregs) {
  // Initialize array sizes
  _virtual_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _virtual_operands.trunc_to(0);
  _other_operands.at_put_grow(max_vregs - 1, NULL, NULL);
  _other_operands.trunc_to(0);
  _vreg_table.at_put_grow(max_vregs - 1, NULL, NULL);
  _vreg_table.trunc_to(0);
}


//--------------------------------------------------------------
// PhiResolver

// Resolves cycles:
//
//  r1 := r2  becomes  temp := r1
//  r2 := r1           r1 := r2
//                     r2 := temp
// and orders moves:
//
//  r2 := r3  becomes  r1 := r2
//  r1 := r2           r2 := r3
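//
// Illustrative usage (a sketch; this mirrors move_to_phi() below): register
// each parallel assignment with resolver.move(src, dest) while the resolver
// is live; its destructor then emits the moves in a safe order, breaking any
// cycle through a temporary register:
//
//   PhiResolver resolver(gen, max_vregs);
//   resolver.move(cur_opr, phi_opr);   // one call per phi input
//   // moves are emitted when 'resolver' goes out of scope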

PhiResolver::PhiResolver(LIRGenerator* gen, int max_vregs)
 : _gen(gen)
 , _state(gen->resolver_state())
 , _temp(LIR_OprFact::illegalOpr)
{
  // reinitialize the shared state arrays
  _state.reset(max_vregs);
}


void PhiResolver::emit_move(LIR_Opr src, LIR_Opr dest) {
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  __ move(src, dest);
}


void PhiResolver::move_temp_to(LIR_Opr dest) {
  assert(_temp->is_valid(), "");
  emit_move(_temp, dest);
  NOT_PRODUCT(_temp = LIR_OprFact::illegalOpr);
}


void PhiResolver::move_to_temp(LIR_Opr src) {
  assert(_temp->is_illegal(), "");
  _temp = _gen->new_register(src->type());
  emit_move(src, _temp);
}


// Traverse the assignment graph in depth-first order and generate moves in post order,
// i.e. for the two assignments b := c, a := b, start with node c:
//   Call graph: move(NULL, c) -> move(c, b) -> move(b, a)
//   Generates moves in this order: move b to a, then move c to b.
// For the cycle a := b, b := a, start with node a:
//   Call graph: move(NULL, a) -> move(a, b) -> move(b, a)
//   Generates moves in this order: move b to temp, move a to b, move temp to a.
void PhiResolver::move(ResolveNode* src, ResolveNode* dest) {
  if (!dest->visited()) {
    dest->set_visited();
    for (int i = dest->no_of_destinations() - 1; i >= 0; i--) {
      move(dest, dest->destination_at(i));
    }
  } else if (!dest->start_node()) {
    // cycle in graph detected
    assert(_loop == NULL, "only one loop valid!");
    _loop = dest;
    move_to_temp(src->operand());
    return;
  } // else dest is a start node

  if (!dest->assigned()) {
    if (_loop == dest) {
      move_temp_to(dest->operand());
      dest->set_assigned();
    } else if (src != NULL) {
      emit_move(src->operand(), dest->operand());
      dest->set_assigned();
    }
  }
}


PhiResolver::~PhiResolver() {
  int i;
  // resolve any cycles in moves from and to virtual registers
  for (i = virtual_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = virtual_operands()[i];
    if (!node->visited()) {
      _loop = NULL;
      move(NULL, node);
      node->set_start_node();
      assert(_temp->is_illegal(), "move_temp_to() call missing");
    }
  }

  // generate moves from non-virtual registers to arbitrary destinations
  for (i = other_operands().length() - 1; i >= 0; i--) {
    ResolveNode* node = other_operands()[i];
    for (int j = node->no_of_destinations() - 1; j >= 0; j--) {
      emit_move(node->operand(), node->destination_at(j)->operand());
    }
  }
}


ResolveNode* PhiResolver::create_node(LIR_Opr opr, bool source) {
  ResolveNode* node;
  if (opr->is_virtual()) {
    int vreg_num = opr->vreg_number();
    node = vreg_table().at_grow(vreg_num, NULL);
    assert(node == NULL || node->operand() == opr, "");
    if (node == NULL) {
      node = new ResolveNode(opr);
      vreg_table()[vreg_num] = node;
    }
    // Make sure that all virtual operands show up in the list when
    // they are used as the source of a move.
    if (source && !virtual_operands().contains(node)) {
      virtual_operands().append(node);
    }
  } else {
    assert(source, "");
    node = new ResolveNode(opr);
    other_operands().append(node);
  }
  return node;
}


void PhiResolver::move(LIR_Opr src, LIR_Opr dest) {
  assert(dest->is_virtual(), "");
  // tty->print("move "); src->print(); tty->print(" to "); dest->print(); tty->cr();
  assert(src->is_valid(), "");
  assert(dest->is_valid(), "");
  ResolveNode* source = source_node(src);
  source->append(destination_node(dest));
}


//--------------------------------------------------------------
// LIRItem

void LIRItem::set_result(LIR_Opr opr) {
  assert(value()->operand()->is_illegal() || value()->operand()->is_constant(), "operand should never change");
  value()->set_operand(opr);

  if (opr->is_virtual()) {
    _gen->_instruction_for_operand.at_put_grow(opr->vreg_number(), value(), NULL);
  }

  _result = opr;
}

void LIRItem::load_item() {
  if (result()->is_illegal()) {
    // update the item's result
    _result = value()->operand();
  }
  if (!result()->is_register()) {
    LIR_Opr reg = _gen->new_register(value()->type());
    __ move(result(), reg);
    if (result()->is_constant()) {
      _result = reg;
    } else {
      set_result(reg);
    }
  }
}


void LIRItem::load_for_store(BasicType type) {
  if (_gen->can_store_as_constant(value(), type)) {
    _result = value()->operand();
    if (!_result->is_constant()) {
      _result = LIR_OprFact::value_type(value()->type());
    }
  } else if (type == T_BYTE || type == T_BOOLEAN) {
    load_byte_item();
  } else {
    load_item();
  }
}

void LIRItem::load_item_force(LIR_Opr reg) {
  LIR_Opr r = result();
  if (r != reg) {
#if !defined(ARM) && !defined(E500V2)
    if (r->type() != reg->type()) {
      // moves between different types need an intervening spill slot
      r = _gen->force_to_spill(r, reg->type());
    }
#endif
    __ move(r, reg);
    _result = reg;
  }
}

ciObject* LIRItem::get_jobject_constant() const {
  ObjectType* oc = type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}


jint LIRItem::get_jint_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_IntConstant() != NULL, "type check");
  return type()->as_IntConstant()->value();
}


jint LIRItem::get_address_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_AddressConstant() != NULL, "type check");
  return type()->as_AddressConstant()->value();
}


jfloat LIRItem::get_jfloat_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_FloatConstant() != NULL, "type check");
  return type()->as_FloatConstant()->value();
}


jdouble LIRItem::get_jdouble_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_DoubleConstant() != NULL, "type check");
  return type()->as_DoubleConstant()->value();
}


jlong LIRItem::get_jlong_constant() const {
  assert(is_constant() && value() != NULL, "");
  assert(type()->as_LongConstant() != NULL, "type check");
  return type()->as_LongConstant()->value();
}


//--------------------------------------------------------------


void LIRGenerator::init() {
  _bs = Universe::heap()->barrier_set();
#ifdef MIPS64
  assert(_bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)_bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
  //_card_table_base = new LIR_Const((intptr_t)ct->byte_map_base);
  //FIXME, untested in 32bit. by aoqi
  _card_table_base = new LIR_Const(ct->byte_map_base);
#endif
}


void LIRGenerator::block_do_prolog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    block->print();
  }
#endif

  // set up the list of LIR instructions
  assert(block->lir() == NULL, "LIR list already computed for this block");
  _lir = new LIR_List(compilation(), block);
  block->set_lir(_lir);

  __ branch_destination(block->label());

  if (LIRTraceExecution &&
      Compilation::current()->hir()->start()->block_id() != block->block_id() &&
      !block->is_set(BlockBegin::exception_entry_flag)) {
    assert(block->lir()->instructions_list()->length() == 1, "should come right after br_dst");
    trace_block_entry(block);
  }
}


void LIRGenerator::block_do_epilog(BlockBegin* block) {
#ifndef PRODUCT
  if (PrintIRWithLIR) {
    tty->cr();
  }
#endif

  // LIR_Opr for unpinned constants shouldn't be referenced by other
  // blocks so clear them out after processing the block.
  for (int i = 0; i < _unpinned_constants.length(); i++) {
    _unpinned_constants.at(i)->clear_operand();
  }
  _unpinned_constants.trunc_to(0);

  // clear out any registers for other local constants
  _constants.trunc_to(0);
  _reg_for_constants.trunc_to(0);
}


void LIRGenerator::block_do(BlockBegin* block) {
  CHECK_BAILOUT();

  block_do_prolog(block);
  set_block(block);

  for (Instruction* instr = block; instr != NULL; instr = instr->next()) {
    if (instr->is_pinned()) do_root(instr);
  }

  set_block(NULL);
  block_do_epilog(block);
}


//-------------------------LIRGenerator-----------------------------

// This is where the tree-walk starts; instr must be a root.
void LIRGenerator::do_root(Value instr) {
  CHECK_BAILOUT();

  InstructionMark im(compilation(), instr);

  assert(instr->is_pinned(), "use only with roots");
  assert(instr->subst() == instr, "shouldn't have missed substitution");

  instr->visit(this);

  assert(!instr->has_uses() || instr->operand()->is_valid() ||
         instr->as_Constant() != NULL || bailed_out(), "invalid item set");
}


// This is called for each node in the tree; the walk stops if a root is reached
void LIRGenerator::walk(Value instr) {
  InstructionMark im(compilation(), instr);
  // stop the walk when we encounter a root
  if ((instr->is_pinned() && instr->as_Phi() == NULL) || instr->operand()->is_valid()) {
    assert(instr->operand() != LIR_OprFact::illegalOpr || instr->as_Constant() != NULL, "this root has not yet been visited");
  } else {
    assert(instr->subst() == instr, "shouldn't have missed substitution");
    instr->visit(this);
    // assert(instr->use_count() > 0 || instr->as_Phi() != NULL, "leaf instruction must have a use");
  }
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x, ValueStack* state, bool ignore_xhandler) {
  assert(state != NULL, "state must be defined");

#ifndef PRODUCT
  state->verify();
#endif

  ValueStack* s = state;
  for_each_state(s) {
    if (s->kind() == ValueStack::EmptyExceptionState) {
      assert(s->stack_size() == 0 && s->locals_size() == 0 && (s->locks_size() == 0 || s->locks_size() == 1), "state must be empty");
      continue;
    }

    int index;
    Value value;
    for_each_stack_value(s, index, value) {
      assert(value->subst() == value, "missed substitution");
      if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
        walk(value);
        assert(value->operand()->is_valid(), "must be evaluated now");
      }
    }

    int bci = s->bci();
    IRScope* scope = s->scope();
    ciMethod* method = scope->method();

    MethodLivenessResult liveness = method->liveness_at_bci(bci);
    if (bci == SynchronizationEntryBCI) {
      if (x->as_ExceptionObject() || x->as_Throw()) {
        // all locals are dead on exit from the synthetic unlocker
        liveness.clear();
      } else {
        assert(x->as_MonitorEnter() || x->as_ProfileInvoke(), "only other cases are MonitorEnter and ProfileInvoke");
      }
    }
    if (!liveness.is_valid()) {
      // Degenerate or breakpointed method.
      bailout("Degenerate or breakpointed method");
    } else {
      assert((int)liveness.size() == s->locals_size(), "error in use of liveness");
      for_each_local_value(s, index, value) {
        assert(value->subst() == value, "missed substitution");
        if (liveness.at(index) && !value->type()->is_illegal()) {
          if (!value->is_pinned() && value->as_Constant() == NULL && value->as_Local() == NULL) {
            walk(value);
            assert(value->operand()->is_valid(), "must be evaluated now");
          }
        } else {
          // NULL out this local so that linear scan can assume that all non-NULL values are live.
          s->invalidate_local(index);
        }
      }
    }
  }

  return new CodeEmitInfo(state, ignore_xhandler ? NULL : x->exception_handlers(), x->check_flag(Instruction::DeoptimizeOnException));
}


CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {
  return state_for(x, x->exception_state());
}


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
  /* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
   * is active and the class hasn't yet been resolved we need to emit a patch that resolves
   * the class. */
  if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
    assert(info != NULL, "info must be set if class is not loaded");
    __ klass2reg_patch(NULL, r, info);
  } else {
    // no patching needed
    __ metadata2reg(obj->constant_encoding(), r);
  }
}


void LIRGenerator::array_range_check(LIR_Opr array, LIR_Opr index,
                                     CodeEmitInfo* null_check_info, CodeEmitInfo* range_check_info) {
  CodeStub* stub = new RangeCheckStub(range_check_info, index);
  if (index->is_constant()) {
#ifndef MIPS64
    cmp_mem_int(lir_cond_belowEqual, array, arrayOopDesc::length_offset_in_bytes(),
                index->as_jint(), null_check_info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, null_check_info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS64
    cmp_reg_mem(lir_cond_aboveEqual, index, array,
                arrayOopDesc::length_offset_in_bytes(), T_INT, null_check_info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(array, arrayOopDesc::length_offset_in_bytes(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, null_check_info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
}


void LIRGenerator::nio_range_check(LIR_Opr buffer, LIR_Opr index, LIR_Opr result, CodeEmitInfo* info) {
  CodeStub* stub = new RangeCheckStub(info, index, true);
  if (index->is_constant()) {
#ifndef MIPS64
    cmp_mem_int(lir_cond_belowEqual, buffer, java_nio_Buffer::limit_offset(), index->as_jint(), info);
    __ branch(lir_cond_belowEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    LIR_Opr right = LIR_OprFact::intConst(index->as_jint());
    __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
    __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
#endif
  } else {
#ifndef MIPS64
    cmp_reg_mem(lir_cond_aboveEqual, index, buffer,
                java_nio_Buffer::limit_offset(), T_INT, info);
    __ branch(lir_cond_aboveEqual, T_INT, stub); // forward branch
#else
    LIR_Opr left = index;
    LIR_Opr right = LIR_OprFact::address(new LIR_Address(buffer, java_nio_Buffer::limit_offset(), T_INT));
    __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
    __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
#endif
  }
  __ move(index, result);
}


void LIRGenerator::arithmetic_op(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp_op, CodeEmitInfo* info) {
  LIR_Opr result_op = result;
  LIR_Opr left_op   = left;
  LIR_Opr right_op  = right;
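
  // On a two-operand-form target (TwoOperandLIRForm, e.g. x86, where an
  // instruction's destination doubles as its left input), the left operand
  // must first be materialized into the result register: a three-operand
  // "add left, right -> result" becomes "move left -> result; add result, right".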
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch (code) {
    case Bytecodes::_dadd:
    case Bytecodes::_fadd:
    case Bytecodes::_ladd:
    case Bytecodes::_iadd: __ add(left_op, right_op, result_op); break;
    case Bytecodes::_fmul:
    case Bytecodes::_lmul: __ mul(left_op, right_op, result_op); break;

    case Bytecodes::_dmul:
      {
        if (is_strictfp) {
          __ mul_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ mul(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_imul:
      {
        bool did_strength_reduce = false;

        if (right->is_constant()) {
          int c = right->as_jint();
          if (is_power_of_2(c)) {
            // do not need tmp here
            __ shift_left(left_op, exact_log2(c), result_op);
            did_strength_reduce = true;
          } else {
            did_strength_reduce = strength_reduce_multiply(left_op, c, result_op, tmp_op);
          }
        }
        // we couldn't strength reduce so just emit the multiply
        if (!did_strength_reduce) {
          __ mul(left_op, right_op, result_op);
        }
      }
      break;

    case Bytecodes::_dsub:
    case Bytecodes::_fsub:
    case Bytecodes::_lsub:
    case Bytecodes::_isub: __ sub(left_op, right_op, result_op); break;

    case Bytecodes::_fdiv: __ div(left_op, right_op, result_op); break;
    // ldiv and lrem are implemented with a direct runtime call

    case Bytecodes::_ddiv:
      {
        if (is_strictfp) {
          __ div_strictfp(left_op, right_op, result_op, tmp_op); break;
        } else {
          __ div(left_op, right_op, result_op); break;
        }
      }
      break;

    case Bytecodes::_drem:
    case Bytecodes::_frem: __ rem(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::arithmetic_op_int(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, false, tmp);
}


void LIRGenerator::arithmetic_op_long(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, CodeEmitInfo* info) {
  arithmetic_op(code, result, left, right, false, LIR_OprFact::illegalOpr, info);
}


void LIRGenerator::arithmetic_op_fpu(Bytecodes::Code code, LIR_Opr result, LIR_Opr left, LIR_Opr right, bool is_strictfp, LIR_Opr tmp) {
  arithmetic_op(code, result, left, right, is_strictfp, tmp);
}


void LIRGenerator::shift_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr value, LIR_Opr count, LIR_Opr tmp) {
  if (TwoOperandLIRForm && value != result_op) {
    assert(count != result_op, "malformed");
    __ move(value, result_op);
    value = result_op;
  }

  assert(count->is_constant() || count->is_register(), "must be");
  switch (code) {
    case Bytecodes::_ishl:
    case Bytecodes::_lshl:  __ shift_left(value, count, result_op, tmp); break;
    case Bytecodes::_ishr:
    case Bytecodes::_lshr:  __ shift_right(value, count, result_op, tmp); break;
    case Bytecodes::_iushr:
    case Bytecodes::_lushr: __ unsigned_shift_right(value, count, result_op, tmp); break;
    default: ShouldNotReachHere();
  }
}


void LIRGenerator::logic_op(Bytecodes::Code code, LIR_Opr result_op, LIR_Opr left_op, LIR_Opr right_op) {
  if (TwoOperandLIRForm && left_op != result_op) {
    assert(right_op != result_op, "malformed");
    __ move(left_op, result_op);
    left_op = result_op;
  }

  switch (code) {
    case Bytecodes::_iand:
    case Bytecodes::_land: __ logical_and(left_op, right_op, result_op); break;

    case Bytecodes::_ior:
    case Bytecodes::_lor:  __ logical_or(left_op, right_op, result_op); break;

    case Bytecodes::_ixor:
    case Bytecodes::_lxor: __ logical_xor(left_op, right_op, result_op); break;

    default: ShouldNotReachHere();
  }
}


void LIRGenerator::monitor_enter(LIR_Opr object, LIR_Opr lock, LIR_Opr hdr, LIR_Opr scratch, int monitor_no, CodeEmitInfo* info_for_exception, CodeEmitInfo* info) {
  if (!GenerateSynchronizationCode) return;
  // for slow path, use debug info for state after successful locking
  CodeStub* slow_path = new MonitorEnterStub(object, lock, info);
  __ load_stack_address_monitor(monitor_no, lock);
  // for handling NullPointerException, use debug info representing just the lock stack before this monitorenter
  __ lock_object(hdr, object, lock, scratch, slow_path, info_for_exception);
}


void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, LIR_Opr scratch, int monitor_no) {
  if (!GenerateSynchronizationCode) return;
  // setup registers
  LIR_Opr hdr = lock;
  lock = new_hdr;
  CodeStub* slow_path = new MonitorExitStub(lock, UseFastLocking, monitor_no);
  __ load_stack_address_monitor(monitor_no, lock);
  __ unlock_object(hdr, object, lock, scratch, slow_path);
}

#ifndef PRODUCT
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
  if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
    tty->print_cr("   ###class not loaded at new bci %d", new_instance->printable_bci());
  } else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
    tty->print_cr("   ###class not resolved at new bci %d", new_instance->printable_bci());
  }
}
#endif

#ifndef MIPS64
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3, LIR_Opr scratch4, LIR_Opr klass_reg, CodeEmitInfo* info) {
#else
void LIRGenerator::new_instance(LIR_Opr dst, ciInstanceKlass* klass, bool is_unresolved, LIR_Opr scratch1, LIR_Opr scratch2, LIR_Opr scratch3,
                                LIR_Opr scratch4, LIR_Opr scratch5, LIR_Opr scratch6, LIR_Opr klass_reg, CodeEmitInfo* info) {
#endif
  klass2reg_with_patching(klass_reg, klass, info, is_unresolved);
  // If klass is not loaded we do not know if the klass has finalizers:
  if (UseFastNewInstance && klass->is_loaded()
      && !Klass::layout_helper_needs_slow_path(klass->layout_helper())) {

    Runtime1::StubID stub_id = klass->is_initialized() ? Runtime1::fast_new_instance_id : Runtime1::fast_new_instance_init_check_id;

    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, stub_id);

    assert(klass->is_loaded(), "must be loaded");
    // allocate space for instance
    assert(klass->size_helper() >= 0, "illegal instance size");
    const int instance_size = align_object_size(klass->size_helper());
#ifndef MIPS64
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
#else
    __ allocate_object(dst, scratch1, scratch2, scratch3, scratch4, scratch5, scratch6,
                       oopDesc::header_size(), instance_size, klass_reg, !klass->is_initialized(), slow_path);
#endif
  } else {
    CodeStub* slow_path = new NewInstanceStub(klass_reg, dst, klass, info, Runtime1::new_instance_id);
#ifndef MIPS64
    __ branch(lir_cond_always, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#else
    __ branch(lir_cond_always, LIR_OprFact::illegalOpr, LIR_OprFact::illegalOpr, T_ILLEGAL, slow_path);
    __ branch_destination(slow_path->continuation());
#endif
  }
}


static bool is_constant_zero(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() == 0);
  }
  return false;
}


static bool positive_constant(Instruction* inst) {
  IntConstant* c = inst->type()->as_IntConstant();
  if (c) {
    return (c->value() >= 0);
  }
  return false;
}


static ciArrayKlass* as_array_klass(ciType* type) {
  if (type != NULL && type->is_array_klass() && type->is_loaded()) {
    return (ciArrayKlass*)type;
  } else {
    return NULL;
  }
}

static ciType* phi_declared_type(Phi* phi) {
  ciType* t = phi->operand_at(0)->declared_type();
  if (t == NULL) {
    return NULL;
  }
  for (int i = 1; i < phi->operand_count(); i++) {
    if (t != phi->operand_at(i)->declared_type()) {
      return NULL;
    }
  }
  return t;
}

void LIRGenerator::arraycopy_helper(Intrinsic* x, int* flagsp, ciArrayKlass** expected_typep) {
  Instruction* src     = x->argument_at(0);
  Instruction* src_pos = x->argument_at(1);
  Instruction* dst     = x->argument_at(2);
  Instruction* dst_pos = x->argument_at(3);
  Instruction* length  = x->argument_at(4);

  // first try to identify the likely type of the arrays involved
  ciArrayKlass* expected_type = NULL;
  bool is_exact = false, src_objarray = false, dst_objarray = false;
  {
    ciArrayKlass* src_exact_type    = as_array_klass(src->exact_type());
    ciArrayKlass* src_declared_type = as_array_klass(src->declared_type());
    Phi* phi;
    if (src_declared_type == NULL && (phi = src->as_Phi()) != NULL) {
      src_declared_type = as_array_klass(phi_declared_type(phi));
    }
    ciArrayKlass* dst_exact_type    = as_array_klass(dst->exact_type());
    ciArrayKlass* dst_declared_type = as_array_klass(dst->declared_type());
    if (dst_declared_type == NULL && (phi = dst->as_Phi()) != NULL) {
      dst_declared_type = as_array_klass(phi_declared_type(phi));
    }

    if (src_exact_type != NULL && src_exact_type == dst_exact_type) {
      // the types exactly match so the type is fully known
      is_exact = true;
      expected_type = src_exact_type;
    } else if (dst_exact_type != NULL && dst_exact_type->is_obj_array_klass()) {
      ciArrayKlass* dst_type = (ciArrayKlass*) dst_exact_type;
      ciArrayKlass* src_type = NULL;
      if (src_exact_type != NULL && src_exact_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_exact_type;
      } else if (src_declared_type != NULL && src_declared_type->is_obj_array_klass()) {
        src_type = (ciArrayKlass*) src_declared_type;
      }
      if (src_type != NULL) {
        if (src_type->element_type()->is_subtype_of(dst_type->element_type())) {
          is_exact = true;
          expected_type = dst_type;
        }
      }
    }
    // at least pass along a good guess
    if (expected_type == NULL) expected_type = dst_exact_type;
    if (expected_type == NULL) expected_type = src_declared_type;
    if (expected_type == NULL) expected_type = dst_declared_type;

    src_objarray = (src_exact_type && src_exact_type->is_obj_array_klass()) || (src_declared_type && src_declared_type->is_obj_array_klass());
    dst_objarray = (dst_exact_type && dst_exact_type->is_obj_array_klass()) || (dst_declared_type && dst_declared_type->is_obj_array_klass());
  }

  // if a probable array type has been identified, figure out if any
  // of the required checks for a fast case can be elided.
  int flags = LIR_OpArrayCopy::all_flags;

  if (!src_objarray)
    flags &= ~LIR_OpArrayCopy::src_objarray;
  if (!dst_objarray)
    flags &= ~LIR_OpArrayCopy::dst_objarray;

  if (!x->arg_needs_null_check(0))
    flags &= ~LIR_OpArrayCopy::src_null_check;
  if (!x->arg_needs_null_check(2))
    flags &= ~LIR_OpArrayCopy::dst_null_check;


  if (expected_type != NULL) {
    Value length_limit = NULL;

    IfOp* ifop = length->as_IfOp();
    if (ifop != NULL) {
      // look for expressions like min(v, a.length), which end up as
      //   x > y ? y : x  or  x >= y ? y : x
      if ((ifop->cond() == If::gtr || ifop->cond() == If::geq) &&
          ifop->x() == ifop->fval() &&
          ifop->y() == ifop->tval()) {
        length_limit = ifop->y();
      }
    }

    // try to skip null checks and range checks
    NewArray* src_array = src->as_NewArray();
    if (src_array != NULL) {
      flags &= ~LIR_OpArrayCopy::src_null_check;
      if (length_limit != NULL &&
          src_array->length() == length_limit &&
          is_constant_zero(src_pos)) {
        flags &= ~LIR_OpArrayCopy::src_range_check;
      }
    }

    NewArray* dst_array = dst->as_NewArray();
    if (dst_array != NULL) {
      flags &= ~LIR_OpArrayCopy::dst_null_check;
      if (length_limit != NULL &&
          dst_array->length() == length_limit &&
          is_constant_zero(dst_pos)) {
        flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }

    // check from incoming constant values
    if (positive_constant(src_pos))
      flags &= ~LIR_OpArrayCopy::src_pos_positive_check;
    if (positive_constant(dst_pos))
      flags &= ~LIR_OpArrayCopy::dst_pos_positive_check;
    if (positive_constant(length))
      flags &= ~LIR_OpArrayCopy::length_positive_check;

    // see if the range check can be elided, which might also imply
    // that src or dst is non-null.
    ArrayLength* al = length->as_ArrayLength();
    if (al != NULL) {
      if (al->array() == src) {
        // it's the length of the source array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::src_null_check;
        if (is_constant_zero(src_pos))
          flags &= ~LIR_OpArrayCopy::src_range_check;
      }
      if (al->array() == dst) {
        // it's the length of the destination array
        flags &= ~LIR_OpArrayCopy::length_positive_check;
        flags &= ~LIR_OpArrayCopy::dst_null_check;
        if (is_constant_zero(dst_pos))
          flags &= ~LIR_OpArrayCopy::dst_range_check;
      }
    }
    if (is_exact) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }

  IntConstant* src_int = src_pos->type()->as_IntConstant();
  IntConstant* dst_int = dst_pos->type()->as_IntConstant();
  if (src_int && dst_int) {
    int s_offs = src_int->value();
    int d_offs = dst_int->value();
    if (src_int->value() >= dst_int->value()) {
      flags &= ~LIR_OpArrayCopy::overlapping;
    }
    if (expected_type != NULL) {
      BasicType t = expected_type->element_type()->basic_type();
      int element_size = type2aelembytes(t);
      if (((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
          ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0)) {
        flags &= ~LIR_OpArrayCopy::unaligned;
      }
    }
  } else if (src_pos == dst_pos || is_constant_zero(dst_pos)) {
    // src and dst positions are the same, or dst_pos is zero, so assume
    // a non-overlapping copy.
    flags &= ~LIR_OpArrayCopy::overlapping;
  }

  if (src == dst) {
    // moving within a single array so no type checks are needed
    if (flags & LIR_OpArrayCopy::type_check) {
      flags &= ~LIR_OpArrayCopy::type_check;
    }
  }
  *flagsp = flags;
  *expected_typep = (ciArrayKlass*)expected_type;
}


LIR_Opr LIRGenerator::round_item(LIR_Opr opr) {
  assert(opr->is_register(), "why spill if item is not register?");

  if (RoundFPResults && UseSSE < 1 && opr->is_single_fpu()) {
    LIR_Opr result = new_register(T_FLOAT);
    set_vreg_flag(result, must_start_in_memory);
    assert(opr->is_register(), "only a register can be spilled");
    assert(opr->value_type()->is_float(), "rounding only for floats available");
    __ roundfp(opr, LIR_OprFact::illegalOpr, result);
    return result;
  }
  return opr;
}


LIR_Opr LIRGenerator::force_to_spill(LIR_Opr value, BasicType t) {
  assert(type2size[t] == type2size[value->type()],
         err_msg_res("size mismatch: t=%s, value->type()=%s", type2name(t), type2name(value->type())));
  if (!value->is_register()) {
    // force into a register
    LIR_Opr r = new_register(value->type());
    __ move(value, r);
    value = r;
  }

  // create a spill location
  LIR_Opr tmp = new_register(t);
  set_vreg_flag(tmp, LIRGenerator::must_start_in_memory);

  // move from register to spill
  __ move(value, tmp);
  return tmp;
}

#ifndef MIPS64
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    assert(md != NULL, "Sanity");
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }

    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);

    LIR_Opr data_offset_reg = new_pointer_register();
    __ cmove(lir_cond(cond),
             LIR_OprFact::intptrConst(taken_count_offset),
             LIR_OprFact::intptrConst(not_taken_count_offset),
             data_offset_reg, as_BasicType(if_instr->x()->type()));

    // MDO cells are intptr_t, so the data_reg width is arch-dependent.
    LIR_Opr data_reg = new_pointer_register();
    LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
    __ move(data_addr, data_reg);
    // Use leal instead of add to avoid destroying condition codes on x86
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, data_addr);
  }
}
#else
void LIRGenerator::profile_branch(If* if_instr, If::Condition cond, LIR_Opr left, LIR_Opr right) {
  if (if_instr->should_profile()) {
    ciMethod* method = if_instr->profiled_method();
    assert(method != NULL, "method should be set if branch is profiled");
    ciMethodData* md = method->method_data_or_null();
    if (md == NULL) {
      bailout("out of memory building methodDataOop");
      return;
    }
    ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
    assert(data != NULL, "must have profiling data");
    assert(data->is_BranchData(), "need BranchData for two-way branches");
    int taken_count_offset     = md->byte_offset_of_slot(data, BranchData::taken_offset());
    int not_taken_count_offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
    if (if_instr->is_swapped()) {
      int t = taken_count_offset;
      taken_count_offset = not_taken_count_offset;
      not_taken_count_offset = t;
    }
    LIR_Opr md_reg = new_register(T_METADATA);
    __ metadata2reg(md->constant_encoding(), md_reg);
    //__ move(LIR_OprFact::oopConst(md->constant_encoding()), md_reg);
    LIR_Opr data_offset_reg = new_pointer_register();

    LIR_Opr opr1 = LIR_OprFact::intConst(taken_count_offset);
    LIR_Opr opr2 = LIR_OprFact::intConst(not_taken_count_offset);
    LabelObj* skip = new LabelObj();

    __ move(opr1, data_offset_reg);
    __ branch(lir_cond(cond), left, right, skip->label());
    __ move(opr2, data_offset_reg);
    __ branch_destination(skip->label());

    LIR_Opr data_reg = new_pointer_register();
    LIR_Opr tmp_reg = new_pointer_register();
    // LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, T_INT);
    __ move(data_offset_reg, tmp_reg);
    __ add(tmp_reg, md_reg, tmp_reg);
    LIR_Address* data_addr = new LIR_Address(tmp_reg, 0, T_INT);
    __ move(LIR_OprFact::address(data_addr), data_reg);
    LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
    // Use leal instead of add to avoid destroying condition codes on x86
    __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
    __ move(data_reg, LIR_OprFact::address(data_addr));
  }
}

#endif

// Phi technique:
// This is about passing live values from one basic block to the other.
// In code generated with Java it is rather rare that more than one
// value is on the stack from one basic block to the other.
// We optimize our technique for efficient passing of one value
// (of type long, int, double..) but it can be extended.
// When entering or leaving a basic block, all registers and all spill
// slots are released and empty. We use the released registers
// and spill slots to pass the live values from one block
// to the other. The topmost value, i.e., the value on TOS of the expression
// stack, is passed in registers. All other values are stored in the spilling
// area. Every Phi has an index which designates its spill slot.
// At exit of a basic block, we fill the register(s) and spill slots.
// At entry of a basic block, the block_prolog sets up the content of phi nodes
// and locks necessary registers and spilling slots.
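//
// Illustrative example (a sketch): for
//     if (c) { v = a; } else { v = b; }
//     use(v);
// the merge block starts with v = phi(a, b); each predecessor ends by moving
// its live value (a or b) into the operand assigned to the phi, via
// move_to_phi() below, so the merge block finds v already in place.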


// move current value to referenced phi function
void LIRGenerator::move_to_phi(PhiResolver* resolver, Value cur_val, Value sux_val) {
  Phi* phi = sux_val->as_Phi();
  // cur_val can be null without phi being null in conjunction with inlining
  if (phi != NULL && cur_val != NULL && cur_val != phi && !phi->is_illegal()) {
    LIR_Opr operand = cur_val->operand();
    if (cur_val->operand()->is_illegal()) {
      assert(cur_val->as_Constant() != NULL || cur_val->as_Local() != NULL,
             "these can be produced lazily");
      operand = operand_for_instruction(cur_val);
    }
    resolver->move(operand, operand_for_instruction(phi));
  }
}


// Moves all stack values into their PHI position
void LIRGenerator::move_to_phi(ValueStack* cur_state) {
  BlockBegin* bb = block();
  if (bb->number_of_sux() == 1) {
    BlockBegin* sux = bb->sux_at(0);
    assert(sux->number_of_preds() > 0, "invalid CFG");

    // a block with only one predecessor never has phi functions
    if (sux->number_of_preds() > 1) {
      int max_phis = cur_state->stack_size() + cur_state->locals_size();
      PhiResolver resolver(this, _virtual_register_number + max_phis * 2);

      ValueStack* sux_state = sux->state();
      Value sux_value;
      int index;

      assert(cur_state->scope() == sux_state->scope(), "not matching");
      assert(cur_state->locals_size() == sux_state->locals_size(), "not matching");
      assert(cur_state->stack_size() == sux_state->stack_size(), "not matching");

      for_each_stack_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->stack_at(index), sux_value);
      }

      for_each_local_value(sux_state, index, sux_value) {
        move_to_phi(&resolver, cur_state->local_at(index), sux_value);
      }

      assert(cur_state->caller_state() == sux_state->caller_state(), "caller states must be equal");
    }
  }
}


LIR_Opr LIRGenerator::new_register(BasicType type) {
  int vreg = _virtual_register_number;
  // add a little fudge factor for the bailout, since the bailout is
  // only checked periodically. This gives a few extra registers to
  // hand out before we really run out, which helps us keep from
  // tripping over assertions.
  if (vreg + 20 >= LIR_OprDesc::vreg_max) {
    bailout("out of virtual registers");
    if (vreg + 2 >= LIR_OprDesc::vreg_max) {
      // wrap it around
      _virtual_register_number = LIR_OprDesc::vreg_base;
    }
  }
  _virtual_register_number += 1;
  return LIR_OprFact::virtual_register(vreg, type);
}


// Try to lock using register in hint
LIR_Opr LIRGenerator::rlock(Value instr) {
  return new_register(instr->type());
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x) {
  LIR_Opr reg = rlock(x);
  set_result(x, reg);
  return reg;
}


// does an rlock and sets result
LIR_Opr LIRGenerator::rlock_result(Value x, BasicType type) {
  LIR_Opr reg;
  switch (type) {
  case T_BYTE:
  case T_BOOLEAN:
    reg = rlock_byte(type);
    break;
  default:
    reg = rlock(x);
    break;
  }

  set_result(x, reg);
  return reg;
}


//---------------------------------------------------------------------
ciObject* LIRGenerator::get_jobject_constant(Value value) {
  ObjectType* oc = value->type()->as_ObjectType();
  if (oc) {
    return oc->constant_value();
  }
  return NULL;
}
#ifdef MIPS64
void LIRGenerator::write_barrier(LIR_Opr addr) {
  if (addr->is_address()) {
    LIR_Address* address = (LIR_Address*)addr;
    LIR_Opr ptr = new_register(T_OBJECT);
    if (!address->index()->is_valid() && address->disp() == 0) {
      __ move(address->base(), ptr);
    } else {
      __ leal(addr, ptr);
    }
    addr = ptr;
  }
  assert(addr->is_register(), "must be a register at this point");
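
  // What follows is the standard card-marking scheme: compute the card index
  // as addr >> card_shift, then dirty the card by storing 0 at
  // card_table_base + index.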
  LIR_Opr tmp = new_pointer_register();
  if (TwoOperandLIRForm) {
    __ move(addr, tmp);
    __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
  } else {
    __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
  }
  if (can_inline_as_constant(card_table_base())) {
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, card_table_base()->as_jint(), T_BYTE));
  } else {
    __ add(tmp, load_constant(card_table_base()), tmp);
    __ move(LIR_OprFact::intConst(0), new LIR_Address(tmp, 0, T_BYTE));
  }
}
#endif


void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
  assert(block()->is_set(BlockBegin::exception_entry_flag), "ExceptionObject only allowed in exception handler block");
  assert(block()->next() == x, "ExceptionObject must be first instruction of block");

  // no moves are created for phi functions at the beginning of exception
  // handlers, so assign operands manually here
  for_each_phi_fun(block(), phi,
                   operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
               exceptionOopOpr());
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move_wide(LIR_OprFact::oopConst(NULL),
               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
  __ move(exceptionOopOpr(), result);
  set_result(x, result);
}


//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//                        visitor functions
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

void LIRGenerator::do_Phi(Phi* x) {
  // phi functions are never visited directly
  ShouldNotReachHere();
}


// Code for a constant is generated lazily unless the constant is frequently used and can't be inlined.
void LIRGenerator::do_Constant(Constant* x) {
  if (x->state_before() != NULL) {
    // Any constant with a ValueStack requires patching so emit the patch here
    LIR_Opr reg = rlock_result(x);
    CodeEmitInfo* info = state_for(x, x->state_before());
    __ oop2reg_patch(NULL, reg, info);
  } else if (x->use_count() > 1 && !can_inline_as_constant(x)) {
    if (!x->is_pinned()) {
      // unpinned constants are handled specially so that they can be
      // put into registers when they are used multiple times within a
      // block. After the block completes their operand will be
      // cleared so that other blocks can't refer to that register.
      set_result(x, load_constant(x));
    } else {
      LIR_Opr res = x->operand();
      if (!res->is_valid()) {
        res = LIR_OprFact::value_type(x->type());
      }
      if (res->is_constant()) {
        LIR_Opr reg = rlock_result(x);
        __ move(res, reg);
      } else {
        set_result(x, res);
      }
    }
  } else {
    set_result(x, LIR_OprFact::value_type(x->type()));
  }
}


void LIRGenerator::do_Local(Local* x) {
  // operand_for_instruction has the side effect of setting the result
  // so there's no need to do it here.
  operand_for_instruction(x);
}


void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
  Unimplemented();
}


void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_METADATA); // Method*
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
    LIR_Opr meth = new_register(T_METADATA);
    __ metadata2reg(method()->constant_encoding(), meth);
    args->append(meth);
    call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), voidType, NULL);
  }

  if (x->type()->is_void()) {
    __ return_op(LIR_OprFact::illegalOpr);
  } else {
    LIR_Opr reg = result_register_for(x->type(), /*callee=*/true);
    LIRItem result(x->result(), this);

    result.load_item_force(reg);
    __ return_op(result.result());
  }
  set_no_result(x);
}

// Example: ref.get()
// Combination of LoadField and g1 pre-write barrier
void LIRGenerator::do_Reference_get(Intrinsic* x) {

  const int referent_offset = java_lang_ref_Reference::referent_offset;
  guarantee(referent_offset > 0, "referent offset not initialized");

  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem reference(x->argument_at(0), this);
  reference.load_item();

  // need to perform the null check on the reference object
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  LIR_Address* referent_field_adr =
    new LIR_Address(reference.result(), referent_offset, T_OBJECT);

  LIR_Opr result = rlock_result(x);

  __ load(referent_field_adr, result, info);

  // Register the value in the referent field with the pre-barrier
  pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
              result /* pre_val */,
              false  /* do_load */,
              false  /* patch */,
              NULL   /* info */);
}

// Example: clazz.isInstance(object)
void LIRGenerator::do_isInstance(Intrinsic* x) {
  assert(x->number_of_arguments() == 2, "wrong type");

  // TODO could try to substitute this node with an equivalent InstanceOf
  // if clazz is known to be a constant Class. This will pick up newly found
  // constants after HIR construction. I'll leave this to a future change.

  // as a first cut, make a simple leaf call to runtime to stay platform independent.
  // could follow the aastore example in a future change.

  LIRItem clazz(x->argument_at(0), this);
  LIRItem object(x->argument_at(1), this);
  clazz.load_item();
  object.load_item();
  LIR_Opr result = rlock_result(x);

  // need to perform null check on clazz
  if (x->needs_null_check()) {
    CodeEmitInfo* info = state_for(x);
    __ null_check(clazz.result(), info);
  }

  LIR_Opr call_result = call_runtime(clazz.value(), object.value(),
                                     CAST_FROM_FN_PTR(address, Runtime1::is_instance_of),
                                     x->type(),
                                     NULL); // NULL CodeEmitInfo results in a leaf call
  __ move(call_result, result);
}

// Example: object.getClass()
void LIRGenerator::do_getClass(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");

  LIRItem rcvr(x->argument_at(0), this);
  rcvr.load_item();
  LIR_Opr temp = new_register(T_METADATA);
  LIR_Opr result = rlock_result(x);

  // need to perform the null check on the rcvr
  CodeEmitInfo* info = NULL;
  if (x->needs_null_check()) {
    info = state_for(x);
  }

  // FIXME T_ADDRESS should actually be T_METADATA but it can't because the
  // meaning of these two is mixed up (see JDK-8026837).
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
  __ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
}


// Example: Thread.currentThread()
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}


void LIRGenerator::do_RegisterFinalizer(Intrinsic* x) {
  assert(x->number_of_arguments() == 1, "wrong type");
  LIRItem receiver(x->argument_at(0), this);

  receiver.load_item();
  BasicTypeList signature;
  signature.append(T_OBJECT); // receiver
  LIR_OprList* args = new LIR_OprList();
  args->append(receiver.result());
  CodeEmitInfo* info = state_for(x, x->state());
  call_runtime(&signature, args,
               CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::register_finalizer_id)),
               voidType, info);

  set_no_result(x);
}


//------------------------local access--------------------------------------

LIR_Opr LIRGenerator::operand_for_instruction(Instruction* x) {
  if (x->operand()->is_illegal()) {
    Constant* c = x->as_Constant();
    if (c != NULL) {
      x->set_operand(LIR_OprFact::value_type(c->type()));
    } else {
      assert(x->as_Phi() || x->as_Local() != NULL, "only for Phi and Local");
      // allocate a virtual register for this local or phi
      x->set_operand(rlock(x));
      _instruction_for_operand.at_put_grow(x->operand()->vreg_number(), x, NULL);
    }
  }
  return x->operand();
}


Instruction* LIRGenerator::instruction_for_opr(LIR_Opr opr) {
  if (opr->is_virtual()) {
    return instruction_for_vreg(opr->vreg_number());
  }
  return NULL;
}


Instruction* LIRGenerator::instruction_for_vreg(int reg_num) {
  if (reg_num < _instruction_for_operand.length()) {
    return _instruction_for_operand.at(reg_num);
  }
  return NULL;
}


void LIRGenerator::set_vreg_flag(int vreg_num, VregFlag f) {
  if (_vreg_flags.size_in_bits() == 0) {
    BitMap2D temp(100, num_vreg_flags);
    temp.clear();
    _vreg_flags = temp;
  }
  _vreg_flags.at_put_grow(vreg_num, f, true);
}

bool LIRGenerator::is_vreg_flag_set(int vreg_num, VregFlag f) {
  if (!_vreg_flags.is_valid_index(vreg_num, f)) {
    return false;
  }
  return _vreg_flags.at(vreg_num, f);
}


// Block local constant handling. This code is useful for keeping
// unpinned constants and constants which aren't exposed in the IR in
// registers. Unpinned Constant instructions have their operands
// cleared when the block is finished so that other blocks can't end
// up referring to their registers.
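//
// For example, a constant used several times within one block is loaded into
// a virtual register once by load_constant() below; block_do_epilog() then
// clears the operand again, so no other block can observe the register.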

LIR_Opr LIRGenerator::load_constant(Constant* x) {
  assert(!x->is_pinned(), "only for unpinned constants");
  _unpinned_constants.append(x);
  return load_constant(LIR_OprFact::value_type(x->type())->as_constant_ptr());
}


LIR_Opr LIRGenerator::load_constant(LIR_Const* c) {
  BasicType t = c->type();
  for (int i = 0; i < _constants.length(); i++) {
    LIR_Const* other = _constants.at(i);
    if (t == other->type()) {
      switch (t) {
      case T_INT:
      case T_FLOAT:
        if (c->as_jint_bits() != other->as_jint_bits()) continue;
        break;
      case T_LONG:
      case T_DOUBLE:
        if (c->as_jint_hi_bits() != other->as_jint_hi_bits()) continue;
        if (c->as_jint_lo_bits() != other->as_jint_lo_bits()) continue;
        break;
      case T_OBJECT:
        if (c->as_jobject() != other->as_jobject()) continue;
        break;
      }
      return _reg_for_constants.at(i);
    }
  }

  LIR_Opr result = new_register(t);
  __ move((LIR_Opr)c, result);
  _constants.append(c);
  _reg_for_constants.append(result);
  return result;
}
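
// Note: constants above are matched by bit pattern per type, so e.g. two
// T_FLOAT constants share a register only if their raw bits are identical;
// the _constants / _reg_for_constants caches are cleared per block in
// block_do_epilog().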

// Various barriers

void LIRGenerator::pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                               bool do_load, bool patch, CodeEmitInfo* info) {
  // Do the pre-write barrier, if any.
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_pre_barrier(addr_opr, pre_val, do_load, patch, info);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      // No pre barriers
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No pre barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

void LIRGenerator::post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
  switch (_bs->kind()) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      G1SATBCardTableModRef_post_barrier(addr, new_val);
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      CardTableModRef_post_barrier(addr, new_val);
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      // No post barriers
      break;
    default:
      ShouldNotReachHere();
  }
}

////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void LIRGenerator::G1SATBCardTableModRef_pre_barrier(LIR_Opr addr_opr, LIR_Opr pre_val,
                                                     bool do_load, bool patch, CodeEmitInfo* info) {
  // First we test whether marking is in progress.
  BasicType flag_type;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    flag_type = T_INT;
  } else {
    guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
              "Assumption");
    flag_type = T_BYTE;
  }
  LIR_Opr thrd = getThreadPointer();
  LIR_Address* mark_active_flag_addr =
    new LIR_Address(thrd,
                    in_bytes(JavaThread::satb_mark_queue_offset() +
                             PtrQueue::byte_offset_of_active()),
                    flag_type);
  // Read the marking-in-progress flag.
  LIR_Opr flag_val = new_register(T_INT);
  __ load(mark_active_flag_addr, flag_val);
  // MIPS does not support a separate cmp instruction.
#ifndef MIPS64
  __ cmp(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0));
#endif
1626 LIR_PatchCode pre_val_patch_code = lir_patch_none;
1628 CodeStub* slow;
1630 if (do_load) {
1631 assert(pre_val == LIR_OprFact::illegalOpr, "sanity");
1632 assert(addr_opr != LIR_OprFact::illegalOpr, "sanity");
1634 if (patch)
1635 pre_val_patch_code = lir_patch_normal;
1637 pre_val = new_register(T_OBJECT);
1639 if (!addr_opr->is_address()) {
1640 assert(addr_opr->is_register(), "must be");
1641 addr_opr = LIR_OprFact::address(new LIR_Address(addr_opr, T_OBJECT));
1642 }
1643 slow = new G1PreBarrierStub(addr_opr, pre_val, pre_val_patch_code, info);
1644 } else {
1645 assert(addr_opr == LIR_OprFact::illegalOpr, "sanity");
1646 assert(pre_val->is_register(), "must be");
1647 assert(pre_val->type() == T_OBJECT, "must be an object");
1648 assert(info == NULL, "sanity");
1650 slow = new G1PreBarrierStub(pre_val);
1651 }
1653 #ifndef MIPS64
1654 __ branch(lir_cond_notEqual, T_INT, slow);
1655 #else
1656 __ branch(lir_cond_notEqual, flag_val, LIR_OprFact::intConst(0), T_INT, slow);
1657 #endif
1658 __ branch_destination(slow->continuation());
1659 }
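// A pseudo-code sketch (ours) of what the LIR emitted above implements for the
// SATB pre-barrier in the do_load case:
//
//   if (thread->satb_mark_queue_active != 0) {    // concurrent marking running?
//     pre_val = *addr;                            // loaded in G1PreBarrierStub
//     if (pre_val != NULL) enqueue(pre_val);      // log the old value
//   }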
1661 void LIRGenerator::G1SATBCardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1662 // If the "new_val" is a constant NULL, no barrier is necessary.
1663 if (new_val->is_constant() &&
1664 new_val->as_constant_ptr()->as_jobject() == NULL) return;
1666 if (!new_val->is_register()) {
1667 LIR_Opr new_val_reg = new_register(T_OBJECT);
1668 if (new_val->is_constant()) {
1669 __ move(new_val, new_val_reg);
1670 } else {
1671 __ leal(new_val, new_val_reg);
1672 }
1673 new_val = new_val_reg;
1674 }
1675 assert(new_val->is_register(), "must be a register at this point");
1677 if (addr->is_address()) {
1678 LIR_Address* address = addr->as_address_ptr();
1679 LIR_Opr ptr = new_pointer_register();
1680 if (!address->index()->is_valid() && address->disp() == 0) {
1681 __ move(address->base(), ptr);
1682 } else {
1683 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1684 __ leal(addr, ptr);
1685 }
1686 addr = ptr;
1687 }
1688 assert(addr->is_register(), "must be a register at this point");
1690 LIR_Opr xor_res = new_pointer_register();
1691 LIR_Opr xor_shift_res = new_pointer_register();
1692 if (TwoOperandLIRForm) {
1693 __ move(addr, xor_res);
1694 __ logical_xor(xor_res, new_val, xor_res);
1695 __ move(xor_res, xor_shift_res);
1696 __ unsigned_shift_right(xor_shift_res,
1697 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1698 xor_shift_res,
1699 LIR_OprDesc::illegalOpr());
1700 } else {
1701 __ logical_xor(addr, new_val, xor_res);
1702 __ unsigned_shift_right(xor_res,
1703 LIR_OprFact::intConst(HeapRegion::LogOfHRGrainBytes),
1704 xor_shift_res,
1705 LIR_OprDesc::illegalOpr());
1706 }
1708 if (!new_val->is_register()) {
1709 LIR_Opr new_val_reg = new_register(T_OBJECT);
1710 __ leal(new_val, new_val_reg);
1711 new_val = new_val_reg;
1712 }
1713 assert(new_val->is_register(), "must be a register at this point");
1715 #ifndef MIPS64
1716 __ cmp(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst(NULL_WORD));
1718 #endif
1719 CodeStub* slow = new G1PostBarrierStub(addr, new_val);
1720 #ifndef MIPS64
1721 __ branch(lir_cond_notEqual, LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1722 #else
1723 __ branch(lir_cond_notEqual, xor_shift_res, LIR_OprFact::intptrConst((intptr_t)NULL_WORD), LP64_ONLY(T_LONG) NOT_LP64(T_INT), slow);
1724 #endif
1725 __ branch_destination(slow->continuation());
1726 }
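// A pseudo-code sketch (ours) of the fast-path filter above: only stores that
// cross a heap-region boundary reach the stub.
//
//   if (((addr ^ new_val) >> LogOfHRGrainBytes) != 0) {
//     // G1PostBarrierStub: re-checks new_val != NULL, then dirties the card
//   }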
1728 #endif // INCLUDE_ALL_GCS
1729 ////////////////////////////////////////////////////////////////////////
1731 void LIRGenerator::CardTableModRef_post_barrier(LIR_OprDesc* addr, LIR_OprDesc* new_val) {
1733 assert(sizeof(*((CardTableModRefBS*)_bs)->byte_map_base) == sizeof(jbyte), "adjust this code");
1734 LIR_Const* card_table_base = new LIR_Const(((CardTableModRefBS*)_bs)->byte_map_base);
1735 if (addr->is_address()) {
1736 LIR_Address* address = addr->as_address_ptr();
1737 // ptr cannot be an object because we use this barrier for array card marks
1738 // and addr can point in the middle of an array.
1739 LIR_Opr ptr = new_pointer_register();
1740 if (!address->index()->is_valid() && address->disp() == 0) {
1741 __ move(address->base(), ptr);
1742 } else {
1743 assert(address->disp() != max_jint, "lea doesn't support patched addresses!");
1744 __ leal(addr, ptr);
1745 }
1746 addr = ptr;
1747 }
1748 assert(addr->is_register(), "must be a register at this point");
1750 #ifdef CARDTABLEMODREF_POST_BARRIER_HELPER
1751 CardTableModRef_post_barrier_helper(addr, card_table_base);
1752 #else
1753 LIR_Opr tmp = new_pointer_register();
1754 if (TwoOperandLIRForm) {
1755 __ move(addr, tmp);
1756 __ unsigned_shift_right(tmp, CardTableModRefBS::card_shift, tmp);
1757 } else {
1758 __ unsigned_shift_right(addr, CardTableModRefBS::card_shift, tmp);
1759 }
1760 if (can_inline_as_constant(card_table_base)) {
1761 __ move(LIR_OprFact::intConst(0),
1762 new LIR_Address(tmp, card_table_base->as_jint(), T_BYTE));
1763 } else {
1764 #ifndef MIPS64
1765 __ move(LIR_OprFact::intConst(0),
1766 new LIR_Address(tmp, load_constant(card_table_base),
1767 T_BYTE));
1768 #else
1769 __ add(tmp, load_constant(card_table_base), tmp);
1770 __ move(LIR_OprFact::intConst(0),
1771 new LIR_Address(tmp, 0,
1772 T_BYTE));
1773 #endif
1774 }
1775 #endif
1776 }
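// In effect (our summary, not source text), the inline fast path above performs
// the classic card mark:
//
//   byte_map_base[addr >> card_shift] = 0;   // 0 == dirty card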
1779 //------------------------field access--------------------------------------
1781 // Comment copied from templateTable_i486.cpp
1782 // ----------------------------------------------------------------------------
1783 // Volatile variables demand their effects be made known to all CPUs in
1784 // order. Store buffers on most chips allow reads & writes to reorder; the
1785 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
1786 // memory barrier (i.e., it's not sufficient that the interpreter does not
1787 // reorder volatile references, the hardware also must not reorder them).
1788 //
1789 // According to the new Java Memory Model (JMM):
1790 // (1) All volatiles are serialized wrt to each other.
1791 // ALSO reads & writes act as acquire & release, so:
1792 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
1793 // the read float up to before the read. It's OK for non-volatile memory refs
1794 // that happen before the volatile read to float down below it.
1795 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
1796 // that happen BEFORE the write float down to after the write. It's OK for
1797 // non-volatile memory refs that happen after the volatile write to float up
1798 // before it.
1799 //
1800 // We only put in barriers around volatile refs (they are expensive), not
1801 // _between_ memory refs (that would require us to track the flavor of the
1802 // previous memory refs). Requirements (2) and (3) require some barriers
1803 // before volatile stores and after volatile loads. These nearly cover
1804 // requirement (1) but miss the volatile-store-volatile-load case. This final
1805 // case is placed after volatile-stores although it could just as well go
1806 // before volatile-loads.
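// Concretely (our summary of do_StoreField/do_LoadField below), the barrier
// placement on MP systems is:
//
//   membar_release();      // before a volatile store
//   store(value, field);
//   membar();              // StoreLoad barrier after the volatile store
//   ...
//   load(field, result);   // volatile load
//   membar_acquire();      // after a volatile load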
1809 void LIRGenerator::do_StoreField(StoreField* x) {
1810 bool needs_patching = x->needs_patching();
1811 bool is_volatile = x->field()->is_volatile();
1812 BasicType field_type = x->field_type();
1813 bool is_oop = (field_type == T_ARRAY || field_type == T_OBJECT);
1815 CodeEmitInfo* info = NULL;
1816 if (needs_patching) {
1817 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1818 info = state_for(x, x->state_before());
1819 } else if (x->needs_null_check()) {
1820 NullCheck* nc = x->explicit_null_check();
1821 if (nc == NULL) {
1822 info = state_for(x);
1823 } else {
1824 info = state_for(nc);
1825 }
1826 }
1829 LIRItem object(x->obj(), this);
1830 LIRItem value(x->value(), this);
1832 object.load_item();
1834 if (is_volatile || needs_patching) {
1835 // load item if field is volatile (fewer special cases for volatiles)
1836 // load item if field not initialized
1837 // load item if field not constant
1838 // because of code patching we cannot inline constants
1839 if (field_type == T_BYTE || field_type == T_BOOLEAN) {
1840 value.load_byte_item();
1841 } else {
1842 value.load_item();
1843 }
1844 } else {
1845 value.load_for_store(field_type);
1846 }
1848 set_no_result(x);
1850 #ifndef PRODUCT
1851 if (PrintNotLoaded && needs_patching) {
1852 tty->print_cr(" ###class not loaded at store_%s bci %d",
1853 x->is_static() ? "static" : "field", x->printable_bci());
1854 }
1855 #endif
1857 if (x->needs_null_check() &&
1858 (needs_patching ||
1859 MacroAssembler::needs_explicit_null_check(x->offset()))) {
1860 // Emit an explicit null check because the offset is too large.
1861 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1862 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1863 __ null_check(object.result(), new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1864 }
1866 LIR_Address* address;
1867 if (needs_patching) {
1868 // we need to patch the offset in the instruction so don't allow
1869 // generate_address to try to be smart about emitting the -1.
1870 // Otherwise the patching code won't know how to find the
1871 // instruction to patch.
1872 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1873 } else {
1874 address = generate_address(object.result(), x->offset(), field_type);
1875 }
1877 if (is_volatile && os::is_MP()) {
1878 __ membar_release();
1879 }
1881 if (is_oop) {
1882 // Do the pre-write barrier, if any.
1883 pre_barrier(LIR_OprFact::address(address),
1884 LIR_OprFact::illegalOpr /* pre_val */,
1885 true /* do_load*/,
1886 needs_patching,
1887 (info ? new CodeEmitInfo(info) : NULL));
1888 }
1890 if (is_volatile && !needs_patching) {
1891 volatile_field_store(value.result(), address, info);
1892 } else {
1893 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1894 __ store(value.result(), address, info, patch_code);
1895 }
1897 if (is_oop) {
1898 // Store to object so mark the card of the header
1899 post_barrier(object.result(), value.result());
1900 }
1902 if (is_volatile && os::is_MP()) {
1903 __ membar();
1904 }
1905 }
1908 void LIRGenerator::do_LoadField(LoadField* x) {
1909 bool needs_patching = x->needs_patching();
1910 bool is_volatile = x->field()->is_volatile();
1911 BasicType field_type = x->field_type();
1913 CodeEmitInfo* info = NULL;
1914 if (needs_patching) {
1915 assert(x->explicit_null_check() == NULL, "can't fold null check into patching field access");
1916 info = state_for(x, x->state_before());
1917 } else if (x->needs_null_check()) {
1918 NullCheck* nc = x->explicit_null_check();
1919 if (nc == NULL) {
1920 info = state_for(x);
1921 } else {
1922 info = state_for(nc);
1923 }
1924 }
1926 LIRItem object(x->obj(), this);
1928 object.load_item();
1930 #ifndef PRODUCT
1931 if (PrintNotLoaded && needs_patching) {
1932 tty->print_cr(" ###class not loaded at load_%s bci %d",
1933 x->is_static() ? "static" : "field", x->printable_bci());
1934 }
1935 #endif
1937 bool stress_deopt = StressLoopInvariantCodeMotion && info && info->deoptimize_on_exception();
1938 if (x->needs_null_check() &&
1939 (needs_patching ||
1940 MacroAssembler::needs_explicit_null_check(x->offset()) ||
1941 stress_deopt)) {
1942 LIR_Opr obj = object.result();
1943 if (stress_deopt) {
1944 obj = new_register(T_OBJECT);
1945 __ move(LIR_OprFact::oopConst(NULL), obj);
1946 }
1947 // Emit an explicit null check because the offset is too large.
1948 // If the class is not loaded and the object is NULL, we need to deoptimize to throw a
1949 // NoClassDefFoundError in the interpreter instead of an implicit NPE from compiled code.
1950 __ null_check(obj, new CodeEmitInfo(info), /* deoptimize */ needs_patching);
1951 }
1953 LIR_Opr reg = rlock_result(x, field_type);
1954 LIR_Address* address;
1955 if (needs_patching) {
1956 // we need to patch the offset in the instruction so don't allow
1957 // generate_address to try to be smart about emitting the -1.
1958 // Otherwise the patching code won't know how to find the
1959 // instruction to patch.
1960 address = new LIR_Address(object.result(), PATCHED_ADDR, field_type);
1961 } else {
1962 address = generate_address(object.result(), x->offset(), field_type);
1963 }
1965 if (is_volatile && !needs_patching) {
1966 volatile_field_load(address, reg, info);
1967 } else {
1968 LIR_PatchCode patch_code = needs_patching ? lir_patch_normal : lir_patch_none;
1969 __ load(address, reg, info, patch_code);
1970 }
1972 if (is_volatile && os::is_MP()) {
1973 __ membar_acquire();
1974 }
1975 }
1978 //------------------------java.nio.Buffer.checkIndex------------------------
1980 // int java.nio.Buffer.checkIndex(int)
1981 void LIRGenerator::do_NIOCheckIndex(Intrinsic* x) {
1982 // NOTE: by the time we are in checkIndex() we are guaranteed that
1983 // the buffer is non-null (because checkIndex is package-private and
1984 // only called from within other methods in the buffer).
1985 assert(x->number_of_arguments() == 2, "wrong type");
1986 LIRItem buf (x->argument_at(0), this);
1987 LIRItem index(x->argument_at(1), this);
1988 buf.load_item();
1989 index.load_item();
1991 LIR_Opr result = rlock_result(x);
1992 if (GenerateRangeChecks) {
1993 CodeEmitInfo* info = state_for(x);
1994 CodeStub* stub = new RangeCheckStub(info, index.result(), true);
1995 if (index.result()->is_constant()) {
1996 #ifndef MIPS64
1997 cmp_mem_int(lir_cond_belowEqual, buf.result(), java_nio_Buffer::limit_offset(), index.result()->as_jint(), info);
1998 __ branch(lir_cond_belowEqual, T_INT, stub);
1999 #else
2000 LIR_Opr left = LIR_OprFact::address(new LIR_Address(buf.result(),
2001 java_nio_Buffer::limit_offset(), T_INT));
2002 LIR_Opr right = LIR_OprFact::intConst(index.result()->as_jint());
2003 __ null_check_for_branch(lir_cond_belowEqual, left, right, info);
2004 __ branch(lir_cond_belowEqual, left, right, T_INT, stub); // forward branch
2006 #endif
2007 } else {
2008 #ifndef MIPS64
2009 cmp_reg_mem(lir_cond_aboveEqual, index.result(), buf.result(),
2010 java_nio_Buffer::limit_offset(), T_INT, info);
2011 __ branch(lir_cond_aboveEqual, T_INT, stub);
2012 #else
2013 LIR_Opr right = LIR_OprFact::address(new LIR_Address(buf.result(), java_nio_Buffer::limit_offset(), T_INT));
2014 LIR_Opr left = index.result();
2015 __ null_check_for_branch(lir_cond_aboveEqual, left, right, info);
2016 __ branch(lir_cond_aboveEqual, left, right, T_INT, stub); // forward branch
2017 #endif
2018 }
2019 __ move(index.result(), result);
2020 } else {
2021 // Just load the index into the result register
2022 __ move(index.result(), result);
2023 }
2024 }
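// The generated range check (our sketch) is an unsigned comparison against the
// buffer's limit field:
//
//   if ((juint)index >= (juint)buf.limit) goto RangeCheckStub;
//   result = index;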
2027 //------------------------array access--------------------------------------
2030 void LIRGenerator::do_ArrayLength(ArrayLength* x) {
2031 LIRItem array(x->array(), this);
2032 array.load_item();
2033 LIR_Opr reg = rlock_result(x);
2035 CodeEmitInfo* info = NULL;
2036 if (x->needs_null_check()) {
2037 NullCheck* nc = x->explicit_null_check();
2038 if (nc == NULL) {
2039 info = state_for(x);
2040 } else {
2041 info = state_for(nc);
2042 }
2043 if (StressLoopInvariantCodeMotion && info->deoptimize_on_exception()) {
2044 LIR_Opr obj = new_register(T_OBJECT);
2045 __ move(LIR_OprFact::oopConst(NULL), obj);
2046 __ null_check(obj, new CodeEmitInfo(info));
2047 }
2048 }
2049 __ load(new LIR_Address(array.result(), arrayOopDesc::length_offset_in_bytes(), T_INT), reg, info, lir_patch_none);
2050 }
2053 void LIRGenerator::do_LoadIndexed(LoadIndexed* x) {
2054 bool use_length = x->length() != NULL;
2055 LIRItem array(x->array(), this);
2056 LIRItem index(x->index(), this);
2057 LIRItem length(this);
2058 bool needs_range_check = x->compute_needs_range_check();
2060 if (use_length && needs_range_check) {
2061 length.set_instruction(x->length());
2062 length.load_item();
2063 }
2065 array.load_item();
2066 if (index.is_constant() && can_inline_as_constant(x->index())) {
2067 // let it be a constant
2068 index.dont_load_item();
2069 } else {
2070 index.load_item();
2071 }
2073 CodeEmitInfo* range_check_info = state_for(x);
2074 CodeEmitInfo* null_check_info = NULL;
2075 if (x->needs_null_check()) {
2076 NullCheck* nc = x->explicit_null_check();
2077 if (nc != NULL) {
2078 null_check_info = state_for(nc);
2079 } else {
2080 null_check_info = range_check_info;
2081 }
2082 if (StressLoopInvariantCodeMotion && null_check_info->deoptimize_on_exception()) {
2083 LIR_Opr obj = new_register(T_OBJECT);
2084 __ move(LIR_OprFact::oopConst(NULL), obj);
2085 __ null_check(obj, new CodeEmitInfo(null_check_info));
2086 }
2087 }
2089 // emit array address setup early so it schedules better
2090 LIR_Address* array_addr = emit_array_address(array.result(), index.result(), x->elt_type(), false);
2092 if (GenerateRangeChecks && needs_range_check) {
2093 if (StressLoopInvariantCodeMotion && range_check_info->deoptimize_on_exception()) {
2094 #ifndef MIPS64
2095 __ branch(lir_cond_always, T_ILLEGAL, new RangeCheckStub(range_check_info, index.result()));
2096 #else
2097 tty->print_cr("LIRGenerator::do_LoadIndexed(LoadIndexed* x) unimplemented yet!");
2098 Unimplemented();
2099 #endif
2100 } else if (use_length) {
2101 // TODO: use a (modified) version of array_range_check that does not require a
2102 // constant length to be loaded to a register
2103 #ifndef MIPS64
2104 __ cmp(lir_cond_belowEqual, length.result(), index.result());
2105 __ branch(lir_cond_belowEqual, T_INT, new RangeCheckStub(range_check_info, index.result()));
2106 #else
2107 __ branch(lir_cond_belowEqual, length.result(), index.result(),T_INT, new RangeCheckStub(range_check_info, index.result()));
2108 #endif
2109 } else {
2110 array_range_check(array.result(), index.result(), null_check_info, range_check_info);
2111 // The range check performs the null check, so clear it out for the load
2112 null_check_info = NULL;
2113 }
2114 }
2116 __ move(array_addr, rlock_result(x, x->elt_type()), null_check_info);
2117 }
2120 void LIRGenerator::do_NullCheck(NullCheck* x) {
2121 if (x->can_trap()) {
2122 LIRItem value(x->obj(), this);
2123 value.load_item();
2124 CodeEmitInfo* info = state_for(x);
2125 __ null_check(value.result(), info);
2126 }
2127 }
2130 void LIRGenerator::do_TypeCast(TypeCast* x) {
2131 LIRItem value(x->obj(), this);
2132 value.load_item();
2133 // the result is the same as from the node we are casting
2134 set_result(x, value.result());
2135 }
2138 void LIRGenerator::do_Throw(Throw* x) {
2139 LIRItem exception(x->exception(), this);
2140 exception.load_item();
2141 set_no_result(x);
2142 LIR_Opr exception_opr = exception.result();
2143 CodeEmitInfo* info = state_for(x, x->state());
2145 #ifndef PRODUCT
2146 if (PrintC1Statistics) {
2147 increment_counter(Runtime1::throw_count_address(), T_INT);
2148 }
2149 #endif
2151 // check if the instruction has an xhandler in any of the nested scopes
2152 bool unwind = false;
2153 if (info->exception_handlers()->length() == 0) {
2154 // this throw is not inside an xhandler
2155 unwind = true;
2156 } else {
2157 // get some idea of the throw type
2158 bool type_is_exact = true;
2159 ciType* throw_type = x->exception()->exact_type();
2160 if (throw_type == NULL) {
2161 type_is_exact = false;
2162 throw_type = x->exception()->declared_type();
2163 }
2164 if (throw_type != NULL && throw_type->is_instance_klass()) {
2165 ciInstanceKlass* throw_klass = (ciInstanceKlass*)throw_type;
2166 unwind = !x->exception_handlers()->could_catch(throw_klass, type_is_exact);
2167 }
2168 }
2170 // do null check before moving exception oop into fixed register
2171 // to avoid a fixed interval with an oop during the null check.
2172 // Use a copy of the CodeEmitInfo because debug information is
2173 // different for null_check and throw.
2174 if (GenerateCompilerNullChecks &&
2175 (x->exception()->as_NewInstance() == NULL && x->exception()->as_ExceptionObject() == NULL)) {
2176 // if the exception object wasn't created using new then it might be null.
2177 __ null_check(exception_opr, new CodeEmitInfo(info, x->state()->copy(ValueStack::ExceptionState, x->state()->bci())));
2178 }
2180 if (compilation()->env()->jvmti_can_post_on_exceptions()) {
2181 // we need to go through the exception lookup path to get JVMTI
2182 // notification done
2183 unwind = false;
2184 }
2186 // move exception oop into fixed register
2187 __ move(exception_opr, exceptionOopOpr());
2189 if (unwind) {
2190 __ unwind_exception(exceptionOopOpr());
2191 } else {
2192 __ throw_exception(exceptionPcOpr(), exceptionOopOpr(), info);
2193 }
2194 }
2197 void LIRGenerator::do_RoundFP(RoundFP* x) {
2198 LIRItem input(x->input(), this);
2199 input.load_item();
2200 LIR_Opr input_opr = input.result();
2201 assert(input_opr->is_register(), "why round if value is not in a register?");
2202 assert(input_opr->is_single_fpu() || input_opr->is_double_fpu(), "input should be floating-point value");
2203 if (input_opr->is_single_fpu()) {
2204 set_result(x, round_item(input_opr)); // This code path not currently taken
2205 } else {
2206 LIR_Opr result = new_register(T_DOUBLE);
2207 set_vreg_flag(result, must_start_in_memory);
2208 __ roundfp(input_opr, LIR_OprFact::illegalOpr, result);
2209 set_result(x, result);
2210 }
2211 }
2213 // Here UnsafeGetRaw may have x->base() and x->index() be int or long
2214 // on both 64-bit and 32-bit platforms. x->base() is expected to always be long on 64-bit.
2215 void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
2216 LIRItem base(x->base(), this);
2217 LIRItem idx(this);
2219 base.load_item();
2220 if (x->has_index()) {
2221 idx.set_instruction(x->index());
2222 idx.load_nonconstant();
2223 }
2225 LIR_Opr reg = rlock_result(x, x->basic_type());
2227 int log2_scale = 0;
2228 if (x->has_index()) {
2229 log2_scale = x->log2_scale();
2230 }
2232 assert(!x->has_index() || idx.value() == x->index(), "should match");
2234 LIR_Opr base_op = base.result();
2235 LIR_Opr index_op = idx.result();
2236 #ifndef _LP64
2237 if (base_op->type() == T_LONG) {
2238 base_op = new_register(T_INT);
2239 __ convert(Bytecodes::_l2i, base.result(), base_op);
2240 }
2241 if (x->has_index()) {
2242 if (index_op->type() == T_LONG) {
2243 LIR_Opr long_index_op = index_op;
2244 if (index_op->is_constant()) {
2245 long_index_op = new_register(T_LONG);
2246 __ move(index_op, long_index_op);
2247 }
2248 index_op = new_register(T_INT);
2249 __ convert(Bytecodes::_l2i, long_index_op, index_op);
2250 } else {
2251 assert(x->index()->type()->tag() == intTag, "must be");
2252 }
2253 }
2254 // At this point base and index should both be ints.
2255 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2256 assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
2257 #else
2258 if (x->has_index()) {
2259 if (index_op->type() == T_INT) {
2260 if (!index_op->is_constant()) {
2261 index_op = new_register(T_LONG);
2262 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2263 }
2264 } else {
2265 assert(index_op->type() == T_LONG, "must be");
2266 if (index_op->is_constant()) {
2267 index_op = new_register(T_LONG);
2268 __ move(idx.result(), index_op);
2269 }
2270 }
2271 }
2272 // At this point base is a non-constant long.
2273 // Index is a long register or an int constant.
2274 // We allow the constant to stay an int because that would allow us a more compact encoding by
2275 // embedding an immediate offset in the address expression. If we have a long constant, we have to
2276 // move it into a register first.
2277 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
2278 assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
2279 (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
2280 #endif
2282 BasicType dst_type = x->basic_type();
2284 LIR_Address* addr;
2285 if (index_op->is_constant()) {
2286 assert(log2_scale == 0, "must not have a scale");
2287 assert(index_op->type() == T_INT, "only int constants supported");
2288 addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
2289 } else {
2290 #ifdef X86
2291 addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
2292 #elif defined(GENERATE_ADDRESS_IS_PREFERRED)
2293 addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
2294 #else
2295 if (index_op->is_illegal() || log2_scale == 0) {
2296 #ifndef MIPS64
2297 addr = new LIR_Address(base_op, index_op, dst_type);
2298 #else
2299 #ifdef _LP64
2300 LIR_Opr ptr = new_register(T_LONG);
2301 #else
2302 LIR_Opr ptr = new_register(T_INT);
2303 #endif
2304 __ move(base_op, ptr);
2305 if (index_op->is_valid())
2306 __ add(ptr, index_op, ptr);
2307 addr = new LIR_Address(ptr, 0, dst_type);
2308 #endif
2309 } else {
2310 LIR_Opr tmp = new_pointer_register();
2311 __ shift_left(index_op, log2_scale, tmp);
2312 addr = new LIR_Address(base_op, tmp, dst_type);
2313 }
2314 #endif
2315 }
2317 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
2318 __ unaligned_move(addr, reg);
2319 } else {
2320 if (dst_type == T_OBJECT && x->is_wide()) {
2321 __ move_wide(addr, reg);
2322 } else {
2323 __ move(addr, reg);
2324 }
2325 }
2326 }
2329 void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
2330 int log2_scale = 0;
2331 BasicType type = x->basic_type();
2333 if (x->has_index()) {
2334 log2_scale = x->log2_scale();
2335 }
2337 LIRItem base(x->base(), this);
2338 LIRItem value(x->value(), this);
2339 LIRItem idx(this);
2341 base.load_item();
2342 if (x->has_index()) {
2343 idx.set_instruction(x->index());
2344 idx.load_item();
2345 }
2347 if (type == T_BYTE || type == T_BOOLEAN) {
2348 value.load_byte_item();
2349 } else {
2350 value.load_item();
2351 }
2353 set_no_result(x);
2355 LIR_Opr base_op = base.result();
2356 LIR_Opr index_op = idx.result();
2358 #ifdef GENERATE_ADDRESS_IS_PREFERRED
2359 LIR_Address* addr = generate_address(base_op, index_op, log2_scale, 0, x->basic_type());
2360 #else
2361 #ifndef _LP64
2362 if (base_op->type() == T_LONG) {
2363 base_op = new_register(T_INT);
2364 __ convert(Bytecodes::_l2i, base.result(), base_op);
2365 }
2366 if (x->has_index()) {
2367 if (index_op->type() == T_LONG) {
2368 index_op = new_register(T_INT);
2369 __ convert(Bytecodes::_l2i, idx.result(), index_op);
2370 }
2371 }
2372 // At this point base and index should both be non-constant ints
2373 assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be a non-constant int");
2374 assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be a non-constant int");
2375 #else
2376 if (x->has_index()) {
2377 if (index_op->type() == T_INT) {
2378 index_op = new_register(T_LONG);
2379 __ convert(Bytecodes::_i2l, idx.result(), index_op);
2380 }
2381 }
2382 // At this point base and index are long and non-constant
2383 assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
2384 assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
2385 #endif
2387 if (log2_scale != 0) {
2388 // temporary fix (platform dependent code without shift on Intel would be better)
2389 // TODO: ARM also allows embedded shift in the address
2390 LIR_Opr tmp = new_pointer_register();
2391 if (TwoOperandLIRForm) {
2392 __ move(index_op, tmp);
2393 index_op = tmp;
2394 }
2395 __ shift_left(index_op, log2_scale, tmp);
2396 if (!TwoOperandLIRForm) {
2397 index_op = tmp;
2398 }
2399 }
2401 LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
2402 #endif // !GENERATE_ADDRESS_IS_PREFERRED
2403 __ move(value.result(), addr);
2404 }
2407 void LIRGenerator::do_UnsafeGetObject(UnsafeGetObject* x) {
2408 BasicType type = x->basic_type();
2409 LIRItem src(x->object(), this);
2410 LIRItem off(x->offset(), this);
2412 off.load_item();
2413 src.load_item();
2415 LIR_Opr value = rlock_result(x, x->basic_type());
2417 get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile());
2419 #if INCLUDE_ALL_GCS
2420 // We might be reading the value of the referent field of a
2421 // Reference object in order to attach it back to the live
2422 // object graph. If G1 is enabled then we need to record
2423 // the value that is being returned in an SATB log buffer.
2424 //
2425 // We need to generate code similar to the following...
2426 //
2427 // if (offset == java_lang_ref_Reference::referent_offset) {
2428 // if (src != NULL) {
2429 // if (klass(src)->reference_type() != REF_NONE) {
2430 // pre_barrier(..., value, ...);
2431 // }
2432 // }
2433 // }
2435 if (UseG1GC && type == T_OBJECT) {
2436 bool gen_pre_barrier = true; // Assume we need to generate pre_barrier.
2437 bool gen_offset_check = true; // Assume we need to generate the offset guard.
2438 bool gen_source_check = true; // Assume we need to check the src object for null.
2439 bool gen_type_check = true; // Assume we need to check the reference_type.
2441 if (off.is_constant()) {
2442 jlong off_con = (off.type()->is_int() ?
2443 (jlong) off.get_jint_constant() :
2444 off.get_jlong_constant());
2447 if (off_con != (jlong) java_lang_ref_Reference::referent_offset) {
2448 // The constant offset is something other than referent_offset.
2449 // We can skip generating/checking the remaining guards and
2450 // skip generation of the code stub.
2451 gen_pre_barrier = false;
2452 } else {
2453 // The constant offset is the same as referent_offset -
2454 // we do not need to generate a runtime offset check.
2455 gen_offset_check = false;
2456 }
2457 }
2459 // We don't need to generate the stub if the source object is an array
2460 if (gen_pre_barrier && src.type()->is_array()) {
2461 gen_pre_barrier = false;
2462 }
2464 if (gen_pre_barrier) {
2465 // We still need to continue with the checks.
2466 if (src.is_constant()) {
2467 ciObject* src_con = src.get_jobject_constant();
2468 guarantee(src_con != NULL, "no source constant");
2470 if (src_con->is_null_object()) {
2471 // The constant src object is null - We can skip
2472 // generating the code stub.
2473 gen_pre_barrier = false;
2474 } else {
2475 // Non-null constant source object. We still have to generate
2476 // the slow stub - but we don't need to generate the runtime
2477 // null object check.
2478 gen_source_check = false;
2479 }
2480 }
2481 }
2482 if (gen_pre_barrier && !PatchALot) {
2483 // Can the klass of object be statically determined to be
2484 // a sub-class of Reference?
2485 ciType* type = src.value()->declared_type();
2486 if ((type != NULL) && type->is_loaded()) {
2487 if (type->is_subtype_of(compilation()->env()->Reference_klass())) {
2488 gen_type_check = false;
2489 } else if (type->is_klass() &&
2490 !compilation()->env()->Object_klass()->is_subtype_of(type->as_klass())) {
2491 // Not Reference and not Object klass.
2492 gen_pre_barrier = false;
2493 }
2494 }
2495 }
2497 if (gen_pre_barrier) {
2498 LabelObj* Lcont = new LabelObj();
2500 // We may need to generate one or more runtime checks here. Let's start with
2501 // the offset check.
2502 if (gen_offset_check) {
2503 // if (offset != referent_offset) -> continue
2504 // If offset is an int then we can do the comparison with the
2505 // referent_offset constant; otherwise we need to move
2506 // referent_offset into a temporary register and generate
2507 // a reg-reg compare.
2509 LIR_Opr referent_off;
2511 if (off.type()->is_int()) {
2512 referent_off = LIR_OprFact::intConst(java_lang_ref_Reference::referent_offset);
2513 } else {
2514 assert(off.type()->is_long(), "what else?");
2515 referent_off = new_register(T_LONG);
2516 __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off);
2517 }
2518 #ifndef MIPS64
2519 __ cmp(lir_cond_notEqual, off.result(), referent_off);
2520 __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label());
2521 #else
2522 __ branch(lir_cond_notEqual, off.result(), referent_off, Lcont->label());
2523 #endif
2524 }
2525 if (gen_source_check) {
2526 // offset is a const and equals referent offset
2527 // if (source == null) -> continue
2528 #ifndef MIPS64
2529 __ cmp(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL));
2530 __ branch(lir_cond_equal, T_OBJECT, Lcont->label());
2531 #else
2532 __ branch(lir_cond_equal, src.result(), LIR_OprFact::oopConst(NULL), Lcont->label());
2533 #endif
2534 }
2535 LIR_Opr src_klass = new_register(T_OBJECT);
2536 if (gen_type_check) {
2537 // We have determined that offset == referent_offset && src != null.
2538 // if (src->_klass->_reference_type == REF_NONE) -> continue
2539 __ move(new LIR_Address(src.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), src_klass);
2540 LIR_Address* reference_type_addr = new LIR_Address(src_klass, in_bytes(InstanceKlass::reference_type_offset()), T_BYTE);
2541 LIR_Opr reference_type = new_register(T_INT);
2542 __ move(reference_type_addr, reference_type);
2543 #ifndef MIPS64
2544 __ cmp(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE));
2545 __ branch(lir_cond_equal, T_INT, Lcont->label());
2546 #else
2547 __ branch(lir_cond_equal, reference_type, LIR_OprFact::intConst(REF_NONE), Lcont->label());
2548 #endif
2549 }
2550 {
2551 // We have determined that src->_klass->_reference_type != REF_NONE
2552 // so register the value in the referent field with the pre-barrier.
2553 pre_barrier(LIR_OprFact::illegalOpr /* addr_opr */,
2554 value /* pre_val */,
2555 false /* do_load */,
2556 false /* patch */,
2557 NULL /* info */);
2558 }
2559 __ branch_destination(Lcont->label());
2560 }
2561 }
2562 #endif // INCLUDE_ALL_GCS
2564 if (x->is_volatile() && os::is_MP()) __ membar_acquire();
2565 }
2568 void LIRGenerator::do_UnsafePutObject(UnsafePutObject* x) {
2569 BasicType type = x->basic_type();
2570 LIRItem src(x->object(), this);
2571 LIRItem off(x->offset(), this);
2572 LIRItem data(x->value(), this);
2574 src.load_item();
2575 if (type == T_BOOLEAN || type == T_BYTE) {
2576 data.load_byte_item();
2577 } else {
2578 data.load_item();
2579 }
2580 off.load_item();
2582 set_no_result(x);
2584 if (x->is_volatile() && os::is_MP()) __ membar_release();
2585 put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile());
2586 if (x->is_volatile() && os::is_MP()) __ membar();
2587 }
2590 void LIRGenerator::do_UnsafePrefetch(UnsafePrefetch* x, bool is_store) {
2591 LIRItem src(x->object(), this);
2592 LIRItem off(x->offset(), this);
2594 src.load_item();
2595 if (off.is_constant() && can_inline_as_constant(x->offset())) {
2596 // let it be a constant
2597 off.dont_load_item();
2598 } else {
2599 off.load_item();
2600 }
2602 set_no_result(x);
2604 LIR_Address* addr = generate_address(src.result(), off.result(), 0, 0, T_BYTE);
2605 __ prefetch(addr, is_store);
2606 }
2609 void LIRGenerator::do_UnsafePrefetchRead(UnsafePrefetchRead* x) {
2610 do_UnsafePrefetch(x, false);
2611 }
2614 void LIRGenerator::do_UnsafePrefetchWrite(UnsafePrefetchWrite* x) {
2615 do_UnsafePrefetch(x, true);
2616 }
2619 void LIRGenerator::do_SwitchRanges(SwitchRangeArray* x, LIR_Opr value, BlockBegin* default_sux) {
2620 int lng = x->length();
2622 for (int i = 0; i < lng; i++) {
2623 SwitchRange* one_range = x->at(i);
2624 int low_key = one_range->low_key();
2625 int high_key = one_range->high_key();
2626 BlockBegin* dest = one_range->sux();
2627 if (low_key == high_key) {
2628 #ifndef MIPS64
2629 __ cmp(lir_cond_equal, value, low_key);
2630 __ branch(lir_cond_equal, T_INT, dest);
2631 #else
2632 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2633 #endif
2634 } else if (high_key - low_key == 1) {
2635 #ifndef MIPS64
2636 __ cmp(lir_cond_equal, value, low_key);
2637 __ branch(lir_cond_equal, T_INT, dest);
2638 __ cmp(lir_cond_equal, value, high_key);
2639 __ branch(lir_cond_equal, T_INT, dest);
2640 #else
2641 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(low_key), T_INT, dest);
2642 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2644 #endif
2645 } else {
2646 LabelObj* L = new LabelObj();
2647 #ifndef MIPS64
2648 __ cmp(lir_cond_less, value, low_key);
2649 __ branch(lir_cond_less, T_INT, L->label());
2650 __ cmp(lir_cond_lessEqual, value, high_key);
2651 __ branch(lir_cond_lessEqual, T_INT, dest);
2652 __ branch_destination(L->label());
2653 #else
2654 __ branch(lir_cond_less, value, LIR_OprFact::intConst(low_key), L->label());
2655 __ branch(lir_cond_lessEqual, value, LIR_OprFact::intConst(high_key), T_INT, dest);
2656 __ branch_destination(L->label());
2657 #endif
2658 }
2659 }
2660 __ jump(default_sux);
2661 }
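// For a range [low_key, high_key] -> dest, the general case above emits (sketch):
//
//   if (value < low_key)   goto L;     // below this range: fall through to next
//   if (value <= high_key) goto dest;  // low_key <= value <= high_key
//   L: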
2664 SwitchRangeArray* LIRGenerator::create_lookup_ranges(TableSwitch* x) {
2665 SwitchRangeList* res = new SwitchRangeList();
2666 int len = x->length();
2667 if (len > 0) {
2668 BlockBegin* sux = x->sux_at(0);
2669 int key = x->lo_key();
2670 BlockBegin* default_sux = x->default_sux();
2671 SwitchRange* range = new SwitchRange(key, sux);
2672 for (int i = 0; i < len; i++, key++) {
2673 BlockBegin* new_sux = x->sux_at(i);
2674 if (sux == new_sux) {
2675 // still in same range
2676 range->set_high_key(key);
2677 } else {
2678 // skip tests which explicitly dispatch to the default
2679 if (sux != default_sux) {
2680 res->append(range);
2681 }
2682 range = new SwitchRange(key, new_sux);
2683 }
2684 sux = new_sux;
2685 }
2686 if (res->length() == 0 || res->last() != range) res->append(range);
2687 }
2688 return res;
2689 }
2692 // we expect the keys to be sorted by increasing value
2693 SwitchRangeArray* LIRGenerator::create_lookup_ranges(LookupSwitch* x) {
2694 SwitchRangeList* res = new SwitchRangeList();
2695 int len = x->length();
2696 if (len > 0) {
2697 BlockBegin* default_sux = x->default_sux();
2698 int key = x->key_at(0);
2699 BlockBegin* sux = x->sux_at(0);
2700 SwitchRange* range = new SwitchRange(key, sux);
2701 for (int i = 1; i < len; i++) {
2702 int new_key = x->key_at(i);
2703 BlockBegin* new_sux = x->sux_at(i);
2704 if (key+1 == new_key && sux == new_sux) {
2705 // still in same range
2706 range->set_high_key(new_key);
2707 } else {
2708 // skip tests which explicitly dispatch to the default
2709 if (range->sux() != default_sux) {
2710 res->append(range);
2711 }
2712 range = new SwitchRange(new_key, new_sux);
2713 }
2714 key = new_key;
2715 sux = new_sux;
2716 }
2717 if (res->length() == 0 || res->last() != range) res->append(range);
2718 }
2719 return res;
2720 }
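// A worked example (ours): keys {0->B1, 1->B1, 2->B1, 7->B2} fold into the
// ranges [0,2]->B1 and [7,7]->B2; consecutive keys sharing a successor merge,
// and ranges that dispatch to the default successor are dropped.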
2723 void LIRGenerator::do_TableSwitch(TableSwitch* x) {
2724 LIRItem tag(x->tag(), this);
2725 tag.load_item();
2726 set_no_result(x);
2728 if (x->is_safepoint()) {
2729 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2730 }
2732 // move values into phi locations
2733 move_to_phi(x->state());
2735 int lo_key = x->lo_key();
2736 int hi_key = x->hi_key();
2737 int len = x->length();
2738 LIR_Opr value = tag.result();
2739 if (UseTableRanges) {
2740 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2741 } else {
2742 for (int i = 0; i < len; i++) {
2743 #ifndef MIPS64
2744 __ cmp(lir_cond_equal, value, i + lo_key);
2745 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2746 #else
2747 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(i + lo_key), T_INT, x->sux_at(i));
2748 #endif
2749 }
2750 __ jump(x->default_sux());
2751 }
2752 }
2755 void LIRGenerator::do_LookupSwitch(LookupSwitch* x) {
2756 LIRItem tag(x->tag(), this);
2757 tag.load_item();
2758 set_no_result(x);
2760 if (x->is_safepoint()) {
2761 __ safepoint(safepoint_poll_register(), state_for(x, x->state_before()));
2762 }
2764 // move values into phi locations
2765 move_to_phi(x->state());
2767 LIR_Opr value = tag.result();
2768 if (UseTableRanges) {
2769 do_SwitchRanges(create_lookup_ranges(x), value, x->default_sux());
2770 } else {
2771 int len = x->length();
2772 for (int i = 0; i < len; i++) {
2773 #ifndef MIPS64
2774 __ cmp(lir_cond_equal, value, x->key_at(i));
2775 __ branch(lir_cond_equal, T_INT, x->sux_at(i));
2776 #else
2777 __ branch(lir_cond_equal, value, LIR_OprFact::intConst(x->key_at(i)), T_INT, x->sux_at(i));
2778 #endif
2779 }
2780 __ jump(x->default_sux());
2781 }
2782 }
2785 void LIRGenerator::do_Goto(Goto* x) {
2786 set_no_result(x);
2788 if (block()->next()->as_OsrEntry()) {
2789 // need to free up storage used for OSR entry point
2790 LIR_Opr osrBuffer = block()->next()->operand();
2791 BasicTypeList signature;
2792 signature.append(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); // pass a pointer to osrBuffer
2793 CallingConvention* cc = frame_map()->c_calling_convention(&signature);
2794 __ move(osrBuffer, cc->args()->at(0));
2795 __ call_runtime_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_end),
2796 getThreadTemp(), LIR_OprFact::illegalOpr, cc->args());
2797 }
2799 if (x->is_safepoint()) {
2800 ValueStack* state = x->state_before() ? x->state_before() : x->state();
2802 // increment backedge counter if needed
2803 CodeEmitInfo* info = state_for(x, state);
2804 increment_backedge_counter(info, x->profiled_bci());
2805 CodeEmitInfo* safepoint_info = state_for(x, state);
2806 __ safepoint(safepoint_poll_register(), safepoint_info);
2807 }
2809 // Gotos can be folded Ifs; handle this case.
2810 if (x->should_profile()) {
2811 ciMethod* method = x->profiled_method();
2812 assert(method != NULL, "method should be set if branch is profiled");
2813 ciMethodData* md = method->method_data_or_null();
2814 assert(md != NULL, "Sanity");
2815 ciProfileData* data = md->bci_to_data(x->profiled_bci());
2816 assert(data != NULL, "must have profiling data");
2817 int offset;
2818 if (x->direction() == Goto::taken) {
2819 assert(data->is_BranchData(), "need BranchData for two-way branches");
2820 offset = md->byte_offset_of_slot(data, BranchData::taken_offset());
2821 } else if (x->direction() == Goto::not_taken) {
2822 assert(data->is_BranchData(), "need BranchData for two-way branches");
2823 offset = md->byte_offset_of_slot(data, BranchData::not_taken_offset());
2824 } else {
2825 assert(data->is_JumpData(), "need JumpData for branches");
2826 offset = md->byte_offset_of_slot(data, JumpData::taken_offset());
2827 }
2828 LIR_Opr md_reg = new_register(T_METADATA);
2829 __ metadata2reg(md->constant_encoding(), md_reg);
2831 increment_counter(new LIR_Address(md_reg, offset,
2832 NOT_LP64(T_INT) LP64_ONLY(T_LONG)), DataLayout::counter_increment);
2833 }
2835 // emit phi-instruction move after safepoint since this simplifies
2836 // describing the state at the safepoint.
2837 move_to_phi(x->state());
2839 __ jump(x->default_sux());
2840 }
2842 /**
2843 * Emit profiling code if needed for arguments, parameters, return value types
2844 *
2845 * @param md MDO the code will update at runtime
2846 * @param md_base_offset common offset in the MDO for this profile and subsequent ones
2847 * @param md_offset offset in the MDO (on top of md_base_offset) for this profile
2848 * @param profiled_k current profile
2849 * @param obj IR node for the object to be profiled
2850 * @param mdp register to hold the pointer inside the MDO (md + md_base_offset).
2851 * Set once we find an update to make and use for next ones.
2852 * @param not_null true if we know obj cannot be null
2853 * @param signature_at_call_k signature at call for obj
2854 * @param callee_signature_k signature of the callee for obj; the signature at
2855 * the call site and the callee signature can differ at a method handle call
2856 * @return the only klass we know will ever be seen at this profile point
2857 */
2858 ciKlass* LIRGenerator::profile_type(ciMethodData* md, int md_base_offset, int md_offset, intptr_t profiled_k,
2859 Value obj, LIR_Opr& mdp, bool not_null, ciKlass* signature_at_call_k,
2860 ciKlass* callee_signature_k) {
2861 ciKlass* result = NULL;
2862 bool do_null = !not_null && !TypeEntries::was_null_seen(profiled_k);
2863 bool do_update = !TypeEntries::is_type_unknown(profiled_k);
2864 // known not to be null (or the null bit is already set) and the type is
2865 // already set to unknown: nothing we can do to improve profiling
2866 if (!do_null && !do_update) {
2867 return result;
2868 }
2870 ciKlass* exact_klass = NULL;
2871 Compilation* comp = Compilation::current();
2872 if (do_update) {
2873 // try to find exact type, using CHA if possible, so that loading
2874 // the klass from the object can be avoided
2875 ciType* type = obj->exact_type();
2876 if (type == NULL) {
2877 type = obj->declared_type();
2878 type = comp->cha_exact_type(type);
2879 }
2880 assert(type == NULL || type->is_klass(), "type should be class");
2881 exact_klass = (type != NULL && type->is_loaded()) ? (ciKlass*)type : NULL;
2883 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2884 }
2886 if (!do_null && !do_update) {
2887 return result;
2888 }
2890 ciKlass* exact_signature_k = NULL;
2891 if (do_update) {
2892 // Is the type from the signature exact (the only one possible)?
2893 exact_signature_k = signature_at_call_k->exact_klass();
2894 if (exact_signature_k == NULL) {
2895 exact_signature_k = comp->cha_exact_type(signature_at_call_k);
2896 } else {
2897 result = exact_signature_k;
2898 // Known statically. No need to emit any code: prevent
2899 // LIR_Assembler::emit_profile_type() from emitting useless code
2900 profiled_k = ciTypeEntries::with_status(result, profiled_k);
2901 }
2902 // exact_klass and exact_signature_k can both be non-NULL but
2903 // different if exact_klass is loaded after the ciObject for
2904 // exact_signature_k is created.
2905 if (exact_klass == NULL && exact_signature_k != NULL && exact_klass != exact_signature_k) {
2906 // sometimes the type of the signature is better than the best type
2907 // the compiler has
2908 exact_klass = exact_signature_k;
2909 }
2910 if (callee_signature_k != NULL &&
2911 callee_signature_k != signature_at_call_k) {
2912 ciKlass* improved_klass = callee_signature_k->exact_klass();
2913 if (improved_klass == NULL) {
2914 improved_klass = comp->cha_exact_type(callee_signature_k);
2915 }
2916 if (exact_klass == NULL && improved_klass != NULL && exact_klass != improved_klass) {
2917 exact_klass = exact_signature_k;
2918 }
2919 }
2920 do_update = exact_klass == NULL || ciTypeEntries::valid_ciklass(profiled_k) != exact_klass;
2921 }
2923 if (!do_null && !do_update) {
2924 return result;
2925 }
2927 if (mdp == LIR_OprFact::illegalOpr) {
2928 mdp = new_register(T_METADATA);
2929 __ metadata2reg(md->constant_encoding(), mdp);
2930 if (md_base_offset != 0) {
2931 LIR_Address* base_type_address = new LIR_Address(mdp, md_base_offset, T_ADDRESS);
2932 mdp = new_pointer_register();
2933 __ leal(LIR_OprFact::address(base_type_address), mdp);
2934 }
2935 }
2936 LIRItem value(obj, this);
2937 value.load_item();
2938 __ profile_type(new LIR_Address(mdp, md_offset, T_METADATA),
2939 value.result(), exact_klass, profiled_k, new_pointer_register(), not_null, exact_signature_k != NULL);
2940 return result;
2941 }
2943 // profile parameters on entry to the root of the compilation
2944 void LIRGenerator::profile_parameters(Base* x) {
2945 if (compilation()->profile_parameters()) {
2946 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2947 ciMethodData* md = scope()->method()->method_data_or_null();
2948 assert(md != NULL, "Sanity");
2950 if (md->parameters_type_data() != NULL) {
2951 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
2952 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
2953 LIR_Opr mdp = LIR_OprFact::illegalOpr;
2954 for (int java_index = 0, i = 0, j = 0; j < parameters_type_data->number_of_parameters(); i++) {
2955 LIR_Opr src = args->at(i);
2956 assert(!src->is_illegal(), "check");
2957 BasicType t = src->type();
2958 if (t == T_OBJECT || t == T_ARRAY) {
2959 intptr_t profiled_k = parameters->type(j);
2960 Local* local = x->state()->local_at(java_index)->as_Local();
2961 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
2962 in_bytes(ParametersTypeData::type_offset(j)) - in_bytes(ParametersTypeData::type_offset(0)),
2963 profiled_k, local, mdp, false, local->declared_type()->as_klass(), NULL);
2964 // If the profile is known statically set it once for all and do not emit any code
2965 if (exact != NULL) {
2966 md->set_parameter_type(j, exact);
2967 }
2968 j++;
2969 }
2970 java_index += type2size[t];
2971 }
2972 }
2973 }
2974 }
2976 void LIRGenerator::do_Base(Base* x) {
2977 __ std_entry(LIR_OprFact::illegalOpr);
2978 // Emit moves from physical registers / stack slots to virtual registers
2979 CallingConvention* args = compilation()->frame_map()->incoming_arguments();
2980 IRScope* irScope = compilation()->hir()->top_scope();
2981 int java_index = 0;
2982 for (int i = 0; i < args->length(); i++) {
2983 LIR_Opr src = args->at(i);
2984 assert(!src->is_illegal(), "check");
2985 BasicType t = src->type();
2987 // Types which are smaller than int are passed as int, so
2988 // correct the type that was passed.
2989 switch (t) {
2990 case T_BYTE:
2991 case T_BOOLEAN:
2992 case T_SHORT:
2993 case T_CHAR:
2994 t = T_INT;
2995 break;
2996 }
2998 LIR_Opr dest = new_register(t);
2999 __ move(src, dest);
3001 // Assign new location to Local instruction for this local
3002 Local* local = x->state()->local_at(java_index)->as_Local();
3003 assert(local != NULL, "Locals for incoming arguments must have been created");
3004 #ifndef __SOFTFP__
3005 // The java calling convention passes double as long and float as int.
3006 assert(as_ValueType(t)->tag() == local->type()->tag(), "check");
3007 #endif // __SOFTFP__
3008 local->set_operand(dest);
3009 _instruction_for_operand.at_put_grow(dest->vreg_number(), local, NULL);
3010 java_index += type2size[t];
3011 }
3013 if (compilation()->env()->dtrace_method_probes()) {
3014 BasicTypeList signature;
3015 signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3016 signature.append(T_METADATA); // Method*
3017 LIR_OprList* args = new LIR_OprList();
3018 args->append(getThreadPointer());
3019 LIR_Opr meth = new_register(T_METADATA);
3020 __ metadata2reg(method()->constant_encoding(), meth);
3021 args->append(meth);
3022 call_runtime(&signature, args, CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry), voidType, NULL);
3023 }
3025 if (method()->is_synchronized()) {
3026 LIR_Opr obj;
3027 if (method()->is_static()) {
3028 obj = new_register(T_OBJECT);
3029 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj);
3030 } else {
3031 Local* receiver = x->state()->local_at(0)->as_Local();
3032 assert(receiver != NULL, "must already exist");
3033 obj = receiver->operand();
3034 }
3035 assert(obj->is_valid(), "must be valid");
3037 if (method()->is_synchronized() && GenerateSynchronizationCode) {
3038 LIR_Opr lock = new_register(T_INT);
3039 __ load_stack_address_monitor(0, lock);
3041 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException));
3042 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info);
3044 // receiver is guaranteed non-NULL so don't need CodeEmitInfo
3045 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL);
3046 }
3047 }
3049 // increment invocation counters if needed
3050 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting.
3051 profile_parameters(x);
3052 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false);
3053 increment_invocation_counter(info);
3054 }
3056 // all blocks with a successor must end with an unconditional jump
3057 // to the successor even if they are consecutive
3058 __ jump(x->default_sux());
3059 }
3062 void LIRGenerator::do_OsrEntry(OsrEntry* x) {
3063 // construct our frame and model the production of incoming pointer
3064 // to the OSR buffer.
3065 __ osr_entry(LIR_Assembler::osrBufferPointer());
3066 LIR_Opr result = rlock_result(x);
3067 __ move(LIR_Assembler::osrBufferPointer(), result);
3068 }
3071 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) {
3072 assert(args->length() == arg_list->length(),
3073 err_msg_res("args=%d, arg_list=%d", args->length(), arg_list->length()));
3074 for (int i = x->has_receiver() ? 1 : 0; i < args->length(); i++) {
3075 LIRItem* param = args->at(i);
3076 LIR_Opr loc = arg_list->at(i);
3077 if (loc->is_register()) {
3078 param->load_item_force(loc);
3079 } else {
3080 LIR_Address* addr = loc->as_address_ptr();
3081 param->load_for_store(addr->type());
3082 if (addr->type() == T_OBJECT) {
3083 __ move_wide(param->result(), addr);
3084 } else
3085 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3086 __ unaligned_move(param->result(), addr);
3087 } else {
3088 __ move(param->result(), addr);
3089 }
3090 }
3091 }
3093 if (x->has_receiver()) {
3094 LIRItem* receiver = args->at(0);
3095 LIR_Opr loc = arg_list->at(0);
3096 if (loc->is_register()) {
3097 receiver->load_item_force(loc);
3098 } else {
3099 assert(loc->is_address(), "just checking");
3100 receiver->load_for_store(T_OBJECT);
3101 __ move_wide(receiver->result(), loc->as_address_ptr());
3102 }
3103 }
3104 }
3107 // Visits all arguments, returns appropriate items without loading them
3108 LIRItemList* LIRGenerator::invoke_visit_arguments(Invoke* x) {
3109 LIRItemList* argument_items = new LIRItemList();
3110 if (x->has_receiver()) {
3111 LIRItem* receiver = new LIRItem(x->receiver(), this);
3112 argument_items->append(receiver);
3113 }
3114 for (int i = 0; i < x->number_of_arguments(); i++) {
3115 LIRItem* param = new LIRItem(x->argument_at(i), this);
3116 argument_items->append(param);
3117 }
3118 return argument_items;
3119 }
3122 // The invoke with receiver has the following phases:
3123 // a) traverse and load/lock receiver;
3124 // b) traverse all arguments -> item-array (invoke_visit_argument)
3125 // c) push receiver on stack
3126 // d) load each of the items and push on stack
3127 // e) unlock receiver
3128 // f) move receiver into receiver-register %o0
3129 // g) lock result registers and emit call operation
3130 //
3131 // Before issuing a call, we must spill-save all values on the stack
3132 // that are in caller-save registers. "spill-save" moves those values
3133 // either into a free callee-save register or spills them if no free
3134 // callee-save register is available.
3135 //
3136 // The problem is where to invoke spill-save.
3137 // - if invoked between e) and f), we may lock callee save
3138 // register in "spill-save" that destroys the receiver register
3139 // before f) is executed
3140 // - if we rearrange f) to be earlier (by loading %o0) it
3141 // may destroy a value on the stack that is currently in %o0
3142 // and is waiting to be spilled
3143 // - if we keep the receiver locked while doing spill-save,
3144 // we cannot spill it as it is spill-locked
3145 //
3146 void LIRGenerator::do_Invoke(Invoke* x) {
3147 CallingConvention* cc = frame_map()->java_calling_convention(x->signature(), true);
3149 LIR_OprList* arg_list = cc->args();
3150 LIRItemList* args = invoke_visit_arguments(x);
3151 LIR_Opr receiver = LIR_OprFact::illegalOpr;
3153 // setup result register
3154 LIR_Opr result_register = LIR_OprFact::illegalOpr;
3155 if (x->type() != voidType) {
3156 result_register = result_register_for(x->type());
3157 }
3159 CodeEmitInfo* info = state_for(x, x->state());
3161 invoke_load_arguments(x, args, arg_list);
3163 if (x->has_receiver()) {
3164 args->at(0)->load_item_force(LIR_Assembler::receiverOpr());
3165 receiver = args->at(0)->result();
3166 }
3168 // emit invoke code
3169 bool optimized = x->target_is_loaded() && x->target_is_final();
3170 assert(receiver->is_illegal() || receiver->is_equal(LIR_Assembler::receiverOpr()), "must match");
3172 // JSR 292
3173 // Preserve the SP over MethodHandle call sites, if needed.
3174 ciMethod* target = x->target();
3175 bool is_method_handle_invoke = (// %%% FIXME: Are both of these relevant?
3176 target->is_method_handle_intrinsic() ||
3177 target->is_compiled_lambda_form());
3178 if (is_method_handle_invoke) {
3179 info->set_is_method_handle_invoke(true);
3180 if (FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3181 __ move(FrameMap::stack_pointer(), FrameMap::method_handle_invoke_SP_save_opr());
3182 }
3183 }
3185 switch (x->code()) {
3186 case Bytecodes::_invokestatic:
3187 __ call_static(target, result_register,
3188 SharedRuntime::get_resolve_static_call_stub(),
3189 arg_list, info);
3190 break;
3191 case Bytecodes::_invokespecial:
3192 case Bytecodes::_invokevirtual:
3193 case Bytecodes::_invokeinterface:
3194 // for final target we still produce an inline cache, in order
3195 // to be able to call mixed mode
3196 if (x->code() == Bytecodes::_invokespecial || optimized) {
3197 __ call_opt_virtual(target, receiver, result_register,
3198 SharedRuntime::get_resolve_opt_virtual_call_stub(),
3199 arg_list, info);
3200 } else if (x->vtable_index() < 0) {
3201 __ call_icvirtual(target, receiver, result_register,
3202 SharedRuntime::get_resolve_virtual_call_stub(),
3203 arg_list, info);
3204 } else {
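// Megamorphic/vtable dispatch: entry_offset is the position of the vtable
// entry in words from the start of the InstanceKlass; scaling by wordSize
// and adding the offset of the Method* field within a vtableEntry yields
// the byte offset that call_virtual loads through.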
3205 int entry_offset = InstanceKlass::vtable_start_offset() + x->vtable_index() * vtableEntry::size();
3206 int vtable_offset = entry_offset * wordSize + vtableEntry::method_offset_in_bytes();
3207 __ call_virtual(target, receiver, result_register, vtable_offset, arg_list, info);
3208 }
3209 break;
3210 case Bytecodes::_invokedynamic: {
3211 __ call_dynamic(target, receiver, result_register,
3212 SharedRuntime::get_resolve_static_call_stub(),
3213 arg_list, info);
3214 break;
3215 }
3216 default:
3217 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(x->code())));
3218 break;
3219 }
3221 // JSR 292
3222 // Restore the SP after MethodHandle call sites, if needed.
3223 if (is_method_handle_invoke
3224 && FrameMap::method_handle_invoke_SP_save_opr() != LIR_OprFact::illegalOpr) {
3225 __ move(FrameMap::method_handle_invoke_SP_save_opr(), FrameMap::stack_pointer());
3226 }
3228 if (x->type()->is_float() || x->type()->is_double()) {
3229 // Force rounding of results from non-strictfp when in strictfp
3230 // scope (or when we don't know the strictness of the callee, to
3231 // be safe).
3232 if (method()->is_strict()) {
3233 if (!x->target_is_loaded() || !x->target_is_strictfp()) {
3234 result_register = round_item(result_register);
3235 }
3236 }
3237 }
3239 if (result_register->is_valid()) {
3240 LIR_Opr result = rlock_result(x);
3241 __ move(result_register, result);
3242 }
3243 }
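// The raw bit-conversion intrinsics (intBitsToFloat, floatToRawIntBits,
// longBitsToDouble, doubleToRawLongBits) are lowered by bouncing the value
// through a spill slot: force_to_spill() writes the source bits to memory
// and the following move reloads them with the result's basic type, so the
// bits are reinterpreted rather than numerically converted.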
3246 void LIRGenerator::do_FPIntrinsics(Intrinsic* x) {
3247 assert(x->number_of_arguments() == 1, "wrong type");
3248 LIRItem value (x->argument_at(0), this);
3249 LIR_Opr reg = rlock_result(x);
3250 value.load_item();
3251 LIR_Opr tmp = force_to_spill(value.result(), as_BasicType(x->type()));
3252 __ move(tmp, reg);
3253 }
3257 // Code for: x->x() {x->cond()} x->y() ? x->tval() : x->fval()
3258 void LIRGenerator::do_IfOp(IfOp* x) {
3259 #ifdef ASSERT
3260 {
3261 ValueTag xtag = x->x()->type()->tag();
3262 ValueTag ttag = x->tval()->type()->tag();
3263 assert(xtag == intTag || xtag == objectTag, "cannot handle others");
3264 assert(ttag == addressTag || ttag == intTag || ttag == objectTag || ttag == longTag, "cannot handle others");
3265 assert(ttag == x->fval()->type()->tag(), "cannot handle others");
3266 }
3267 #endif
3269 LIRItem left(x->x(), this);
3270 LIRItem right(x->y(), this);
3271 left.load_item();
3272 if (can_inline_as_constant(right.value())) {
3273 right.dont_load_item();
3274 } else {
3275 right.load_item();
3276 }
3278 LIRItem t_val(x->tval(), this);
3279 LIRItem f_val(x->fval(), this);
3280 t_val.dont_load_item();
3281 f_val.dont_load_item();
3282 LIR_Opr reg = rlock_result(x);
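// Two lowering strategies for the select: platforms with a conditional move
// emit cmp + cmove, while MIPS64, which has no LIR cmove here, synthesizes
// the same effect with a move of the true value, a branch that skips the
// false value when the condition holds, and a move of the false value
// otherwise.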
3284 #ifndef MIPS64
3285 __ cmp(lir_cond(x->cond()), left.result(), right.result());
3286 __ cmove(lir_cond(x->cond()), t_val.result(), f_val.result(), reg, as_BasicType(x->x()->type()));
3287 #else
3288 LIR_Opr opr1 = t_val.result();
3289 LIR_Opr opr2 = f_val.result();
3290 LabelObj* skip = new LabelObj();
3291 __ move(opr1, reg);
3292 __ branch(lir_cond(x->cond()), left.result(), right.result(), skip->label());
3293 __ move(opr2, reg);
3294 __ branch_destination(skip->label());
3295 #endif
3296 }
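// Leaf runtime calls here pass no Java-visible arguments (note the empty
// LIR_OprList) and carry no debug info; the result comes back in the fixed
// return register for the type and is then copied into a fresh virtual
// register as the intrinsic's result.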
3298 void LIRGenerator::do_RuntimeCall(address routine, int expected_arguments, Intrinsic* x) {
3299 assert(x->number_of_arguments() == expected_arguments, "wrong type");
3300 LIR_Opr reg = result_register_for(x->type());
3301 __ call_runtime_leaf(routine, getThreadTemp(),
3302 reg, new LIR_OprList());
3303 LIR_Opr result = rlock_result(x);
3304 __ move(reg, result);
3305 }
3307 #ifdef TRACE_HAVE_INTRINSICS
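// Loads the OS-level thread id of the current thread: first the OSThread*
// out of the JavaThread, then the id field itself, narrowing with l2i when
// the platform stores the id as a 64-bit value.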
3308 void LIRGenerator::do_ThreadIDIntrinsic(Intrinsic* x) {
3309 LIR_Opr thread = getThreadPointer();
3310 LIR_Opr osthread = new_pointer_register();
3311 __ move(new LIR_Address(thread, in_bytes(JavaThread::osthread_offset()), osthread->type()), osthread);
3312 size_t thread_id_size = OSThread::thread_id_size();
3313 if (thread_id_size == (size_t) BytesPerLong) {
3314 LIR_Opr id = new_register(T_LONG);
3315 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_LONG), id);
3316 __ convert(Bytecodes::_l2i, id, rlock_result(x));
3317 } else if (thread_id_size == (size_t) BytesPerInt) {
3318 __ move(new LIR_Address(osthread, in_bytes(OSThread::thread_id_offset()), T_INT), rlock_result(x));
3319 } else {
3320 ShouldNotReachHere();
3321 }
3322 }
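// Reads the klass' trace id for the mirror passed as the argument, tags it
// as in-use by OR-ing in the low bit and storing the word back, then masks
// off the low tag bits (presumably trace epoch/in-use bits) so the caller
// sees the raw id.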
3324 void LIRGenerator::do_ClassIDIntrinsic(Intrinsic* x) {
3325 CodeEmitInfo* info = state_for(x);
3326 CodeEmitInfo* info2 = new CodeEmitInfo(info); // Clone for the second null check
3327 BasicType klass_pointer_type = NOT_LP64(T_INT) LP64_ONLY(T_LONG);
3328 assert(info != NULL, "must have info");
3329 LIRItem arg(x->argument_at(1), this);
3330 arg.load_item();
3331 LIR_Opr klass = new_pointer_register();
3332 __ move(new LIR_Address(arg.result(), java_lang_Class::klass_offset_in_bytes(), klass_pointer_type), klass, info);
3333 LIR_Opr id = new_register(T_LONG);
3334 ByteSize offset = TRACE_ID_OFFSET;
3335 LIR_Address* trace_id_addr = new LIR_Address(klass, in_bytes(offset), T_LONG);
3336 __ move(trace_id_addr, id);
3337 __ logical_or(id, LIR_OprFact::longConst(0x01L), id);
3338 __ store(id, trace_id_addr);
3339 __ logical_and(id, LIR_OprFact::longConst(~0x3L), id);
3340 __ move(id, rlock_result(x));
3341 }
3342 #endif
3344 void LIRGenerator::do_Intrinsic(Intrinsic* x) {
3345 switch (x->id()) {
3346 case vmIntrinsics::_intBitsToFloat :
3347 case vmIntrinsics::_doubleToRawLongBits :
3348 case vmIntrinsics::_longBitsToDouble :
3349 case vmIntrinsics::_floatToRawIntBits : {
3350 do_FPIntrinsics(x);
3351 break;
3352 }
3354 #ifdef TRACE_HAVE_INTRINSICS
3355 case vmIntrinsics::_threadID: do_ThreadIDIntrinsic(x); break;
3356 case vmIntrinsics::_classID: do_ClassIDIntrinsic(x); break;
3357 case vmIntrinsics::_counterTime:
3358 do_RuntimeCall(CAST_FROM_FN_PTR(address, TRACE_TIME_METHOD), 0, x);
3359 break;
3360 #endif
3362 case vmIntrinsics::_currentTimeMillis:
3363 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeMillis), 0, x);
3364 break;
3366 case vmIntrinsics::_nanoTime:
3367 do_RuntimeCall(CAST_FROM_FN_PTR(address, os::javaTimeNanos), 0, x);
3368 break;
3370 case vmIntrinsics::_Object_init: do_RegisterFinalizer(x); break;
3371 case vmIntrinsics::_isInstance: do_isInstance(x); break;
3372 case vmIntrinsics::_getClass: do_getClass(x); break;
3373 case vmIntrinsics::_currentThread: do_currentThread(x); break;
3375 case vmIntrinsics::_dlog: // fall through
3376 case vmIntrinsics::_dlog10: // fall through
3377 case vmIntrinsics::_dabs: // fall through
3378 case vmIntrinsics::_dsqrt: // fall through
3379 case vmIntrinsics::_dtan: // fall through
3380 case vmIntrinsics::_dsin : // fall through
3381 case vmIntrinsics::_dcos : // fall through
3382 case vmIntrinsics::_dexp : // fall through
3383 case vmIntrinsics::_dpow : do_MathIntrinsic(x); break;
3384 case vmIntrinsics::_arraycopy: do_ArrayCopy(x); break;
3386 // java.nio.Buffer.checkIndex
3387 case vmIntrinsics::_checkIndex: do_NIOCheckIndex(x); break;
3389 case vmIntrinsics::_compareAndSwapObject:
3390 do_CompareAndSwap(x, objectType);
3391 break;
3392 case vmIntrinsics::_compareAndSwapInt:
3393 do_CompareAndSwap(x, intType);
3394 break;
3395 case vmIntrinsics::_compareAndSwapLong:
3396 do_CompareAndSwap(x, longType);
3397 break;
3399 case vmIntrinsics::_loadFence :
3400 if (os::is_MP()) __ membar_acquire();
3401 break;
3402 case vmIntrinsics::_storeFence:
3403 if (os::is_MP()) __ membar_release();
3404 break;
3405 case vmIntrinsics::_fullFence :
3406 if (os::is_MP()) __ membar();
3407 break;
3409 case vmIntrinsics::_Reference_get:
3410 do_Reference_get(x);
3411 break;
3413 case vmIntrinsics::_updateCRC32:
3414 case vmIntrinsics::_updateBytesCRC32:
3415 case vmIntrinsics::_updateByteBufferCRC32:
3416 do_update_CRC32(x);
3417 break;
3419 default: ShouldNotReachHere(); break;
3420 }
3421 }
3423 void LIRGenerator::profile_arguments(ProfileCall* x) {
3424 if (compilation()->profile_arguments()) {
3425 int bci = x->bci_of_invoke();
3426 ciMethodData* md = x->method()->method_data_or_null();
3427 ciProfileData* data = md->bci_to_data(bci);
3428 if ((data->is_CallTypeData() && data->as_CallTypeData()->has_arguments()) ||
3429 (data->is_VirtualCallTypeData() && data->as_VirtualCallTypeData()->has_arguments())) {
3430 ByteSize extra = data->is_CallTypeData() ? CallTypeData::args_data_offset() : VirtualCallTypeData::args_data_offset();
3431 int base_offset = md->byte_offset_of_slot(data, extra);
3432 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3433 ciTypeStackSlotEntries* args = data->is_CallTypeData() ? ((ciCallTypeData*)data)->args() : ((ciVirtualCallTypeData*)data)->args();
3435 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3436 int start = 0;
3437 int stop = data->is_CallTypeData() ? ((ciCallTypeData*)data)->number_of_arguments() : ((ciVirtualCallTypeData*)data)->number_of_arguments();
3438 if (x->callee()->is_loaded() && x->callee()->is_static() && Bytecodes::has_receiver(bc)) {
3439 // first argument is not profiled at call (method handle invoke)
3440 assert(x->method()->raw_code_at_bci(bci) == Bytecodes::_invokehandle, "invokehandle expected");
3441 start = 1;
3442 }
3443 ciSignature* callee_signature = x->callee()->signature();
3444 // method handle call to virtual method
3445 bool has_receiver = x->callee()->is_loaded() && !x->callee()->is_static() && !Bytecodes::has_receiver(bc);
3446 ciSignatureStream callee_signature_stream(callee_signature, has_receiver ? x->callee()->holder() : NULL);
3448 bool ignored_will_link;
3449 ciSignature* signature_at_call = NULL;
3450 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3451 ciSignatureStream signature_at_call_stream(signature_at_call);
3453 // if called through method handle invoke, some arguments may have been popped
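// base_offset already points at the start of the args data area, so each
// entry's offset below is taken relative to args_data_offset().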
3454 for (int i = 0; i < stop && i+start < x->nb_profiled_args(); i++) {
3455 int off = in_bytes(TypeEntriesAtCall::argument_type_offset(i)) - in_bytes(TypeEntriesAtCall::args_data_offset());
3456 ciKlass* exact = profile_type(md, base_offset, off,
3457 args->type(i), x->profiled_arg_at(i+start), mdp,
3458 !x->arg_needs_null_check(i+start),
3459 signature_at_call_stream.next_klass(), callee_signature_stream.next_klass());
3460 if (exact != NULL) {
3461 md->set_argument_type(bci, i, exact);
3462 }
3463 }
3464 } else {
3465 #ifdef ASSERT
3466 Bytecodes::Code code = x->method()->raw_code_at_bci(x->bci_of_invoke());
3467 int n = x->nb_profiled_args();
3468 assert(MethodData::profile_parameters() && (MethodData::profile_arguments_jsr292_only() ||
3469 (x->inlined() && ((code == Bytecodes::_invokedynamic && n <= 1) || (code == Bytecodes::_invokehandle && n <= 2)))),
3470 "only at JSR292 bytecodes");
3471 #endif
3472 }
3473 }
3474 }
3476 // profile parameters on entry to an inlined method
3477 void LIRGenerator::profile_parameters_at_call(ProfileCall* x) {
3478 if (compilation()->profile_parameters() && x->inlined()) {
3479 ciMethodData* md = x->callee()->method_data_or_null();
3480 if (md != NULL) {
3481 ciParametersTypeData* parameters_type_data = md->parameters_type_data();
3482 if (parameters_type_data != NULL) {
3483 ciTypeStackSlotEntries* parameters = parameters_type_data->parameters();
3484 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3485 bool has_receiver = !x->callee()->is_static();
3486 ciSignature* sig = x->callee()->signature();
3487 ciSignatureStream sig_stream(sig, has_receiver ? x->callee()->holder() : NULL);
3488 int i = 0; // to iterate on the Instructions
3489 Value arg = x->recv();
3490 bool not_null = false;
3491 int bci = x->bci_of_invoke();
3492 Bytecodes::Code bc = x->method()->java_code_at_bci(bci);
3493 // The first parameter is the receiver, so that's what we start
3494 // with if it exists. One exception is a method handle call to a
3495 // virtual method: there the receiver is in the args list
3496 if (arg == NULL || !Bytecodes::has_receiver(bc)) {
3497 i = 1;
3498 arg = x->profiled_arg_at(0);
3499 not_null = !x->arg_needs_null_check(0);
3500 }
3501 int k = 0; // to iterate on the profile data
3502 for (;;) {
3503 intptr_t profiled_k = parameters->type(k);
3504 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(parameters_type_data, ParametersTypeData::type_offset(0)),
3505 in_bytes(ParametersTypeData::type_offset(k)) - in_bytes(ParametersTypeData::type_offset(0)),
3506 profiled_k, arg, mdp, not_null, sig_stream.next_klass(), NULL);
3507 // If the profile is known statically, set it once and for all and do not emit any code
3508 if (exact != NULL) {
3509 md->set_parameter_type(k, exact);
3510 }
3511 k++;
3512 if (k >= parameters_type_data->number_of_parameters()) {
3513 #ifdef ASSERT
3514 int extra = 0;
3515 if (MethodData::profile_arguments() && TypeProfileParmsLimit != -1 &&
3516 x->nb_profiled_args() >= TypeProfileParmsLimit &&
3517 x->recv() != NULL && Bytecodes::has_receiver(bc)) {
3518 extra += 1;
3519 }
3520 assert(i == x->nb_profiled_args() - extra || (TypeProfileParmsLimit != -1 && TypeProfileArgsLimit > TypeProfileParmsLimit), "unused parameters?");
3521 #endif
3522 break;
3523 }
3524 arg = x->profiled_arg_at(i);
3525 not_null = !x->arg_needs_null_check(i);
3526 i++;
3527 }
3528 }
3529 }
3530 }
3531 }
3533 void LIRGenerator::do_ProfileCall(ProfileCall* x) {
3534 // Need recv in a temporary register so it interferes with the other temporaries
3535 LIR_Opr recv = LIR_OprFact::illegalOpr;
3536 LIR_Opr mdo = new_register(T_OBJECT);
3537 // tmp is used to hold the counters on SPARC
3538 LIR_Opr tmp = new_pointer_register();
3540 if (x->nb_profiled_args() > 0) {
3541 profile_arguments(x);
3542 }
3544 // profile parameters on inlined method entry including receiver
3545 if (x->recv() != NULL || x->nb_profiled_args() > 0) {
3546 profile_parameters_at_call(x);
3547 }
3549 if (x->recv() != NULL) {
3550 LIRItem value(x->recv(), this);
3551 value.load_item();
3552 recv = new_register(T_OBJECT);
3553 __ move(value.result(), recv);
3554 }
3555 __ profile_call(x->method(), x->bci_of_invoke(), x->callee(), mdo, recv, tmp, x->known_holder());
3556 }
3558 void LIRGenerator::do_ProfileReturnType(ProfileReturnType* x) {
3559 int bci = x->bci_of_invoke();
3560 ciMethodData* md = x->method()->method_data_or_null();
3561 ciProfileData* data = md->bci_to_data(bci);
3562 assert(data->is_CallTypeData() || data->is_VirtualCallTypeData(), "wrong profile data type");
3563 ciReturnTypeEntry* ret = data->is_CallTypeData() ? ((ciCallTypeData*)data)->ret() : ((ciVirtualCallTypeData*)data)->ret();
3564 LIR_Opr mdp = LIR_OprFact::illegalOpr;
3566 bool ignored_will_link;
3567 ciSignature* signature_at_call = NULL;
3568 x->method()->get_method_at_bci(bci, ignored_will_link, &signature_at_call);
3570 // The offset within the MDO of the entry to update may be too large
3571 // to be used in load/store instructions on some platforms. So have
3572 // profile_type() compute the address of the profile in a register.
3573 ciKlass* exact = profile_type(md, md->byte_offset_of_slot(data, ret->type_offset()), 0,
3574 ret->type(), x->ret(), mdp,
3575 !x->needs_null_check(),
3576 signature_at_call->return_type()->as_klass(),
3577 x->callee()->signature()->return_type()->as_klass());
3578 if (exact != NULL) {
3579 md->set_return_type(bci, exact);
3580 }
3581 }
3583 void LIRGenerator::do_ProfileInvoke(ProfileInvoke* x) {
3584 // We can safely ignore accessors here, since c2 will inline them anyway;
3585 // accessors are also always mature.
3586 if (!x->inlinee()->is_accessor()) {
3587 CodeEmitInfo* info = state_for(x, x->state(), true);
3588 // Notify the runtime very infrequently only to take care of counter overflows
3589 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true);
3590 }
3591 }
3593 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) {
3594 int freq_log = 0;
3595 int level = compilation()->env()->comp_level();
3596 if (level == CompLevel_limited_profile) {
3597 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog);
3598 } else if (level == CompLevel_full_profile) {
3599 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog);
3600 } else {
3601 ShouldNotReachHere();
3602 }
3603 // Increment the appropriate invocation/backedge counter and notify the runtime.
3604 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true);
3605 }
3607 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
3608 ciMethod *method, int frequency,
3609 int bci, bool backedge, bool notify) {
3610 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be 2^x - 1 or 0");
3611 int level = _compilation->env()->comp_level();
3612 assert(level > CompLevel_simple, "Shouldn't be here");
3614 int offset = -1;
3615 LIR_Opr counter_holder = NULL;
3616 if (level == CompLevel_limited_profile) {
3617 MethodCounters* counters_adr = method->ensure_method_counters();
3618 if (counters_adr == NULL) {
3619 bailout("method counters allocation failed");
3620 return;
3621 }
3622 counter_holder = new_pointer_register();
3623 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
3624 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
3625 MethodCounters::invocation_counter_offset());
3626 } else if (level == CompLevel_full_profile) {
3627 counter_holder = new_register(T_METADATA);
3628 offset = in_bytes(backedge ? MethodData::backedge_counter_offset() :
3629 MethodData::invocation_counter_offset());
3630 ciMethodData* md = method->method_data_or_null();
3631 assert(md != NULL, "Sanity");
3632 __ metadata2reg(md->constant_encoding(), counter_holder);
3633 } else {
3634 ShouldNotReachHere();
3635 }
3636 LIR_Address* counter = new LIR_Address(counter_holder, offset, T_INT);
3637 LIR_Opr result = new_register(T_INT);
3638 __ load(counter, result);
3639 __ add(result, LIR_OprFact::intConst(InvocationCounter::count_increment), result);
3640 __ store(result, counter);
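// Notification uses a mask test rather than a compare: frequency is
// 2^n - 1, so AND-ing the bumped counter with (frequency << count_shift)
// is zero exactly once every frequency + 1 increments, and only then do we
// branch to the overflow stub that notifies the runtime. For example, with
// freq_log = 10 the frequency is 1023 and the stub runs on every 1024th
// increment.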
3641 if (notify) {
3642 LIR_Opr mask = load_immediate(frequency << InvocationCounter::count_shift, T_INT);
3643 LIR_Opr meth = new_register(T_METADATA);
3644 __ metadata2reg(method->constant_encoding(), meth);
3645 __ logical_and(result, mask, result);
3646 #ifndef MIPS64
3647 __ cmp(lir_cond_equal, result, LIR_OprFact::intConst(0));
3648 #endif
3649 // The bci for info can point to the cmp of an if; we want the bci of the if itself
3650 CodeStub* overflow = new CounterOverflowStub(info, bci, meth);
3651 #ifndef MIPS64
3652 __ branch(lir_cond_equal, T_INT, overflow);
3653 #else
3654 __ branch(lir_cond_equal, result, LIR_OprFact::intConst(0), T_INT, overflow);
3655 #endif
3656 __ branch_destination(overflow->continuation());
3657 }
3658 }
3660 void LIRGenerator::do_RuntimeCall(RuntimeCall* x) {
3661 LIR_OprList* args = new LIR_OprList(x->number_of_arguments());
3662 BasicTypeList* signature = new BasicTypeList(x->number_of_arguments());
3664 if (x->pass_thread()) {
3665 signature->append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
3666 args->append(getThreadPointer());
3667 }
3669 for (int i = 0; i < x->number_of_arguments(); i++) {
3670 Value a = x->argument_at(i);
3671 LIRItem* item = new LIRItem(a, this);
3672 item->load_item();
3673 args->append(item->result());
3674 signature->append(as_BasicType(a->type()));
3675 }
3677 LIR_Opr result = call_runtime(signature, args, x->entry(), x->type(), NULL);
3678 if (x->type() == voidType) {
3679 set_no_result(x);
3680 } else {
3681 __ move(result, rlock_result(x));
3682 }
3683 }
3685 #ifdef ASSERT
3686 void LIRGenerator::do_Assert(Assert *x) {
3687 ValueTag tag = x->x()->type()->tag();
3688 If::Condition cond = x->cond();
3690 LIRItem xitem(x->x(), this);
3691 LIRItem yitem(x->y(), this);
3692 LIRItem* xin = &xitem;
3693 LIRItem* yin = &yitem;
3695 assert(tag == intTag, "Only integer assertions are valid!");
3697 xin->load_item();
3698 yin->dont_load_item();
3700 set_no_result(x);
3702 LIR_Opr left = xin->result();
3703 LIR_Opr right = yin->result();
3705 __ lir_assert(lir_cond(x->cond()), left, right, x->message(), true);
3706 }
3707 #endif
3709 void LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) {
3712 Instruction *a = x->x();
3713 Instruction *b = x->y();
3714 if (!a || StressRangeCheckElimination) {
3715 assert(!b || StressRangeCheckElimination, "B must also be null");
3717 CodeEmitInfo *info = state_for(x, x->state());
3718 CodeStub* stub = new PredicateFailedStub(info);
3720 __ jump(stub);
3721 } else if (a->type()->as_IntConstant() && b->type()->as_IntConstant()) {
3722 int a_int = a->type()->as_IntConstant()->value();
3723 int b_int = b->type()->as_IntConstant()->value();
3725 bool ok = false;
3727 switch(x->cond()) {
3728 case Instruction::eql: ok = (a_int == b_int); break;
3729 case Instruction::neq: ok = (a_int != b_int); break;
3730 case Instruction::lss: ok = (a_int < b_int); break;
3731 case Instruction::leq: ok = (a_int <= b_int); break;
3732 case Instruction::gtr: ok = (a_int > b_int); break;
3733 case Instruction::geq: ok = (a_int >= b_int); break;
3734 case Instruction::aeq: ok = ((unsigned int)a_int >= (unsigned int)b_int); break;
3735 case Instruction::beq: ok = ((unsigned int)a_int <= (unsigned int)b_int); break;
3736 default: ShouldNotReachHere();
3737 }
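// Both operands are compile-time constants, so the predicate is evaluated
// right here: if the condition is known to hold, the guarded check always
// fails and an unconditional jump to the deoptimization stub is emitted;
// otherwise no code is needed at all.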
3739 if (ok) {
3741 CodeEmitInfo *info = state_for(x, x->state());
3742 CodeStub* stub = new PredicateFailedStub(info);
3744 __ jump(stub);
3745 }
3746 } else {
3748 ValueTag tag = x->x()->type()->tag();
3749 If::Condition cond = x->cond();
3750 LIRItem xitem(x->x(), this);
3751 LIRItem yitem(x->y(), this);
3752 LIRItem* xin = &xitem;
3753 LIRItem* yin = &yitem;
3755 assert(tag == intTag, "Only integer deoptimizations are valid!");
3757 xin->load_item();
3758 yin->dont_load_item();
3759 set_no_result(x);
3761 LIR_Opr left = xin->result();
3762 LIR_Opr right = yin->result();
3764 CodeEmitInfo *info = state_for(x, x->state());
3765 CodeStub* stub = new PredicateFailedStub(info);
3767 #ifndef MIPS64
3768 __ cmp(lir_cond(cond), left, right);
3769 __ branch(lir_cond(cond), right->type(), stub);
3770 #else
3771 tty->print_cr("LIRGenerator::do_RangeCheckPredicate(RangeCheckPredicate *x) not yet implemented on MIPS64!");
3772 Unimplemented();
3773 #endif
3774 }
3775 }
3778 LIR_Opr LIRGenerator::call_runtime(Value arg1, address entry, ValueType* result_type, CodeEmitInfo* info) {
3779 LIRItemList args(1);
3780 LIRItem value(arg1, this);
3781 args.append(&value);
3782 BasicTypeList signature;
3783 signature.append(as_BasicType(arg1->type()));
3785 return call_runtime(&signature, &args, entry, result_type, info);
3786 }
3789 LIR_Opr LIRGenerator::call_runtime(Value arg1, Value arg2, address entry, ValueType* result_type, CodeEmitInfo* info) {
3790 LIRItemList args(2);
3791 LIRItem value1(arg1, this);
3792 LIRItem value2(arg2, this);
3793 args.append(&value1);
3794 args.append(&value2);
3795 BasicTypeList signature;
3796 signature.append(as_BasicType(arg1->type()));
3797 signature.append(as_BasicType(arg2->type()));
3799 return call_runtime(&signature, &args, entry, result_type, info);
3800 }
3803 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIR_OprList* args,
3804 address entry, ValueType* result_type, CodeEmitInfo* info) {
3805 // get a result register
3806 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3807 LIR_Opr result = LIR_OprFact::illegalOpr;
3808 if (result_type->tag() != voidTag) {
3809 result = new_register(result_type);
3810 phys_reg = result_register_for(result_type);
3811 }
3813 // move the arguments into the correct location
3814 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3815 assert(cc->length() == args->length(), "argument mismatch");
3816 for (int i = 0; i < args->length(); i++) {
3817 LIR_Opr arg = args->at(i);
3818 LIR_Opr loc = cc->at(i);
3819 if (loc->is_register()) {
3820 __ move(arg, loc);
3821 } else {
3822 LIR_Address* addr = loc->as_address_ptr();
3823 // if (!can_store_as_constant(arg)) {
3824 // LIR_Opr tmp = new_register(arg->type());
3825 // __ move(arg, tmp);
3826 // arg = tmp;
3827 // }
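// Outgoing 64-bit stack arguments may not be 8-byte aligned on 32-bit
// platforms, so longs and doubles go through unaligned_move.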
3828 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3829 __ unaligned_move(arg, addr);
3830 } else {
3831 __ move(arg, addr);
3832 }
3833 }
3834 }
3836 if (info) {
3837 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3838 } else {
3839 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3840 }
3841 if (result->is_valid()) {
3842 __ move(phys_reg, result);
3843 }
3844 return result;
3845 }
3848 LIR_Opr LIRGenerator::call_runtime(BasicTypeArray* signature, LIRItemList* args,
3849 address entry, ValueType* result_type, CodeEmitInfo* info) {
3850 // get a result register
3851 LIR_Opr phys_reg = LIR_OprFact::illegalOpr;
3852 LIR_Opr result = LIR_OprFact::illegalOpr;
3853 if (result_type->tag() != voidTag) {
3854 result = new_register(result_type);
3855 phys_reg = result_register_for(result_type);
3856 }
3858 // move the arguments into the correct location
3859 CallingConvention* cc = frame_map()->c_calling_convention(signature);
3861 assert(cc->length() == args->length(), "argument mismatch");
3862 for (int i = 0; i < args->length(); i++) {
3863 LIRItem* arg = args->at(i);
3864 LIR_Opr loc = cc->at(i);
3865 if (loc->is_register()) {
3866 arg->load_item_force(loc);
3867 } else {
3868 LIR_Address* addr = loc->as_address_ptr();
3869 arg->load_for_store(addr->type());
3870 if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
3871 __ unaligned_move(arg->result(), addr);
3872 } else {
3873 __ move(arg->result(), addr);
3874 }
3875 }
3876 }
3878 if (info) {
3879 __ call_runtime(entry, getThreadTemp(), phys_reg, cc->args(), info);
3880 } else {
3881 __ call_runtime_leaf(entry, getThreadTemp(), phys_reg, cc->args());
3882 }
3883 if (result->is_valid()) {
3884 __ move(phys_reg, result);
3885 }
3886 return result;
3887 }
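// Memory barriers only matter when other processors can observe the
// ordering, so on uniprocessor systems (os::is_MP() false) no fence
// instructions are emitted at all.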
3889 void LIRGenerator::do_MemBar(MemBar* x) {
3890 if (os::is_MP()) {
3891 LIR_Code code = x->code();
3892 switch(code) {
3893 case lir_membar_acquire : __ membar_acquire(); break;
3894 case lir_membar_release : __ membar_release(); break;
3895 case lir_membar : __ membar(); break;
3896 case lir_membar_loadload : __ membar_loadload(); break;
3897 case lir_membar_storestore: __ membar_storestore(); break;
3898 case lir_membar_loadstore : __ membar_loadstore(); break;
3899 case lir_membar_storeload : __ membar_storeload(); break;
3900 default : ShouldNotReachHere(); break;
3901 }
3902 }
3903 }
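// bastore operates on both byte[] and boolean[], so when the static type of
// the array is unknown a stored value may need to be canonicalized to 0 or 1.
// The klass' layout helper differs between byte[] and boolean[] in exactly
// one bit (the diffbit); it is tested at runtime and the masked value is
// selected only when the array really is a boolean[].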
3905 LIR_Opr LIRGenerator::maybe_mask_boolean(StoreIndexed* x, LIR_Opr array, LIR_Opr value, CodeEmitInfo*& null_check_info) {
3906 if (x->check_boolean()) {
3907 LIR_Opr value_fixed = rlock_byte(T_BYTE);
3908 if (TwoOperandLIRForm) {
3909 __ move(value, value_fixed);
3910 __ logical_and(value_fixed, LIR_OprFact::intConst(1), value_fixed);
3911 } else {
3912 __ logical_and(value, LIR_OprFact::intConst(1), value_fixed);
3913 }
3914 LIR_Opr klass = new_register(T_METADATA);
3915 __ move(new LIR_Address(array, oopDesc::klass_offset_in_bytes(), T_ADDRESS), klass, null_check_info);
3916 null_check_info = NULL;
3917 LIR_Opr layout = new_register(T_INT);
3918 __ move(new LIR_Address(klass, in_bytes(Klass::layout_helper_offset()), T_INT), layout);
3919 int diffbit = Klass::layout_helper_boolean_diffbit();
3920 __ logical_and(layout, LIR_OprFact::intConst(diffbit), layout);
3921 __ cmp(lir_cond_notEqual, layout, LIR_OprFact::intConst(0));
3922 __ cmove(lir_cond_notEqual, value_fixed, value, value_fixed, T_BYTE);
3923 value = value_fixed;
3924 }
3925 return value;
3926 }